def watcher_db_info(self, event, true_path):
    """ZooKeeper watcher on the db_info root.

    Registers data watches for newly appeared database nodes and closes
    pooled connections for databases whose nodes were removed, then
    remembers the current child list in self.dbs.
    """
    self.reload_zkdbinfo()
    dbs = self.get_path(true_path, child=True)
    dbs = dbs.keys() if dbs else []
    # 1.watch nodes when db_info nodes are new
    newdbs = list(set(dbs) - set(self.dbs))
    for db in newdbs:
        path_authed_ip = os.path.join(ZKConf.ZK_PATH_DB, db, ZKConf.KEY_IP)
        # 2. listen /database/db_info/xxx/authed_ips
        # reset zkdbinfo
        self.zk.watch_node(path_authed_ip, self.watcher_authed_ip)
        path_dbconf = os.path.join(ZKConf.ZK_PATH_DB, db, ZKConf.KEY_DBCONF)
        # 3. listen /database/db_info/xxx/dbconf/db_host_r
        # reset /database/db_info/xxx/dbconf/db_host_w
        # reset zkdbinfo
        # reset busy_proobj rwclient connections
        # reset idle_rwclient connections
        self.zk.watch_node(os.path.join(path_dbconf, ZKConf.KEY_READ), self.watcher_rw_ip)
        self.zk.watch_node(os.path.join(path_dbconf, ZKConf.KEY_WRITE), self.watcher_rw_ip)
    # 2.close connections when db_info nodes are deleted
    diff = list(set(self.dbs) - set(dbs))
    utils.log(utils.cur(), self.dbs, dbs, diff)
    for db in diff:
        if db in self.servers:
            # close every pooled backend connection for the removed db
            self.servers[db].close_conns()
    self.dbs = dbs
def incremental_copy_tree(src, dst, match_pattern=None, ignore_pattern=None, debug=False):
    """copy tree with incremental:
    0. ignore files in destination if not in source.
    1. over write files in destination if existed in source.

    :param src: source directory path (string)
    :param dst: destination directory path (string)
    :param match_pattern: regex; when given, only matching filenames are copied
    :param ignore_pattern: regex; matching filenames are skipped
    :param debug: when True, `log` prints progress
    """
    source_dir = Path(src)
    for item in source_dir.glob('**/*'):
        if item.is_file() and should_copy_file(item.name, ignore_pattern, match_pattern, debug):
            from_name = str(item)
            # NOTE(review): replace() substitutes every occurrence of `src`
            # in the path, not only the prefix — confirm src never repeats.
            target_name = from_name.replace(src, dst)
            log('Copy: {} => {}'.format(item, target_name), debug)
            if exists(target_name):
                os.remove(target_name)
            target_dir = dirname(target_name)
            if not exists(target_dir):
                # fixed: os.mkdir fails when intermediate directories are
                # missing; glob('**/*') yields arbitrarily deep files.
                os.makedirs(target_dir)
            copy2(from_name, target_name)
def _zookeeperAuth(self, auth):
    """Check the connecting client's IP against the authed-ips list stored
    in ZooKeeper for auth["database"]; returns True/False (False also on
    any internal error). Writes an IP_RESTRICTED error packet when denied.
    """
    is_legal = True
    """
    1. check whether client IP is authorized
    """
    try:
        ippath = os.path.join(ZKConf.ZK_PATH_DB, auth["database"], ZKConf.KEY_IP)
        # utils.log(utils.cur(), ippath)
        is_legal = self.factory.get_path(ippath)
        # utils.log(utils.cur(), is_legal)
        if is_legal is not False:
            # get_path returned the authed-ips JSON blob, not a verdict
            ip_json = is_legal
            is_legal = True
            # utils.log(utils.cur(), ip_json)
            try:
                self.ips = ip_helper.IpRangeList(*tuple(cjson.decode(ip_json)))
            except:
                # unparsable list => fall back to factory-level whitelist only
                self.ips = None
            peer = self.transport.getPeer()
            utils.log(utils.cur(), peer, self.ips, self.factory.ips)
            # allow when either the per-db or the factory-wide list matches
            if not ((self.ips and peer.host in self.ips) or (self.factory.ips and peer.host in self.factory.ips)):
                is_legal = False
                ip_error = dict(ErrorCode.IP_RESTRICTED)
                ip_error["message"] = ip_error["message"] % {"ip":peer.host}
                self._write(self._goWrong(ip_error, self.next_idx))
        # 2
        return is_legal
    except Exception, err:
        utils.err(utils.cur(), traceback.format_exc())
        is_legal = False
def _trans(cur, *args):
    # Sync every ZK db node into the MySQL dbinfo table inside one
    # transaction. Closure variables from the enclosing scope:
    # `dbnodes` (ZK children), `path` (column -> ZK leaf map), `root`,
    # and `self` (owner with a .zk handle).
    def _exec(data, record):
        # UPDATE the existing row for `dbnode` when one exists,
        # otherwise INSERT a fresh row built from `record`.
        if len(data):
            sql = ",".join(["`%s`='%s'" % (k,v) for k,v in record.items()])
            sql = "UPDATE `%s` SET %s WHERE `dbnode`='%s'" % (DBConf.TABLE_DBINFO, sql, dbnode)
            utils.log(utils.cur(), sql)
            cur.execute(sql)
        else:
            record["dbnode"] = dbnode
            keys = ",".join(["`%s`" % k for k in record.keys()])
            vals = ",".join(["'%%(%s)s'" % v for v in record.keys()])
            vals = vals % record
            sql = "INSERT INTO `%s` (%s) VALUES (%s)" % (DBConf.TABLE_DBINFO, keys, vals)
            utils.log(utils.cur(), sql)
            cur.execute(sql)
    cur.execute("START TRANSACTION")
    for dbnode in dbnodes:
        if not dbnode:
            continue
        record = dict()
        # collect each mapped ZK leaf value into the row dict
        for key, value in path.items():
            node = os.path.join(root, dbnode, value)
            if self.zk.exists(node, None):
                (data, meta) = self.zk.get(node, None)
                record[key] = data
        cur.execute("SELECT * FROM `%s` WHERE `dbnode` = '%s'" % (DBConf.TABLE_DBINFO, dbnode))
        dbinfo = cur.fetchall()
        _exec(dbinfo, record)
    # delete records that have been already deleted
    sql = "DELETE FROM `%s` WHERE `dbnode` not in (%s) " % (DBConf.TABLE_DBINFO, ",".join(["'"+d+"'" for d in dbnodes]))
    utils.log(utils.cur(), sql)
    cur.execute(sql)
    cur.execute("COMMIT")
def unpack_database(packet, idx=0):
    """Decode a COM_INIT_DB packet.

    Returns the requested database name for COM_INIT_DB packets,
    otherwise returns the raw command tag.
    """
    idx = skip_header()
    tag, idx = unpack_int8(packet, idx)
    if tag != Command.COM_INIT_DB:
        return tag
    dbname, idx = unpack_string(packet, len(packet) - idx, idx)
    utils.log(utils.cur(), dbname)
    return dbname
def unpack_database(packet, idx = 0):
    """Parse the command byte of `packet`; for COM_INIT_DB return the
    database name payload, otherwise return the command tag itself."""
    idx = skip_header()
    tag, idx = unpack_int8(packet, idx)
    if tag != Command.COM_INIT_DB:
        return tag
    name, idx = unpack_string(packet, len(packet) - idx, idx)
    utils.log(utils.cur(), name)
    return name
def verify(self, dbtype, forceOk, proxyconns, busyobj, idx):
    """Validate the tokenized SQL against the whitelist and decide routing.

    Returns self.tuple(use_master, tag, msg, opts) where use_master is
    True/False for routing, or the string "err" with an error dict when
    the statement is forbidden.
    """
    tag = SQLState.SQL_NORMAL
    if self.sql and len(self.tokens) == 0:
        self.tokenize()
    if not len(self.tokens):
        # nothing tokenized => reject
        err_dict = dict(ErrorCode.SQL_FORBIDDEN)
        return self.tuple("err", tag, err_dict, self.opts)
    use_master = False
    first_token, first_token_id = self.tokens[0]
    utils.log(utils.cur(), first_token, first_token_id, self.tokens)
    if first_token_id == sce_token.TK_SQL_SELECT:
        # when self.tokens[1:] is None, use_master is still False
        for token, token_id in self.tokens[1:]:
            # select last_insert_id(); or select row_count();
            if token_id == sce_token.TK_FUNCTION and token in ("LAST_INSERT_ID", "ROW_COUNT"):
                use_master = True
                break
            # select @@insert_id;
            elif token_id == sce_token.TK_LITERAL and token in ("@@INSERT_ID", "@@IDENTITY"):
                use_master = True
                break
            else:
                use_master = False
    else:
        # every non-SELECT statement goes to the master
        use_master = True
    if forceOk:
        # caller already validated (e.g. rewritten USE) — skip whitelist
        return self.tuple(use_master, tag, "", self.opts)
    wls = self.merged_wls if dbtype == ZKConf.INTERNAL else self.whitelists
    if first_token not in wls:
        err_dict = dict(ErrorCode.SQL_FORBIDDEN)
        return self.tuple("err", tag, err_dict, self.opts)
    whitelist = wls[first_token]
    assert isinstance(whitelist, (list, tuple)), "whitelist must be a list or tuple"
    privs = self.get_privs(whitelist)
    if not privs:
        err_dict = dict(ErrorCode.SQL_FORBIDDEN)
        return self.tuple("err", tag, err_dict, self.opts)
    # entries whose state byte is not SQL_NORMAL need special handling
    specials = [x for x in privs if not x[2][0] == SQLState.SQL_NORMAL]
    utils.log(utils.cur(), privs, specials)
    for priv in specials:
        sql_state = priv[2][0]
        if sql_state == SQLState.SQL_ENGINE:
            tmp = self.deal_sql_engine(tag)
            if tmp:
                return tmp
        elif sql_state == SQLState.SQL_PRIVATE:
            tmp = self.deal_sql_private(tag, proxyconns, busyobj, idx)
            if tmp:
                return tmp
    return self.tuple(use_master, tag, "", self.opts)
def stopFactory(self):
    """Shut down factory-owned services when the proxy stops.

    Order matters: close external services (ZooKeeper, MQ, monitor,
    stats log) before draining pooled server connections.
    """
    utils.log(utils.cur(), "dbproxy stopping")
    self.zk.close()
    self.mq.stop()
    self.monitor.stop()
    self.proxy_stats_log.stop()
    # time.sleep(0.5)
    self.close_free_servers()
    self.servers.stop()
    stats_conns.close()
def getDBname(self):
    # rewrite "USE $self.database" to "USE dbname"
    """Resolve the real backend database name for self.database from
    ZooKeeper; returns None when the lookup fails or raises."""
    dbname = None
    try:
        dbpath = os.path.join(ZKConf.ZK_PATH_DB, self.database, ZKConf.KEY_DBCONF, ZKConf.KEY_DB)
        dbname = self.factory.get_path(dbpath)
        utils.log(utils.cur(), dbpath, dbname)
    except Exception:
        # fixed: bare `except:` also swallowed KeyboardInterrupt/SystemExit
        pass
    return dbname
def sync_mysql(self):
    """Mirror every ZooKeeper db_info subtree into the MySQL dbinfo table
    (upsert per db node, then delete rows whose node disappeared).

    NOTE(review): the visible text defines _trans/_transaction but does
    not show the call that runs them — confirm the invocation follows in
    the original file.
    """
    root = ZKConf.ZK_PATH_DB
    # MySQL column -> relative ZK leaf path feeding it
    path = {
        "dbname": "dbconf/db_db",
        "dbtype": "dbtype",
        "dbdisabled": "db_disabled",
        "username": "******",
        "password": "******",
        "host_r": "dbconf/db_host_r",
        "host_w": "dbconf/db_host_w",
        "ips": "authed_ips"
    }
    dbnodes = self.zk.get_children(root, None)
    utils.log(utils.cur(), dbnodes)
    def _trans(cur, *args):
        def _exec(data, record):
            # UPDATE when a row for dbnode already exists, else INSERT.
            if len(data):
                sql = ",".join(["`%s`='%s'" % (k,v) for k,v in record.items()])
                sql = "UPDATE `%s` SET %s WHERE `dbnode`='%s'" % (DBConf.TABLE_DBINFO, sql, dbnode)
                utils.log(utils.cur(), sql)
                cur.execute(sql)
            else:
                record["dbnode"] = dbnode
                keys = ",".join(["`%s`" % k for k in record.keys()])
                vals = ",".join(["'%%(%s)s'" % v for v in record.keys()])
                vals = vals % record
                sql = "INSERT INTO `%s` (%s) VALUES (%s)" % (DBConf.TABLE_DBINFO, keys, vals)
                utils.log(utils.cur(), sql)
                cur.execute(sql)
        cur.execute("START TRANSACTION")
        for dbnode in dbnodes:
            if not dbnode:
                continue
            record = dict()
            for key, value in path.items():
                node = os.path.join(root, dbnode, value)
                if self.zk.exists(node, None):
                    (data, meta) = self.zk.get(node, None)
                    record[key] = data
            cur.execute("SELECT * FROM `%s` WHERE `dbnode` = '%s'" % (DBConf.TABLE_DBINFO, dbnode))
            dbinfo = cur.fetchall()
            _exec(dbinfo, record)
        # delete records that have been already deleted
        sql = "DELETE FROM `%s` WHERE `dbnode` not in (%s) " % (DBConf.TABLE_DBINFO, ",".join(["'"+d+"'" for d in dbnodes]))
        utils.log(utils.cur(), sql)
        cur.execute(sql)
        cur.execute("COMMIT")
    def _transaction(cur, *args):
        # rollback + log instead of propagating the failure
        try:
            _trans(cur, *args)
        except Exception, err:
            cur.execute("ROLLBACK")
            utils.log(utils.cur(), err)
def getServer(self, db, callback, proObj):
    """Hand an idle pooled RWClient for `db` over to `proObj` and invoke
    `callback(proObj, True)` on success.

    NOTE(review): this variant only covers the pool-hit path; a fuller
    version elsewhere in the file also creates a new RWClient on miss.
    """
    if db in self.servers and len(self.servers[db]):
        if callable(callback):
            utils.log(utils.cur(), "use old RWClient")
            try:
                # detach the connection from the idle pool and bind it
                proObj.server = self.servers.pop(db)
                proObj.server.changeProtocolObj(proObj)
                self.busy_proobj[db].append(proObj)
            except Exception, err:
                return proObj.pool_error(err)
            callback(proObj, True)
def check(self):
    """Drop forbid entries that no longer apply: unknown types, and
    working forbids whose duration has elapsed."""
    snapshot = dict(self.sf.forbidinfo)
    for db, info in snapshot.iteritems():
        ftype = info[Forbid.KEY_TYPE]
        if ftype not in (Forbid.FORBID_WORKING, Forbid.FORBID_FOREVER):
            utils.log(utils.cur(), "erase forbid", db)
            self.sf.zk.erase_forbid(db)
        expired = (ftype == Forbid.FORBID_WORKING and
                   info[Forbid.KEY_START] + info[Forbid.KEY_DURATION] < time.time())
        if expired:
            utils.log(utils.cur(), "erase forbid", db)
            self.sf.zk.erase_forbid(db)
def unpack_command(packet, idx = 0):
    """Return (tag, payload): the SQL text for COM_QUERY, the statement
    id for COM_STMT_EXECUTE, and 0 for any other command."""
    idx = skip_header()
    tag, idx = unpack_int8(packet, idx)
    if tag == Command.COM_QUERY:
        sql, idx = unpack_string(packet, len(packet) - idx, idx)
        utils.log(utils.cur(), sql)
        return tag, sql
    if tag == Command.COM_STMT_EXECUTE:
        stmt_id, idx = unpack_int32(packet, idx)
        utils.log(utils.cur(), "Statement ID", stmt_id)
        return tag, stmt_id
    return tag, 0
def unpack_command(packet, idx=0):
    """Decode the command byte of a client packet.

    Returns a (tag, payload) pair; payload is the query text, the
    prepared-statement id, or 0 depending on the command.
    """
    idx = skip_header()
    tag, idx = unpack_int8(packet, idx)
    if tag == Command.COM_QUERY:
        query, idx = unpack_string(packet, len(packet) - idx, idx)
        utils.log(utils.cur(), query)
        return tag, query
    if tag == Command.COM_STMT_EXECUTE:
        statement_id, idx = unpack_int32(packet, idx)
        utils.log(utils.cur(), "Statement ID", statement_id)
        return tag, statement_id
    return tag, 0
def _exec(dblist, sql):
    # Mirror each MySQL dbinfo row into its ZooKeeper subtree,
    # creating the db root node first when it is missing.
    LOGCONFIG = namedtuple(
        "LOGCONFIG",
        "id,dbname,dbtype,dbdisabled,username,password,host_r,host_w,ips,dbnode"
    )
    for row in map(LOGCONFIG._make, dblist):
        utils.log(utils.cur(), row)
        dbpath = os.path.join(self.rootpath, row.dbnode)
        if not self.zk.exists(dbpath, None):
            self.zk.create(dbpath, "", [ZKConf.ZOO_CREATOR_ALL_ACL], 0)
        self.create_leafs(row, self.rootpath + "/" + row.dbnode)
def connectionLost(self, reason):
    """Twisted callback: the MySQL client dropped its connection.

    Closes any open prepared statement, returns the backend RWClient to
    the pool, and stops this protocol's timeout timer.
    """
    utils.reset_logconf()
    self.factory.conns -= 1
    utils.log(utils.cur(), "client is losing proxy %s" % self.factory.conns, self.requests)
    # mysql_stmt_close
    if self.stmt_id:
        self.server.mysql_stmt_close(self.stmt_id)
    # reclaim connection
    self.factory.takeServer(self.server, self)
    # clean connection
    self.server = None
    self.timeout.stop()
def _exec(dblist, sql):
    # Push every dbinfo row from MySQL into ZooKeeper under rootpath.
    LOGCONFIG = namedtuple("LOGCONFIG",
                           "id,dbname,dbtype,dbdisabled,username,password,host_r,host_w,ips,dbnode")
    for row in map(LOGCONFIG._make, dblist):
        utils.log(utils.cur(), row)
        dbpath = os.path.join(self.rootpath, row.dbnode)
        node_present = self.zk.exists(dbpath, None)
        if not node_present:
            self.zk.create(dbpath, "", [ZKConf.ZOO_CREATOR_ALL_ACL], 0)
        self.create_leafs(row, self.rootpath + "/" + row.dbnode)
def takeServer(self, server, proObj):
    """Return a finished RWClient to the idle pool.

    Only fully-ready servers (ready == 2) are reclaimed; others are
    silently dropped.
    """
    if not (server and hasattr(server, "ready") and server.ready == 2):
        return
    # clean SQL cache
    server.raw_sql_clear()
    db = server.getDB()
    stats_conns.decr(db)
    try:
        self.busy_proobj[db].remove(proObj)
    except:
        pass
    server.changeProtocolObj(None)
    self.servers.push(server.getDB(), server)
    utils.log(utils.cur(), stats_conns, self.servers)
def _create(info, path):
    # Recursively materialise the nested dict `info` as a ZooKeeper
    # subtree rooted at `path`; existing nodes are left untouched.
    for key, value in info.iteritems():
        subpath = "%s/%s" % (path, key)
        if self.zk.exists(subpath, None):
            continue
        if type(value) is dict:
            self.zk.create(subpath, "", [ZKConf.ZOO_CREATOR_ALL_ACL], 0)
            _create(value, subpath)
        else:
            utils.log(utils.cur(), subpath, value)
            self.zk.create(subpath, str(value), [ZKConf.ZOO_CREATOR_ALL_ACL], 0)
def _trans(cur, *args):
    # Insert the closed-over `dbinfo` mapping as one row; roll back,
    # log, and re-raise on any failure.
    try:
        cols = ",".join("`%s`" % k for k in dbinfo.keys())
        cells = ",".join("'%s'" % v for v in dbinfo.values())
        sql = "INSERT INTO `%s` (%s) VALUES(%s)" % (
            DBConf.TABLE_DBINFO, cols, cells)
        utils.log(utils.cur(), cols, cells, sql)
        cur.execute(sql)
        cur.execute("COMMIT")
    except Exception as e:
        cur.execute("ROLLBACK")
        utils.err(utils.cur(), e)
        raise Exception(e)
def _trans(cur, *args):
    # Ensure a quota row exists for this dbtype; insert defaults when
    # absent. NOTE(review): `dbtype` here is a closed-over mapping used
    # for %-substitution (despite the scalar-sounding name) — confirm.
    cur.execute("START TRANSACTION")
    try:
        cur.execute("SELECT * FROM `%(table)s` WHERE dbtype='%(dbtype)s'" % dbtype)
        dbnums = cur.fetchall()
        if len(dbnums) == 0:
            insert_sql = """INSERT INTO `%(table)s`
            (`dbtype`, `maxconn`, `disk`, `network`, `slowquery_duration`,
            `slowquery_times`, `tablenums`, `tablerows`, `tablesize`, `cputime`)
            VALUES ('%(dbtype)s', %(maxconn)d, %(disk)d, %(network)d,
            %(slowquery_duration)d, %(slowquery_times)d, %(tablenums)d,
            %(tablerows)d, %(tablesize)d, %(cputime)d)""" % dbtype
            utils.log(utils.cur(), insert_sql)
            cur.execute(insert_sql)
            # NOTE(review): COMMIT placement reconstructed — confirm
            # against the original indentation.
            cur.execute("COMMIT")
    except Exception, err:
        utils.log(utils.cur(), err)
        cur.execute("ROLLBACK")
def watcher_rw_ip(self, event, true_path):
    """ZooKeeper watcher for dbconf/db_host_r|db_host_w changes: when the
    backend host for a db changed, disconnect both idle and busy
    RWClients so they reconnect to the new host.
    """
    self.reload_zkdbinfo()
    host = self.get_path(true_path)
    paths = true_path.split("/")
    # path shape: /database/db_info/<db>/dbconf/<db_host_r|db_host_w>
    db, rw_type = paths[3], paths[-1]
    # 1. reset idle_rwclient connections
    backends = self.servers[db] if db in self.servers else []
    utils.log(utils.cur(), true_path, host, backends.copy() if backends else [], len(backends))
    # reset connections when new IP and old IP are different
    for rwclient, pushtime in backends.copy() if backends else []:
        oldhost = rwclient.getHost(rw_type)
        utils.log(utils.cur(), oldhost, host)
        if oldhost and oldhost != host:
            rwclient.disconnect(host, rw_type)
    # 2. reset busy_proobj rwclient connections
    utils.log(utils.cur(), self.busy_proobj)
    # reset connections when new IP and old IP are different
    # NOTE(review): raises KeyError if `db` is missing from busy_proobj
    # — confirm the dict is always pre-populated per db.
    for proobj in self.busy_proobj[db]:
        if proobj.server:
            oldhost = proobj.server.getHost(rw_type)
            if oldhost and oldhost != host:
                utils.log(utils.cur(), oldhost, host)
                proobj.server.disconnect(host, rw_type)
def _exec(data, record):
    # UPDATE the row for the closed-over `dbnode` when present,
    # INSERT a new one built from `record` otherwise.
    if len(data):
        assigns = ",".join("`%s`='%s'" % (k, v) for k, v in record.items())
        sql = "UPDATE `%s` SET %s WHERE `dbnode`='%s'" % (DBConf.TABLE_DBINFO, assigns, dbnode)
        utils.log(utils.cur(), sql)
        cur.execute(sql)
    else:
        record["dbnode"] = dbnode
        cols = ",".join("`%s`" % k for k in record.keys())
        template = ",".join("'%%(%s)s'" % k for k in record.keys())
        cells = template % record
        sql = "INSERT INTO `%s` (%s) VALUES (%s)" % (DBConf.TABLE_DBINFO, cols, cells)
        utils.log(utils.cur(), sql)
        cur.execute(sql)
def watcher_rw_ip(self, event, true_path):
    """React to a db_host_r / db_host_w change in ZooKeeper by forcing
    both idle and in-use RWClients for that db to reconnect."""
    self.reload_zkdbinfo()
    host = self.get_path(true_path)
    paths = true_path.split("/")
    # /database/db_info/<db>/dbconf/<rw_type>
    db, rw_type = paths[3], paths[-1]
    # 1. reset idle_rwclient connections
    backends = self.servers[db] if db in self.servers else []
    utils.log(utils.cur(), true_path, host, backends.copy() if backends else [], len(backends))
    # reset connections when new IP and old IP are different
    for rwclient, pushtime in (backends.copy() if backends else []):
        oldhost = rwclient.getHost(rw_type)
        utils.log(utils.cur(), oldhost, host)
        if oldhost and oldhost != host:
            rwclient.disconnect(host, rw_type)
    # 2. reset busy_proobj rwclient connections
    utils.log(utils.cur(), self.busy_proobj)
    # reset connections when new IP and old IP are different
    # NOTE(review): busy_proobj[db] raises KeyError when db is absent —
    # verify the mapping always has an entry per watched db.
    for proobj in self.busy_proobj[db]:
        if proobj.server:
            oldhost = proobj.server.getHost(rw_type)
            if oldhost and oldhost != host:
                utils.log(utils.cur(), oldhost, host)
                proobj.server.disconnect(host, rw_type)
def _trans(cur, *args):
    # One-row insert of the closed-over `dbinfo` dict; any error rolls
    # the transaction back and is re-raised for the caller.
    try:
        column_sql = ",".join("`%s`" % key for key in dbinfo.keys())
        value_sql = ",".join("'%s'" % val for val in dbinfo.values())
        insert = "INSERT INTO `%s` (%s) VALUES(%s)" % (DBConf.TABLE_DBINFO, column_sql, value_sql)
        utils.log(utils.cur(), column_sql, value_sql, insert)
        cur.execute(insert)
        cur.execute("COMMIT")
    except Exception as e:
        cur.execute("ROLLBACK")
        utils.err(utils.cur(), e)
        raise Exception(e)
def _trans(cur, *args):
    # Create the per-dbtype quota row when it does not exist yet.
    # NOTE(review): `dbtype` is a closed-over mapping used for
    # %-substitution into the SQL templates — confirm its keys.
    cur.execute("START TRANSACTION")
    try:
        cur.execute(
            "SELECT * FROM `%(table)s` WHERE dbtype='%(dbtype)s'" % dbtype)
        dbnums = cur.fetchall()
        if len(dbnums) == 0:
            insert_sql = """INSERT INTO `%(table)s`
            (`dbtype`, `maxconn`, `disk`, `network`, `slowquery_duration`,
            `slowquery_times`, `tablenums`, `tablerows`, `tablesize`, `cputime`)
            VALUES ('%(dbtype)s', %(maxconn)d, %(disk)d, %(network)d,
            %(slowquery_duration)d, %(slowquery_times)d, %(tablenums)d,
            %(tablerows)d, %(tablesize)d, %(cputime)d)""" % dbtype
            utils.log(utils.cur(), insert_sql)
            cur.execute(insert_sql)
            # NOTE(review): COMMIT placement reconstructed from collapsed
            # text — confirm against the original indentation.
            cur.execute("COMMIT")
    except Exception, err:
        utils.log(utils.cur(), err)
        cur.execute("ROLLBACK")
def _clientAuth(self, data):
    """Handle the client's MySQL handshake-response packet: parse it,
    authenticate via ZooKeeper, then attach a read/write connection
    pair and answer the client with OK/ERR."""
    """
    1. parse auth packet, db is mandatory
    """
    auth = analyze_packet(data)
    if auth == -1 or not auth["database"]:
        self._write(self._goWrong(ErrorCode.PACKET_WRONG, self.next_idx))
        return
    """
    2. retrieve database info from zookeeper and auth
    """
    is_legal = self._zookeeperAuth(auth)
    utils.log(utils.cur(), "after zkauth", is_legal)
    """
    3. assign a pair of read/write connections when passed
    """
    if is_legal:
        self.database = auth["database"]
        self.dbname = self.getDBname()
        utils.log(utils.cur(), self.database)
        def callback(me, old=False):
            # invoked by factory.getServer once a backend pair is bound;
            # `old` is True when a pooled connection was reused.
            # utils.log(utils.cur(), "auth inside")
            pkts = me.server.writeClient.protocol.packets
            if old:
                me._write(me._goRight(me.next_idx))
                me.client_authed = True
                stats_conns.incr(self.database)
                utils.log(utils.cur(), stats_conns)
                return
            if pkts["err"]:
                me._write(pkts["err"])
            else:
                """
                ** NOTICE ** be care of index in pkts["ok"] and pkts["err"]
                """
                # utils.log(utils.cur(), pkts)
                # rewrite the sequence byte before forwarding the OK packet
                p = pkts["ok"][:3] + "\x02" + pkts["ok"][4:]
                me._write(p)
                me.client_authed = True
                stats_conns.incr(self.database)
                utils.log(utils.cur(), stats_conns)
        self.factory.getServer(self.database, callback, self)
def _create(info, path):
    # Depth-first creation of ZK nodes mirroring the dict `info`;
    # nodes that already exist (and their children) are skipped.
    for key, value in info.iteritems():
        subpath = "%s/%s" % (path, key)
        already = self.zk.exists(subpath, None)
        if already:
            continue
        if type(value) is dict:
            self.zk.create(subpath, "", [ZKConf.ZOO_CREATOR_ALL_ACL], 0)
            _create(value, subpath)
        else:
            utils.log(utils.cur(), subpath, value)
            self.zk.create(subpath, str(value), [ZKConf.ZOO_CREATOR_ALL_ACL], 0)
def _exec(data, record):
    # Upsert helper: `data` is the SELECT result for the closed-over
    # `dbnode`; non-empty means UPDATE, empty means INSERT.
    if len(data):
        set_clause = ",".join("`%s`='%s'" % (col, val) for col, val in record.items())
        statement = "UPDATE `%s` SET %s WHERE `dbnode`='%s'" % (
            DBConf.TABLE_DBINFO, set_clause, dbnode)
        utils.log(utils.cur(), statement)
        cur.execute(statement)
    else:
        record["dbnode"] = dbnode
        columns = ",".join("`%s`" % col for col in record.keys())
        fmt = ",".join("'%%(%s)s'" % col for col in record.keys())
        values = fmt % record
        statement = "INSERT INTO `%s` (%s) VALUES (%s)" % (
            DBConf.TABLE_DBINFO, columns, values)
        utils.log(utils.cur(), statement)
        cur.execute(statement)
def should_copy_file(filename, ignore_pattern, match_pattern, debug):
    """Return True when `filename` passes the copy filters.

    With neither pattern everything is copied; when both are given the
    match_pattern decision overrides the ignore_pattern decision.
    """
    if not ignore_pattern and not match_pattern:
        return True
    verdict = False
    if ignore_pattern:
        hit = re.match(ignore_pattern, filename)
        if hit:
            log('Ignore pattern: {}'.format(filename), debug)
        verdict = not hit
    if match_pattern:
        hit = re.match(match_pattern, filename)
        if hit:
            log('Match pattern: {}'.format(filename), debug)
        verdict = bool(hit)
    return verdict
def mqCallback(self, channel, method_frame, header_frame, body):
    """RabbitMQ consumer callback: on the proxy master only, write/refresh
    forbid entries under /database/forbid in ZooKeeper from the message
    body (a JSON dict of db -> forbid spec)."""
    try:
        if not self.zk.is_proxy_master():
            return
        # master's business
        data_dict = cjson.decode(body)
        # ** MUST ** ack
        channel.basic_ack(method_frame.delivery_tag)
        utils.log(utils.cur(), body, data_dict)
        if not isinstance(data_dict, dict):
            return
        for db, forbid in data_dict.iteritems():
            # NOTE(review): this `return` aborts the whole loop on the
            # first non-working/forever entry — confirm `continue` was
            # not intended.
            if not forbid[Forbid.KEY_TYPE] in (Forbid.FORBID_WORKING, Forbid.FORBID_FOREVER):
                return
            forbid[Forbid.KEY_START] = time.time()
            path = os.path.join(ZKConf.ZK_PATH_FORBID, db)
            orig = self.get_path(path)
            if orig is False:
                # no existing node — create it
                self.zk.mknode(path, cjson.encode(forbid))
            else:
                old = cjson.decode(orig)
                if (
                    old[Forbid.KEY_TYPE] == forbid[Forbid.KEY_TYPE]
                    and old[Forbid.KEY_TYPE] == Forbid.FORBID_WORKING
                    and old[Forbid.KEY_START] + old[Forbid.KEY_DURATION] > time.time()
                ):
                    utils.log(utils.cur(), "still forbidding")
                else:
                    utils.log(utils.cur(), "change forbid")
                    # change /database/forbid/db
                    self.forbidinfo[db] = forbid
                    self.zk.set(path, cjson.encode(forbid))
    except Exception, err:
        utils.err(utils.cur(), err)
def _trans(cur, *args):
    # Transactionally sync ZK db nodes into the MySQL dbinfo table.
    # Closure variables: `dbnodes`, `path` (column -> ZK leaf), `root`,
    # and `self` (owner of the .zk handle).
    def _exec(data, record):
        # UPDATE existing dbnode row, or INSERT a fresh one.
        if len(data):
            sql = ",".join(
                ["`%s`='%s'" % (k, v) for k, v in record.items()])
            sql = "UPDATE `%s` SET %s WHERE `dbnode`='%s'" % (
                DBConf.TABLE_DBINFO, sql, dbnode)
            utils.log(utils.cur(), sql)
            cur.execute(sql)
        else:
            record["dbnode"] = dbnode
            keys = ",".join(["`%s`" % k for k in record.keys()])
            vals = ",".join(["'%%(%s)s'" % v for v in record.keys()])
            vals = vals % record
            sql = "INSERT INTO `%s` (%s) VALUES (%s)" % (
                DBConf.TABLE_DBINFO, keys, vals)
            utils.log(utils.cur(), sql)
            cur.execute(sql)
    cur.execute("START TRANSACTION")
    for dbnode in dbnodes:
        if not dbnode:
            continue
        record = dict()
        for key, value in path.items():
            node = os.path.join(root, dbnode, value)
            if self.zk.exists(node, None):
                (data, meta) = self.zk.get(node, None)
                record[key] = data
        cur.execute("SELECT * FROM `%s` WHERE `dbnode` = '%s'" % (DBConf.TABLE_DBINFO, dbnode))
        dbinfo = cur.fetchall()
        _exec(dbinfo, record)
    # delete records that have been already deleted
    sql = "DELETE FROM `%s` WHERE `dbnode` not in (%s) " % (
        DBConf.TABLE_DBINFO, ",".join(["'" + d + "'" for d in dbnodes]))
    utils.log(utils.cur(), sql)
    cur.execute(sql)
    cur.execute("COMMIT")
def mqCallback(self, channel, method_frame, header_frame, body):
    """MQ consumer callback (master proxy only): maintain forbid nodes in
    ZooKeeper based on the JSON dict {db: forbid_spec} in `body`."""
    try:
        if not self.zk.is_proxy_master():
            return
        # master's business
        data_dict = cjson.decode(body)
        # ** MUST ** ack
        channel.basic_ack(method_frame.delivery_tag)
        utils.log(utils.cur(), body, data_dict)
        if not isinstance(data_dict, dict):
            return
        for db, forbid in data_dict.iteritems():
            # NOTE(review): `return` here stops processing remaining dbs
            # in the message — confirm `continue` was not intended.
            if not forbid[Forbid.KEY_TYPE] in (Forbid.FORBID_WORKING, Forbid.FORBID_FOREVER):
                return
            forbid[Forbid.KEY_START] = time.time()
            path = os.path.join(ZKConf.ZK_PATH_FORBID, db)
            orig = self.get_path(path)
            if orig is False:
                self.zk.mknode(path, cjson.encode(forbid))
            else:
                old = cjson.decode(orig)
                # keep the existing entry if an identical working forbid
                # is still inside its duration window
                if old[Forbid.KEY_TYPE] == forbid[Forbid.KEY_TYPE] and \
                   old[Forbid.KEY_TYPE] == Forbid.FORBID_WORKING and \
                   old[Forbid.KEY_START] + old[Forbid.KEY_DURATION] > time.time():
                    utils.log(utils.cur(), "still forbidding")
                else:
                    utils.log(utils.cur(), "change forbid")
                    # change /database/forbid/db
                    self.forbidinfo[db] = forbid
                    self.zk.set(path, cjson.encode(forbid))
    except Exception, err:
        utils.err(utils.cur(), err)
def check_whitelist(self, wl, crud, token_ids):
    """Match the tokenized SQL against one whitelist entry `wl`.

    wl[2:] are keywords that must follow the first token in order;
    wl[1] is the token id after which a table literal may appear.
    Returns False on mismatch, or (True, table_or_None, wl). As a side
    effect records the crud operation per table in self.tbopts.
    """
    if len(self.tokens) < len(wl[2:])+1:
        return False
    i = 0
    for kw in wl[2:]:
        i += 1
        if self.tokens[i][0] != kw:
            return False
    tbl = None
    utils.log(utils.cur(), wl, wl[1], token_ids, self.tokens)
    if wl[1] != sce_token.TK_UNKNOWN:
        try:
            # find the anchor token, then take the following literal as
            # the table name
            idx = token_ids.index(wl[1], i)
            utils.log(utils.cur(), wl, idx)
            if idx+1 < len(self.tokens) \
               and self.tokens[idx+1][1] == sce_token.TK_LITERAL:
                tbl = self.tokens[idx+1][0]
        except:
            pass
    if tbl:
        if tbl in self.tbopts:
            self.tbopts[tbl].append(crud)
        else:
            self.tbopts[tbl] = [crud]
    return (True, tbl, wl)
def callback(me, old=False):
    # Completion callback passed to factory.getServer: finishes the
    # client handshake once a backend pair is bound. `old` is True when
    # a pooled connection was reused; `self` is the enclosing protocol.
    # utils.log(utils.cur(), "auth inside")
    pkts = me.server.writeClient.protocol.packets
    if old:
        me._write(me._goRight(me.next_idx))
        me.client_authed = True
        stats_conns.incr(self.database)
        utils.log(utils.cur(), stats_conns)
        return
    if pkts["err"]:
        me._write(pkts["err"])
    else:
        """
        ** NOTICE ** be care of index in pkts["ok"] and pkts["err"]
        """
        # utils.log(utils.cur(), pkts)
        # patch the packet sequence byte before forwarding the OK
        p = pkts["ok"][:3] + "\x02" + pkts["ok"][4:]
        me._write(p)
        me.client_authed = True
        stats_conns.incr(self.database)
        utils.log(utils.cur(), stats_conns)
def check_whitelist(self, wl, crud, token_ids):
    """Check one whitelist entry against self.tokens.

    Returns False when the required keyword sequence (wl[2:]) does not
    follow the first token; otherwise (True, table_or_None, wl), also
    recording the crud op for the extracted table in self.tbopts.
    """
    if len(self.tokens) < len(wl[2:]) + 1:
        return False
    i = 0
    for kw in wl[2:]:
        i += 1
        if self.tokens[i][0] != kw:
            return False
    tbl = None
    utils.log(utils.cur(), wl, wl[1], token_ids, self.tokens)
    if wl[1] != sce_token.TK_UNKNOWN:
        try:
            # locate the anchor token id, take the next literal as table
            idx = token_ids.index(wl[1], i)
            utils.log(utils.cur(), wl, idx)
            if idx + 1 < len(self.tokens) and self.tokens[idx + 1][1] == sce_token.TK_LITERAL:
                tbl = self.tokens[idx + 1][0]
        except:
            pass
    if tbl:
        if tbl in self.tbopts:
            self.tbopts[tbl].append(crud)
        else:
            self.tbopts[tbl] = [crud]
    return (True, tbl, wl)
def sendForbid(self, data, db, opts):
    """
    check contraints of databases, tables and operations
    """
    # Returns True and writes a QUOTA_EXCEEDED error packet when the
    # current statement's operations (`opts`) intersect the forbid spec
    # in `data`; returns False when the statement may proceed.
    assert isinstance(data, dict)
    assert "object" in data and data["object"] in (Forbid.FORBID_DATABASE, Forbid.FORBID_TABLE)
    def _realForbid(crud, dbtb, errmsg, isDB=True):
        # Build and send the error packet; remaining seconds are
        # computed for working forbids, "forever" maps to sys.maxint.
        forbid_duration = int(data["start"] + data["duration"] - time.time()) if data["type"] == Forbid.FORBID_WORKING else sys.maxint
        forbid_error = dict(ErrorCode.QUOTA_EXCEEDED)
        forbid_error["message"] = forbid_error["message"] % (crud, "Database" if isDB else "Table", dbtb, errmsg, forbid_duration)
        self._write(self._goWrong(forbid_error, self.next_idx))
        return True
    crud = data["crud"]
    utils.log(utils.cur(), crud)
    if not crud:
        return False
    if data["object"] == Forbid.FORBID_DATABASE:
        allopts = opts["db"]
        utils.log(utils.cur(), allopts)
        assert type(allopts) is list
        if Forbid.OPERATION_DEFAULT in crud:
            # default forbids every operation on the database
            return _realForbid([Forbid.OPERATION_DEFAULT], db, data["errmsg"], True)
        else:
            if set(allopts).intersection(set(crud)):
                return _realForbid(crud, db, data["errmsg"], True)
    else:
        allopts = opts["tb"]
        utils.log(utils.cur(), allopts)
        assert type(allopts) is dict
        for tbl, tbopts in allopts.iteritems():
            if tbl in crud:
                if Forbid.OPERATION_DEFAULT in crud[tbl]:
                    return _realForbid([Forbid.OPERATION_DEFAULT], tbl, data["errmsg"], False)
                if set(tbopts).intersection(set(crud[tbl])):
                    return _realForbid(crud[tbl], tbl, data["errmsg"], False)
    return False
def _transaction(cur, *args):
    # Wrapper around the sibling _trans: roll back and log on failure
    # instead of propagating the exception.
    try:
        _trans(cur, *args)
    except Exception, err:
        cur.execute("ROLLBACK")
        utils.log(utils.cur(), err)
class DBInfo(object):
    """Bridges the MySQL dbinfo table and the /database/db_info subtree
    in ZooKeeper: `start` mirrors existing rows into ZK, `create_dbinfo`
    registers a new database in both stores."""

    def __init__(self):
        self.zk = zk_helper.ZooKeeper("dbinfo")
        self.dbinfo = db_helper.DBPool(DBConf.APPENGINEDB, async=False)
        self.rootpath = ZKConf.ZK_PATH_DB
        self.zk.mknode(self.rootpath)

    def start(self):
        """Load every dbinfo row from MySQL and mirror it into ZK."""
        def _exec(dblist, sql):
            LOGCONFIG = namedtuple(
                "LOGCONFIG",
                "id,dbname,dbtype,dbdisabled,username,password,host_r,host_w,ips,dbnode"
            )
            for row in map(LOGCONFIG._make, dblist):
                utils.log(utils.cur(), row)
                dbpath = os.path.join(self.rootpath, row.dbnode)
                dbroot = self.zk.exists(dbpath, None)
                if not dbroot:
                    self.zk.create(dbpath, "", [ZKConf.ZOO_CREATOR_ALL_ACL], 0)
                self.create_leafs(row, self.rootpath + "/" + row.dbnode)
        self.dbinfo.execute("SELECT * FROM `%s`" % DBConf.TABLE_DBINFO, _exec)

    def create_dbinfo(self, dbinfo):
        """Register a new database: validate dbnode uniqueness, insert
        the row into MySQL, then build the ZK representation.

        :param dbinfo: dict with a mandatory non-empty "dbnode" key
        :raises Exception: on missing/duplicate dbnode or insert failure
        """
        if "dbnode" not in dbinfo:
            raise Exception("dbnode **MUST** contain")
        dbnode = dbinfo["dbnode"]
        if not dbnode:
            raise Exception("dbnode **MUST NOT** null")
        def _exec(nodes, sql):
            # uniqueness check on the dbnode column
            if nodes and len(nodes):
                raise Exception("dbnode '%s' exists" % dbnode)
        sql = """SELECT `dbnode` FROM `%s` WHERE `dbnode` = '%s'""" % (DBConf.TABLE_DBINFO, dbnode)
        self.dbinfo.execute(sql, _exec)
        # insert into mysql
        def _trans(cur, *args):
            try:
                keys = ",".join(["`%s`" % k for k in dbinfo.keys()])
                vals = ",".join(["'%s'" % v for v in dbinfo.values()])
                sql = "INSERT INTO `%s` (%s) VALUES(%s)" % (
                    DBConf.TABLE_DBINFO, keys, vals)
                utils.log(utils.cur(), keys, vals, sql)
                cur.execute(sql)
                cur.execute("COMMIT")
            except Exception, e:
                cur.execute("ROLLBACK")
                utils.err(utils.cur(), e)
                raise Exception(e)
        self.dbinfo.transaction(_trans)
        # insert into zookeeper
        DBINFO = namedtuple(
            "DBINFO",
            "dbname,dbtype,dbdisabled,username,password,host_r,host_w,ips,dbnode"
        )
        row = None
        try:
            row = DBINFO(**dbinfo)
            utils.log(utils.cur(), row)
        except Exception, e:
            raise Exception(e)
def stop(self):
    """Ask the worker thread to leave its run loop (no-op when the
    thread is not alive)."""
    if not self.isAlive():
        return
    utils.log(utils.cur(), 'stop thread %s' % self.name)
    self.running = False
"dbtype": "", "dbdisabled": "", "username": "", "password": "", "host_r": "", "host_w": "", "ips": "" } dbinfo["password"] = utils.encrypt(dbinfo["password"]) DBInfo().create_dbinfo(dbinfo) if __name__ == "__main__": utils.gen_logger("dbinfo_znode_logger", "/tmp/l.log") options = ["init_znode", "sync_mysql", "init_a_node"] if len(sys.argv) < 2: utils.log(utils.cur(), "Usage: python %s %s arguments" % (sys.argv[0], "/".join(options))) sys.exit(0) arg1 = sys.argv[1] if not arg1 in options: utils.log(utils.cur(), "Usage: python %s %s arguments" % (sys.argv[0], "/".join(options))) sys.exit(0) utils.parse_args(sys.argv[2:]) if arg1 == "init_znode": # init_znode() # print DBType().zk.get_dict("/database") DBType().list("/database/db_info/appstat") # DBType().zk.set("/database/db_info/appstat/dbconf/db_host_r", "10.11.150.126:3306") # DBType().zk.set("/database/db_info/appengine/dbconf/db_host_w", "10.11.150.114:3306") # DBType().zk.set("/database/db_info/appengine/dbconf/db_host_r", "10.11.150.114:3306") elif arg1 == "sync_mysql":
def _dealQuery(self, data):
    """Route one client command packet: pass binary commands straight
    through, rewrite/validate USE and COM_INIT_DB against the real
    backend db name, run the SQL whitelist check, then dispatch the
    query to the read or write backend.

    NOTE(review): block reconstructed from collapsed text — confirm the
    exact indentation of self.timeout.reset() in the binary-command
    branch against the original file.
    """
    tag, cmd = unpack_command(data)
    """
    if cmd is not COM_QUERY then forward data directly
    """
    if isinstance(cmd, int):
        if tag == Command.COM_INIT_DB:
            token_db = unpack_database(data)
            utils.log(utils.cur(), token_db)
            dbname = self.dbname
            # only the alias (self.database) or the real name are allowed
            if (not dbname) or (token_db not in (dbname, self.database)):
                self.timeout.reset()
                self._write(self._goWrong(ErrorCode.USE_FORBIDDEN, self.next_idx))
                return
            if token_db == self.database and dbname != self.database:
                # rewrite the alias into the real backend db name
                data = command_packet(dbname, Command.COM_INIT_DB)
        elif tag == Command.COM_STMT_EXECUTE:
            self.stmt_id = cmd
        self.timeout.reset()
        if self.server:
            self.server.writeClient.protocol.queryRaw(data)
        return
    forceOk = False
    tokens = self.sql_parser.addSQL(cmd)
    dbname = self.dbname
    if len(tokens) and tokens[0][0] == "USE":
        # textual USE gets the same alias validation/rewrite
        if (not dbname) or (len(tokens) != 2) or (tokens[1][0] not in (dbname.upper(), self.database.upper())):
            self.timeout.reset()
            self._write(self._goWrong(ErrorCode.USE_FORBIDDEN, self.next_idx))
            return
        forceOk = True
        if dbname != self.database and tokens[1][0] == self.database.upper():
            data = command_packet("USE `%s`" % dbname)
    use_master, sql_state, msg, opts = self.sql_parser.verify(self.dbtype, forceOk, self.factory.servers, self.factory.busy_proobj, self.idx+1)
    utils.log(utils.cur(), use_master, sql_state, msg, opts)
    if not isinstance(use_master, int):
        # verify returned a verdict string instead of a routing flag
        self.timeout.reset()
        if use_master == "err":
            self._write(self._goWrong(msg if isinstance(msg, dict) else ErrorCode.SQL_FORBIDDEN, self.next_idx))
            return
        elif use_master == "ok":
            if sql_state == SQLState.SQL_PRIVATE:
                if type(msg) == list:
                    """
                    sce pool/proxy/client status
                    """
                    for result_set_cell in msg:
                        self._write(result_set_cell)
                    return
                else:
                    """
                    sce master = 0/1 whether read/write is splitting
                    """
                    self.rwsplit = False if msg else True
                    utils.log(utils.cur(), msg, type(msg), self.rwsplit, self.idx)
                    self._write(self._goRight(self.next_idx))
                    return
    if self._checkForbid(opts, self.dbtype):
        return
    """
    read/write splitting
    """
    if self.server:
        in_trans = self.server.writeClient.protocol.in_trans
        # non-transactional SELECT
        if self.rwsplit and in_trans == False and use_master == False:
            # read
            utils.log(utils.cur(), "read")
            self.timeout.reset()
            self.server.readClient.protocol.queryRaw(data)
        else:
            # write
            utils.log(utils.cur(), "write")
            self.timeout.reset()
            self.server.writeClient.protocol.queryRaw(data)
except Exception, err: utils.err(utils.cur(), err) def getServer(self, db, callback, proObj): if db in self.servers and len(self.servers[db]): if callable(callback): utils.log(utils.cur(), "use old RWClient") try: proObj.server = self.servers.pop(db) proObj.server.changeProtocolObj(proObj) self.busy_proobj[db].append(proObj) except Exception, err: return proObj.pool_error(err) callback(proObj, True) else: utils.log(utils.cur(), "create new RWClient") dbpath = os.path.join(ZKConf.ZK_PATH_DB, db, ZKConf.KEY_DBCONF) if self.zk.exists(dbpath, None): try: stats_conns.check(db) except Exception, err: utils.log(utils.cur(), err) return proObj.pool_error(err) def cb(pro, db): if callable(callback): proObj.server = pro.factory.servers.pop(db) self.busy_proobj[db].append(proObj) callback(proObj) dbinfo = {"host_r": "", "host_w": "", "user": "", "passwd": "", "dbshow": db, "db": ""}
3. check whether there is password """ if is_legal and not auth["scramble_buff"]: is_legal = False # utils.log(utils.cur(), "passwd has", is_legal) """ 4. check whether password is correct """ if is_legal: for k in info: if k == "passwd" and server_check_auth(auth["scramble_buff"], self.factory.getHandshakeDic()["scramble"], info[k], False): # utils.log(utils.cur(), "passwd auth success") continue elif k in auth and info[k] == auth[k]: continue else: utils.log(utils.cur(), "auth failed", k) is_legal = False break # utils.log(utils.cur(), "passwd right", is_legal) """ 5. return error message of authentication """ if not is_legal: utils.log(utils.cur(), "illegal") auth_error = dict(ErrorCode.AUTH_WRONG) auth["host"] = self.transport.getHost().host auth_error["message"] = auth_error["message"] % (auth["user"], auth["host"], "YES" if auth["scramble_buff"] else "NO") self._write(self._goWrong(auth_error, self.next_idx)) # 2 return is_legal