def main_loop():
    """Main keep-alive loop: ticks the global clock once per second and,
    when stdin is enabled, watches for the quit word ('wwq') or EOF.

    On exit restores the terminal settings saved by the caller (cbreak
    mode) and stops all worker threads via stop_all().
    """
    while 1:
        msg.g_now += 1  # global 1-second tick consumed by the timer thread
        mgr_singleton.g_singleton.get_loger()._uptime()
        #mgr_singleton.g_singleton.get_loger().debug(_lineno(), msg.g_now)
        if not msg.g_enable_stdin:
            time.sleep(1)
        else:
            # Read user input (1s select timeout keeps the tick rate).
            try:
                if select.select([sys.stdin], [], [], 1) == ([sys.stdin], [], []):
                    line = sys.stdin.readline()
                    sys.stdout.write(line)  # echo (terminal is in cbreak mode)
                    sys.stdout.flush()
                    # EOF or the magic quit word terminates the loop.
                    if not line or line.rstrip() == 'wwq':
                        break
            except KeyboardInterrupt as e:
                mgr_singleton.g_singleton.get_loger().error(_lineno(), 'KeyboardInterrupt:%s' % e)
                # log the traceback as well
                mgr_singleton.g_singleton.get_loger().error(traceback.format_exc())
                break
            except IOError as e:
                mgr_singleton.g_singleton.get_loger().error(_lineno(), 'IOError:%s' % (e))
                # log the traceback as well
                mgr_singleton.g_singleton.get_loger().error(traceback.format_exc())
                break
            except Exception as e:
                mgr_singleton.g_singleton.get_loger().error(_lineno(), 'exception:%s' % (e,))
                # log the traceback as well
                mgr_singleton.g_singleton.get_loger().error(traceback.format_exc())
                break
    # Restore terminal state saved before tty.setcbreak().
    if msg.g_enable_stdin and msg.old_settings:
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, msg.old_settings)
    stop_all()
def handler(self, data):
    """Dispatch one queued message by data['class'].

    Rebuilds the MySQL connection first when it is flagged as broken.
    Unknown classes are logged and dropped; any exception is caught and
    logged so the consuming thread keeps running.
    """
    if self.dbcon.conn_error:
        # Reconnect before handling anything that may touch the DB.
        self.dbcon = MySQL.MySQL(self.dbip, mgr_conf.g_db_user,
                                 mgr_conf.g_db_passwd, mgr_conf.g_db_db,
                                 loger=self.loger)
    try:
        self.loger.care(_lineno(self), 'recv request class %s' % (data['class']))
        for case in switch(data['class']):
            if case(msg.g_class_init_view_reply) or case(msg.g_class_init_dns_reply):
                self.just4testcnt += 1  # counter used only for testing
                req_handler.handle_proxy_init_reply(self, data, data['inner_addr'][0])
                break
            if case(msg.g_class_inner_chk_task_db_heartbeat):
                req_handler.handle_inner_chk_task_db_heartbeat(self)
                break
            if case(msg.g_class_proxy_heartbeat):
                req_handler.handle_proxy_heartbeat(self, data)
                break
            if case():  # default: unknown message class
                self.loger.warn(_lineno(self), 'recv something else: ', data['class'])
    except Exception as e:
        self.loger.error(_lineno(self), 'inner error: ', repr(e))
        self.loger.error(traceback.format_exc())
def __myconnect__(self):
    """(Re)connect to MySQL on port 3306, closing any previous
    cursor/connection first.

    On success clears conn_error and the "db lost" error record; on
    failure sets conn_error, records the error and leaves self.cursor
    as None.
    """
    try:
        self.loger.info(_lineno(self), 'host[%s], user[%s], passwd[%s], db[%s]'
                        % (self.host, self.user, self.passwd, self.db))
        if self.db == '':
            # No schema configured: connect to the server only.
            if self.cursor:
                self.cursor.close()
            if self.conn:
                self.conn.close()
            self.conn = MySQLdb.connect(self.host, self.user, self.passwd, port=3306)
        else:
            if self.cursor:
                self.cursor.close()
            if self.conn:
                self.conn.close()
            self.conn = MySQLdb.connect(self.host, self.user, self.passwd, self.db, port=3306)
        self.conn_error = False
        mgr_singleton.g_singleton.get_err_info().del_db_error(
            mgr_singleton.g_singleton.get_err_info().db_desc_lose)
    except MySQLdb.Error, e:
        self.loger.error(_lineno(self), 'Cannot connect to server\nERROR: ', e)
        self.conn_error = True
        mgr_singleton.g_singleton.get_err_info().add_db_error(
            mgr_singleton.g_singleton.get_err_info().db_desc_lose)
        self.cursor = None
        self.loger.error(traceback.format_exc())
        #raise Exception("Database configure error!!!")
    return
def add(self, worker, data, ali_tbl):
    """Insert one domain NS record into the database.

    Expected payload (from the web API), e.g.:
    {"name":"@.ee.com","main":"ee.com","rid":324,
     "domain_ns":"ns2.dnspro.net.","level":"0","ttl":"600","viewid":"0"}

    Returns (query_result, False, None).
    """
    self.loger.info(_lineno(), 'adding domain_ns:', data['main'], '-->',
                    data['domain_ns'], ' into database')
    ttl = int(data['ttl'])
    sql = msg.g_sql_add_a_domain_ns % (data['main'], ttl, data['domain_ns'],
                                       data['rid'], data['main'], ttl,
                                       data['domain_ns'])
    add_ret = worker.dbcon.query(sql)
    self.loger.debug(_lineno(), 'add return ', add_ret)
    return add_ret, False, None
def handle_inner_chk_task(http_th, worker, _type):
    """Pull the pending task list for `_type` (domain or record) from the
    web backend over HTTP POST and queue the decoded payload on `worker`.

    Raises Exception when the POST fails or the backend answers ret != 0.
    """
    payload = {
        "type": msg.g_class_inner_reqtype_map[_type],
        "opt": "get",
        "ioopt": "中文",  # encoding probe value; round-tripped in the debug logs below
        "data": {"sid": mgr_conf.g_mgr_sid}
    }
    payload_encode = 'data=' + json.dumps(payload)
    mgr_singleton.g_singleton.get_loger().debug(_lineno(), 'post encode data:\n', repr(payload_encode))
    mgr_singleton.g_singleton.get_loger().debug(_lineno(), 'test decode data:\n', repr(json.loads(payload_encode[5:])))
    mgr_singleton.g_singleton.get_loger().debug(_lineno(), 'test pre ioopt--> utf8:', repr(payload['ioopt']))
    # Verify UTF-8 survives the json encode/decode round trip.
    test_de = json.loads(payload_encode[5:])['ioopt']
    mgr_singleton.g_singleton.get_loger().debug(_lineno(), 'test enc and dec ioopt:', test_de.encode("UTF-8"), ', utf8:', repr(test_de.encode("UTF-8")), ', unicode:', repr(test_de))
    res, post_error = http_th.http_send_post(mgr_conf.g_url_inner_chk_task_ip,
                                             mgr_conf.g_url_inner_chk_task_url,
                                             payload_encode)
    if not res:
        mgr_singleton.g_singleton.get_loger().warn(traceback.format_exc())
        raise Exception(_lineno(), 'request task post code:', post_error)
    mgr_singleton.g_singleton.get_loger().debug(_lineno(), 'request task return:\n', repr(res))
    decodejson = json.loads(res)
    mgr_singleton.g_singleton.get_loger().debug(_lineno(), 'json ret:', repr(decodejson['ret']))
    mgr_singleton.g_singleton.get_loger().debug(_lineno(), 'json error:', repr(decodejson['error']))
    mgr_singleton.g_singleton.get_loger().debug(_lineno(), 'json result:\n', repr(decodejson['result']))
    if decodejson['ret'] != 0:
        mgr_singleton.g_singleton.get_loger().warn(traceback.format_exc())
        raise Exception(_lineno(), 'request task return error! \
ret:%d error:%s' % (decodejson['ret'], decodejson['error']))
    # Re-tag the payload for the worker queue; strip transport-only fields.
    decodejson['class'] = msg.g_class_inner_map[_type]
    decodejson.pop('error')
    decodejson.pop('ret')
    worker.put(decodejson)
def sendto_(self, msgobj, addr, head, port=12345): if addr == None or not self.proxy_addr.has_key(addr): self.loger.error(_lineno(self), 'addr is error!!!! addr: ', repr(addr)) return False s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: host = self.proxy_addr[addr][0][0] encodedjson = json.dumps(msgobj) str_fmt = "H" + str(len(encodedjson)) + "s" str_send = struct.pack(str_fmt, head, encodedjson) s.connect((host, port)) cnt = len(str_send) self.loger.info(_lineno(self), 'need to send(%d)' % (cnt,)) ibegin = 0 iend = 0 ilen = 0 while True: ilen = mgr_conf.g_size_perpack if cnt>mgr_conf.g_size_perpack else cnt iend += ilen data = str_send[ibegin:iend] s.send(data) cnt -= ilen self.loger.info(_lineno(self), 'sent(%d)' % (ilen,)) ibegin = iend if cnt <= 0: break return True except socket.error, msg: self.loger.error(_lineno(self), 'dip(%s) (%s): %s' % (host, msg.args[0],msg.args[1])) self.loger.error(traceback.format_exc()) if self.proxy_addr.has_key(addr): self.proxy_addr.pop(addr) return False
def bat_notify(self, worker, data):
    """Push a whole domain's init tasks (plus regenerated sub-records) to
    the first known proxy, in packs of mgr_conf.g_row_perpack4init rows.

    Bug fix vs. original: the init-task loop referenced an undefined
    name `row` for the view id (`'view':row[2]`), which raised NameError
    at runtime; it now uses the loop variable `record`.
    """
    if len(worker.proxy_addr.keys()) < 1 or not data.has_key('main'):
        return
    self.loger.care(_lineno(), 'bat_data:', repr(data))
    sub_data = []
    sub_ret = self.add_subrecord_inline(worker, data['main'],
                                        int(data['viewid']), sub_data)
    self.loger.info(_lineno(), 'updating subrecord:', repr(sub_data))
    for record in sub_data:
        # Queue an 'add dns' task per regenerated sub-record.
        worker.dbcon.call_proc(msg.g_proc_add_task,
                               ('dns', record[1], int(data['viewid']),
                                record[0], 0, msg.g_opt_add))
    cur_cnt = 0
    msgobj = []
    worker.dbcon.query(msg.g_init_sql_inittask_dns)
    result = worker.dbcon.show()
    for record in result:
        msgobj.append({'id': record[0], 'opt': msg.g_opt_add,
                       'domain': record[3], 'view': record[2],  # fix: was row[2]
                       'type': record[1]})
        cur_cnt += 1
        if cur_cnt >= mgr_conf.g_row_perpack4init:
            # Pack is full: flush it to the proxy.
            if worker.sendto_(msgobj, worker.proxy_addr.keys()[0],
                              msg.g_pack_head_init_dns,
                              mgr_conf.g_reply_port) != True:
                return
            cur_cnt = 0
            del msgobj[:]
            time.sleep(1)
    if cur_cnt > 0:
        # Flush the final partial pack.
        if worker.sendto_(msgobj, worker.proxy_addr.keys()[0],
                          msg.g_pack_head_init_dns,
                          mgr_conf.g_reply_port) != True:
            return
        time.sleep(1)
def handle_inner_chk_task_db_heartbeat(worker):
    """Run the periodic DB heartbeat query.

    If the heartbeat returns no rows, assume the connection dropped and
    issue the query once more (which triggers the driver's reconnect
    path inside dbcon).
    """
    loger = mgr_singleton.g_singleton.get_loger()
    worker.dbcon.query(msg.g_inner_sql_db_heartbeat)
    result = worker.dbcon.show()
    loger.care(_lineno(), repr(result))
    if not result:
        loger.warn(_lineno(), 'reconnecting to mysql!!!!!')
        worker.dbcon.query(msg.g_inner_sql_db_heartbeat)
def reply(self, msgobj, head, addr):
    """Serialize msgobj to JSON, frame it with the unsigned-short `head`
    and send it to the proxy registered under `addr` over a short-lived
    TCP connection."""
    if addr == None or not self.proxy_addr.has_key(addr):
        self.loger.error(_lineno(self), 'addr is error!!!! addr: ', repr(addr))
        return
    payload = json.dumps(msgobj)
    self.loger.care(_lineno(self), 'sending:', payload)
    packed = struct.pack("H" + str(len(payload)) + "s", head, payload)
    self.__sendto_short__(packed, self.proxy_addr[addr][0][0],
                          mgr_conf.g_reply_port)
def level_watcher(event):
    """ZooKeeper watch callback for the log-level node.

    On connection loss, re-initialise the ZK session; otherwise re-arm
    the watch, read the node value and apply it as the new log level.
    NOTE: `self` is captured from the enclosing method's scope.
    """
    if event.connection_state != zookeeper.CONNECTED_STATE:
        self.connected = False
        self.handle = -1
        self.handle = zookeeper.init(mgr_conf.g_zkClis, self.connection_watcher, self.TIMEOUT)
        self.loger.error(_lineno(), "reconnecting to zks[%s] timeout is[%d]"
                         % (mgr_conf.g_zkClis, self.TIMEOUT))
        return
    # get() re-registers this watcher while fetching the current value.
    value = self.get(event.path, level_watcher)
    self.loger.info(_lineno(), "log level change %s:[%s]" % (event.path, value[0]))
    self.loger.set_level(value[0])
def run_websvr(ip=msg.g_websvr_ip, port='7788'):
    """Fork a child process that serves the SOAP web service.

    The child runs the twisted-based soaplib server (blocking call);
    the parent returns immediately.  A failed fork is logged to stderr.
    """
    try:
        pid = os.fork()
        if pid == 0:
            # Child process: build and run the SOAP/WSGI application.
            soap_app = soaplib.core.Application([HelloWorldService], 'dnspro')
            wsgi_app = wsgi.Application(soap_app)
            sys.stderr.write(_lineno() + ' listening on ' + ip + ':' + str(port) + '\n')
            sys.stderr.write(_lineno() + ' wsdl is at: http://' + ip + ':' + str(port) + '/dnspro/?wsdl\n')
            run_twisted(((wsgi_app, "dnspro"),), int(port))
    except OSError, e:
        sys.stderr.write(traceback.format_exc())
def handler(self, data):
    """Dispatch one HTTP-thread queue message by data['class']."""
    self.loger.debug(_lineno(self), 'msg class: ', data['class'])
    for case in switch(data['class']):
        if case(msg.g_class_inner_chk_task_domain) or case(msg.g_class_inner_chk_task_record):
            # Periodic "pull task list" request for domains/records.
            req_handler.handle_inner_chk_task(self, self.worker, data['class'])
            break
        if case(msg.g_class_inner_chk_task_done):
            req_handler.handle_inner_chk_task_done(self, data)
            break
        if case():  # default: unknown message class
            self.loger.warn(_lineno(self), 'recv something else: ', data['class'])
def run(self):
    """UDP server loop: receive JSON datagrams and route them to the
    worker queues until thread_stop is set.

    Init-phase replies and proxy heartbeats go to worker4init; an init
    request also raises the global g_init_should_stop flag; everything
    else goes to the main worker.
    """
    ADDR = (self.HOST, self.PORT)
    self.udpSerSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.udpSerSock.setblocking(False)
    self.udpSerSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.udpSerSock.bind(ADDR)
    rlists = [self.udpSerSock]
    while not self.thread_stop:
        try:
            # 1s select timeout keeps the stop flag responsive.
            rs, ws, es = select.select([self.udpSerSock], [], [], 1)
            if self.thread_stop:
                break
            if not (rs or ws or es):  #timeout
                continue
            for s in rs:
                data, addr = s.recvfrom(self.BUFSIZE)
                self.loger.info(_lineno(self), 'received from ', addr, ' data:', data)
                if data == None:
                    self.loger.info(_lineno(self), 'recv data none')
                    continue
                if len(data) == 0:
                    self.loger.info(_lineno(self), 'recv data len 0')
                    continue
                decodejson = json.loads(data)
                decodejson['inner_addr'] = addr  # remember the sender
                for case in switch(decodejson['class']):
                    if case(msg.g_class_init_view_reply) or \
                            case(msg.g_class_init_dns_reply) or \
                            case(msg.g_class_proxy_heartbeat):
                        # Init-phase traffic is handled by the init worker.
                        self.worker4init.put(decodejson)
                        break
                    if case(msg.g_class_init):
                        # A proxy asked for (re)init: abort any running init.
                        req_handler.g_init_should_stop = 1
                        self.loger.info(
                            _lineno(self), 'set g_init_should_stop %d'
                            % (req_handler.g_init_should_stop, ))
                        self.worker.put(decodejson)
                        break
                    if case():  # default
                        self.worker.put(decodejson)
        except Exception as e:
            self.loger.error(_lineno(self), 'inner error:', repr(e))
            self.loger.error(traceback.format_exc())
    self.udpSerSock.close()
def connection_watcher(self, h, type, state, path):
    """Global ZooKeeper session watcher.

    On CONNECTED: store the session handle, run __init_zk() and
    register().  Otherwise: drop the handle and start a fresh session
    (this watcher fires again when it connects).
    """
    if state == zookeeper.CONNECTED_STATE:
        self.handle = h
        self.connected = True
        self.loger.info(_lineno(), "connect to zks[%s] successfully!" % mgr_conf.g_zkClis)
        self.__init_zk()
        self.register()
    else:
        self.connected = False
        self.handle = -1
        self.handle = zookeeper.init(mgr_conf.g_zkClis, self.connection_watcher, self.TIMEOUT)
        self.loger.error(_lineno(), "try to connect to zks[%s] timeout is[%d]"
                         % (mgr_conf.g_zkClis, self.TIMEOUT))
def handler(self, data):
    """Dispatch one HTTP-thread queue message by data['class']."""
    self.loger.debug(_lineno(self), 'msg class: ', data['class'])
    for case in switch(data['class']):
        if case(msg.g_class_inner_chk_task_domain) or case(
                msg.g_class_inner_chk_task_record):
            # Periodic "pull task list" request for domains/records.
            req_handler.handle_inner_chk_task(self, self.worker, data['class'])
            break
        if case(msg.g_class_inner_chk_task_done):
            req_handler.handle_inner_chk_task_done(self, data)
            break
        if case():  # default: unknown message class
            self.loger.warn(_lineno(self), 'recv something else: ', data['class'])
def run(self):
    """UDP server loop: receive JSON datagrams and route them to the
    worker queues until thread_stop is set.

    Init-phase replies and proxy heartbeats go to worker4init; an init
    request also raises the global g_init_should_stop flag; everything
    else goes to the main worker.
    """
    ADDR = (self.HOST, self.PORT)
    self.udpSerSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.udpSerSock.setblocking(False)
    self.udpSerSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.udpSerSock.bind(ADDR)
    rlists = [self.udpSerSock]
    while not self.thread_stop:
        try:
            # 1s select timeout keeps the stop flag responsive.
            rs, ws, es = select.select([self.udpSerSock], [], [], 1)
            if self.thread_stop:
                break
            if not (rs or ws or es):  #timeout
                continue
            for s in rs:
                data, addr = s.recvfrom(self.BUFSIZE)
                self.loger.info(_lineno(self), 'received from ', addr, ' data:', data)
                if data == None:
                    self.loger.info(_lineno(self), 'recv data none')
                    continue
                if len(data) == 0:
                    self.loger.info(_lineno(self), 'recv data len 0')
                    continue
                decodejson = json.loads(data)
                decodejson['inner_addr'] = addr  # remember the sender
                for case in switch(decodejson['class']):
                    if case(msg.g_class_init_view_reply) or \
                            case(msg.g_class_init_dns_reply) or \
                            case(msg.g_class_proxy_heartbeat):
                        # Init-phase traffic is handled by the init worker.
                        self.worker4init.put(decodejson)
                        break
                    if case(msg.g_class_init):
                        # A proxy asked for (re)init: abort any running init.
                        req_handler.g_init_should_stop = 1
                        self.loger.info(_lineno(self), 'set g_init_should_stop %d'
                                        % (req_handler.g_init_should_stop,))
                        self.worker.put(decodejson)
                        break
                    if case():  # default
                        self.worker.put(decodejson)
        except Exception as e:
            self.loger.error(_lineno(self), 'inner error:', repr(e))
            self.loger.error(traceback.format_exc())
    self.udpSerSock.close()
def level_watcher(event):
    """ZooKeeper watch callback for the log-level node.

    On connection loss, re-initialise the ZK session; otherwise re-arm
    the watch, read the node value and apply it as the new log level.
    NOTE: `self` is captured from the enclosing method's scope.
    """
    if event.connection_state != zookeeper.CONNECTED_STATE:
        self.connected = False
        self.handle = -1
        self.handle = zookeeper.init(mgr_conf.g_zkClis, self.connection_watcher, self.TIMEOUT)
        self.loger.error(
            _lineno(), "reconnecting to zks[%s] timeout is[%d]"
            % (mgr_conf.g_zkClis, self.TIMEOUT))
        return
    # get() re-registers this watcher while fetching the current value.
    value = self.get(event.path, level_watcher)
    self.loger.info(
        _lineno(), "log level change %s:[%s]" % (event.path, value[0]))
    self.loger.set_level(value[0])
def __del_item(self, _type, *args):
    """Remove (and return) the newest error item of type `_type` that
    matches `args`.

    args by type:
      type_err_view:   (opt, view, mask)
      type_err_record: (opt, view, domain, type)
      type_err_db:     (desc,)
    Returns the removed item dict, or None when nothing matched.

    Bug fix vs. original: the type_err_view case was missing its switch
    `break`, so a view lookup fell through into the record case (the
    switch recipe keeps matching after the first hit) and indexed
    args[3], failing for 3-element view args.
    """
    retobj = None
    if not self.desc.has_key(_type):
        return retobj
    arr = self.desc[_type]
    for case in switch(_type):
        if case(self.type_err_view):
            # Scan backwards so `del arr[i]` cannot skip elements.
            for i in range(len(arr) - 1, -1, -1):
                if arr[i]['opt'] == args[0] \
                        and arr[i]['view'] == args[1] \
                        and arr[i]['mask'] == args[2]:
                    retobj = arr[i]
                    del arr[i]
                    break
            break  # fix: was missing, caused fall-through into the record case
        if case(self.type_err_record):
            for i in range(len(arr) - 1, -1, -1):
                if arr[i]['opt'] == args[0] \
                        and arr[i]['view'] == args[1] \
                        and arr[i]['domain'] == args[2] \
                        and arr[i]['type'] == args[3]:
                    retobj = arr[i]
                    del arr[i]
                    break
            break
        if case(self.type_err_db):
            for i in range(len(arr) - 1, -1, -1):
                if arr[i]['desc'] == args[0]:
                    retobj = arr[i]
                    del arr[i]
                    break
            break
        if case():
            self.loger.warn(_lineno(self), 'type %s not implemented!' % (_type,))
    return retobj
def create(self, path, data="", flags=0, acl=None):
    """Create a ZooKeeper node at `path` and log the elapsed time.

    `acl` defaults to [ZOO_OPEN_ACL_UNSAFE].  Returns the path actually
    created (ZooKeeper may append a sequence suffix).

    Fix vs. original: the ACL default was a mutable list in the
    signature (shared across calls); it is now built per call.
    """
    if acl is None:
        acl = [ZOO_OPEN_ACL_UNSAFE]
    start = time.time()
    result = zookeeper.create(self.handle, path, data, acl, flags)
    self.loger.info(
        _lineno(), "Node %s created in %d ms"
        % (path, int((time.time() - start) * 1000)))
    return result
def __init__(self, loger):
    """Worker thread: owns the DB connection and the per-type request
    handler dispatch table (record/domain/view/mask).

    Raises Exception when the initial DB connection fails.
    """
    queue_thread.Qthread.__init__(self, 'mgr_work_thread', self.handler_qsize, loger)
    self.check_thd = None
    self.proxy_addr = {}  # addr -> proxy address info, filled from init replies
    self.dbip = mgr_conf.g_db_ip
    self.dbcon = MySQL.MySQL(self.dbip, mgr_conf.g_db_user, mgr_conf.g_db_passwd,
                             mgr_conf.g_db_db, loger=loger)
    if self.dbcon.conn_error:
        self.loger.error(traceback.format_exc())
        raise Exception("[mgr_handler] Database configure error!!!")
    # Dispatch table: m_handlers[type][subtype] -> handler instance.
    self.m_handlers['record'] = {}
    self.m_handlers['record']['A'] = req_handler_record_a(self.loger)
    self.m_handlers['record']['PTR'] = req_handler_record_ptr(self.loger)
    self.m_handlers['record']['AAAA'] = req_handler_record_aaaa(self.loger)
    self.m_handlers['record']['CNAME'] = req_handler_record_cname(self.loger)
    self.m_handlers['record']['NS'] = req_handler_record_ns(self.loger)
    self.m_handlers['record']['TXT'] = req_handler_record_txt(self.loger)
    self.m_handlers['record']['MX'] = req_handler_record_mx(self.loger)
    self.m_handlers['record']['domain_ns'] = req_handler_record_domain_ns(self.loger)
    self.m_handlers['domain'] = {}
    self.m_handlers['domain']['__any__'] = req_handler_domain(self.loger)
    self.m_handlers['view'] = {}
    self.m_handlers['view']['__any__'] = req_handler_view(self.loger)
    self.m_handlers['mask'] = {}
    self.m_handlers['mask']['__any__'] = req_handler_mask(self.loger)
    self.loger.info(_lineno(), 'handlers map:', repr(self.m_handlers))
def run(self): #Overwrite run() method, put what you want the thread do here self.thread_stop = False while not self.thread_stop: time.sleep(1) if self.thread_stop: break self.lock.acquire() for pos in range(len(self.tasknodeds)): if self.tasknodeds[pos].deadline <= msg.g_now: newtns = self.tasknodeds[pos:] del self.tasknodeds[pos:] for timeout in newtns: msgobj = {'class':timeout.nname} try: for case in switch(timeout.nname): if case(msg.g_class_inner_chk_task_domain) \ or case(msg.g_class_inner_chk_task_record): self.http_tq.put(msgobj, block=False) break if case(msg.g_class_inner_chk_task_db_heartbeat): self.tq.put(msgobj, block=False) self.tq4init.put(msgobj, block=False) break if case(): self.tq.put(msgobj, block=False) except Queue.Full, e: self.loger.debug(_lineno(self), 'taskq is ', repr(e)) self.loger.debug(traceback.format_exc()) finally: timeout.deadline = msg.g_now + timeout.interval self.add_tasknode(timeout)
def donotify(self, worker, msgobj, opt, data, odata, real_tbl):
    """Mark pending init tasks as handled in the DB and notify the first
    known proxy.

    Always returns False.
    NOTE(review): notify_proxy is invoked twice with identical
    arguments back to back -- looks accidental; confirm whether the
    duplicate send is intended.
    """
    if len(worker.proxy_addr.keys()) < 1:
        return False
    self.loger.debug(_lineno(), 'opt:', opt, ' odata:', odata)
    for row in odata:
        # Flag this task row as handled, then queue it for the proxy.
        worker.dbcon.query(msg.g_init_sql_inittask_dns_inited % (row[0],))
        msgobj.append({'opt': row[4], 'domain': row[3], 'view': row[2],
                       'type': row[1], 'id': row[0],
                       'pkt_head': msg.g_pack_head_init_dns})
    req_handler.notify_proxy(worker, msgobj, worker.proxy_addr.keys()[0], False)
    req_handler.notify_proxy(worker, msgobj, worker.proxy_addr.keys()[0], False)
    # (A large block of commented-out per-opt add/set/del dispatch code
    # was removed here; see VCS history if it is ever needed again.)
    return False
def register(self):
    """Register this manager under WORKERS_PATH as an ephemeral ZK node
    whose value is the current log level, then read the level back."""
    self.path = self.WORKERS_PATH + "/" + mgr_conf.g_mgr_sid
    if not self.exists(self.path):
        self.path = self.create(self.path, self.loger.get_level(),
                                flags=zookeeper.EPHEMERAL)
    self.path = basename(self.path)
    self.loger.info(_lineno(), "register ok! I'm %s" % self.path)
    # check who is the master
    self.get_level()
def connection_watcher(self, h, type, state, path):
    """Global ZooKeeper session watcher.

    On CONNECTED: store the session handle, run __init_zk() and
    register().  Otherwise: drop the handle and start a fresh session
    (this watcher fires again when it connects).
    """
    if state == zookeeper.CONNECTED_STATE:
        self.handle = h
        self.connected = True
        self.loger.info(
            _lineno(), "connect to zks[%s] successfully!" % mgr_conf.g_zkClis)
        self.__init_zk()
        self.register()
    else:
        self.connected = False
        self.handle = -1
        self.handle = zookeeper.init(mgr_conf.g_zkClis, self.connection_watcher, self.TIMEOUT)
        self.loger.error(
            _lineno(), "try to connect to zks[%s] timeout is[%d]"
            % (mgr_conf.g_zkClis, self.TIMEOUT))
def set(self, worker, data, ali_tbl):
    """Toggle a domain's enable flag. Payload: {"name":..., "enable":1/0}.

    The value stored via the procedure is the inverse of the incoming
    flag.  Returns (True, True, task_rows).
    """
    n_enable = 0 if int(data['enable']) else 1
    self.loger.info(_lineno(), 'update domain:', data['name'], '[', n_enable,
                    '] from database')
    worker.dbcon.call_proc(msg.g_proc_set_a_domain, (data['name'], n_enable))
    worker.dbcon.query(msg.g_init_sql_gettask_dns)
    result = worker.dbcon.show()
    return True, True, result
def add_subrecord_inline(self, worker, str_main, n_vid, add_data):
    """Call g_proc_get_subrecord_inline and drain every result set it
    produces into `add_data` (a caller-supplied list of rows).

    The cursor is recreated afterwards via fetch_proc_reset() so later
    queries start clean.  Returns the call_proc return value.
    """
    add_ret = worker.dbcon.call_proc(msg.g_proc_get_subrecord_inline, (str_main, n_vid))
    ars = worker.dbcon.show()
    self.loger.care(_lineno(), repr(ars))
    hasnext = True
    while hasnext == True:
        if ars and len(ars) > 0:
            for i in range(len(ars)):
                add_data.append(ars[i])
        # nextset() returns None when no more result sets remain.
        hasnext = worker.dbcon.nextset()
        if None == hasnext:
            hasnext = False
        else:
            ars = worker.dbcon.show()
            self.loger.care(_lineno(), repr(ars))
    worker.dbcon.fetch_proc_reset()
    return add_ret
def delete(self, worker, data, ali_tbl):
    """Delete a view mask. Payload: {"mask":"123.150.107.0/24","vid":2}.

    Queues a 'del view' task for the proxies and returns
    (delete_query_result, True, mask_task_rows).
    """
    vid = int(data['vid'])
    data['vid'] = vid
    self.loger.info(_lineno(), 'deleting mask mask:', data['mask'],
                    ' vid:', str(vid))
    del_ret = worker.dbcon.query(msg.g_sql_del_a_mask % (data['mask'], vid))
    worker.dbcon.call_proc(msg.g_proc_add_task,
                           ('view', 0, vid, data['mask'], 0, msg.g_opt_del))
    worker.dbcon.query(msg.g_init_sql_gettask_mask)
    result = worker.dbcon.show()
    return del_ret, True, result
def delete(self, worker, data, ali_tbl):
    """Delete a domain. Payload: {"name":"test.com"}.

    Refuses with (False, False, None) while any record table still
    holds sub-records for the domain; otherwise runs the delete
    procedure and returns (True, True, task_rows).

    Cleanup vs. original: removed a dead `if False:` branch that would
    have rerouted delete to set(enable=0).
    """
    for atbl in msg.g_list_tbl:
        # A domain with live sub-records must not be deleted.
        worker.dbcon.query(msg.g_sql_get_exist_records % (atbl, data['name']))
        result = worker.dbcon.show()
        if result and len(result) > 0:
            self.loger.error(_lineno(), 'deleting domain:', data['name'],
                             ', find sub records:', repr(result),
                             ' has sub records!!')
            return False, False, None
    self.loger.info(_lineno(), 'deleting domain:', data['name'], ' from database')
    worker.dbcon.call_proc(msg.g_proc_del_a_domain, (data['name'],))
    worker.dbcon.query(msg.g_init_sql_gettask_dns)
    result = worker.dbcon.show()
    return True, True, result
def nextset(self):
    """Advance the cursor to its next result set.

    Returns the driver's nextset() result, or None when the connection
    is flagged broken or a MySQL error occurs.
    """
    try:
        if not self.conn_error:
            return self.cursor.nextset()
    except MySQLdb.Error, e:
        self.loger.error(_lineno(self), "Mysql Error:", e)
        self.__myset_conn_error(e)
        self.loger.error(traceback.format_exc())
    return None
def __init__(self, loger):
    """Start an asynchronous ZooKeeper session.

    connection_watcher completes the setup (node creation and
    registration) once the session is established.
    """
    self.loger = loger
    self.path = None
    self.handle = -1
    self.handle = zookeeper.init(mgr_conf.g_zkClis, self.connection_watcher, self.TIMEOUT)
    self.loger.info(
        _lineno(), "connecting to zks[%s] timeout is[%d]"
        % (mgr_conf.g_zkClis, self.TIMEOUT))
def reply_echo(self, data, host, port): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: addre = (host, port) encodedjson = json.dumps(data) s.sendto(encodedjson, addre) except socket.error, msg: self.loger.error(_lineno(self), 'dip(%s) (%s): %s' % (host, msg.args[0],msg.args[1])) self.loger.error(traceback.format_exc())
def set(self, worker, data, ali_tbl):
    """Insert or update a view row via g_sql_add_a_view (the comment and
    ttl parameters are repeated for the update clause of the SQL).

    Returns (query_result, False, None).
    """
    data['vname'] = urllib.unquote(str(data['vname']))
    data['comment'] = urllib.unquote(str(data['comment']))
    data['ttl'] = int(data['ttl'])
    data['vid'] = int(data['vid'])
    self.loger.info(_lineno(), 'updating view name:', data['vname'],
                    ' vid:', str(data['vid']),
                    ' comment:', data['comment'],
                    ' ttl:', str(data['ttl']))
    sql = msg.g_sql_add_a_view % (data['vid'], data['vname'], data['comment'],
                                  data['ttl'], data['comment'], data['ttl'])
    add_ret = worker.dbcon.query(sql)
    return add_ret, False, None
def register(self):
    """Create this manager's ephemeral node under WORKERS_PATH (value =
    current log level), then read the level back via get_level()."""
    self.path = self.WORKERS_PATH + "/" + mgr_conf.g_mgr_sid
    if not self.exists(self.path):
        self.path = self.create(self.path, self.loger.get_level(),
                                flags=zookeeper.EPHEMERAL)
    self.path = basename(self.path)
    self.loger.info(_lineno(), "register ok! I'm %s" % self.path)
    # check who is the master
    self.get_level()
def run(self):
    """Queue-consumer loop: pop messages (1s timeout) and hand them to
    self.handler() until thread_stop is set; runs onstop() on exit."""
    while not self.thread_stop:
        try:
            data = self.tq.get(block=True, timeout=1)
            if self.thread_stop:
                self.tq.task_done()
                break
            self.handler(data)
            self.tq.task_done()
        except Queue.Empty as e:
            # Normal idle path; logged only when log_queue is enabled.
            if self.log_queue:
                self.loger.debug(_lineno(self), '[', self.th_name, ':run] taskq is ', repr(e))
                self.loger.debug(traceback.format_exc())
        except Exception as e:
            self.tq.task_done()
            self.loger.error(_lineno(self), '[', self.th_name, ':run] inner error:', repr(e))
            self.loger.error(traceback.format_exc())
    self.onstop()
def commit(self):
    """Commit the current DB transaction, reconnecting first when the
    connection is flagged broken.

    Returns conn.commit()'s result, or None on MySQL error.
    """
    try:
        if self.conn_error:
            self.__myconnect__()
        return self.conn.commit()
    except MySQLdb.Error, e:
        self.loger.error(_lineno(self), "Mysql Error %d: %s" % (e.args[0], e.args[1]))
        self.__myset_conn_error(e)
        self.loger.error(traceback.format_exc())
def query(self, sql, value=None):
    """Execute an SQL statement, reconnecting first when the connection
    is flagged broken.

    Returns cursor.execute()'s result, or None on MySQL error.
    """
    try:
        if self.conn_error:
            self.__myconnect__()
        return self.cursor.execute(sql, value)
    except MySQLdb.Error, e:
        self.loger.error(_lineno(self), "Mysql Error %d: %s" % (e.args[0], e.args[1]))
        self.__myset_conn_error(e)
        self.loger.error(traceback.format_exc())
def __sendto_short__(self, data, host, port=12345): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect((host, port)) s.send(data) except socket.error, msg: self.loger.error(_lineno(self), 'dip(%s) (%s): %s' % (host, msg.args[0],msg.args[1])) self.loger.error(traceback.format_exc()) if self.proxy_addr.has_key(host): self.proxy_addr.pop(host)
def __myclose__(self): """ Terminate the connection """ try: if self.conn: self.conn.close() if self.cursor: self.cursor.close() except MySQLdb.Error,e: self.loger.error(_lineno(self), "Mysql Error %d: %s" % (e.args[0], e.args[1])) self.loger.error(traceback.format_exc())
def reply_echo(self, data, host, port): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: addre = (host, port) encodedjson = json.dumps(data) s.sendto(encodedjson, addre) except socket.error, msg: self.loger.error( _lineno(self), 'dip(%s) (%s): %s' % (host, msg.args[0], msg.args[1])) self.loger.error(traceback.format_exc())
def show(self):
    """Fetch all rows of the last executed statement.

    Returns cursor.fetchall()'s rows, or None on MySQL error.
    """
    try:
        if self.conn_error:
            self.__myconnect__()
        return self.cursor.fetchall()
    except MySQLdb.Error, e:
        self.loger.error(_lineno(self), "Mysql Error: ", repr(e))
        self.__myset_conn_error(e)
        self.loger.error(traceback.format_exc())
        return None
def call_proc(self, proc_name, values):
    """Call a stored procedure, reconnecting first when the connection
    is flagged broken.

    Returns False on MySQL error, None on success.
    NOTE(review): success returns None while failure returns False --
    callers that truth-test the result cannot tell them apart.
    """
    try:
        if self.conn_error:
            self.__myconnect__()
        self.cursor.callproc(proc_name, values)
    except MySQLdb.Error, e:
        self.loger.error(_lineno(self), "Mysql Error %d: %s" % (e.args[0], e.args[1]))
        self.__myset_conn_error(e)
        self.loger.error(traceback.format_exc())
        return False
def __sendto_short__(self, data, host, port=12345): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect((host, port)) s.send(data) except socket.error, msg: self.loger.error( _lineno(self), 'dip(%s) (%s): %s' % (host, msg.args[0], msg.args[1])) self.loger.error(traceback.format_exc()) if self.proxy_addr.has_key(host): self.proxy_addr.pop(host)
def main_loop():
    """Main keep-alive loop: ticks the global clock once per second and,
    when stdin is enabled, watches for the quit word ('wwq') or EOF.

    On exit restores the terminal settings saved by the caller (cbreak
    mode) and stops all worker threads via stop_all().
    """
    while 1:
        msg.g_now += 1  # global 1-second tick consumed by the timer thread
        mgr_singleton.g_singleton.get_loger()._uptime()
        #mgr_singleton.g_singleton.get_loger().debug(_lineno(), msg.g_now)
        if not msg.g_enable_stdin:
            time.sleep(1)
        else:
            # Read user input (1s select timeout keeps the tick rate).
            try:
                if select.select([sys.stdin], [], [], 1) == ([sys.stdin], [], []):
                    line = sys.stdin.readline()
                    sys.stdout.write(line)  # echo (terminal is in cbreak mode)
                    sys.stdout.flush()
                    # EOF or the magic quit word terminates the loop.
                    if not line or line.rstrip() == 'wwq':
                        break
            except KeyboardInterrupt as e:
                mgr_singleton.g_singleton.get_loger().error(
                    _lineno(), 'KeyboardInterrupt:%s' % e)
                # log the traceback as well
                mgr_singleton.g_singleton.get_loger().error(
                    traceback.format_exc())
                break
            except IOError as e:
                mgr_singleton.g_singleton.get_loger().error(
                    _lineno(), 'IOError:%s' % (e))
                # log the traceback as well
                mgr_singleton.g_singleton.get_loger().error(
                    traceback.format_exc())
                break
            except Exception as e:
                mgr_singleton.g_singleton.get_loger().error(
                    _lineno(), 'exception:%s' % (e, ))
                # log the traceback as well
                mgr_singleton.g_singleton.get_loger().error(
                    traceback.format_exc())
                break
    # Restore terminal state saved before tty.setcbreak().
    if msg.g_enable_stdin and msg.old_settings:
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, msg.old_settings)
    stop_all()
def fetch_proc_reset(self):
    """Fetch all remaining rows, then recreate the cursor to discard
    any result sets left over from a stored-procedure call.

    Returns the fetched rows, or None on MySQL error.
    """
    try:
        if self.conn_error:
            self.__myconnect__()
        result = self.cursor.fetchall()
        self.cursor.close()
        self.cursor = self.conn.cursor()
        return result
    except MySQLdb.Error, e:
        self.loger.error(_lineno(self), "Mysql Error:", e)
        self.__myset_conn_error(e)
        self.loger.error(traceback.format_exc())
        return None
def sendto_(self, msgobj, addr, head, port=12345): if addr == None or not self.proxy_addr.has_key(addr): self.loger.error(_lineno(self), 'addr is error!!!! addr: ', repr(addr)) return False s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: host = self.proxy_addr[addr][0][0] encodedjson = json.dumps(msgobj) str_fmt = "H" + str(len(encodedjson)) + "s" str_send = struct.pack(str_fmt, head, encodedjson) s.connect((host, port)) cnt = len(str_send) self.loger.info(_lineno(self), 'need to send(%d)' % (cnt, )) ibegin = 0 iend = 0 ilen = 0 while True: ilen = mgr_conf.g_size_perpack if cnt > mgr_conf.g_size_perpack else cnt iend += ilen data = str_send[ibegin:iend] s.send(data) cnt -= ilen self.loger.info(_lineno(self), 'sent(%d)' % (ilen, )) ibegin = iend if cnt <= 0: break return True except socket.error, msg: self.loger.error( _lineno(self), 'dip(%s) (%s): %s' % (host, msg.args[0], msg.args[1])) self.loger.error(traceback.format_exc()) if self.proxy_addr.has_key(addr): self.proxy_addr.pop(addr) return False
def run(self):
    """Process entry point: build the singleton, start every worker
    thread in dependency order, switch stdin to cbreak mode (when
    enabled) and finally enter main_loop()."""
    signal.signal(signal.SIGINT, sigint_handler)
    mgr_singleton.g_singleton = mgr_singleton.mgr_singleton(g_factory)
    #mgr_singleton.g_singleton.get_loger().debug(_lineno(), 'starting')
    #sys.exit()
    # Instantiate the lazily-created threads before cross-wiring them.
    mgr_singleton.g_singleton.get_reply_thread()
    mgr_singleton.g_singleton.get_http_thread()
    if False:
        mgr_singleton.g_singleton.get_check_thread(
        ).add_tasknode_byinterval_lock(msg.g_class_inner_chk_init_ok,
                                       mgr_conf.g_inner_chk_init_ok_time)
    if False:
        #test
        mgr_singleton.g_singleton.get_check_thread(
        ).add_tasknode_byinterval_lock(msg.g_class_init_test,
                                       mgr_conf.g_inner_chk_init_ok_time)
    # Wire the buddy threads so workers can reach the http/check threads.
    mgr_singleton.g_singleton.get_worker().set_buddy_thread(
        mgr_singleton.g_singleton.get_http_thread(),
        mgr_singleton.g_singleton.get_check_thread())
    mgr_singleton.g_singleton.get_worker4init().set_buddy_thread(
        mgr_singleton.g_singleton.get_check_thread())
    mgr_singleton.g_singleton.get_loger().start()
    time.sleep(1)
    mgr_singleton.g_singleton.get_zkhandler()
    mgr_singleton.g_singleton.get_worker4init().start()
    mgr_singleton.g_singleton.get_worker().start()
    mgr_singleton.g_singleton.get_reply_thread().start()
    time.sleep(1)
    mgr_singleton.g_singleton.get_http_thread().start()
    mgr_singleton.g_singleton.get_check_thread().start()
    # Schedule the periodic DB heartbeat task.
    mgr_singleton.g_singleton.get_check_thread(
    ).add_tasknode_byinterval_lock(msg.g_class_inner_chk_task_db_heartbeat,
                                   mgr_conf.g_inner_chk_task_db_heartbeat)
    #start_web()
    try:
        if msg.g_enable_stdin:
            # Save terminal state (restored by main_loop) and go cbreak.
            msg.old_settings = termios.tcgetattr(sys.stdin)
            tty.setcbreak(sys.stdin.fileno())
    except Exception as e:
        mgr_singleton.g_singleton.get_loger().error(
            _lineno(), 'Exception:%s' % (e, ))
        mgr_singleton.g_singleton.get_loger().error(traceback.format_exc())
    finally:
        main_loop()
def __chmod__(self, l, ldata):
    """Write one log record if level `l` passes the realtime/level filter.

    Returns True when realtime logging is disabled; False otherwise.
    NOTE(review): the success path and the level-filtered path both
    return False, so callers cannot distinguish them -- confirm this is
    intended.
    """
    if not self.realtime:
        return True
    if l > self.level:
        return False
    try:
        self.lock.acquire()
        self.__wlog__(l, ldata)
    except Exception as e:
        sys.stderr.write('<ERROR>(%s)%s error:%s\n'
                         % (self.str_times[self.time_idx], _lineno(self), e))
        sys.stderr.write(traceback.format_exc())
    finally:
        self.lock.release()
    return False
    return False  # unreachable; kept from original
def http_send_post(self, _dip, _url, _body):
    """POST _body (urlencoded form data) to http://_dip/_url.

    :param _dip:  destination "host[:port]" string, also sent as Host header.
    :param _url:  request path.
    :param _body: already-encoded request body.
    :returns: (response_body, status) on HTTP 200,
              (None, status) on any other status.
    :raises: whatever httplib raises on connection/IO failure.

    Fix: the connection is now closed in a finally block, so it no longer
    leaks when request()/getresponse()/read() raises.
    """
    headerdata = {
        "Host": _dip,
        "Content-Type": "application/x-www-form-urlencoded"
    }
    conn = httplib.HTTPConnection(_dip)
    try:
        conn.request(method="POST", url=_url, body=_body, headers=headerdata)
        response = conn.getresponse()
        if msg.g_http_response_OK != response.status:
            self.loger.warn(_lineno(self), 'request task response: ',
                            response.status)
            return None, response.status
        res = response.read()
        #res = urllib.unquote(res)
        return res, response.status
    finally:
        conn.close()
def __to_array(self):
    """Flatten the unsent entries of self.desc into two lists.

    Returns [objs_err, objs_regain]: entries still in error state and entries
    that have recovered (regain == 1).  Each emitted item has the shape
    {'etype': <error type key>, 'desc': {<type-specific fields>}}.
    Side effect: every visited entry is marked j['sent'] = 1 so it is only
    reported once.
    """
    objs = [[], []]
    objs_err = objs[0]      # entries still in error state
    objs_regain = objs[1]   # entries that have recovered
    for k in self.desc:
        v = self.desc.get(k)
        # Only list-valued entries hold error records; skip anything else.
        if type(v) is types.ListType:
            for j in v:
                # Already reported in a previous call — skip.
                if 1 == j['sent']:
                    continue
                # NOTE(review): marked sent even when the type turns out to be
                # unimplemented below, so such entries are dropped forever —
                # confirm that is intended.
                j['sent'] = 1
                item = {'etype': k, 'desc': {}}
                # -1/1: route item into one of the result lists below;
                #  0: unknown type, item is discarded.
                set_where = -1  # -1 or 1 or 0 not set
                for case in switch(k):
                    if case(self.type_err_db):
                        item['desc']['regain'] = j['regain']
                        item['desc']['settime'] = j['settime']
                        item['desc']['detail'] = self.db_desc[j['desc']]
                        item['desc']['desc'] = j['desc']
                        break
                    if case(self.type_err_view):
                        item['desc']['regain'] = j['regain']
                        item['desc']['settime'] = j['settime']
                        item['desc']['opt'] = j['opt']
                        item['desc']['view'] = j['view']
                        item['desc']['mask'] = j['mask']
                        break
                    if case(self.type_err_record):
                        item['desc']['regain'] = j['regain']
                        item['desc']['settime'] = j['settime']
                        item['desc']['opt'] = j['opt']
                        item['desc']['view'] = j['view']
                        item['desc']['domain'] = j['domain']
                        item['desc']['type'] = j['type']
                        break
                    if case():
                        set_where = 0
                        self.loger.warn(_lineno(self),
                                        'type %s not implemented!' % (k, ))
                if 0 != set_where:
                    if 1 == j['regain']:
                        objs_regain.append(item)
                    else:
                        objs_err.append(item)
    return objs
def __init__(self, loger):
    """Build the manager worker thread and register all request handlers.

    :param loger: shared loger instance, passed through to the Qthread base
                  and to every request handler.
    :raises Exception: when the initial MySQL connection fails.
    """
    queue_thread.Qthread.__init__(self, 'mgr_work_thread',
                                  self.handler_qsize, loger)
    self.check_thd = None       # buddy check thread, wired later via set_buddy_thread
    self.proxy_addr = {}        # presumably proxy-id -> address map; confirm at call sites
    self.dbip = mgr_conf.g_db_ip
    self.dbcon = MySQL.MySQL(self.dbip, mgr_conf.g_db_user,
                             mgr_conf.g_db_passwd, mgr_conf.g_db_db,
                             loger=loger)
    # Fail fast: the worker is useless without a database connection.
    if self.dbcon.conn_error:
        self.loger.error(traceback.format_exc())
        raise Exception("[mgr_handler] Database configure error!!!")
    # NOTE(review): self.m_handlers and self.loger are assumed to be set up by
    # the Qthread base class — confirm.
    # Per-record-type handlers, keyed by DNS record type.
    self.m_handlers['record'] = {}
    self.m_handlers['record']['A'] = req_handler_record_a(self.loger)
    self.m_handlers['record']['PTR'] = req_handler_record_ptr(self.loger)
    self.m_handlers['record']['AAAA'] = req_handler_record_aaaa(self.loger)
    self.m_handlers['record']['CNAME'] = req_handler_record_cname(
        self.loger)
    self.m_handlers['record']['NS'] = req_handler_record_ns(self.loger)
    self.m_handlers['record']['TXT'] = req_handler_record_txt(self.loger)
    self.m_handlers['record']['MX'] = req_handler_record_mx(self.loger)
    self.m_handlers['record']['domain_ns'] = req_handler_record_domain_ns(
        self.loger)
    # Category-level handlers: '__any__' handles every request of that class.
    self.m_handlers['domain'] = {}
    self.m_handlers['domain']['__any__'] = req_handler_domain(self.loger)
    self.m_handlers['view'] = {}
    self.m_handlers['view']['__any__'] = req_handler_view(self.loger)
    self.m_handlers['mask'] = {}
    self.m_handlers['mask']['__any__'] = req_handler_mask(self.loger)
    self.loger.info(_lineno(), 'handlers map:', repr(self.m_handlers))
def __del_item(self, _type, *args):
    """Remove and return the first matching error entry of _type.

    args depends on _type:
      type_err_view:   (opt, view, mask)
      type_err_record: (opt, view, domain, type)
      type_err_db:     (desc,)

    :returns: the removed entry dict, or None when nothing matched.

    Fix: the type_err_view case was missing its case-level break (both other
    cases have one).  With the standard switch recipe a matched case enables
    fall-through, so after deleting a view entry control fell into the
    type_err_record case, which indexes args[3] and arr[i]['domain'] and
    would raise IndexError/KeyError.
    """
    retobj = None
    # has_key() is Python-2-only; `in` behaves identically and is portable.
    if _type not in self.desc:
        return retobj
    arr = self.desc[_type]
    for case in switch(_type):
        if case(self.type_err_view):
            # Scan backwards so `del arr[i]` cannot disturb unvisited indexes.
            for i in range(len(arr) - 1, -1, -1):
                if arr[i]['opt'] == args[0] \
                        and arr[i]['view'] == args[1] \
                        and arr[i]['mask'] == args[2]:
                    retobj = arr[i]
                    del arr[i]
                    break
            break  # BUG FIX: was missing, caused fall-through into next case
        if case(self.type_err_record):
            for i in range(len(arr) - 1, -1, -1):
                if arr[i]['opt'] == args[0] \
                        and arr[i]['view'] == args[1] \
                        and arr[i]['domain'] == args[2] \
                        and arr[i]['type'] == args[3]:
                    retobj = arr[i]
                    del arr[i]
                    break
            break
        if case(self.type_err_db):
            for i in range(len(arr) - 1, -1, -1):
                if arr[i]['desc'] == args[0]:
                    retobj = arr[i]
                    del arr[i]
                    break
            break
        if case():
            self.loger.warn(_lineno(self),
                            'type %s not implemented!' % (_type, ))
    return retobj
def _w2file(self, data): if self.dir_ == 1 and self.linec > self.maxline: self._closef() if self.dir_ == 1 and self.fp == None: self._openf() try: if self.dir_ == 1: self.fp.write(data) else: sys.stdout.write(data) except Exception as e: sys.stderr.write(self.dic_levelstr[self.lerror] + '(' + self.str_times[self.time_idx] + ')' + _lineno(self) \ + '\n' + traceback.format_exc()) self._closef()