def conn_redis(self):
    """Connect to Redis, soft-failing over to the "redis2" instance.

    Builds a connection pool from self.host/port/db/password and verifies it
    with a real GET (ConnectionPool construction is lazy and never raises on
    an unreachable server). On failure, rewrites the connection attributes
    from the "redis2" ini section and retries once.

    Returns:
        "000000" on success, "000204" when both instances fail.
    Side effects:
        sets self.r (StrictRedis client) and self.str_return (error code).
    """
    self.str_return = "999999"
    try:
        rdp = redis.ConnectionPool(host=self.host, port=self.port,
                                   db=self.db, password=self.password)
        self.r = redis.StrictRedis(connection_pool=rdp)
        # A real command is required to prove connectivity.
        self.r.get("test")
        return "000000"
    except Exception as e:
        self.str_return = "002001"
        # BUG FIX: logger.error was given a second positional argument with no
        # %s placeholder, which made logging itself raise a formatting error.
        logger.error("connect redis1 error code, trying redis2: %s", self.str_return)
        logger.error(e)
        # Soft failover: switch this client to the secondary Redis instance.
        self.host = utility.get_myibs_ini("redis2", "hostname")
        self.port = int(utility.get_myibs_ini("redis2", "port"))
        self.db = int(utility.get_myibs_ini("redis2", "db"))
        self.password = utility.get_myibs_ini("redis2", "password")
        self.sleeptime_before_retry = int(
            utility.get_myibs_ini("redis2", "sleeptime_before_retry"))
        try:
            rdp = redis.ConnectionPool(host=self.host, port=self.port,
                                       db=self.db, password=self.password)
            self.r = redis.StrictRedis(connection_pool=rdp)
            # BUG FIX: the original never exercised the fallback connection,
            # so a dead redis2 still reported "000000".
            self.r.get("test")
            return "000000"
        except Exception as e:
            # BUG FIX: this branch concerns redis2, not redis1.
            logger.error("connect redis2 error code: %s", self.str_return)
            logger.error(e)
            return "000204"
def sftp_put(self, source, target):
    """Upload local file *source* to remote path *target* over SFTP.

    Connection parameters come from the "ftp_server1" ini section.

    Returns:
        dict with "rtn_code" ("000000" on success, "000303" on failure)
        and "rsp_msg" (always None).
    """
    rtn_dict = {}
    transport = None
    try:
        host = utility.get_myibs_ini("ftp_server1", "host")
        port = int(utility.get_myibs_ini("ftp_server1", "port"))
        username = utility.get_myibs_ini("ftp_server1", "username")
        password = utility.get_myibs_ini("ftp_server1", "password")
        transport = paramiko.Transport((host, port))
        transport.connect(username=username, password=password)
        sftp = paramiko.SFTPClient.from_transport(transport)
        sftp.put(source, target)
        rtn_dict["rtn_code"] = "000000"
        rtn_dict["rsp_msg"] = None
    except Exception as e:
        rtn_dict["rtn_code"] = "000303"
        rtn_dict["rsp_msg"] = None
        # BUG FIX: was logged as "sftp_read" (copy-paste) and used an extra
        # positional logger argument without a placeholder; also returned
        # None here, discarding the error dict the caller expects.
        logger.error("sftp_put error code: %s", rtn_dict["rtn_code"])
        logger.error(e)
    finally:
        # BUG FIX: transport was unbound (NameError) when Transport() itself
        # failed, masking the original exception.
        if transport is not None:
            transport.close()
    return rtn_dict
def __init__(self):
    """Load "redis1" connection settings from the ini file and connect."""
    section = "redis1"
    self.host = utility.get_myibs_ini(section, "hostname")
    self.port = int(utility.get_myibs_ini(section, "port"))
    self.db = int(utility.get_myibs_ini(section, "db"))
    self.password = utility.get_myibs_ini(section, "password")
    self.sleeptime_before_retry = int(
        utility.get_myibs_ini(section, "sleeptime_before_retry"))
    self.conn_redis()
def tcpServer():
    """Run a blocking TCP request/response server.

    Configuration comes from the "asyn_TCP_server_001" ini section. For each
    accepted connection, reads one message and replies with
    parm_mas_add(request, random 1-10). Loops until an exception occurs.

    Returns:
        "000000" if at least one request was served, "999999" on error.
    """
    port = int(utility.get_myibs_ini("asyn_TCP_server_001", "port"))
    timeout = int(utility.get_myibs_ini("asyn_TCP_server_001", "timeout"))
    msg_length = int(utility.get_myibs_ini("asyn_TCP_server_001", "msg_length"))
    con_connection = int(
        utility.get_myibs_ini("asyn_TCP_server_001", "con_connection"))
    rtn_code = "999999"
    s = None
    c = None
    try:
        s = socket.socket()
        host = socket.gethostname()
        s.bind((host, port))
        s.listen(con_connection)
        print('Listening')
        logger.debug('Listening: ' + str(port))
        while True:
            c, addr = s.accept()
            c.settimeout(timeout)
            logger.debug('Got connection from: ' + str(addr))
            req = c.recv(msg_length).decode()
            print('Message received: %s' % req)
            logger.info('Message received: ' + str(req))
            rep = parm_mas_add(req, random.randint(1, 10))
            print("response message: " + str(rep))
            logger.info('response message: ' + str(rep))
            c.send(rep.encode())
            logger.debug("response message sent: ")
            rtn_code = "000000"
    except Exception as e:
        logger.error("post method error code:" + str(rtn_code))
        logger.error(e)
        rtn_code = "999999"
    finally:
        # BUG FIX: c was unbound (NameError) if accept() never succeeded,
        # and the listening socket itself was never closed.
        if c is not None:
            c.close()
        if s is not None:
            s.close()
        print("server connection closed")
        logger.info("server connection closed")
    return rtn_code
def tcpClient(msg):
    """Loop forever sending an incrementing counter string to the TCP server.

    *msg* is only printed on entry; the payload actually sent is a float
    counter incremented by 0.000001 each round. Configuration comes from the
    "asyn_TCP_client_001" ini section. Exits (returning None) on the first
    exception.
    """
    server = utility.get_myibs_ini("asyn_TCP_client_001", "server")
    port = int(utility.get_myibs_ini("asyn_TCP_client_001", "port"))
    timeout = int(utility.get_myibs_ini("asyn_TCP_client_001", "timeout"))
    msg_length = int(utility.get_myibs_ini("asyn_TCP_client_001", "msg_length"))
    sleep_time = float(
        utility.get_myibs_ini("asyn_TCP_client_001", "sleep_time"))
    float_msg = 1
    print(msg)
    s = None
    try:
        while True:
            print('Ready to create socket')
            # BUG FIX: the original opened a fresh socket every iteration but
            # only closed the final one, leaking one connected socket per loop.
            if s is not None:
                s.close()
            s = socket.socket()
            print('Connecting')
            s.connect((server, port))
            print('Connected')
            s.settimeout(timeout)
            float_msg = float_msg + 0.000001
            req = str(float_msg)
            s.send(req.encode())
            print('Waiting for the message from server...')
            rep = s.recv(msg_length).decode()
            print('Message received: %s' % rep)
            time.sleep(sleep_time)
    except Exception as e:
        print(e)
    finally:
        if s is not None:
            s.close()
def smtp_send(self, in_parm_dict):
    """Send a plain-text email via the configured SMTP-over-SSL server.

    Args:
        in_parm_dict: dict with keys "to_user", "msg_subject", "msg_body".
    Returns:
        "000000" on success, None on SMTP failure.
    """
    to_user = in_parm_dict["to_user"]
    msg_subject = in_parm_dict["msg_subject"]
    # BUG FIX: the body was read from "msg_subject", so every mail went out
    # with the subject duplicated as its body and the real body discarded.
    msg_body = in_parm_dict["msg_body"]
    smtp_server = utility.get_myibs_ini("smtp_service", "smtp_server")
    smtp_server_port = utility.get_myibs_ini("smtp_service", "smtp_server_port")
    _user = utility.get_myibs_ini("smtp_service", "from_user")
    _pwd = utility.get_myibs_ini("smtp_service", "passcode")
    _to = to_user
    msg = MIMEText(msg_body)
    msg["Subject"] = msg_subject
    msg["From"] = _user
    msg["To"] = _to
    try:
        print("connecting SMTP server: %s:%s" % (smtp_server, smtp_server_port))
        s = smtplib.SMTP_SSL(smtp_server, int(smtp_server_port))
        print("login...")
        s.login(_user, _pwd)
        print("sending message")
        s.sendmail(_user, _to, msg.as_string())
        s.quit()
        print("message sent successfully!")
        return "000000"
    except smtplib.SMTPException as e:
        # BUG FIX: typo "falied" in the user-facing failure message.
        print("message sent failed,%s" % e)
        return None
def sftp_read(self, commands):
    """Execute each shell command in *commands* on the configured SSH host.

    Connection parameters come from the "ftp_server1" ini section. For each
    command, stdout is taken if non-empty, otherwise stderr.

    Returns:
        dict with "rtn_code" ("000000" on success, "000301" on failure) and
        "rsp_msg" (decoded output of the LAST command, or None on failure).
    """
    rtn_dict = {}
    ssh = paramiko.SSHClient()
    try:
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        host = utility.get_myibs_ini("ftp_server1", "host")
        port = int(utility.get_myibs_ini("ftp_server1", "port"))
        username = utility.get_myibs_ini("ftp_server1", "username")
        password = utility.get_myibs_ini("ftp_server1", "password")
        # BUG FIX: connect once; the original reconnected inside the loop for
        # every single command.
        ssh.connect(hostname=host, port=port, username=username,
                    password=password)
        for command in commands:
            stdin, stdout, stderr = ssh.exec_command(command)
            res, err = stdout.read(), stderr.read()
            result = res if res else err
            rtn_str = str(result.decode())
            print(rtn_str)
            rtn_dict["rtn_code"] = "000000"
            rtn_dict["rsp_msg"] = rtn_str
    except Exception as e:
        rtn_dict["rtn_code"] = "000301"
        rtn_dict["rsp_msg"] = None
        # BUG FIX: logger.error was given a second positional argument with no
        # %s placeholder, which made logging itself raise a formatting error.
        logger.error("sftp_read error code: %s", rtn_dict["rtn_code"])
        logger.error(e)
    finally:
        ssh.close()
    return rtn_dict
def batch_job_004():
    """Publish the whole myibs.ini into a shared-memory mmap as one JSON blob.

    Guarded by the "batch" ini section: a start flag, a start date/time, and a
    status flag that ensures only one publisher runs.

    Returns:
        "000000" on success; "000001".."000004" when a guard skips the run;
        None when the mmap write fails or the job is already started.
    """
    if_batch_start = utility.get_myibs_ini("batch", "batch_job_004_start")
    batch_start_date = utility.get_myibs_ini("batch", "batch_job_004_start_date")
    batch_start_time = utility.get_myibs_ini("batch", "batch_job_004_start_time")
    batch_job_internal_time = utility.get_myibs_ini("batch", "batch_job_004_internal_time")
    batch_job_started = utility.get_myibs_ini("batch", "batch_job_004_status")
    # Only one publisher may run; the ini status flag acts as the lock.
    # BUG FIX: the guard messages were copy-pasted from the TCP-server job.
    if batch_job_started == "STARTED":
        print("Try to start batch_job_004. However job started, quit.")
        logger.info("try to start batch_job_004. However job started, quit.")
        return
    log_string = ("interval (" + batch_job_internal_time + "s):"
                  + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
                  + " " + if_batch_start
                  + " " + batch_start_date
                  + " " + batch_start_time)
    logger.info(log_string)
    if if_batch_start != "True":
        return "000001"
    rtn = utility.compare_string_with_now(batch_start_date, "days")
    if rtn.days < 0:
        logger.info("day<0, exit")
        return "000002"
    logger.info("day>=0, go on")
    rtn = utility.compare_string_with_now(batch_start_time, "seconds")
    if rtn.days < 0:
        logger.info("time.days<0, exit")
        return "000003"
    if rtn.seconds <= 0:
        logger.info("time.seconds<0, exit")
        return "000004"
    logger.info("time.seconds>0, go on")
    ###########################
    # scheduled job start here
    ###########################
    conf = configparser.ConfigParser()
    logger.info("Loading configuration...")
    conf.read(myibs_ini)
    rtn_str = None
    import mmap
    import contextlib
    import json
    utility.set_myibs_ini("batch", "batch_job_004_status", "STARTED")
    # NOTE(review): mmap's tagname= keyword is Windows-only — confirm the
    # deployment target (the file elsewhere notes Windows constraints).
    with contextlib.closing(mmap.mmap(-1, 1024 * 1024, tagname="myibs.ini",
                                      access=mmap.ACCESS_WRITE)) as m:
        dict_sections = {}
        for section in conf.sections():
            dict_items = {}
            for item in conf.items(section):
                in_parm_tagname = "myibs.ini" + ":" + section + ":" + item[0] + "=" + item[1]
                logger.info(in_parm_tagname)
                print(in_parm_tagname)
                dict_items[item[0]] = item[1]
            dict_sections[section] = dict_items
        obj_json = json.dumps(dict_sections)
        print("dict->json", obj_json)
        try:
            m.seek(0)
            m.write(str(obj_json).encode())
            m.flush()
            rtn_str = "000000"
        except Exception as e:
            # BUG FIX: the original assigned an unrelated rtn_code here and
            # ended with an unreachable second return; rtn_str (the value
            # actually returned) stays None on failure.
            logger.error(e)
            print(e)
        logger.info("Configuration loaded.")
        print(obj_json)
        return rtn_str
def batch_job_001():
    """Ensure the background TCP server thread for batch_job_001_job runs.

    Guarded by the "batch" ini section (start flag, start date/time). If no
    live thread carries the configured job name, builds the server via the
    factory and starts it in a new thread.

    Returns:
        "000000" on the normal path; "000001".."000004" when a guard skips
        the run.
    """
    logger.info("starting batch_job_001...")
    if_batch_start = utility.get_myibs_ini("batch", "batch_job_001_start")
    batch_start_date = utility.get_myibs_ini("batch", "batch_job_001_start_date")
    batch_start_time = utility.get_myibs_ini("batch", "batch_job_001_start_time")
    batch_job_001_internal_time = utility.get_myibs_ini("batch", "batch_job_001_internal_time")
    batch_job_001_job = utility.get_myibs_ini("batch", "batch_job_001_job")
    log_string = ("interval (" + batch_job_001_internal_time + "s):"
                  + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
                  + " " + if_batch_start
                  + " " + batch_start_date
                  + " " + batch_start_time)
    logger.info(log_string)
    if if_batch_start != "True":
        return "000001"
    rtn = utility.compare_string_with_now(batch_start_date, "days")
    if rtn.days < 0:
        logger.info("day<0, exit")
        return "000002"
    logger.info("day>=0, go on")
    rtn = utility.compare_string_with_now(batch_start_time, "seconds")
    if rtn.days < 0:
        logger.info("time.days<0, exit")
        return "000003"
    if rtn.seconds <= 0:
        logger.info("time.seconds<0, exit")
        return "000004"
    logger.info("time.seconds>0, go on")
    ###########################
    # scheduled job start here
    ###########################
    # BUG FIX: rtn_code was never assigned, so the final return raised
    # NameError on every successful run.
    rtn_code = "000000"
    # 1. Check whether a thread with the job's name is already alive.
    thread_list = []
    batch_job_001_status = "---"
    server_name = batch_job_001_job
    for thread in threading.enumerate():
        thread_list.append("\nThread ID: %s, Name: %s\n" % (thread.ident, thread.name))
        if thread.name == server_name:
            batch_job_001_status = "STARTED"
            break
    # 2. Only one server instance may run.
    if batch_job_001_status == "STARTED":
        print("Try to start the TCP server. However server started, quit.")
        logger.info("try to start the TCP server. However server started, quit.")
    else:
        logger.info("scheduled job start here.....")
        # 3. Factory pattern assembles the server object by name.
        asyn_tcp_server = asyn_TCP_server.async_TCP_Server_Factory().create_async_TCP_Server(server_name)
        # 4. Start the blocking server in a background thread.
        # BUG FIX: the original used target=asyn_tcp_server.tcpServer() — it
        # CALLED the blocking server in this thread and handed Thread a None
        # target; tcpServer() also takes no arguments, so args=(server_name,)
        # would have raised TypeError when the thread ran.
        t = threading.Thread(target=asyn_tcp_server.tcpServer, name=server_name)
        print("Thread " + server_name + " starting here")
        logger.info("Thread " + server_name + " starting...")
        t.start()
        print("Thread " + server_name + " started.")
        logger.info("Thread " + server_name + " started.")
    return rtn_code
def batch_job_003():
    """Mirror every myibs.ini entry into Redis as "myibs.ini:section:key".

    Guarded by the "batch" ini section (start flag, start date/time).

    Returns:
        the last set_redis return code ("000000" normally); "000001".."000004"
        when a guard skips the run; None when Redis is unreachable.
    """
    if_batch_start = utility.get_myibs_ini("batch", "batch_job_003_start")
    batch_start_date = utility.get_myibs_ini("batch", "batch_job_003_start_date")
    batch_start_time = utility.get_myibs_ini("batch", "batch_job_003_start_time")
    # BUG FIX: local was misnamed batch_job_001_internal_time (copy-paste).
    batch_job_internal_time = utility.get_myibs_ini("batch", "batch_job_003_internal_time")
    log_string = ("interval (" + batch_job_internal_time + "s):"
                  + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
                  + " " + if_batch_start
                  + " " + batch_start_date
                  + " " + batch_start_time)
    logger.info(log_string)
    if if_batch_start != "True":
        return "000001"
    rtn = utility.compare_string_with_now(batch_start_date, "days")
    if rtn.days < 0:
        logger.info("day<0, exit")
        return "000002"
    logger.info("day>=0, go on")
    rtn = utility.compare_string_with_now(batch_start_time, "seconds")
    if rtn.days < 0:
        logger.info("time.days<0, exit")
        return "000003"
    if rtn.seconds <= 0:
        logger.info("time.seconds<0, exit")
        return "000004"
    logger.info("time.seconds>0, go on")
    ###########################
    # scheduled job start here
    ###########################
    logger.info("Connecting Redis...")
    redis_cli = client.Redis_cli()
    rtn_code = redis_cli.conn_redis()
    if rtn_code == "000000":
        logger.info("Redis connected...")
    else:
        # BUG FIX: message read "Redis connected Refused..." (garbled).
        logger.error("Redis connection refused...")
        return
    conf = configparser.ConfigParser()
    logger.info("Loading configuration...")
    conf.read(myibs_ini)
    for section in conf.sections():
        for key, value in conf.items(section):
            logger.info("myibs.ini" + ":" + section + ":" + key + "=" + value)
            rtn_code = redis_cli.set_redis("myibs.ini" + ":" + section + ":" + key, value)
            if rtn_code == "999999":
                # TODO: on write failure, notify asynchronously (e.g. WeChat).
                pass
    logger.info("Configuration loaded.")
    return rtn_code
def batch_job_002():
    """Ensure the background TCP client thread "asyn_TCP_client_001" runs.

    Guarded by the "batch" ini section (start flag, start date/time). If no
    live thread carries the client's name, starts tcpClient in a new thread.

    Returns:
        "000000" on the normal path; "000001".."000004" when a guard skips
        the run.
    """
    if_batch_start = utility.get_myibs_ini("batch", "batch_job_002_start")
    batch_start_date = utility.get_myibs_ini("batch", "batch_job_002_start_date")
    batch_start_time = utility.get_myibs_ini("batch", "batch_job_002_start_time")
    # BUG FIX: local was misnamed batch_job_001_internal_time (copy-paste).
    batch_job_internal_time = utility.get_myibs_ini("batch", "batch_job_002_internal_time")
    log_string = ("interval (" + batch_job_internal_time + "s):"
                  + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
                  + " " + if_batch_start
                  + " " + batch_start_date
                  + " " + batch_start_time)
    logger.info(log_string)
    if if_batch_start != "True":
        return "000001"
    rtn = utility.compare_string_with_now(batch_start_date, "days")
    if rtn.days < 0:
        logger.info("day<0, exit")
        return "000002"
    logger.info("day>=0, go on")
    rtn = utility.compare_string_with_now(batch_start_time, "seconds")
    if rtn.days < 0:
        logger.info("time.days<0, exit")
        return "000003"
    if rtn.seconds <= 0:
        logger.info("time.seconds<0, exit")
        return "000004"
    logger.info("time.seconds>0, go on")
    ###########################
    # scheduled job start here
    ###########################
    # BUG FIX: rtn_code was never assigned, so the final return raised
    # NameError on every successful run.
    rtn_code = "000000"
    # 1. Check whether the client thread is already alive.
    thread_list = []
    batch_job_002_status = "---"
    for thread in threading.enumerate():
        thread_list.append("Thread ID: %s, Name: %s" % (thread.ident, thread.name))
        if thread.name == "asyn_TCP_client_001":
            batch_job_002_status = "STARTED"
            break
    # 2. Only one client instance may run.
    if batch_job_002_status == "STARTED":
        print("Try to start the TCP client. However client started, quit.")
        logger.info("Try to start the TCP client. However client started, quit.")
    else:
        logger.info("scheduled job start here.....")
        # Start the blocking client in a background thread.
        msg = "abcde"
        t = threading.Thread(target=asyn_TCP_server_001.tcpClient,
                             name='asyn_TCP_client_001', args=(msg,))
        print("Thread asyn_TCP_client_001 starting here")
        logger.info("Thread asyn_TCP_client_001 starting...")
        t.start()
        print("Thread asyn_TCP_client_001 started.")
        logger.info("Thread asyn_TCP_client_001 started.")
    return rtn_code
#留给Batch手工启动/停止/排序及job清单查看使用 urlpatterns = [ #url(r'^user_mas_tmp_app/(?P<id>\d+)/$', views.user_mas_tmp_app, name="user_mas_tmp_app"), ] from apscheduler.scheduler import Scheduler import time import logging logger = logging.getLogger('sourceDns.webdns.views') # 定时任务,采用 apscheduler # 最好使用django-crontab, 可是不支持Windows有啥办法呢? sched = Scheduler() batch_job_001_internal_time = utility.get_myibs_ini( "batch", "batch_job_001_internal_time") @sched.interval_schedule(seconds=int(batch_job_001_internal_time)) def batch_job_001(): logger.info( "-------------------------- batch job started --------------------------" ) views.batch_job_001() logger.info( "-------------------------- batch job ended --------------------------" )