def create_termination_reason(self, reason):
    """Create a termination reason via the Add dialog and verify success.

    :param reason: text entered as the new termination reason
    """
    self.click(self.add_bth)
    self.input_text(reason, self.reason_input)
    self.click(self.save_bth)
    # The success flag element is only shown after a successful save.
    assert self.is_element_visible(self.success_flag)
    Log.info("Create Termination Reason Successfully!")
def edit_email(self, method, method1, path, attkey):
    """Edit the mail sending configuration and verify it was persisted.

    :param method: value selected in the sending-method combo box
    :param method1: expected attribute value of the combo box after saving
    :param path: text entered into the path field
    :param attkey: attribute name used to read the saved values back
    """
    self.click(self.edit_xpath)
    self.set_combox_value(method, self.send_method)
    self.input_text(path, self.path)
    self.click(self.save_btn)
    # Read back the persisted values and compare against what was entered.
    # (Removed two leftover Python-2-only debug `print` statements that
    # dumped the same attribute values the assertions below already check.)
    assert method1 == self.get_element_attribute(self.send_method, attkey)
    assert path == self.get_element_attribute(self.path, attkey)
    Log.info("edit successfully")
def click_apply_btn(self, vacancy):
    """Apply for the given vacancy and log when the apply page opens.

    :param vacancy: vacancy name substituted into the XPath templates
    """
    apply_locator = ("XPATH", self.apply_btn_ele_value.format(vacancy))
    self.click(apply_locator)
    page_locator = ("XPATH", self.apply_page_ele_value.format(vacancy))
    # A non-None lookup means the apply page for this vacancy is open.
    if self.get_element(page_locator) is not None:
        Log.info("Open Apply page")
def add_pay_grades(self, add_name):
    """Add a pay grade named *add_name* via the Add dialog."""
    Log.info("Start to add a pay grade")
    self.click(self.add_btn)
    # Clear any leftover text before typing the new name.
    self.clear_text(self.name)
    self.input_text(add_name, self.name)
    self.click(self.save_btn)
def cancel_adding_attachment(self, filename, description):
    """Start adding an attachment, cancel, and verify nothing was saved.

    :param filename: file to upload before cancelling
    :param description: comment text entered before cancelling
    """
    self.click(self.add_attach_btn)
    self.upload_file(filename, self.browser_btn)
    self.input_text(description, self.comment)
    self.click(self.cancel_attach_btn)
    # After cancelling, no link with the file's name may exist in the list.
    assert self.get_element(('LINK_TEXT', filename)) is None
    Log.info("Cancelling to attach a file")
def verify_membership_success(self, return_message):
    """Assert that *return_message* appears in the page's status message.

    :param return_message: substring expected inside the status message
    :raises Exception: when no status message text can be read
    """
    # Fetch the message text once; the original called get_element_text
    # twice, doing a redundant element lookup that could race with the UI
    # between the None-check and the assertion.
    message_text = self.get_element_text(self.message)
    if message_text is not None:
        assert return_message in message_text
        Log.info(return_message)
    else:
        raise Exception("%s is not found" % return_message)
def delete_leave_cancel(self):
    """Select all records, start a delete, then cancel from the confirm dialog."""
    self.click(self.checkbox_all)
    self.click(self.delete_btn)
    # Wait for the confirmation dialog before pressing Cancel.
    self.wait_unit_el_present(self.confirm_title)
    self.click(self.cancel_confirm_btn)
    # Being back on the search page means the delete was cancelled.
    self.wait_unit_el_present(self.search_title)
    Log.info("Cancel to delete leave entitlements")
def edit_fax(self, fax):
    """Exercise the fax edit flow: open edit mode, replace the value, save, verify.

    :param fax: fax value to validate and enter
    """
    self.edit_general_inf()
    self.clear_text(self.fax)
    self.check_fax_valid(fax)
    self.save_general_inf()
    self.check_if_edit_success()
    Log.info("Edit fax action works well")
def save_LeavePeriod(self):
    """Click Save and verify the "Successfully Saved" message is shown."""
    self.wait_unit_el_present(self.save_button)
    self.click(self.save_button)
    # Wait for the toast/flash element before reading its text.
    self.wait_unit_el_present(self.success_msg)
    assert ("Successfully Saved" in self.get_element_text(self.success_msg))
    Log.info("New Leave Period is saved and success message show - pass!")
def verify_edit_actual_result(self, name, method, *args):
    """Verify the displayed reporting method for *name* matches the expectation.

    :param name: record name substituted into the result XPath template
    :param method: expected method; when "Other", the real expected text
        is passed as the first extra positional argument
    """
    locator = ('XPATH', self.act_rep_method.format(name))
    actual = str(self.get_element_text(locator))
    # "Other" carries the user-supplied text in args[0].
    expected = args[0] if method == "Other" else method
    assert actual == expected
    Log.info(
        "Edited report method, actual result is equal to expected result.")
def edit_zip_postal_code(self, zip_postal_code):
    """Exercise the zip/postal-code edit flow and verify the edit succeeds.

    :param zip_postal_code: value to validate and enter
    """
    self.edit_general_inf()
    self.clear_text(self.zip_postal_code)
    self.check_zip_postal_code(zip_postal_code)
    self.save_general_inf()
    self.check_if_edit_success()
    Log.info("Edit State Zip/Postal Code action works well")
def edit_state_province(self, state_province):
    """Exercise the state/province edit flow and verify the edit succeeds.

    :param state_province: value to validate and enter
    """
    self.edit_general_inf()
    self.clear_text(self.state_province)
    self.check_state_province(state_province)
    self.save_general_inf()
    self.check_if_edit_success()
    Log.info("Edit State Province action works well")
def edit_city(self, city):
    """Exercise the city edit flow and verify the edit succeeds.

    :param city: value whose length is validated before entry
    """
    self.edit_general_inf()
    self.clear_text(self.city)
    self.check_city_length(city)
    self.save_general_inf()
    self.check_if_edit_success()
    Log.info("Edit City action works well")
def edit_address_street_2(self, address):
    """Exercise the address street 2 edit flow and verify the edit succeeds.

    (Original docstring incorrectly said "street 1".)

    :param address: value whose length is validated before entry
    """
    self.edit_general_inf()
    self.clear_text(self.address_street_2)
    self.check_address_Street2_length(address)
    self.save_general_inf()
    self.check_if_edit_success()
    Log.info("Edit Address Street 2 action works well")
def edit_email(self, email):
    """Exercise the email edit flow and verify the edit succeeds.

    :param email: value to validate and enter
    """
    self.edit_general_inf()
    self.clear_text(self.email)
    self.check_email_valid(email)
    self.save_general_inf()
    self.check_if_edit_success()
    Log.info("Edit email action works well")
def check_EndDate(self, enddate):
    """Check End Date was recomputed from the selected Start Month and Date.

    :param enddate: exact text expected in the End Date element
    """
    self.wait_unit_el_present(self.end_date)
    get_enddate = self.get_element_text(self.end_date)
    assert get_enddate == enddate
    Log.info(
        "End Date is updated according to Start Month and Start Date on Leave Period Page - pass!"
    )
def check_Current_LeavePeriod(self, period):
    """Check the Current Leave Period was updated based on the End Date.

    :param period: exact text expected in the current-period element
    """
    self.wait_unit_el_present(self.current_period)
    get_currentperiod = self.get_element_text(self.current_period)
    assert get_currentperiod == period
    Log.info(
        "Current Leave Period is updated based on End Date on Leave Period Page - pass!"
    )
def search(self, user_name):
    """Search for a user record and assert a matching result link is listed.

    :param user_name: name typed into the search field and expected in results
    """
    Log.info("search user")
    self.clear_text(self.user_search_field)
    self.input_text(user_name, self.user_search_field)
    self.click(self.user_search_btn)
    # The results list must contain a link built from the searched name.
    result_locator = ["LINK_TEXT", self.search_link_text.format(user_name)]
    assert self.get_element_text(result_locator) is not None
def __init__(self, browser):
    """Navigate to the Leave Entitlements and Usage Report page.

    :param browser: browser handle forwarded to the base page object
    """
    super(Report, self).__init__(browser)
    self.click_menu('Reports')
    self.wait_unit_el_present(self.menu)
    self.click(self.menu)
    # Log whether the landing page's success flag is present.
    if self.get_element(self.success_flag) is not None:
        Log.info("Arrive Leave Entitlements and Usage Report page.")
    else:
        Log.info("Cannot arrive Leave Entitlements and Usage Report page.")
def check_overlapping(self, date, leave_type):
    """Verify the overlapping-leave warning shows the expected row data.

    :param date: expected text of the first table cell
    :param leave_type: expected text of the third table cell
    """
    assert self.get_element(self.overlapping_title).is_displayed()
    # The balance text begins with the negative amount, e.g. "-1.00".
    assert "-1.00" == self.get_element_text(self.leave_blance)[:5]
    # Cell text is encoded to utf-8 bytes for comparison (Python 2 str).
    assert date == self.get_element_text(("xpath", self.overlapping_table_xpath + "/td[1]")).encode("utf-8")
    assert leave_type == self.get_element_text(("xpath", self.overlapping_table_xpath + "/td[3]")).encode("utf-8")
    Log.info("Overlapping is correct")
def edit_the_first_attachments(self, attachments, comment):
    """Edit the first listed attachment: replace its file and comment.

    :param attachments: replacement file to upload
    :param comment: replacement comment text
    """
    self.click(self.dep_the_first_attachments_edit)
    self.upload_file(attachments, self.dep_select_file)
    self.clear_text(self.dep_attachment_comment)
    self.input_text(comment, self.dep_attachment_comment)
    self.click(self.dep_upload_btn)
    Log.info("The attachment is edited.")
def search_method(self, *args):
    """If the reporting-method page has no records, add one per given name.

    :param args: reporting-method names to create when the page is empty
    """
    page_ele = self.get_element(self.no_record_flag)
    if page_ele is not None:
        Log.info("No record in reporting method page!")
        # BUG FIX: the original looped `for j in (1, len(args))`, which
        # iterates over the two-element tuple (1, len(args)) rather than a
        # range — only the first and last names were added (and the first
        # one twice when len(args) == 1). Iterate over every name instead.
        for method_name in args:
            self.add_report_method(method_name)
    else:
        Log.info("It has records in reporting method page!")
def test_case6_edit_fax(self):
    """Test Case 6: edit Fax with invalid, empty, max-length and valid values."""
    Log.info("Start to run test_case6_edit_fax")
    self.generalinfo.edit_fax(self.fax_invalid)
    self.generalinfo.edit_fax(self.fax_null)
    self.generalinfo.edit_fax(self.fax_max)
    self.generalinfo.edit_fax(self.fax)
    Log.info("Run Test Case6: Edit Fax works well")
def delete_immigration(self):
    """Delete a randomly chosen immigration record.

    :return: the identifier of the record that was deleted
    """
    record = self.get_random_data(self.random_record_ele)
    # Tick the record's checkbox, then delete it.
    self.click(('xpath', self.check_one_ele.format(record)))
    self.click(self.delete_btn)
    Log.info("The immigration record is deleted.")
    return record
def test_case7_edit_email(self):
    """Test Case 7: edit Email with invalid, empty, max-length and valid values."""
    Log.info("Start to run test_case7_edit_email")
    self.generalinfo.edit_email(self.email_invalid)
    self.generalinfo.edit_email(self.email_null)
    self.generalinfo.edit_email(self.email_max)
    self.generalinfo.edit_email(self.email)
    Log.info("Run Test Case7: Edit email works well")
def check_activity(self, activity_name):
    """Wait for the newly created activity to appear in the list.

    :param activity_name: name substituted into the activity XPath template
    """
    locator = ("XPATH", self.activity_ele_value.format(activity_name))
    # wait_unit_el_present returns a non-None value once the row exists.
    if self.wait_unit_el_present(locator) is not None:
        Log.info("New activity is listed.")
def test_case5_edit_phone(self):
    """Test Case 5: edit Phone with invalid, empty, max-length and valid values."""
    Log.info("Start to run test_case5_edit_phone")
    self.generalinfo.edit_phone(self.phone_invalid)
    self.generalinfo.edit_phone(self.phone_null)
    self.generalinfo.edit_phone(self.phone_max)
    self.generalinfo.edit_phone(self.phone)
    Log.info("Run Test Case5: Edit phone works well")
def testcase4_searchDirectory_No(self):
    """Directory page: search by name, job title and location together
    with values that should produce no results."""
    self.directory.search_SearchDirectory_No(self.first_name,
                                             self.jobtitle1,
                                             self.location1)
    Log.info(
        "Test not exist while search function by name, jobtitle, location together - passed"
    )
def testcase3_searchDirectory_All(self):
    """Directory page: search by name, job title and location together
    with values that should produce results."""
    self.directory.search_SearchDirectory_All(self.search_name,
                                              self.search_jobtitle,
                                              self.search_location)
    Log.info(
        "Test exist result while search function by name, jobtitle, location together - passed"
    )
def testcase2_searchDirectory_One(self):
    """Directory page: exercise the search filters one at a time
    (name, job title, location)."""
    self.directory.searchDirectory_One(self.search_name,
                                       self.search_jobtitle,
                                       self.search_location)
    Log.info(
        "Test search function by name, jobtitle, location one by one - passed"
    )
class RaidMonitor(object):
    """Polls MegaCli for RAID physical-drive state and sends an alarm via
    the VPS manager RPC when any drive is neither online nor
    Unconfigured(good); sends a recovery notice when the state clears."""

    def __init__(self):
        self.logger = Log("raid_mon", config=conf)
        self.is_running = False
        # True while the array is believed healthy; latches alarm/recovery
        # transitions so each is sent only once.
        self.last_state = True
        self.vps_mgr = VPSMgr()
        self.hostname = socket.gethostname()

    def start(self):
        if self.is_running:
            return
        self.is_running = True
        self.logger.info("started")

    def stop(self):
        if not self.is_running:
            return
        self.is_running = False

    def send_alarm(self, msg):
        """Send *msg* through a short-lived RPC connection (always closed)."""
        rpc = self.vps_mgr.rpc_connect()
        try:
            rpc.alarm("%s: raid_mon: %s" % (self.hostname, msg))
        finally:
            rpc.close()

    def check(self):
        """Run the MegaCli pipeline once; alarm on bad drives, recover on ok."""
        # Any "firmware state" line that is neither online nor
        # Unconfigured(good) indicates a degraded/failed drive.
        cmd = """MegaCli64 -pdlist -aall | grep -i 'firmware state:' | grep -P -v -i "online|Unconfigured\(good\)" """
        try:
            out, err = subprocess.Popen(cmd, shell=True,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE).communicate()
            # Non-empty output (stdout or stderr) means trouble.
            msg = out + err
            if msg:
                self.logger.error(msg)
                if self.last_state:
                    # First failure after a healthy period: alarm once.
                    self.last_state = False
                    self.send_alarm("error, %s" % (out))
                    self.logger.error("alarm sent")
            else:
                self.logger.info("ok")
                if not self.last_state:
                    # Recovered: notify once and reset the latch.
                    self.send_alarm("ok")
                    self.last_state = True
        except Exception, e:
            self.logger.exception(e)
class SaasMonitor(object):
    """Probes the SAAS service over RPC (a MONITOR command) and keeps
    alarm plumbing (job queue + e-mail alarm) for reporting failures."""

    def __init__(self):
        self.is_running = False
        self.hostname = socket.gethostname()
        self.logger = Log("saas_mon", config=conf)
        # Thresholds read from config with fallbacks (150 and 5).
        # NOTE(review): units/semantics of these thresholds are not used in
        # the visible code — presumably consumed by callers; confirm there.
        self.recover_thres = conf.SAAS_RECOVER_THRESHOLD or (30 * 5)
        self.bad_thres = conf.SAAS_BAD_THRESHOLD or 5
        self.alarm_q = JobQueue(self.logger)
        self.emailalarm = EmailAlarm(self.logger)
        self.last_state = True

    def start(self):
        if self.is_running:
            return
        self.is_running = True
        self.alarm_q.start_worker(1)
        self.logger.info("started")

    def stop(self):
        if not self.is_running:
            return
        self.is_running = False
        self.alarm_q.stop()

    def check(self):
        """Return True when the SAAS RPC answers the MONITOR probe, else False."""
        vps = None
        try:
            rpc = SAAS_Client(conf.HOST_ID, self.logger)
            rpc.connect()
            try:
                _id = rpc.todo(CMD.MONITOR)
            finally:
                rpc.close()
            self.logger.info("ok")
            return True
        except Exception, e:
            self.logger.exception(e)
            return False
class VPSMgr(object):
    """VPS host manager: dispatches SAAS commands to handler methods and
    periodically collects CPU/network/disk metrics into a Carbon payload.

    All exceptions are expected to be caught and logged inside this class.
    """
    VERSION = 1

    def __init__(self):
        self.logger = Log("vps_mgr", config=conf)
        self.logger_net = Log("vps_mgr_net", config=conf)
        self.logger_misc = Log("misc", config=conf)
        self.logger_debug = Log("debug", config=conf)
        self.host_id = conf.HOST_ID
        self.vpsops = VPSOps(self.logger)
        # Command-code -> unbound handler method dispatch table.
        self.handlers = {
            CMD.OPEN: self.__class__.vps_open,
            CMD.REBOOT: self.__class__.vps_reboot,
            CMD.CLOSE: self.__class__.vps_close,
            CMD.OS: self.__class__.vps_reinstall_os,
            CMD.UPGRADE: self.__class__.vps_upgrade,
            CMD.BANDWIDTH: self.__class__.vps_set_bandwidth,
            CMD.RM: self.__class__.vps_delete,
            CMD.PRE_SYNC: self.__class__.vps_hot_sync,
            CMD.MIGRATE: self.__class__.vps_migrate,
            CMD.RESET_PW: self.__class__.vps_reset_pw,
        }
        # _locker guards _vps_locker, the per-VPS command lock map.
        self._locker = threading.Lock()
        self._vps_locker = dict()
        self.xenstat = XenStat()
        self.timer = TimerEvents(time.time, self.logger_misc)
        assert conf.MONITOR_COLLECT_INV > 0
        # Previous-sample caches for rate computation in monitor_vps.
        self.last_netflow = None
        self.last_diskstat = None
        self.monitor_inv = conf.MONITOR_COLLECT_INV
        self.last_monitor_ts = None
        self.timer.add_timer(conf.MONITOR_COLLECT_INV, self.monitor_vps)
        self.timer.add_timer(12 * 3600, self.refresh_host_space)
        self.workers = []
        self.running = False

    def _try_lock_vps(self, cmd, vps_id):
        """Try to claim *vps_id* for *cmd*; return False if already locked."""
        self._locker.acquire()
        if self._vps_locker.has_key(vps_id):
            _cmd = self._vps_locker.get(vps_id)
            self.logger_debug.info("CMD %s try to lock vps%s failed: locked by CMD %s" % (
                CMD._get_name(cmd), vps_id, CMD._get_name(_cmd)
            ))
            res = False
        else:
            self._vps_locker[vps_id] = cmd
            res = True
        self._locker.release()
        return res

    def _unlock_vps(self, cmd, vps_id):
        """Release the lock on *vps_id*, but only if *cmd* is the holder."""
        self._locker.acquire()
        try:
            _cmd = self._vps_locker.get(vps_id)
            if _cmd == cmd:
                del self._vps_locker[vps_id]
        except KeyError:
            pass
        self._locker.release()

    def rpc_connect(self):
        """Return a freshly connected SAAS RPC client (caller must close)."""
        rpc = SAAS_Client(self.host_id, self.logger_debug)
        rpc.connect()
        return rpc

    def monitor_vps(self):
        """Timer callback: sample netflow/diskstat/xenstat and build a
        Carbon metrics payload for the host (dom0) and each vps domain.
        Rates need a previous sample, so the first run only seeds caches."""
        net_result = None
        disk_result = None
        try:
            net_result = netflow.read_proc()
            disk_devs = glob.glob("/dev/main/vps*")
            if 'MAIN_DISK' in dir(conf):
                disk_devs.append(conf.MAIN_DISK)
            disk_result = diskstat.read_stat(disk_devs)
        except Exception, e:
            self.logger_misc.exception(
                "cannot read netflow data from proc: %s" % (str(e)))
            return
        ts = time.time()
        dom_map = XenStore.domain_name_id_map()
        dom_names = dom_map.keys()
        self.xenstat.run(dom_names)
        payload = CarbonPayload()
        try:
            payload.append("host.cpu.%s.all" % (self.host_id),
                           ts, self.xenstat.total_cpu)
            for dom_name in dom_names:
                om = re.match("^vps(\d+)$", dom_name)
                if not om:
                    # dom0 (host) metrics: CPU, main-disk IO, host netflow.
                    dom_cpu = self.xenstat.dom_dict.get(dom_name)
                    if dom_cpu:
                        payload.append("host.cpu.%s.dom0" % (self.host_id),
                                       dom_cpu['ts'], dom_cpu['cpu_avg'])
                    if 'MAIN_DISK' in dir(conf) and self.last_diskstat:
                        t_elapse = ts - self.last_monitor_ts
                        v = disk_result.get(conf.MAIN_DISK)
                        last_v = self.last_diskstat.get(conf.MAIN_DISK)
                        read_ops, read_byte, write_ops, write_byte, util = diskstat.cal_stat(
                            v, last_v, t_elapse)
                        payload.append("host.io.%d.ops.read" % (self.host_id), ts, read_ops)
                        payload.append("host.io.%d.ops.write" % (self.host_id), ts, write_ops)
                        payload.append("host.io.%s.traffic.read" % (self.host_id), ts, read_byte)
                        payload.append("host.io.%s.traffic.write" % (self.host_id), ts, write_byte)
                        payload.append("host.io.%s.util" % (self.host_id), ts, util)
                        # NOTE(review): leftover debug print (Python 2).
                        print conf.MAIN_DISK, read_ops, write_ops, read_byte, write_byte, util
                    if self.last_netflow:
                        t_elapse = ts - self.last_monitor_ts
                        # External interface: bits/s and packets/s deltas.
                        v = net_result.get(conf.EXT_INF)
                        last_v = self.last_netflow.get(conf.EXT_INF)
                        _in = fix_flow((v[0] - last_v[0]) * 8.0 / t_elapse)
                        _out = fix_flow((v[1] - last_v[1]) * 8.0 / t_elapse)
                        _in_pp = (v[2] - last_v[2]) / t_elapse
                        _out_pp = (v[3] - last_v[3]) / t_elapse
                        payload.append("host.netflow.%d.ext.in" % (self.host_id), ts, _in)
                        payload.append("host.netflow.%d.ext.out" % (self.host_id), ts, _out)
                        payload.append("host.netflow.%d.ext_pp.in" % (self.host_id), ts,
                                       _in_pp > 0 and _in_pp or 0)
                        payload.append("host.netflow.%d.ext_pp.out" % (self.host_id), ts,
                                       _out_pp > 0 and _out_pp or 0)
                        # Internal interface, same computation.
                        v = net_result.get(conf.INT_INF)
                        last_v = self.last_netflow.get(conf.INT_INF)
                        _in = fix_flow((v[0] - last_v[0]) * 8.0 / t_elapse)
                        _out = fix_flow((v[1] - last_v[1]) * 8.0 / t_elapse)
                        _in_pp = (v[2] - last_v[2]) / t_elapse
                        _out_pp = (v[3] - last_v[3]) / t_elapse
                        payload.append("host.netflow.%d.int.in" % (self.host_id), ts, _in)
                        payload.append("host.netflow.%d.int.out" % (self.host_id), ts, _out)
                        payload.append("host.netflow.%d.int_pp.in" % (self.host_id), ts,
                                       _in_pp > 0 and _in_pp or 0)
                        payload.append("host.netflow.%d.int_pp.out" % (self.host_id), ts,
                                       _out_pp > 0 and _out_pp or 0)
                else:
                    # Per-VPS metrics for domains named vpsNNN.
                    vps_id = int(om.group(1))
                    xv = self.vpsops.load_vps_meta(vps_id)
                    dom_cpu = self.xenstat.dom_dict.get(dom_name)
                    if dom_cpu:
                        payload.append("vps.cpu.%s" % (vps_id),
                                       dom_cpu['ts'], dom_cpu['cpu_avg'])
                    # No previous sample yet: nothing rate-based can be
                    # computed for any vps, so stop scanning domains.
                    if not self.last_netflow or not self.last_diskstat:
                        break
                    # net
                    ifname = dom_name
                    vif = xv.vifs.get(ifname)
                    v = net_result.get(ifname)
                    last_v = self.last_netflow.get(ifname)
                    t_elapse = ts - self.last_monitor_ts
                    if v and last_v:
                        # direction of vps bridged network interface needs to
                        # be reversed
                        _in = fix_flow((v[1] - last_v[1]) * 8.0 / t_elapse)
                        _out = fix_flow((v[0] - last_v[0]) * 8.0 / t_elapse)
                        # Clamp to the provisioned bandwidth when set.
                        _in = (vif.bandwidth and vif.bandwidth * 1024 * 1024 < _in) and vif.bandwidth * 1024 * 1024 or _in
                        _out = (vif.bandwidth and vif.bandwidth * 1024 * 1024 < _out) and vif.bandwidth * 1024 * 1024 or _out
                        payload.append("vps.netflow.%d.in" % (vps_id), ts, _in)
                        payload.append("vps.netflow.%d.out" % (vps_id), ts, _out)
                        # NOTE(review): `and` binds tighter than `or`, so this
                        # warns whenever _out >= LARGE_NETFLOW even if the
                        # setting is falsy — possibly unintended precedence.
                        if conf.LARGE_NETFLOW and _in >= conf.LARGE_NETFLOW or _out >= conf.LARGE_NETFLOW:
                            self.logger_misc.warn(
                                "%s in: %.3f mbps, out: %.3f mbps" %
                                (ifname, _in / 1024.0 / 1024.0, _out / 1024.0 / 1024.0))
                    # disk
                    if conf.USE_LVM and self.last_diskstat:
                        for disk in xv.data_disks.values():
                            v = disk_result.get(disk.dev)
                            last_v = self.last_diskstat.get(disk.dev)
                            if not last_v:
                                continue
                            read_ops, read_byte, write_ops, write_byte, util = diskstat.cal_stat(
                                v, last_v, t_elapse)
                            # NOTE(review): leftover debug print (Python 2).
                            print disk.xen_dev
                            payload.append("vps.io.%d.%s.ops.read" % (vps_id, disk.xen_dev), ts, read_ops)
                            payload.append("vps.io.%d.%s.ops.write" % (vps_id, disk.xen_dev), ts, write_ops)
                            payload.append("vps.io.%d.%s.traffic.read" % (vps_id, disk.xen_dev), ts, read_byte)
                            payload.append("vps.io.%d.%s.traffic.write" % (vps_id, disk.xen_dev), ts, write_byte)
                            payload.append("vps.io.%d.%s.util" % (vps_id, disk.xen_dev), ts, util)
                        # Swap store IO for this vps.
                        v = disk_result.get(xv.swap_store.dev)
                        last_v = self.last_diskstat.get(xv.swap_store.dev)
                        if v and last_v:
                            read_ops, read_byte, write_ops, write_byte, util = diskstat.cal_stat(
                                v, last_v, t_elapse)
                            payload.append("vps.io.%d.swap.ops.read" % (vps_id), ts, read_ops)
                            payload.append("vps.io.%d.swap.ops.write" % (vps_id), ts, write_ops)
                            payload.append("vps.io.%d.swap.traffic.read" % (vps_id), ts, read_byte)
                            payload.append("vps.io.%d.swap.traffic.write" % (vps_id), ts, write_byte)
                            payload.append("vps.io.%d.swap.util" % (vps_id), ts, util)
            # Cache this sample so the next run can compute rates.
            self.last_netflow = net_result
            self.last_diskstat = disk_result
            self.last_monitor_ts = ts
        except Exception, e:
            self.logger_misc.exception(e)
            return
class TransWarpClient (TransWarpBase):
    """Client side of the tunnel: accepts local SOCKS5 connections and
    relays them through an encrypted connection to the remote server."""

    def __init__(self):
        TransWarpBase.__init__(self)
        self.logger = Log("client", config=config)
        self.engine.set_logger(self.logger)
        self.sock5_addr = config.SOCK5_ADDR
        # Optional user table; empty dict when SOCK5_USERS is not configured.
        self._sock5_users = 'SOCK5_USERS' in dir(
            config) and config.SOCK5_USERS or {}
        ip = self.sock5_addr[0]
        arr = map(lambda x: chr(int(x)), ip.split("."))
        # Pre-packed BND.ADDR + BND.PORT bytes reused in every SOCKS5 reply.
        self._sock5_server_id = struct.pack(
            "!4cH", arr[0], arr[1], arr[2], arr[3], self.sock5_addr[1])
        self.server_addr = config.SERVER_ADDR
        self.sock5_sock = None
        self.key = config.KEY

    def start(self):
        if self.is_running:
            return
        self.sock5_sock = self.engine.listen_addr(
            self.sock5_addr, readable_cb=None,
            new_conn_cb=self._sock5_handshake, backlog=50)
        self.is_running = True

    def stop(self):
        if not self.is_running:
            return
        self.engine.unlisten(self.sock5_sock)
        self.is_running = False

    def _send_sock5_unsupport(self, conn):
        """Reply "command not supported" (0x07) and close the connection."""
        self.logger.error("peer %s not supported" % (str(conn.peer)))
        buf = "%s%s\x00\x01%s" % (VER, "\x07", self._sock5_server_id)

        def __write_ok(conn):
            self.engine.close_conn(conn)
            return
        self.engine.write_unblock(conn, buf, __write_ok)

    def _send_sock5_reply(self, client, err_no):
        """Send a SOCKS5 reply mapping *err_no* to the protocol status byte,
        then either advance the client state (success) or close it."""
        if err_no == 0:
            status = "\x00"
        elif err_no == errno.ENETUNREACH:
            status = "\x03"
        elif err_no == errno.EHOSTUNREACH:
            status = "\x04"
        elif err_no == errno.ECONNREFUSED:
            status = "\x05"
        else:
            status = "\x01"  # general error
        buf = "%s%s\x00\x01%s" % (VER, status, self._sock5_server_id)

        def __write_ok(cli_conn, *args):
            if err_no == 0:
                self.logger.info(
                    "client %s: sent sock5 response" % (client.client_id))
                client.cli_state = proto.ClientState.CONNECTED
                self._check_client_state(client)
            else:
                self.logger.info(
                    "client %s: sent sock5 err response and close" % (client.client_id))
                self.close_client(client)
            return
        self.engine.write_unblock(
            client.cli_conn, buf, __write_ok, self._on_err, cb_args=(client,))

    def _on_client_readable(self, cli_conn, client):
        # print "client %s client readable" % (client.client_id)
        self.stream_to_fix(cli_conn, client.r_conn, client)

    def _on_remote_readable(self, r_conn, client):
        # print "client %s remote readable" % (client.client_id)
        self.fix_to_stream(r_conn, client.cli_conn, client)

    def _on_server_connected(self, sock, client):
        """After TCP connect to the server: build the auth payload and set up
        callbacks that parse the server's length-prefixed response."""
        self.logger.info("client %s connected to server" % (client.client_id))
        r_conn = Connection(sock)
        _hash = proto.myhash(client.seed, self.key)
        auth_data = proto.AuthData(
            client.seed, _hash, self.key, client.r_host, client.r_port)
        buf = auth_data.serialize()
        buf = proto.pack_head(len(buf)) + buf
        client.r_conn = r_conn

        def __on_remote_respond(r_conn, *args):
            # Decrypt and parse the server's response body.
            resp = None
            try:
                buf = client.crypter_r.decrypt(r_conn.get_readbuf())
                resp = proto.ServerResponse.deserialize(buf)
                if resp.err_no:
                    self.logger.error("client %s: %s %s" % (client.client_id,
                                                            resp.err_no, resp.message))
                    self.close_client(client)
                else:
                    self.logger.info(
                        "client %s server response" % (client.client_id))
                    client.r_state = proto.ClientState.CONNECTED
                    self._check_client_state(client)
            except Exception, e:
                self.logger.exception(
                    "client %s: server response error %s" % (client.client_id, str(e)))
                self.close_client(client)
            return

        def __on_read_head(r_conn, *args):
            # Parse the length prefix, then read the full body.
            data_len = 0
            try:
                data_len = proto.unpack_head(r_conn.get_readbuf())
            except Exception, e:
                self.logger.error(
                    "client %s remote head invalid" % (client.client_id))
                self.close_client(client)
                return
            if data_len > 0:
                self.engine.read_unblock(
                    r_conn, data_len, __on_remote_respond, self._on_err,
                    cb_args=(client, ))
                return
            self.logger.error("zero len head")
            self.close_client(client)
            return
        # NOTE(review): within the visible code, `buf` is built and
        # `__on_read_head` defined but neither is handed to the engine here —
        # confirm against the original (unchunked) source.
class VpsProxy(object):
    """Generates an nginx reverse-proxy config from the SAAS domain list
    and reloads nginx whenever the generated content changes."""

    NGINX_TEMPLATE = """
server {
    listen 80;
    server_name %(host)s *.%(host)s;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header REMOTE-HOST $remote_addr;
    proxy_set_header HOST $host.%(suffix)s;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    location / {
        proxy_pass http://%(ip)s;
    }
}
"""

    def __init__(self):
        self.logger = Log("proxy", config=config)
        self.logger_rpc = Log("proxy_rpc", config=config)
        self.output_path = config.NGINX_CONF_PATH
        self.nginx_reload_cmd = config.NGINX_RELOAD_CMD
        self.rpc = AES_RPC_Client(_private.KEY, self.logger_rpc)

    def start(self):
        self.is_running = True

    def stop(self):
        self.is_running = False

    def loop(self):
        """Regenerate the config every RUN_INV seconds until stopped."""
        while self.is_running:
            self.gen_config()
            time.sleep(conf.RUN_INV)

    def reload_nginx(self):
        subprocess.check_output(self.nginx_reload_cmd)
        self.logger.info("nginx reloaded")
        print "nginx reloaded"

    def gen_config(self, force=False):
        """Fetch the domain list over RPC, render the nginx config, and
        write + reload only when the content differs (unless *force*)."""
        domain_list = None
        try:
            self.rpc.connect(_private.SAAS_ADDR)
            try:
                domain_list = self.rpc.call("proxy_domain_list")
            finally:
                self.rpc.close()
        except Exception, e:
            self.logger.exception(e)
            print traceback.print_exc()
            return
        # NOTE(review): this local `conf` shadows the module-level `conf`
        # used by loop(); harmless here but worth renaming.
        conf = []
        for i in domain_list:
            conf.append(self.NGINX_TEMPLATE % {'host': i["domain"],
                                               'ip': i["ip"],
                                               'suffix': _private.PROXY_DOMAIN_SUFFIX})
        content = "".join(conf)
        try:
            # Skip write + reload when the on-disk config is identical.
            if not force and os.path.exists(self.output_path) and _md5_file(self.output_path) == _md5(content):
                print "skip the same"
                return
            f = open(self.output_path, "w")
            try:
                f.write(content)
            finally:
                f.close()
            self.logger.info("conf generated")
            self.reload_nginx()
        except Exception, e:
            self.logger.exception(e)
            print traceback.print_exc()
class ICMPMonitor (object):
    """Pings the configured links with fping and enqueues e-mail alarms on
    state changes (loss/recovery) per link, as decided by Linkage."""

    def __init__(self):
        self.is_running = False
        # ip -> Linkage state machine for each monitored link.
        self.linkage_dict = dict()
        self.logger = Log("icmp_mon", config=config)
        self.alarm_q = JobQueue(self.logger)
        self.emailalarm = EmailAlarm(Log("alarm", config=config))
        self.logger_links = Log("links", config=config)
        # Number of ping results to accumulate before flushing a
        # per-link detail line to the links log (default 128).
        if 'log_length_per_link' in dir(config):
            self.log_length_per_link = config.log_length_per_link
        else:
            self.log_length_per_link = 128
        if 'links' not in dir(config):
            self.logger.error("no 'links' in config")
            return
        # Global defaults, overridable per link.
        g_alarm_levels = None
        g_recover = None
        if 'alarm_levels' in dir(config):
            g_alarm_levels = self._parse_alarm_levels(config.alarm_levels)
        if 'recover' in dir(config):
            g_recover = int(config.recover)
        links = config.links
        if isinstance(links, dict):
            for ip, v in links.iteritems():
                if not isinstance(v, dict):
                    v = dict()
                ttl = v.get('ttl')
                if ttl >= 0:
                    pass
                else:
                    ttl = 0
                # Per-link alarm_levels, falling back to the global ones;
                # skip the link when neither is usable.
                alarm_levels = v.get('alarm_levels')
                if not alarm_levels and g_alarm_levels:
                    alarm_levels = g_alarm_levels
                elif alarm_levels:
                    alarm_levels = self._parse_alarm_levels(alarm_levels)
                    if not alarm_levels:
                        continue
                else:
                    self.logger.error(
                        "config: %s, missing alarm_levels value" % (ip))
                    continue
                # Per-link recover threshold, falling back to the global one.
                recover = v.get('recover')
                if recover:
                    recover = int(recover)
                elif not recover and g_recover:
                    recover = g_recover
                else:
                    self.logger.error(
                        "config: %s, missing recover value" % (ip))
                    continue
                self.linkage_dict[ip] = Linkage(ip, alarm_levels, recover)
        self.logger.info("%d link loaded from config" %
                         (len(self.linkage_dict.keys())))

    def _parse_alarm_levels(self, alarm_levels, ip=""):
        """Return *alarm_levels* when it is a list/tuple of ints, else None."""
        if not isinstance(alarm_levels, (tuple, list)):
            self.logger.error("config: %s, alarm_levels is not a list" % (ip))
            return
        _alarm_levels = filter(lambda x: isinstance(x, int), alarm_levels)
        if len(_alarm_levels) != len(alarm_levels):
            self.logger.error(
                "config: %s, elements in alarm_levels must be integers" % (ip))
            return
        return _alarm_levels

    def start(self):
        if self.is_running:
            return
        self.is_running = True
        self.alarm_q.start_worker(1)
        self.logger.info("started")

    def stop(self):
        if not self.is_running:
            return
        self.is_running = False
        self.alarm_q.stop()
        self.logger.info("stopped")

    def _alarm_enqueue(self, link):
        """Queue a timestamped e-mail alarm job for *link*."""
        t = "%Y-%m-%d %H:%M:%S"
        ts = "[%s]" % (time.strftime(t, time.localtime()))
        job = AlarmJob(
            self.emailalarm, ts + link.alarm_text(), link.details())
        self.alarm_q.put_job(job)

    def loop(self):
        """Ping all links roughly once per second until stopped, feeding
        results to each Linkage and alarming on state transitions."""
        ips = self.linkage_dict.keys()
        fping = FPing(ips)
        while self.is_running:
            start_time = time.time()
            recv_dict, error_dict = fping.ping(1)
            # Responding links: a truthy new_state() means a transition.
            for ip, rtt in recv_dict.iteritems():
                link = self.linkage_dict[ip]
                res = link.new_state(True, rtt)
                if res:
                    self._alarm_enqueue(link)
                print ip, "ok", rtt
                if len(link.bitmap) == self.log_length_per_link:
                    self.logger_links.info(link.details())
                    link.reset_bitmap()
            # Non-responding links: alarm when new_state() returns False.
            for ip, err in error_dict.iteritems():
                link = self.linkage_dict[ip]
                res = link.new_state(False, 0)
                if res is False:
                    self._alarm_enqueue(link)
                print ip, "err", link.bitmap
                if len(link.bitmap) == self.log_length_per_link:
                    self.logger_links.info(link.details())
                    link.reset_bitmap()
            end_time = time.time()
            # Keep the overall cycle at ~1 second.
            if end_time < start_time + 1:
                time.sleep(1 - end_time + start_time)