def test_create_vps0(self):
    print "create vps00"
    logger = Log("test", config=conf)
    vpsops = VPSOps(logger)
    vps = XenVPS(0)
    try:
        vps.setup(os_id=20001, vcpu=1, mem_m=512, disk_g=7, ip="113.11.199.3",
                  netmask="255.255.255.0", gateway="113.11.199.1", root_pw="fdfdfd")
        # vps.add_extra_storage(disk_id=1, size_g=1, fs_type='ext3')
        # vps.add_extra_storage(disk_id=2, size_g=0.5, fs_type='ext4')
        # vps.setup(os_id=10001, vcpu=1, mem_m=512, disk_g=7, ip="10.10.1.2", netmask="255.255.255.0", gateway="10.10.1.1", root_pw="fdfdfd")
        # vps.setup(os_id=10002, vcpu=1, mem_m=512, disk_g=7, ip="10.10.1.2", netmask="255.255.255.0", gateway="10.10.1.1", root_pw="fdfdfd")
        # vps.setup(os_id=30001, vcpu=1, mem_m=512, disk_g=7, ip="10.10.2.2", netmask="255.255.255.0", gateway="10.10.2.1", root_pw="root")
        # vps.setup(os_id=1, vcpu=1, mem_m=512, disk_g=7, ip="10.10.1.2", netmask="255.255.255.0", gateway="10.10.1.1", root_pw="fdfdfd")
        # vps.setup(os_id=10000, vcpu=1, mem_m=512, disk_g=7, ip="10.10.1.2", netmask="255.255.255.0", gateway="10.10.1.1", root_pw="fdfdfd")
        # vps.setup(os_id=20001, vcpu=1, mem_m=512, disk_g=7, ip="10.10.1.2", netmask="255.255.255.0", gateway="10.10.1.1", root_pw="fdfdfd")
        # vps.setup(os_id=10003, vcpu=1, mem_m=512, disk_g=7, ip="10.10.1.2", netmask="255.255.255.0", gateway="10.10.1.1", root_pw="fdfdfd")
        # vps.setup(os_id=20001, vcpu=1, mem_m=512, disk_g=7, ip="10.10.1.2", netmask="255.255.255.0", gateway="10.10.1.1", root_pw="fdfdfd")
        print vps.gen_xenpv_config()
        # vpsops.create_vps(vps, vps_image='/data/vps/images/arch-2011.08.19-i386-fs-ext3.tar.gz')
        vpsops.create_vps(vps)
    except Exception, e:
        print str(e)
        logger.exception(e)
        raise e
def load(file, params={}):
    tpl = view_folder + file
    if os.path.isfile(tpl) and os.access(tpl, os.R_OK):
        tpl = env.get_template(file)
        return tpl.render(params)
    else:
        Log.debug(tpl + ": file does not exist")
        return False
def hotsync_partition(lv_dev, dest_ip, speed=None):
    logger = Log("vps_mgr", config=conf)
    try:
        client = MigrateClient(logger, dest_ip)
        client.snapshot_sync(lv_dev, speed=speed)
        print "ok"
    except Exception, e:
        logger.exception(e)
        raise e
def test_mem_too_big(self):
    print "test mem too big"
    vps = XenVPS(0)
    logger = Log("test", config=conf)
    vps.setup(os_id=50001, vcpu=1, mem_m=500000, disk_g=7, ip="10.10.1.2",
              netmask="255.255.255.0", gateway="10.10.1.1", root_pw="fdfdfd")
    try:
        vps.check_resource_avail()
    except Exception, e:
        logger.exception(e)
        print "exception caught", type(e), str(e)
    return
def assign_salary(self, grade, component, frequency, currency, amount):
    # self.get_emp_record(name)
    # self.check_salary_page()
    self.click(self.add_btn)
    self.set_combox_value(grade, self.pay_grade)
    self.input_text(component, self.salary_component)
    self.set_combox_value(frequency, self.pay_frequency)
    self.wait_unit_el_present(self.currency)
    self.set_combox_value(currency, self.currency)
    self.input_text(amount, self.amount)
    self.click(self.save_btn)
    self.get_element_text(self.message)
    Log.info("Salary record added successfully!")
def search_user(self, user_name):
    """ Search user record """
    Log.info("Start to search user")
    self.clear_text(self.user_search_field)
    self.sleep(3)
    self.input_text(user_name, self.user_search_field)
    self.sleep(3)
    self.click(self.user_search_btn)
    self.sleep(3)
    self.search_name = self.get_element_text(self.search_flag)
    return self.search_name
def get_windows_img(self):
    """ Save the screenshot """
    file_path = "screenshots//"
    rq = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))
    screen_name = file_path + rq + '.png'
    try:
        self.driver.get_screenshot_as_file(screen_name)
        Log.info("Save screen shot to screenshots")
    except NameError as e:
        Log.error("Failed to take screenshot! %s" % e)
        self.get_windows_img()
def edit_organization_name(self, org_name):
    """ Test for organization name edit function """
    self.edit_general_inf()
    self.clear_text(self.organization_name)
    self.check_organization_name_length(org_name)
    if len(org_name) != 0:
        self.save_general_inf()
        self.check_if_edit_success()
        Log.info("Edit organization name action works well")
    else:
        Log.info("Organization name is required before saving")
def add_holiday_save(self, name):
    """ Add a new holiday and save """
    self.click(self.add_btn)
    self.clear_text(self.holiday_name)
    self.input_text(name, self.holiday_name)
    self.set_holiday_element()
    self.click(self.save_btn)
    if "Successfully Saved" in self.get_element_text(self.message):
        Log.info("Created a " + name + " holiday and saved.")
    else:
        Log.info("Creating a " + name + " holiday failed")
def edit_attachment(self, attachment, new_attachment, new_comment):
    """ Edit the attachment - save comment only """
    self.click(self.edit_attachment_btn_ele)
    self.upload_file(new_attachment, self.upload_attachment_ele)
    self.input_text(new_comment, self.add_comment_ele)
    self.click(self.save_comment_only_btn_ele)
    check_attachment = self.get_element_text(self.attachment_ele)
    check_comment = self.get_element_text(self.comment_ele)
    assert check_attachment == attachment
    assert check_comment == new_comment
    Log.info("Saved comment only successfully.")
def savePasswords(self, password: str, onSuccess: object, onError: object):
    self.logStart()
    Log.pushStatus("opening", COLOR.STATUS_SUCCESS)
    self._parentOnSuccess = onSuccess
    self._parentOnError = onError
    self._openChain = ProcHandlerChain(procHandlerChain=[
        _getOpenCryptDeviceHandler(password),
        MOUNT_HANDLER,
    ])
    self._openChain.run(onSuccess=self._onSuccessOpenSave, onError=self._onError)
    self.logEnd()
def terminate_employment(self, terdate):
    """ Terminate the employment for the current employee """
    Log.info("Start to terminate employment...")
    self.click(self.terminate)
    self.wait_unit_el_present(self.ter_title)
    assert self.get_element(self.ter_title).is_displayed()
    self.select_option(self.ter_reason, 7)
    self.input_text("test-terminate", self.ter_note)
    self.input_text(terdate, self.ter_date)
    self.click(self.ter_conf_btn)
    assert terdate in self.get_element_text(self.ter_msg).encode("utf-8")
def check_employee_exists(self, ele):
    """ Check whether any employee exists in the Available employee list """
    Log.info('Start to check if there are employees in available employee list')
    try:
        select = self.get_element(ele)
        empno = len(select.find_elements_by_tag_name('option'))
        return empno
    except NoSuchElementException:
        Log.info('There is no employee in %s list' % ele)
        return 0
def decrypt(encryptCode: str, _private_key=private.api_key) -> Any:
    """Decrypt with the RSA private key."""
    try:
        _secret_byte_obj = base64.b64decode(encryptCode)
        cipher_pri_obj = PKCS1_v1_5.new(RSA.import_key(_private_key))
        _byte_obj = cipher_pri_obj.decrypt(_secret_byte_obj, Random.new().read)
        plain_text = _byte_obj.decode("utf-8")
        Log.info(f"RSA decryption result: {plain_text}")
        return json.loads(plain_text)
    except Exception as ex:
        Log.error(f"Failed to decrypt RSA message: {ex}")
        return {}
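# For reference, a minimal sketch of the matching encryption side, assuming the
# same PyCryptodome primitives used by decrypt() above. The `public_key`
# parameter is a hypothetical counterpart to `private.api_key`; it is not
# defined anywhere in this codebase.
import base64
import json
from typing import Any

from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA


def encrypt(payload: Any, public_key: str) -> str:
    """Serialize payload to JSON and RSA-encrypt it, returning base64 text."""
    cipher_pub_obj = PKCS1_v1_5.new(RSA.import_key(public_key))
    _secret_byte_obj = cipher_pub_obj.encrypt(json.dumps(payload).encode("utf-8"))
    return base64.b64encode(_secret_byte_obj).decode("utf-8")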
def delete_candidates(self, vacan_name):
    """ Delete candidates --- added by julia """
    self.get_element(self.backbtn).click()
    tcandi_xpath = self.tcandi_xpath.format(vacan_name)
    tcandi = ('xpath', tcandi_xpath)
    candi = self.get_element(tcandi)
    if candi is not None:
        self.click(tcandi)
        self.click(self.delete_btn)
        self.click(self.cdelete_btn)
        Log.info('Candidates record cleaned up')
def test_vps0(self):
    print "create vps00"
    logger = Log("test", config=conf)
    vpsops = VPSOps(logger)
    xv = XenVPS(0)
    try:
        xv.setup(os_id=10001, vcpu=1, mem_m=512, disk_g=7, ip="10.10.2.2",
                 netmask="255.255.255.0", gateway="10.10.2.1", root_pw="fdfdfd")
        xv.add_extra_storage(disk_id=1, size_g=1, fs_type='ext3')
        print xv.gen_xenpv_config()
        vpsops.create_vps(xv)
    except Exception, e:
        logger.exception(e)
        raise e
def verify_new_leave(self, leave_period, entitlement):
    """ Verify search table result for leave type not All """
    assert "Added" == self.get_element_text(
        ("xpath", self.search_table_element + "/td[2]")).encode("utf-8")
    assert leave_period.split(" - ")[0] == self.get_element_text(
        ("xpath", self.search_table_element + "/td[3]")).encode("utf-8")
    assert leave_period.split(" - ")[1] == self.get_element_text(
        ("xpath", self.search_table_element + "/td[4]")).encode("utf-8")
    assert '{:.2f}'.format(float(entitlement)) == self.get_element_text(
        ("xpath", self.search_table_element + "/td[5]")).encode("utf-8")
    Log.info("Add new leave entitlement successfully")
def fill_in_text_element_by_xpath(driver, element_xpath, text):
    """
    fill_in_text_element_by_xpath
    Fills in a text field for an element found by xpath
    :param driver: Webdriver controller for the web page
    :param element_xpath: XPath of the element
    :param text: Text to fill in.
    """
    Log.info(f"Filling in text {text} on element of XPath {element_xpath}")
    element = driver.find_element_by_xpath(element_xpath)
    element.send_keys(text)
def get_first_element_of_list_by_xpath(driver, xpath):
    """
    get_first_element_of_list_by_xpath
    Calls get_elements_by_xpath and takes the first element of the list
    :param driver: Webdriver for the browser
    :param xpath: Xpath for the list of elements
    :return: WebElement object
    """
    Log.info(f"Getting first element of list in {xpath}")
    element_list = get_elements_by_xpath(driver, xpath)
    return element_list[0]
def fill_in_text_element_by_id(driver, element_id, text):
    """
    fill_in_text_element_by_id
    Fills in a text field for an element found by ID
    :param driver: Webdriver controller for the web page
    :param element_id: ID of the element
    :param text: Text to fill in.
    """
    Log.info(f"Filling in text {text} on element of ID {element_id}")
    element = driver.find_element_by_id(element_id)
    element.send_keys(text)
def edit_employee_status(self, edit_name):
    """ Edit an employment status record """
    Log.info("Start to edit employee status")
    self.click(self.edit_record)
    self.sleep(3)
    self.clear_text(self.employment_status_name)
    self.input_text(edit_name, self.employment_status_name)
    self.sleep(3)
    self.click(self.save_btn)
    assert "Successfully Saved" in self.get_element_text(self.success_flag)
    Log.info("Edited successfully")
def fill_in_text_element_by_class(driver, element_class_name, text):
    """
    fill_in_text_element_by_class
    Fills in a text field for an element found by class name
    :param driver: Webdriver controller for the web page
    :param element_class_name: Class name of the element
    :param text: Text to fill in.
    """
    Log.info(f"Filling in text {text} on element of Class {element_class_name}")
    element = driver.find_element_by_class_name(element_class_name)
    element.send_keys(text)
def find_by_xpath(driver, xpath):
    """
    find_by_xpath
    Finds, using xpath, an element on a page
    :param driver: Webdriver controller for the web page
    :param xpath: Xpath of the element
    :return: Element
    :rtype: Element Object
    """
    Log.info(f"Finding element with Xpath {xpath}")
    return driver.find_element_by_xpath(xpath)
def find_by_id(driver, element_id):
    """
    find_by_id
    Finds, using element ID, an element on a page
    :param driver: Webdriver controller for the web page
    :param element_id: ID of the element
    :return: Element
    :rtype: Element Object
    """
    Log.info(f"Finding element with ID {element_id}")
    return driver.find_element_by_id(element_id)
def delete_vacancies(self, vacan_name):
    """ Delete vacancies """
    self.recruitment.click_menu('Vacancies')
    Log.info("Arrive at Vacancies page")
    tvacan_xpath = self.tvacan_xpath.format(vacan_name)
    tvacan = ('xpath', tvacan_xpath)
    vacan = self.get_element(tvacan)
    if vacan is not None:
        self.click(tvacan)
        self.click(self.delete_btn)
        self.click(self.vdelete_btn)
        Log.info('Vacancies record cleaned up')
def get_element_text(self, keys):
    """ Get element text """
    try:
        element = self.get_element(keys)
        if element is None:
            raise Exception(
                Log.error("Could not locate the element value {}".format(keys)))
        else:
            return element.text
    except BaseException, e:
        Log.error(e)
def delete_employee(self, last_name):
    """ Delete all employees with the given last name """
    self.click_menu("Employee List")
    try:
        self.check_employee_checkbox(last_name)
        self.click(self.delete_btn_ele)
        self.click(self.ok_btn_ele)
        Log.info("All employees with last name: %s are deleted!" % last_name)
    except Exception:
        Log.info("No employee with last name: %s is listed!" % last_name)
    self.click_menu("Add Employee")
def get_elements_texts(self, keys):
    """ Get the texts of a group of elements """
    text_list = []
    try:
        elements = self.get_elements(keys)
        for element in elements:
            text = element.text
            text_list.append(text.encode("utf-8"))
    except BaseException, e:
        Log.error(" Unable to find the element, please "
                  "check your keys %s" % keys)
        Log.error(e)
    return text_list
def add_user_employee(self, firstname, lastname):
    """
    Add employee:
    1. input first name and last name
    2. get employee id
    3. click save button
    """
    Log.info("Add employee record")
    self.clear_text(self.emp_firstname_field)
    self.input_text(firstname, self.emp_firstname_field)
    self.clear_text(self.emp_lastname_field)
    self.input_text(lastname, self.emp_lastname_field)
    self.click(self.employee_save_btn)
    Log.info("New Employee Added")
class VPSMgr(object):
    """ all exceptions should be caught and logged in this class """

    VERSION = 1

    def __init__(self):
        self.logger = Log("vps_mgr", config=conf)
        self.logger_err = Log("vps_mgr_err", config=conf)
        self.logger_misc = Log("misc", config=conf)
        self.host_id = conf.HOST_ID
        self.handlers = {
            Cmd.OPEN: self.__class__.vps_open,
            Cmd.REBOOT: self.__class__.vps_reboot,
            Cmd.CLOSE: self.__class__.vps_close,
            Cmd.OS: self.__class__.vps_reinstall_os,
        }
        self.timer = TimerEvents(time.time, self.logger_misc)
        assert conf.NETFLOW_COLLECT_INV > 0
        self.timer.add_timer(conf.NETFLOW_COLLECT_INV, self.send_netflow)
        self.timer.add_timer(12 * 3600, self.refresh_host_space)
        self.workers = []
        self.running = False

    def get_client(self):
        return get_client(VPS)

    def send_netflow(self):
        result = None
        try:
            result = netflow.read_proc()
        except Exception, e:
            self.logger_misc.exception("cannot read netflow data from proc: %s" % (str(e)))
            return
        ts = time.time()
        netflow_list = list()
        try:
            for ifname, v in result.iteritems():
                om = re.match(r"^vps(\d+)$", ifname)
                if not om:
                    continue
                vps_id = int(om.group(1))
                if vps_id <= 0:
                    continue
                # direction of vps bridged network interface needs to be reversed
                netflow_list.append(NetFlow(vps_id, rx=v[1], tx=v[0]))
        except Exception, e:
            self.logger_misc.exception("netflow data format error: %s" % (str(e)))
            return
def enable_notification(self):
    self.click(self.edit_btn)
    self.click(self.lea_app_box)
    self.click(self.lea_ass_box)
    self.click(self.lea_approvals_box)
    self.click(self.lea_cancel_box)
    self.click(self.lea_rejections_box)
    self.click(self.save_btn)
    self.check_element_selected(self.lea_app_box)
    self.check_element_selected(self.lea_ass_box)
    self.check_element_selected(self.lea_approvals_box)
    self.check_element_selected(self.lea_cancel_box)
    self.check_element_selected(self.lea_rejections_box)
    Log.info("enable notification successfully")
def delete_location(self):
    Log.info("Start to delete a location randomly")
    self.sleep(3)
    roweles = self.get_element(
        ("xpath", self.result_table)).find_elements_by_tag_name("a")
    rowcount = len(roweles)
    i = random.randint(0, rowcount - 1)
    name_text = roweles[i].text
    self.click(("xpath", self.del_path.format(name_text)))
    self.sleep(2)
    self.click(self.delete_btn)
    self.click(self.ok_btn)
    assert self.get_element(("xpath", self.name_link.format(name_text))) is None
def wait_for_page_changes(driver, page_title_to_change_to, wait_for_seconds=60):
    """
    wait_for_page_changes
    Waits for a page to change, subject to the timeout specified in wait_for_seconds
    :param driver: Webdriver controller for the web page
    :param page_title_to_change_to: Title of the page we want to go to.
    :param wait_for_seconds: Timeout value for the page to change to the desired page title
    """
    Log.info(f"Current page title is {driver.title}")
    Log.info(f"Waiting {wait_for_seconds} seconds for page to change to {page_title_to_change_to}")
    try:
        WebDriverWait(driver, wait_for_seconds).until(
            expected_conditions.title_contains(page_title_to_change_to))
    except TimeoutException as e:
        Log.error(f"Timed out with page title at {driver.title}")
        take_screenshot(driver)
        driver.quit()
        raise TimeoutException(f"Timed out with page title at {driver.title}. {e}")
    Log.info(f"Page has changed to {page_title_to_change_to}")
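# A short usage sketch tying the webdriver helpers above together, assuming they
# live in the same module. The URL, element IDs, input values, and the target
# page title "Dashboard" are hypothetical placeholders, not values from the
# real test suite.
from selenium import webdriver

driver = webdriver.Chrome()
driver.get("https://example.com/login")
fill_in_text_element_by_id(driver, "username", "admin")
fill_in_text_element_by_id(driver, "password", "secret")
find_by_id(driver, "submit-btn").click()
wait_for_page_changes(driver, "Dashboard", wait_for_seconds=30)
driver.quit()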
def set_combox_value(self, value, keys):
    """ Choose combox value """
    try:
        element = self.get_element(keys)
        if element is None:
            raise Exception(
                Log.error("Could not locate the element value {}".format(keys)))
        else:
            Select(element).select_by_visible_text(value)
    except NoSuchElementException:
        Log.error("Could not locate the element value {}".format(value))
def cancel_adding_pay_grades(self, add_name):
    """ Try to cancel adding a pay grade """
    Log.info("Start to cancel adding a pay grade")
    self.click(self.add_btn)
    self.clear_text(self.name)
    self.input_text(add_name, self.name)
    self.sleep(1)
    self.click(self.cancel_btn)
    self.sleep(1)
    assert add_name not in self.get_elements_texts(self.pay_grade_list)
    Log.info("Add operation was canceled successfully")
def main():
    logger = Log("vps_mgr", config=conf)
    mgr = VPSMgr()
    host_list = None
    try:
        rpc = mgr.rpc_connect()
        try:
            host_list = rpc.host_list()
        finally:
            rpc.close()
        update_iplist(host_list)
    except Exception, e:
        print e
        logger.exception(e)
        return
class TransWarpServer(TransWarpBase):

    def __init__(self):
        TransWarpBase.__init__(self)
        self.logger = Log("server", config=config)
        self.engine.set_logger(self.logger)
        self.addr = config.SERVER_ADDR
        self.auth_keys = config.ACCEPT_KEYS
        self.passive_sock = None

    def _new_client(self, sock):
        conn = Connection(sock)
        print "new %s" % (str(conn.peer))
        self.engine.read_unblock(
            conn, self.head_len, self._on_recv_head, None, cb_args=(self._auth, ))

    def _auth(self, cli_conn):
        auth_data = None
        try:
            auth_data = proto.AuthData.deserialize(
                cli_conn.get_readbuf(), self.auth_keys)
        except Exception, e:
            self.logger.exception("peer %s %s" % (cli_conn.peer, str(e)))
        if not auth_data:
            self.logger.warn("peer %s not authorized" % (str(cli_conn.peer)))
            self.engine.close_conn(cli_conn)
            return
        client = proto.ClientData(
            auth_data.r_host, auth_data.r_port, cli_conn, auth_data.seed, auth_data.key)
        self.engine.remove_conn(cli_conn)
        self.client_conn[client.client_id] = client
        self.logger.info("client %s auth" % (client.client_id))
        client.cli_state = proto.ClientState.CONNECTING
        self.engine.connect_unblock(
            (client.r_host, client.r_port), self._on_remote_conn,
            self._on_remote_conn_err, cb_args=(client, ))
        resp = proto.ServerResponse(0, "")
        buf = client.crypter_w.encrypt(resp.serialize())

        def _write_ok(cli_conn, *args):
            client.cli_state = proto.ClientState.CONNECTED
            self._check_client_state(client)
            return

        self.engine.write_unblock(
            client.cli_conn, proto.pack_head(len(buf)) + buf,
            _write_ok, self._on_err, cb_args=(client, ))
class RaidMonitor(object):

    def __init__(self):
        self.logger = Log("raid_mon", config=conf)
        self.is_running = False
        self.last_state = True
        self.vps_mgr = VPSMgr()
        self.hostname = socket.gethostname()

    def start(self):
        if self.is_running:
            return
        self.is_running = True
        self.logger.info("started")

    def stop(self):
        if not self.is_running:
            return
        self.is_running = False

    def send_alarm(self, msg):
        rpc = self.vps_mgr.rpc_connect()
        try:
            rpc.alarm("%s: raid_mon: %s" % (self.hostname, msg))
        finally:
            rpc.close()

    def check(self):
        cmd = """MegaCli64 -pdlist -aall | grep -i 'firmware state:' | grep -P -v -i "online|Unconfigured\(good\)" """
        try:
            out, err = subprocess.Popen(
                cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
            msg = out + err
            if msg:
                self.logger.error(msg)
                if self.last_state:
                    self.last_state = False
                    self.send_alarm("error, %s" % (out))
                    self.logger.error("alarm sent")
            else:
                self.logger.info("ok")
                if not self.last_state:
                    self.send_alarm("ok")
                    self.last_state = True
        except Exception, e:
            self.logger.exception(e)
def main():
    if len(sys.argv) != 2:
        print("usage: lecture1 input_file")
        return
    entries = Log.from_file(sys.argv[1])
    error_count = count_errors(entries)
    if error_count is None:
        print("oops, boo boo! does the file exist?")
    else:
        print(error_count)
def main():
    if len(sys.argv) != 2:
        print("usage: lecture1 input_file")
        return
    try:
        entries = Log.from_file(sys.argv[1], skip_malformed_entries=True)
        error_count = count_errors(entries)
        print(error_count)
    except FileNotFoundError:
        print("could not open log file")
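# count_errors is referenced in both variants above but not shown. A minimal
# sketch of one possible shape, assuming each parsed entry exposes a
# hypothetical `level` attribute; this is illustrative, not the lecture's code.
def count_errors(entries):
    """Count entries flagged as errors; return None when there are no entries."""
    if entries is None:
        return None
    return sum(1 for entry in entries if entry.level == "ERROR")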
def __init__(self, status, headers={}, tpl=None):
    # get http status codes from json
    with open('config/http_status_codes.json') as hsc:
        jhsc = json.load(hsc)
    # view params
    params = {'key': status, 'value': ''}
    params['SERVER_NAME'] = Env.get('SERVER_NAME')
    params['CONTACT'] = Env.get('CONTACT')
    params['SERVER_SOFTWARE'] = Env.get('SERVER_SOFTWARE')
    # status exists in json
    if status in jhsc:
        params['value'] = jhsc[status]
    # status tpl
    if tpl is None:
        tpl = "error/" + status + ".tpl"
    # Content-Type header is required
    if 'Content-Type' not in headers:
        headers['Content-Type'] = 'text/html'
    # load view
    data = View.load(tpl, params)
    # view does not exist, load default tpl
    if data is False:
        tpl = 'error/default.tpl'
        data = View.load(tpl, params)
    msg = "\nzpy Error: \n"
    msg += "Status: " + status + "\n"
    msg += "Url: " + Env.get('REQUEST_URI') + "\n"
    msg += "Remote Address: " + Env.get('REMOTE_ADDR') + "\n"
    msg += "Date: " + time.strftime("%Y/%m/%d %H:%M:%S") + "\n"
    msg += "-------------------------------------------"
    Log.debug(msg)
    web.HTTPError.__init__(self, status, headers, data)
def main():
    bridge = args[0]
    vif_name = args[1]
    logger = Log("vps_mgr", config=conf)
    try:
        ovsops = OVSOps()
        ofport = ovsops.find_ofport_by_name(vif_name)
        if ofport < 0:
            logger.error("vif %s ofport=%s, skip it" % (vif_name, ofport))
        else:
            ovsops.unset_mac_filter(bridge, ofport)
        # it's strange that if you unset traffic first, you might find ofport==-1
        ovsops.unset_traffic_limit(vif_name)
        logger.debug("unset %s" % vif_name)
        return 0
    except Exception, e:
        logger.exception(e)
        print >> sys.stderr, str(e)
        return 0
class SaasMonitor(object):

    def __init__(self):
        self.is_running = False
        self.hostname = socket.gethostname()
        self.logger = Log("saas_mon", config=conf)
        self.recover_thres = conf.SAAS_RECOVER_THRESHOLD or (30 * 5)
        self.bad_thres = conf.SAAS_BAD_THRESHOLD or 5
        self.alarm_q = JobQueue(self.logger)
        self.emailalarm = EmailAlarm(self.logger)
        self.last_state = True

    def start(self):
        if self.is_running:
            return
        self.is_running = True
        self.alarm_q.start_worker(1)
        self.logger.info("started")

    def stop(self):
        if not self.is_running:
            return
        self.is_running = False
        self.alarm_q.stop()

    def check(self):
        vps = None
        try:
            rpc = SAAS_Client(conf.HOST_ID, self.logger)
            rpc.connect()
            try:
                _id = rpc.todo(CMD.MONITOR)
            finally:
                rpc.close()
            self.logger.info("ok")
            return True
        except Exception, e:
            self.logger.exception(e)
            return False
class ICMPMonitor(object):

    def __init__(self):
        self.is_running = False
        self.linkage_dict = dict()
        self.logger = Log("icmp_mon", config=config)
        self.alarm_q = JobQueue(self.logger)
        self.emailalarm = EmailAlarm(Log("alarm", config=config))
        self.logger_links = Log("links", config=config)
        if 'log_length_per_link' in dir(config):
            self.log_length_per_link = config.log_length_per_link
        else:
            self.log_length_per_link = 128
        if 'links' not in dir(config):
            self.logger.error("no 'links' in config")
            return
        g_alarm_levels = None
        g_recover = None
        if 'alarm_levels' in dir(config):
            g_alarm_levels = self._parse_alarm_levels(config.alarm_levels)
        if 'recover' in dir(config):
            g_recover = int(config.recover)
        links = config.links
        if isinstance(links, dict):
            for ip, v in links.iteritems():
                if not isinstance(v, dict):
                    v = dict()
                ttl = v.get('ttl')
                if ttl >= 0:
                    pass
                else:
                    ttl = 0
                alarm_levels = v.get('alarm_levels')
                if not alarm_levels and g_alarm_levels:
                    alarm_levels = g_alarm_levels
                elif alarm_levels:
                    alarm_levels = self._parse_alarm_levels(alarm_levels)
                    if not alarm_levels:
                        continue
                else:
                    self.logger.error("config: %s, missing alarm_levels value" % (ip))
                    continue
                recover = v.get('recover')
                if recover:
                    recover = int(recover)
                elif not recover and g_recover:
                    recover = g_recover
                else:
                    self.logger.error("config: %s, missing recover value" % (ip))
                    continue
                self.linkage_dict[ip] = Linkage(ip, alarm_levels, recover)
        self.logger.info("%d link loaded from config" % (len(self.linkage_dict.keys())))

    def _parse_alarm_levels(self, alarm_levels, ip=""):
        if not isinstance(alarm_levels, (tuple, list)):
            self.logger.error("config: %s, alarm_levels is not a list" % (ip))
            return
        _alarm_levels = filter(lambda x: isinstance(x, int), alarm_levels)
        if len(_alarm_levels) != len(alarm_levels):
            self.logger.error("config: %s, elements in alarm_levels must be integers" % (ip))
            return
        return _alarm_levels

    def start(self):
        if self.is_running:
            return
        self.is_running = True
        self.alarm_q.start_worker(1)
        self.logger.info("started")

    def stop(self):
        if not self.is_running:
            return
        self.is_running = False
        self.alarm_q.stop()
        self.logger.info("stopped")

    def _alarm_enqueue(self, link):
        t = "%Y-%m-%d %H:%M:%S"
        ts = "[%s]" % (time.strftime(t, time.localtime()))
        job = AlarmJob(self.emailalarm, ts + link.alarm_text(), link.details())
        self.alarm_q.put_job(job)

    def loop(self):
        ips = self.linkage_dict.keys()
        fping = FPing(ips)
        while self.is_running:
            start_time = time.time()
            recv_dict, error_dict = fping.ping(1)
            for ip, rtt in recv_dict.iteritems():
                link = self.linkage_dict[ip]
                res = link.new_state(True, rtt)
                if res:
                    self._alarm_enqueue(link)
                print ip, "ok", rtt
                if len(link.bitmap) == self.log_length_per_link:
                    self.logger_links.info(link.details())
                    link.reset_bitmap()
            for ip, err in error_dict.iteritems():
                link = self.linkage_dict[ip]
                res = link.new_state(False, 0)
                if res is False:
                    self._alarm_enqueue(link)
                print ip, "err", link.bitmap
                if len(link.bitmap) == self.log_length_per_link:
                    self.logger_links.info(link.details())
                    link.reset_bitmap()
            end_time = time.time()
            if end_time < start_time + 1:
                time.sleep(1 - end_time + start_time)
class VPSMgr(object):
    """ all exceptions should be caught and logged in this class """

    VERSION = 1

    def __init__(self):
        self.logger = Log("vps_mgr", config=conf)
        self.logger_net = Log("vps_mgr_net", config=conf)
        self.logger_misc = Log("misc", config=conf)
        self.logger_debug = Log("debug", config=conf)
        self.host_id = conf.HOST_ID
        self.vpsops = VPSOps(self.logger)
        self.handlers = {
            CMD.OPEN: self.__class__.vps_open,
            CMD.REBOOT: self.__class__.vps_reboot,
            CMD.CLOSE: self.__class__.vps_close,
            CMD.OS: self.__class__.vps_reinstall_os,
            CMD.UPGRADE: self.__class__.vps_upgrade,
            CMD.BANDWIDTH: self.__class__.vps_set_bandwidth,
            CMD.RM: self.__class__.vps_delete,
            CMD.PRE_SYNC: self.__class__.vps_hot_sync,
            CMD.MIGRATE: self.__class__.vps_migrate,
            CMD.RESET_PW: self.__class__.vps_reset_pw,
        }
        self._locker = threading.Lock()
        self._vps_locker = dict()
        self.xenstat = XenStat()
        self.timer = TimerEvents(time.time, self.logger_misc)
        assert conf.MONITOR_COLLECT_INV > 0
        self.last_netflow = None
        self.last_diskstat = None
        self.monitor_inv = conf.MONITOR_COLLECT_INV
        self.last_monitor_ts = None
        self.timer.add_timer(conf.MONITOR_COLLECT_INV, self.monitor_vps)
        self.timer.add_timer(12 * 3600, self.refresh_host_space)
        self.workers = []
        self.running = False

    def _try_lock_vps(self, cmd, vps_id):
        self._locker.acquire()
        if self._vps_locker.has_key(vps_id):
            _cmd = self._vps_locker.get(vps_id)
            self.logger_debug.info("CMD %s try to lock vps%s failed: locked by CMD %s" % (
                CMD._get_name(cmd), vps_id, CMD._get_name(_cmd)))
            res = False
        else:
            self._vps_locker[vps_id] = cmd
            res = True
        self._locker.release()
        return res

    def _unlock_vps(self, cmd, vps_id):
        self._locker.acquire()
        try:
            _cmd = self._vps_locker.get(vps_id)
            if _cmd == cmd:
                del self._vps_locker[vps_id]
        except KeyError:
            pass
        self._locker.release()

    def rpc_connect(self):
        rpc = SAAS_Client(self.host_id, self.logger_debug)
        rpc.connect()
        return rpc

    def monitor_vps(self):
        net_result = None
        disk_result = None
        try:
            net_result = netflow.read_proc()
            disk_devs = glob.glob("/dev/main/vps*")
            if 'MAIN_DISK' in dir(conf):
                disk_devs.append(conf.MAIN_DISK)
            disk_result = diskstat.read_stat(disk_devs)
        except Exception, e:
            self.logger_misc.exception("cannot read netflow data from proc: %s" % (str(e)))
            return
        ts = time.time()
        dom_map = XenStore.domain_name_id_map()
        dom_names = dom_map.keys()
        self.xenstat.run(dom_names)
        payload = CarbonPayload()
        try:
            payload.append("host.cpu.%s.all" % (self.host_id), ts, self.xenstat.total_cpu)
            for dom_name in dom_names:
                om = re.match(r"^vps(\d+)$", dom_name)
                if not om:
                    # dom0
                    dom_cpu = self.xenstat.dom_dict.get(dom_name)
                    if dom_cpu:
                        payload.append("host.cpu.%s.dom0" % (self.host_id), dom_cpu['ts'], dom_cpu['cpu_avg'])
                    if 'MAIN_DISK' in dir(conf) and self.last_diskstat:
                        t_elapse = ts - self.last_monitor_ts
                        v = disk_result.get(conf.MAIN_DISK)
                        last_v = self.last_diskstat.get(conf.MAIN_DISK)
                        read_ops, read_byte, write_ops, write_byte, util = diskstat.cal_stat(v, last_v, t_elapse)
                        payload.append("host.io.%d.ops.read" % (self.host_id), ts, read_ops)
                        payload.append("host.io.%d.ops.write" % (self.host_id), ts, write_ops)
                        payload.append("host.io.%s.traffic.read" % (self.host_id), ts, read_byte)
                        payload.append("host.io.%s.traffic.write" % (self.host_id), ts, write_byte)
                        payload.append("host.io.%s.util" % (self.host_id), ts, util)
                        print conf.MAIN_DISK, read_ops, write_ops, read_byte, write_byte, util
                    if self.last_netflow:
                        t_elapse = ts - self.last_monitor_ts
                        v = net_result.get(conf.EXT_INF)
                        last_v = self.last_netflow.get(conf.EXT_INF)
                        _in = fix_flow((v[0] - last_v[0]) * 8.0 / t_elapse)
                        _out = fix_flow((v[1] - last_v[1]) * 8.0 / t_elapse)
                        _in_pp = (v[2] - last_v[2]) / t_elapse
                        _out_pp = (v[3] - last_v[3]) / t_elapse
                        payload.append("host.netflow.%d.ext.in" % (self.host_id), ts, _in)
                        payload.append("host.netflow.%d.ext.out" % (self.host_id), ts, _out)
                        payload.append("host.netflow.%d.ext_pp.in" % (self.host_id), ts, _in_pp > 0 and _in_pp or 0)
                        payload.append("host.netflow.%d.ext_pp.out" % (self.host_id), ts, _out_pp > 0 and _out_pp or 0)
                        v = net_result.get(conf.INT_INF)
                        last_v = self.last_netflow.get(conf.INT_INF)
                        _in = fix_flow((v[0] - last_v[0]) * 8.0 / t_elapse)
                        _out = fix_flow((v[1] - last_v[1]) * 8.0 / t_elapse)
                        _in_pp = (v[2] - last_v[2]) / t_elapse
                        _out_pp = (v[3] - last_v[3]) / t_elapse
                        payload.append("host.netflow.%d.int.in" % (self.host_id), ts, _in)
                        payload.append("host.netflow.%d.int.out" % (self.host_id), ts, _out)
                        payload.append("host.netflow.%d.int_pp.in" % (self.host_id), ts, _in_pp > 0 and _in_pp or 0)
                        payload.append("host.netflow.%d.int_pp.out" % (self.host_id), ts, _out_pp > 0 and _out_pp or 0)
                else:
                    vps_id = int(om.group(1))
                    xv = self.vpsops.load_vps_meta(vps_id)
                    dom_cpu = self.xenstat.dom_dict.get(dom_name)
                    if dom_cpu:
                        payload.append("vps.cpu.%s" % (vps_id), dom_cpu['ts'], dom_cpu['cpu_avg'])
                    if not self.last_netflow or not self.last_diskstat:
                        break
                    # net
                    ifname = dom_name
                    vif = xv.vifs.get(ifname)
                    v = net_result.get(ifname)
                    last_v = self.last_netflow.get(ifname)
                    t_elapse = ts - self.last_monitor_ts
                    if v and last_v:
                        # direction of vps bridged network interface needs to be reversed
                        _in = fix_flow((v[1] - last_v[1]) * 8.0 / t_elapse)
                        _out = fix_flow((v[0] - last_v[0]) * 8.0 / t_elapse)
                        _in = (vif.bandwidth and vif.bandwidth * 1024 * 1024 < _in) and vif.bandwidth * 1024 * 1024 or _in
                        _out = (vif.bandwidth and vif.bandwidth * 1024 * 1024 < _out) and vif.bandwidth * 1024 * 1024 or _out
                        payload.append("vps.netflow.%d.in" % (vps_id), ts, _in)
                        payload.append("vps.netflow.%d.out" % (vps_id), ts, _out)
                        if conf.LARGE_NETFLOW and _in >= conf.LARGE_NETFLOW or _out >= conf.LARGE_NETFLOW:
                            self.logger_misc.warn("%s in: %.3f mbps, out: %.3f mbps" % (
                                ifname, _in / 1024.0 / 1024.0, _out / 1024.0 / 1024.0))
                    # disk
                    if conf.USE_LVM and self.last_diskstat:
                        for disk in xv.data_disks.values():
                            v = disk_result.get(disk.dev)
                            last_v = self.last_diskstat.get(disk.dev)
                            if not last_v:
                                continue
                            read_ops, read_byte, write_ops, write_byte, util = diskstat.cal_stat(v, last_v, t_elapse)
                            print disk.xen_dev
                            payload.append("vps.io.%d.%s.ops.read" % (vps_id, disk.xen_dev), ts, read_ops)
                            payload.append("vps.io.%d.%s.ops.write" % (vps_id, disk.xen_dev), ts, write_ops)
                            payload.append("vps.io.%d.%s.traffic.read" % (vps_id, disk.xen_dev), ts, read_byte)
                            payload.append("vps.io.%d.%s.traffic.write" % (vps_id, disk.xen_dev), ts, write_byte)
                            payload.append("vps.io.%d.%s.util" % (vps_id, disk.xen_dev), ts, util)
                        v = disk_result.get(xv.swap_store.dev)
                        last_v = self.last_diskstat.get(xv.swap_store.dev)
                        if v and last_v:
                            read_ops, read_byte, write_ops, write_byte, util = diskstat.cal_stat(v, last_v, t_elapse)
                            payload.append("vps.io.%d.swap.ops.read" % (vps_id), ts, read_ops)
                            payload.append("vps.io.%d.swap.ops.write" % (vps_id), ts, write_ops)
                            payload.append("vps.io.%d.swap.traffic.read" % (vps_id), ts, read_byte)
                            payload.append("vps.io.%d.swap.traffic.write" % (vps_id), ts, write_byte)
                            payload.append("vps.io.%d.swap.util" % (vps_id), ts, util)
            self.last_netflow = net_result
            self.last_diskstat = disk_result
            self.last_monitor_ts = ts
        except Exception, e:
            self.logger_misc.exception(e)
            return
def _load(self):
    # get routes
    with open('config/route.json') as f:
        r = json.load(f)
    # get uri
    path = Url.getUri()
    # if url doesn't end with /, redirect to the /-terminated url
    if not path.endswith('/'):
        path = path + "/"
        raise web.redirect(path, '301 ')
    # check url against routes json
    for key, value in r.items():
        if self._regex(path, key):
            # controller dynamic load
            module_name = "controller.{}".format(value['controller'])
            # uppercase first char
            class_name = value['controller'].title()
            # default method name
            method_name = 'main'
            if 'method' in value:
                method_name = value['method']
            # import module from path
            try:
                module = __import__(module_name, fromlist=[class_name])
            except Exception as e:
                Log.error(e)
                raise Error('500')
            # get class from module
            try:
                class_object = getattr(module, class_name)
            except Exception as e:
                return Log.error(e)
            # instantiate controller
            try:
                controller_instance = class_object()
            except Exception as e:
                return Log.error(e)
            # get method from class instance
            try:
                func = getattr(controller_instance, method_name)
            except Exception as e:
                return Log.error(e)
            # call the method
            try:
                func_instance = func()
                return func_instance
            except Exception as e:
                m = re.match("[0-9]{3}", str(e))
                if m:
                    http_code = m.group(0)
                    if re.match("(4|5)", http_code):
                        Error(http_code)
    # no route matched, redirect to 404 page
    raise Error('404')
from zmq.eventloop.ioloop import ZMQIOLoop
from lib.log import Log


def prepare(conf_file):
    cpff = ConfigParserFromFile()
    conf_file | E(cpff.parseall) | E(conf_drawer.setup)


if __name__ == "__main__":
    includes = None
    opts, argvs = getopt.getopt(sys.argv[1:], "c:h")
    for op, value in opts:
        if op == '-c':
            includes = value
            path._ETC_PATH = os.path.dirname(os.path.abspath(value))
        elif op == '-h':
            print u'''Start with arguments:
    usage: [-c]
    -c <file>    load the configuration file
    '''
            sys.exit(0)
    if not includes:
        includes = os.path.join(path._ETC_PATH, 'includes_dev.json')
        print "no configuration found! will use [%s] instead" % includes
    prepare(includes)
    Log.rose_log().info("starting...")
    ZMQIOLoop.instance().start()
def main():
    bridge = args[0]
    vif_name = args[1]
    logger = Log("vps_mgr", config=conf)
    vpsops = VPSOps(logger)
    logger.debug("set %s" % vif_name)
    try:
        ovsops = OVSOps()
        om = re.match(r"^\w+?(\d+)\w*?$", vif_name)
        if not om:
            print >> sys.stderr, "wrong vif format %s" % (vif_name)
            return 1
        vps_id = int(om.group(1))
        xv = vpsops.load_vps_meta(vps_id)
        vif = xv.vifs.get(vif_name)
        if not vif:
            logger.error("no vif %s in metadata of %s" % (vif_name, vps_id))
            return 1
        ofport = ovsops.find_ofport_by_name(vif_name)
        if ofport < 0:
            logger.error("vif %s ofport=%s, fix it by deleting the port from the bridge" % (vif_name, ofport))
            ovsops.del_port_from_bridge(bridge, vif_name)
            ovsops.add_port_to_bridge(bridge, vif_name)
            ofport = ovsops.find_ofport_by_name(vif_name)
            if ofport < 0:
                logger.error("vif %s ofport=%s, impossible" % (vif_name, ofport))
        if ofport >= 0:
            ovsops.set_mac_filter(bridge, ofport, vif.ip_dict.keys())
        ovsops.unset_traffic_limit(vif_name)
        bandwidth = float(vif.bandwidth or 0)
        ovsops.set_traffic_limit(vif_name, int(bandwidth * 1000))
        print "set vif %s bandwidth %sm/s" % (vif_name, vif.bandwidth)
        return 0
    except Exception, e:
        logger.exception(e)
        print >> sys.stderr, str(e)
        return 1
# Copyright 2010 Dominik "Socek" Długajczyk
#
# This file is part of Gadu History.
#
# Gadu History is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Gadu History is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gadu History; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#

from lib.log import Log
Log.init()

from lib.application import app

if __name__ == '__main__':
    try:
        app.init()
        app.first_view()
    finally:
        app.close()
        Log.end()
class VpsProxy(object):

    NGINX_TEMPLATE = """
server {
    listen 80;
    server_name %(host)s *.%(host)s;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header REMOTE-HOST $remote_addr;
    proxy_set_header HOST $host.%(suffix)s;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    location / {
        proxy_pass http://%(ip)s;
    }
}
"""

    def __init__(self):
        self.logger = Log("proxy", config=config)
        self.logger_rpc = Log("proxy_rpc", config=config)
        self.output_path = config.NGINX_CONF_PATH
        self.nginx_reload_cmd = config.NGINX_RELOAD_CMD
        self.rpc = AES_RPC_Client(_private.KEY, self.logger_rpc)

    def start(self):
        self.is_running = True

    def stop(self):
        self.is_running = False

    def loop(self):
        while self.is_running:
            self.gen_config()
            time.sleep(conf.RUN_INV)

    def reload_nginx(self):
        subprocess.check_output(self.nginx_reload_cmd)
        self.logger.info("nginx reloaded")
        print "nginx reloaded"

    def gen_config(self, force=False):
        domain_list = None
        try:
            self.rpc.connect(_private.SAAS_ADDR)
            try:
                domain_list = self.rpc.call("proxy_domain_list")
            finally:
                self.rpc.close()
        except Exception, e:
            self.logger.exception(e)
            print traceback.print_exc()
            return
        conf = []
        for i in domain_list:
            conf.append(self.NGINX_TEMPLATE % {
                'host': i["domain"],
                'ip': i["ip"],
                'suffix': _private.PROXY_DOMAIN_SUFFIX,
            })
        content = "".join(conf)
        try:
            if not force and os.path.exists(self.output_path) and _md5_file(self.output_path) == _md5(content):
                print "skip the same"
                return
            f = open(self.output_path, "w")
            try:
                f.write(content)
            finally:
                f.close()
            self.logger.info("conf generated")
            self.reload_nginx()
        except Exception, e:
            self.logger.exception(e)
            print traceback.print_exc()
class TransWarpClient(TransWarpBase):

    def __init__(self):
        TransWarpBase.__init__(self)
        self.logger = Log("client", config=config)
        self.engine.set_logger(self.logger)
        self.sock5_addr = config.SOCK5_ADDR
        self._sock5_users = 'SOCK5_USERS' in dir(config) and config.SOCK5_USERS or {}
        ip = self.sock5_addr[0]
        arr = map(lambda x: chr(int(x)), ip.split("."))
        self._sock5_server_id = struct.pack(
            "!4cH", arr[0], arr[1], arr[2], arr[3], self.sock5_addr[1])
        self.server_addr = config.SERVER_ADDR
        self.sock5_sock = None
        self.key = config.KEY

    def start(self):
        if self.is_running:
            return
        self.sock5_sock = self.engine.listen_addr(
            self.sock5_addr, readable_cb=None,
            new_conn_cb=self._sock5_handshake, backlog=50)
        self.is_running = True

    def stop(self):
        if not self.is_running:
            return
        self.engine.unlisten(self.sock5_sock)
        self.is_running = False

    def _send_sock5_unsupport(self, conn):
        self.logger.error("peer %s not supported" % (str(conn.peer)))
        buf = "%s%s\x00\x01%s" % (VER, "\x07", self._sock5_server_id)

        def __write_ok(conn):
            self.engine.close_conn(conn)
            return

        self.engine.write_unblock(conn, buf, __write_ok)

    def _send_sock5_reply(self, client, err_no):
        if err_no == 0:
            status = "\x00"
        elif err_no == errno.ENETUNREACH:
            status = "\x03"
        elif err_no == errno.EHOSTUNREACH:
            status = "\x04"
        elif err_no == errno.ECONNREFUSED:
            status = "\x05"
        else:
            status = "\x01"  # general error
        buf = "%s%s\x00\x01%s" % (VER, status, self._sock5_server_id)

        def __write_ok(cli_conn, *args):
            if err_no == 0:
                self.logger.info("client %s: sent sock5 response" % (client.client_id))
                client.cli_state = proto.ClientState.CONNECTED
                self._check_client_state(client)
            else:
                self.logger.info("client %s: sent sock5 err response and close" % (client.client_id))
                self.close_client(client)
            return

        self.engine.write_unblock(
            client.cli_conn, buf, __write_ok, self._on_err, cb_args=(client,))

    def _on_client_readable(self, cli_conn, client):
        # print "client %s client readable" % (client.client_id)
        self.stream_to_fix(cli_conn, client.r_conn, client)

    def _on_remote_readable(self, r_conn, client):
        # print "client %s remote readable" % (client.client_id)
        self.fix_to_stream(r_conn, client.cli_conn, client)

    def _on_server_connected(self, sock, client):
        self.logger.info("client %s connected to server" % (client.client_id))
        r_conn = Connection(sock)
        _hash = proto.myhash(client.seed, self.key)
        auth_data = proto.AuthData(
            client.seed, _hash, self.key, client.r_host, client.r_port)
        buf = auth_data.serialize()
        buf = proto.pack_head(len(buf)) + buf
        client.r_conn = r_conn

        def __on_remote_respond(r_conn, *args):
            resp = None
            try:
                buf = client.crypter_r.decrypt(r_conn.get_readbuf())
                resp = proto.ServerResponse.deserialize(buf)
                if resp.err_no:
                    self.logger.error("client %s: %s %s" % (client.client_id, resp.err_no, resp.message))
                    self.close_client(client)
                else:
                    self.logger.info("client %s server response" % (client.client_id))
                    client.r_state = proto.ClientState.CONNECTED
                    self._check_client_state(client)
            except Exception, e:
                self.logger.exception("client %s: server response error %s" % (client.client_id, str(e)))
                self.close_client(client)
            return

        def __on_read_head(r_conn, *args):
            data_len = 0
            try:
                data_len = proto.unpack_head(r_conn.get_readbuf())
            except Exception, e:
                self.logger.error("client %s remote head invalid" % (client.client_id))
                self.close_client(client)
                return
            if data_len > 0:
                self.engine.read_unblock(
                    r_conn, data_len, __on_remote_respond, self._on_err, cb_args=(client, ))
                return
            self.logger.error("zero len head")
            self.close_client(client)
            return