def client_send(self): #hello = 10 hello = int(self.response) #print "respone.....",type(self.response) print "hello.....",type(hello) #print hello data = str(transnit_data(self.filename).data_collect()) data_do = transnit_data(self.filename).data_collect() try: print data print "host is :",self.host print "port is :",self.port logging.info("Miontor agent started Successfully!") while True: self.do_working(data_do) try: host = self.host port = self.port s = socket.socket(socket.AF_INET,socket.SOCK_STREAM) s.connect((host,int(port))) logging.info(("Miontor agent has been successfully connected to the server!!")) s.sendall(data) logging.debug(data) buf = s.recv(8092) if not len(data): break s.close() except: logging.error('socket.error: [Errno 111] Connection refused') continue finally: time.sleep(hello) except: logging.error('The socket connect to the server failed!!!')
def binlog_dump(self, log_file, offset):
    """Send a COM_BINLOG_DUMP command and parse the server's OK packet.

    Payload layout after the packet header added by the socket layer:

        command code    1 byte   COM_BINLOG_DUMP
        offset          4 bytes  binlog position to start streaming from
        flags           2 bytes  0 here (blocking dump)
        server id       4 bytes  this replica's server id
        log name        var      binlog file name, NUL-terminated
    """
    payload = ''
    payload += utils.int1store(ServerCmd.BINLOG_DUMP)
    payload += utils.int4store(offset)
    payload += utils.int2store(0)
    payload += utils.int4store(self.get_server_id())
    payload += log_file
    payload += '\x00'
    log.debug("len(payload) = %d" % len(payload))
    # send BINLOG_DUMP command and parse the OK packet response.
    self._socket.send(payload, 0)
    ok_packet = self._socket.recv()
    parser = MySQLProtocol()
    ok_packet = parser.parse_ok(ok_packet)
    print ok_packet
def get_var(send_request, var_name):
    """Resolve a local variable, optionally drilling into its structure.

    ``var_name`` may be a plain name or carry an index path such as
    ``name['a'][0]``; each bracketed segment is applied in turn
    (numeric segments index sequences, the rest index mappings).

    :param send_request: SendRequest instance holding ``local_var``
    :param var_name: variable name, optionally followed by a [...] path
    :return: the resolved value, or None when the name is unknown
    """
    bracket = var_name.find("[")
    if bracket == -1:
        name, path = var_name, None
    else:
        name, path = var_name[:bracket], var_name[bracket:]
    value = send_request.local_var.get(name)
    if path:
        # Quotes are irrelevant once we split on brackets; drop them.
        stripped = path.replace("'", "").replace('"', "")
        for key in re.findall(r"\[(.*?)\]", stripped):
            value = value[int(key) if key.isdigit() else key]
    log.debug("变量:{}的值为:{}".format(var_name, value))
    return value
def binlog_dump(self, log_file, offset): """ COM_BINLOG_DUMP +=============================================+ | packet header | packet length 0 : 3 | | +-------------------------+ | | sequence number 3 : 1 | +============================================+ | command packet | command code 4 : 1 | COM_BINLOG_DUMP | +------------------------―+ | | offset 5 : 4 | | +-------------------------+ | | flags 9 : 2 | | +-------------------------+ | | server id 11 : 4| | +-------------------------+ | | log name 15 : x| +============================================+ """ payload = "" payload += utils.int1store(ServerCmd.BINLOG_DUMP) payload += utils.int4store(offset) payload += utils.int2store(0) payload += utils.int4store(self.get_server_id()) payload += log_file payload += "\x00" log.debug("len(payload) = %d" % len(payload)) # send BIGLOGDUMP command and parse ok packet response. self._socket.send(payload, 0) ok_packet = self._socket.recv() parser = MySQLProtocol() ok_packet = parser.parse_ok(ok_packet) print ok_packet
def json_extractor(send_request, var_name, json_path, match_no="0", default=None):
    """Extract a value from the response body via jsonpath into local_var.

    :param send_request: SendRequest instance (must carry a ``response``)
    :param var_name: name under which the result is stored in local_var
    :param json_path: jsonpath expression; ["key"]/['key'] access is
                      normalised to dotted form before evaluation
    :param match_no: 1-based index into the match list; "0" picks one
                     match at random
    :param default: value stored when the body is empty or nothing matches
    """
    if not hasattr(send_request, "response"):
        return
    local_var = send_request.local_var
    data = json.loads(send_request.response.body)
    if len(data) == 0:
        # BUG FIX: `if default:` silently dropped falsy defaults
        # (0, "", False); only skip when no default was given at all.
        if default is not None:
            local_var[var_name] = default
        return
    # Normalise bracket access to dotted jsonpath notation.
    json_path = json_path.replace("'", '"')
    json_path = json_path.replace('["', '.')
    json_path = json_path.replace('"]', '')
    res = jsonpath.jsonpath(data, json_path)
    # BUG FIX: a leftover debug print() wrote to stdout; route it
    # through the logger like the rest of the module.
    log.debug(res)
    if res:
        res = res[int(match_no) - 1] if match_no != "0" else random.choice(res)
        log.debug("提取结果为:{}".format(res))
        local_var[var_name] = res
    elif default is not None:
        local_var[var_name] = default
def is_error(self):
    """Return True when this packet is a MySQL ERR packet (header 0xff)."""
    marker = utils.read_int(self._body, 1)[1]
    if marker != 0xff:
        return False
    log.debug("received err packet.")
    return True
def func(*args, **kwargs):
    """Wrapper around the decorated *function*: run configured pre-steps,
    the wrapped call itself, then the post-steps, sharing state through a
    CommonData instance.

    ``pre_process``/``post_process`` may be passed positionally
    (args[1], args[2]) or as keyword arguments; args[0] is always the
    SendRequest instance.
    """
    com_data = CommonData()
    send_request = args[0]
    # Three accepted call shapes; anything else is a usage error.
    if len(args) == 1 and "post_process" in kwargs and "pre_process" in kwargs:
        com_data.pre_process = kwargs["pre_process"]
        com_data.post_process = kwargs["post_process"]
    elif len(args) == 2 and "post_process" in kwargs:
        com_data.pre_process = args[1]
        com_data.post_process = kwargs["post_process"]
    elif len(args) == 3:
        com_data.pre_process = args[1]
        com_data.post_process = args[2]
    else:
        assert False, "send_request方法缺少必要参数"
    # Load the JSON-file-defined steps, then append the caller-supplied
    # ones (if they are lists) so both sets get executed.
    pre_process = json.loads(
        com_data.get_json_pre_or_post(send_request, "pre_process"))
    post_process = json.loads(
        com_data.get_json_pre_or_post(send_request, "post_process"))
    post_process.extend(com_data.post_process if isinstance(
        com_data.post_process, list) else [])
    pre_process.extend(com_data.pre_process if isinstance(
        com_data.pre_process, list) else [])
    com_data.post_process = post_process
    com_data.pre_process = pre_process
    log.info("------------------执行前置步骤------------------")
    com_data.update_request(send_request)
    res = function(*args, **kwargs)
    log.info("-----------------执行后置操作-------------------")
    com_data.excute_post(send_request)
    log.debug("全部变量:{}".format(send_request.local_var))
    return res
def get_all_json_file(self):
    """Collect every .json file under test_case\\json_data (recursively).

    Populates ``self.json_data_files`` lazily: the directory tree is
    only scanned when the list is still empty, then the full path list
    is logged.
    """
    if len(self.json_data_files) == 0:
        json_data_dir = self.config.get_json_data()
        self.get_files(json_data_dir)
    log.debug("全部文件路径为:{}".format(json.dumps(self.json_data_files,
                                                    ensure_ascii=False, indent=2)))
def test():
    """Smoke-test the Log helper: swap in a new log file name, reload the
    configuration, then emit one message per level."""
    # Instantiate, change the log file name, and load the new config.
    xxx = log.Log()
    xxx.log_filename = 'test_log.log'
    xxx.log_config()
    # Exercise each level.
    # NOTE(review): the messages go through the module-level `log`
    # logger, not the instance configured above — confirm intended.
    log.debug('This is debug message')
    log.info('This is info message')
    log.warning('This is warning message')
def parse_detail(self, html): root = etree.HTML(html) # 获取当前页面的数据 info = {} shop_name = root.xpath('//*[@id="basic-info"]/h1/e/text()') shop_address = root.xpath('//*[@id="address"]/e/text()') phone_number = root.xpath('//*[@id="basic-info"]/p/d/text()') print(shop_name, shop_address, phone_number) log.debug(info) self.itemQueue.put(info)
def next(self):
    """Read the next binlog event from the socket.

    Raises StopIteration on an EOF or error packet.  When the event
    type is known to EventMap, the packet is re-parsed as that
    specific event class; otherwise the generic event is returned.
    """
    raw = self._socket.recv()
    event = BinlogEvent(raw)
    log.debug(str(event))
    if event.is_eof() or event.is_error():
        raise StopIteration
    specific_cls = EventMap().get_event_type(event.header.event_type)
    if not specific_cls:
        return event
    event = specific_cls(raw)
    log.debug(str(event))
    return event
def getDomInfoByName(conn):
    """Return the list of defined (inactive) domain names on *conn*.

    :param conn: open libvirt connection
    :return: list of names, or 1 on failure (legacy int convention kept
             for existing callers)
    """
    print('----get domain info by name -----')
    logging.debug('----get domain info by name -----')
    try:
        #myDom = conn.lookupByName(name)
        myDom = conn.listDefinedDomains()
        return myDom
    except Exception:
        # BUG FIX: the old handler formatted an undefined `name`
        # variable, raising NameError and masking the real failure.
        print('Failed to list the defined domains')
        logging.error('Failed to list the defined domains')
        return 1
def getDomInfoByID(conn):
    """Return the list of running domain IDs on *conn*.

    :param conn: open libvirt connection
    :return: list of IDs, or 1 on failure (legacy int convention kept
             for existing callers)
    """
    print('----get domain info by ID -----')
    logging.debug('----get domain info by ID -----')
    try:
        #myDom = conn.lookupByID(id)
        myDom = conn.listDomainsID()
        return myDom
    except Exception:
        # BUG FIX: the old handler formatted the `id` builtin into the
        # message (no local `id` existed), printing garbage instead of
        # useful context.
        print('Failed to list the running domain IDs')
        logging.error('Failed to list the running domain IDs')
        return 1
def excute_post(self, send_request):
    """Run every configured post-processing step.

    Each entry of ``self.post_process`` is parsed and has its variable
    keys substituted against *send_request*.  Anything other than a
    list of steps is ignored.
    """
    from tools import log
    data = send_request.data  # read kept for parity with the original
    if not isinstance(self.post_process, list):
        return
    for step in self.post_process:
        log.debug(step)
        self.Parser(step).keys_replace(send_request)
def add_table(self, db, table, col):
    """Register *table* under *db* and mark the given column names as
    tracked; non-string names are skipped with a warning."""
    tables = self._tables.setdefault(db, {})
    entry = tables.setdefault(
        table, {"columns_info": {}, "do_columns": {}, "pos_map": {}})
    for name in col:
        if not isinstance(name, str):
            log.warning("non-string col name.")
            continue
        entry["do_columns"].setdefault(name, None)
    log.debug(json.dumps(self._tables))
def startDomaction(conn,name):
    """Boot the libvirt domain called *name*.

    :param conn: open libvirt connection
    :param name: domain name to start
    :return: 0 on success, 1 on failure (legacy int convention)
    """
    print('----Start domain info by Name -----')
    logging.debug('----Start domain info by Name -----')
    # NOTE(review): lookupByName is outside the try, so an unknown name
    # raises instead of returning 1 — confirm intended.
    dom = conn.lookupByName(name)
    try:
        dom.create()
        print('Dome %s boot sucessfully' %dom.name())
        logging.info('Dome %s boot sucessfully'%dom.name())
        return 0
    except:
        print('Dome %s boot failed' %dom.name())
        logging.error('Dome %s boot failed' %dom.name())
        return 1
def startDomaction(conn, name):
    """Start (boot) the domain named *name* via *conn*.

    :return: 0 when the domain boots, 1 when create() fails
    """
    banner = '----Start domain info by Name -----'
    print(banner)
    logging.debug(banner)
    dom = conn.lookupByName(name)
    try:
        dom.create()
        ok_msg = 'Dome %s boot sucessfully' % dom.name()
        print(ok_msg)
        logging.info(ok_msg)
        return 0
    except:
        fail_msg = 'Dome %s boot failed' % dom.name()
        print(fail_msg)
        logging.error(fail_msg)
        return 1
def shutdownDomaction(conn, ID):
    """Forcibly stop (destroy) the domain with libvirt id *ID*.

    :return: 0 on success, 1 when destroy() fails
    """
    banner = '----Shutdown domain info by ID -----'
    print(banner)
    logging.debug(banner)
    dom = conn.lookupByID(ID)
    try:
        dom.destroy()
        state_msg = 'Dom %s State %s' % (dom.name(), dom.info()[0])
        print(state_msg)
        logging.info(state_msg)
        return 0
    except:
        fail_msg = 'Dom %s shutdown failed...' % dom.name()
        print(fail_msg)
        logging.error(fail_msg)
        return 1
def get_all_json_file(file_path):
    """Collect all .json files under *file_path* (interpreted relative to
    the project root), recursing into subdirectories.

    :param file_path: directory path relative to the project root
    :return: list of collected file paths
    """
    from tools.config_parser import ParseConfig
    base = ParseConfig().get_root_path()
    target = os.path.join(base, file_path)
    found = []
    get_files(target, found)
    log.debug("全部测试文件:{}".format(
        json.dumps(found, ensure_ascii=False, indent=2)))
    return found
def shutdownDomaction(conn,ID):
    """Destroy (hard power-off) the domain with libvirt id *ID*.

    :param conn: open libvirt connection
    :param ID: numeric domain id
    :return: 0 on success, 1 on failure (legacy int convention)
    """
    print('----Shutdown domain info by ID -----')
    logging.debug('----Shutdown domain info by ID -----')
    # NOTE(review): lookupByID is outside the try — an unknown ID raises
    # instead of returning 1; confirm intended.
    dom = conn.lookupByID(ID)
    try:
        dom.destroy()
        # info()[0] is the domain state code after destroy.
        print('Dom %s State %s' %(dom.name(),dom.info()[0]))
        logging.info('Dom %s State %s' %(dom.name(),dom.info()[0]))
        return 0
    except:
        print('Dom %s shutdown failed...' % dom.name())
        logging.error('Dom %s shutdown failed...' % dom.name())
        return 1
def write_json_file(component_dict, json_filename):
    """Render component/env version info to *json_filename*, one JSON
    object per component per line.

    Filenames containing "component" get the previous version as
    "current" (version[1]); otherwise current == new (version[0]).
    Components present in acs.component.CMPS are tagged "type":"acs".

    NOTE(review): `json_string` can be referenced before assignment for
    a non-ACS component that has an env_version — confirm the inputs
    guarantee that combination cannot occur.
    NOTE(review): the file is re-opened per component but only closed
    once after the loop; earlier handles are leaked until GC.
    """
    close_file = False
    json_env_string = None
    json_component_string = None
    # Attempt to remove any previous files, if none exists just eat the error
    try:
        os.remove(json_filename)
    except OSError:
        pass
    log.debug("evaluating components to generate %s" % json_filename)
    for component in component_dict.keys():
        # Python 2 dict API (iteritems).
        for key, version in component_dict[component].iteritems():
            if key == "component_env_version":
                if "component" in json_filename:
                    json_env_string = '"env_version":{"current":"%s","new":"%s"}}' % (
                        version[1], version[0])
                else:
                    json_env_string = '"env_version":{"current":"%s","new":"%s"}}' % (
                        version[0], version[0])
            if key == "component_version":
                if "component" in json_filename:
                    json_component_string = '"%s":{"version":{"current":"%s","new":"%s"}' % (
                        component, version[1], version[0])
                else:
                    json_component_string = '"%s":{"version":{"current":"%s","new":"%s"}' % (
                        component, version[0], version[0])
        # This section sets the type to ACS if the component is found in the static mapping
        if component in acs.component.CMPS:
            if json_env_string is not None and json_component_string is not None:
                json_string = '{%s,"type":"acs",%s}' % (
                    json_component_string, json_env_string)
            else:
                json_string = '{%s,"type":"acs"}' % json_component_string
        # If the component isn't apart of the ACS and doesn't have an env_version just output the component version
        else:
            if json_env_string is None:
                json_string = '{%s}' % json_component_string
        # Check if the file has already been created. If it does append to the file, if not do a write
        # operation
        # The file should only exist at this point if it has had current json data written to it
        if os.path.isfile(json_filename):
            write_file = open(json_filename, "a")
        else:
            write_file = open(json_filename, "w")
        write_file.write(json_string)
        write_file.write("\n")
        close_file = True
    if close_file:
        write_file.close()
        log.info("%s has been closed." % json_filename)
def get_columns_info(self):
    """Fill columns_info/pos_map for every registered table.

    Runs a zero-row SELECT per table just to obtain the cursor
    description, then records the description (keyed by column name)
    and the position map for each column listed in do_columns.
    Failures for one table are logged and that table is skipped.
    """
    for db, tables in self._tables.items():
        for table, desc in tables.items():
            try:
                # LIMIT 0,0 returns no rows but still yields column metadata.
                sql = "select * from %s.%s limit 0,0" % (db, table)
                res, columns_desc = self._query(sql)
                for idx, field in enumerate(columns_desc):
                    # field[0] is the column name in the cursor description.
                    if field[0] in desc["do_columns"]:
                        desc["columns_info"][field[0]] = field
                        desc["pos_map"][idx] = field[0]
            except:
                log.warning(get_trace_info())
                continue
    log.debug(json.dumps(self._tables))
def __get_response_data(self):
    """Collect the response data, log it, and attach it to the allure report.

    Builds a text blob of status code, formatted headers and body
    (empty string when there is no body).
    """
    status_code = self.response.status_code
    headers = self.response.headers
    body = self.response.body
    response_data = """{}
{}
{}
""".format(status_code, self.__format_headers(headers), body if body else "")
    log.debug(response_data)
    allure.attach(response_data, "--------------------响应数据--------------------",
                  allure.attachment_type.TEXT)
def write_json_file(component_dict, json_filename):
    """Render component/env version info to *json_filename*, one JSON
    object per component per line.

    Filenames containing "component" get the previous version as
    "current" (version[1]); otherwise current == new (version[0]).
    Components present in acs.component.CMPS are tagged "type":"acs".

    NOTE(review): `json_string` can be referenced before assignment for
    a non-ACS component that has an env_version — confirm the inputs
    guarantee that combination cannot occur.
    NOTE(review): the file is re-opened per component but only closed
    once after the loop; earlier handles are leaked until GC.
    """
    close_file = False
    json_env_string = None
    json_component_string = None
    # Attempt to remove any previous files, if none exists just eat the error
    try:
        os.remove(json_filename)
    except OSError:
        pass
    log.debug("evaluating components to generate %s" % json_filename)
    for component in component_dict.keys():
        # Python 2 dict API (iteritems).
        for key, version in component_dict[component].iteritems():
            if key == "component_env_version":
                if "component" in json_filename:
                    json_env_string = '"env_version":{"current":"%s","new":"%s"}}' % (version[1], version[0])
                else:
                    json_env_string = '"env_version":{"current":"%s","new":"%s"}}' % (version[0], version[0])
            if key == "component_version":
                if "component" in json_filename:
                    json_component_string = '"%s":{"version":{"current":"%s","new":"%s"}' % (
                        component, version[1], version[0])
                else:
                    json_component_string = '"%s":{"version":{"current":"%s","new":"%s"}' % (component, version[0], version[0])
        # This section sets the type to ACS if the component is found in the static mapping
        if component in acs.component.CMPS:
            if json_env_string is not None and json_component_string is not None:
                json_string = '{%s,"type":"acs",%s}' % (json_component_string, json_env_string)
            else:
                json_string = '{%s,"type":"acs"}' % json_component_string
        # If the component isn't apart of the ACS and doesn't have an env_version just output the component version
        else:
            if json_env_string is None:
                json_string = '{%s}' % json_component_string
        # Check if the file has already been created. If it does append to the file, if not do a write
        # operation
        # The file should only exist at this point if it has had current json data written to it
        if os.path.isfile(json_filename):
            write_file = open(json_filename, "a")
        else:
            write_file = open(json_filename, "w")
        write_file.write(json_string)
        write_file.write("\n")
        close_file = True
    if close_file:
        write_file.close()
        log.info("%s has been closed." % json_filename)
def add_table(self, db, table, col):
    """Register (db, table) and mark the names in *col* as tracked columns.

    Creates the nested bookkeeping dicts on first sight of the db or
    table; non-string column names are skipped with a warning.
    """
    if db not in self._tables:
        self._tables[db] = {}
    if table not in self._tables[db]:
        self._tables[db][table] = {
            "columns_info": {},  # column metadata, filled later
            "do_columns": {},    # tracked column names (values unused)
            "pos_map": {}        # column index -> column name
        }
    for i in col:
        if not isinstance(i, str):
            log.warning("non-string col name.")
            continue
        if i not in self._tables[db][table]["do_columns"]:
            self._tables[db][table]["do_columns"][i] = None
    log.debug(json.dumps(self._tables))
def get_full_columns(self):
    """Populate columns_info from `show full columns` for every table.

    NOTE(review): here columns_info is keyed by enumeration index,
    while get_columns_info keys it by column name — confirm which
    layout consumers expect.
    """
    for db, tables in self._tables.items():
        for table, desc in tables.items():
            try:
                sql = "show full columns from %s.%s" % (db, table)
                res, _ = self._query(sql)
                for idx, field in enumerate(res):
                    if field["Field"] in desc["do_columns"]:
                        desc["columns_info"][idx] = \
                            {"name":field["Field"], \
                             "type":field["Type"], \
                             "Default":field["Default"]}
            except:
                # One failing table must not stop the scan.
                log.warning(get_trace_info())
                continue
    log.debug(json.dumps(self._tables))
def get_full_columns(self):
    """Populate columns_info from `show full columns` for every table.

    NOTE(review): here columns_info is keyed by enumeration index,
    while get_columns_info keys it by column name — confirm which
    layout consumers expect.
    """
    for db, tables in self._tables.items():
        for table, desc in tables.items():
            try:
                sql = "show full columns from %s.%s" % (db, table)
                res, _ = self._query(sql)
                for idx, field in enumerate(res):
                    if field["Field"] in desc["do_columns"]:
                        desc["columns_info"][idx] = {
                            "name": field["Field"],
                            "type": field["Type"],
                            "Default": field["Default"],
                        }
            except:
                # One failing table must not stop the scan.
                log.warning(get_trace_info())
                continue
    log.debug(json.dumps(self._tables))
def __get_request_data(self):
    """Assemble the outgoing request data, log it, and attach it to allure.

    :return: dict with keys data/method/url/headers/files, ready for
             the actual send call
    """
    # Normalise empty body/files to None so the sender can skip them.
    body = None if not self.request.body or len(self.request.body) == 0 else self.request.body
    method = self.request.method
    url = self.request.url
    headers = self.request.headers
    files = None if not self.request.files or len(self.request.files) == 0 else self.request.files
    # File uploads are summarised as a placeholder instead of raw bytes.
    request_data = """{}
{}
{}
{}""".format(method, url, self.__format_headers(headers),
             body if body else "" if not files else "文件内容")
    log.debug(request_data)
    allure.attach(request_data, "--------------------请求数据--------------------",
                  allure.attachment_type.TEXT)
    return {"data": body, "method": method, "url": url, "headers": headers, "files": files}
def excute_pre(self, send_request):
    """Run every configured pre-processing step.

    dict/list steps are deep-merged into the (key-substituted) request
    payload and written back; any other step only has its variable
    keys replaced.
    """
    if self.pre_process:
        for s in self.pre_process:
            log.debug(s)
            if isinstance(s, dict) or isinstance(s, list):
                # Substitute variables in the raw payload, parse it,
                # merge the step's values in, and serialise it back.
                d = send_request.data
                d = self.Parser(d).keys_replace(send_request)
                d = json.loads(d)
                self.update_dict(d, s)
                send_request.data = json.dumps(d, ensure_ascii=False, indent=2)
            else:
                self.Parser(s).keys_replace(send_request)
def send_request(self, pre_process, post_process):
    """Main entry point: build, send, and record one HTTP request.

    :param pre_process: steps executed before sending (not referenced in
                        this body — presumably consumed by the wrapping
                        decorator; confirm)
    :param post_process: steps executed after the response (same note)
    :return: self, for call chaining
    """
    self.clear()
    self.request = BaseRequest(self.data)
    log.info("--------------获取请求数据-------------------")
    requst_data = self.__get_request_data()
    log.info("--------------发送请求----------------------")
    # Encode the body up front so non-ASCII payloads go out as UTF-8.
    if "data" in requst_data:
        requst_data["data"] = requst_data["data"].encode("utf-8") if requst_data["data"] else None
    response = self.__send_request(requst_data)
    log.debug("响应状态码为:{}".format(response.status_code))
    self.response = BaseResponse(response)
    log.info("--------------获取响应数据----------------------")
    self.__get_response_data()
    return self
def main():
    """Send a message to the local UDP server and log the replies.

    The payload comes from argv[1] (default 'www.google.com'); unless
    it is 'quit', two request/reply round-trips are performed.
    """
    log.debug('Hello libcoevent UDP!')
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    msg = str(sys.argv[1]) if len(sys.argv) > 1 else 'www.google.com'
    sock.settimeout(2.0)
    log.debug('parameter: %s' % msg)
    server = ('127.0.0.1', 2333)
    sock.sendto(msg.encode(), 0, server)
    log.debug('Data sent')
    if msg == 'quit':
        return
    data, addr = sock.recvfrom(2048, 0)
    log.debug('Got reply: "%s", server address: %s' % (data.decode(), addr))
    sock.sendto('Thank you!'.encode(), 0, server)
    data, addr = sock.recvfrom(2048, 0)
    log.debug('Got reply: "%s", server address: %s' % (data.decode(), addr))
    return
def process_pod_json(command, status_dict, version_dict, component_label, incoming_session, container_dict):
    """Run an `oc get pod ... -o json` command and record pod health.

    Fills (keyed by deployment-config name, recovered from the pod name):
      - status_dict[deployment] = {pod_name: ready?}
      - version_dict[deployment] = {component_label: deployment number}
      - container_dict[container_name] = docker image of failed pods
    When the api returns no items, the component name is recovered from
    the command's ``-l`` label and marked failed.
    """
    cmd_output = incoming_session.get_cmd_output(command)
    json_data = json.loads(cmd_output)
    container_failed = False
    if json_data['items']:
        for component in json_data['items']:
            component_name = str(component['metadata']['name'])
            # Because of reliable naming convention, you can split on the '-' and extract the deployment
            # config name so that data is stored with the same key as the deployment config dict
            deployment_name = "-".join(component_name.split("-")[:-2])
            deployment_number = int(component['metadata']['annotations']
                                    ['openshift.io/deployment-config.latest-version'])
            try:
                # Raises KeyError for pods with no containerStatuses
                # (e.g. still Pending) — handled below.
                component['status']['containerStatuses']
                docker_image_name = str(component['status']['containerStatuses'][0]['image'])
                container_name = str(component['status']['containerStatuses'][0]['name'])
                for container in component['status']['containerStatuses']:
                    if not container['ready']:
                        container_failed = True
                        container_dict[container_name] = docker_image_name
            except KeyError:
                log.debug("%s has a state of %s" % (component['metadata']['name'],
                                                    component['status']['phase']))
                pass
            # The api can report that a pod is {ready: True} even if a container is down
            # Therefore, flag the pod as not ready
            if container_failed:
                status_dict[deployment_name] = {component_name : False}
            else:
                status_dict[deployment_name] = {component_name: True}
            version_dict[deployment_name] = {component_label: deployment_number}
    else:
        # instead of reparsing output, just parse the command. Since this component is failing
        # the api has no information about the failed pod and thus no easy way to return this info
        missing_component = command.split("-l")[1].split("-o")[0].strip()
        status_dict[missing_component] = {None: False}
def parseGFX(result=''):
    """Parse `dumpsys gfxinfo` style output into jank/fps statistics.

    :param result: raw gfxinfo text for the target package
    :return: dict with jank_count / variance / fps; -1 for blank input;
             exits the process when there is no data at all
    """
    if result == '' or 'No process found' in result:
        print('no data find!is your current pkg is'+Config.packageName);
        exit(-1)
    # NOTE(review): `== None` can never be true for the str returned by
    # .lower(); this half of the check is effectively dead.
    if result.strip().lower() == '' or result.strip().lower() == None:
        return -1;
    # Android 6 formats the dump differently from later releases.
    if '6' in UsefulHelper().getPropValue('ro.build.version.release').lower():
        result = result.split('Stats since')[1].split('View hierarchy:')[0];
        data = result[result.index('Execute'):result.rfind('Stats since')].strip();
        data_lines = data.split('\n\n')[1:-1];
    else:
        result = result.split('Execute')[-1].split('View hierarchy:')[0]
        data = result;
        data_lines = data.split('\n')[1:-2];
    log.debug('parse gfx ok');
    frame_times = [];
    dict = {};  # NOTE(review): shadows the builtin `dict`
    jank_count = 0;
    frame_count = data_lines.__len__();
    frame_timeout = 0;
    # Per-frame totals: sum of the four pipeline stage times (ms).
    for datas in data_lines:
        l = datas.split('\t')
        frame_time = round(float(l[1])+float(l[2])+float(l[3])+float(l[4]),2);
        frame_times.append(frame_time);
        # Frames above one 60Hz vsync interval (16.67 ms) count as jank.
        if frame_time > 16.67:
            jank_count += 1;
        log.debug("frame_time:"+str(frame_time))
        # Accumulate how many whole vsync intervals each frame overran.
        if frame_time % 16.67 == 0:
            frame_timeout += frame_time / 16.67 -1;
        else:
            frame_timeout += int(frame_time / 16.67);
        log.debug("frame_timeout:" + str(frame_timeout))
    try:
        _fps = frame_count * 60 / (frame_count + frame_timeout);
    except:
        print('parse error!no frame data!do you keep move?');
        exit(-1)
    # Aggregate the metrics.
    dict['jank_count'] = jank_count;
    dict['variance'] = round(numpy.var(frame_times),2);
    dict['fps'] = round(_fps,2);
    log.debug('jank:'+str(jank_count)+' varance:'+str(dict['variance']));
    return dict;
def main():
    """Send one message to the local TCP server and log the reply.

    argv[1] overrides the default payload; 'quit' skips waiting for a
    reply.
    """
    log.debug('Hello libcoevent TCP!')
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    msg = str(sys.argv[1]) if len(sys.argv) > 1 else 'www.google.com'
    sock.settimeout(2.0)
    sock.connect(('127.0.0.1', 6666))
    log.debug('parameter: %s' % msg)
    sock.send(msg.encode())
    log.debug('Data sent')
    if msg == 'quit':
        return
    data = sock.recv(2048)
    log.debug('Got reply: "%s"' % (data.decode(), ))
    return
def inspect_load_state(component_to_inspect, gateway_session, pod_status_dict,
                       deployment_version_dict, container_status_dict ):
    """Validate that every pod of *component_to_inspect* deployed cleanly.

    Queries the openshift master for the component's deployment configs
    and pods, compares deployed versions against the latest config
    version, and reports failures.  Names of orphaned deployer pods are
    written to /tmp/deployer_cleanup for later cleanup.

    NOTE(review): `deployer_name` may be unbound on some failure paths
    (e.g. when the inner lookup raises) before it is concatenated —
    confirm those paths cannot be reached.

    :return: True when any component failed validation, else False
    """
    deployment_config_label = "internal.acs.amadeus.com/component=%s" % component_to_inspect
    get_dc_command = "sudo oc get dc -l %s -o json" % deployment_config_label
    close_file = False
    deployer_and_component = None
    # This is the file which will have the deployer name in it if there is a failure
    filename = "/tmp/deployer_cleanup"
    # Attempt to remove the file from a previous run to ensure the file is always empty
    try:
        os.remove(filename)
    except OSError:
        pass
    # Need to check the component against acs.component.CMPS[cmp_name]['cluster']
    # All of the component types are statically set in component.py
    try:
        acs.component.CMPS[component_to_inspect]
    except KeyError:
        print("Component is not part of the ACS category. Aborting the validation process")
        sys.exit(0)
    os_master_session = openshift.cluster.get_master_session(gateway_session, component_to_inspect)
    component_attributes = process_deployment_config_json(get_dc_command, os_master_session)
    # Gather pod status/version info for every pod label of every config.
    for first_key in component_attributes.keys():
        for pod_label in component_attributes[first_key].keys():
            get_pod_command = "sudo oc get pod -l %s -o json" % pod_label
            process_pod_json(get_pod_command, pod_status_dict, deployment_version_dict,
                             pod_label, os_master_session, container_status_dict)
    exit_with_error = False
    # compare the dc latest version to component latest version
    for first_key in deployment_version_dict.keys():
        for second_key, value in deployment_version_dict[first_key].iteritems():
            if component_attributes[first_key][second_key] != deployment_version_dict[first_key][second_key]:
                # If there is a problem, store the data with the component name, in the same way that it is stored
                # if there are no pods running. i.e. name=component. This allows consistent error handling
                # Swap the first_key and second_key to normalize the data in the dict
                pod_status_dict.pop(first_key)
                pod_status_dict[second_key] = {first_key: False}
    # At this point the dict looks like:
    # pod_status_dict['name=ahp-report-audit-dmn'] = {'report-audit-dmn-deployment': False}
    for key in pod_status_dict.keys():
        log.info("Validating component: %s" % key)
        for second_key, value in pod_status_dict[key].iteritems():
            if not value:
                if "name" in key:
                    log.warning("%sThis component did not deploy at all: \t%s\n" % (textColours.FAIL, key))
                    # name=component will be in the key if there was a problem
                    # The second key will be empty if the problem was that the pod did not exist. Usually when a pod
                    # no longer exists, there is no longer a deployer pod kicking around.
                    # This will get the name of the deployer pod and write it to a file for cleanup before
                    # failing back to the previous build
                    deployer_name = second_key + "-" + str(component_attributes[second_key][key]) + "-deploy"
                else:
                    print("%sThis component has container(s) not ready: \t%s\n" % (textColours.FAIL, key)),
                    print("\nContainer infomration from failure:\n"),
                    for container in container_status_dict.keys():
                        print("\tDocker image name: %s\n" % (container_status_dict[container])),
                        print("\tContainer name: %s\n" % container)
                    if second_key is not None:
                        try:
                            # Pod names end with a numeric suffix; strip it
                            # to recover the deployment config name.
                            try:
                                int(second_key.split("-")[-1])
                            except ValueError:
                                second_key = "-".join(second_key.split("-")[:-1])
                            command = "sudo oc get pod -l openshift.io/deployer-pod-for.name=%s -o json" % \
                                (second_key)
                            log.info("Running command: %s" % command)
                            cmd_output = os_master_session.get_cmd_output(command)
                            deployer_json_data = json.loads(cmd_output)
                            deployer_name = deployer_json_data['items'][0]['metadata']['name']
                            log.warning("Deployer pod found in pod json for %s, writing %s" % (deployer_name, filename))
                        except IndexError:
                            log.warning("%sNo deployer pods found to clean up for: %s" % (textColours.FAIL, second_key))
                        except KeyError:
                            print("key error")
                            pass
                deployer_and_component = component_to_inspect + " : " + deployer_name
                if deployer_and_component is not None:
                    write_file = open(filename, "a")
                    write_file.write(deployer_and_component)
                    write_file.write("\n")
                    close_file = True
                exit_with_error = True
    if close_file:
        log.debug("%s has been written" % filename)
        write_file.close()
    return(exit_with_error)
def server_receive(self):
    """Accept monitor-agent connections forever and process their data.

    Binds to self.host:self.port, writes a marker file once a client
    connects, echoes received chunks back, and feeds the parsed
    payload to self.do_working().  Ctrl-C re-raises to abort.
    """
    ip = self.host
    port = self.port
    s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
    s.bind((ip,int(port)))
    s.listen(9)
    # Marker file that other tooling watches to see a client connected.
    check_file = '/root/script/monitoring_kvm/tools/check_client_conn'
    try:
        while True:
            ####################################
            # Process the connection
            try:
                clientconn,clientaddr=s.accept()
            except (KeyboardInterrupt, SystemExit):
                raise
                # NOTE(review): unreachable — `raise` exits first.
                logging.error("you have CTRL+C,Now quit")
            except:
                traceback.print_exc()
                logging.error(traceback.print_exc())
            try:
                print "Got connection from %s",clientconn.getpeername()
                logging.info("Got connection from %s", clientconn.getpeername())
                with open(check_file,'w') as f:
                    f.writelines('1')
                while True:
                    remote_data=clientconn.recv(8094)
                    if not len(remote_data):
                        # NOTE(review): socket.send takes an int flags
                        # argument; passing remote_data here raises
                        # TypeError — confirm intended call.
                        clientconn.send('welcome',remote_data)
                        logging.info(("The client data has been received and connection will be disconected!"))
                        break
                    clientconn.sendall(remote_data)
                    # SECURITY(review): eval() on data received from the
                    # network — consider ast.literal_eval or json.loads.
                    recv_data = eval(remote_data)
                    remote_data += remote_data
                    print "Data is :",remote_data
                    logging.debug(remote_data)#####
                    self.do_working(recv_data)
            except (KeyboardInterrupt, SystemExit):
                raise
                # NOTE(review): unreachable — `raise` exits first.
                logging.error("you have CTRL+C,Now quit")
            except:
                traceback.print_exc()
                logging.error(traceback.print_exc())
            # Close the connection
            try:
                clientconn.close()
            except KeyboardInterrupt:
                raise
                # NOTE(review): unreachable — `raise` exits first.
                logging.error("you have CTRL+C,Now quit")
            except:
                traceback.print_exc()
                logging.error(traceback.print_exc())
    except (KeyboardInterrupt, SystemExit):
        print "you have CTRL+C,Now quit"
        raise
        # NOTE(review): unreachable — `raise` exits first.
        logging.error(traceback.print_exc())
    except:
        traceback.print_exc()
        logging.error(traceback.print_exc())
    finally:
        # Always release the listening socket.
        s.close()