def __init__(self, logger, config, ip_manager, connection_manager,
             http1worker=Http1Worker, http2worker=Http2Worker):
    """Dispatch HTTP tasks to a pool of HTTP/1 and HTTP/2 workers.

    Wires the connection manager's ssl-created callback to this
    dispatcher, initialises task/worker bookkeeping and traffic
    statistics, then starts the dispatcher and worker-creation
    background threads.

    :param logger: project logger object
    :param config: config object; self.config.max_task_num is read by request()
    :param ip_manager: IP selection manager (stored for workers)
    :param connection_manager: provides SSL sockets; we register a callback on it
    :param http1worker/http2worker: worker classes, injectable for tests
    """
    # Explicit import: the original used Queue.deque(), which only works
    # because the stdlib queue module happens to do
    # "from collections import deque" internally — an implementation
    # detail, not a public API.
    from collections import deque

    self.logger = logger
    self.config = config
    self.ip_manager = ip_manager
    self.connection_manager = connection_manager
    self.connection_manager.set_ssl_created_cb(self.on_ssl_created_cb)

    self.http1worker = http1worker
    self.http2worker = http2worker

    # task dispatch state
    self.request_queue = Queue.Queue()
    self.workers = []
    self.working_tasks = {}
    self.h1_num = 0
    self.h2_num = 0
    self.last_request_time = time.time()
    self.task_count_lock = threading.Lock()
    self.task_count = 0
    self.running = True

    # for statistic
    self.success_num = 0
    self.fail_num = 0
    self.continue_fail_num = 0
    self.last_fail_time = 0
    self.rtts = []
    self.last_sent = self.total_sent = 0
    self.last_received = self.total_received = 0
    self.second_stats = deque()  # was Queue.deque() — see import note above
    self.last_statistic_time = time.time()
    self.second_stat = {
        "rtt": 0,
        "sent": 0,
        "received": 0
    }
    self.minute_stat = {
        "rtt": 0,
        "sent": 0,
        "received": 0
    }

    self.trigger_create_worker_cv = SimpleCondition()
    self.wait_a_worker_cv = simple_queue.Queue()

    # background threads: task dispatcher and on-demand worker creator
    threading.Thread(target=self.dispatcher).start()
    threading.Thread(target=self.create_worker_thread).start()
def get_conn(self, host, ips, port, timeout=5):
    """Return a usable socket to host:port, racing connects to several IPs.

    Tries the connection cache first; otherwise filters/sorts candidate
    IPs by recorded connect time, spawns one connect thread per IP with
    a growing stagger, and returns the first cached socket that appears.
    Returns None when no candidate IP is usable; on overall timeout,
    returns whatever the cache holds (possibly None).
    """
    # xlog.debug("connect to %s:%d %r", host, port, ips)
    end_time = time.time() + timeout
    host_port = "%s:%d" % (host, port)

    # fast path: an idle cached connection for this host:port
    sock = self.get_sock_from_cache(host_port)
    if sock:
        return sock

    # score candidates by historical connect time; entries look like
    # "ip|extra" so only the part before '|' is the IP
    ip_rate = {}
    for ipd in ips:
        ipl = ipd.split("|")
        ip = ipl[0]
        connect_time = g.ip_cache.get_connect_time(ip, port)
        if connect_time >= 8000:
            # 8000ms+ history: treat the IP as effectively dead
            continue
        ip_rate[ip] = connect_time
    if not ip_rate:
        return None

    # fastest-first ordering
    ip_time = sorted(ip_rate.items(), key=operator.itemgetter(1))
    ordered_ips = [ip for ip, rate in ip_time]

    # start one connect attempt per IP, staggered: each attempt gets a
    # slightly longer grace period (wait_t grows 0.2, 0.3, ...) before
    # the next IP is also tried
    wait_queue = simple_queue.Queue()
    wait_t = 0.2
    for ip in ordered_ips:
        threading.Thread(target=self.create_connect, args=(wait_queue, host, ip, port)).start()

        status = wait_queue.get(wait_t)
        if status:
            # a connect finished; the socket (if good) was put in the cache
            sock = self.get_sock_from_cache(host_port)
            if sock:
                return sock
        else:
            # NOTE(review): collapsed source is ambiguous here — this
            # sleep is read as the no-signal branch of "if status"; confirm
            time.sleep(wait_t)
        wait_t += 0.1

    # all attempts launched; wait out the remaining budget for any of
    # them to land a socket in the cache
    while True:
        time_left = end_time - time.time()
        if time_left <= 0:
            # budget exhausted: last-chance cache lookup (may be None)
            return self.get_sock_from_cache(host_port)

        status = wait_queue.get(time_left)
        if status:
            sock = self.get_sock_from_cache(host_port)
            if sock:
                return sock
def __init__(self, ssl_sock, close_cb, retry_task_cb, idle_cb, log_debug_data):
    """HTTP/1 worker bound to one SSL socket.

    Initialises per-request bookkeeping, seeds the timing trace with the
    socket's connect time, then starts the work loop and keep-alive
    background threads.
    """
    super(HTTP1_worker, self).__init__(ssl_sock, close_cb, retry_task_cb, idle_cb, log_debug_data)

    # per-request state
    self.task = None
    self.request_onway = False
    self.transfered_size = 0

    # timing trace: list of [timestamp, label], seeded with connect time
    self.trace_time = [[ssl_sock.create_time, "connect"]]
    self.record_active("init")

    # incoming tasks for this worker
    self.task_queue = simple_queue.Queue()

    # background threads: request processing and connection keep-alive
    threading.Thread(target=self.work_loop).start()
    threading.Thread(target=self.keep_alive_thread).start()
def __init__(self, headers, body, queue, url, timeout):
    """Represent one in-flight request/response task.

    Stores the request parts, a response queue, and streaming-body
    bookkeeping used while the response is read incrementally.
    """
    self.headers = headers
    self.body = body
    self.queue = queue
    self.url = url
    self.timeout = timeout

    # creation time also keys the unique id
    now = time.time()
    self.start_time = now
    self.unique_id = "%s:%f" % (url, now)

    self.trace_time = []

    # streaming response body state
    self.body_queue = simple_queue.Queue()
    self.body_len = 0
    self.body_readed = 0
    self.content_length = None
    self.read_buffer = ""

    # lifecycle flags
    self.responsed = False
    self.finished = False
    self.retry_count = 0
def request(self, method, host, path, headers, body, url=b"", timeout=60):
    """Submit one HTTP task and block for its response.

    Normalises all request parts to bytes, applies best-effort
    back-pressure against config.max_task_num, enqueues a Task for the
    dispatcher and waits up to `timeout` seconds on the task's queue.

    :returns: the response object, or None on overload/timeout/error
    """
    method = utils.to_bytes(method)
    host = utils.to_bytes(host)
    path = utils.to_bytes(path)
    headers = utils.to_bytes(headers)
    body = utils.to_bytes(body)

    # back-pressure: unlocked read of task_count is racy but cheap;
    # overload is answered with a pause and None, never an exception
    if self.task_count > self.config.max_task_num:
        self.logger.warn("task num exceed")
        time.sleep(1)
        return None

    with self.task_count_lock:
        self.task_count += 1

    try:
        # self.logger.debug("task start request")
        if not url:
            url = b"%s %s%s" % (method, host, path)
        self.last_request_time = time.time()
        q = simple_queue.Queue()
        task = http_common.Task(self.logger, self.config, method, host, path,
                                headers, body, q, url, timeout)
        task.set_state("start_request")
        self.request_queue.put(task)
        response = q.get(timeout=timeout)
        if response and response.status == 200:
            self.success_num += 1
            self.continue_fail_num = 0
        else:
            # fixed: the old code logged "timeout" even when a response
            # arrived with a non-200 status; distinguish the two cases
            if response:
                self.logger.warn("task %s %s %s status:%r", method, host, path, response.status)
            else:
                self.logger.warn("task %s %s %s timeout", method, host, path)
            self.fail_num += 1
            self.continue_fail_num += 1
            self.last_fail_time = time.time()
        task.set_state("get_response")
        return response
    except Exception as e:
        # boundary catch: log and fall through to return None
        self.logger.exception("http_dispatcher:request:%r", e)
    finally:
        with self.task_count_lock:
            self.task_count -= 1
def __init__(self, logger, config, ip_manager, connection_manager):
    """Lightweight dispatcher: wires the connection manager callback,
    initialises worker bookkeeping, and starts the dispatcher and
    worker-creation background threads."""
    self.logger = logger
    self.config = config
    self.ip_manager = ip_manager
    self.connection_manager = connection_manager
    # new SSL connections are handed back to us via this callback
    self.connection_manager.set_ssl_created_cb(self.on_ssl_created_cb)

    # task dispatch state
    self.request_queue = Queue.Queue()
    self.workers = []
    self.working_tasks = {}
    self.h1_num = 0  # active HTTP/1 workers
    self.h2_num = 0  # active HTTP/2 workers
    self.last_request_time = time.time()
    self.running = True

    # NOTE(review): "triger" is a typo, but the attribute name is
    # presumably read by create_worker_thread elsewhere — keep as-is
    self.triger_create_worker_cv = SimpleCondition()
    self.wait_a_worker_cv = simple_queue.Queue()

    # background threads: task dispatcher and on-demand worker creator
    threading.Thread(target=self.dispatcher).start()
    threading.Thread(target=self.create_worker_thread).start()
def __init__(self, host, log_debug_data):
    """Per-host dispatcher: sets up worker bookkeeping, starts the
    dispatcher and worker-creation threads, then creates the HTTPS
    connection manager for this host."""
    self.host = host
    self.log_debug_data = log_debug_data

    # task dispatch state
    self.request_queue = Queue.Queue()
    self.workers = []
    self.working_tasks = {}
    self.h1_num = 0
    self.h2_num = 0
    self.create_worker_th = None
    self.last_request_time = time.time()

    # condition/queue used to signal and wait for worker availability
    # (attribute name keeps the original "triger" spelling — callers use it)
    self.triger_create_worker_cv = SimpleCondition()
    self.wait_a_worker_cv = simple_queue.Queue()

    # background threads must be running before connections arrive
    threading.Thread(target=self.dispatcher).start()
    threading.Thread(target=self.create_worker_thread).start()

    # move created ssl to worker after ssl timeout
    self.https_manager = connect_manager.Https_connection_manager(
        host, self.on_ssl_created_cb)
def request(self, method, host, path, headers, body, url="", timeout=60):
    """Submit one HTTP task and block up to `timeout` seconds for its
    response; updates success/fail statistics and returns the response
    (None on timeout)."""
    # self.logger.debug("task start request")
    if not url:
        url = "%s %s%s" % (method, host, path)
    self.last_request_time = time.time()

    q = simple_queue.Queue()
    task = http_common.Task(self.logger, self.config, method, host, path,
                            headers, body, q, url, timeout)
    task.set_state("start_request")
    self.request_queue.put(task)

    response = q.get(timeout=timeout)
    ok = bool(response) and response.status == 200
    if ok:
        self.success_num += 1
        self.continue_fail_num = 0
    else:
        self.fail_num += 1
        self.continue_fail_num += 1
        self.last_fail_time = time.time()
    task.set_state("get_response")
    return response
def query(self, domain, timeout=3, use_local=False):
    """Resolve `domain` over UDP DNS, trying servers in turn.

    Picks an unused 16-bit query id, sends the request to each server in
    the chosen list until answers arrive or `timeout` expires, caches
    any IPs found, and returns the (possibly empty) IP list.
    """
    t0 = time.time()
    end_time = t0 + timeout

    # pick a query id not currently awaited
    while True:
        id = random.randint(0, 65535)
        if id not in self.waiters:
            break

    que = simple_queue.Queue()
    que.domain = domain
    ips = []
    if use_local:
        server_list = self.dns_server.local_list
    else:
        server_list = self.dns_server.public_list
    for ip in server_list:
        new_time = time.time()
        if new_time > end_time:
            # overall budget spent — stop trying further servers
            break

        self.send_request(id, domain, ip)
        # NOTE(review): waiter is registered only after sending — a very
        # fast reply could in principle race this registration; confirm
        # that the receive loop tolerates unknown ids
        self.waiters[id] = que

        # collect answers: a short first wait, then wait out the rest of
        # this server's ~1s slot (second get may be given a non-positive
        # timeout if the first already consumed the slot)
        ips += que.get(1) or []
        ips += que.get(new_time + 1 - time.time()) or []
        if ips:
            ips = list(set(ips))  # de-duplicate before caching
            g.domain_cache.set_ips(domain, ips)
            break

    # always unregister the waiter
    if id in self.waiters:
        del self.waiters[id]

    t1 = time.time()
    xlog.debug("query by udp, %s cost:%f, return:%s", domain, t1-t0, ips)
    return ips
def __init__(self, logger, config, method, host, path, headers, body, queue, url, timeout):
    """One in-flight request/response task handed to a worker.

    Captures the full request, a response queue, and the streaming-body
    bookkeeping a worker updates while reading the response.
    """
    self.logger = logger
    self.config = config

    # request parts
    self.method = method
    self.host = host
    self.path = path
    self.headers = headers
    self.body = body
    self.queue = queue
    self.url = url
    self.timeout = timeout

    # creation time also keys the unique id
    now = time.time()
    self.start_time = now
    self.unique_id = "%s:%f" % (url, now)

    self.trace_time = []

    # streaming response body state
    self.body_queue = simple_queue.Queue()
    self.body_len = 0
    self.body_readed = 0
    self.content_length = None
    self.worker = None
    self.read_buffer = ""

    # lifecycle flags
    self.responsed = False
    self.finished = False
    self.retry_count = 0
def query(self, domain, timeout=3):
    """Resolve `domain`, retrying (and rotating servers) until the
    domain cache has IPs or `timeout` expires; returns the IP list
    (possibly empty)."""
    end_time = time.time() + timeout
    id = random.randint(0, 65535)
    que = simple_queue.Queue()
    que.domain = domain
    ips = []
    while time.time() < end_time:
        self.send_request(id, domain)
        self.waiters[id] = que
        # wait up to ~0.5s for the receiver thread to signal an answer
        que.wait(time.time() + 0.5)
        # answers are delivered via the shared domain cache, not que
        ips = g.domain_cache.get_ips(domain)
        if len(ips):
            break

        if "." in domain:
            # real domain: try the next configured server
            self.dns_server.next_server()
        else:
            # NOTE(review): single-label names give up after one attempt
            # — presumably not worth rotating servers for; confirm intent
            break

    # always unregister the waiter
    if id in self.waiters:
        del self.waiters[id]

    return ips