Ejemplo n.º 1
0
    def recheck_ip_worker(self):
        '''Background thread: periodically re-verify the IPs in self.ip_list.

        Runs while self.running is set; the while-else arm fires on normal
        loop exit and asks all pick workers to stop.
        '''
        while self.running:
            try:
                sleep(1)
                # no usable IPv4 or IPv6 network — schedule all pick workers to stop
                if not internet_v4.last_stat and not internet_v6.last_stat:
                    self.kill_pick_worker_cnt = self.pick_worker_cnt
                    continue
                self.check_pick_ip_worker()
                pass_time = mtime() - self.last_check
                if not self.ip_list:
                    # rate-limit the "no IPs" warning to one per min_recheck_time
                    if pass_time > self.min_recheck_time:
                        self.logger.warning('当前 %s IP 数量为 0', self.type)
                        self.last_check = mtime()
                    continue
                # spread rechecks so each IP is checked about once per
                # recheck_loop_time, but never faster than min_recheck_time
                if pass_time < self.min_recheck_time or \
                        pass_time < self.recheck_loop_time / len(self.ip_list):
                    continue

                ip = self.ip_list[0]
                if not is_ip_use(ip):
                    self.logger.warning('发现配置未使用的 IP:%s', ip)
                    self.remove_ip(ip)
                    continue
                self.last_check = mtime()
                if self.check_ip(ip):
                    self.ip_source.report_recheck_ok(ip)
                else:
                    self.ip_source.report_recheck_fail(ip)
            except Exception as e:
                self.logger.exception('recheck_ip_worker 发生错误:%s', e)
        else:
            # loop ended normally (self.running cleared): stop pick workers
            self.kill_pick_worker_cnt = self.pick_worker_cnt
Ejemplo n.º 2
0
    def _create_connection(self,
                           ipaddr,
                           queobj,
                           timeout=None,
                           get_cache_sock=None):
        '''Worker: open one TCP connection to ipaddr and report the result.

        Puts either a connected socket or the raised NetWorkIOError into
        queobj; the consumer distinguishes them by type.  Successful connect
        times are recorded in self.tcp_connection_time so later picks can
        prefer faster addresses.
        '''
        if get_cache_sock:
            # reuse a cached connection when one is available
            sock = get_cache_sock()
            if sock:
                queobj.put(sock)
                return

        sock = None
        try:
            sock = self.get_tcp_socket(ipaddr[0], timeout)
            # start connection time record
            start_time = mtime()
            # TCP connect
            sock.connect(ipaddr)
            # record TCP connection time
            self.tcp_connection_time[ipaddr] = sock.tcp_time = mtime(
            ) - start_time
            # put socket object to output queobj
            sock.xip = ipaddr
            queobj.put(sock)
        except NetWorkIOError as e:
            if sock:
                sock.close()
            # any socket.error: put the Exception to output queobj.
            e.xip = ipaddr
            queobj.put(e)
            # reset a large timeout for this ipaddr so it is deprioritized
            self.tcp_connection_time[ipaddr] = self.timeout + 1
Ejemplo n.º 3
0
def dns_system_resolve(host, qtypes=qtypes):
    '''Resolve host through the system's own DNS facilities.

    Uses the configured system DNS servers over UDP when known, otherwise
    falls back to the socket module's resolver functions.  Returns a list
    of IP strings, or None when the lookup failed.
    '''
    start = mtime()
    try:
        if dns_system_servers:
            iplist = _dns_udp_resolve(host,
                                      dns_system_servers,
                                      timeout=2,
                                      qtypes=qtypes)
        # getaddrinfo cannot run concurrently on Windows; other systems unknown
        else:
            if AAAA not in qtypes:
                iplist = socket.gethostbyname_ex(host)[-1]
            elif A in qtypes:
                iplist = [
                    ipaddr[4][0] for ipaddr in socket.getaddrinfo(host, None)
                ]
            else:
                iplist = [
                    ipaddr[4][0] for ipaddr in socket.getaddrinfo(
                        host, None, socket.AF_INET6)
                ]
    except Exception:
        # was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit — narrow it to Exception
        iplist = None
    cost = int((mtime() - start) * 1000)
    logging.test('%sdns_system_resolve 已缓存:%s/%s,耗时:%s 毫秒,%s = %s',
                 address_string(iplist), len(dns), dns.max_items, cost, host,
                 iplist or '查询失败')
    return iplist
Ejemplo n.º 4
0
 def push(self, block=True, timeout=None, maxsize=None):
     '''Acquire one slot in the limiter, optionally blocking.

     Raises LimiterFull when no slot could be acquired within the timeout
     (or immediately when block is False).  timeout=None with block=True
     waits indefinitely (passed to the lock as -1); timeout == 0 degrades
     to a non-blocking attempt.
     '''
     if block:
         if timeout is None:
             # Lock.acquire uses -1 to mean "wait forever"
             timeout = -1
         else:
             if timeout < 0:
                 raise ValueError("'timeout' must be a non-negative number")
             elif timeout == 0:
                 block = False
             else:
                 endtime = mtime() + timeout
     maxsize = maxsize or self.maxsize
     limited = not self._push(maxsize)
     if limited and block and self.__lock_push.acquire(timeout=timeout):
         try:
             while limited:
                 if timeout > 0:
                     # recompute the remaining wait from the absolute deadline
                     timeout = endtime - mtime()
                     if timeout <= 0:
                         break
                 sleep(self.timeout_interval)
                 limited = not self._push(maxsize)
         finally:
             self.__lock_push.release()
     if limited:
         raise LimiterFull
Ejemplo n.º 5
0
def dns_over_https_resolve(host, qtypes=qtypes):
    '''Resolve host via DNS-over-HTTPS and log the timed result.'''
    begin = mtime()
    result = _dns_over_https_resolve(host, qtypes=qtypes)
    elapsed_ms = int(1000 * (mtime() - begin))
    logging.test('%sdns_over_https 已缓存:%s/%s,耗时:%s 毫秒,%s = %s',
                 address_string(result), len(dns), dns.max_items, elapsed_ms,
                 host, result or '查询失败')
    return result
Ejemplo n.º 6
0
 def create_gws_connection_withproxy(self,
                                     address,
                                     hostname,
                                     cache_key,
                                     getfast=None,
                                     **kwargs):
     '''Open an SSL connection to a Google server through a front proxy.

     Resolves the proxy address, tries each proxy IP in turn and returns
     the first successfully handshaked SSL socket (with .xip set to the
     proxy endpoint), or None when the proxy could not be resolved or
     every attempt failed.  Connect costs are recorded in
     self.gws_front_connection_time for later ordering.
     '''
     proxy = self.get_gws_front(getfast)
     proxytype, proxyuser, proxypass, proxyaddress = parse_proxy(proxy)
     proxyhost, _, proxyport = proxyaddress.rpartition(':')
     ips = dns_resolve(proxyhost).copy()
     if ips:
         ipcnt = len(ips)
     else:
         logging.error('create_gws_connection_withproxy 代理地址无法解析:%r',
                       proxy)
         return
     if ipcnt > 1:
         #prefer unused IPs first, then order by connection speed
         ips.sort(key=self.get_gws_front_connection_time_ip)
     proxyport = int(proxyport)
     ohost, port = address
     while ips:
         proxyip = ips.pop(0)
         # pick a random resolved IP of the real target host
         ip = random.choice(dns[hostname])
         if proxytype:
             proxytype = proxytype.upper()
         if proxytype not in socks.PROXY_TYPES:
             # unknown scheme: fall back to plain HTTP proxying
             proxytype = 'HTTP'
         proxy_sock = self.get_proxy_socket(proxyip, 8)
         proxy_sock.set_proxy(socks.PROXY_TYPES[proxytype], proxyip,
                              proxyport, True, proxyuser, proxypass)
         start_time = mtime()
         try:
             proxy_ssl_sock = self.get_ssl_socket(
                 proxy_sock, ohost.encode())
             proxy_ssl_sock.settimeout(self.timeout)
             #proxy_ssl_sock.set_connect_state()
             proxy_ssl_sock.connect((ip, port))
             proxy_ssl_sock.do_handshake()
         except Exception as e:
             # penalize this proxy/IP with a large, slightly random cost
             cost_time = self.timeout + 1 + random.random()
             if ipcnt > 1:
                 self.gws_front_connection_time['ip'][
                     proxyip] = cost_time
             self.gws_front_connection_time[proxy] = cost_time
             logging.error(
                 'create_gws_connection_withproxy 连接代理 [%s] 失败:%r',
                 proxy, e)
             continue
         else:
             # record the measured connect+handshake cost
             cost_time = mtime() - start_time
             if ipcnt > 1:
                 self.gws_front_connection_time['ip'][
                     proxyip] = cost_time
             self.gws_front_connection_time[proxy] = cost_time
         proxy_ssl_sock.xip = proxyip, proxyport
         return proxy_ssl_sock
Ejemplo n.º 7
0
    def _create_ssl_connection(self, ipaddr, cache_key, host, queobj, timeout=None, get_cache_sock=None, callback=None):
        '''Worker: establish one SSL connection to ipaddr.

        Without a callback, puts the handshaked ssl socket (or the raised
        NetWorkIOError) into queobj.  With a callback, hands the socket to
        it, caches the connection and returns True on success; on failure
        reports the error to the callback and returns whether it was a
        LimiterFull.  In callback mode, retries once on connection-reset
        style errors (zero_EOF_error / zero_errno).
        '''
        retry = None
        while True:
            if get_cache_sock:
                # reuse a cached connection when one is available
                sock = get_cache_sock()
                if sock:
                    queobj.put(sock)
                    return

            ip = ipaddr[0]
            sock = None
            try:
                sock = self.get_tcp_socket(ip, timeout)
                server_name = self.get_server_hostname(host, cache_key)
                ssl_sock = self.get_ssl_socket(sock, cache_key, server_name)
                # start connection time record
                start_time = mtime()
                # TCP connect
                ssl_sock.connect(ipaddr)
                #connected_time = mtime()
                # set a short timeout to trigger timeout retry more quickly.
                if timeout is not None:
                    ssl_sock.settimeout(3 if self.gws else 1.5)
                # SSL handshake
                ssl_sock.do_handshake()
                handshaked_time = mtime()
                # record SSL connection time
                ssl_sock.ssl_time = handshaked_time - start_time
                # verify Google SSL certificate.
                if self.gws:
                    self.google_verify(ssl_sock)
                ssl_sock.xip = ipaddr
                if callback:
                    # the callback may supply a better cache key
                    cache_key = callback(ssl_sock) or cache_key
                    self.ssl_connection_time[ipaddr] = ssl_sock.ssl_time
                    self.ssl_connection_cache[cache_key].append((mtime(), ssl_sock))
                    return True
                self.ssl_connection_time[ipaddr] = ssl_sock.ssl_time
                # put ssl socket object to output queobj
                queobj.put(ssl_sock)
            except NetWorkIOError as e:
                if sock:
                    sock.close()
                # any socket.error: put the Exception to output queobj.
                e.xip = ipaddr
                if callback:
                    if not retry and (e.args == zero_EOF_error or e.args[0] in zero_errno):
                        retry = True
                        continue
                    else:
                        callback(e)
                        return isinstance(e, LimiterFull)
                # reset a large timeout for this ipaddr so it is deprioritized
                self.ssl_connection_time[ipaddr] = self.timeout + 1
                queobj.put(e)
            break
Ejemplo n.º 8
0
def dns_local_resolve(host, qtypes=qtypes):
    '''Resolve host via the configured local DNS servers over UDP and
    log the timed result.'''
    begin = mtime()
    result = _dns_udp_resolve(host,
                              dns_local_servers,
                              timeout=2,
                              qtypes=qtypes)
    elapsed_ms = int(1000 * (mtime() - begin))
    logging.test('%sdns_local_resolve 已缓存:%s/%s,耗时:%s 毫秒,%s = %s',
                 address_string(result), len(dns), dns.max_items, elapsed_ms,
                 host, result or '查询失败')
    return result
Ejemplo n.º 9
0
 def get_ip_info(self,
                 ip,
                 server_name=None,
                 callback=None,
                 conntimeout=g_conntimeout,
                 handshaketimeout=g_handshaketimeout,
                 timeout=g_timeout):
     '''Probe a Google IP: TLS-connect, verify the certificate, classify.

     Returns (domain, ssl_time_ms, ip_type) where ip_type is 'gae',
     'gws' or None.  One retry is attempted on connection-reset style
     errors (zero_EOF_error / zero_errno).
     '''
     retry = None
     server_name = server_name or self.server_name
     callback = callback or self.check_type_status
     while True:
         start_time = mtime()
         ssl_time = 1e5
         # renamed from `type` (shadowed the builtin)
         ip_type = None
         domain = None
         sock = None
         ssl_sock = None
         try:
             sock = http_gws.get_tcp_socket(ip)
             http_gws.set_tcp_socket(sock, set_buffer=False)
             ssl_sock = http_gws.get_ssl_socket(sock, server_name)
             ssl_sock.settimeout(conntimeout)
             ssl_sock.connect((ip, 443))
             ssl_sock.settimeout(handshaketimeout)
             ssl_sock.do_handshake()
             ssl_sock.settimeout(timeout)
             handshaked_time = mtime() - start_time
             ssl_time = int(handshaked_time * 1000)
             if handshaked_time > handshaketimeout:
                 raise socket.error('handshake 超时:%d ms' % ssl_time)
             cert = http_gws.google_verify(ssl_sock)
             domain = cert.get_subject().CN
             if not domain:
                 raise CertificateError(
                     -1, '%s 无法获取 commonName:%s' % (ip, cert))
             ip_type = callback(ssl_sock, ip)
         except NetWorkIOError as e:
             self.logger.debug('get_ip_info 发生错误:%s', e)
             if not retry and (e.args == zero_EOF_error
                               or e.args[0] in zero_errno):
                 retry = True
                 continue
         finally:
             if ssl_sock:
                 ssl_sock.close()
             elif sock:
                 sock.close()
         # identity check on purpose: only rewrite when the caller relied
         # on the default server_name
         if server_name is self.server_name and domain == self.com_domain:
             domain = '*.google.com'
         # BUG FIX: was `type is 'gae'` / `type is 'gws'` — identity
         # comparison with str literals only works by accident of interning;
         # use equality
         if ip_type == 'gae' and not self.test_ip_gae(ip) or \
                 ip_type == 'gws' and not self.test_ip_gws(ip):
             ip_type = None
         return domain, ssl_time, ip_type
Ejemplo n.º 10
0
 def set(self, key, value, expire=None):
     '''Insert key/value as the most recently used entry.

     expire semantics (unchanged contract):
       expire is None  -> use the cache-wide default
       expire is False -> never expires (can only be set here)
       expire == 0     -> evicted by LRU only
       expire >  0     -> LRU eviction plus absolute time deadline
     '''
     if expire is None:
         expire = self.expire
     elif expire is False:
         expire = -1
     else:
         expire = int(expire)
     if expire > 0:
         # store an absolute deadline instead of a relative duration
         expire += int(mtime())
     cache = self.cache
     order = self.key_order
     limit = self.max_items
     if key in cache:
         order.remove(key)
     order.appendleft(key)
     cache[key] = value, expire
     # trim the tail; never-expire entries get rotated back to the front
     while len(order) > limit:
         victim = order.pop()
         _, victim_expire = cache[victim]
         if victim_expire < 0:
             order.appendleft(victim)
         else:
             del cache[victim]
Ejemplo n.º 11
0
def check_appid_exists(appid):
    '''Probe whether a GAE appid is reachable.

    Sends a HEAD request up to three times; returns True/False when a
    response was obtained (200/503 counts as existing), or None when all
    attempts failed.
    '''
    request_params, http_util, connection_cache_key = _get_request_params(appid)
    for _ in range(3):
        err = None
        response = None
        sock = None
        try:
            sock = http_util.create_ssl_connection((request_params.host, request_params.port),
                                                   request_params.hostname,
                                                   connection_cache_key)
            if sock is None:
                continue
            # BUG FIX: was `host.encode()` — `host` is undefined in this
            # function, so every attempt raised NameError (silently eaten
            # by the bare except); use request_params.host
            sock.sendall(b'HEAD / HTTP/1.1\r\n'
                         b'Host: %s\r\n'
                         b'Connection: Close\r\n\r\n' % request_params.host.encode())
            response = HTTPResponse(sock, method='HEAD')
            response.begin()
        except Exception:
            # narrowed from a bare `except:`
            err = True
        finally:
            if response:
                response.close()
                if err is None:
                    exists = response.status in (200, 503)
                    if exists and GC.GAE_KEEPALIVE:
                        http_util.ssl_connection_cache[connection_cache_key].append((mtime(), sock))
                    else:
                        # not reusing the connection — close instead of leaking
                        sock.close()
                    return exists
            elif sock:
                # connection established but request failed — don't leak it
                sock.close()
Ejemplo n.º 12
0
def get_action(scheme, host, path, url):
    '''Look up the configured action for a request, filling and using the
    per-host filters cache.

    Returns an (action, target) tuple, or the TEMPACT marker while a
    temporary rule is still valid.
    '''
    schemes = '', scheme
    key = '%s://%s' % (scheme, host)
    filters = filters_cache.gettill(key)
    if filters:
        #is there a temporary rule at the head?
        _, action, expire = filters[0]
        # BUG FIX: was `action is 'TEMPACT'` — identity comparison with a
        # str literal only works by accident of interning; use equality
        if action == 'TEMPACT':
            if mtime() > expire:
                del filters[0]
                logging.warning('%r 的临时 %s 规则已经失效。', GC.LISTEN_ACT, key)
            #when auto multi-threading applies, skip the temporary GAE rule
            #and keep trying the default rules
            #(does the path end with one of the media-file suffixes?)
            elif GC.LISTEN_ACT == 'GAE' and any(path.endswith(x) for x in GC.AUTORANGE_FAST_ENDSWITH):
                pass
            else:
                return TEMPACT
        #match against the cached rules
        for pathfilter, action, target in filters:
            if action == 'TEMPACT':
                continue
            if match_path_filter(pathfilter, path):
                #work out the redirect URL
                if action in REDIRECTS:
                    target = get_redirect(target, url)
                    if target is not None:
                        durl, mhost = target
                        if durl and durl != url:
                            return action, target
                    continue
                return action, target
    # renamed from `filter` (shadowed the builtin)
    matched = None
    #create the cache entry
    filters_cache.setpadding(key)
    _filters = []
    for filters in action_filters:
        if filters.action == FAKECERT:
            continue
        for schemefilter, hostfilter, pathfilter, target in filters:
            if schemefilter in schemes and match_host_filter(hostfilter, host):
                action = numToAct[filters.action]
                #populate the rule cache
                _filters.append((pathfilter, action, target))
                #keep only the first match, ignore the rest
                if not matched and match_path_filter(pathfilter, path):
                    #work out the redirect URL
                    if action in REDIRECTS:
                        target = get_redirect(target, url)
                        if target is not None:
                            durl, mhost = target
                            if durl and durl != url:
                                matched = action, target
                    else:
                        matched = action, target
    #append the default rule
    _filters.append(filter_DEF)
    filters_cache[key] = _filters
    return matched or filter_DEF[1:]
Ejemplo n.º 13
0
 def save_ip(self):
     '''Persist the current gae/gws IP lists to the ipdb config file.'''
     header = ('#coding: utf-8\n'
               '#此文件由 GotoX 自动维护,请不要修改。\n'
               '[iplist]\n')
     with open(GC.CONFIG_IPDB, 'w', encoding='utf_8', newline='\n') as fp:
         fp.write(header)
         for manager in (self.gae, self.gws):
             fp.write('%s = %s\n' % (manager.list_name, '|'.join(manager.ip_list)))
     self.last_update = mtime()
Ejemplo n.º 14
0
def forward_socket(local,
                   remote,
                   payload=None,
                   timeout=60,
                   tick=4,
                   bufsize=8192,
                   maxping=None,
                   maxpong=None):
    '''Bidirectionally pump data between the local and remote sockets.

    Stops when the idle budget (timeout, extended up to maxpong while both
    sides are active) runs out, the remote side closes, or the remote
    socket is removed from all_forward_sockets by another thread.  An
    optional payload is sent to remote first.  remote is always closed on
    exit; closing local is left to the caller.
    '''
    if payload:
        remote.sendall(payload)
    buf = memoryview(bytearray(bufsize))
    maxpong = maxpong or timeout
    allins = [local, remote]
    timecount = timeout
    # BUG FIX: pre-bind ins so the except-branch logging below cannot hit
    # a NameError when select itself raises on the first iteration
    ins = []
    all_forward_sockets.add(remote)
    try:
        while allins and timecount > 0:
            start_time = mtime()
            ins, _, err = select(allins, [], allins, tick)
            t = mtime() - start_time
            # NOTE(review): int() truncation means waits shorter than one
            # second do not reduce the budget — preserved as-is
            timecount -= int(t)
            if err:
                raise socket.error(err)
            if remote not in all_forward_sockets:
                break
            for sock in ins:
                ndata = sock.recv_into(buf)
                if ndata:
                    other = local if sock is remote else remote
                    other.sendall(buf[:ndata])
                elif sock is remote:
                    # remote EOF: forwarding is complete
                    return
                else:
                    # local EOF: stop reading that side, keep draining remote
                    allins.remove(sock)
            if ins and len(allins) == 2:
                # traffic on both sides: extend the idle budget
                timecount = max(min(timecount * 2, maxpong), tick)
    except Exception as e:
        logging.debug('forward_socket except: %s %r', ins, e)
        raise
    finally:
        all_forward_sockets.discard(remote)
        remote.close()
Ejemplo n.º 15
0
def set_temp_action(host):
    '''Insert a temporary GAE rule at the head of the host's cached filter
    list; returns True only when a new rule was actually added.'''
    try:
        filters = filters_cache[host]
    except KeyError:
        filters_cache[host] = filters = [filter_DEF]
    if filters[0][1] == 'TEMPGAE':
        # a temporary rule is already in place — nothing to do
        return
    temp_rule = ('', 'TEMPGAE', mtime() + GC.LINK_TEMPTIME)
    filters.insert(0, temp_rule)
    return True
Ejemplo n.º 16
0
    def handle_request(self):
        '''Handle one request; may block for at most the effective timeout.

        Mirrors socketserver.BaseServer.handle_request: the effective
        timeout is the smaller of the socket's own timeout and
        self.timeout, and a deadline is tracked across spurious selector
        wake-ups.  Calls handle_timeout() when the deadline passes.
        '''
        timeout = self.socket.gettimeout()
        if timeout is None:
            timeout = self.timeout
        elif self.timeout is not None:
            timeout = min(timeout, self.timeout)
        if timeout is not None:
            deadline = mtime() + timeout

        with _ServerSelector() as selector:
            # BUG FIX: was `selectors.register(...)` — that is the module;
            # registration belongs on the selector instance
            selector.register(self, selectors.EVENT_READ)

            while True:
                ready = selector.select(timeout)
                if ready:
                    return self._handel_request_noblokc()
                else:
                    # BUG FIX: was `if mtime is not None:` which is always
                    # true and read `deadline` even when no timeout was set
                    if timeout is not None:
                        timeout = deadline - mtime()
                        if timeout < 0:
                            return self.handle_timeout()
Ejemplo n.º 17
0
def _https_resolve(server, qname, qtype, query_data):
    '''Resolve one record over DNS-over-HTTPS (wire format).

    This implementation only handles A and AAAA records.  Returns
    (iplist, xip, ok): the answers (NXDOMAIN marker included), the
    address of the responding endpoint, and whether the rcode was usable.
    '''
    # https://developers.cloudflare.com/1.1.1.1/dns-over-https/wireformat/

    iplist = []
    xip = None
    response = None
    noerror = False
    ok = False
    # Google's own DoH endpoint goes through the gws client, others direct
    http_util = http_gws if server.hostname.startswith('google') else http_nor
    connection_cache_key = '%s:%d' % (server.hostname, server.port)
    try:
        response = http_util.request(server,
                                     query_data,
                                     headers=server.headers.copy(),
                                     connection_cache_key=connection_cache_key)
        if response:
            data = response.read()
            noerror = True
            if response.status == 200:
                reply = dnslib.DNSRecord.parse(data)
                if reply:
                    if reply.header.rcode is NXDOMAIN:
                        ok = True
                        iplist.append(NXDOMAIN)
                    else:
                        ok = reply.header.rcode is NOERROR
                        for r in reply.rr:
                            if r.rtype is qtype:
                                iplist.append(str(r.rdata))
            else:
                raise DoHError((response.status, data))
    except DoHError as e:
        logging.error('%s _https_resolve %r 失败:%r', address_string(response),
                      qname, e)
    except Exception as e:
        logging.debug('%s _https_resolve %r 失败:%r', address_string(response),
                      qname, e)
    finally:
        if response:
            response.close()
            xip = response.xip
            if noerror:
                # keep the connection for reuse when allowed
                if GC.GAE_KEEPALIVE or http_util is not http_gws:
                    http_util.ssl_connection_cache[
                        connection_cache_key].append((mtime(), response.sock))
                else:
                    response.sock.close()
        # NOTE: return inside finally deliberately swallows any in-flight
        # exception and always yields the (iplist, xip, ok) triple
        return iplist, xip, ok
Ejemplo n.º 18
0
 def _expire_check(self, key=__marker, index=None):
     '''Check one cache entry for expiry; returns True when it was removed.

     The entry is selected either by key or, when key is omitted, by its
     index in self.key_order.
     '''
     if key is self.__marker and isinstance(index, int):
         key = self.key_order[index]
     cache = self.cache
     if key in cache:
         value, expire = cache[key]
         if expire > 0:
             now = int(mtime())
             timeleft = expire - now
             if timeleft <= 0:
                 del cache[key]
                 if isinstance(index, int):
                     del self.key_order[index]
                 else:
                     self.key_order.remove(key)
                 return True
             elif timeleft < 8:
                 #keep enough reaction time for a possible immediately
                 #following access
                 cache[key] = value, now + 8
Ejemplo n.º 19
0
 def __init__(self, ip_source):
     '''Set up an IP picker bound to ip_source (type 'gae' or 'gws').'''
     self.running = False
     self.pick_worker_cnt = 0
     self.kill_pick_worker_cnt = 0
     type = ip_source.type
     if type == 'gae':
         self.check_callback = self.check_gae_callback
     elif type == 'gws':
         self.check_callback = self.check_gws_callback
     self.type = type
     self.logger = logging.getLogger('[picker %s]' % type)
     self.logger.setLevel(GC.LOG_LEVEL)
     self.list_name = 'google_' + type
     self.cache_key = self.list_name + '|:443'
     # take over the global list as a deque, shared back into IPLIST_MAP
     self.ip_list = collections.deque(GC.IPLIST_MAP[self.list_name])
     GC.IPLIST_MAP[self.list_name] = self.ip_list
     # NOTE(review): self.ip_set is presumably a set defined on the class
     # and shared with the ip source below — confirm against the class body
     self.ip_set |= set(self.ip_list)
     ip_source._ip_source.ip_set_used = self.ip_set
     self.ip_source = ip_source
     self.load_config()
     now = mtime()
     self.last_update = now
     # allow a recheck immediately after startup
     self.last_check = now - self.min_recheck_time
Ejemplo n.º 20
0
 def _cleanup(self):
     '''Background loop: evict expired entries from the cache tail.'''
     #once per second, check and clear expired items among the trailing
     #l/m entries of the key order
     m = 4
     n = 1
     lock = self.lock
     key_order = self.key_order
     cache = self.cache
     clean_items = self.max_items // m
     while True:
         sleep(1)
         with lock:
             l = len(key_order)
             if l < clean_items:
                 n = 1
                 continue
             l = l // m
             if n > l:
                 n = 1
             now = int(mtime())
             while True:
                 try:
                     key = key_order[-n]
                 except IndexError:
                     break
                 expire = self.cache[key][1]
                 if expire < 0:
                     # never-expire entry: rotate it to the front
                     del key_order[-n]
                     key_order.appendleft(key)
                     #stop: the extreme case where everything remaining
                     #never expires
                     break
                 elif 0 < expire <= now:
                     del key_order[-n]
                     del cache[key]
                 else:
                     n += 1
                     break
Ejemplo n.º 21
0
#https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
import os, time

# show the current local time, switch the process timezone, show it again
time.strftime('%X %x %Z')
os.environ['TZ'] = 'Asia/Calcutta'
time.tzset()  # Unix only: make the time module re-read the TZ variable
time.strftime('%X %x %Z')

"""#https://en.wikipedia.org/wiki/List_of_tz_database_time_zones """

# Loop until the target wall-clock moment has passed.
# BUG FIX: time.mtime does not exist — time.mktime is the struct_time ->
# epoch converter.  It also needs a 9-item tuple: (year, month, day, hour,
# minute, second, weekday, yearday, isdst); weekday/yearday are ignored,
# isdst=-1 lets the library decide.  The loop body also lost its
# indentation, which made the original an infinite loop.
z = 0
while z < 1:
    t = time.time()
    d = (2020, 7, 9, 13, 2, 0, 0, 0, -1)
    n = time.mktime(d) - t
    if n < 0:
        print("time is up")
        z += 1

# seconds remaining until a second target moment (negative when past)
t = time.time()
d = (2020, 7, 9, 13, 10, 0, 0, 0, -1)
n = time.mktime(d) - t
print(n)
Ejemplo n.º 22
0
def _dns_udp_resolve(qname, dnsservers, timeout=2, qtypes=qtypes):
    '''Resolve qname over plain UDP DNS with GFW-pollution detection.

    Sends one query per record type in qtypes to every server, then
    collects replies until the timeout, separating 'local' and 'remote'
    answers.  Returns a list of IP strings (possibly a classlist with the
    replying server addresses attached as .xip), empty on failure.
    '''
    # https://gfwrev.blogspot.com/2009/11/gfwdns.html
    # https://zh.wikipedia.org/wiki/域名服务器缓存污染
    # http://support.microsoft.com/kb/241352 (deleted)

    def get_sock(v4):
        # lazily create one shared UDP socket per address family
        nonlocal sock_v4, sock_v6
        if v4:
            if sock_v4 is None:
                sock_v4 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                socks.append(sock_v4)
            return sock_v4
        else:
            if sock_v6 is None:
                sock_v6 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
                socks.append(sock_v6)
            return sock_v6

    socks = []
    sock_v4 = sock_v6 = None
    query_times = 0
    iplists = {'remote': []}
    local_servers = ()
    pollution = qname in polluted_hosts
    remote_resolve = dnsservers is dns_remote_servers
    if remote_resolve:
        # when local_prefer is disabled, do not add the domestic servers
        # from the server list into the local_servers comparison set
        if dns_local_prefer and not pollution and qname not in direct_domains_black_tree:
            local_servers = dns_remote_local_servers or (
                random.choice(dns_local_servers), )
        if local_servers:
            iplists['local'] = []
            if not dns_remote_local_servers:
                dnsservers += local_servers
    for qtype in qtypes:
        query = dnslib.DNSRecord(q=dnslib.DNSQuestion(qname, qtype))
        if remote_resolve:
            query.ar.append(remote_query_opt)
        query_data = query.pack()
        for dnsserver in dnsservers:
            sock = get_sock(isipv4(dnsserver[0]))
            try:
                sock.sendto(query_data, dnsserver)
                query_times += 1
            except socket.error as e:
                logging.warning('send dns qname=%r \nsocket: %r', qname, e)
        del query, query_data

    def is_resolved(qtype):
        # has this record type already been answered for the current scope?
        if qtype is A:
            return resolved & (bv4_local if local else bv4_remote)
        elif qtype is AAAA:
            return resolved & (bv6_local if local else bv6_remote)
        return True

    time_start = mtime()
    timeout_at = time_start + timeout
    iplist = []
    xips = []
    resolved = 0
    # pre-mark record types that were not requested as already resolved
    if A not in qtypes:
        resolved |= bv4_remote | bv4_local
    elif AAAA not in qtypes:
        resolved |= bv6_remote | bv6_local
    if not local_servers:
        resolved |= bv4_local | bv6_local
    udp_len = remote_resolve and remote_query_opt.edns_len or 512
    while mtime() < timeout_at and (allresolved ^ resolved) and query_times:
        ins, _, _ = select(socks, [], [], 0.1)
        for sock in ins:
            iplist.clear()
            qtype = None
            try:
                reply_data, xip = sock.recvfrom(udp_len)
                xip = xip[:2]
                local = xip in local_servers
                if local and pollution:
                    continue
                reply = dnslib.DNSRecord.parse(reply_data)
                qtype = reply.q.qtype
                rr_alone = len(reply.rr) == 1 and reply.a.rtype is qtype
                if is_resolved(qtype):
                    continue
                # even with no pollution seen so far, non-standard ports
                # are not ruled out here
                if remote_resolve and not local and xip not in dns_remote_local_servers:
                    if rr_alone and (
                            not check_edns_opt(reply.ar)
                            or mtime() - time_start < dns_time_threshold):
                        query_times += 1
                        if local_servers:
                            resolved |= bv4_local | bv6_local
                        if not pollution:
                            polluted_hosts.add(qname)
                        pollution = True
                        continue
                    elif not pollution and dns_local_prefer and local_servers:
                        resolved |= bv4_remote | bv6_remote
                        if is_resolved(qtype):
                            continue
                if reply.header.rcode is NOERROR:
                    for r in reply.rr:
                        if r.rtype is qtype:
                            ip = str(r.rdata)
                            #a simple rule to reject the IPv6 pollution
                            #pattern, with an extremely small chance of
                            #hitting a genuine result; there is no IPv4
                            #equivalent, but this is only a fallback to
                            #check_edns_opt anyway
                            if qtype is AAAA and pollution and rr_alone and \
                                    len(ip) == 15 and ip.startswith('2001::'):
                                query_times += 1
                                #iplist.clear()
                                #break
                            else:
                                iplist.append(ip)
                elif reply.header.rcode is NXDOMAIN:
                    timeout_at = 0
                    iplist.append(NXDOMAIN)
                    break
            except socket.error as e:
                logging.warning('receive dns qname=%r \nsocket: %r', qname, e)
            except dnslib.dns.DNSError as e:
                # dnslib's support is incomplete; skip some parse errors
                # that do not affect usage
                logging.debug(
                    'receive dns qname=%r \nerror: %s\nreply data: %r', qname,
                    e, reply_data)
            finally:
                query_times -= 1
                if iplist:
                    if local:
                        resolved |= bv4_local if qtype is A else bv6_local
                        iplists['local'].extend(iplist)
                    else:
                        resolved |= bv4_remote if qtype is A else bv6_remote
                        iplists['remote'].extend(iplist)
                #most likely there simply is no AAAA record
                elif qtype is AAAA and is_resolved(A):
                    resolved |= bv6_local if local else bv6_remote
                if xip not in xips:
                    xips.append(xip)
    for sock in socks:
        sock.close()
    logging.debug('query qname=%r reply iplist=%s', qname, iplists)
    if pollution or not remote_resolve or not local_servers or not dns_local_prefer:
        iplist = iplists['remote']
    else:
        iplist = iplists['local']
    if xips:
        iplist = classlist(iplist)
        iplist.xip = xips
    if pollution:
        logging.warning('发现 DNS 污染, 域名: %r, 解析结果:\n%r', qname, iplists)
    return iplist
Ejemplo n.º 23
0
def get_modified_time(filePath):
    """Return the last-modification time of *filePath* as a readable string.

    Fix: the original called ``time.mtime``, which does not exist in the
    ``time`` module and raised ``AttributeError`` on every call.  The epoch
    timestamp from ``os.path.getmtime`` is converted with ``time.ctime``
    into the standard 24-character human-readable form.

    Raises ``OSError`` if *filePath* does not exist or is inaccessible.
    """
    return time.ctime(os.path.getmtime(filePath))
Ejemplo n.º 24
0
 def __fetchlet(self, range_queue, data_queue, threadorder):
     """Worker-thread body for a ranged (multi-connection) download.

     Repeatedly takes a ``(start, end)`` byte range from *range_queue*,
     fetches it via ``gae_urlfetch`` and pushes the received chunks into
     *data_queue* as ``(offset, data)`` tuples.  Failed or incomplete
     ranges are put back on *range_queue* for another attempt.  Exit IPs
     that turn out slow or broken are removed from the thread-shared
     ``self.iplist`` under ``self.tLock``.  The loop ends when
     ``self._stopped`` is set or *range_queue* stays empty for 1 second.
     *threadorder* is only used to tag log messages.
     """
     headers = {k.title(): v for k, v in self.headers.items()}
     #headers['Connection'] = 'close'
     while True:
         try:
             # Pick up a fresh IP snapshot whenever the shared GAE IP
             # manager has published an update.
             with self.tLock:
                 if self.lastupdate != ip_manager_gae.last_update:
                     self.lastupdate = ip_manager_gae.last_update
                     self.iplist = GC.IPLIST_MAP['google_gae'].copy()
             noerror = True
             response = None
             starttime = None
             if self._stopped: return
             try:
                 # The first range may reuse a response object handed over
                 # by the dispatcher (self.response / self.firstrange).
                 if self.response:
                     response = self.response
                     self.response = None
                     start, end = self.firstrange
                 else:
                     start, end = range_queue.get(timeout=1)
                     headers['Range'] = 'bytes=%d-%d' % (start, end)
                     # Back-pressure: wait while this range is too far
                     # ahead of the consumer and the data queue already
                     # holds plenty of buffered data.
                     while start - self.expect_begin > self.threads * self.delaysize and \
                             data_queue.qsize() * self.bufsize > 3 * self.threads * self.delaysize:
                         if self._stopped: return
                         sleep(0.1)
                     response = gae_urlfetch(self.command, self.url, headers, self.payload, getfast=self.timeout)
                 if response:
                     xip = response.xip[0]
                     if xip in self.iplist:
                         realstart = start
                         starttime = mtime()
                     else:
                         # Response came from an IP no longer on our list:
                         # requeue the range and fetch it again.
                         range_queue.put((start, end))
                         noerror = False
                         continue
             except queue.Empty:
                 # No pending ranges for 1 s: this worker is done.
                 return
             except LimiterFull:
                 range_queue.put((start, end))
                 sleep(2)
                 continue
             except Exception as e:
                 logging.warning('%s Response %r in __fetchlet', self.address_string(response), e)
                 range_queue.put((start, end))
                 continue
             if self._stopped: return
             if not response:
                 logging.warning('%s RangeFetch %s 没有响应,重试', self.address_string(response), headers['Range'])
                 range_queue.put((start, end))
             elif response.app_status == 503:
                 # NOTE(review): 503 presumably means this appid is out of
                 # quota — mark it bad and retry the range; confirm.
                 if hasattr(response, 'appid'):
                     mark_badappid(response.appid)
                 range_queue.put((start, end))
                 noerror = False
             elif response.app_status != 200:
                 logging.warning('%s Range Fetch "%s %s" %s 返回 %s', self.address_string(response), self.command, self.url, headers['Range'], response.app_status)
                 range_queue.put((start, end))
                 noerror = False
             elif response.getheader('Location'):
                 # Follow the redirect, then refetch the same range from
                 # the new URL.
                 self.url = urljoin(self.url, response.getheader('Location'))
                 logging.info('%s RangeFetch Redirect(%r)', self.address_string(response), self.url)
                 range_queue.put((start, end))
             elif 200 <= response.status < 300:
                 content_range = response.getheader('Content-Range')
                 if not content_range:
                     logging.warning('%s RangeFetch "%s %s" 返回 Content-Range=%r: response headers=%r', self.address_string(response), self.command, self.url, content_range, response.getheaders())
                     range_queue.put((start, end))
                     continue
                 content_length = int(response.getheader('Content-Length', 0))
                 logging.test('%s >>>> %s: 线程 %s %s %s', self.address_string(response), self.host, threadorder, content_length, content_range)
                 try:
                     data = response.read(self.bufsize)
                     while data:
                         data_queue.put((start, data))
                         start += len(data)
                         if self._stopped: return
                         # Average transfer speed below the threshold:
                         # abandon this range and penalize the IP.
                         if (start-realstart) / (mtime()-starttime) < self.lowspeed:
                             # remove the slow IP
                             if self.delable: 
                                 with self.tLock:
                                     if xip in self.iplist and len(self.iplist) > self.minip:
                                         self.iplist.remove(xip)
                                         logging.warning('%s RangeFetch 移除慢速 ip %s', self.address_string(), xip)
                             noerror = False
                             break
                         else:
                             data = response.read(self.bufsize)
                 except Exception as e:
                     noerror = False
                     logging.warning('%s RangeFetch "%s %s" %s 失败:%r', self.address_string(response), self.command, self.url, headers['Range'], e)
                 if self._stopped: return
                 # Short read: requeue the remaining start-end span.
                 if start < end + 1:
                     logging.warning('%s RangeFetch "%s %s" 重试 %s-%s', self.address_string(response), self.command, self.url, start, end)
                     range_queue.put((start, end))
                     continue
                 logging.test('%s >>>> %s: 线程 %s 成功接收到 %d 字节', self.address_string(response), self.host, threadorder, start)
             else:
                 logging.error('%s RangeFetch %r 返回 %s', self.address_string(response), self.url, response.status)
                 range_queue.put((start, end))
                 noerror = False
         except Exception as e:
             logging.exception('%s RangeFetch._fetchlet 错误:%r', self.address_string(), e)
             noerror = False
             raise
         finally:
             if response:
                 response.close()
                 if noerror:
                     # put the socket back into the ssl connection cache
                     response.http_util.ssl_connection_cache[response.connection_cache_key].append((mtime(), response.sock))
                 else:
                     response.sock.close()
                     if self.delable:
                         with self.tLock:
                              if xip in self.iplist and len(self.iplist) > self.minip:
                                 self.iplist.remove(xip)
                                 logging.warning('%s RangeFetch 移除故障 ip %s', self.address_string(response), xip)
             if noerror:
                 # NOTE(review): brief pause after each clean fetch —
                 # presumably simple pacing; confirm intent.
                 sleep(0.1)
Ejemplo n.º 25
0
 def _cache_connection(self, cache, count, queobj):
     """Drain *count* results from *queobj* and cache the usable sockets.

     Each queued item is either a connected socket or — presumably — an
     error object from a failed connection attempt (TODO confirm against
     the producer); only objects carrying a ``_sock`` attribute are kept,
     timestamped with the current time for later staleness checks.
     """
     remaining = count
     while remaining > 0:
         remaining -= 1
         candidate = queobj.get()
         # Items without a ``_sock`` attribute are not real sockets and
         # are silently discarded.
         if hasattr(candidate, '_sock'):
             cache.append((mtime(), candidate))
Ejemplo n.º 26
0
 def check_connection_alive(sock, keeptime, ctime):
     """Decide whether a cached connection can still be reused.

     A socket older than *keeptime* seconds (age measured from its cache
     timestamp *ctime*) is closed and reported unusable (returns None).
     Otherwise returns True when ``check_connection_dead`` does not flag
     the socket, False when it does.
     """
     age = mtime() - ctime
     if age > keeptime:
         # Expired entry: close it so the descriptor is released.
         sock.close()
         return None
     # Still fresh — alive iff the liveness probe finds it not dead.
     return not check_connection_dead(sock)