Example #1
def worker():
    q = PriorityQueue()
    q.put(Job(5, 'mid job'))
    q.put(Job(10, 'low job'))
    q.put(Job(1, 'high job'))

    while not q.empty():
        job = q.get()
        print(job)
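The example above assumes a Job class that orders by priority. A minimal sketch (the field names are assumptions, not taken from the original project):

from dataclasses import dataclass, field
from queue import PriorityQueue  # gevent.queue.PriorityQueue behaves the same way here

@dataclass(order=True)
class Job:
    priority: int                             # lower numbers are retrieved first
    description: str = field(compare=False)   # excluded from ordering comparisons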
Example #2
class HttpTest(object):

    def __init__(self,host,keyword,ips,timeout):
        self.threads = 100
        self.queue = PriorityQueue()
        self.host = host
        self.keyword = keyword
        self.result = []
        for ip in ips:
            self.queue.put(ip)
        self.num = self.queue.qsize()
        self.i = 0
        self.success = 0
        self.timeout = timeout
        self.filename = os.path.join(rootPath,"result",host + ".log")
        self.outfile = open(self.filename, 'w')


    def _scan(self,j):
        while not self.queue.empty():
            try:
                item = self.queue.get(timeout=3.0)
                if config.HTTPS_Support:
                    host, domain, port = item, self.host , 443
                else:
                    host, domain, port = item, self.host , 80
                html = httpServer((host, domain, port),self.timeout)
                if html  is not None and self.keyword in html:
                    self.outfile.write(item + '\n')
                    self.outfile.flush()
                    self.success += 1
            except:
                pass
            finally:
                self.i += 1
                msg = '[*] %s found, %s scanned, %s groups left' % (self.success, self.i, self.num - self.i)
                print_msg(msg)
            time.sleep(1.0)

    def run(self):
        threads = [gevent.spawn(self._scan, i) for i in range(self.threads)]
        gevent.joinall(threads)

        msg = '[+] All Done. Success:%d Saved in:%s'%(self.success,self.filename)
        print_msg(msg, line_feed=True)
Example #3
    def test__get_resource_item_from_queue(self):
        items_queue = PriorityQueue()
        item = (1, uuid.uuid4().hex)
        items_queue.put(item)

        # Success test
        worker = ResourceItemWorker(resource_items_queue=items_queue,
                                    config_dict=self.worker_config)
        self.assertEqual(worker.resource_items_queue.qsize(), 1)
        priority, resource_item = worker._get_resource_item_from_queue()
        self.assertEqual((priority, resource_item), item)
        self.assertEqual(worker.resource_items_queue.qsize(), 0)

        # Empty queue test
        priority, resource_item = worker._get_resource_item_from_queue()
        self.assertEqual(resource_item, None)
        self.assertEqual(priority, None)
        del worker
Example #4
def test_priority():
    refund_queue = PriorityQueue()
    receipt = TransferReceipt(sender=123, amount=1, identifier=123, received_timestamp=1)

    # higher priority
    refund1 = Refund(receipt, priority=1, claim_fee=False)
    # lower priority
    refund2 = Refund(receipt, priority=5, claim_fee=False)

    assert refund1 > refund2

    refund_queue.put(refund1)
    refund_queue.put(refund2)

    received_first = refund_queue.get()
    received_second = refund_queue.get()

    assert received_first == refund2
    assert received_second == refund1
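For these assertions to hold, Refund must invert the usual comparison so that a numerically smaller (more urgent) priority compares greater. A hedged sketch of such an ordering (the attribute handling is an assumption):

import functools

@functools.total_ordering
class Refund(object):
    def __init__(self, receipt, priority, claim_fee):
        self.receipt = receipt
        self.priority = priority
        self.claim_fee = claim_fee

    def __eq__(self, other):
        return self.priority == other.priority

    def __lt__(self, other):
        # Inverted on purpose: priority=1 compares greater than priority=5,
        # so the min-heap based PriorityQueue yields refund2 before refund1.
        return self.priority > other.priority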
Example #5
    def test_run(self):
        result = self.mox.CreateMock(AsyncResult)
        env = Envelope('*****@*****.**', ['*****@*****.**'])
        env.parse('From: [email protected]\r\n\r\ntest test\r\n')
        queue = PriorityQueue()
        queue.put((1, result, env))
        self.sock.recv(IsA(int)).AndReturn('220 Welcome\r\n')
        self.sock.sendall('EHLO test\r\n')
        self.sock.recv(IsA(int)).AndReturn('250-Hello\r\n250 PIPELINING\r\n')
        self.sock.sendall('MAIL FROM:<*****@*****.**>\r\nRCPT TO:<*****@*****.**>\r\nDATA\r\n')
        self.sock.recv(IsA(int)).AndReturn('250 Ok\r\n250 Ok\r\n354 Go ahead\r\n')
        self.sock.sendall('From: [email protected]\r\n\r\ntest test\r\n.\r\n')
        self.sock.recv(IsA(int)).AndReturn('250 Ok\r\n')
        result.set(True)
        self.sock.sendall('QUIT\r\n')
        self.sock.recv(IsA(int)).AndReturn('221 Goodbye\r\n')
        self.sock.close()
        self.mox.ReplayAll()
        client = SmtpRelayClient(None, queue, socket_creator=self._socket_creator, ehlo_as='test')
        client._run()
Example #6
class AutoHack():
    def __init__(self, zoomeye_results, threads_num):
        self.threads_num = threads_num
        self.targets = PriorityQueue()
        self.zoomeye_results = zoomeye_results
        self.result = []
        
        for zoomeye_result in zoomeye_results:
            self.targets.put(zoomeye_result)
        self.total = self.targets.qsize()
        self.pbar = tqdm(total=self.total,ascii=True)


    def check(self):
        while self.targets.qsize() > 0:
            target = self.targets.get().strip()
            try:
                self.pbar.update(1)
                result = exp.exp(target)
                if result:
                    self.result.append(result)
            except Exception as e:
                #print(e)
                pass


    def run(self):
        threads = [gevent.spawn(self.check) for i in range(self.threads_num)]
        try:
            gevent.joinall(threads)
        except KeyboardInterrupt as e:
            print ('[WARNING] User aborted')
            for res in self.result:
                print (res)
        self.pbar.close()
        print ("Hack it!")
        for res in self.result:
            print (res)
        print("Found ",len(self.result))
        print ("End!")
Example #7
class Muiti(object):
    """
    :l: [(arg1, arg2), (arg1, arg2), ...]
    :func: callee function
    :num: coroutine number
    """
    def __init__(self, l, func, num=20):
        self.queue = PriorityQueue()
        for item in l:
            self.queue.put(item)
        self.num = num
        self.func = func
        self.stop = False
        self.results = PriorityQueue()

    def _do(self):
        while not self.stop:
            try:
                item = self.queue.get_nowait()
            except Exception as e:
                self.stop = True
                continue

            value = None
            if isinstance(item, tuple):
                value = self.func(*item)
            else:
                value = self.func(item)

            self.results.put(value)

    def get_result(self):
        return list(self.results.queue)

    def start(self):
        threads = [gevent.spawn(self._do) for i in range(self.num)]
        gevent.joinall(threads)

    def terminate(self):
        self.stop = True
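A brief usage sketch for the class above (the callee and the argument list are illustrative):

def add(a, b):
    return a + b

multi = Muiti([(1, 2), (3, 4), (5, 6)], add, num=2)
multi.start()              # blocks until the queue is drained
print(multi.get_result())  # e.g. [3, 7, 11] (heap order, smallest first)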
Example #8
class _Channel(object):
    def __init__(self, from_fsm, to_fsm, tracer, queue=None):
        if queue is None:
            self.queue = PriorityQueue()
        else:
            self.queue = queue
        self.from_fsm = from_fsm
        self.to_fsm = to_fsm
        self.tracer = tracer

    def put(self, priority_order_item):
        priority, order, item = priority_order_item
        self.tracer.send_trace_message(
            messages.ChannelTrace(
                self.tracer.trace_order_seq(),
                self.from_fsm.fsm_id if self.from_fsm else None,
                self.to_fsm.fsm_id if self.to_fsm else None, item.name))
        self.queue.put(priority_order_item)

    def get(self, block=True, timeout=None):
        return self.queue.get(block, timeout)

    receive = get
Example #9
class TestBasicCouchDBFilter(unittest.TestCase):

    config = deepcopy(TEST_CONFIG['main'])
    config['storage_config']['bulk_query_limit'] = 1

    def setUp(self):
        self.old_date_modified = datetime.now().isoformat()
        self.id_1 = uuid4().hex
        self.date_modified_1 = datetime.now().isoformat()
        self.id_2 = uuid4().hex
        self.date_modified_2 = datetime.now().isoformat()
        self.id_3 = uuid4().hex
        self.date_modified_3 = datetime.now().isoformat()
        self.queue = PriorityQueue()
        self.input_queue = PriorityQueue()
        self.db = MagicMock()
        self.bulk = {
            self.id_1: self.date_modified_1,
            self.id_2: self.date_modified_2,
            self.id_3: self.date_modified_3
        }
        self.priority_cache = {self.id_1: 1, self.id_2: 1, self.id_3: 1}
        self.return_value = [
            munchify({'id': self.id_1, 'key': self.date_modified_1}),
            munchify({'id': self.id_2, 'key': self.old_date_modified}),
            munchify({'id': self.id_3, 'key': self.old_date_modified})
        ]
        self.db.db.view.return_value = self.return_value

    def test__check_bulk(self):
        self.queue.put((1000, self.id_3))
        couchdb_filter = BasicCouchDBFilter(self.config, self.input_queue, self.queue, self.db)
        self.assertEqual(self.queue.qsize(), 1)

        couchdb_filter._check_bulk(self.bulk, self.priority_cache)
        self.assertEqual(self.queue.qsize(), 2)

        self.db.db.view.side_effect = [Exception(), Exception(), Exception('test')]
        self.bulk = {}
        with self.assertRaises(Exception) as e:
            couchdb_filter._check_bulk(self.bulk, self.priority_cache)
        self.assertEqual(e.exception.message, 'test')

    @patch('openprocurement.bridge.basic.filters.INFINITY')
    def test__run(self, mocked_infinity):
        couchdb_filter = BasicCouchDBFilter(self.config, self.input_queue, self.queue, self.db)
        self.input_queue.put((1, {'id': self.id_1, 'dateModified': self.date_modified_1}))
        self.input_queue.put((1, {'id': self.id_2, 'dateModified': self.date_modified_2}))
        self.input_queue.put((1, {'id': self.id_3, 'dateModified': self.date_modified_3}))
        mocked_infinity.__nonzero__.side_effect = [True] * 5 + [False, False]
        self.assertEqual(self.queue.qsize(), 0)
        self.assertEqual(self.input_queue.qsize(), 3)

        couchdb_filter._run()
        self.assertEqual(self.queue.qsize(), 2)
        self.assertEqual(self.input_queue.qsize(), 0)
Example #10
class Actor(Greenlet):
    """Simple implementation of the Actor pattern
    """

    def __init__(self):
        self.inbox = PriorityQueue()
        self._handlers = {ShutdownRequest: self.receive_shutdown}
        Greenlet.__init__(self)

    def receive(self, msg):
        """Dispatch a received message to the appropriate type handler
        """
        #log.debug("Received a message: " + repr(msg))
        cls = msg.__class__
        if cls in self._handlers.keys():
            self._handlers[cls](msg)
        else:
            raise NotImplementedError()

    def receive_shutdown(self, msg):
        self.running = False

    def send(self, msg, priority=50):
        """Place a message into the actor's inbox
        """
        self.inbox.put((priority, msg))

    def _run(self):
        """Run the Actor in a blocking event loop
        """
        self.running = True

        while self.running:
            prio, msg = self.inbox.get()
            self.receive(msg)
            del msg
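A hedged usage sketch: subclass Actor and register a handler for a custom message type. Ping and its handler are illustrative; ShutdownRequest is assumed to come from the surrounding module:

class Ping(object):
    pass

class PingActor(Actor):
    def __init__(self):
        Actor.__init__(self)
        self._handlers[Ping] = self.receive_ping  # register the type handler

    def receive_ping(self, msg):
        print('pong')

actor = PingActor()
actor.start()
actor.send(Ping(), priority=10)  # handled before the default-priority shutdown
actor.send(ShutdownRequest())
actor.join()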
Example #11
class PriorityBlockingQueue(object):
    """
    带优先级的阻塞队列。
    优先级数字越小,优先级越高。

    插入元素:
    * put: 向队列尾部插入一个元素,如果该队列已满,则一直阻塞。
    * offer: 向队列尾部插入一个元素,插入成功返回True。插入失败返回False。

    获取元素:
    * poll: 获取并移除队列的头元素,若队列为空,则返回null。
    * take: 获取并移除队列的头元素,若队列为空,则一直阻塞。
    * peek:获取但不移除队列的头元素,若队列为空,则返回null

    队列状态状态:
    * qsize:获取队列中当前元素数量
    * maxsize:获取队列的最大容量
    """
    def __init__(self, maxsize: int = None):
        """
        init
        :param maxsize: 队列的最大容量
        """
        self.__queue = PriorityQueue(maxsize=maxsize)

    def put(self, item, priority: int = 200) -> None:
        """
        向队列尾部插入一个元素,如果该队列已满,则一直阻塞。
        :param item:
        :param priority: 优先级
        :return:
        """
        while True:
            try:
                self.__queue.put(PriorityEntry(priority, item))
                break
            except Exception as e:
                logger.debug("put data failed error -> {0}".format(e))
            time.sleep(0.5)

    def offer(self, item, priority: int = 200) -> bool:
        """
        向队列尾部插入一个元素,插入成功返回True。插入失败返回False。
        :param item: 元素
        :param priority: 优先级
        :return:
        """
        try:
            self.__queue.put(PriorityEntry(priority, item), block=False)
            return True
        except Exception as e:
            logger.debug("offer data failed error -> {0}".format(e))
        return False

    def poll(self):
        """
        获取并移除队列的头元素,若队列为空,则返回null。
        :return:
        """
        try:
            return self.__queue.get(block=False).data
        except Exception as e:
            logger.debug("poll data failed error -> {0}".format(e))
        return None

    def take(self):
        """
        获取并移除队列的头元素,若队列为空,则一直阻塞。
        :return:
        """
        while True:
            try:
                return self.__queue.get().data
            except Exception as e:
                logger.debug("take data failed error -> {0}".format(e))
            time.sleep(0.5)

    def peek(self):
        """
        获取但不移除队列的头元素,若队列为空,则返回null
        :return:
        """
        try:
            return self.__queue.peek(block=False).data
        except Exception as e:
            logger.debug("peek data failed error -> {0}".format(e))
        return None

    def qsize(self) -> int:
        """
        获取队列中当前元素数量
        :return:
        """
        return self.__queue.qsize()

    def maxsize(self) -> int:
        """
        获取队列的最大容量
        :return:
        """
        return self.__queue.maxsize
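The PriorityEntry wrapper used above is defined outside this excerpt. A minimal sketch of its assumed shape, which keeps the payload out of heap comparisons:

import functools

@functools.total_ordering
class PriorityEntry(object):
    def __init__(self, priority, data):
        self.priority = priority
        self.data = data  # read back via .data in poll/take/peek above

    def __eq__(self, other):
        return self.priority == other.priority

    def __lt__(self, other):
        # Compare on priority alone, so queued payloads never need to be
        # comparable with each other.
        return self.priority < other.priority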
Example #12
class FactoryPool(object):
    def __init__(self, factory, maxsize=200, timeout=60):
        self.factory = factory
        self.maxsize = maxsize
        self.timeout = timeout
        self.clients = PriorityQueue(maxsize)
        # If there is a maxsize, prime the queue with empty slots.
        if maxsize is not None:
            for _ in xrange(maxsize):
                self.clients.put(EMPTY_SLOT)

    @contextlib.contextmanager
    def reserve(self):
        """Context-manager to obtain a Client object from the pool."""
        ts, client = self._checkout_connection()
        try:
            yield client
        finally:
            self._checkin_connection(ts, client)

    def _checkout_connection(self):
        # If there's no maxsize, no need to block waiting for a connection.
        blocking = self.maxsize is not None

        # Loop until we get a non-stale connection, or we create a new one.
        while True:
            try:
                ts, client = self.clients.get(blocking)
            except Empty:
                # No maxsize and no free connections, create a new one.
                # XXX TODO: we should be using a monotonic clock here.
                # see http://www.python.org/dev/peps/pep-0418/
                now = int(time.time())
                return now, self.factory()
            else:
                now = int(time.time())
                # If we got an empty slot placeholder, create a new connection.
                if client is None:
                    return now, self.factory()
                # If the connection is not stale, go ahead and use it.
                if ts + self.timeout > now:
                    return ts, client
                # Otherwise, the connection is stale.
                # Close it, push an empty slot onto the queue, and retry.
                if hasattr(client, 'disconnect'):
                    client.disconnect()

                self.clients.put(EMPTY_SLOT)
                continue

    def _checkin_connection(self, ts, client):
        """Return a connection to the pool."""
        if hasattr(client, '_closed') and client._closed:
            self.clients.put(EMPTY_SLOT)
            return

        # If the connection is now stale, don't return it to the pool.
        # Push an empty slot instead so that it will be refreshed when needed.
        now = int(time.time())
        if ts + self.timeout > now:
            self.clients.put((ts, client))
        else:
            if self.maxsize is not None:
                self.clients.put(EMPTY_SLOT)
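EMPTY_SLOT is defined outside this excerpt; a (timestamp, client) placeholder like the one below fits the unpacking in _checkout_connection. Usage goes through the reserve() context manager (the factory and the client call are illustrative):

EMPTY_SLOT = (0, None)  # ts=0 sorts first; client=None signals "create a new one"

pool = FactoryPool(factory=make_client, maxsize=10, timeout=60)  # make_client is hypothetical
with pool.reserve() as client:
    client.ping()  # hypothetical call on the pooled client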
Example #13
class SubNameBrute(object):
    def __init__(self, *params):
        self.domain, self.options, self.process_num, self.dns_servers, self.next_subs, \
            self.scan_count, self.found_count, self.queue_size_array, tmp_dir = params
        self.dns_count = len(self.dns_servers)
        self.scan_count_local = 0
        self.found_count_local = 0
        self.resolvers = [dns.resolver.Resolver(configure=False) for _ in range(self.options.threads)]
        for r in self.resolvers:
            r.lifetime = 4
            r.timeout = 10.0
        self.queue = PriorityQueue()
        self.priority = 0
        self.ip_dict = {}
        self.found_subs = set()
        self.timeout_subs = {}
        self.count_time = time.time()
        self.outfile = open('%s/%s_part_%s.txt' % (tmp_dir, self.domain, self.process_num), 'w')
        self.normal_names_set = set()
        self.load_sub_names()
        self.lock = RLock()
        self.threads_status = ['1'] * self.options.threads

    def load_sub_names(self):
        normal_lines = []
        wildcard_lines = []
        wildcard_set = set()
        regex_list = []
        lines = set()
        with open(self.options.file) as inFile:
            for line in inFile.readlines():
                sub = line.strip()
                if not sub or sub in lines:
                    continue
                lines.add(sub)

                brace_count = sub.count('{')
                if brace_count > 0:
                    wildcard_lines.append((brace_count, sub))
                    sub = sub.replace('{alphnum}', '[a-z0-9]')
                    sub = sub.replace('{alpha}', '[a-z]')
                    sub = sub.replace('{num}', '[0-9]')
                    if sub not in wildcard_set:
                        wildcard_set.add(sub)
                        regex_list.append('^' + sub + '$')
                else:
                    normal_lines.append(sub)
                    self.normal_names_set.add(sub)

        if regex_list:
            pattern = '|'.join(regex_list)
            _regex = re.compile(pattern)
            for line in normal_lines[:]:    # iterate over a copy: removing items
                if _regex.search(line):     # from the list being iterated would skip entries
                    normal_lines.remove(line)

        for _ in normal_lines[self.process_num::self.options.process]:
            self.queue.put((0, _))    # priority set to 0
        for _ in wildcard_lines[self.process_num::self.options.process]:
            self.queue.put(_)

    def scan(self, j):
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]] + self.dns_servers

        while True:
            try:

                if time.time() - self.count_time > 1.0:
                    self.lock.acquire()
                    self.scan_count.value += self.scan_count_local
                    self.scan_count_local = 0
                    self.queue_size_array[self.process_num] = self.queue.qsize()
                    if self.found_count_local:
                        self.found_count.value += self.found_count_local
                        self.found_count_local = 0
                    self.count_time = time.time()
                    self.lock.release()
                brace_count, sub = self.queue.get_nowait()
                self.threads_status[j] = '1'
                if brace_count > 0:
                    brace_count -= 1
                    if sub.find('{next_sub}') >= 0:
                        for _ in self.next_subs:
                            self.queue.put((0, sub.replace('{next_sub}', _)))
                    if sub.find('{alphnum}') >= 0:
                        for _ in 'abcdefghijklmnopqrstuvwxyz0123456789':
                            self.queue.put((brace_count, sub.replace('{alphnum}', _, 1)))
                    elif sub.find('{alpha}') >= 0:
                        for _ in 'abcdefghijklmnopqrstuvwxyz':
                            self.queue.put((brace_count, sub.replace('{alpha}', _, 1)))
                    elif sub.find('{num}') >= 0:
                        for _ in '0123456789':
                            self.queue.put((brace_count, sub.replace('{num}', _, 1)))
                    continue
            except gevent.queue.Empty as e:
                self.threads_status[j] = '0'
                gevent.sleep(0.5)
                if '1' not in self.threads_status:
                    break
                else:
                    continue

            try:

                if sub in self.found_subs:
                    continue

                self.scan_count_local += 1
                cur_domain = sub + '.' + self.domain
                answers = self.resolvers[j].query(cur_domain)

                if answers:
                    self.found_subs.add(sub)
                    ips = ', '.join(sorted([answer.address for answer in answers]))
                    if ips in ['1.1.1.1', '127.0.0.1', '0.0.0.0', '0.0.0.1']:
                        continue
                    if self.options.i and is_intranet(answers[0].address):
                        continue

                    try:
                        self.scan_count_local += 1
                        answers = self.resolvers[j].query(cur_domain, 'cname')
                        cname = answers[0].target.to_unicode().rstrip('.')
                        if cname.endswith(self.domain) and cname not in self.found_subs:
                            cname_sub = cname[:len(cname) - len(self.domain) - 1]    # new sub
                            if cname_sub not in self.normal_names_set:
                                self.found_subs.add(cname)
                                self.queue.put((0, cname_sub))
                    except Exception as e:
                        pass

                    first_level_sub = sub.split('.')[-1]
                    max_found = 20

                    if self.options.w:
                        first_level_sub = ''
                        max_found = 3

                    if (first_level_sub, ips) not in self.ip_dict:
                        self.ip_dict[(first_level_sub, ips)] = 1
                    else:
                        self.ip_dict[(first_level_sub, ips)] += 1
                        if self.ip_dict[(first_level_sub, ips)] > max_found:
                            continue

                    self.found_count_local += 1

                    self.outfile.write(cur_domain.ljust(30) + '\t' + ips + '\n')
                    self.outfile.flush()
                    try:
                        self.scan_count_local += 1
                        self.resolvers[j].query('lijiejie-test-not-existed.' + cur_domain)
                    except (dns.resolver.NXDOMAIN, ) as e:    # dns.resolver.NoAnswer
                        if self.queue.qsize() < 50000:
                            for _ in self.next_subs:
                                self.queue.put((0, _ + '.' + sub))
                        else:
                            self.queue.put((1, '{next_sub}.' + sub))
                    except Exception as e:
                        pass

            except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
                pass
            except dns.resolver.NoNameservers as e:
                self.queue.put((0, sub))    # Retry
            except dns.exception.Timeout as e:
                self.timeout_subs[sub] = self.timeout_subs.get(sub, 0) + 1
                if self.timeout_subs[sub] <= 1:
                    self.queue.put((0, sub))    # Retry
            except Exception as e:
                import traceback
                traceback.print_exc()
                with open('errors.log', 'a') as errFile:
                    errFile.write('[%s] %s\n' % (type(e), str(e)))

    def run(self):
        threads = [gevent.spawn(self.scan, i) for i in range(self.options.threads)]
        gevent.joinall(threads)
Example #14
class SubNameBrute:
    def __init__(self, target, args, process_num, dns_servers, next_subs,
                 scan_count, found_count, queue_size_list, tmp_dir):
        self.target = target.strip()
        self.args = args
        self.process_num = process_num
        self.dns_servers = dns_servers
        self.dns_count = len(dns_servers)
        self.next_subs = next_subs
        self.scan_count = scan_count
        self.scan_count_local = 0
        self.found_count = found_count
        self.found_count_local = 0
        self.queue_size_list = queue_size_list

        self.resolvers = [
            dns.resolver.Resolver(configure=False) for _ in range(args.threads)
        ]
        for _r in self.resolvers:
            _r.lifetime = _r.timeout = 6.0

        self.queue = PriorityQueue()
        self.item_index = 0
        self.priority = 0
        self._load_sub_names()
        self.ip_dict = {}
        self.found_subs = set()
        self.ex_resolver = dns.resolver.Resolver(configure=False)
        self.ex_resolver.nameservers = dns_servers
        self.local_time = time.time()
        self.outfile = open(
            '%s/%s_part_%s.txt' % (tmp_dir, target, process_num), 'w')
        self.outfile_html = open('tmp/%s_html_%s.txt' % (target, process_num),
                                 'w')

    def _load_sub_names(self):
        if self.args.full_scan and self.args.file == 'subnames.txt':
            _file = 'Dict/subnames_full.txt'
        else:
            if os.path.exists(self.args.file):
                _file = self.args.file
            elif os.path.exists('Dict/%s' % self.args.file):
                _file = 'Dict/%s' % self.args.file
            else:
                print_msg('[ERROR] Names file not found: %s' % self.args.file)
                exit(-1)

        normal_lines = []
        lines = set()
        with open(_file) as f:
            for line in f.xreadlines():
                sub = line.strip()
                if not sub or sub in lines:
                    continue
                lines.add(sub)
                normal_lines.append(sub)

        for item in normal_lines[self.process_num::self.args.process]:
            self.priority += 1
            self.queue.put((self.priority, item))

    def _scan(self, j):
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]]
        while not self.queue.empty():
            try:
                item = self.queue.get()[1]
                self.scan_count_local += 1
                if time.time() - self.local_time > 3.0:
                    self.scan_count.value += self.scan_count_local
                    self.scan_count_local = 0
                    self.queue_size_list[self.process_num] = self.queue.qsize()
            except Exception as e:
                break
            try:
                if item.find('{next_sub}') >= 0:
                    for _ in self.next_subs:
                        self.queue.put((0, item.replace('{next_sub}', _, 1)))
                    continue
                else:
                    sub = item

                if sub in self.found_subs:
                    continue

                cur_sub_domain = sub + '.' + self.target
                _sub = sub.split('.')[-1]
                try:
                    answers = self.resolvers[j].query(cur_sub_domain)
                except dns.resolver.NoAnswer as e:
                    answers = self.ex_resolver.query(cur_sub_domain)

                if answers:
                    self.found_subs.add(sub)
                    ips = ', '.join(
                        sorted([answer.address for answer in answers]))
                    if ips in ['1.1.1.1', '127.0.0.1', '0.0.0.0']:
                        continue

                    if self.args.intranet and is_intranet(answers[0].address):
                        continue

                    try:
                        self.scan_count_local += 1
                        answers = self.resolvers[j].query(
                            cur_sub_domain, 'cname')
                        cname = answers[0].target.to_unicode().rstrip('.')
                        if cname.endswith(
                                self.target) and cname not in self.found_subs:
                            self.found_subs.add(cname)
                            cname_sub = cname[:len(cname) - len(self.target) -
                                              1]
                            self.queue.put((0, cname_sub))

                    except:
                        pass

                    if (_sub, ips) not in self.ip_dict:
                        self.ip_dict[(_sub, ips)] = 1
                    else:
                        self.ip_dict[(_sub, ips)] += 1
                        if self.ip_dict[(_sub, ips)] > 30:
                            continue

                    self.found_count_local += 1
                    if time.time() - self.local_time > 3.0:
                        self.found_count.value += self.found_count_local
                        self.found_count_local = 0
                        self.queue_size_list[
                            self.process_num] = self.queue.qsize()
                        self.local_time = time.time()

                    self.outfile_html.write(
                        PrintHtml.Sub_html_print(cur_sub_domain, ips))
                    self.outfile_html.flush()
                    self.outfile.write(
                        cur_sub_domain.ljust(30) + '\t' + ips + '\n')
                    self.outfile.flush()
                    try:
                        self.resolvers[j].query('myzxcghelloha.' +
                                                cur_sub_domain)
                    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
                        self.queue.put((999999999, '{next_sub}.' + sub))
                    except:
                        pass

            except (dns.resolver.NXDOMAIN, dns.name.EmptyLabel) as e:
                pass
            except (dns.resolver.NoNameservers, dns.resolver.NoAnswer,
                    dns.exception.Timeout) as e:
                pass
Example #15
class AWSLogs(object):

    ACTIVE = 1
    EXHAUSTED = 2
    WATCH_SLEEP = 2

    def __init__(self, **kwargs):
        self.connection_cls = kwargs.get('connection_cls', AWSConnection)
        self.aws_region = kwargs.get('aws_region')
        self.aws_access_key_id = kwargs.get('aws_access_key_id')
        self.aws_secret_access_key = kwargs.get('aws_secret_access_key')
        self.log_group_name = kwargs.get('log_group_name')
        self.log_stream_name = kwargs.get('log_stream_name')
        self.watch = kwargs.get('watch')
        self.color_enabled = kwargs.get('color_enabled')
        self.output_stream_enabled = kwargs.get('output_stream_enabled')
        self.output_group_enabled = kwargs.get('output_group_enabled')
        self.start = self.parse_datetime(kwargs.get('start'))
        self.end = self.parse_datetime(kwargs.get('end'))
        self.pool_size = max(kwargs.get('pool_size', 0), 10)
        self.max_group_length = 0
        self.max_stream_length = 0
        self.publishers = []
        self.events_queue = Queue()
        self.raw_events_queue = PriorityQueue()
        self.publishers_queue = PriorityQueue()
        self.publishers = []
        self.stream_status = {}
        self.stream_max_timestamp = {}

        self.connection = self.connection_cls(
            self.aws_region,
            aws_access_key_id=self.aws_access_key_id,
            aws_secret_access_key=self.aws_secret_access_key)

    def _get_streams_from_patterns(self, log_group_pattern,
                                   log_stream_pattern):
        """Returns pairs of group, stream matching ``log_group_pattern`` and
        ``log_stream_pattern``."""
        for group in self._get_groups_from_pattern(log_group_pattern):
            for stream in self._get_streams_from_pattern(
                    group, log_stream_pattern):
                yield group, stream

    def _get_groups_from_pattern(self, pattern):
        """Returns groups matching ``pattern``."""
        pattern = '.*' if pattern == 'ALL' else pattern
        reg = re.compile('^{0}'.format(pattern))
        for group in self.get_groups():
            if re.match(reg, group):
                yield group

    def _get_streams_from_pattern(self, group, pattern):
        """Returns streams in ``group`` matching ``pattern``."""
        pattern = '.*' if pattern == 'ALL' else pattern
        reg = re.compile('^{0}'.format(pattern))
        for stream in self.get_streams(group):
            if re.match(reg, stream):
                yield stream

    def _publisher_queue_consumer(self):
        """Consume ``publishers_queue`` api calls, run them and publish log
        events to ``raw_events_queue``. If ``nextForwardToken`` is present
        register a new api call into ``publishers_queue`` using as weight
        the timestamp of the latest event."""
        while True:
            try:
                _, (log_group_name, log_stream_name,
                    next_token) = self.publishers_queue.get(block=False)
            except Empty:
                if self.watch:
                    gevent.sleep(self.WATCH_SLEEP)
                    continue
                else:
                    break

            response = self.connection.get_log_events(
                next_token=next_token,
                log_group_name=log_group_name,
                log_stream_name=log_stream_name,
                start_time=self.start,
                end_time=self.end,
                start_from_head=True)

            if not len(response['events']):
                self.stream_status[(log_group_name,
                                    log_stream_name)] = self.EXHAUSTED
                continue

            self.stream_status[(log_group_name, log_stream_name)] = self.ACTIVE

            for event in response['events']:
                event['group'] = log_group_name
                event['stream'] = log_stream_name
                self.raw_events_queue.put((event['timestamp'], event))
                self.stream_max_timestamp[(
                    log_group_name, log_stream_name)] = event['timestamp']

            if 'nextForwardToken' in response:
                self.publishers_queue.put((response['events'][-1]['timestamp'],
                                           (log_group_name, log_stream_name,
                                            response['nextForwardToken'])))

    def _get_min_timestamp(self):
        """Return the minimum timestamp available across all active streams."""
        pending = [
            self.stream_max_timestamp[k]
            for k, v in self.stream_status.iteritems() if v != self.EXHAUSTED
        ]
        return min(pending) if pending else None

    def _get_all_streams_exhausted(self):
        """Return if all streams are exhausted."""
        return all(
            (s == self.EXHAUSTED for s in self.stream_status.itervalues()))

    def _raw_events_queue_consumer(self):
        """Consume events from ``raw_events_queue`` if all active streams
        have already publish events up to the ``_get_min_timestamp`` and
        register them in order into ``events_queue``."""
        while True:
            if self._get_all_streams_exhausted(
            ) and self.raw_events_queue.empty():
                if self.watch:
                    gevent.sleep(self.WATCH_SLEEP)
                    continue
                self.events_queue.put(NO_MORE_EVENTS)
                break

            try:
                timestamp, line = self.raw_events_queue.peek(timeout=1)
            except Empty:
                continue

            min_timestamp = self._get_min_timestamp()
            if min_timestamp and min_timestamp < timestamp:
                gevent.sleep(0.3)
                continue

            timestamp, line = self.raw_events_queue.get()

            output = [line['message']]
            if self.output_stream_enabled:
                output.insert(
                    0,
                    self.color(
                        line['stream'].ljust(self.max_stream_length, ' '),
                        'cyan'))
            if self.output_group_enabled:
                output.insert(
                    0,
                    self.color(line['group'].ljust(self.max_group_length, ' '),
                               'green'))
            self.events_queue.put("{0}\n".format(' '.join(output)))

    def _events_consumer(self):
        """Print events from ``events_queue`` as soon as they are available."""
        while True:
            event = self.events_queue.get(True)
            if event == NO_MORE_EVENTS:
                break
            sys.stdout.write(event)
            sys.stdout.flush()

    def list_logs(self):
        self.register_publishers()

        pool = Pool(size=self.pool_size)
        pool.spawn(self._raw_events_queue_consumer)
        pool.spawn(self._events_consumer)

        if self.watch:
            pool.spawn(self.register_publishers_periodically)

        for i in xrange(self.pool_size):
            pool.spawn(self._publisher_queue_consumer)
        pool.join()

    def register_publishers(self):
        """Register publishers into ``publishers_queue``."""
        for group, stream in self._get_streams_from_patterns(
                self.log_group_name, self.log_stream_name):
            if (group, stream) in self.publishers:
                continue
            self.publishers.append((group, stream))
            self.max_group_length = max(self.max_group_length, len(group))
            self.max_stream_length = max(self.max_stream_length, len(stream))
            self.publishers_queue.put((0, (group, stream, None)))
            self.stream_status[(group, stream)] = self.ACTIVE
            self.stream_max_timestamp[(group, stream)] = -1

    def register_publishers_periodically(self):
        while True:
            self.register_publishers()
            gevent.sleep(2)

    def list_groups(self):
        """Lists available CloudWatch logs groups"""
        for group in self.get_groups():
            print group

    def list_streams(self, *args, **kwargs):
        """Lists available CloudWatch logs streams in ``log_group_name``."""
        for stream in self.get_streams(*args, **kwargs):
            print stream

    def get_groups(self):
        """Returns available CloudWatch logs groups"""
        next_token = None
        while True:
            response = self.connection.describe_log_groups(
                next_token=next_token)

            for group in response.get('logGroups', []):
                yield group['logGroupName']

            if 'nextToken' in response:
                next_token = response['nextToken']
            else:
                break

    def get_streams(self, log_group_name=None):
        """Returns available CloudWatch logs streams in ``log_group_name``."""
        log_group_name = log_group_name or self.log_group_name
        next_token = None
        window_start = self.start or 0
        window_end = self.end or sys.maxint

        while True:
            response = self.connection.describe_log_streams(
                log_group_name=log_group_name, next_token=next_token)

            for stream in response.get('logStreams', []):
                if max(stream['firstEventTimestamp'], window_start) <= \
                   min(stream['lastEventTimestamp'], window_end):
                    yield stream['logStreamName']

            if 'nextToken' in response:
                next_token = response['nextToken']
            else:
                break

    def color(self, text, color):
        """Returns coloured version of ``text`` if ``color_enabled``."""
        if self.color_enabled:
            return colored(text, color)
        return text

    def parse_datetime(self, datetime_text):
        """Parse ``datetime_text`` into a ``datetime``."""
        if not datetime_text:
            return None

        ago_match = re.match(
            r'(\d+)\s?(m|minute|minutes|h|hour|hours|d|day|days|w|week|weeks)(?: ago)?',
            datetime_text)
        if ago_match:
            amount, unit = ago_match.groups()
            amount = int(amount)
            unit = {'m': 60, 'h': 3600, 'd': 86400, 'w': 604800}[unit[0]]
            date = datetime.now() + timedelta(seconds=unit * amount * -1)
        else:
            try:
                date = parse(datetime_text)
            except ValueError:
                raise exceptions.UnknownDateError(datetime_text)

        return int(date.strftime("%s")) * 1000
Example #17
class TestContractProformaFilter(unittest.TestCase):
    conf = CONFIG
    db = {}

    @patch('openprocurement.bridge.templatesregistry.filters.INFINITY')
    def test_init(self, infinity):
        self.input_queue = PriorityQueue()
        self.filtered_queue = PriorityQueue()

        filter = ContractProformaFilter(self.conf, self.input_queue,
                                        self.filtered_queue, self.db)

        # Valid tender filtering
        infinity.__nonzero__.side_effect = [True, False]
        doc = {
            'id': 'test_id',
            'dateModified': '1970-01-02',
            'procurementMethodType': 'dgf',
            'status': 'status1',
            'documents': [{
                'documentType': 'contractProforma'
            }]
        }

        self.input_queue.put((None, deepcopy(doc)))
        filter._run()
        self.assertEqual(len(self.filtered_queue), 1)
        filtered_doc = self.filtered_queue.get(block=False)
        self.assertEqual(doc, filtered_doc[1])

        # Not changed dateModified
        infinity.__nonzero__.side_effect = [True, False]
        doc = {
            'id': 'test_id',
            'dateModified': '1970-01-01',
            'procurementMethodType': 'dgf',
            'status': 'status1',
            'documents': [{
                'documentType': 'contractProforma'
            }],
        }

        self.input_queue.put((None, deepcopy(doc)))
        self.db['test_id'] = '1970-01-01'

        filter._run()
        self.assertEqual(len(self.filtered_queue), 0)

        self.db.pop('test_id')

        # No contractProforma doc
        infinity.__nonzero__.side_effect = [True, False]
        doc = {
            'id': 'test_id',
            'dateModified': '1970-01-01',
            'procurementMethodType': 'dgf',
            'status': 'status1',
            'documents': [{
                'documentType': 'notContractProforma'
            }],
        }

        filter._run()
        self.assertEqual(len(self.filtered_queue), 0)

        # Wrong tender status
        infinity.__nonzero__.side_effect = [True, False]
        doc = {
            'id': 'test_id',
            'dateModified': '1970-01-01',
            'procurementMethodType': 'dgf',
            'status': 'status3',
            'documents': [{
                'documentType': 'contractProforma'
            }],
        }

        self.input_queue.put((None, deepcopy(doc)))

        filter._run()
        self.assertEqual(len(self.filtered_queue), 0)

        # Wrong procurementMethodType without appropriate status
        infinity.__nonzero__.side_effect = [True, False]
        doc = {
            'id': 'test_id',
            'dateModified': '1970-01-01',
            'procurementMethodType': 'other_pmt',
            'status': 'status1',
            'documents': [{
                'documentType': 'contractProforma'
            }],
        }
        self.input_queue.put((None, deepcopy(doc)))

        filter._run()
        self.assertEqual(len(self.filtered_queue), 0)

        # Wrong procurementMethodType with appropriate status
        infinity.__nonzero__.side_effect = [True, False]
        doc = {
            'id': 'test_id',
            'dateModified': '1970-01-01',
            'procurementMethodType': 'other_pmt',
            'status': 'status2',
            'documents': [{
                'documentType': 'contractProforma'
            }],
        }
        self.input_queue.put((None, deepcopy(doc)))

        filter._run()
        self.assertEqual(len(self.filtered_queue), 1)
Example #18
class ConnectionPool(object):
    def __init__(self,
                 factory,
                 retry_max=3,
                 retry_delay=.1,
                 timeout=-1,
                 max_lifetime=600.,
                 max_size=10,
                 options=None):
        self.max_size = max_size
        self.pool = PriorityQueue()
        self.size = 0
        self.factory = factory
        self.retry_max = retry_max
        self.retry_delay = retry_delay
        self.timeout = timeout
        self.max_lifetime = max_lifetime
        if options is None:
            self.options = {}
        else:
            self.options = options

    def too_old(self, conn):
        return time.time() - conn.get_lifetime() > self.max_lifetime

    def release_connection(self, conn):
        connected = conn.is_connected()
        if connected and not self.too_old(conn):
            self.pool.put((conn.get_lifetime(), conn))
        else:
            conn.invalidate()

    def get(self, **options):
        pool = self.pool

        # first let's try to find a matching one
        found = None
        if self.size >= self.max_size or pool.qsize():
            for priority, candidate in pool:
                if self.too_old(candidate):
                    # let's drop it
                    continue

                matches = candidate.matches(**options)
                if not matches:
                    # let's put it back
                    pool.put((priority, candidate))
                else:
                    found = candidate
                    break

        # we got one.. we use it
        if found is not None:
            return found

        # we build a new one and send it back
        tries = 0
        last_error = None

        while tries < self.retry_max:
            self.size += 1
            try:
                new_item = self.factory(**options)
            except Exception as e:
                self.size -= 1
                last_error = e
            else:
                # we should be connected now
                if new_item.is_connected():
                    return new_item

            tries += 1
            gevent.sleep(self.retry_delay)

        if last_error is None:
            raise MaxTriesError()
        else:
            raise last_error
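Checkout and checkin with the pool above are manual. A hedged usage sketch (create_connection and the send call are illustrative):

pool = ConnectionPool(factory=create_connection, max_size=5)
conn = pool.get()
try:
    conn.send(b'ping')  # hypothetical call on the pooled connection
finally:
    pool.release_connection(conn)  # re-queued unless disconnected or too old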
Example #19
class SyncClient:
    idle = idle
    backward_class = BackwardWorker
    forward_class = ForwardWorker

    def __init__(self,
                 host_url,
                 resource,
                 auth=None,
                 params={},
                 headers=None,
                 retrievers_params=DEFAULT_RETRIEVERS_PARAMS,
                 adaptive=False,
                 with_priority=False):
        LOGGER.info(f'Init SyncClient for resource {resource}')
        self.host = host_url
        self.auth = auth
        self.resource = resource
        self.adaptive = adaptive
        self.headers = headers

        self.params = params
        self.retrievers_params = retrievers_params
        self.queue = PriorityQueue(maxsize=retrievers_params['queue_size'])

    def init_clients(self):
        self.backward_client = ResourceClient(self.host, self.resource,
                                              self.params, self.auth,
                                              self.headers)
        self.forward_client = ResourceClient(self.host, self.resource,
                                             self.params, self.auth,
                                             self.headers)

    def handle_response_data(self, data):
        for resource_item in data:
            self.queue.put(PrioritizedItem(1, resource_item))

    def worker_watcher(self):
        while True:
            if time() - self.heartbeat > DEFAULT_FORWARD_HEARTBEAT:
                self.restart_sync()
                LOGGER.warning(
                    'Restart sync, reason: last response from workers was more than 15 minutes ago.'
                )
            sleep(300)

    def start_sync(self):
        LOGGER.info('Start sync...')

        data = self.backward_client.get_resource_items(self.params)

        self.handle_response_data(data[f'{self.resource}s'])

        forward_params = deepcopy(self.params)
        forward_params.update({
            k: v[0]
            for k, v in parse.parse_qs(parse.urlparse(
                data.links.prev).query).items()
        })
        backward_params = deepcopy(self.params)
        backward_params.update({
            k: v[0]
            for k, v in parse.parse_qs(parse.urlparse(
                data.links.next).query).items()
        })

        self.forward_worker = self.forward_class(sync_client=self,
                                                 client=self.forward_client,
                                                 params=forward_params)
        self.backward_worker = self.backward_class(sync_client=self,
                                                   client=self.backward_client,
                                                   params=backward_params)
        self.workers = [self.forward_worker, self.backward_worker]

        for worker in self.workers:
            worker.start()
        self.heartbeat = time()
        self.watcher = spawn(self.worker_watcher)

    def restart_sync(self):
        """
        Restart retrieval from the OCDS API.
        """

        LOGGER.info('Restart workers')
        for worker in self.workers:
            worker.kill()
        self.watcher.kill()
        self.init_clients()
        self.start_sync()

    def get_resource_items(self):
        self.init_clients()
        self.start_sync()
        while True:
            if self.forward_worker.check() or self.backward_worker.check():
                self.restart_sync()
            while not self.queue.empty():
                LOGGER.debug(f'Sync queue size: {self.queue.qsize()}',
                             extra={'SYNC_QUEUE_SIZE': self.queue.qsize()})
                LOGGER.debug('Yield resource item',
                             extra={'MESSAGE_ID': 'sync_yield'})
                item = self.queue.get()
                yield item.data
            LOGGER.debug(f'Sync queue size: {self.queue.qsize()}',
                         extra={'SYNC_QUEUE_SIZE': self.queue.qsize()})
            try:
                self.queue.peek(block=True, timeout=0.1)
            except Empty:
                pass
Example #20
class JobSpider:
    def __init__(self, start_requests):
        self.start_request = start_requests
        self.domain = tldextract.extract(self.start_request.url).domain

        self.request_queue = PriorityQueue()
        self.result = {
            start_requests.url: 0,
        }
        self.gl_list = []
        self.stop_flag = False

    def start(self, number):
        resp = requests.get(self.start_request.url)
        if resp.status_code != 200:
            raise Exception('HTTPError<%d>' % resp.status_code)

        self.request_queue.put((0, self.start_request))
        for i in range(number):
            gl = gevent.spawn(self.downloader)
            self.gl_list.append(gl)
            gl.start()

    def stop(self):
        self.stop_flag = True

    def join(self):
        return gevent.joinall(self.gl_list)

    def downloader(self):
        a_re = re.compile(
            r'''<a.+?href=(['"])([^>\s]+)\1.*?>([\S\s]+?)<\/a>''',
            re.IGNORECASE)

        while not self.request_queue.empty():
            if self.stop_flag: break
            prio, request = self.request_queue.get()
            headers = {'User-Agent': choice(random_ua)}
            try:
                resp = requests.get(request.url, headers=headers)
            except Exception as e:
                continue

            encoding = chardet.detect(resp.content)['encoding']
            html_text = resp.content.decode(
                encoding) if encoding is not None else resp.text
            self.result[request.url] += calc_text_weight(html_text)
            if self.result[request.url] >= 100:
                self.stop()
                break

            if request.depth == max_depth:
                continue

            matches = a_re.findall(html_text)
            for each_a in matches:
                href = each_a[1]
                name = each_a[2]
                if href.startswith('javascript'): continue
                if href.startswith('/'): href = request.url + href
                if href.startswith('http'):
                    new_request = Request(href, request.depth + 1)
                    self.result[href] = calc_name_url_weight(name, href)
                    if tldextract.extract(href).domain == self.domain:
                        self.request_queue.put(
                            (-self.result[href], new_request))
                    elif self.result[href] >= 80:
                        self.request_queue.put(
                            (-self.result[href], new_request))
Example #21
class SubNameBrute:
    def __init__(self, target, options):
        self.start_time = time.time()
        self.target = target.strip()
        self.options = options
        self.ignore_intranet = options.i
        self.scan_count = self.found_count = 0
        self.console_width = getTerminalSize()[0] - 2
        self.resolvers = [dns.resolver.Resolver(configure=False) for _ in range(options.threads)]
        for _ in self.resolvers:
            _.lifetime = _.timeout = 10.0
        self.print_count = 0
        self.STOP_ME = False
        self._load_dns_servers()
        self._load_next_sub()
        self.queue = PriorityQueue()
        self.priority = 0
        self._load_sub_names()
        if options.output:
            outfile = options.output
        else:
            _name = os.path.basename(self.options.file).replace('subnames', '')
            if _name != '.txt':
                _name = '_' + _name
            outfile = target + _name if not options.full_scan else target + '_full' + _name
        self.outfile = open(outfile, 'w')
        self.ip_dict = {}
        self.found_subs = set()
        self.ex_resolver = dns.resolver.Resolver(configure=False)
        self.ex_resolver.nameservers = self.dns_servers

    def _load_dns_servers(self):
        print '[+] Validate DNS servers ...'
        self.dns_servers = []
        pool = Pool(30)
        for server in open('dict/dns_servers.txt').xreadlines():
            server = server.strip()
            if server:
                pool.apply_async(self._test_server, (server,))
        pool.join()

        self.dns_count = len(self.dns_servers)
        sys.stdout.write('\n')
        print '[+] Found %s available DNS Servers in total' % self.dns_count
        if self.dns_count == 0:
            print '[ERROR] No DNS Servers available.'
            sys.exit(-1)

    def _test_server(self, server):
        resolver = dns.resolver.Resolver()
        resolver.lifetime = resolver.timeout = 10.0
        try:
            resolver.nameservers = [server]
            answers = resolver.query('public-dns-a.baidu.com')    # test lookup of a known domain
            if answers[0].address != '180.76.76.76':
                raise Exception('incorrect DNS response')
            try:
                resolver.query('test.bad.dns.lijiejie.com')    # non-existent domain test
                with open('bad_dns_servers.txt', 'a') as f:
                    f.write(server + '\n')
                self._print_msg('[+] Bad DNS Server found %s' % server)
            except:
                self.dns_servers.append(server)
            self._print_msg('[+] Check DNS Server %s < OK >   Found %s' % (server.ljust(16), len(self.dns_servers)))
        except:
            self._print_msg('[+] Check DNS Server %s <Fail>   Found %s' % (server.ljust(16), len(self.dns_servers)))

    def _load_sub_names(self):
        self._print_msg('[+] Load sub names ...')
        if self.options.full_scan and self.options.file == 'subnames.txt':
            _file = 'dict/subnames_full.txt'
        else:
            if os.path.exists(self.options.file):
                _file = self.options.file
            elif os.path.exists('dict/%s' % self.options.file):
                _file = 'dict/%s' % self.options.file
            else:
                self._print_msg('[ERROR] Names file not exists: %s' % self.options.file)
                exit(-1)

        normal_lines = []
        wildcard_lines = []
        wildcard_list = []
        regex_list = []
        lines = set()
        with open(_file) as f:
            for line in f.xreadlines():
                sub = line.strip()
                if not sub or sub in lines:
                    continue
                lines.add(sub)

                if sub.find('{alphnum}') >= 0 or sub.find('{alpha}') >= 0 or sub.find('{num}') >= 0:
                    wildcard_lines.append(sub)
                    sub = sub.replace('{alphnum}', '[a-z0-9]')
                    sub = sub.replace('{alpha}', '[a-z]')
                    sub = sub.replace('{num}', '[0-9]')
                    if sub not in wildcard_list:
                        wildcard_list.append(sub)
                        regex_list.append('^' + sub + '$')
                else:
                    normal_lines.append(sub)
        pattern = '|'.join(regex_list)
        if pattern:
            _regex = re.compile(pattern)
            if _regex:
                for line in normal_lines[:]:
                    if _regex.search(line):
                        normal_lines.remove(line)

        for item in normal_lines:
            self.priority += 1
            self.queue.put((self.priority, item))

        for item in wildcard_lines:
            self.queue.put((88888888, item))

    def _load_next_sub(self):
        self._print_msg('[+] Load next level subs ...')
        self.next_subs = []
        _set = set()
        _file = 'dict/next_sub.txt' if not self.options.full_scan else 'dict/next_sub_full.txt'
        with open(_file) as f:
            for line in f:
                sub = line.strip()
                if sub and sub not in self.next_subs:
                    tmp_set = {sub}
                    while len(tmp_set) > 0:
                        item = tmp_set.pop()
                        if item.find('{alphnum}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                                tmp_set.add(item.replace('{alphnum}', _letter, 1))
                        elif item.find('{alpha}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz':
                                tmp_set.add(item.replace('{alpha}', _letter, 1))
                        elif item.find('{num}') >= 0:
                            for _letter in '0123456789':
                                tmp_set.add(item.replace('{num}', _letter, 1))
                        elif item not in _set:
                            _set.add(item)
                            self.next_subs.append(item)

    def _print_msg(self, _msg=None, _found_msg=False):
        if _msg is None:
            self.print_count += 1
            if self.print_count < 100:
                return
            self.print_count = 0
            msg = '%s Found| %s Groups| %s scanned in %.1f seconds' % (
                self.found_count, self.queue.qsize(), self.scan_count, time.time() - self.start_time)
            sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) + msg)
        elif _msg.startswith('[+] Check DNS Server'):
            sys.stdout.write('\r' + _msg + ' ' * (self.console_width - len(_msg)))
        else:
            sys.stdout.write('\r' + _msg + ' ' * (self.console_width - len(_msg)) + '\n')
            if _found_msg:
                msg = '%s Found| %s Groups| %s scanned in %.1f seconds' % (
                    self.found_count, self.queue.qsize(), self.scan_count, time.time() - self.start_time)
                sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) + msg)
        sys.stdout.flush()

    @staticmethod
    def is_intranet(ip):
        ret = ip.split('.')
        if len(ret) != 4:
            return True
        if ret[0] == '10':
            return True
        if ret[0] == '172' and 16 <= int(ret[1]) <= 31:    # 172.16.0.0/12
            return True
        if ret[0] == '192' and ret[1] == '168':
            return True
        return False

    def put_item(self, item):
        num = item.count('{alphnum}') + item.count('{alpha}') + item.count('{num}')
        if num == 0:
            self.priority += 1
            self.queue.put((self.priority, item))
        else:
            self.queue.put((self.priority + num * 10000000, item))

    def _scan(self, j):
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]]
        while not self.queue.empty():
            try:
                item = self.queue.get(timeout=1.0)[1]
                self.scan_count += 1
            except:
                break
            self._print_msg()
            try:
                if item.find('{alphnum}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                        self.put_item(item.replace('{alphnum}', _letter, 1))
                    continue
                elif item.find('{alpha}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz':
                        self.put_item(item.replace('{alpha}', _letter, 1))
                    continue
                elif item.find('{num}') >= 0:
                    for _letter in '0123456789':
                        self.put_item(item.replace('{num}', _letter, 1))
                    continue
                elif item.find('{next_sub}') >= 0:
                    for _ in self.next_subs:
                        self.queue.put((0, item.replace('{next_sub}', _, 1)))
                    continue
                else:
                    sub = item

                if sub in self.found_subs:
                    continue

                cur_sub_domain = sub + '.' + self.target
                _sub = sub.split('.')[-1]
                try:
                    answers = self.resolvers[j].query(cur_sub_domain)
                except dns.resolver.NoAnswer as e:
                    answers = self.ex_resolver.query(cur_sub_domain)

                if answers:
                    self.found_subs.add(sub)
                    ips = ', '.join(sorted([answer.address for answer in answers]))
                    if ips in ['1.1.1.1', '127.0.0.1', '0.0.0.0']:
                        continue

                    if self.ignore_intranet and SubNameBrute.is_intranet(answers[0].address):
                        continue

                    try:
                        self.scan_count += 1
                        answers = self.resolvers[j].query(cur_sub_domain, 'cname')
                        cname = answers[0].target.to_unicode().rstrip('.')
                        if cname.endswith(self.target) and cname not in self.found_subs:
                            self.found_subs.add(cname)
                            cname_sub = cname[:len(cname) - len(self.target) - 1]    # new sub
                            self.queue.put((0, cname_sub))

                    except:
                        pass

                    if (_sub, ips) not in self.ip_dict:
                        self.ip_dict[(_sub, ips)] = 1
                    else:
                        self.ip_dict[(_sub, ips)] += 1

                    if ips not in self.ip_dict:
                        self.ip_dict[ips] = 1
                    else:
                        self.ip_dict[ips] += 1

                    if self.ip_dict[(_sub, ips)] > 3 or self.ip_dict[ips] > 6:
                        continue

                    self.found_count += 1
                    msg = cur_sub_domain.ljust(30) + ips
                    self._print_msg(msg, _found_msg=True)
                    self._print_msg()
                    self.outfile.write(cur_sub_domain.ljust(30) + '\t' + ips + '\n')
                    self.outfile.flush()
                    try:
                        self.resolvers[j].query('lijiejietest.' + cur_sub_domain)
                    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
                        self.queue.put((999999999, '{next_sub}.' + sub))
                    except:
                        pass

            except (dns.resolver.NXDOMAIN, dns.name.EmptyLabel) as e:
                pass
            except (dns.resolver.NoNameservers, dns.resolver.NoAnswer, dns.exception.Timeout) as e:
                pass
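
The listing above stops at _scan() and no run() method is shown. A minimal, hypothetical run() in the same gevent style (the method name and the joinall pattern are assumptions, mirrored from the other examples on this page):

    def run(self):
        # one _scan greenlet per configured thread, each pinned to a DNS resolver
        threads = [gevent.spawn(self._scan, i) for i in range(self.options.threads)]
        gevent.joinall(threads)
        self.outfile.close()
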
Exemple #22
0
class SubNameBrute:
    def __init__(self, target, options):
        # save result to list
        self.subdomains = list()
        self.start_time = time.time()
        self.target = target.strip()
        self.options = options
        self.ignore_intranet = options.i
        self.scan_count = self.found_count = 0
        self.console_width = getTerminalSize()[0] - 2
        self.resolvers = [dns.resolver.Resolver(configure=False) for _ in range(options.threads)]
        for _ in self.resolvers:
            _.lifetime = _.timeout = 10.0
        self.STOP_ME = False
        self._load_dns_servers()
        self._load_next_sub()
        self.queue = PriorityQueue()
        self.priority = 0
        self._load_sub_names()
        self.ip_dict = {}
        self.found_subs = set()
        self.ex_resolver = dns.resolver.Resolver(configure=False)
        self.ex_resolver.nameservers = self.dns_servers

    def _load_dns_servers(self):
        self.dns_servers = []
        pool = Pool(30)
        for server in open('dict/dns_servers.txt').xreadlines():
            server = server.strip()
            if server:
                pool.apply_async(self._test_server, (server,))
        pool.join()

        self.dns_count = len(self.dns_servers)
        sys.stdout.write('\n')
        if self.dns_count == 0:
            sys.exit(-1)

    def _test_server(self, server):
        resolver = dns.resolver.Resolver()
        resolver.lifetime = resolver.timeout = 10.0
        try:
            resolver.nameservers = [server]
            answers = resolver.query('public-dns-a.baidu.com')    # test lookup of a known domain
            if answers[0].address != '180.76.76.76':
                raise Exception('incorrect DNS response')
            try:
                resolver.query('test.bad.dns.lijiejie.com')    # non-existent domain test
                with open('bad_dns_servers.txt', 'a') as f:
                    f.write(server + '\n')
            except:
                self.dns_servers.append(server)
        except:
            pass

    def _load_sub_names(self):
        if self.options.full_scan and self.options.file == 'subnames.txt':
            _file = 'dict/subnames_full.txt'
        else:
            if os.path.exists(self.options.file):
                _file = self.options.file
            elif os.path.exists('dict/%s' % self.options.file):
                _file = 'dict/%s' % self.options.file
            else:
                exit(-1)

        normal_lines = []
        wildcard_lines = []
        wildcard_list = []
        regex_list = []
        lines = set()
        with open(_file) as f:
            for line in f.xreadlines():
                sub = line.strip()
                if not sub or sub in lines:
                    continue
                lines.add(sub)

                if sub.find('{alphnum}') >= 0 or sub.find('{alpha}') >= 0 or sub.find('{num}') >= 0:
                    wildcard_lines.append(sub)
                    sub = sub.replace('{alphnum}', '[a-z0-9]')
                    sub = sub.replace('{alpha}', '[a-z]')
                    sub = sub.replace('{num}', '[0-9]')
                    if sub not in wildcard_list:
                        wildcard_list.append(sub)
                        regex_list.append('^' + sub + '$')
                else:
                    normal_lines.append(sub)
        pattern = '|'.join(regex_list)
        if pattern:
            _regex = re.compile(pattern)
            if _regex:
                for line in normal_lines[:]:
                    if _regex.search(line):
                        normal_lines.remove(line)

        for item in normal_lines:
            self.priority += 1
            self.queue.put((self.priority, item))

        for item in wildcard_lines:
            self.queue.put((88888888, item))

    def _load_next_sub(self):
        self.next_subs = []
        _set = set()
        _file = 'dict/next_sub.txt' if not self.options.full_scan else 'dict/next_sub_full.txt'
        with open(_file) as f:
            for line in f:
                sub = line.strip()
                if sub and sub not in self.next_subs:
                    tmp_set = {sub}
                    while len(tmp_set) > 0:
                        item = tmp_set.pop()
                        if item.find('{alphnum}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                                tmp_set.add(item.replace('{alphnum}', _letter, 1))
                        elif item.find('{alpha}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz':
                                tmp_set.add(item.replace('{alpha}', _letter, 1))
                        elif item.find('{num}') >= 0:
                            for _letter in '0123456789':
                                tmp_set.add(item.replace('{num}', _letter, 1))
                        elif item not in _set:
                            _set.add(item)
                            self.next_subs.append(item)

    @staticmethod
    def is_intranet(ip):
        ret = ip.split('.')
        if len(ret) != 4:
            return True
        if ret[0] == '10':
            return True
        if ret[0] == '172' and 16 <= int(ret[1]) <= 31:    # 172.16.0.0/12
            return True
        if ret[0] == '192' and ret[1] == '168':
            return True
        return False

    def put_item(self, item):
        num = item.count('{alphnum}') + item.count('{alpha}') + item.count('{num}')
        if num == 0:
            self.priority += 1
            self.queue.put((self.priority, item))
        else:
            self.queue.put((self.priority + num * 10000000, item))

    def _scan(self, j):
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]]
        while not self.queue.empty():
            try:
                item = self.queue.get(timeout=1.0)[1]
                self.scan_count += 1
            except:
                break
            try:
                if item.find('{alphnum}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                        self.put_item(item.replace('{alphnum}', _letter, 1))
                    continue
                elif item.find('{alpha}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz':
                        self.put_item(item.replace('{alpha}', _letter, 1))
                    continue
                elif item.find('{num}') >= 0:
                    for _letter in '0123456789':
                        self.put_item(item.replace('{num}', _letter, 1))
                    continue
                elif item.find('{next_sub}') >= 0:
                    for _ in self.next_subs:
                        self.queue.put((0, item.replace('{next_sub}', _, 1)))
                    continue
                else:
                    sub = item

                if sub in self.found_subs:
                    continue

                cur_sub_domain = sub + '.' + self.target
                _sub = sub.split('.')[-1]
                try:
                    answers = self.resolvers[j].query(cur_sub_domain)
                except dns.resolver.NoAnswer as e:
                    answers = self.ex_resolver.query(cur_sub_domain)

                if answers:
                    self.found_subs.add(sub)
                    ips = ', '.join(sorted([answer.address for answer in answers]))
                    if ips in ['1.1.1.1', '127.0.0.1', '0.0.0.0']:
                        continue

                    if self.ignore_intranet and SubNameBrute.is_intranet(answers[0].address):
                        continue

                    try:
                        self.scan_count += 1
                        answers = self.resolvers[j].query(cur_sub_domain, 'cname')
                        cname = answers[0].target.to_unicode().rstrip('.')
                        if cname.endswith(self.target) and cname not in self.found_subs:
                            self.found_subs.add(cname)
                            cname_sub = cname[:len(cname) - len(self.target) - 1]    # new sub
                            self.queue.put((0, cname_sub))

                    except:
                        pass

                    if (_sub, ips) not in self.ip_dict:
                        self.ip_dict[(_sub, ips)] = 1
                    else:
                        self.ip_dict[(_sub, ips)] += 1

                    if ips not in self.ip_dict:
                        self.ip_dict[ips] = 1
                    else:
                        self.ip_dict[ips] += 1

                    if self.ip_dict[(_sub, ips)] > 3 or self.ip_dict[ips] > 6:
                        continue

                    self.found_count += 1
                    msg = cur_sub_domain.ljust(30) + ips

                    # save result to list
                    self.subdomains.append([cur_sub_domain, ips])

                    try:
                        self.resolvers[j].query('lijiejietest.' + cur_sub_domain)
                    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
                        self.queue.put((999999999, '{next_sub}.' + sub))
                    except:
                        pass

            except (dns.resolver.NXDOMAIN, dns.name.EmptyLabel) as e:
                pass
            except (dns.resolver.NoNameservers, dns.resolver.NoAnswer, dns.exception.Timeout) as e:
                pass
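
This variant collects results in self.subdomains instead of writing a file. A hypothetical external driver (the options attributes are assumptions taken from __init__; the spawn/joinall pattern mirrors the other examples on this page):

brute = SubNameBrute('example.com', options)   # options must expose .i, .threads, .file, .full_scan
threads = [gevent.spawn(brute._scan, i) for i in range(options.threads)]
gevent.joinall(threads)
for domain, ips in brute.subdomains:
    print domain, ips
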
class EdgeDataBridge(object):
    """Edge Bridge"""
    def __init__(self, config):
        super(EdgeDataBridge, self).__init__()
        self.config = config
        self.workers_config = {}
        self.bridge_id = uuid.uuid4().hex
        self.api_host = self.config_get('resources_api_server')
        self.api_version = self.config_get('resources_api_version')
        self.retrievers_params = self.config_get('retrievers_params')

        # Check up_wait_sleep
        up_wait_sleep = self.retrievers_params.get('up_wait_sleep')
        if up_wait_sleep is not None and up_wait_sleep < 30:
            raise DataBridgeConfigError('Invalid \'up_wait_sleep\' in '
                                        '\'retrievers_params\'. Value must be '
                                        'at least 30.')

        # Workers settings
        for key in WORKER_CONFIG:
            self.workers_config[key] = (self.config_get(key)
                                        or WORKER_CONFIG[key])

        # Init config
        for key in DEFAULTS:
            setattr(self, key, self.config_get(key) or DEFAULTS[key])

        # Pools
        self.workers_pool = gevent.pool.Pool(self.workers_max)
        self.retry_workers_pool = gevent.pool.Pool(self.retry_workers_max)
        self.filter_workers_pool = gevent.pool.Pool(self.filter_workers_count)

        # Queues
        if self.input_queue_size == -1:
            self.input_queue = PriorityQueue()
        else:
            self.input_queue = PriorityQueue(self.input_queue_size)

        if self.resource_items_queue_size == -1:
            self.resource_items_queue = PriorityQueue()
        else:
            self.resource_items_queue = PriorityQueue(
                self.resource_items_queue_size)

        self.api_clients_queue = Queue()
        # self.retry_api_clients_queue = Queue()

        if self.retry_resource_items_queue_size == -1:
            self.retry_resource_items_queue = PriorityQueue()
        else:
            self.retry_resource_items_queue = PriorityQueue(
                self.retry_resource_items_queue_size)

        if self.api_host != '' and self.api_host is not None:
            api_host = urlparse(self.api_host)
            if api_host.scheme == '' and api_host.netloc == '':
                raise DataBridgeConfigError(
                    'Invalid \'tenders_api_server\' url.')
        else:
            raise DataBridgeConfigError('In config dictionary empty or missing'
                                        ' \'tenders_api_server\'')
        self.db = prepare_couchdb(self.couch_url, self.db_name, logger)
        db_url = self.couch_url + '/' + self.db_name
        prepare_couchdb_views(db_url, self.workers_config['resource'], logger)
        self.server = Server(self.couch_url,
                             session=Session(retry_delays=range(10)))
        self.view_path = '_design/{}/_view/by_dateModified'.format(
            self.workers_config['resource'])
        extra_params = {
            'mode': self.retrieve_mode,
            'limit': self.resource_items_limit
        }
        self.feeder = ResourceFeeder(host=self.api_host,
                                     version=self.api_version,
                                     key='',
                                     resource=self.workers_config['resource'],
                                     extra_params=extra_params,
                                     retrievers_params=self.retrievers_params,
                                     adaptive=True,
                                     with_priority=True)
        self.api_clients_info = {}

    def config_get(self, name):
        try:
            return self.config.get('main').get(name)
        except AttributeError:
            raise DataBridgeConfigError('In config dictionary missed section'
                                        ' \'main\'')

    def create_api_client(self):
        client_user_agent = self.user_agent + '/' + self.bridge_id
        timeout = 0.1
        while 1:
            try:
                api_client = APIClient(
                    host_url=self.api_host,
                    user_agent=client_user_agent,
                    api_version=self.api_version,
                    key='',
                    resource=self.workers_config['resource'])
                client_id = uuid.uuid4().hex
                logger.info('Started api_client {}'.format(
                    api_client.session.headers['User-Agent']),
                            extra={'MESSAGE_ID': 'create_api_clients'})
                api_client_dict = {
                    'id': client_id,
                    'client': api_client,
                    'request_interval': 0,
                    'not_actual_count': 0
                }
                self.api_clients_info[api_client_dict['id']] = {
                    'drop_cookies': False,
                    'request_durations': {},
                    'request_interval': 0,
                    'avg_duration': 0
                }
                self.api_clients_queue.put(api_client_dict)
                break
            except RequestFailed as e:
                logger.error(
                    'Failed start api_client with status code {}'.format(
                        e.status_code),
                    extra={'MESSAGE_ID': 'exceptions'})
                timeout = timeout * 2
                logger.info(
                    'create_api_client will be sleep {} sec.'.format(timeout))
                sleep(timeout)
            except Exception as e:
                logger.error('Failed start api client with error: {}'.format(
                    e.message),
                             extra={'MESSAGE_ID': 'exceptions'})
                timeout = timeout * 2
                logger.info(
                    'create_api_client will be sleep {} sec.'.format(timeout))
                sleep(timeout)

    def fill_api_clients_queue(self):
        while self.api_clients_queue.qsize() < self.workers_min:
            self.create_api_client()

    def fill_input_queue(self):
        for resource_item in self.feeder.get_resource_items():
            self.input_queue.put(resource_item)
            logger.debug('Add to temp queue from sync: {} {} {}'.format(
                self.workers_config['resource'][:-1], resource_item[1]['id'],
                resource_item[1]['dateModified']),
                         extra={
                             'MESSAGE_ID': 'received_from_sync',
                             'TEMP_QUEUE_SIZE': self.input_queue.qsize()
                         })

    def send_bulk(self, input_dict, priority_cache):
        sleep_before_retry = 2
        for i in xrange(0, 3):
            try:
                logger.debug('Send check bulk: {}'.format(len(input_dict)),
                             extra={'CHECK_BULK_LEN': len(input_dict)})
                start = time()
                rows = self.db.view(self.view_path, keys=input_dict.values())
                end = time() - start
                logger.debug('Duration bulk check: {} sec.'.format(end),
                             extra={'CHECK_BULK_DURATION': end * 1000})
                resp_dict = {k.id: k.key for k in rows}
                break
            except (IncompleteRead, Exception) as e:
                logger.error('Error while send bulk {}'.format(e.message),
                             extra={'MESSAGE_ID': 'exceptions'})
                if i == 2:
                    raise e
                sleep(sleep_before_retry)
                sleep_before_retry *= 2
        for item_id, date_modified in input_dict.items():
            if item_id in resp_dict and date_modified == resp_dict[item_id]:
                logger.debug('Skipped {} {}: In db exist newest.'.format(
                    self.workers_config['resource'][:-1], item_id),
                             extra={'MESSAGE_ID': 'skipped'})
            elif ((1, item_id) not in self.resource_items_queue.queue
                  and (1000, item_id) not in self.resource_items_queue.queue):
                self.resource_items_queue.put(
                    (priority_cache[item_id], item_id))
                logger.debug(
                    'Put to main queue {}: {}'.format(
                        self.workers_config['resource'][:-1], item_id),
                    extra={'MESSAGE_ID': 'add_to_resource_items_queue'})
            else:
                logger.debug(
                    'Skipped {} {}: In queue exist with same id'.format(
                        self.workers_config['resource'][:-1], item_id),
                    extra={'MESSAGE_ID': 'skipped'})

    def fill_resource_items_queue(self):
        start_time = datetime.now()
        input_dict = {}
        priority_cache = {}
        while True:
            # Get resource_item from temp queue
            if not self.input_queue.empty():
                priority, resource_item = self.input_queue.get()
            else:
                timeout = self.bulk_query_interval -\
                    (datetime.now() - start_time).total_seconds()
                if timeout < 0:
                    # the bulk interval has already elapsed: don't block on the queue
                    timeout = 0
                try:
                    priority, resource_item = self.input_queue.get(
                        timeout=timeout)
                except Empty:
                    resource_item = None

            # Add resource_item to bulk
            if resource_item is not None:
                logger.debug('Add to input_dict {}'.format(
                    resource_item['id']))
                input_dict[resource_item['id']] = resource_item['dateModified']
                priority_cache[resource_item['id']] = priority

            if (len(input_dict) >= self.bulk_query_limit
                    or (datetime.now() - start_time).total_seconds() >=
                    self.bulk_query_interval):
                if len(input_dict) > 0:
                    self.send_bulk(input_dict, priority_cache)
                    input_dict = {}
                    priority_cache = {}
                start_time = datetime.now()

    def resource_items_filter(self, r_id, r_date_modified):
        try:
            local_document = self.db.get(r_id)
            if local_document:
                if local_document['dateModified'] < r_date_modified:
                    return True
                else:
                    return False
            else:
                return True
        except Exception as e:
            logger.error(
                'Filter error: Error while getting {} {} from couchdb: '
                '{}'.format(self.workers_config['resource'][:-1], r_id,
                            e.message),
                extra={'MESSAGE_ID': 'exceptions'})
            return True

    def _get_average_requests_duration(self):
        req_durations = []
        delta = timedelta(seconds=self.perfomance_window)
        current_date = datetime.now() - delta
        for cid, info in self.api_clients_info.items():
            if len(info['request_durations']) > 0:
                if min(info['request_durations'].keys()) <= current_date:
                    info['grown'] = True
                avg = round(
                    sum(info['request_durations'].values()) * 1.0 /
                    len(info['request_durations']), 3)
                req_durations.append(avg)
                info['avg_duration'] = avg

        if len(req_durations) > 0:
            return round(sum(req_durations) / len(req_durations),
                         3), req_durations
        else:
            return 0, req_durations

    # TODO: Add logic to restart the sync if the last response is older than
    # some threshold and there are no active tasks for the resource

    def queues_controller(self):
        while True:
            if (self.workers_pool.free_count() > 0
                    and (self.resource_items_queue.qsize() >
                         ((float(self.resource_items_queue_size) / 100) *
                          self.workers_inc_threshold))):
                self.create_api_client()
                w = ResourceItemWorker.spawn(self.api_clients_queue,
                                             self.resource_items_queue,
                                             self.db, self.workers_config,
                                             self.retry_resource_items_queue,
                                             self.api_clients_info)
                self.workers_pool.add(w)
                logger.info('Queue controller: Create main queue worker.')
            elif (self.resource_items_queue.qsize() <
                  ((float(self.resource_items_queue_size) / 100) *
                   self.workers_dec_threshold)):
                if len(self.workers_pool) > self.workers_min:
                    wi = self.workers_pool.greenlets.pop()
                    wi.shutdown()
                    api_client_dict = self.api_clients_queue.get()
                    del self.api_clients_info[api_client_dict['id']]
                    logger.info('Queue controller: Kill main queue worker.')
            filled_resource_items_queue = round(
                self.resource_items_queue.qsize() /
                (float(self.resource_items_queue_size) / 100), 2)
            logger.info('Resource items queue filled on {} %'.format(
                filled_resource_items_queue))
            filled_retry_resource_items_queue = round(
                self.retry_resource_items_queue.qsize() /
                (float(self.retry_resource_items_queue_size) / 100), 2)
            logger.info('Retry resource items queue filled on {} %'.format(
                filled_retry_resource_items_queue))
            sleep(self.queues_controller_timeout)

    def gevent_watcher(self):
        self.perfomance_watcher()
        for t in self.server.tasks():
            if (t['type'] == 'indexer' and t['database'] == self.db_name
                    and t.get('design_document', None) == '_design/{}'.format(
                        self.workers_config['resource'])):
                logger.info(
                    'Watcher: Waiting for end of view indexing. Current'
                    ' progress: {} %'.format(t['progress']))

        # Check fill threads
        input_threads = 1
        if self.input_queue_filler.ready():
            input_threads = 0
            logger.error('Temp queue filler error: {}'.format(
                self.input_queue_filler.exception.message),
                         extra={'MESSAGE_ID': 'exception'})
            self.input_queue_filler = spawn(self.fill_input_queue)
        logger.info('Input threads {}'.format(input_threads),
                    extra={'INPUT_THREADS': input_threads})
        fill_threads = 1
        if self.filler.ready():
            fill_threads = 0
            logger.error('Fill thread error: {}'.format(
                self.filler.exception.message),
                         extra={'MESSAGE_ID': 'exception'})
            self.filler = spawn(self.fill_resource_items_queue)
        logger.info('Filter threads {}'.format(fill_threads),
                    extra={'FILTER_THREADS': fill_threads})

        main_threads = self.workers_max - self.workers_pool.free_count()
        logger.info('Main threads {}'.format(main_threads),
                    extra={'MAIN_THREADS': main_threads})

        if len(self.workers_pool) < self.workers_min:
            for i in xrange(0, (self.workers_min - len(self.workers_pool))):
                w = ResourceItemWorker.spawn(self.api_clients_queue,
                                             self.resource_items_queue,
                                             self.db, self.workers_config,
                                             self.retry_resource_items_queue,
                                             self.api_clients_info)
                self.workers_pool.add(w)
                logger.info('Watcher: Create main queue worker.')
                self.create_api_client()
        retry_threads = self.retry_workers_max -\
            self.retry_workers_pool.free_count()
        logger.info('Retry threads {}'.format(retry_threads),
                    extra={'RETRY_THREADS': retry_threads})
        if len(self.retry_workers_pool) < self.retry_workers_min:
            for i in xrange(
                    0, self.retry_workers_min - len(self.retry_workers_pool)):
                self.create_api_client()
                w = ResourceItemWorker.spawn(self.api_clients_queue,
                                             self.retry_resource_items_queue,
                                             self.db, self.workers_config,
                                             self.retry_resource_items_queue,
                                             self.api_clients_info)
                self.retry_workers_pool.add(w)
                logger.info('Watcher: Create retry queue worker.')

        # Log queues size and API clients count
        main_queue_size = self.resource_items_queue.qsize()
        logger.info('Resource items queue size {}'.format(main_queue_size),
                    extra={'MAIN_QUEUE_SIZE': main_queue_size})
        retry_queue_size = self.retry_resource_items_queue.qsize()
        logger.info(
            'Resource items retry queue size {}'.format(retry_queue_size),
            extra={'RETRY_QUEUE_SIZE': retry_queue_size})
        api_clients_count = len(self.api_clients_info)
        logger.info('API Clients count: {}'.format(api_clients_count),
                    extra={'API_CLIENTS': api_clients_count})

    def _calculate_st_dev(self, values):
        if len(values) > 0:
            avg = sum(values) * 1.0 / len(values)
            variance = map(lambda x: (x - avg)**2, values)
            avg_variance = sum(variance) * 1.0 / len(variance)
            st_dev = math.sqrt(avg_variance)
            return round(st_dev, 3)
        else:
            return 0

    def _mark_bad_clients(self, dev):
        # Mark bad api clients
        for cid, info in self.api_clients_info.items():
            if info.get('grown', False) and info['avg_duration'] > dev:
                info['drop_cookies'] = True
                logger.debug('Performance watcher: Mark client {} as bad, avg.'
                             ' request_duration is {} sec.'.format(
                                 cid, info['avg_duration']),
                             extra={'MESSAGE_ID': 'marked_as_bad'})
            elif info['avg_duration'] < dev and info['request_interval'] > 0:
                info['drop_cookies'] = True
                logger.debug('Performance watcher: Mark client {} as bad,'
                             ' request_interval is {} sec.'.format(
                                 cid, info['request_interval']),
                             extra={'MESSAGE_ID': 'marked_as_bad'})

    def perfomance_watcher(self):
        avg_duration, values = self._get_average_requests_duration()
        for _, info in self.api_clients_info.items():
            delta = timedelta(seconds=self.perfomance_window +
                              self.watch_interval)
            current_date = datetime.now() - delta
            delete_list = []
            for key in info['request_durations']:
                if key < current_date:
                    delete_list.append(key)
            for k in delete_list:
                del info['request_durations'][k]
            delete_list = []

        st_dev = self._calculate_st_dev(values)
        if len(values) > 0:
            min_avg = min(values) * 1000
            max_avg = max(values) * 1000
        else:
            max_avg = 0
            min_avg = 0
        dev = round(st_dev + avg_duration, 3)

        logger.info('Performance watcher:\nREQUESTS_STDEV - {} sec.\n'
                    'REQUESTS_DEV - {} sec.\nREQUESTS_MIN_AVG - {} ms.\n'
                    'REQUESTS_MAX_AVG - {} ms.\nREQUESTS_AVG - {} sec.'.format(
                        round(st_dev, 3), dev, min_avg, max_avg, avg_duration),
                    extra={
                        'REQUESTS_DEV': dev * 1000,
                        'REQUESTS_MIN_AVG': min_avg,
                        'REQUESTS_MAX_AVG': max_avg,
                        'REQUESTS_AVG': avg_duration * 1000
                    })
        self._mark_bad_clients(dev)

    def run(self):
        logger.info('Start Edge Bridge',
                    extra={'MESSAGE_ID': 'edge_bridge_start_bridge'})
        logger.info('Start data sync...',
                    extra={'MESSAGE_ID': 'edge_bridge__data_sync'})
        self.input_queue_filler = spawn(self.fill_input_queue)
        self.filler = spawn(self.fill_resource_items_queue)
        spawn(self.queues_controller)
        while True:
            self.gevent_watcher()
            sleep(self.watch_interval)
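
EdgeDataBridge reads every setting through config_get(), i.e. from a 'main' section of the config dictionary. A minimal config sketch with only the keys that __init__ reads explicitly (the values are placeholders; the remaining worker and queue settings fall back to WORKER_CONFIG and DEFAULTS):

config = {
    'main': {
        'resources_api_server': 'https://api.example.org',  # placeholder URL
        'resources_api_version': '2.4',                     # placeholder version
        'retrievers_params': {
            'up_wait_sleep': 30,  # must be at least 30 (checked in __init__)
        },
    }
}
bridge = EdgeDataBridge(config)
bridge.run()
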
Exemple #24
0
class Scanner:
    def __init__(self):
        self.start_time = time.time()
        self.queue = PriorityQueue()
        self.history = []
        self.total_count = 0
        self.scan_count = 0
        self._load_target()
        self.outfile = open("log.log", 'w')
        self.console_width = getTerminalSize()[0] - 2

    def _print_msg(self, _msg=None, _found_msg=False):
        if _msg is None:
            msg = '%s TotalCount| %s Scanned in %.2f seconds' % (
                self.total_count, self.total_count - self.queue.qsize(),
                time.time() - self.start_time)
            sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) +
                             msg)
        else:
            sys.stdout.write('\r' + _msg + ' ' *
                             (self.console_width - len(_msg)) + '\n')
            self.outfile.write(_msg + '\n')
            self.outfile.flush()
            if _found_msg:
                msg = '%s TotalCount| %s Scanned in %.2f seconds' % (
                    self.total_count, self.total_count - self.queue.qsize(),
                    time.time() - self.start_time)
                sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) +
                                 msg)
        sys.stdout.flush()

    def _load_target(self):
        print('[+] Read targets ...')
        target_file = raw_input("Target File :")
        with open(target_file) as f:
            for line in f.xreadlines():
                target = line.strip()
                self.queue.put(target)

        print("TotalCount is %d" % self.queue.qsize())
        self.total_count = self.queue.qsize()
        print("Now scanning ...")

    def _scan(self, case):
        while not self.queue.empty():
            target = self.queue.get()
            if case == "1":
                self.vulnCheck(target)
            if case == "2":
                self.s2_045(target)
            if case == "3":
                self.headers(target)
            if case == "4":
                self.weakfile(target)
            if case == "5":
                self.portscan_c(target)

    def vulnCheck(self, target):
        if ":2375" in target:
            try:
                res = requests.head("http://" + str(target) +
                                    "/containers/json",
                                    timeout=2)
                if res.headers['Content-Type'] == 'application/json':
                    self._print_msg(target + "==>  docker api Vuln", True)
                else:
                    self._print_msg()
            except:
                self._print_msg()
            self._print_msg()

        if ":27017" in target:
            ip, port = target.split(":")
            try:
                socket.setdefaulttimeout(3)
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.connect((ip, int(port)))
                data = binascii.a2b_hex(
                    "3a000000a741000000000000d40700000000000061646d696e2e24636d640000000000ffffffff130000001069736d6173746572000100000000"
                )
                s.send(data)
                result = s.recv(1024)
                if "ismaster" in result:
                    getlog_data = binascii.a2b_hex(
                        "480000000200000000000000d40700000000000061646d696e2e24636d6400000000000100000021000000026765744c6f670010000000737461727475705761726e696e67730000"
                    )
                    s.send(getlog_data)
                    result = s.recv(1024)
                    if "totalLinesWritten" in result:
                        self._print_msg(target + "==>  mongodb Vuln", True)
            except Exception as e:
                pass

        if ":6379" in target:
            ip, port = target.split(":")
            try:
                socket.setdefaulttimeout(3)
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.connect((ip, int(port)))
                s.send("INFO\r\n")
                result = s.recv(1024)
                if "redis_version" in result:
                    self._print_msg(target + "==>  redis Vuln", True)
                elif "Authentication" in result:
                    for pass_ in ['123456', 'redis', 'pass', 'password']:
                        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                        s.connect((ip, int(port)))
                        s.send("AUTH %s\r\n" % (pass_))
                        result = s.recv(1024)
                        if '+OK' in result:
                            self._print_msg(
                                target + "==>  redis pass Vuln :" + pass_,
                                True)
            except Exception as e:
                pass
        if ":11211" in target:
            ip, port = target.split(":")
            try:
                socket.setdefaulttimeout(3)
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.connect((ip, int(port)))
                s.send("stats\r\n")
                result = s.recv(1024)
                if "STAT version" in result:
                    self._print_msg(target + "==>  memcache Vuln", True)
            except Exception as e:
                pass

        if ":9200" in target:
            try:
                res = requests.head("http://" + str(target) +
                                    "/_rvier/_search",
                                    timeout=2)
                if res.status_code == 200:
                    self._print_msg(target + "==>  Elasticsearch Vuln", True)
                else:
                    self._print_msg()
            except:
                self._print_msg()
            self._print_msg()

    def headers(self, target):
        try:
            res = requests.head("http://" + str(target), timeout=1)
            self._print_msg(target + "==>" + str(res.headers), True)
        except:
            self._print_msg()
        self._print_msg()

    def s2_045(self, target):
        try:
            data = {"image": " "}
            headers = {
                "User-Agent":
                "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
                "Content-Type":
                "%{#context['com.opensymphony.xwork2.dispatcher.HttpServletResponse'].addHeader('vul','s2-045')}.multtargetart/form-data"
            }
            req = requests.post("http://" + str(target),
                                data=data,
                                headers=headers)
            if req.headers["vul"] == "s2-045":
                self._print_msg(target + "==>" + "S2-045 Vuln", True)
        except:
            self._print_msg()
        self._print_msg()

    def weakfile(self, target):
        weaklist = ["robots.txt", "/i.php", "/phpinfo.php"]
        for weakfile in weaklist:
            try:
                res = requests.head("http://" + str(target) + weakfile,
                                    timeout=1)
                if res.status_code == 200:
                    if ("User-agent" in res.content) or ("phpinfo"
                                                         in res.content):
                        self._print_msg("http://" + target + weakfile, True)
            except:
                self._print_msg()
        self._print_msg()

    def portscan_c(self, target):
        import socket
        ip = socket.gethostbyname(target)
        ports = [1433, 2375, 3306, 6379, 9200, 11211, 27017]
        ip = ip.split(".")
        ipc = ip[0] + "." + ip[1] + "." + ip[2] + "."
        if ipc in self.history:
            return
        else:
            self.history.append(ipc)

        for port in ports:
            for i in range(255):
                try:
                    cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    cs.settimeout(2.5)
                    address = (ipc + str(i), int(port))
                    status = cs.connect_ex(address)
                    if status == 0:
                        self._print_msg(ipc + str(i) + ":" + str(port), True)
                except Exception as e:
                    pass

                finally:
                    cs.close()
                self._print_msg()

    def run(self, case):
        threads = [gevent.spawn(self._scan, case) for i in xrange(1000)]
        gevent.joinall(threads)    # wait for the workers, otherwise run() returns immediately
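
Scanner.run() expects the case selector that _scan() dispatches on ('1'..'5'). A hypothetical entry point (the prompt text is an assumption):

if __name__ == '__main__':
    scanner = Scanner()
    case = raw_input('1)vulnCheck 2)s2-045 3)headers 4)weakfile 5)portscan_c : ')
    scanner.run(case)
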
Exemple #25
0
class SubNameBrute:
    def __init__(self, target, options):
        # set up the priority queue
        self.queue = PriorityQueue()
        self.priority = 0

        # basic settings taken from the options
        self.target = target.strip()
        self.options = options
        self.ignore_intranet = options.get('ignore_intranet')

        # derive the result file name from the main domain (or from the file option)
        outfile_name = options.get('file') if options.get('file') else target
        # append a suffix when the large dictionaries are used
        if self.options.get('subnames_full'):
            outfile_name += '_sfull'
        if self.options.get('next_sub_full'):
            outfile_name += '_nfull'
        self.fname = 'results/' + outfile_name + '.txt'
        self.outfile = open(self.fname, 'wb')
        self.outfile_ips = open('results/' + outfile_name + '_ip.txt', 'w')

        # set up the DNS resolvers (one dns.resolver.Resolver per configured thread)
        # QUESTION: configure=False is still unclear; why not use the usual /etc/resolv.conf configuration?
        self.resolvers = [dns.resolver.Resolver(configure=False) for _ in range(options.get('threads'))]
        for _ in self.resolvers:
            '''
            dns.resolver.Resolver: http://www.dnspython.org/docs/1.14.0/dns.resolver.Resolver-class.html
            dns.resolver.Resolver.lifetime: The total number of seconds to spend trying to get an answer to the question.
            dns.resolver.Resolver.timeout: The number of seconds to wait for a response from a server, before timing out.
            '''
            # QUESTION: what is the difference between lifetime and timeout?
            _.lifetime = _.timeout = 10.0

        # load the DNS server list
        self._load_dns_servers()
        # self.ex_resolver is a fallback dns_resolver used when an exception occurs
        self.ex_resolver = dns.resolver.Resolver(configure=False)
        self.ex_resolver.nameservers = self.dns_servers
        self.logfile = open('results/'+target+'_log.txt','a')

        # load the subdomain dictionaries
        self._load_next_sub()
        self._load_sub_names()

        # initialize counters and state
        self.start_time = time.time()
        self.scan_count = 0
        self.found_count = 0  # prefixes verified to exist as subdomains
        self.STOP_ME = False
        self.ip_dict = {}
        self.found_subs = set()

    def _load_dns_servers(self):
        """
        功能:导入可用的名称服务器 (init初始化时执行)
        :return:
        """
        print '[+] Validate DNS servers ...'
        self.dns_servers = []
        pool = Pool(30)
        for server in open('dns_servers.txt').xreadlines():  # xreadlines returns a generator
            server = server.strip()
            if server:
                # apply_async(func[, args[, kwds[, callback]]]) is non-blocking and runs the tests in parallel
                pool.apply_async(self._test_server, (server,))
        pool.join()  # block the main greenlet until all workers exit
        self.dns_count = len(self.dns_servers)
        sys.stdout.write('\n')
        print '[+] Found %s available DNS Servers in total' % self.dns_count
        if self.dns_count == 0:
            print '[ERROR] No DNS Servers available.'
            sys.exit(-1)

    def _test_server(self, server):
        '''
        Check whether a DNS server is usable (probed by _load_dns_servers() while loading the list).
        Test idea: 1. an existing domain can be resolved to the expected IP;
                   2. resolving a non-existent domain raises an error.
        :param server: nameserver
        :return: None
        '''
        resolver = dns.resolver.Resolver()
        resolver.lifetime = resolver.timeout = 10.0
        try:
            resolver.nameservers = [server]
            existed_domain = 'public-dns-a.baidu.com'
            corrs_ip = '180.76.76.76'
            answers = resolver.query(existed_domain)
            if answers[0].address != corrs_ip:
                raise Exception('incorrect DNS response')
            try:
                non_existed_domain = 'test.bad.dns.lijiejie.com'
                resolver.query(non_existed_domain)
                print '[+] Bad DNS Server found %s' % server
            except:
                self.dns_servers.append(server)
            print '[+] Check DNS Server %s < OK >   Found %s' % (server.ljust(16), len(self.dns_servers))
        except:
            print '[+] Check DNS Server %s <Fail>   Found %s' % (server.ljust(16), len(self.dns_servers))


    def _get_filename(self,option,is_full):
        '''
        Build the path of the dictionary file to open.
        param: option: dictionary type, subnames / next_sub
        param: is_full: whether to use the large or the small dictionary
        return: _file: path of the dictionary to load
        '''
        has_newdct = self.options.get('new_dct')
        if has_newdct:
            try:
                # a new dictionary file name was given, so load the new dictionary
                next_sub,subnames = has_newdct.split(',')
            except Exception:
                print '[ERROR] Names file not exists: %s' % has_newdct
                exit(-1)
            else:
                # if the new next_sub,subnames names fail to load, fall back to the original dictionaries
                self.new_filenames = {
                    'next_sub':'dict/'+next_sub,
                    'subnames':'dict/'+subnames
                }
                filename = self.new_filenames.get(option)
                if os.path.exists(filename):
                    _file = filename
                else:
                    print '[ERROR] Names file not exists: %s' % filename
                    exit(-1)
        elif is_full:
            _file = 'dict/'+option+'_full.txt'
        else:
            _file = 'dict/'+option+'.txt'

        return _file

    def _load_sub_names(self):

        print '[+] Load sub names ...'
        is_full = self.options.get('subnames_full')
        # _file is the full path name
        _file = self._get_filename('subnames',is_full)
        normal_lines = []
        wildcard_list = []
        regex_list = []
        lines = set()
        with open(_file) as f:
            wildcard_lines = []
            for line in f.xreadlines():
                sub = line.strip()
                print 'sub:' + sub
                if not sub or sub in lines:
                    continue
                lines.add(sub)
                # wildcard patterns
                # QUESTION: the actual sub files do not seem to contain any wildcards?
                if sub.find('{alphnum}') >= 0 or sub.find('{alpha}') >= 0 or sub.find('{num}') >= 0:
                    # a wildcard is present: record the raw line in wildcard_lines first
                    wildcard_lines.append(sub)
                    sub = sub.replace('{alphnum}', '[a-z0-9]')
                    sub = sub.replace('{alpha}', '[a-z]')
                    sub = sub.replace('{num}', '[0-9]')
                    print 'sub2: ' + sub
                    if sub not in wildcard_list:
                        # QUESTION: why add the pattern to wildcard_list after replacing the wildcards?
                        wildcard_list.append(sub)
                        regex_list.append('^' + sub + '$')
                else:
                    # lines without wildcards go into normal_lines
                    normal_lines.append(sub)
        pattern = '|'.join(regex_list)
        if pattern:
            _regex = re.compile(pattern)
            if _regex:
                for line in normal_lines[:]:
                    if _regex.search(line):
                        normal_lines.remove(line)

        for item in normal_lines:
            # QUESTION: why does priority increase on every iteration?
            self.priority += 1
            self.queue.put((self.priority, item))

        for item in wildcard_lines:
            # QUESTION: wildcard_lines entries contain wildcards, hence the low priority? (a larger number means lower priority)
            self.queue.put((88888888, item))

    def _load_next_sub(self):
        """
        枚举一、二位子域并添加已存子域
        :return:
        """
        self.next_subs = []
        _set = set()
        is_full = self.options.get('next_sub_full')
        # _file is the full path of the next_sub dictionary
        _file = self._get_filename('next_sub',is_full)
        with open(_file) as f:
            for line in f:
                sub = line.strip()
                if sub and sub not in self.next_subs:
                    # expand wildcards such as {alphnum} into new candidate strings
                    # QUESTION: are the other strings in the original file unused?
                    tmp_set = {sub}  # a one-element set, i.e. set([sub])
                    while len(tmp_set) > 0:
                        item = tmp_set.pop()
                        # print 'item: ' + item
                        if item.find('{alphnum}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                            # for _letter in 'ab89':
                                # If the item is e.g. '{alphnum}{alphnum}', every two-character combination over 'a-z0-9' eventually lands in tmp_set
                                tt = item.replace('{alphnum}', _letter, 1)
                                tmp_set.add(tt)
                        elif item.find('{alpha}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz':
                                tmp_set.add(item.replace('{alpha}', _letter, 1))
                        elif item.find('{num}') >= 0:
                            for _letter in '0123456789':
                                tmp_set.add(item.replace('{num}', _letter, 1))
                        elif item not in _set:
                            # Once every {alphnum}-style wildcard has been replaced, the item lands in _set / self.next_subs
                            # Lines that never contained a wildcard also reach this branch directly
                            _set.add(item)
                            self.next_subs.append(item)

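# The worklist loop above is easier to see in isolation. A hedged standalone sketch of
# the same idea (expand_wildcards is a hypothetical name, not part of the original code):
def expand_wildcards(pattern):
    """Expand {alphnum}/{alpha}/{num} placeholders into every concrete string."""
    results = set()
    tmp_set = {pattern}  # one-element worklist set
    while tmp_set:
        item = tmp_set.pop()
        if '{alphnum}' in item:
            for c in 'abcdefghijklmnopqrstuvwxyz0123456789':
                tmp_set.add(item.replace('{alphnum}', c, 1))
        elif '{alpha}' in item:
            for c in 'abcdefghijklmnopqrstuvwxyz':
                tmp_set.add(item.replace('{alpha}', c, 1))
        elif '{num}' in item:
            for c in '0123456789':
                tmp_set.add(item.replace('{num}', c, 1))
        else:
            results.add(item)  # no placeholders left: fully concrete
    return results

# e.g. '{num}{num}' expands to the 100 strings '00'..'99'
assert len(expand_wildcards('{num}{num}')) == 100
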

    @staticmethod
    # Check whether an ip is an intranet (RFC 1918) address
    def is_intranet(ip):
        ret = ip.split('.')
        if len(ret) != 4:
            return True
        if ret[0] == '10':
            return True
        if ret[0] == '172' and 16 <= int(ret[1]) <= 31:  # 172.16.0.0/12 ends at 172.31.255.255
            return True
        if ret[0] == '192' and ret[1] == '168':
            return True
        return False

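# A few hedged sanity checks for the RFC 1918 ranges handled by is_intranet above
# (10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16); assumes the class is importable:
assert SubNameBrute.is_intranet('10.1.2.3')
assert SubNameBrute.is_intranet('172.16.0.1') and SubNameBrute.is_intranet('172.31.255.255')
assert not SubNameBrute.is_intranet('172.32.0.1')  # just outside 172.16.0.0/12
assert SubNameBrute.is_intranet('192.168.1.1')
assert not SubNameBrute.is_intranet('8.8.8.8')
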
    def put_item(self, item):
        # Add a new subdomain prefix to the pending queue self.queue
        num = item.count('{alphnum}') + item.count('{alpha}') + item.count('{num}')
        if num == 0:
            self.priority += 1
            self.queue.put((self.priority, item))
        else:
            # Prefixes containing wildcards get low priority (larger number = lower priority)
            self.queue.put((self.priority + num * 10000000, item))

    def _universal_parsing(self,sub,ips):
        # Count occurrences; used to detect wildcard DNS resolution
        _sub = sub.split('.')[-1]
        # (_sub, ips): the first-level label together with the ip set the name resolved to
        # QUESTION: what does this have to do with wildcard DNS?
        '''
        For a.b.baidu.com and b.baidu.com, _sub is 'b' in both cases (the label directly
        under the target). When they resolve to the same A records, other subdomains
        sharing that first-level label will likely resolve to the same ips too,
        which suggests wildcard DNS resolution.
        '''
        if (_sub, ips) not in self.ip_dict:
            self.ip_dict[(_sub, ips)] = 1
        else:
            self.ip_dict[(_sub, ips)] += 1

        # Count how many subs resolve to a given ip set
        # (one ip set shared by many different subs also suggests wildcard DNS)
        if ips not in self.ip_dict:
            self.ip_dict[ips] = 1
        else:
            self.ip_dict[ips] += 1

        return self.ip_dict[(_sub, ips)] > 3 or self.ip_dict[ips] > 6

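# A worked illustration (made-up data) of the thresholds used above: more than 3 hits
# for one first-level label, or more than 6 for one ip set, flags wildcard DNS.
_ip_dict = {}
for _sub, _ips in [('a', '1.2.3.4')] * 4:
    _ip_dict[(_sub, _ips)] = _ip_dict.get((_sub, _ips), 0) + 1
    _ip_dict[_ips] = _ip_dict.get(_ips, 0) + 1
# The 4th hit on label 'a' crosses the > 3 threshold, so the result would be discarded:
assert _ip_dict[('a', '1.2.3.4')] > 3
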
    def _validate_subdomain(self,j,sub):
        '''
        Check whether the candidate subdomain actually resolves
        '''

        # Build the candidate subdomain
        subdmname = sub + '.' + self.target

        try:
            answers = self.resolvers[j].query(subdmname)
        except dns.resolver.NoAnswer:
            try:
                # On NoAnswer, retry with the backup dns resolver
                answers = self.ex_resolver.query(subdmname)
            except dns.resolver.NoAnswer:
                # If both attempts fail, report the name as absent
                return False
        if answers:
            # Got an answer: record the prefix in self.found_subs
            # QUESTION: shouldn't names proven absent be recorded somewhere too?
            self.found_subs.add(sub)
            # Join the sorted A records into one string
            ips = ', '.join(sorted([answer.address for answer in answers]))
            print ips
            self.cur_ips = ips
            # Note: this only catches the case of a single ip equal to one of these placeholders
            if ips in ['1.1.1.1', '127.0.0.1', '0.0.0.0']:
                return False
            # Filter out intranet names
            # self.ignore_intranet says whether intranet-ip filtering is enabled
            # SubNameBrute.is_intranet(answers[0].address) performs the actual check
            # QUESTION: why test only answers[0]?
            if self.ignore_intranet and SubNameBrute.is_intranet(answers[0].address):
                return False
            # Wildcard DNS check
            if self._universal_parsing(sub, ips):
                return False
        else:
            return False

        return True

    def _scan_cname(self,j,subdmname):
        '''
        Check whether the subdomain's cname is itself a new subdomain, i.e. whether it yields a new prefix
        '''
        try:
            self.scan_count += 1
            # subdmname has already been validated; now fetch its cname
            answers = self.resolvers[j].query(subdmname, 'cname')
            cname = answers[0].target.to_unicode().rstrip('.')
            # cname.endswith(self.target) checks whether the cname is itself a subdomain of the target
            if cname.endswith(self.target) and cname not in self.found_subs:
                # The cname is a subdomain: record it in self.found_subs

                self.found_subs.add(cname)
                # e.g. with cname 'www.a.shifen.com' and target 'shifen.com', cname_sub is 'www.a'
                # Re-queue the prefix: combined at other levels it may form new subdomains
                cname_sub = cname[:len(cname) - len(self.target) - 1]  # new sub
                self.queue.put((0, cname_sub))
        except:
            pass

    def _scan(self, j):
        # The core scanning worker
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]]

        while not self.queue.empty():
            try:
                # Fetch one prefix from the queue
                item = self.queue.get(timeout=1.0)[1]
                self.scan_count += 1
            except:
                break
            try:
                # _load_sub_names may have queued subs that still contain wildcards, so expand them here
                if item.find('{alphnum}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                        self.put_item(item.replace('{alphnum}', _letter, 1))
                    continue
                elif item.find('{alpha}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz':
                        self.put_item(item.replace('{alpha}', _letter, 1))
                    continue
                elif item.find('{num}') >= 0:
                    for _letter in '0123456789':
                        self.put_item(item.replace('{num}', _letter, 1))
                    continue
                elif item.find('{next_sub}') >= 0:
                    for _ in self.next_subs:
                        self.queue.put((0, item.replace('{next_sub}', _, 1)))
                    continue
                else:
                    sub = item

                # Skip prefixes that have already been validated
                if sub in self.found_subs:
                    continue

                if self._validate_subdomain(j,sub):
                    cur_sub_domain = sub + '.' + self.target
                    self._scan_cname(j, cur_sub_domain)  # check whether the cname is itself a new subdomain
                    self.found_count += 1
                    self.outfile.write(cur_sub_domain + '\n')
                    '''
                    On flush vs. write: http://blog.csdn.net/fenfeiqinjian/article/details/49444973
                        File streams are generally buffered: write does not push data straight to
                        the file but into an in-memory buffer. flush forces the buffered data out
                        immediately and empties the buffer. Normally the OS writes the buffer out
                        on its own once it fills up. close internally calls flush before closing,
                        so data integrity is preserved even when the buffer is not full. If the
                        process exits without close being called, buffered content can be lost.
                    '''
                    self.outfile.flush()
                    self.outfile_ips.write(self.cur_ips+'\n')
                    self.outfile_ips.flush()
                    print cur_sub_domain
                    # Prepending '{next_sub}.' to the current prefix builds deeper, multi-level names
                    self.queue.put((999999999, '{next_sub}.' + sub))

            except (dns.resolver.NXDOMAIN, dns.name.EmptyLabel):
                pass
            except (dns.resolver.NoNameservers, dns.resolver.NoAnswer, dns.exception.Timeout):
                pass
            except Exception:
                pass
            print "scan_count=%s,found_count=%s"%(self.scan_count,self.found_count)

    def run(self):
        # i identifies the coroutine and is also used to pick its dns resolver
        threads = [gevent.spawn(self._scan, i) for i in range(self.options.get('threads'))]

        try:
            gevent.joinall(threads)
        except KeyboardInterrupt:
            print '[WARNING] User aborted.'

        self.end_time = time.time()
        s = (self.end_time-self.start_time)
        m = ((self.end_time - self.start_time)/60)
        h = ((self.end_time - self.start_time) / 3600)

        self.logfile.write(self.fname+'\n')
        result = "scan_count=%s,found_count=%s"%(self.scan_count,self.found_count)
        self.logfile.write(result+'\n')
        time_consuming = "time-consuming:%d seconds"%s
        print time_consuming
        self.logfile.write(time_consuming+'\n')
        time_consuming = "time-consuming:%d minutes" % m
        print time_consuming
        self.logfile.write(time_consuming+'\n')
        time_consuming = "time-consuming:%d hours" % h
        print time_consuming
        self.logfile.write(time_consuming+'\n')
        # Count how many domains of each level appear in the results file (self.fname)
        ocount, bcount, tcount, fcount = self.get_distribution(self.fname)

        subdomain_count = 'Second-level domains: %d' % ocount
        print subdomain_count
        self.logfile.write(subdomain_count+'\n')
        subdomain_count = 'Third-level domains: %d' % bcount
        print subdomain_count
        self.logfile.write(subdomain_count+'\n')
        subdomain_count = 'Fourth-level domains: %d' % tcount
        print subdomain_count
        self.logfile.write(subdomain_count+'\n')
        subdomain_count = 'Fifth-level domains: %d' % fcount
        print subdomain_count
        self.logfile.write(subdomain_count+'\n')

        self.outfile.flush()
        self.outfile.close()
        self.outfile_ips.flush()
        self.outfile_ips.close()

    def get_distribution(self,filename):
        '''
        Count the second-, third-, fourth- and fifth-or-higher-level domains in the results file ``filename``
        '''
        with open(filename, 'rb') as f:
            subdomains = [line.strip() for line in f.readlines()]
        ocount = bcount = tcount = fcount = 0
        for domain in subdomains:
            if domain.count('.') == 2:
                ocount += 1
            elif domain.count('.') == 3:
                bcount += 1
            elif domain.count('.') == 4:
                tcount += 1
            else:
                fcount += 1

        return ocount, bcount, tcount, fcount
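
# The level counts above depend only on the number of dots per result line. A quick
# hedged illustration with made-up names:
samples = ['www.example.com',        # 2 dots -> second-level bucket
           'a.www.example.com',      # 3 dots -> third-level bucket
           'b.a.www.example.com',    # 4 dots -> fourth-level bucket
           'c.b.a.www.example.com']  # 5+ dots -> fifth-level bucket
assert [d.count('.') for d in samples] == [2, 3, 4, 5]
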
class TestStatuslistFilter(unittest.TestCase):
    def setUp(self):
        self.conf = CONFIG
        self.db = {}
        self.input_queue = PriorityQueue()
        self.filtered_queue = PriorityQueue()
        self.filter = StatuslistFilter(self.conf, self.input_queue,
                                       self.filtered_queue, self.db)

    @patch('openprocurement.bridge.rbot.filters.INFINITY')
    def test_filter_ok(self, infinity):
        infinity.__nonzero__.side_effect = [True, False]
        doc = {
            'id': 'test_id',
            'dateModified': '1970-01-02',
            'procurementMethodType': 'dgf',
            'status': 'active.tendering',
            'documents': [{
                'documentType': 'contractProforma'
            }]
        }

        self.input_queue.put((None, deepcopy(doc)))
        self.filter._run()
        self.assertEqual(len(self.filtered_queue), 1)
        filtered_doc = self.filtered_queue.get(block=False)
        self.assertEqual(doc, filtered_doc[1])

    @patch('openprocurement.bridge.rbot.filters.INFINITY')
    def test_filter_not_modified(self, infinity):
        # Not changed dateModified
        infinity.__nonzero__.side_effect = [True, False]
        doc = {
            'id': 'test_id',
            'dateModified': '1970-01-01',
            'procurementMethodType': 'dgf',
            'status': 'status1',
            'documents': [{
                'documentType': 'contractProforma'
            }],
        }

        self.input_queue.put((None, deepcopy(doc)))
        self.db['test_id'] = '1970-01-01'

        self.filter._run()
        self.assertEqual(len(self.filtered_queue), 0)
        self.db.pop('test_id')

    @patch('openprocurement.bridge.rbot.filters.INFINITY')
    def test_filter_by_status(self, infinity):
        # Wrong tender status
        infinity.__nonzero__.side_effect = [True, False]
        doc = {
            'id': 'test_id',
            'dateModified': '1970-01-01',
            'procurementMethodType': 'dgf',
            'status': 'status3',
            'documents': [{
                'documentType': 'contractProforma'
            }],
        }

        self.input_queue.put((None, deepcopy(doc)))
        self.filter._run()
        self.assertEqual(len(self.filtered_queue), 0)
Exemple #27
0
class AWSLogs(object):

    ACTIVE = 1
    EXHAUSTED = 2
    WATCH_SLEEP = 2

    def __init__(self, **kwargs):
        self.connection_cls = kwargs.get('connection_cls', AWSConnection)
        self.aws_region = kwargs.get('aws_region')
        self.aws_access_key_id = kwargs.get('aws_access_key_id')
        self.aws_secret_access_key = kwargs.get('aws_secret_access_key')
        self.log_group_name = kwargs.get('log_group_name')
        self.log_stream_name = kwargs.get('log_stream_name')
        self.watch = kwargs.get('watch')
        self.color_enabled = kwargs.get('color_enabled')
        self.output_stream_enabled = kwargs.get('output_stream_enabled')
        self.output_group_enabled = kwargs.get('output_group_enabled')
        self.start = self.parse_datetime(kwargs.get('start'))
        self.end = self.parse_datetime(kwargs.get('end'))
        self.pool_size = max(kwargs.get('pool_size', 0), 10)
        self.max_group_length = 0
        self.max_stream_length = 0
        self.publishers = []
        self.events_queue = Queue()
        self.raw_events_queue = PriorityQueue()
        self.publishers_queue = PriorityQueue()
        self.publishers = []
        self.stream_status = {}
        self.stream_max_timestamp = {}
        self.connection = self.connection_cls(
            self.aws_region,
            aws_access_key_id=self.aws_access_key_id,
            aws_secret_access_key=self.aws_secret_access_key
        )

    def _get_streams_from_patterns(self, log_group_pattern, log_stream_pattern):
        """Returns pairs of group, stream matching ``log_group_pattern`` and
        ``log_stream_pattern``."""
        for group in self._get_groups_from_pattern(log_group_pattern):
            for stream in self._get_streams_from_pattern(group,
                                                         log_stream_pattern):
                yield group, stream

    def _get_groups_from_pattern(self, pattern):
        """Returns groups matching ``pattern``."""
        pattern = '.*' if pattern == 'ALL' else pattern
        reg = re.compile('^{0}'.format(pattern))
        for group in self.get_groups():
            if re.match(reg, group):
                yield group

    def _get_streams_from_pattern(self, group, pattern):
        """Returns streams in ``group`` matching ``pattern``."""
        pattern = '.*' if pattern == 'ALL' else pattern
        reg = re.compile('^{0}'.format(pattern))
        for stream in self.get_streams(group):
            if re.match(reg, stream):
                yield stream

    def _publisher_queue_consumer(self):
        """Consume ``publishers_queue`` api calls, run them and publish log
        events to ``raw_events_queue``. If ``nextForwardToken`` is present
        register a new api call into ``publishers_queue`` using as weight
        the timestamp of the latest event."""
        while True:
            try:
                _, (log_group_name, log_stream_name, next_token) = self.publishers_queue.get(block=False)
            except Empty:
                if self.watch:
                    gevent.sleep(self.WATCH_SLEEP)
                else:
                    break

            response = self.connection.get_log_events(
                next_token=next_token,
                log_group_name=log_group_name,
                log_stream_name=log_stream_name,
                start_time=self.start,
                end_time=self.end,
                start_from_head=True
            )

            if not len(response['events']):
                self.stream_status[(log_group_name, log_stream_name)] = self.EXHAUSTED
                continue

            self.stream_status[(log_group_name, log_stream_name)] = self.ACTIVE

            for event in response['events']:
                event['group'] = log_group_name
                event['stream'] = log_stream_name
                self.raw_events_queue.put((event['timestamp'], event))
                self.stream_max_timestamp[(log_group_name, log_stream_name)] = event['timestamp']

            if 'nextForwardToken' in response:
                self.publishers_queue.put(
                    (response['events'][-1]['timestamp'],
                     (log_group_name, log_stream_name, response['nextForwardToken']))
                )

    def _get_min_timestamp(self):
        """Return the minimum timestamp available across all active streams."""
        pending = [self.stream_max_timestamp[k] for k, v in self.stream_status.iteritems() if v != self.EXHAUSTED]
        return min(pending) if pending else None

    def _get_all_streams_exhausted(self):
        """Return if all streams are exhausted."""
        return all((s == self.EXHAUSTED for s in self.stream_status.itervalues()))

    def _raw_events_queue_consumer(self):
        """Consume events from ``raw_events_queue`` if all active streams
        have already publish events up to the ``_get_min_timestamp`` and
        register them in order into ``events_queue``."""
        while True:
            if self._get_all_streams_exhausted() and self.raw_events_queue.empty():
                if self.watch:
                    gevent.sleep(self.WATCH_SLEEP)
                    continue
                self.events_queue.put(NO_MORE_EVENTS)
                break

            try:
                timestamp, line = self.raw_events_queue.peek(timeout=1)
            except Empty:
                continue

            min_timestamp = self._get_min_timestamp()
            if min_timestamp and min_timestamp < timestamp:
                gevent.sleep(0.3)
                continue

            timestamp, line = self.raw_events_queue.get()

            output = [line['message']]
            if self.output_stream_enabled:
                output.insert(
                    0,
                    self.color(
                        line['stream'].ljust(self.max_stream_length, ' '),
                        'cyan'
                    )
                )
            if self.output_group_enabled:
                output.insert(
                    0,
                    self.color(
                        line['group'].ljust(self.max_group_length, ' '),
                        'green'
                    )
                )
            self.events_queue.put("{0}\n".format(' '.join(output)))

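# The gating rule above (emit an event only once every active stream has reported at
# least up to that timestamp) is the classic k-way merge invariant. A hedged standalone
# sketch with plain heapq, outside the gevent machinery:
import heapq

def merge_up_to_safe_point(streams):
    """streams: lists of (timestamp, message), each sorted by timestamp (assumption)."""
    heap = []
    for sid, events in enumerate(streams):
        for ts, msg in events:
            heapq.heappush(heap, (ts, sid, msg))
    # the highest timestamp each stream has reported so far
    max_ts = dict((sid, events[-1][0]) for sid, events in enumerate(streams) if events)
    safe_ts = min(max_ts.values())  # events up to here are globally ordered
    while heap and heap[0][0] <= safe_ts:
        yield heapq.heappop(heap)

merged = list(merge_up_to_safe_point([[(1, 'a'), (4, 'b')], [(2, 'c'), (3, 'd')]]))
# (4, 'b') is held back until the second stream advances past timestamp 4
assert [ts for ts, _, _ in merged] == [1, 2, 3]
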
    def _events_consumer(self):
        """Print events from ``events_queue`` as soon as they are available."""
        while True:
            event = self.events_queue.get(True)
            if event == NO_MORE_EVENTS:
                break
            sys.stdout.write(event)
            sys.stdout.flush()

    def list_logs(self):
        self.register_publishers()

        pool = Pool(size=self.pool_size)
        pool.spawn(self._raw_events_queue_consumer)
        pool.spawn(self._events_consumer)

        if self.watch:
            pool.spawn(self.register_publishers_periodically)

        for i in xrange(self.pool_size):
            pool.spawn(self._publisher_queue_consumer)
        pool.join()

    def register_publishers(self):
        """Register publishers into ``publishers_queue``."""
        for group, stream in self._get_streams_from_patterns(self.log_group_name, self.log_stream_name):
            if (group, stream) in self.publishers:
                continue
            self.publishers.append((group, stream))
            self.max_group_length = max(self.max_group_length, len(group))
            self.max_stream_length = max(self.max_stream_length, len(stream))
            self.publishers_queue.put((0, (group, stream, None)))
            self.stream_status[(group, stream)] = self.ACTIVE
            self.stream_max_timestamp[(group, stream)] = -1

    def register_publishers_periodically(self):
        while True:
            self.register_publishers()
            gevent.sleep(2)

    def list_groups(self):
        """Lists available CloudWatch logs groups"""
        for group in self.get_groups():
            print group

    def list_streams(self, *args, **kwargs):
        """Lists available CloudWatch logs streams in ``log_group_name``."""
        for stream in self.get_streams(*args, **kwargs):
            print stream

    def get_groups(self):
        """Returns available CloudWatch logs groups"""
        next_token = None
        while True:
            response = self.connection.describe_log_groups(next_token=next_token)

            for group in response.get('logGroups', []):
                yield group['logGroupName']

            if 'nextToken' in response:
                next_token = response['nextToken']
            else:
                break

    def get_streams(self, log_group_name=None):
        """Returns available CloudWatch logs streams in ``log_group_name``."""
        log_group_name = log_group_name or self.log_group_name
        next_token = None

        while True:
            response = self.connection.describe_log_streams(
                log_group_name=log_group_name,
                next_token=next_token
            )

            for stream in response.get('logStreams', []):
                yield stream['logStreamName']

            if 'nextToken' in response:
                next_token = response['nextToken']
            else:
                break

    def color(self, text, color):
        """Returns coloured version of ``text`` if ``color_enabled``."""
        if self.color_enabled:
            return colored(text, color)
        return text

    def parse_datetime(self, datetime_text):
        """Parse ``datetime_text`` into a ``datetime``."""
        if not datetime_text:
            return None

        ago_match = re.match(r'(\d+)\s?(m|minute|minutes|h|hour|hours|d|day|days|w|week|weeks)(?: ago)?', datetime_text)
        if ago_match:
            amount, unit = ago_match.groups()
            amount = int(amount)
            unit = {'m': 60, 'h': 3600, 'd': 86400, 'w': 604800}[unit[0]]
            date = datetime.now() + timedelta(seconds=unit * amount * -1)
        else:
            try:
                date = parse(datetime_text)
            except ValueError:
                raise exceptions.UnknownDateError(datetime_text)

        return int(date.strftime("%s")) * 1000
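
# A hedged sketch of the relative-time branch above, reduced to its core. It keeps the
# same unit table but uses time.mktime instead of the platform-specific '%s' format:
import re
import time
from datetime import datetime, timedelta

def parse_ago(text, now=None):
    m = re.match(r'(\d+)\s?(m|h|d|w)', text)
    seconds = {'m': 60, 'h': 3600, 'd': 86400, 'w': 604800}[m.group(2)]
    date = (now or datetime.now()) - timedelta(seconds=seconds * int(m.group(1)))
    return int(time.mktime(date.timetuple())) * 1000  # epoch milliseconds

ref = datetime(2020, 1, 1, 12, 0, 0)
# '90 m' lands exactly 90 * 60 * 1000 ms before the reference time
assert parse_ago('0 m', now=ref) - parse_ago('90 m', now=ref) == 90 * 60 * 1000
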
Exemple #28
0
class StaticSmtpRelay(Relay):
    """Manages the relaying of messages to a specific ``host:port``. Connections
    may be recycled when possible, to send multiple messages over a single
    channel.

    :param host: Host string to connect to.
    :param port: Port to connect to.
    :param pool_size: At most this many simultaneous connections will be open to
                      the destination. If this limit is reached and no
                      connections are idle, new attempts will block.
    :param tls: Optional dictionary of TLS settings passed directly as
                keyword arguments to :class:`gevent.ssl.SSLSocket`.
    :param tls_required: If given and True, it should be considered a delivery
                         failure if TLS cannot be negotiated by the client.
    :param connect_timeout: Timeout in seconds to wait for a client connection
                            to be successful before issuing a transient failure.
    :param command_timeout: Timeout in seconds to wait for a reply to each SMTP
                            command before issuing a transient failure.
    :param data_timeout: Timeout in seconds to wait for a reply to message data
                         before issuing a transient failure.
    :param idle_timeout: Timeout in seconds after a message is delivered before
                         a QUIT command is sent and the connection terminated.
                         If another message should be delivered before this
                         timeout expires, the connection will be re-used. By
                         default, QUIT is sent immediately and connections are
                         never re-used.


    """

    def __init__(self, host, port=25, pool_size=None, client_class=None,
                       **client_kwargs):
        super(StaticSmtpRelay, self).__init__()
        if client_class:
            self.client_class = client_class
        else:
            from slimta.relay.smtp.client import SmtpRelayClient
            self.client_class = SmtpRelayClient
        self.host = host
        self.port = port
        self.queue = PriorityQueue()
        self.pool = set()
        self.pool_size = pool_size
        self.client_kwargs = client_kwargs

    def _remove_client(self, client):
        self.pool.remove(client)
        if not self.queue.empty() and not self.pool:
            self._add_client()

    def _add_client(self):
        client = self.client_class((self.host, self.port), self.queue,
                                   **self.client_kwargs)
        client.start()
        client.link(self._remove_client)
        self.pool.add(client)

    def _check_idle(self):
        for client in self.pool:
            if client.idle:
                return
        if not self.pool_size or len(self.pool) < self.pool_size:
            self._add_client()

    def attempt(self, envelope, attempts):
        self._check_idle()
        result = AsyncResult()
        self.queue.put((1, result, envelope))
        return result.get()
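
# The attempt() pattern above -- hand a gevent AsyncResult through the queue and block
# on result.get() -- is worth seeing in isolation. A hedged, self-contained sketch:
import gevent
from gevent.event import AsyncResult
from gevent.queue import PriorityQueue

_queue = PriorityQueue()

def _worker():
    priority, result, payload = _queue.get()
    result.set('delivered: %s' % payload)  # the worker fulfils the promise

gevent.spawn(_worker)
_result = AsyncResult()
_queue.put((1, _result, 'hello'))  # same (priority, result, envelope) shape as above
print(_result.get())               # blocks until the worker calls set()
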
class BasicDataBridge(object):
    """Basic Bridge"""
    def __init__(self, config):
        super(BasicDataBridge, self).__init__()
        defaults = deepcopy(DEFAULTS)
        defaults.update(config['main'])
        self.config = defaults
        # Init config
        for key, value in defaults.items():
            setattr(self, key, value)
        self.bridge_id = uuid.uuid4().hex
        self.api_host = self.config.get('resources_api_server')
        self.api_version = self.config.get('resources_api_version')
        self.retrievers_params = self.config.get('retrievers_params')
        self.storage_type = self.config['storage_config'].get(
            'storage_type', 'couchdb')
        self.worker_type = self.config['worker_config'].get(
            'worker_type', 'basic_couchdb')
        self.filter_type = self.config['filter_config'].get(
            'filter_type', 'basic_couchdb')

        # Check up_wait_sleep
        up_wait_sleep = self.retrievers_params.get('up_wait_sleep')
        if up_wait_sleep is not None and up_wait_sleep < 30:
            raise DataBridgeConfigError(
                'Invalid \'up_wait_sleep\' in \'retrievers_params\'. Value must be at least 30.'
            )

        # Pools
        self.workers_pool = gevent.pool.Pool(self.workers_max)
        self.retry_workers_pool = gevent.pool.Pool(self.retry_workers_max)
        self.filter_workers_pool = gevent.pool.Pool(self.filter_workers_count)

        # Queues
        if self.input_queue_size == -1:
            self.input_queue = PriorityQueue()
        else:
            self.input_queue = PriorityQueue(self.input_queue_size)

        if self.resource_items_queue_size == -1:
            self.resource_items_queue = PriorityQueue()
        else:
            self.resource_items_queue = PriorityQueue(
                self.resource_items_queue_size)

        self.api_clients_queue = Queue()
        # self.retry_api_clients_queue = Queue()

        if self.retry_resource_items_queue_size == -1:
            self.retry_resource_items_queue = PriorityQueue()
        else:
            self.retry_resource_items_queue = PriorityQueue(
                self.retry_resource_items_queue_size)

        if self.api_host != '' and self.api_host is not None:
            api_host = urlparse(self.api_host)
            if api_host.scheme == '' and api_host.netloc == '':
                raise DataBridgeConfigError(
                    'Invalid \'resources_api_server\' url.')
        else:
            raise DataBridgeConfigError(
                '\'resources_api_server\' is missing or empty in the config dictionary'
            )

        # Connecting storage plugin
        self.db = None
        for entry_point in iter_entry_points(
                'openprocurement.bridge.basic.storage_plugins',
                self.storage_type):
            plugin = entry_point.load()
            self.db = plugin(self.config)

        # Register handlers
        handlers = self.config.get('handlers', [])
        for entry_point in iter_entry_points(
                'openprocurement.bridge.basic.handlers'):
            if not handlers or entry_point.name in handlers:
                plugin = entry_point.load()
                PROCUREMENT_METHOD_TYPE_HANDLERS[entry_point.name] = plugin(
                    self.config, self.db)

        if hasattr(self, 'filter_type'):
            for entry_point in iter_entry_points(
                    'openprocurement.bridge.basic.filter_plugins',
                    self.filter_type):
                self.filter_greenlet = entry_point.load()
        for entry_point in iter_entry_points(
                'openprocurement.bridge.basic.worker_plugins',
                self.worker_type):
            self.worker_greenlet = entry_point.load()

        self.feeder = ResourceFeeder(host=self.config.get(
            'public_resources_api_server', self.api_host),
                                     version=self.api_version,
                                     key='',
                                     resource=self.config['resource'],
                                     extra_params=self.extra_params,
                                     retrievers_params=self.retrievers_params,
                                     adaptive=True,
                                     with_priority=True)
        self.api_clients_info = {}

    def create_api_client(self):
        client_user_agent = self.user_agent + '/' + self.bridge_id
        timeout = 0.1
        while 1:
            try:
                api_client = APIClient(host_url=self.api_host,
                                       user_agent=client_user_agent,
                                       api_version=self.api_version,
                                       key='',
                                       resource=self.resource)
                client_id = uuid.uuid4().hex
                logger.info('Started api_client {}'.format(
                    api_client.session.headers['User-Agent']),
                            extra={'MESSAGE_ID': 'create_api_clients'})
                api_client_dict = {
                    'id': client_id,
                    'client': api_client,
                    'request_interval': 0,
                    'not_actual_count': 0
                }
                self.api_clients_info[api_client_dict['id']] = {
                    'drop_cookies': False,
                    'request_durations': {},
                    'request_interval': 0,
                    'avg_duration': 0
                }
                self.api_clients_queue.put(api_client_dict)
                break
            except RequestFailed as e:
                logger.error(
                    'Failed start api_client with status code {}'.format(
                        e.status_code),
                    extra={'MESSAGE_ID': 'exceptions'})
                timeout = timeout * 2
                logger.info(
                    'create_api_client will be sleep {} sec.'.format(timeout))
                sleep(timeout)
            except Exception as e:
                logger.error('Failed start api client with error: {}'.format(
                    e.message),
                             extra={'MESSAGE_ID': 'exceptions'})
                timeout = timeout * 2
                logger.info(
                    'create_api_client will be sleep {} sec.'.format(timeout))
                sleep(timeout)

    def fill_api_clients_queue(self):
        while self.api_clients_queue.qsize() < self.workers_min:
            self.create_api_client()

    def fill_input_queue(self):
        # if not hasattr(self.db, 'filter'):
        #     self.input_queue = self.resource_items_queue
        for resource_item in self.feeder.get_resource_items():
            self.input_queue.put(resource_item)
            logger.debug('Add to temp queue from sync: {} {} {}'.format(
                self.resource[:-1], resource_item[1]['id'],
                resource_item[1]['dateModified']),
                         extra={
                             'MESSAGE_ID': 'received_from_sync',
                             'TEMP_QUEUE_SIZE': self.input_queue.qsize()
                         })

    def _get_average_requests_duration(self):
        req_durations = []
        delta = timedelta(seconds=self.perfomance_window)
        current_date = datetime.now() - delta
        for cid, info in self.api_clients_info.items():
            if len(info['request_durations']) > 0:
                if min(info['request_durations'].keys()) <= current_date:
                    info['grown'] = True
                avg = round(
                    sum(info['request_durations'].values()) * 1.0 /
                    len(info['request_durations']), 3)
                req_durations.append(avg)
                info['avg_duration'] = avg

        if len(req_durations) > 0:
            return round(sum(req_durations) / len(req_durations),
                         3), req_durations
        else:
            return 0, req_durations

    # TODO: Add logic to restart the sync if the last response is older than some
    # threshold and there are no active tasks for the resource

    def queues_controller(self):
        while True:
            if (self.workers_pool.free_count() > 0
                    and (self.resource_items_queue.qsize() >
                         ((float(self.resource_items_queue_size) / 100) *
                          self.workers_inc_threshold))):
                self.create_api_client()
                w = self.worker_greenlet.spawn(self.api_clients_queue,
                                               self.resource_items_queue,
                                               self.db, self.config,
                                               self.retry_resource_items_queue,
                                               self.api_clients_info)
                self.workers_pool.add(w)
                logger.info('Queue controller: Create main queue worker.')
            elif (self.resource_items_queue.qsize() <
                  ((float(self.resource_items_queue_size) / 100) *
                   self.workers_dec_threshold)):
                if len(self.workers_pool) > self.workers_min:
                    wi = self.workers_pool.greenlets.pop()
                    wi.shutdown()
                    api_client_dict = self.api_clients_queue.get()
                    del self.api_clients_info[api_client_dict['id']]
                    logger.info('Queue controller: Kill main queue worker.')
            filled_resource_items_queue = round(
                self.resource_items_queue.qsize() /
                (float(self.resource_items_queue_size) / 100), 2)
            logger.info('Resource items queue filled on {} %'.format(
                filled_resource_items_queue))
            filled_retry_resource_items_queue = round(
                self.retry_resource_items_queue.qsize() /
                (float(self.retry_resource_items_queue_size) / 100), 2)
            logger.info('Retry resource items queue filled on {} %'.format(
                filled_retry_resource_items_queue))
            sleep(self.queues_controller_timeout)

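# Hypothetical numbers illustrating the scaling thresholds in queues_controller above:
# with a queue capacity of 1000, workers_inc_threshold = 75 and workers_dec_threshold = 35,
# a worker is added above 750 queued items and one is removed below 350.
resource_items_queue_size = 1000

def should_scale_up(qsize, workers_inc_threshold=75):
    return qsize > (float(resource_items_queue_size) / 100) * workers_inc_threshold

def should_scale_down(qsize, workers_dec_threshold=35):
    return qsize < (float(resource_items_queue_size) / 100) * workers_dec_threshold

assert should_scale_up(800) and not should_scale_up(700)
assert should_scale_down(300) and not should_scale_down(400)
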
    def gevent_watcher(self):
        self.perfomance_watcher()

        # Check fill threads
        input_threads = 1
        if self.input_queue_filler.ready():
            input_threads = 0
            logger.error('Temp queue filler error: {}'.format(
                self.input_queue_filler.exception.message),
                         extra={'MESSAGE_ID': 'exception'})
            self.input_queue_filler = spawn(self.fill_input_queue)
        logger.info('Input threads {}'.format(input_threads),
                    extra={'INPUT_THREADS': input_threads})
        fill_threads = 1
        if hasattr(self, 'queue_filter') and self.queue_filter.ready():
            fill_threads = 0
            logger.error('Fill thread error: {}'.format(
                self.queue_filter.exception.message),
                         extra={'MESSAGE_ID': 'exception'})
            self.queue_filter = self.filter_greenlet.spawn(
                self.config, self.input_queue, self.resource_items_queue,
                self.db)
        logger.info('Filter threads {}'.format(fill_threads),
                    extra={'FILTER_THREADS': fill_threads})

        main_threads = self.workers_max - self.workers_pool.free_count()
        logger.info('Main threads {}'.format(main_threads),
                    extra={'MAIN_THREADS': main_threads})

        if len(self.workers_pool) < self.workers_min:
            for i in xrange(0, (self.workers_min - len(self.workers_pool))):
                self.create_api_client()
                w = self.worker_greenlet.spawn(self.api_clients_queue,
                                               self.resource_items_queue,
                                               self.db, self.config,
                                               self.retry_resource_items_queue,
                                               self.api_clients_info)
                self.workers_pool.add(w)
                logger.info('Watcher: Create main queue worker.')
        retry_threads = self.retry_workers_max - self.retry_workers_pool.free_count(
        )
        logger.info('Retry threads {}'.format(retry_threads),
                    extra={'RETRY_THREADS': retry_threads})
        if len(self.retry_workers_pool) < self.retry_workers_min:
            for i in xrange(
                    0, self.retry_workers_min - len(self.retry_workers_pool)):
                self.create_api_client()
                w = self.worker_greenlet.spawn(self.api_clients_queue,
                                               self.retry_resource_items_queue,
                                               self.db, self.config,
                                               self.retry_resource_items_queue,
                                               self.api_clients_info)
                self.retry_workers_pool.add(w)
                logger.info('Watcher: Create retry queue worker.')

        # Log queues size and API clients count
        main_queue_size = self.resource_items_queue.qsize()
        logger.info('Resource items queue size {}'.format(main_queue_size),
                    extra={'MAIN_QUEUE_SIZE': main_queue_size})
        retry_queue_size = self.retry_resource_items_queue.qsize()
        logger.info(
            'Resource items retry queue size {}'.format(retry_queue_size),
            extra={'RETRY_QUEUE_SIZE': retry_queue_size})
        api_clients_count = len(self.api_clients_info)
        logger.info('API Clients count: {}'.format(api_clients_count),
                    extra={'API_CLIENTS': api_clients_count})

    def _calculate_st_dev(self, values):
        if len(values) > 0:
            avg = sum(values) * 1.0 / len(values)
            variance = map(lambda x: (x - avg)**2, values)
            avg_variance = sum(variance) * 1.0 / len(variance)
            st_dev = math.sqrt(avg_variance)
            return round(st_dev, 3)
        else:
            return 0

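# A quick worked check of the population standard deviation computed above: for
# [1, 2, 3, 4] the mean is 2.5, the variance is (2.25 + 0.25 + 0.25 + 2.25) / 4 = 1.25,
# and sqrt(1.25) rounds to 1.118.
import math

values = [1, 2, 3, 4]
avg = sum(values) * 1.0 / len(values)
variance = [(x - avg) ** 2 for x in values]
assert round(math.sqrt(sum(variance) / len(variance)), 3) == 1.118
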
    def _mark_bad_clients(self, dev):
        # Mark bad api clients
        for cid, info in self.api_clients_info.items():
            if info.get('grown', False) and info['avg_duration'] > dev:
                info['drop_cookies'] = True
                logger.debug(
                    'Performance watcher: Mark client {} as bad, avg. request_duration is {} sec.'
                    .format(cid, info['avg_duration']),
                    extra={'MESSAGE_ID': 'marked_as_bad'})
            elif info['avg_duration'] < dev and info['request_interval'] > 0:
                info['drop_cookies'] = True
                logger.debug(
                    'Performance watcher: Mark client {} as bad, request_interval is {} sec.'
                    .format(cid, info['request_interval']),
                    extra={'MESSAGE_ID': 'marked_as_bad'})

    def perfomance_watcher(self):
        avg_duration, values = self._get_average_requests_duration()
        for _, info in self.api_clients_info.items():
            delta = timedelta(seconds=self.perfomance_window +
                              self.watch_interval)
            current_date = datetime.now() - delta
            delete_list = []
            for key in info['request_durations']:
                if key < current_date:
                    delete_list.append(key)
            for k in delete_list:
                del info['request_durations'][k]
            delete_list = []

        st_dev = self._calculate_st_dev(values)
        if len(values) > 0:
            min_avg = min(values) * 1000
            max_avg = max(values) * 1000
        else:
            max_avg = 0
            min_avg = 0
        dev = round(st_dev + avg_duration, 3)

        logger.info('Performance watcher:\nREQUESTS_STDEV - {} sec.\n'
                    'REQUESTS_DEV - {} ms.\nREQUESTS_MIN_AVG - {} ms.\n'
                    'REQUESTS_MAX_AVG - {} ms.\nREQUESTS_AVG - {} sec.'.format(
                        round(st_dev, 3), dev, min_avg, max_avg, avg_duration),
                    extra={
                        'REQUESTS_DEV': dev * 1000,
                        'REQUESTS_MIN_AVG': min_avg,
                        'REQUESTS_MAX_AVG': max_avg,
                        'REQUESTS_AVG': avg_duration * 1000
                    })
        self._mark_bad_clients(dev)

    def run(self):
        logger.info('Start Basic Bridge',
                    extra={'MESSAGE_ID': 'start_basic_bridge'})
        logger.info('Start data sync...',
                    extra={'MESSAGE_ID': 'basic_bridge__data_sync'})
        self.input_queue_filler = spawn(self.fill_input_queue)
        if hasattr(self, 'filter_greenlet'):
            self.queue_filter = self.filter_greenlet.spawn(
                self.config, self.input_queue, self.resource_items_queue,
                self.db)
        else:
            self.resource_items_queue = self.input_queue
        spawn(self.queues_controller)
        while True:
            self.gevent_watcher()
            sleep(self.watch_interval)
Exemple #30
0
class ConnectionPool(object):

    def __init__(self, factory,
                 retry_max=3, retry_delay=.1,
                 timeout=-1, max_lifetime=600.,
                 max_size=10, options=None):
        self.max_size = max_size
        self.pool = PriorityQueue()
        self.size = 0
        self.factory = factory
        self.retry_max = retry_max
        self.retry_delay = retry_delay
        self.timeout = timeout
        self.max_lifetime = max_lifetime
        if options is None:
            self.options = {}
        else:
            self.options = options

    def too_old(self, conn):
        return time.time() - conn.get_lifetime() > self.max_lifetime

    def release_connection(self, conn):
        connected = conn.is_connected()
        if connected and not self.too_old(conn):
            self.pool.put((conn.get_lifetime(), conn))
        else:
            conn.invalidate()

    def get(self, **options):
        pool = self.pool

        # first let's try to find a matching one
        found = None
        if self.size >= self.max_size or pool.qsize():
            for priority, candidate in pool:
                if self.too_old(candidate):
                    # let's drop it
                    continue

                matches = candidate.matches(**options)
                if not matches:
                    # let's put it back
                    pool.put((priority, candidate))
                else:
                    found = candidate
                    break

        # we got one.. we use it
        if found is not None:
            return found

        # we build a new one and send it back
        tries = 0
        last_error = None

        while tries < self.retry_max:
            self.size += 1
            try:
                new_item = self.factory(**options)
            except Exception, e:
                self.size -= 1
                last_error = e
            else:
                # we should be connected now
                if new_item.is_connected():
                    return new_item

            tries += 1
            gevent.sleep(self.retry_delay)

        if last_error is None:
            raise MaxTriesError()
        else:
            raise last_error
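
# A hedged usage sketch for the pool above; DummyConn and its methods are stand-ins
# for whatever connection type the real factory produces:
import time

class DummyConn(object):
    """Stand-in connection exposing the interface ConnectionPool expects."""
    def __init__(self, **options):
        self.created = time.time()
    def get_lifetime(self):
        return self.created
    def is_connected(self):
        return True
    def matches(self, **options):
        return True
    def invalidate(self):
        pass

pool = ConnectionPool(DummyConn, max_size=2)
conn = pool.get()              # no idle connection yet, so the factory builds one
pool.release_connection(conn)  # young and still connected: it goes back into the pool
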
Exemple #31
0
class SubNameBrute(object):
    def __init__(self, *params):
        (
            self.domain,
            self.options,
            self.process_num,
            self.dns_servers,
            self.next_subs,
            self.scan_count,
            self.found_count,
            self.queue_size_array,
            tmp_dir,
        ) = params
        self.dns_count = len(self.dns_servers)
        self.scan_count_local = 0
        self.found_count_local = 0
        self.resolvers = [
            dns.resolver.Resolver(configure=False)
            for _ in range(self.options.threads)
        ]
        for r in self.resolvers:
            r.lifetime = r.timeout = 10.0
        self.queue = PriorityQueue()
        self.priority = 0
        self.ip_dict = {}
        self.found_subs = set()
        self.timeout_subs = {}
        self.count_time = time.time()
        self.outfile = open(
            "%s/%s_part_%s.txt" % (tmp_dir, self.domain, self.process_num),
            "w")
        self.normal_names_set = set()
        self.load_sub_names()
        self.lock = RLock()

    def load_sub_names(self):
        normal_lines = []
        wildcard_lines = []
        wildcard_set = set()
        regex_list = []
        lines = set()
        with open(self.options.file) as inFile:
            for line in inFile.readlines():
                sub = line.strip()
                if not sub or sub in lines:
                    continue
                lines.add(sub)

                brace_count = sub.count("{")
                if brace_count > 0:
                    wildcard_lines.append((brace_count, sub))
                    sub = sub.replace("{alphnum}", "[a-z0-9]")
                    sub = sub.replace("{alpha}", "[a-z]")
                    sub = sub.replace("{num}", "[0-9]")
                    if sub not in wildcard_set:
                        wildcard_set.add(sub)
                        regex_list.append("^" + sub + "$")
                else:
                    normal_lines.append(sub)
                    self.normal_names_set.add(sub)

        if regex_list:
            pattern = "|".join(regex_list)
            _regex = re.compile(pattern)
            for line in normal_lines[:]:  # iterate over a copy: the list is mutated below
                if _regex.search(line):
                    normal_lines.remove(line)

        for _ in normal_lines[self.process_num::self.options.process]:
            self.queue.put((0, _))  # priority set to 0
        for _ in wildcard_lines[self.process_num::self.options.process]:
            self.queue.put(_)

    def scan(self, j):
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]
                                         ] + self.dns_servers

        while True:
            try:
                self.lock.acquire()
                if time.time() - self.count_time > 1.0:
                    self.scan_count.value += self.scan_count_local
                    self.scan_count_local = 0
                    self.queue_size_array[
                        self.process_num] = self.queue.qsize()
                    if self.found_count_local:
                        self.found_count.value += self.found_count_local
                        self.found_count_local = 0
                    self.count_time = time.time()
                self.lock.release()
                brace_count, sub = self.queue.get(timeout=3.0)
                if brace_count > 0:
                    brace_count -= 1
                    if sub.find("{next_sub}") >= 0:
                        for _ in self.next_subs:
                            self.queue.put((0, sub.replace("{next_sub}", _)))
                    if sub.find("{alphnum}") >= 0:
                        for _ in "abcdefghijklmnopqrstuvwxyz0123456789":
                            self.queue.put(
                                (brace_count, sub.replace("{alphnum}", _, 1)))
                    elif sub.find("{alpha}") >= 0:
                        for _ in "abcdefghijklmnopqrstuvwxyz":
                            self.queue.put(
                                (brace_count, sub.replace("{alpha}", _, 1)))
                    elif sub.find("{num}") >= 0:
                        for _ in "0123456789":
                            self.queue.put(
                                (brace_count, sub.replace("{num}", _, 1)))
                    continue
            except gevent.queue.Empty as e:
                break

            try:

                if sub in self.found_subs:
                    continue

                self.scan_count_local += 1
                cur_domain = sub + "." + self.domain
                answers = self.resolvers[j].query(cur_domain)

                if answers:
                    self.found_subs.add(sub)
                    ips = ", ".join(
                        sorted([answer.address for answer in answers]))
                    if ips in ["1.1.1.1", "127.0.0.1", "0.0.0.0", "0.0.0.1"]:
                        continue
                    if self.options.i and is_intranet(answers[0].address):
                        continue

                    try:
                        self.scan_count_local += 1
                        answers = self.resolvers[j].query(cur_domain, "cname")
                        cname = answers[0].target.to_unicode().rstrip(".")
                        if cname.endswith(
                                self.domain) and cname not in self.found_subs:
                            cname_sub = cname[:len(cname) - len(self.domain) -
                                              1]  # new sub
                            if cname_sub not in self.normal_names_set:
                                self.found_subs.add(cname)
                                self.queue.put((0, cname_sub))
                    except Exception as e:
                        pass

                    first_level_sub = sub.split(".")[-1]
                    if (first_level_sub, ips) not in self.ip_dict:
                        self.ip_dict[(first_level_sub, ips)] = 1
                    else:
                        self.ip_dict[(first_level_sub, ips)] += 1
                        if self.ip_dict[(first_level_sub, ips)] > 30:
                            continue

                    self.found_count_local += 1

                    self.outfile.write(
                        cur_domain.ljust(30) + "\t" + ips + "\n")
                    self.outfile.flush()
                    try:
                        self.scan_count_local += 1
                        self.resolvers[j].query("test-not-existed." +
                                                cur_domain)
                    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
                        if self.queue.qsize() < 10000:
                            for _ in self.next_subs:
                                self.queue.put((0, _ + "." + sub))
                        else:
                            self.queue.put((1, "{next_sub}." + sub))
                    except Exception as e:
                        pass

            except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
                pass
            except dns.resolver.NoNameservers as e:
                self.queue.put((0, sub))  # Retry
            except dns.exception.Timeout as e:
                self.timeout_subs[sub] = self.timeout_subs.get(sub, 0) + 1
                if self.timeout_subs[sub] <= 2:
                    self.queue.put((0, sub))  # Retry
            except Exception as e:
                import traceback

                traceback.print_exc()
                with open("errors.log", "a") as errFile:
                    errFile.write("[%s] %s\n" % (type(e), str(e)))

    def run(self):
        threads = [
            gevent.spawn(self.scan, i) for i in range(self.options.threads)
        ]
        gevent.joinall(threads)
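
# load_sub_names above splits the dictionary between processes with extended slicing.
# A quick illustration of the [process_num::process_count] striping (3 processes here):
names = ['www', 'mail', 'ftp', 'dev', 'api', 'cdn', 'vpn']
assert names[0::3] == ['www', 'dev', 'vpn']
assert names[1::3] == ['mail', 'api']
assert names[2::3] == ['ftp', 'cdn']
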
Exemple #32
0
class WaveGenerator(object):

    def __init__(self, u):
        self.uid = u.uid
        self.access_token = u.access_token
        self.db = DB()
        self.results = PriorityQueue()

    def fetch_friends(self):
        friends = self.db.friends(self.uid)
        if not friends:
            friends = [User(x["id"], None, x["name"]) for x
                in unpage_par(fb.me.using(self.access_token).friends.get)]
            self.db.store_friends(self.uid, friends)
        return friends

    def rt_listens_for(self, u):
        rt_results = users.subscribe(u.uid, self.access_token)
        for listen in rt_results:
            if not self.db.has_song(listen.sid):
                self.db.store_song(self.fetch_song(listen.sid))
            t = self.db.match_song(listen.sid)
            if t:
                self.results.put((1, {
                        "trackId": t.ztid,
                        "userId": u.uid,
                        "userName": u.uname,
                        "src": t.surl,
                        "songName": t.title,
                        "artistName": t.artist_name,
                        "artistPhoto": t.aimgurl,
                        "coverSrc": t.rimgurl,
                        "timestamp": listen.ts.strftime("%Y-%m-%dT%H:%M:%S+0000")
                        }))

    def listens_for(self, u, num=50):
        last_ts, last_cts = self.db.last_listen(u.uid)
       #if not last_cts or (
       #        last_cts and datetime.utcnow() - last_ts > timedelta(seconds=300)):
       #    for listen in unpage_seq(
       #            fb[u.uid]["music.listens"].using(self.access_token).get, num):
       #        ts = fb_datetime(listen.get("end_time"))
       #        if last_ts and last_ts >= ts:
       #            break
       #        listen = Listen(
       #            lid=listen.get("id"),
       #            uid=u.uid,
       #            sid=listen.get("data", {}).get("song", {}).get("id"),
       #            ts=ts)
       #        self.db.store_listen(listen)
       #        yield listen
       #    self.db.update_cts(u.uid)
        time.sleep(2)
        for n, listen in enumerate(self.db.listens(u.uid, last_ts)):
            if n % 3 == 0:
                time.sleep(1)
            yield listen

    def fetch_song(self, sid):
        data = fb[sid].using(self.access_token).get()
        return Song(
            sid=sid,
            title=data.get("title"),
            artist_name=data.get("data", {}).get("musician", [{}])[0].get("name"),
            site_name=data.get("site_name"))

    def fetch_listens(self, u):
        for listen in self.listens_for(u):
            if not self.db.has_song(listen.sid):
                self.db.store_song(self.fetch_song(listen.sid))
            t = self.db.match_song(listen.sid)
            if t:
                self.results.put((10, {
                    "trackId": t.ztid,
                    "userId": u.uid,
                    "userName": u.uname,
                    "src": t.surl,
                    "songName": t.title,
                    "artistName": t.artist_name,
                    "artistPhoto": t.aimgurl,
                    "coverSrc": t.rimgurl,
                    "timestamp": listen.ts.strftime("%Y-%m-%dT%H:%M:%S+0000")
                    }))

    def fetch(self):
        friends = self.fetch_friends()
        for f in friends:
            spawn(self.rt_listens_for, f)
            spawn(self.fetch_listens, f)

    def __call__(self):
        spawn(self.fetch)
        return self.results
Exemple #33
0
class Scanner:
    def __init__(self):
        self.start_time = time.time()
        self.queue = PriorityQueue()
        self.history = []
        self.total_count = 0
        self.scan_count = 0
        self._load_target()
        self.outfile = open("log.log", 'w')
        self.console_width = getTerminalSize()[0] - 2

    def _print_msg(self, _msg=None, _found_msg=False):
        if _msg is None:
            msg = '%s TotalCount| %s Scanned in %.2f seconds' % (
                    self.total_count,self.total_count - self.queue.qsize(), time.time() - self.start_time)
            sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) + msg)
        else:
            sys.stdout.write('\r' + _msg + ' ' * (self.console_width - len(_msg)) + '\n')
            self.outfile.write(_msg + '\n')
            self.outfile.flush()
            if _found_msg:
                msg = '%s TotalCount| %s Scanned in %.2f seconds' % (
                        self.total_count,self.total_count - self.queue.qsize(), time.time() - self.start_time)
                sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) + msg)
        sys.stdout.flush()

    def _load_target(self):
        print '[+] Read targets ...'
        target_file = raw_input("Target File :")
        with open(target_file) as f:
            for line in f.xreadlines():
                target = line.strip()
                self.queue.put(target)

        print "TotalCount is %d" % self.queue.qsize()
        self.total_count = self.queue.qsize()
        print "Now scanning ..."

    def _scan(self,case):
        while not self.queue.empty():
            target = self.queue.get()
            if case == "1":
                self.vulnCheck(target)
            if case == "2":
                self.s2_045(target)
            if case == "3":
                self.headers(target)
            if case == "4":
                self.weakfile(target)
            if case == "5":
                self.portscan_c(target)



#####################################################################
#                                                                   #
#    Vuln poc by:xi4okv QQ:48011203                                 #
#                                                                   #
#####################################################################

    def vulnCheck(self,target):
        if ":2375" in target:        
            try:
                res = requests.head("http://" + str(target) + "/containers/json",timeout=2)
                if res.headers['Content-Type'] == 'application/json':
                    self._print_msg(target + "==>  docker api Vuln",True)
                else:
                    self._print_msg()
            except:
                self._print_msg()
            self._print_msg()

        if ":27017" in target:
            ip,port = target.split(":")
            try:
                socket.setdefaulttimeout(3)
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.connect((ip, int(port)))
                data = binascii.a2b_hex("3a000000a741000000000000d40700000000000061646d696e2e24636d640000000000ffffffff130000001069736d6173746572000100000000")
                s.send(data)
                result = s.recv(1024)
                if "ismaster" in result:
                    getlog_data = binascii.a2b_hex("480000000200000000000000d40700000000000061646d696e2e24636d6400000000000100000021000000026765744c6f670010000000737461727475705761726e696e67730000")
                    s.send(getlog_data)
                    result = s.recv(1024)
                    if "totalLinesWritten" in result:
                        self._print_msg(target + "==>  mongodb Vuln",True)
            except Exception, e:
                pass

        if ":6379" in target:
            ip,port = target.split(":")
            try:
                socket.setdefaulttimeout(3)
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.connect((ip, int(port)))
                s.send("INFO\r\n")
                result = s.recv(1024)
                if "redis_version" in result:
                    self._print_msg(target + "==>  redis Vuln",True)
                elif "Authentication" in result:
                    for pass_ in ['123456','redis','pass','password']:
                        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                        s.connect((ip, int(port)))
                        s.send("AUTH %s\r\n" % (pass_))
                        result = s.recv(1024)
                        if '+OK' in result:
                           self._print_msg(target + "==>  redis pass Vuln :" + pass_,True)
            except Exception, e:
                pass
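Note that Scanner._load_target puts bare target strings on the PriorityQueue, so targets come back in lexicographic order rather than file order; a plain Queue would preserve file order. A quick illustration with made-up targets:

from gevent.queue import PriorityQueue, Queue

pq, fifo = PriorityQueue(), Queue()
for target in ['10.0.0.2:6379', '10.0.0.1:27017', '10.0.0.3:2375']:
    pq.put(target)
    fifo.put(target)

print(pq.get())    # -> 10.0.0.1:27017 (lexicographically smallest)
print(fifo.get())  # -> 10.0.0.2:6379  (file order preserved)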
Exemple #34
0
class TaskList:
    """
    Task list is a priority queue of tasks
    """
    def __init__(self, service):
        self.service = service
        self._queue = PriorityQueue()
        # done keeps the tasks that have been extracted from the queue
        # so we can inspect them later

        # keep the done tasks on disk, not in memory.
        # here we use sqlite, but we could plug any key-value store or
        # database behind it: check TaskStorageBase to see the interface
        # your storage needs to implement to be used to store tasks
        # self._done = TaskStorageFile(self)
        self._done = TaskStorageSqlite(self)
        # pointer to current task
        self._current = None
        self._current_mu = Semaphore()

    @property
    def current(self):
        with self._current_mu:
            return self._current

    @current.setter
    def current(self, value):
        with self._current_mu:
            self._current = value

    def __del__(self):
        if self._done:
            self._done.close()

    def get(self):
        """
        pop out a task from the task list
        this call is blocking when the task list is empty
        """
        _, task = self._queue.get()
        self.current = task
        nr_task_waiting.labels(service_guid=self.service.guid).dec()
        return task

    def put(self, task, priority=PRIORITY_NORMAL):
        """
        append task to the task list
        """
        if not isinstance(task, Task):
            raise ValueError(
                "task should be an instance of the Task class not %s" %
                type(task))
        task._priority = priority
        nr_task_waiting.labels(service_guid=self.service.guid).inc()
        self._queue.put((priority, task))

    def done(self, task):
        """
        notify that a task is done
        """
        if task._priority != PRIORITY_SYSTEM:
            self.current = None
            self._done.add(task)

    def empty(self):
        """
        return True if the task list is empty, False otherwise
        """
        return self._queue.empty()

    def clear(self):
        """
        empty the task list of all its tasks
        """

        try:
            while not self.empty():
                self._queue.get_nowait()
        except gevent.queue.Empty:
            return

    def list_tasks(self, all=False):
        """
        return the tasks that are currently in the task list
        @param all: if True, also include the tasks that have already been
                    executed; if False, only return the tasks still waiting
        """
        tasks = [x[1] for x in self._queue.queue]
        if all:
            tasks.extend(self._done.list())

        if self.current and self.current.state == TASK_STATE_RUNNING:
            # also return the current running
            # task as part of the task list
            tasks.insert(0, self.current)

        return tasks

    def get_task_by_guid(self, guid):
        """
        return a task from the list by its guid
        """

        # FIXME: this is really inefficient
        def find_task(guid, l):
            for task in l:
                if task.guid == guid:
                    return task
            raise TaskNotFoundError()

        # check if it's not the current running task
        if self.current and self.current.guid == guid:
            return self.current

        # search in waiting tasks
        try:
            task = find_task(guid, [x[1] for x in self._queue.queue])
            return task
        except TaskNotFoundError:
            pass

        # search in done task
        # this will raise TaskNotFoundError if can't find the task
        return self._done.get(guid)

    def save(self, path):
        """
        serialize the task list to disk
        @param path: file path where to serialize the task list
        """
        def serialize_task(task):
            return {
                "guid": task.guid,
                "action_name": task.action_name,
                "args": task._args,
                "state": task.state,
                "eco": json.loads(task.eco.json) if task.eco else None,
                "created": task.created,
            }

        output = []
        for task in self.list_tasks(all=False):
            output.append(serialize_task(task))
        j.data.serializer.yaml.dump(path, output)

    def load(self, path):
        """
        load a task list that has been serialized with the save method
        @param path: file path where the task list is serialized
        @param service: the service object to which this task list belongs
        """
        if not os.path.exists(path):
            return

        data = j.data.serializer.yaml.load(path)
        for task in data:
            if task['state'] in [TASK_STATE_NEW, TASK_STATE_RUNNING]:
                self.put(_instantiate_task(task, self.service))
            else:
                # unsupported state, just skip it
                continue
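TaskList orders work with (priority, task) tuples. On Python 3, if two tasks share a priority the tuple comparison falls through to the Task objects themselves and raises TypeError unless Task defines __lt__. A common guard, sketched here with a hypothetical monotonic counter as a tie-breaker (not part of the code above):

from itertools import count
from gevent.queue import PriorityQueue

_seq = count()  # monotonic tie-breaker; comparison never reaches the task itself

queue = PriorityQueue()

def put(task, priority=1):
    # (priority, seq) fully decides the order, so task needs no __lt__
    queue.put((priority, next(_seq), task))

def get():
    priority, _, task = queue.get()
    return task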
Exemple #35
0
class SubNameBrute:
    def __init__(self, target, options):
        self.start_time = time.time()
        self.target = target.strip()
        self.options = options
        self.ignore_intranet = options.i
        self.scan_count = self.found_count = 0
        self.console_width = getTerminalSize()[0] - 2
        self.resolvers = [
            dns.resolver.Resolver(configure=False)
            for _ in range(options.threads)
        ]
        for _ in self.resolvers:
            _.lifetime = _.timeout = 10.0
        self.print_count = 0
        self.STOP_ME = False
        self._load_dns_servers()
        self._load_next_sub()
        self.queue = PriorityQueue()
        self.priority = 0
        self._load_sub_names()
        if options.output:
            outfile = options.output
        else:
            _name = os.path.basename(self.options.file).replace('subnames', '')
            if _name != '.txt':
                _name = '_' + _name
            outfile = target + _name if not options.full_scan else target + '_full' + _name
        self.outfile = open(outfile, 'w')
        self.ip_dict = {}
        self.found_subs = set()
        self.ex_resolver = dns.resolver.Resolver(configure=False)
        self.ex_resolver.nameservers = self.dns_servers
        self.result_lines = []
        self.result_domains = []
        self.result_ips = []

    def _load_dns_servers(self):
        print '[+] Validate DNS servers ...'
        self.dns_servers = []
        pool = Pool(30)

        # change file location
        for server in open('lijiejie/' + 'dict/dns_servers.txt').xreadlines():
            server = server.strip()
            if server:
                pool.apply_async(self._test_server, (server, ))
        pool.join()

        self.dns_count = len(self.dns_servers)
        sys.stdout.write('\n')
        print '[+] Found %s available DNS Servers in total' % self.dns_count
        if self.dns_count == 0:
            print '[ERROR] No DNS Servers available.'
            sys.exit(-1)

    def _test_server(self, server):
        resolver = dns.resolver.Resolver()
        resolver.lifetime = resolver.timeout = 10.0
        try:
            resolver.nameservers = [server]
            answers = resolver.query(
                'public-dns-a.baidu.com')  # test lookup of a known domain
            if answers[0].address != '180.76.76.76':
                raise Exception('incorrect DNS response')
            try:
                resolver.query(
                    'test.bad.dns.lijiejie.com')  # non-existent domain test
                with open('bad_dns_servers.txt', 'a') as f:
                    f.write(server + '\n')
                self._print_msg('[+] Bad DNS Server found %s' % server)
            except:
                self.dns_servers.append(server)
            self._print_msg('[+] Check DNS Server %s < OK >   Found %s' %
                            (server.ljust(16), len(self.dns_servers)))
        except:
            self._print_msg('[+] Check DNS Server %s <Fail>   Found %s' %
                            (server.ljust(16), len(self.dns_servers)))

    def _load_sub_names(self):
        self._print_msg('[+] Load sub names ...')
        if self.options.full_scan and self.options.file == 'subnames.txt':
            _file = 'lijiejie/' + 'dict/subnames_full.txt'
        else:
            if os.path.exists(self.options.file):
                _file = self.options.file
            elif os.path.exists('lijiejie/' + 'dict/%s' % self.options.file):
                _file = 'lijiejie/' + 'dict/%s' % self.options.file
            else:
                self._print_msg('[ERROR] Names file does not exist: %s' %
                                self.options.file)
                exit(-1)

        if self.options.debug:
            _file = 'lijiejie/' + 'dict/debug.txt'
            if not os.path.exists(_file):
                self._print_msg('[ERROR] Names file does not exist: %s' %
                                self.options.file)
                exit(-1)

        normal_lines = []
        wildcard_lines = []
        wildcard_list = []
        regex_list = []
        lines = set()
        with open(_file) as f:
            for line in f.xreadlines():
                sub = line.strip()
                if not sub or sub in lines:
                    continue
                lines.add(sub)

                if sub.find('{alphnum}') >= 0 or sub.find(
                        '{alpha}') >= 0 or sub.find('{num}') >= 0:
                    wildcard_lines.append(sub)
                    sub = sub.replace('{alphnum}', '[a-z0-9]')
                    sub = sub.replace('{alpha}', '[a-z]')
                    sub = sub.replace('{num}', '[0-9]')
                    if sub not in wildcard_list:
                        wildcard_list.append(sub)
                        regex_list.append('^' + sub + '$')
                else:
                    normal_lines.append(sub)
        pattern = '|'.join(regex_list)
        if pattern:
            _regex = re.compile(pattern)
            if _regex:
                for line in normal_lines[:]:
                    if _regex.search(line):
                        normal_lines.remove(line)

        for item in normal_lines:
            self.priority += 1
            self.queue.put((self.priority, item))

        for item in wildcard_lines:
            self.queue.put((88888888, item))

    def _load_next_sub(self):
        self._print_msg('[+] Load next level subs ...')
        self.next_subs = []
        _set = set()
        _file = 'lijiejie/' + ('dict/next_sub.txt' if not self.options.full_scan else 'dict/next_sub_full.txt')
        with open(_file) as f:
            for line in f:
                sub = line.strip()
                if sub and sub not in self.next_subs:
                    tmp_set = {sub}
                    while len(tmp_set) > 0:
                        item = tmp_set.pop()
                        if item.find('{alphnum}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                                tmp_set.add(
                                    item.replace('{alphnum}', _letter, 1))
                        elif item.find('{alpha}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz':
                                tmp_set.add(item.replace(
                                    '{alpha}', _letter, 1))
                        elif item.find('{num}') >= 0:
                            for _letter in '0123456789':
                                tmp_set.add(item.replace('{num}', _letter, 1))
                        elif item not in _set:
                            _set.add(item)
                            self.next_subs.append(item)

    def _print_msg(self, _msg=None, _found_msg=False):
        if _msg is None:
            self.print_count += 1
            if self.print_count < 100:
                return
            self.print_count = 0
            msg = '%s Found| %s Groups| %s scanned in %.1f seconds' % (
                self.found_count, self.queue.qsize(), self.scan_count,
                time.time() - self.start_time)
            sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) +
                             msg)
        elif _msg.startswith('[+] Check DNS Server'):
            sys.stdout.write('\r' + _msg + ' ' *
                             (self.console_width - len(_msg)))
        else:
            sys.stdout.write('\r' + _msg + ' ' *
                             (self.console_width - len(_msg)) + '\n')
            if _found_msg:
                msg = '%s Found| %s Groups| %s scanned in %.1f seconds' % (
                    self.found_count, self.queue.qsize(), self.scan_count,
                    time.time() - self.start_time)
                sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) +
                                 msg)
        sys.stdout.flush()

    @staticmethod
    def is_intranet(ip):
        ret = ip.split('.')
        if len(ret) != 4:
            return True
        if ret[0] == '10':
            return True
        if ret[0] == '172' and 16 <= int(ret[1]) <= 31:  # 172.16.0.0/12
            return True
        if ret[0] == '192' and ret[1] == '168':
            return True
        return False

    def put_item(self, item):
        num = item.count('{alphnum}') + item.count('{alpha}') + item.count(
            '{num}')
        if num == 0:
            self.priority += 1
            self.queue.put((self.priority, item))
        else:
            self.queue.put((self.priority + num * 10000000, item))

    def _scan(self, j):
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]]
        while not self.queue.empty():
            try:
                item = self.queue.get(timeout=1.0)[1]
                self.scan_count += 1
            except:
                break
            self._print_msg()
            try:
                if item.find('{alphnum}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                        self.put_item(item.replace('{alphnum}', _letter, 1))
                    continue
                elif item.find('{alpha}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz':
                        self.put_item(item.replace('{alpha}', _letter, 1))
                    continue
                elif item.find('{num}') >= 0:
                    for _letter in '0123456789':
                        self.put_item(item.replace('{num}', _letter, 1))
                    continue
                elif item.find('{next_sub}') >= 0:
                    for _ in self.next_subs:
                        self.queue.put((0, item.replace('{next_sub}', _, 1)))
                    continue
                else:
                    sub = item

                if sub in self.found_subs:
                    continue

                cur_sub_domain = sub + '.' + self.target
                _sub = sub.split('.')[-1]
                try:
                    answers = self.resolvers[j].query(cur_sub_domain)
                except dns.resolver.NoAnswer, e:
                    answers = self.ex_resolver.query(cur_sub_domain)

                if answers:
                    self.found_subs.add(sub)
                    ips = ', '.join(
                        sorted([answer.address for answer in answers]))
                    if ips in ['1.1.1.1', '127.0.0.1', '0.0.0.0']:
                        continue

                    if self.ignore_intranet and SubNameBrute.is_intranet(
                            answers[0].address):
                        continue

                    try:
                        self.scan_count += 1
                        answers = self.resolvers[j].query(
                            cur_sub_domain, 'cname')
                        cname = answers[0].target.to_unicode().rstrip('.')
                        if cname.endswith(
                                self.target) and cname not in self.found_subs:
                            self.found_subs.add(cname)
                            cname_sub = cname[:len(cname) - len(self.target) -
                                              1]  # new sub
                            self.queue.put((0, cname_sub))

                    except:
                        pass

                    if (_sub, ips) not in self.ip_dict:
                        self.ip_dict[(_sub, ips)] = 1
                    else:
                        self.ip_dict[(_sub, ips)] += 1

                    if ips not in self.ip_dict:
                        self.ip_dict[ips] = 1
                    else:
                        self.ip_dict[ips] += 1

                    if self.ip_dict[(_sub, ips)] > 3 or self.ip_dict[ips] > 6:
                        continue

                    self.found_count += 1
                    msg = cur_sub_domain.ljust(30) + ips
                    self._print_msg(msg, _found_msg=True)
                    self._print_msg()

                    # TODO: close write file
                    self.outfile.write(
                        cur_sub_domain.ljust(30) + '\t' + ips + '\n')
                    self.outfile.flush()

                    self.result_lines.append(
                        cur_sub_domain.ljust(30) + '\t' + ips)
                    self.result_domains.append(cur_sub_domain)
                    self.result_ips.extend(ips.split(", "))

                    try:
                        self.resolvers[j].query('lijiejietest.' +
                                                cur_sub_domain)
                    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
                        self.queue.put((999999999, '{next_sub}.' + sub))
                    except:
                        pass

            except (dns.resolver.NXDOMAIN, dns.name.EmptyLabel) as e:
                pass
            except (dns.resolver.NoNameservers, dns.resolver.NoAnswer,
                    dns.exception.Timeout) as e:
                pass
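The priority bands above are doing the actual scheduling: CNAME-discovered subs enter at 0 (scan immediately), dictionary names at an increasing counter (file order), wildcard templates at 88888888, and {next_sub} expansions at 999999999, which only run after everything else drains. A condensed sketch of the same banding (the names are made up):

from gevent.queue import PriorityQueue

queue = PriorityQueue()
priority = 0

queue.put((88888888, 'mail{num}'))         # wildcard template: back of the queue
for name in ('www', 'mail', 'dev'):        # dictionary names keep file order
    priority += 1
    queue.put((priority, name))
queue.put((0, 'cdn-edge'))                 # discovered CNAME sub: front
queue.put((999999999, '{next_sub}.www'))   # next-level expansion: very last

while not queue.empty():
    print(queue.get())
# (0, 'cdn-edge'), (1, 'www'), (2, 'mail'), (3, 'dev'),
# (88888888, 'mail{num}'), (999999999, '{next_sub}.www')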
Exemple #36
0
class SubNameBrute(object):
    def __init__(self, *params):
        self.domain, self.options, self.process_num, self.dns_servers, self.next_subs, \
            self.scan_count, self.found_count, self.queue_size_array, tmp_dir = params
        self.dns_count = len(self.dns_servers)
        self.scan_count_local = 0
        self.found_count_local = 0
        self.resolvers = [dns.resolver.Resolver(configure=False) for _ in range(self.options.threads)]
        for r in self.resolvers:
            r.lifetime = 6.0
            r.timeout = 10.0
        self.queue = PriorityQueue()
        self.priority = 0
        self.ip_dict = {}
        self.found_subs = set()
        self.cert_subs = set()
        self.timeout_subs = {}
        self.no_server_subs = {}
        self.count_time = time.time()
        self.outfile = open('%s/%s_part_%s.txt' % (tmp_dir, self.domain, self.process_num), 'w')
        self.normal_names_set = set()
        self.load_sub_names()
        self.lock = RLock()
        self.threads_status = ['1'] * self.options.threads

    def load_sub_names(self):
        normal_lines = []
        wildcard_lines = []
        wildcard_set = set()
        regex_list = []
        lines = set()
        with open(self.options.file) as inFile:
            for line in inFile.readlines():
                sub = line.strip()
                if not sub or sub in lines:
                    continue
                lines.add(sub)

                brace_count = sub.count('{')
                if brace_count > 0:
                    wildcard_lines.append((brace_count, sub))
                    sub = sub.replace('{alphnum}', '[a-z0-9]')
                    sub = sub.replace('{alpha}', '[a-z]')
                    sub = sub.replace('{num}', '[0-9]')
                    if sub not in wildcard_set:
                        wildcard_set.add(sub)
                        regex_list.append('^' + sub + '$')
                else:
                    normal_lines.append(sub)
                    self.normal_names_set.add(sub)

        if regex_list:
            pattern = '|'.join(regex_list)
            _regex = re.compile(pattern)
            for line in normal_lines[:]:  # iterate over a copy, we remove items below
                if _regex.search(line):
                    normal_lines.remove(line)

        for _ in normal_lines[self.process_num::self.options.process]:
            self.queue.put((0, _))    # priority set to 0
        for _ in wildcard_lines[self.process_num::self.options.process]:
            self.queue.put(_)

    def update_counter(self):
        while True:
            if '1' not in self.threads_status:
                return
            self.scan_count.value += self.scan_count_local
            self.scan_count_local = 0
            self.queue_size_array[self.process_num] = self.queue.qsize()
            if self.found_count_local:
                self.found_count.value += self.found_count_local
                self.found_count_local = 0
            self.count_time = time.time()
            gevent.sleep(0.5)

    def check_https_alt_names(self, domain):
        try:
            x509 = reqs.OpenSSL.crypto.load_certificate(
                reqs.OpenSSL.crypto.FILETYPE_PEM,
                reqs.ssl.get_server_certificate((domain, 443))
            )
            for item in reqs.get_subj_alt_name(x509):
                if item[0].upper() == 'DNS':
                    name = item[1].lower()
                    if name.endswith(self.domain):
                        sub = name[:len(name) - len(self.domain) - 1]    # new sub
                        sub = sub.replace('*', '')
                        sub = sub.strip('.')
                        if sub and sub not in self.found_subs and \
                                sub not in self.normal_names_set and sub not in self.cert_subs:
                            self.cert_subs.add(sub)
                            self.queue.put((0, sub))
        except Exception as e:
            pass

    def scan(self, j):
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]]
        if self.dns_count > 1:
            while True:
                s = random.choice(self.dns_servers)
                if s != self.dns_servers[j % self.dns_count]:
                    self.resolvers[j].nameservers.append(s)
                    break
        empty_counter = 0
        while True:
            try:
                brace_count, sub = self.queue.get_nowait()
                self.threads_status[j] = '1'
                empty_counter = 0
            except gevent.queue.Empty as e:
                empty_counter += 1
                if empty_counter > 10:
                    self.threads_status[j] = '0'
                if '1' not in self.threads_status:
                    break
                else:
                    gevent.sleep(0.1)
                    continue
            if brace_count > 0:
                brace_count -= 1
                if sub.find('{next_sub}') >= 0:
                    for _ in self.next_subs:
                        self.queue.put((0, sub.replace('{next_sub}', _)))
                if sub.find('{alphnum}') >= 0:
                    for _ in 'abcdefghijklmnopqrstuvwxyz0123456789':
                        self.queue.put((brace_count, sub.replace('{alphnum}', _, 1)))
                elif sub.find('{alpha}') >= 0:
                    for _ in 'abcdefghijklmnopqrstuvwxyz':
                        self.queue.put((brace_count, sub.replace('{alpha}', _, 1)))
                elif sub.find('{num}') >= 0:
                    for _ in '0123456789':
                        self.queue.put((brace_count, sub.replace('{num}', _, 1)))
                continue

            try:
                if sub in self.found_subs:
                    continue

                self.scan_count_local += 1
                cur_domain = sub + '.' + self.domain
                answers = self.resolvers[j].query(cur_domain)

                if answers:
                    self.found_subs.add(sub)
                    ips = ', '.join(sorted([answer.address for answer in answers]))
                    invalid_ip_found = False
                    for answer in answers:
                        if answer.address in ['1.1.1.1', '127.0.0.1', '0.0.0.0', '0.0.0.1']:
                            invalid_ip_found = True
                    if invalid_ip_found:
                        continue
                    if self.options.i and is_intranet(answers[0].address):
                        continue

                    try:
                        cname = str(answers.canonical_name)[:-1]
                        if cname != cur_domain and cname.endswith(self.domain):
                            cname_sub = cname[:len(cname) - len(self.domain) - 1]    # new sub
                            if cname_sub not in self.found_subs and cname_sub not in self.normal_names_set:
                                self.queue.put((0, cname_sub))
                    except Exception as e:
                        print(e)

                    try:
                        self.scan_count_local += 1
                        answers = self.resolvers[j].query(cur_domain, 'cname')
                        cname = answers[0].target.to_unicode().rstrip('.')
                        if cname.endswith(self.domain) and cname not in self.found_subs:
                            cname_sub = cname[:len(cname) - len(self.domain) - 1]    # new sub
                            if cname_sub not in self.normal_names_set:
                                # self.found_subs.add(cname)
                                self.queue.put((0, cname_sub))
                    except Exception as e:
                        pass

                    first_level_sub = sub.split('.')[-1]
                    max_found = 20

                    if self.options.w:
                        first_level_sub = ''
                        max_found = 3

                    if (first_level_sub, ips) not in self.ip_dict:
                        self.ip_dict[(first_level_sub, ips)] = 1
                    else:
                        self.ip_dict[(first_level_sub, ips)] += 1
                        if self.ip_dict[(first_level_sub, ips)] > max_found:
                            continue

                    self.found_count_local += 1

                    self.outfile.write(cur_domain.ljust(30) + '\t' + ips + '\n')
                    self.outfile.flush()
                    if not self.options.no_cert_check:
                        with gevent.Timeout(10, False):
                            self.check_https_alt_names(cur_domain)
                    try:
                        self.scan_count_local += 1
                        self.resolvers[j].query('lijiejie-test-not-existed.' + cur_domain)
                    except (dns.resolver.NXDOMAIN, ) as e:    # dns.resolver.NoAnswer
                        if self.queue.qsize() < 20000:
                            for _ in self.next_subs:
                                self.queue.put((0, _ + '.' + sub))
                        else:
                            self.queue.put((1, '{next_sub}.' + sub))
                    except Exception as e:
                        pass

            except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
                pass
            except dns.resolver.NoNameservers as e:
                self.no_server_subs[sub] = self.no_server_subs.get(sub, 0) + 1
                if self.no_server_subs[sub] <= 3:
                    self.queue.put((0, sub))    # Retry
            except dns.exception.Timeout as e:
                self.timeout_subs[sub] = self.timeout_subs.get(sub, 0) + 1
                if self.timeout_subs[sub] <= 1:
                    self.queue.put((0, sub))    # Retry
            except Exception as e:
                import traceback
                traceback.print_exc()
                with open('errors.log', 'a') as errFile:
                    errFile.write('[%s] %s\n' % (type(e), str(e)))

    def run(self):
        threads = [gevent.spawn(self.scan, i) for i in range(self.options.threads)]
        threads.insert(0, gevent.spawn(self.update_counter))
        gevent.joinall(threads)
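Worker shutdown in this version hinges on threads_status: a worker flips its flag to '0' only after more than ten consecutive empty polls, and nobody exits while any flag is still '1', so a single worker refilling the queue keeps the whole pool alive. A minimal sketch of that protocol, assuming gevent (the actual work is elided):

import gevent
from gevent.queue import PriorityQueue, Empty

queue = PriorityQueue()
status = ['1'] * 4               # one flag per worker

def worker(j):
    empty_polls = 0
    while True:
        try:
            item = queue.get_nowait()
            status[j] = '1'      # productive again
            empty_polls = 0
        except Empty:
            empty_polls += 1
            if empty_polls > 10:
                status[j] = '0'  # looks idle, offer to stop
            if '1' not in status:
                break            # every worker is idle: all done
            gevent.sleep(0.1)
            continue
        # ... resolve item here, possibly queue.put() new work ...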
class SubNameBrute:
    """
        receives command-line args and does some initialization work
    """
    def __init__(self, target, options):
        self.start_time = time.time()
        self.target = target.strip()
        self.options = options
        self.scan_count = self.found_count = 0
        self.console_width = os.get_terminal_size()[0] - 2

        # create dns resolver pool ~ workers
        self.resolvers = [
            dns.resolver.Resolver(configure=False)
            for _ in range(options.threads)
        ]
        for resolver in self.resolvers:
            resolver.lifetime = resolver.timeout = 10.0

        self.print_count = 0
        self.STOP_ME = False

        # load dns servers and check whether they work fine
        self._load_dns_servers()

        # load sub names
        self.subs = []  # subs in file
        self.goodsubs = []  # subs that resolved OK, kept for further exploitation
        self._load_subname('dict/subnames.txt', self.subs)

        # load sub.sub names
        self.subsubs = []
        self._load_subname('dict/next_sub.txt', self.subsubs)

        # results will be saved to results/<target>/<target>.txt

        global path

        path = os.path.join("results", target)
        if not os.path.exists(path):
            os.makedirs(path)

        self.outfile = open('%s/%s.txt' % (path, target), 'w')

        self.ip_dict = set()  #
        self.found_sub = set()

        # task queue
        self.queue = PriorityQueue()
        for sub in self.subs:
            self.queue.put(sub)

    """
        Load DNS servers (IPs saved in a file) and check whether they work fine
    """

    def _load_dns_servers(self):
        print('[*] Validate DNS servers ...')
        self.dns_servers = []

        # create a pool for checking DNS servers; the size is twice the number of CPU cores, adjust as needed
        processors = cpu_count() * 2
        pool = Pool(processors)

        # read dns ips and check one by one
        for server in open('dict/dns_servers.txt').readlines():
            server = server.strip()
            if server:
                pool.apply_async(self._test_server, (server, ))

        pool.join()  # waiting for process finish
        self.dns_count = len(self.dns_servers)

        sys.stdout.write('\n')
        dns_info = '[+] Found {} available DNS Servers in total'.format(
            self.dns_count)
        print(dns_info)

        if self.dns_count == 0:
            print('[ERROR] No DNS Servers available.')
            sys.exit(-1)

    """
        test whether a DNS server works fine
    """

    def _test_server(self, server):

        # create a dns resolver and set timeout
        resolver = dns.resolver.Resolver()
        resolver.lifetime = resolver.timeout = 10.0

        try:
            resolver.nameservers = [server]

            answers = resolver.query('public-dns-a.baidu.com')
            if answers[0].address != '180.76.76.76':
                raise Exception('incorrect DNS response')
            self.dns_servers.append(server)
        except:
            self._print_msg('[-] Check DNS Server %s <Fail>   Found %s' %
                            (server.ljust(16), len(self.dns_servers)))
            return  # do not fall through to the success message below

        self._print_msg('[+] Check DNS Server %s < OK >   Found %s' %
                        (server.ljust(16), len(self.dns_servers)))

    """
        load sub names from dict/*.txt; one function is enough:
        file is the file to read, subname_list receives the sub names
    """

    def _load_subname(self, file, subname_list):
        self._print_msg('[*] Load sub names ...')

        with open(file) as f:
            for line in f:
                sub = line.strip()
                if sub and sub not in subname_list:
                    tmp_set = {sub}
                    """
                        expand sub names that contain the placeholders
                        {alphnum}, {alpha} and {num} into letters and digits
                    """
                    while len(tmp_set) > 0:
                        item = tmp_set.pop()
                        if item.find('{alphnum}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                                tmp_set.add(
                                    item.replace('{alphnum}', _letter, 1))
                        elif item.find('{alpha}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz':
                                tmp_set.add(item.replace(
                                    '{alpha}', _letter, 1))
                        elif item.find('{num}') >= 0:
                            for _letter in '0123456789':
                                tmp_set.add(item.replace('{num}', _letter, 1))
                        elif item not in subname_list:
                            subname_list.append(item)

    """
        nicer presentation of brute-force progress; purely cosmetic
    """

    def _print_msg(self, _msg=None, _found_msg=False):
        if _msg is None:
            self.print_count += 1
            if self.print_count < 100:
                return
            self.print_count = 0
            msg = '%s Found| %s Groups| %s scanned in %.1f seconds' % (
                self.found_count, self.queue.qsize(), self.scan_count,
                time.time() - self.start_time)
            sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) +
                             msg)
        elif _msg.startswith('[+] Check DNS Server'):
            sys.stdout.write('\r' + _msg + ' ' *
                             (self.console_width - len(_msg)))
        else:
            sys.stdout.write('\r' + _msg + ' ' *
                             (self.console_width - len(_msg)) + '\n')
            if _found_msg:
                msg = '%s Found| %s Groups| %s scanned in %.1f seconds' % (
                    self.found_count, self.queue.qsize(), self.scan_count,
                    time.time() - self.start_time)
                sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) +
                                 msg)
        sys.stdout.flush()

    def _print_domain(self, msg):
        console_width = os.get_terminal_size()[0]
        msg = '\r' + msg + ' ' * (console_width - len(msg))
        # msg = '\033[0;31;47m%s{}\033[0m'.format(msg)
        sys.stdout.write(msg)

    def _print_progress(self):
        """
            display the scan progress in a nicer format
        """
        msg = '\033[0;31;47m%s\033[0m found | %s remaining | %s scanned in %.2f seconds' % \
              (self.found_count, self.queue.qsize(), self.scan_count, time.time()- self.start_time)

        console_width = os.get_terminal_size()[0]
        out = '\r' + ' ' * int((console_width - len(msg)) / 2) + msg
        sys.stdout.write(out)

    """
        important : assign task to resolvers
    """

    def _scan(self, j):
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]]
        while not self.queue.empty():
            sub = self.queue.get(timeout=1.0)
            self.scan_count += 1

            try:
                cur_sub_domain = sub + '.' + self.target
                answers = self.resolvers[j].query(cur_sub_domain)
            except:
                continue

            if answers:
                ips = ', '.join(sorted([answer.address for answer in answers]))

                # exclude intranet or reserved addresses
                if ips in ['1.1.1.1', '127.0.0.1', '0.0.0.0', '0.0.0.1']:
                    continue
                if SubNameBrute.is_intranet(answers[0].address):
                    continue

                self.found_sub.add(cur_sub_domain)
                for answer in answers:
                    self.ip_dict.add(answer.address)

                if sub not in self.goodsubs:
                    self.goodsubs.append(sub)

                self.found_count += 1
                ip_info = '{} \t {}'.format(cur_sub_domain, ips)
                # print(ip_info)
                self.outfile.write(cur_sub_domain + '\t' + ips + '\n')
                self._print_domain(ip_info)
                sys.stdout.flush()
                self._print_progress()
                sys.stdout.flush()

    @staticmethod
    def is_intranet(ip):
        ret = ip.split('.')
        if len(ret) != 4:
            return True
        if ret[0] == '10':
            return True
        if ret[0] == '172' and 16 <= int(ret[1]) <= 31:  # 172.16.0.0/12
            return True
        if ret[0] == '192' and ret[1] == '168':
            return True
        return False

    """
        assign task to threads ...
    """

    def run(self):
        threads = [
            gevent.spawn(self._scan, i) for i in range(self.options.threads)
        ]

        print('[*] Initializing %d threads' % self.options.threads)

        try:
            gevent.joinall(threads)
        except KeyboardInterrupt as e:
            msg = '[WARNING] User aborted.'
            sys.stdout.write('\r' + msg + ' ' *
                             (self.console_width - len(msg)) + '\n\r')
            sys.stdout.flush()
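One caveat in _scan above: queue.get(timeout=1.0) sits outside any try block, so a worker that loses the race between queue.empty() and get() dies on the Empty exception instead of finishing cleanly; the other examples in this collection wrap exactly this call. A sketch of the safer loop shape, assuming gevent:

from gevent.queue import PriorityQueue, Empty

queue = PriorityQueue()

def scan_loop():
    while not queue.empty():
        try:
            sub = queue.get(timeout=1.0)
        except Empty:
            break               # another worker drained the queue first
        # ... resolve sub and record the result ...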
Exemple #38
0
class DatasetDownloader(object):
    _queue_item_type = namedtuple("queue_item",
                                    ("hour", "sleep_until", "filename",
                                     "expect_pressures", "bad_downloads"))

    def __init__(self, directory, ds_time, timeout=120,
                 first_file_timeout=600,
                 bad_download_retry_limit=3,
                 write_dataset=True, write_gribmirror=True,
                 deadline=None,
                 dataset_host="ftp.ncep.noaa.gov",
                 dataset_path="/pub/data/nccf/com/gfs/prod/gfs.{0}/"):

        # set these ASAP for close() via __del__ if __init__ raises something
        self.success = False
        self._dataset = None
        self._gribmirror = None
        self._tmp_directory = None

        assert ds_time.hour in (0, 6, 12, 18)
        assert ds_time.minute == ds_time.second == ds_time.microsecond == 0

        if not (write_dataset or write_gribmirror):
            raise ValueError("Choose write_datset or write_gribmirror "
                                "(or both)")

        if deadline is None:
            deadline = max(datetime.now() + timedelta(hours=2),
                           ds_time + timedelta(hours=9, minutes=30))

        self.directory = directory
        self.ds_time = ds_time

        self.timeout = timeout
        self.first_file_timeout = first_file_timeout
        self.write_dataset = write_dataset
        self.write_gribmirror = write_gribmirror
        self.bad_download_retry_limit = bad_download_retry_limit

        self.deadline = deadline
        self.dataset_host = dataset_host
        self.dataset_path = dataset_path

        self.have_first_file = False

        self.files_complete = 0
        self.files_count = 0
        self.completed = Event()

        ds_time_str = self.ds_time.strftime("%Y%m%d%H")
        self.remote_directory = dataset_path.format(ds_time_str)

        self._greenlets = Group()
        self.unpack_lock = RLock()

        # Items in the queue are
        #   (hour, sleep_until, filename, ...)
        # so they sort by hour, and then if a not-found adds a delay to
        # a specific file, files from that hour without the delay
        # are tried first
        self._files = PriorityQueue()

        # areas in self.dataset.array are considered 'undefined' until
        #   self.checklist[index[:3]] is True, since unpack_grib may
        #   write to them and then abort via ValueError, before ever
        #   updating the checklist, if the file turns out to be bad

        # the checklist also serves as a sort of final sanity check:
        #   we also have "does this file contain all the records we think it
        #   should" checklists; see Worker._download_file

        self._checklist = make_checklist()

    def open(self):
        logger.info("downloader: opening files for dataset %s", self.ds_time)

        self._tmp_directory = \
                tempfile.mkdtemp(dir=self.directory, prefix="download.")
        os.chmod(self._tmp_directory, 0o775)
        logger.debug("Temporary directory is %s", self._tmp_directory)

        if self.write_dataset:
            self._dataset = \
                Dataset(self.ds_time, directory=self._tmp_directory, new=True)

        if self.write_gribmirror:
            fn = Dataset.filename(self.ds_time,
                                  directory=self._tmp_directory,
                                  suffix=Dataset.SUFFIX_GRIBMIRROR)
            logger.debug("Opening gribmirror (truncate and write) %s %s",
                                self.ds_time, fn)
            self._gribmirror = open(fn, "w+")

    def download(self):
        logger.info("download of %s starting", self.ds_time)

        ttl, addresses = resolve_ipv4(self.dataset_host)
        logger.debug("Resolved to %s IPs", len(addresses))

        addresses = [inet_ntoa(x) for x in addresses]

        total_timeout = self.deadline - datetime.now()
        total_timeout_secs = total_timeout.total_seconds()
        if total_timeout_secs < 0:
            raise ValueError("Deadline already passed")
        else:
            logger.debug("Deadline in %s", total_timeout)

        self._add_files()
        self._run_workers(addresses, total_timeout_secs)

        if not self.completed.is_set():
            raise ValueError("timed out")

        if not self._checklist.all():
            raise ValueError("incomplete: records missing")

        self.success = True
        logger.debug("downloaded %s successfully", self.ds_time)

    def _add_files(self):
        filename_prefix = self.ds_time.strftime("gfs.t%Hz.pgrb2")

        for hour in Dataset.axes.hour:
            hour_str = "{0:02}".format(hour)

            for bit, exp_pr in (("f", Dataset.pressures_pgrb2f),
                                ("bf", Dataset.pressures_pgrb2bf)):
                self._files.put(self._queue_item_type(
                    hour, 0, filename_prefix + bit + hour_str, exp_pr, 0))
                self.files_count += 1

        logger.info("Need to download %s files", self.files_count)

    def _run_workers(self, addresses, total_timeout_secs):
        logger.debug("Spawning %s workers", len(addresses) * 2)

        # don't ask _join_all to raise the first exception it catches
        # if we're already raising something in the except block
        raising = False

        try:
            for worker_id, address in enumerate(addresses * 2):
                w = DownloadWorker(self, worker_id, address)
                w.start()
                w.link()
                self._greenlets.add(w)

            # worker unhandled exceptions are raised in this greenlet
            # via link(). They can appear in completed.wait and
            # greenlets.kill(block=True) only (the only times that this
            # greenlet will yield)
            self.completed.wait(timeout=total_timeout_secs)

        except:
            # includes LinkedCompleted - a worker should not exit cleanly
            # until we .kill them below
            logger.debug("_run_workers catch %s (will reraise)",
                         sys.exc_info()[1])
            raising = True
            raise

        finally:
            # don't leak workers.
            self._join_all(raise_exception=(not raising))

    def _join_all(self, raise_exception=False):
        # we need the loop to run to completion and so have it catch and
        # hold or discard exceptions for later.
        # track the first exception caught and re-raise that
        exc_info = None

        while len(self._greenlets):
            try:
                self._greenlets.kill(block=True)
            except greenlet.LinkedCompleted:
                # now that we've killed workers, these are expected.
                # ignore.
                pass
            except greenlet.LinkedFailed as e:
                if exc_info is None and raise_exception:
                    logger.debug("_join_all catch %s "
                                 "(will reraise)", e)
                    exc_info = sys.exc_info()
                else:
                    logger.debug("_join_all discarding %s "
                                 "(already have exc)", e)

        if exc_info is not None:
            try:
                raise exc_info[1], None, exc_info[2]
            finally:
                # avoid circular reference
                del exc_info

    def _file_complete(self):
        self.files_complete += 1
        self.have_first_file = True

        if self.files_complete == self.files_count:
            self.completed.set()

        logger.info("progress %s/%s %s%%",
                    self.files_complete, self.files_count,
                    self.files_complete / self.files_count * 100)

    def close(self, move_files=None):
        if move_files is None:
            move_files = self.success

        if self._dataset is not None or self._gribmirror is not None or \
                self._tmp_directory is not None:
            if move_files:
                logger.info("moving downloaded files")
            else:
                logger.info("deleting failed download files")

        if self._dataset is not None:
            self._dataset.close()
            self._dataset = None
            if move_files:
                self._move_file()
            else:
                self._delete_file()

        if self._gribmirror is not None:
            self._gribmirror.close()
            self._gribmirror = None
            if move_files:
                self._move_file(Dataset.SUFFIX_GRIBMIRROR)
            else:
                self._delete_file(Dataset.SUFFIX_GRIBMIRROR)

        if self._tmp_directory is not None:
            self._remove_download_directory()
            self._tmp_directory = None

    def __del__(self):
        self.close()

    def _remove_download_directory(self):
        l = os.listdir(self._tmp_directory)
        if l:
            logger.warning("cleaning %s unknown file%s in temporary directory",
                           len(l), '' if len(l) == 1 else 's')

        logger.debug("removing temporary directory")
        shutil.rmtree(self._tmp_directory)

    def _move_file(self, suffix=''):
        fn1 = Dataset.filename(self.ds_time,
                               directory=self._tmp_directory,
                               suffix=suffix)
        fn2 = Dataset.filename(self.ds_time,
                               directory=self.directory,
                               suffix=suffix)
        logger.debug("renaming %s to %s", fn1, fn2)
        os.rename(fn1, fn2)

    def _delete_file(self, suffix=''):
        fn = Dataset.filename(self.ds_time,
                              directory=self._tmp_directory,
                              suffix=suffix)
        logger.warning("deleting %s", fn)
        os.unlink(fn)
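Because _queue_item_type is a namedtuple, queue items sort field by field: first hour, then sleep_until, so a file that picked up a not-found delay naturally sorts behind the undelayed files of the same hour, exactly as the comment in __init__ describes. A sketch of that ordering (the field values are made up):

from collections import namedtuple
from gevent.queue import PriorityQueue

queue_item = namedtuple("queue_item",
                        ("hour", "sleep_until", "filename",
                         "expect_pressures", "bad_downloads"))

files = PriorityQueue()
files.put(queue_item(6, 300, "gfs.t00z.pgrb2f06", None, 1))   # delayed retry
files.put(queue_item(6, 0, "gfs.t00z.pgrb2bf06", None, 0))    # same hour, no delay
files.put(queue_item(3, 0, "gfs.t00z.pgrb2f03", None, 0))

print(files.get().filename)   # -> gfs.t00z.pgrb2f03  (earliest hour first)
print(files.get().filename)   # -> gfs.t00z.pgrb2bf06 (same hour, no delay wins)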
Exemple #39
0
class SubNameBrute:
    def __init__(self, target, options, process_num, dns_servers, cdns,next_subs,
                 scan_count, found_count, queue_size_list, tmp_dir):
        self.target = target.strip()
        self.options = options
        self.process_num = process_num
        self.dns_servers = dns_servers
        self.cdns = cdns

        self.dns_count = len(dns_servers)
        self.next_subs = next_subs
        self.scan_count = scan_count
        self.scan_count_local = 0
        self.found_count = found_count
        self.found_count_local = 0
        self.queue_size_list = queue_size_list

        self.resolvers = [dns.resolver.Resolver(configure=False) for _ in range(options.threads)]
        for _r in self.resolvers:
            _r.lifetime = _r.timeout = 6.0
        self.queue = PriorityQueue()
        self.item_index = 0
        self.priority = 0
        self._load_sub_names()
        self.ip_dict = {}
        self.found_subs = set()
        self.ex_resolver = dns.resolver.Resolver(configure=False)
        self.ex_resolver.nameservers = dns_servers
        self.local_time = time.time()
        self.outfile = open('%s/%s_part_%s.txt' % (tmp_dir, target, process_num), 'w')

    def _load_sub_names(self):
        if self.options.full_scan and self.options.file == 'subnames.txt':
            _file = 'dict/subnames_full.txt'
        else:
            if os.path.exists(self.options.file):
                _file = self.options.file
            elif os.path.exists('dict/%s' % self.options.file):
                _file = 'dict/%s' % self.options.file
            else:
                print_msg('[ERROR] Names file not found: %s' % self.options.file)
                exit(-1)

        normal_lines = []
        wildcard_lines = []
        wildcard_list = []
        regex_list = []
        lines = set()
        with open(_file) as f:
            for line in f.xreadlines():
                sub = line.strip()
                if not sub or sub in lines:
                    continue
                lines.add(sub)

                if sub.find('{alphnum}') >= 0 or sub.find('{alpha}') >= 0 or sub.find('{num}') >= 0:
                    wildcard_lines.append(sub)
                    sub = sub.replace('{alphnum}', '[a-z0-9]')
                    sub = sub.replace('{alpha}', '[a-z]')
                    sub = sub.replace('{num}', '[0-9]')
                    if sub not in wildcard_list:
                        wildcard_list.append(sub)
                        regex_list.append('^' + sub + '$')
                else:
                    normal_lines.append(sub)
        if regex_list:
            pattern = '|'.join(regex_list)
            _regex = re.compile(pattern)
            for line in normal_lines[:]:
                if _regex.search(line):
                    normal_lines.remove(line)

        for item in normal_lines[self.process_num::self.options.process]:
            self.priority += 1
            self.queue.put((self.priority, item))

        for item in wildcard_lines[self.process_num::self.options.process]:
            self.queue.put((88888888, item))

    def put_item(self, item):
        num = item.count('{alphnum}') + item.count('{alpha}') + item.count('{num}')
        if num == 0:
            self.priority += 1
            self.queue.put((self.priority, item))
        else:
            self.queue.put((self.priority + num * 10000000, item))

    def check_cdn(self, cname):
        '''
        bTrue = True
        bFound = False
        i = 0
        while bTrue:
          cdn = self.cdns[i]
          i += 1
          if (cdn in cname) or (i == len(self.cdns)):
            if (cdn in cname): bFound = True
            bTrue = False
        return bFound
        '''
        for cdn in self.cdns:
            if cdn in cname:
                return True
        return False

    def _scan(self, j):
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]]
        while not self.queue.empty():
            try:
                item = self.queue.get(timeout=3.0)[1]
                self.scan_count_local += 1
                if time.time() - self.local_time > 3.0:
                    self.scan_count.value += self.scan_count_local
                    self.scan_count_local = 0
                    self.queue_size_list[self.process_num] = self.queue.qsize()
            except Exception as e:
                break
            try:
                if item.find('{alphnum}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                        self.put_item(item.replace('{alphnum}', _letter, 1))
                    continue
                elif item.find('{alpha}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz':
                        self.put_item(item.replace('{alpha}', _letter, 1))
                    continue
                elif item.find('{num}') >= 0:
                    for _letter in '0123456789':
                        self.put_item(item.replace('{num}', _letter, 1))
                    continue
                elif item.find('{next_sub}') >= 0:
                    for _ in self.next_subs:
                        self.queue.put((0, item.replace('{next_sub}', _, 1)))
                    continue
                else:
                    sub = item

                if sub in self.found_subs:
                    continue

                cur_sub_domain = sub + '.' + self.target
                _sub = sub.split('.')[-1]
                try:
                    answers = self.resolvers[j].query(cur_sub_domain)
                except dns.resolver.NoAnswer:
                    # retry once via the shared fallback resolver
                    answers = self.ex_resolver.query(cur_sub_domain)

                if answers:
                    try:
                        # a CNAME pointing at a known CDN is noise; skip it.
                        # NoAnswer here just means there is no CNAME record.
                        ans = self.resolvers[j].query(cur_sub_domain, 'cname')
                        cname = ans[0].target.to_unicode().rstrip('.')
                        if self.check_cdn(cname):
                            continue
                    except dns.resolver.NoAnswer:
                        pass
                    self.found_subs.add(sub)
                    ips = ', '.join(sorted([answer.address for answer in answers]))
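                    # discard well-known junk / sinkhole answers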
                    if ips in ['1.1.1.1', '127.0.0.1', '0.0.0.0']:
                        continue

                    if self.options.i and is_intranet(answers[0].address):
                        continue

                    try:
                        self.scan_count_local += 1
                        answers = self.resolvers[j].query(cur_sub_domain, 'cname')
                        cname = answers[0].target.to_unicode().rstrip('.')
                        if cname.endswith(self.target) and cname not in self.found_subs:
                            self.found_subs.add(cname)
                            cname_sub = cname[:len(cname) - len(self.target) - 1]    # new sub
                            self.queue.put((0, cname_sub))

                    except Exception:
                        pass

                    if (_sub, ips) not in self.ip_dict:
                        self.ip_dict[(_sub, ips)] = 1
                    else:
                        self.ip_dict[(_sub, ips)] += 1
                        if self.ip_dict[(_sub, ips)] > 30:
                            continue

                    self.found_count_local += 1
                    if time.time() - self.local_time > 3.0:
                        self.found_count.value += self.found_count_local
                        self.found_count_local = 0
                        self.queue_size_list[self.process_num] = self.queue.qsize()
                        self.local_time = time.time()

                    msg = cur_sub_domain.ljust(30) + ips
                    # print_msg(msg, line_feed=True)

                    self.outfile.write(cur_sub_domain.ljust(30) + '\t' + ips + '\n')
                    self.outfile.flush()
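                    # probe a label that should never exist: NXDOMAIN/NoAnswer
                    # means no wildcard DNS, so the next level is safe to brute-force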
                    try:
                        self.resolvers[j].query('lijiejietest.' + cur_sub_domain)
                    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
                        self.queue.put((999999999, '{next_sub}.' + sub))
                    except Exception:
                        pass

            except (dns.resolver.NXDOMAIN, dns.name.EmptyLabel) as e:
                pass
            except (dns.resolver.NoNameservers, dns.resolver.NoAnswer, dns.exception.Timeout) as e:
                pass
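
Aside (not from the original source): the priority scheme above can be seen in isolation. Concrete names get small, incrementing priorities, while templates containing placeholders are pushed far back with a large per-placeholder offset (the num * 10000000 in put_item), so fully expanded names always drain from the queue first. A minimal sketch, assuming gevent.queue.PriorityQueue as used by the scanner:

from gevent.queue import PriorityQueue

q = PriorityQueue()
priority = 0
for name in ['www', 'mail', 'api']:      # concrete names: FIFO by counter
    priority += 1
    q.put((priority, name))
q.put((1 * 10000000, '{alphnum}-cdn'))   # one placeholder left
q.put((2 * 10000000, '{num}{num}.img'))  # two placeholders: drained last

while not q.empty():
    print(q.get())
# -> (1, 'www'), (2, 'mail'), (3, 'api'), then the two templates
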
class SubNameBrute:
    def __init__(self, target, options, process_num, dns_servers, next_subs,
                 scan_count, found_count, queue_size_list, tmp_dir):
        self.target = target.strip()
        self.options = options
        self.process_num = process_num
        self.dns_servers = dns_servers
        self.dns_count = len(dns_servers)
        self.next_subs = next_subs
        self.scan_count = scan_count
        self.scan_count_local = 0
        self.found_count = found_count
        self.found_count_local = 0
        self.queue_size_list = queue_size_list

        self.resolvers = [dns.resolver.Resolver(configure=False) for _ in range(options.threads)]
        for _r in self.resolvers:
            _r.lifetime = _r.timeout = 6.0
        self.queue = PriorityQueue()
        self.item_index = 0
        self.priority = 0
        self._load_sub_names()
        self.ip_dict = {}
        self.found_subs = set()
        self.ex_resolver = dns.resolver.Resolver(configure=False)
        self.ex_resolver.nameservers = dns_servers
        self.local_time = time.time()
        self.outfile = open('%s/%s_part_%s.txt' % (tmp_dir, target, process_num), 'w')

    def _load_sub_names(self):
        if self.options.full_scan and self.options.file == 'subnames.txt':
            _file = 'dict/subnames_full.txt'
        else:
            if os.path.exists(self.options.file):
                _file = self.options.file
            elif os.path.exists('dict/%s' % self.options.file):
                _file = 'dict/%s' % self.options.file
            else:
                print_msg('[ERROR] Names file not found: %s' % self.options.file)
                exit(-1)

        normal_lines = []
        wildcard_lines = []
        wildcard_list = []
        regex_list = []
        lines = set()
        with open(_file) as f:
            for line in f:
                sub = line.strip()
                if not sub or sub in lines:
                    continue
                lines.add(sub)

                if sub.find('{alphnum}') >= 0 or sub.find('{alpha}') >= 0 or sub.find('{num}') >= 0:
                    wildcard_lines.append(sub)
                    sub = sub.replace('{alphnum}', '[a-z0-9]')
                    sub = sub.replace('{alpha}', '[a-z]')
                    sub = sub.replace('{num}', '[0-9]')
                    if sub not in wildcard_list:
                        wildcard_list.append(sub)
                        regex_list.append('^' + sub + '$')
                else:
                    normal_lines.append(sub)
        if regex_list:
            pattern = '|'.join(regex_list)
            _regex = re.compile(pattern)
            for line in normal_lines[:]:
                if _regex.search(line):
                    normal_lines.remove(line)

        for item in normal_lines[self.process_num::self.options.process]:
            self.priority += 1
            self.queue.put((self.priority, item))

        for item in wildcard_lines[self.process_num::self.options.process]:
            self.queue.put((88888888, item))

    def put_item(self, item):
        num = item.count('{alphnum}') + item.count('{alpha}') + item.count('{num}')
        if num == 0:
            self.priority += 1
            self.queue.put((self.priority, item))
        else:
            self.queue.put((self.priority + num * 10000000, item))

    def _scan(self, j):
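        # pin this worker to one DNS server, round-robin across the pool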
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]]
        while not self.queue.empty():
            try:
                item = self.queue.get(timeout=3.0)[1]
                self.scan_count_local += 1
                if time.time() - self.local_time > 3.0:
                    self.scan_count.value += self.scan_count_local
                    self.scan_count_local = 0
                    self.queue_size_list[self.process_num] = self.queue.qsize()
            except Exception as e:
                break
            try:
                if item.find('{alphnum}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                        self.put_item(item.replace('{alphnum}', _letter, 1))
                    continue
                elif item.find('{alpha}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz':
                        self.put_item(item.replace('{alpha}', _letter, 1))
                    continue
                elif item.find('{num}') >= 0:
                    for _letter in '0123456789':
                        self.put_item(item.replace('{num}', _letter, 1))
                    continue
                elif item.find('{next_sub}') >= 0:
                    for _ in self.next_subs:
                        self.queue.put((0, item.replace('{next_sub}', _, 1)))
                    continue
                else:
                    sub = item

                if sub in self.found_subs:
                    continue

                cur_sub_domain = sub + '.' + self.target
                _sub = sub.split('.')[-1]
                try:
                    answers = self.resolvers[j].query(cur_sub_domain)
                except dns.resolver.NoAnswer:
                    answers = self.ex_resolver.query(cur_sub_domain)

                if answers:
                    self.found_subs.add(sub)
                    ips = ', '.join(sorted([answer.address for answer in answers]))
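                    # skip obvious junk / sinkhole answers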
                    if ips in ['1.1.1.1', '127.0.0.1', '0.0.0.0']:
                        continue

                    if self.options.i and is_intranet(answers[0].address):
                        continue

                    try:
                        self.scan_count_local += 1
                        answers = self.resolvers[j].query(cur_sub_domain, 'cname')
                        cname = answers[0].target.to_unicode().rstrip('.')
                        if cname.endswith(self.target) and cname not in self.found_subs:
                            self.found_subs.add(cname)
                            cname_sub = cname[:len(cname) - len(self.target) - 1]    # new sub
                            self.queue.put((0, cname_sub))

                    except Exception:
                        pass

                    if (_sub, ips) not in self.ip_dict:
                        self.ip_dict[(_sub, ips)] = 1
                    else:
                        self.ip_dict[(_sub, ips)] += 1
                        if self.ip_dict[(_sub, ips)] > 30:
                            continue

                    self.found_count_local += 1
                    if time.time() - self.local_time > 3.0:
                        self.found_count.value += self.found_count_local
                        self.found_count_local = 0
                        self.queue_size_list[self.process_num] = self.queue.qsize()
                        self.local_time = time.time()

                    msg = cur_sub_domain.ljust(30) + ips
                    # print_msg(msg, line_feed=True)

                    self.outfile.write(cur_sub_domain.ljust(30) + '\t' + ips + '\n')
                    self.outfile.flush()
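                    # probe a label that should never exist; only when it fails
                    # (no wildcard DNS) is the next level queued for brute force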
                    try:
                        self.resolvers[j].query('lijiejietest.' + cur_sub_domain)
                    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
                        self.queue.put((999999999, '{next_sub}.' + sub))
                    except Exception:
                        pass

            except (dns.resolver.NXDOMAIN, dns.name.EmptyLabel) as e:
                pass
            except (dns.resolver.NoNameservers, dns.resolver.NoAnswer, dns.exception.Timeout) as e:
                pass
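
Aside (not from the original source): a minimal sketch of how one worker process might drive this class. It assumes scan_count and found_count are multiprocessing.Value objects and queue_size_list is a shared array, which is what the .value accesses and per-process indexing above suggest; brute_process and its wiring are illustrative, not the tool's actual entry point.

from gevent import monkey
monkey.patch_all()   # assumed: cooperative sockets for the DNS queries

import multiprocessing

import gevent

def brute_process(target, options, process_num, dns_servers, next_subs,
                  scan_count, found_count, queue_size_list, tmp_dir):
    s = SubNameBrute(target, options, process_num, dns_servers, next_subs,
                     scan_count, found_count, queue_size_list, tmp_dir)
    # one greenlet per resolver; each pins one DNS server inside _scan
    gevent.joinall([gevent.spawn(s._scan, i) for i in range(options.threads)])

# shared state the parent would create, one slot per process:
#   scan_count = multiprocessing.Value('i', 0)
#   found_count = multiprocessing.Value('i', 0)
#   queue_size_list = multiprocessing.Array('i', options.process)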