Example #1
    def start_load_and_resolver_domain(net_array, work_path, delete_old=True, count=COUNT_THREAD, verbose=False,
                                       count_cycle=10, resolve_dns='127.0.0.1'):
        """
        Запускам процессы резолвинга

        :param net_array: unicode|list
        :type work_path: unicode
        :type delete_old: bool
        :type count: int
        :type verbose: bool
        :type count_cycle: int
        :type resolve_dns: unicode
        :return:
        """

        if verbose:
            log_path = os.path.abspath(os.path.join(work_path, 'log'))
            if not os.path.exists(log_path):
                os.makedirs(log_path)
        else:
            log_path = False

        count_array_data = count * count_cycle
        data_for_process = []
        for thread_number in range(0, count_array_data):
            data_for_process.append([])

        counter_all = {}

        for prefix in PREFIX_LIST:
            BColor.process("Load prefix_list %s " % prefix)
            file_prefix = os.path.join(work_path, prefix + "_domains")
            file_rib_data = open(file_prefix)

            BColor.process("Load file %s " % file_prefix)
            line = file_rib_data.readline()
            counter_all[prefix] = 0
            while line:
                # distribute lines round-robin across the worker buckets
                data_for_process[counter_all[prefix] % count_array_data].append({'line': line, 'prefix': prefix})
                counter_all[prefix] += 1
                line = file_rib_data.readline()
            file_rib_data.close()

            BColor.process("Loaded zone %s - %s" % (prefix, counter_all[prefix]))

        process_list = []
        for i in range(0, count_array_data):
            BColor.process("Start process to work %s %s" % (i, len(data_for_process[i])))
            resolver = Resolver(i, data_for_process[i], resolve_dns, net_array, log_path)
            resolver.daemon = True
            process_list.append(resolver)
            resolver.start()

            if i != 0 and i % count == 0:
                BColor.process("Wait for threads finish...")
                for process in process_list:
                    try:
                        # timeout 2 days
                        process.join(1728000)
                    except KeyboardInterrupt:
                        BColor.warning("Interrupted by user")
                        return
                process_list = []

        if process_list:
            for process in process_list:
                try:
                    # timeout 2 days
                    process.join(1728000)
                except KeyboardInterrupt:
                    BColor.warning("Interrupted by user")
                    return

        if delete_old:
            Resolver.delete_not_updated_today(counter_all)
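
The loader above pre-partitions the work: every line read from the *_domains
files is appended to bucket counter % count_array_data, so each Resolver
process receives a roughly equal slice before any process starts. A minimal
sketch of that round-robin split, with a hypothetical item list standing in
for the domain files:

    # Round-robin partitioning, as in start_load_and_resolver_domain.
    # `items` is a hypothetical stand-in for the *_domains file lines.
    count_array_data = 4  # count * count_cycle in the example

    buckets = [[] for _ in range(count_array_data)]
    items = ["domain%d.ru" % n for n in range(10)]

    for counter, line in enumerate(items):
        buckets[counter % count_array_data].append({'line': line, 'prefix': 'ru'})

    for i, bucket in enumerate(buckets):
        print(i, len(bucket))  # bucket sizes differ by at most one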
Example #2
    def start_load_and_resolver_domain(net_array,
                                       work_path,
                                       delete_old=True,
                                       count_thread=COUNT_THREAD,
                                       verbose=False,
                                       count_cycle=2,
                                       resolve_dns='127.0.0.1'):
        """
        Запускам процессы резолвинга

        :param net_array: unicode|list
        :type work_path: unicode
        :type delete_old: bool
        :type count_thread: int
        :type verbose: bool
        :type count_cycle: int
        :type resolve_dns: unicode
        :return:
        """

        if verbose:
            log_path = os.path.abspath(os.path.join(work_path, 'log'))
            if not os.path.exists(log_path):
                os.makedirs(log_path)
        else:
            log_path = False

        count_array_data = count_thread * count_cycle
        data_for_process = []
        for thread_number in range(0, count_array_data):
            data_for_process.append([])

        counter_all = {}

        for prefix in PREFIX_LIST_ZONE.keys():
            BColor.process("Load prefix_list %s " % prefix)
            file_prefix = os.path.join(work_path, prefix + "_domains")
            file_domain_data = open(file_prefix)

            BColor.process("Load file %s " % file_prefix)
            line = file_domain_data.readline()
            counter_all[prefix] = 0
            while line:
                data_for_process[counter_all[prefix] %
                                 count_array_data].append({
                                     'line': line,
                                     'prefix': prefix
                                 })
                counter_all[prefix] += 1
                line = file_domain_data.readline()
            file_domain_data.close()

            BColor.process("Loaded zone %s - %s" %
                           (prefix, counter_all[prefix]))

        process_list = []
        for i in range(0, count_array_data):
            BColor.process("Start process to work %s %s" %
                           (i, len(data_for_process[i])))
            resolver = Resolver(i, data_for_process[i], resolve_dns, net_array,
                                log_path)
            resolver.daemon = True
            process_list.append(resolver)
            resolver.start()

            if i != 0 and i % count_thread == 0:
                BColor.process("Wait for threads finish...")
                for process in process_list:
                    try:
                        # timeout 2 days
                        process.join(1728000)
                    except KeyboardInterrupt:
                        BColor.warning("Interrupted by user")
                        return
                process_list = []

        if process_list:
            for process in process_list:
                try:
                    # timeout 2 days
                    process.join(1728000)
                except KeyboardInterrupt:
                    BColor.warning("Interrupted by user")
                    return

        if delete_old:
            Resolver.delete_not_updated_today(counter_all)
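
Both variants start the workers in batches: after every count_thread starts,
the parent joins the running batch with a generous two-day timeout
(presumably so that join() stays interruptible by KeyboardInterrupt) before
launching the next batch. A reduced sketch of that batching with
multiprocessing.Process directly; the work function is hypothetical:

    import multiprocessing

    def work(n):
        print("worker %d done" % n)

    if __name__ == '__main__':
        count_thread = 2   # processes allowed per batch
        total = 6          # count_thread * count_cycle in the example

        process_list = []
        for i in range(total):
            p = multiprocessing.Process(target=work, args=(i,))
            p.daemon = True
            process_list.append(p)
            p.start()

            if i != 0 and i % count_thread == 0:
                for p in process_list:
                    p.join(1728000)  # two-day timeout, as in the example
                process_list = []

        for p in process_list:
            p.join(1728000)

Note that because the condition skips i == 0, the first batch starts
count_thread + 1 processes before the first join.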
Example #3
    def start_load_and_resolver_domain(net_array: SubnetTree.SubnetTree,
                                       work_path: str,
                                       delete_old: bool = True,
                                       count_thread: int = COUNT_THREAD,
                                       verbose: bool = False,
                                       resolve_dns: str = '127.0.0.1') -> None:
        """
        Запускам процессы резолвинга
        :return:
        """

        if verbose:
            log_path = os.path.abspath(os.path.join(work_path, 'log'))
            if not os.path.exists(log_path):
                os.makedirs(log_path)
        else:
            log_path = False

        # Split all domains from the files across the worker processes.
        # Example of a line from the file:
        # 0--2.RU	REGRU-RU	15.06.2019	15.06.2020	16.07.2020	1
        queue_data = multiprocessing.Queue(MAX_DOMAIN_COUNT)
        queue_statistic = multiprocessing.Queue(count_thread + 5)
        counter_all = {}

        for prefix in PREFIX_LIST_ZONE.keys():
            BColor.process("Load prefix_list %s " % prefix)
            file_prefix = os.path.join(work_path, prefix + "_domains")
            file_domain_data = open(file_prefix)

            BColor.process("Load file %s " % file_prefix)
            line = file_domain_data.readline()
            counter_all[prefix] = 0
            while line:
                queue_data.put({'line': line, 'prefix': prefix})
                counter_all[prefix] += 1
                line = file_domain_data.readline()
            file_domain_data.close()

            BColor.process("Loaded zone %s - %s" %
                           (prefix, counter_all[prefix]))

        # Start the domain-parsing processes
        start_time = datetime.now()
        registrant_mutex = multiprocessing.Lock()
        process_list = []

        dist_ip = multiprocessing.Manager().dict()
        for i in range(0, count_thread):
            resolver = Resolver(i, queue_data, resolve_dns, net_array,
                                log_path, registrant_mutex, queue_statistic,
                                dist_ip)
            resolver.daemon = True
            process_list.append(resolver)
            resolver.start()

        # Report progress of the domain parsing
        status_prefix = os.path.join(work_path, "status_parse_domain")
        process_status = Status(queue_data, status_prefix)
        process_status.daemon = True
        process_status.start()

        if process_list:
            for process in process_list:
                try:
                    # timeout 2 days
                    process.join(1728000)
                except KeyboardInterrupt:
                    BColor.warning("Interrupted by user")
                    return

        process_status.join(10)

        queue_data.close()
        diff = datetime.now() - start_time

        all_time = 0
        all_count = 0
        while not queue_statistic.empty():
            statistic_data = queue_statistic.get()
            all_time += statistic_data['time_diff']
            all_count += statistic_data['count']

        # guard against a division by zero when no statistics came back
        if all_count:
            performance_per_process = all_time / all_count
            performance = diff.seconds / all_count
            BColor.process(
                "Performance %f per process, total time %i per process, total count %i, performance %f, all time %i"
                % (performance_per_process, all_time, all_count, performance,
                   diff.seconds))

        # After all domains are processed, delete the domains that were not updated today
        if delete_old:
            Resolver.delete_not_updated_today(counter_all)
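
Example #3 replaces the pre-built lists with a single bounded
multiprocessing.Queue: the parent enqueues every line, each Resolver pulls
work as it becomes free, and a second queue carries per-worker statistics
back. A stripped-down sketch of that fan-out; the worker body is a
hypothetical stand-in for the Resolver class, and a sentinel per worker
replaces the original's join-with-timeout shutdown:

    import multiprocessing

    def worker(queue_data, queue_statistic):
        count = 0
        while True:
            item = queue_data.get()
            if item is None:   # sentinel: no more work
                break
            count += 1         # a real worker would resolve item['line'] here
        queue_statistic.put({'count': count, 'time_diff': 0.0})

    if __name__ == '__main__':
        queue_data = multiprocessing.Queue(100)  # bounded, like MAX_DOMAIN_COUNT
        queue_statistic = multiprocessing.Queue()
        n_workers = 2

        procs = [multiprocessing.Process(target=worker,
                                         args=(queue_data, queue_statistic))
                 for _ in range(n_workers)]
        for p in procs:
            p.start()

        for n in range(10):
            queue_data.put({'line': 'domain%d.ru' % n, 'prefix': 'ru'})
        for _ in range(n_workers):
            queue_data.put(None)   # one sentinel per worker

        for _ in range(n_workers):
            print(queue_statistic.get())
        for p in procs:
            p.join()

Since the original fills the queue before starting any consumer, its
MAX_DOMAIN_COUNT bound has to exceed the total number of domains or the
put() calls would block.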
Example #4
def main(argv):
    verboseprint(arguments)

    threadList = []
    updateList = []

    # some defaults
    hosts = get_file_as_list('alexa/alexa50k.csv')
    resolvers = get_file_as_list('alexa/cloudResolvers.csv')
    OpenResolvers = get_file_as_list('alexa/public_resolvers.csv')

    session1 = get_db_session()

    if arguments["firewall"]:
        verboseprint("Host vote requested: %s, %s" % (arguments["<host>"], arguments["<ip>"]))
        if elect_official(arguments["<host>"], arguments["<ip>"], session1, resolvers, OpenResolvers):
            print("IP ok")
        else:
            verboseprint("IP tainted")
        exit()


    if arguments["--archiving"]:
        verboseprint("Only archiving process")
        while True:
            archive_on_flag(session1)
            verboseprint("Archived")
            time.sleep(600)

    if arguments["--lookupserver"]:
        print "Starting the lookup server"
        firewall_daemon.run(port=51805, debug = True,host='130.83.186.141' ) #ekana allagi edo kai ebala to host                                                                                                                                                                                    
        exit()                                                                                                                                                                                                              

    if arguments["--bench"]:
        names = []
        addrs = []
        dumpcounter=0
        testers=session1.execute("SELECT archivedrequests.host, resourcerecords.rdata FROM archivedrequests  inner join resourcerecords on archivedrequests.id = resourcerecords.parent_id where resourcerecords.rtype='1' limit 1000;")
        for row in testers:
            names.append(row[0])
            addrs.append(row[1])

        for x in range (0,len(names)):
            dumpcounter+=1
            print dumpcounter
            lookup_host=names.pop()
            ip=addrs.pop()
            elect_official(lookup_host, ip, session1,resolvers,OpenResolvers)

        print "Done love"



    if arguments["--alexa"]:
        verboseprint("Only alexa list is being processed")
        for resolver in resolvers:
            resolver=threading.Thread(target=request_save, args = (hosts,resolver, session1))
            resolver.start()
            threadList.append(resolver)
        for thread in threadList:
            thread.join()
        exit(1)
    dumpCounter=0

    if arguments["--process"]:
        verboseprint("The reponses from the archive requests")
        get_archivals = session1.query(Request_Archive).filter(Request_Archive.parsed==0).all()  
        for r in get_archivals:
            dumpCounter+=1
            print dumpCounter
            msgA = dns.message.from_text(r.response.encode('utf8'))
        
            for rdata in msgA.answer:
                for items in rdata.items:
            # write out rdatatype
                    rr_item = Resource_Record()
                    session1.add(rr_item) 
                    rr_item.parent_id = r.id
                    rr_item.rtype = rdata.rdtype
                    rr_item.ttl   = rdata.ttl
                    rr_item.rdata  = items.to_text()
                    r.parsed=1

        session1.commit()
        exit(1)

    # we start one thread for each resolver
    for resolver in resolvers:
        threading.Thread(target=request_save, args=(hosts, resolver, session1)).start()

    while True:
        # Find requests whose TTL has expired and refresh them.
        # TODO: check only one resolver per host so that popular names
        # (e.g. google.com) are not updated once per resolver.
        get_requests = session1.query(Request).filter_by(expired=0).all()
        for r in get_requests:
            verboseprint("Request %s %s %s %s\n-------------------\n" % (r.host, r.resolver, r.timestamp, r.ttl))
            ttl_datetime = r.timestamp + datetime.timedelta(seconds=r.ttl)
            verboseprint("TTL Datetime: %s" % ttl_datetime)
            if ttl_datetime < datetime.datetime.utcfromtimestamp(time.time()):
                try:
                    r.expired = 10
                    verboseprint("Inside updateList")
                    session1.commit()
                    request_update(r.host, r.resolver, session1)
                except Exception:
                    print("Problem saving to DB")

        # wait before rechecking
        time.sleep(60)

    verboseprint("Finished")
Example #5
def main():
    """Main program processing."""
    opts = OptionParser()

    opts.add_option("-d", "--dump", dest="dump", action="store_true", default=False, help="Dump all NS RR host data.")

    opts.add_option("-l", "--log", dest="log", default="/tmp/zone.log", help="Debug logfile location.")

    opts.add_option("-t", "--threads", dest="threads", default=10, help="How many concurrent threads to operate with.")

    opts.add_option("-z", "--zone", dest="zone", help="Zonefile to process.")

    (options, args) = opts.parse_args()

    logging.basicConfig(filename=options.log, level=logging.DEBUG)

    try:
        zone = open(options.zone)
    except IOError as err:
        print("Failed to open the zonefile (%s): %s" % (options.zone, err))
        sys.exit(255)

    (zones, nshosts) = readFileContent(zone)

    # Spin up the threads for resolution activities.
    nsaddrs = {}
    nsdict = {}

    for _ in range(int(options.threads)):
        resolver = Resolver(hosts, answers)
        resolver.daemon = True
        resolver.start()

    queries = 0
    for host in nshosts:
        hosts.put("A/%s" % host)
        hosts.put("AAAA/%s" % host)
        queries += 2

    count = 0
    while count < queries:
        (host, rr) = answers.get()
        logging.debug("Returned host: %s", host)
        for rdata in rr:
            if host in nsaddrs:
                nsaddrs[host].append(rdata.address)
            else:
                nsaddrs[host] = [rdata.address]

        logging.debug("Count: %s Queries: %s", count, queries)
        count += 1

    for host in nsaddrs:
        for addr in nsaddrs[host]:
            if addr in nsdict:
                nsdict[addr] += 1
            else:
                nsdict[addr] = 1

    print "Zone count: %s" % len(zones)
    print "NSHost count: %s" % len(nshosts)
    print "NSAddr count: %s" % len(nsdict)
    if options.dump:
        print "All NSAddrs:"
        print "count\taddress"
        for addr in nsdict:
            print "%s\t%s" % (nsdict[addr], addr)

    print "Analysis of address usage:"
    parseNS(nsdict)
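
The Resolver threads in this example drain a job queue of "RRTYPE/hostname"
strings and push (host, answers) tuples onto an answer queue. A
self-contained sketch of that two-queue worker pattern; socket.getaddrinfo
stands in for the real DNS lookup, and the queue names match the
module-level hosts/answers queues the example assumes:

    import queue
    import socket
    import threading

    hosts = queue.Queue()
    answers = queue.Queue()

    def resolver_worker():
        while True:
            rrtype, _, host = hosts.get().partition("/")
            family = socket.AF_INET6 if rrtype == "AAAA" else socket.AF_INET
            try:
                infos = socket.getaddrinfo(host, None, family)
                answers.put((host, sorted({info[4][0] for info in infos})))
            except socket.gaierror:
                answers.put((host, []))   # unresolvable: empty answer

    threading.Thread(target=resolver_worker, daemon=True).start()
    hosts.put("A/example.com")
    print(answers.get())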