def get_domain_dns_rc(domain_data):
    """
    Resolve the DNS (NS) records for a batch of domains and publish the
    result to Kafka.

    :param domain_data: dict describing the probe task. Keys read here:
        'task_id', 'domains', 'task_type', 'task_num', 'total_task_num'.
        ('task_from' exists but is currently unused.)
    """
    global domain_dns_rc_set  # module-level store of raw domain DNS records
    domain_dns_rc_set = []  # must be reset: this process is long-running
    task_id = domain_data['task_id']
    domains = domain_data['domains']
    task_type = domain_data['task_type']
    # task_from = domain_data['task_from']  # TODO: unused for now; may later
    # distinguish tasks loaded from the database vs. pushed by a peer.
    task_num = domain_data['task_num']
    task_total_num = domain_data['total_task_num']
    domain_ns_data = []  # accumulates NS records merged from all workers
    _logger.logger.info('开始解析任务:%s-%s/%s的域名的NS记录' %
                        (task_id, task_num, task_total_num))
    if domains:  # only probe when there is something to resolve
        processed_domain_data = processing_domain_data(domains)  # pre-process input
        # split into one chunk per CPU core -> one worker process per chunk
        domain_split = domain_resign_process(processed_domain_data,
                                             cpu_count())
        process_list = []
        process_manager = Manager()
        process_dns_result = process_manager.dict()

        # fan the DNS probing out across worker processes
        # (loop var renamed from `domains` so the input list is not clobbered)
        for i, domain_chunk in enumerate(domain_split):
            worker = Process(target=coroutine_fetch,
                             args=(domain_chunk, i, process_dns_result))
            worker.start()
            process_list.append(worker)

        # block until every worker has finished
        for worker in process_list:
            worker.join()
        # merge the per-process NS-record results
        for partial_result in process_dns_result.values():
            domain_ns_data.extend(partial_result)

    # NOTE: a response is produced even when the input or the result is empty
    dns_result = generate_kafka_result(task_id, task_type, task_num,
                                       task_total_num, domain_ns_data)
    kafka_servers = _system_config.read_kafka_servers()
    # `producer` (was `p`) no longer shadows the Process loop variable
    producer = confluent_kafka_producer(topic="dnsrst",
                                        servers=kafka_servers,
                                        timeout=0)
    producer.push(value=dns_result)  # TODO: check whether the push succeeded
    _logger.logger.info('结束解析任务:%s-%s/%s的域名的DNS记录' %
                        (task_id, task_num, task_total_num))