def get_sites_by_seed_sites(account: MajesticCom, seed_domains: [], catagories: [], fresh_data=False, index=0,
                                iteration=1, loop_count=0, count_per_domain=100, callback=None, current_count=0,
                                max_count=-1, tf=20) -> []:
        if iteration < 0:
            raise ValueError("get_sites_by_seed_sites: iteration should be >= 0.")
        sub_domains = [LinkChecker.get_root_domain(x, use_www=False)[4] for x in seed_domains[index:]]
        if len(sub_domains) == 0:
            return []
        backlinks = []
        # counter = index
        if max_count > 0 and current_count >= max_count:
                return backlinks
        temp_sub_domains = []
        temp = []
        # target_catagories = []
        # for catagory in catagories:
        #     target_catagories.append(str(CategoryManager.decode_sub_category(catagory, False)))
        for sub_domain in sub_domains:
            print("doing backlinks of domain:", sub_domain, "seed len:", len(temp_sub_domains))
            temp = []  # reset per domain so a failed request does not reprocess the previous domain's results
            try:
                temp = account.get_backlinks(sub_domain, count_per_domain, topic="", is_dev=False,
                                             fresh_data=fresh_data)
                current_count += 1
            except Exception as ex:
                print(ex)
            for item in temp:
                if isinstance(item, MajesticBacklinkDataStruct):

                    # item_catagory = str(CategoryManager.decode_sub_category(item.src_topic, False))
                    domain = LinkChecker.get_root_domain(item.backlink, use_www=False)[4]
                    item.ref_domain = domain
                    # if callback is not None:
                    #     callback(item)
                    # if len(target_catagories) > 0 and item_catagory not in target_catagories:
                    #         continue
                    if domain not in sub_domains and domain not in temp_sub_domains:
                        if len(catagories) > 0:
                            is_in = False
                            if len(item.src_topic) > 0:
                                decoded = str(CategoryManager.decode_sub_category(item.src_topic, False))
                                for cate in catagories:
                                    if cate in decoded:
                                        is_in = True
                                        break
                                if is_in and item.src_tf >= tf:
                                    temp_sub_domains.append(domain)
                        elif item.src_tf >= tf:
                            temp_sub_domains.append(domain)
                        item.ref_domain = domain
                        if callback is not None:
                            callback(item)

            if max_count > 0 and current_count >= max_count:
                break
        if loop_count >= iteration:
            return backlinks
        else:
            return backlinks + GoogleMajestic.get_sites_by_seed_sites(account, sub_domains + temp_sub_domains, catagories, fresh_data, len(seed_domains),
                                                                      iteration, loop_count+1, count_per_domain, callback, current_count, max_count, tf)
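A minimal usage sketch (not part of the original module), assuming an already-authenticated MajesticCom instance named majestic; the callback simply collects every backlink item reported for a newly seen domain:

found = []

def collect(item):
    # item is a MajesticBacklinkDataStruct reported for a newly seen domain
    found.append((item.ref_domain, item.backlink, item.src_tf))

GoogleMajestic.get_sites_by_seed_sites(majestic, ["example.com", "example.org"], catagories=[],
                                       iteration=1, count_per_domain=50, callback=collect,
                                       max_count=200, tf=25)
print("collected", len(found), "backlink items")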
    def get_sites(keyword: str, page_number: int=1, result_per_page: int=100,
                  index: int=0, length: int=100, use_browser=False,
                  source_type="", filter_list=[], country_code='', return_domain_home_only=True, days_ago=0,
                  **kwargs) -> []:

    # def get_sites(keyword: str, page: int=1, index: int=0, length: int=100,
    #               history=SeedSiteSettings.TIME_NOW, blog=False) -> []:
        assert page_number > 0, "page number should be greater than 0"
        assert index >= 0, "index should be greater than or equal to 0"
        assert length > 0, "length should be greater than 0"
        search_query = BingConst.SearchLink.format(quote(keyword), quote(keyword), (page_number-1)*length + index + 1, length)
        user_agent = WebRequestCommonHeader.webpage_agent
        try:
            req = BingCom._get_response(request_link=search_query, user_agent=user_agent, **kwargs)
            # req = requests.get(search_query, timeout=30, headers=WebRequestCommonHeader.get_html_header())
            result = req.text
            soup = bs4.BeautifulSoup(result, "html.parser")  # explicit parser avoids bs4's default-parser warning
            tags = soup.select(BingConst.SitePath)
            domains = []
            for tag in tags:
                try:
                    domain = tag.attrs["href"].strip().replace(" ", "")
                    if return_domain_home_only:
                        domain = LinkChecker.get_root_domain(domain, use_www=False)[2]  # get the link
                    else:
                        domain = LinkChecker.get_root_domain(domain, use_www=False)[3]
                    if len(domain) > 0:
                        domains.append(domain)
                except:
                    pass

            new_list = []
            if isinstance(domains, list):
                if len(filter_list) > 0:
                    for domain in domains:
                        if isinstance(domain, str):
                            temp = domain.lower().strip()
                            if not any(x in temp for x in filter_list):
                                new_list.append(temp)
                else:
                    new_list = domains

            end = index + length
            data_len = len(new_list)
            if domains is not None and index < data_len:
                if data_len >= end:
                    return new_list[index:end]
                else:
                    return new_list[index:]
            else:
                return []

        except Exception as ex:
            print(ex)
            return None
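A hedged usage sketch (names and network access assumed): fetch one page of Bing results for a keyword, drop results whose domain contains any of the filter substrings, and keep only root domains; get_sites() returns None when the request itself fails.

results = BingCom.get_sites("garden furniture", page_number=1, length=50,
                            filter_list=["facebook.", "youtube.", "twitter."],
                            return_domain_home_only=True)
if results is None:
    print("request failed")
else:
    for domain in results:
        print(domain)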
    def get_search_results(keyword: str, page_number: int, proxy: ProxyStruct=None, result_per_page: int=GoogleConst.Result100, timeout=5,
                           return_domain_home_only=True, use_forbidden_filter=True, days_ago=0, addtional_query_parameter: str="",
                           country_code="us", use_browser=False) -> list:
        """
        generic normal search, get a list of domains form page
        :param keyword:
        :param page_number:  > 0
        :param resultPerPage:
        :param timeout:
        :param return_domain_home_only: return root domain name if True, else return protocol suffix + domain name
        :param use_forbidden_filter:
        :param days_ago: specify how many days ago before when results were indexed.
        :return:
        """
        assert page_number > 0, "page number should be greater than 0."
        page_range = GoogleCom.get_result_per_page_range()
        assert result_per_page in page_range, "result per page should be one of those values:" + str(page_range)

        sub_domain = "www"
        request_link = GoogleUtility.get_local_endpoint(country_code, sub_domain) \
                       + GoogleConst.CommonSearchPath.format(quote(keyword), result_per_page, (page_number - 1) * result_per_page, country_code) \
                       + addtional_query_parameter+GoogleUtility.get_query_for_days(days_ago)
        try:
            user_agent = WebRequestCommonHeader.webpage_agent
            if not use_browser:
                response = GoogleCom._get_response(request_link, proxy=proxy, timeout=timeout, user_agent=user_agent)
                if not response.status_code == 200:
                    # if response.status_code == 503:
                        # print(response.text)
                    raise ConnectionRefusedError("error getting result, with status code:", response.status_code)
                result = response.text
            else:
                result = GoogleCom._get_response_browser(request_link, proxy=proxy, timeout=timeout, user_agent=user_agent)
            soup = bs4.BeautifulSoup(result, "html.parser")  # explicit parser avoids bs4's default-parser warning
            tags = soup.select(GoogleConst.SitePath)
            domains = []
            for tag in tags:
                try:
                    domain = tag.text.strip().replace(" ", "")
                    if return_domain_home_only:
                        domain = LinkChecker.get_root_domain(domain, use_www=False)[2]  # get the link
                    else:
                        domain = LinkChecker.get_root_domain(domain, use_www=False)[3]
                    if use_forbidden_filter and LinkChecker.is_domain_forbidden(domain):
                        continue
                    if len(domain) > 0:
                        domains.append(domain)
                except:
                    pass
            return domains

        except Exception as ex:
            print(ex)
            return None
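A similar hedged sketch for the Google variant above, assuming GoogleCom exposes get_search_results as shown; the call returns None on failure and the raw list can contain duplicates.

domains = GoogleCom.get_search_results("expired domains", page_number=1,
                                       result_per_page=GoogleConst.Result100,
                                       country_code="us", days_ago=30)
if domains:
    unique_domains = sorted(set(domains))
    print(len(unique_domains), "unique domains on page 1")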
 def _check_whois_v1(self, domain_data: OnSiteLink):
     root_domain = domain_data.link
     try:
         if root_domain.startswith("http"):
             root_domain = LinkChecker.get_root_domain(domain_data.link)[1]
         real_response_code = domain_data.response_code
         whois = LinkChecker.check_whois(root_domain)  # check whois record
         if whois[0]:
             if whois[2]:  # domain is expired
                 real_response_code = ResponseCode.Expired
             else:
                 real_response_code = ResponseCode.MightBeExpired
         if real_response_code == ResponseCode.Expired:
             #if ResponseCode.domain_might_be_expired(real_response_code):
             domain_data.link = root_domain
             domain_data.response_code = real_response_code
             #return_obj = OnSiteLink(root_domain, real_response_code, domain_data.link_level, OnSiteLink.TypeOutbound)
             # if isinstance(self._queue_lock, multiprocessing.RLock):
             with self._queue_lock:
                 self._output_q.put(
                     (domain_data.link, domain_data.response_code))
     except Exception as ex:
         ErrorLogger.log_error("ExternalSiteChecker.WhoisChecker", ex,
                               "_check_whois() " + root_domain)
     finally:
         self._add_job_done_one()
def check_whois_with_dns(page: OnSiteLink):

    real_response_code = ResponseCode.DNSError
    skip_whois_check = False
    try:
        root_result = LinkChecker.get_root_domain(page.link)
        root_domain = root_result[1]
        sub_domain = root_result[4]
        suffix = root_result[5]

        if len(sub_domain) == 0 or suffix not in TldUtility.TOP_TLD_LIST:
            skip_whois_check = True
        else:

            if LinkChecker.is_domain_DNS_OK(sub_domain):  # check DNS first
                real_response_code = ResponseCode.NoDNSError
                skip_whois_check = True
            elif not sub_domain.startswith("www."):
                if LinkChecker.is_domain_DNS_OK("www." + root_domain):
                    real_response_code = ResponseCode.NoDNSError
                    skip_whois_check = True
                # response = LinkChecker.get_response(page.link, timeout)  # check 404 error

            page.response_code = real_response_code
            page.link_type = OnSiteLink.TypeOutbound
            page.link = root_domain

    except Exception as ex:
        # ErrorLogger.log_error("WhoisChecker", ex, "_check_whois_with_dns() " + page.link)
        skip_whois_check = True
    finally:
        if not skip_whois_check and real_response_code == ResponseCode.DNSError:
            return check_whois(page)
        else:
            return page.link, page.response_code
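The function above only falls through to a WHOIS lookup when neither the host nor its "www." variant resolves. A rough standalone illustration of that gate (a sketch using the standard library, not the project's LinkChecker implementation):

import socket

def _resolves(host: str) -> bool:
    # crude stand-in for LinkChecker.is_domain_DNS_OK: True if the name resolves to an address
    try:
        socket.gethostbyname(host)
        return True
    except socket.gaierror:
        return False

def needs_whois(sub_domain: str, root_domain: str) -> bool:
    # mirrors the order used in check_whois_with_dns: the host first, then the "www." fallback
    if _resolves(sub_domain):
        return False
    if not sub_domain.startswith("www.") and _resolves("www." + root_domain):
        return False
    return True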
 def _check_whois(self, domain_data: OnSiteLink):
     root_domain = domain_data.link.lower()
     try:
         if not self._is_debug:
             if root_domain.startswith("http"):
                 root_domain = LinkChecker.get_root_domain(
                     domain_data.link)[1]
             is_available, is_redemption = LinkChecker.is_domain_available_whois(
                 root_domain)  # check whois record
             if is_available or is_redemption:
                 if is_available:
                     real_response_code = ResponseCode.Expired
                 else:
                     real_response_code = ResponseCode.MightBeExpired
                 domain_data.link = root_domain
                 domain_data.response_code = real_response_code
                 #return_obj = OnSiteLink(root_domain, real_response_code, domain_data.link_level, OnSiteLink.TypeOutbound)
                 self._put_output_result_in_queue(domain_data)
         else:
             self._put_output_result_in_queue(domain_data)
     except Exception as ex:
         ErrorLogger.log_error("ExternalSiteChecker.WhoisChecker", ex,
                               "_check_whois() " + root_domain)
     finally:
         self._add_job_done_one()
 def testScrapePageBatch(self):
     save_path = "/Users/superCat/Desktop/PycharmProjectPortable/test/profile_archive_downloaded.csv"
     file_path = "/Users/superCat/Desktop/PycharmProjectPortable/test/profile_test_links.txt"
     CsvLogger.log_to_file_path(save_path, [ArchiveDetail.get_title()])
     domains_links = FileHandler.read_lines_from_file(file_path)
     for link in domains_links:
         # link = "http://web.archive.org/web/20140711025724/http://susodigital.com/"
         #link ="http://web.archive.org/web/20130415001342/http://www.bbc.co.uk/"
         stop_event = multiprocessing.Event()
         inner_link, domain, path, link_class, ext, fragment = LinkUtility.get_link_detail(
             link)
         root_domain = LinkChecker.get_root_domain(domain)[1]
         path = "/index.html"
         link_s = LinkAttrs(link=link,
                            path=path,
                            ref_link="/",
                            shadow_ref_link="/",
                            source=path,
                            res_type=LinkUtility.EXT_WEBPAGE,
                            level=0)
         explorer = ArchiveExplorer(
             original_domain=root_domain,
             link=link,
             external_stop_event=stop_event,
             download_base_dir=FilePath.get_default_archive_dir(),
             max_thread=10,
             max_level=2)
         explorer.run()
         archive_detail = explorer.get_archive_detail()
         CsvLogger.log_to_file_path(save_path, [archive_detail.to_tuple()])
 def test_get_sub_domains(self):
     full_link = "http://blogspot.co.uk/"
     domain_data = LinkChecker.get_root_domain(full_link, False)
     root_domain = domain_data[1]
     sub_domain = domain_data[4]
     domain_suffix = domain_data[5]
     # note: str.strip() strips characters, not the suffix string; remove the suffix explicitly
     sub_domain_no_local = sub_domain[:-len(domain_suffix)].rstrip(".") if sub_domain.endswith(domain_suffix) else sub_domain
     print(sub_domain_no_local)
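Across these examples the tuple returned by LinkChecker.get_root_domain is indexed rather than unpacked: from the call sites, index 1 appears to be the registered root domain, 4 the full host name and 5 the public suffix, while 2 and 3 are used as link forms. A rough analogue of those three fields, assuming the tldextract package (an illustration, not the project's implementation):

import tldextract

def root_domain_fields(url: str):
    # approximates the fields the examples read at indices [1], [4] and [5]
    parts = tldextract.extract(url)          # e.g. "http://www.bbc.co.uk/"
    root_domain = parts.registered_domain    # "bbc.co.uk"
    sub_domain = ".".join(p for p in (parts.subdomain, parts.domain, parts.suffix) if p)  # "www.bbc.co.uk"
    suffix = parts.suffix                    # "co.uk"
    return root_domain, sub_domain, suffix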
 def testScrapePage(self):
     # link = "http://web.archive.org/web/20111102054835/http://www.agfdh.org:80/"
     link = "http://web.archive.org/web/20150425143742/http://susodigital.com/"
     #link ="http://web.archive.org/web/20130415001342/http://www.bbc.co.uk/"
     stop_event = multiprocessing.Event()
     inner_link, domain, path, link_class, ext, fragment = LinkUtility.get_link_detail(link)
     root_domain = LinkChecker.get_root_domain(domain)[1]
     path = "/index.html"
     link_s = LinkAttrs(link=link, path=path, ref_link="/", shadow_ref_link="/", source=path, res_type=LinkUtility.EXT_WEBPAGE, level=0)
     explorer = ArchiveExplorer(original_domain=root_domain, link=link,
                                external_stop_event=stop_event,
                                download_base_dir=FilePath.get_default_archive_dir(), max_thread=10, max_level=2)
     explorer.run()
     save_path = "/Users/superCat/Desktop/PycharmProjectPortable/test/profile_archive_downloaded.csv"
     CsvLogger.log_to_file_path(save_path, [ArchiveDetail.get_title()])
     archive_detail = explorer.get_archive_detail()
     CsvLogger.log_to_file_path(save_path, [archive_detail.to_tuple()])
    def check_external_page(checker: SiteChecker, page: OnSiteLink, timeout=10):
        """
        check DNS Error Only
        :param checker:
        :param page:
        :param timeout:
        :return:
        """
        # response = LinkChecker.get_response(page.link, timeout)
        #real_response_code = response[0]
        #real_response_code = ResponseCode.LinkOK

        #print("-------checking external " + page.link)
        try:
            root_result = LinkChecker.get_root_domain(page.link)
            root_domain = root_result[1]
            sub_domain = root_result[4]

            if len(sub_domain) == 0 or root_domain in checker.external_cache_list:
                return
            else:
                if len(checker.external_cache_list) < checker.external_cache_size:
                    checker.external_cache_list.append(root_domain)

            real_response_code = page.response_code
            if LinkChecker.is_domain_DNS_OK(sub_domain):  # check DNS first
                real_response_code = ResponseCode.NoDNSError
            elif not sub_domain.startswith("www."):
                if LinkChecker.is_domain_DNS_OK("www." + root_domain):
                    real_response_code = ResponseCode.NoDNSError
                # response = LinkChecker.get_response(page.link, timeout)  # check 404 error

            page.response_code = real_response_code
            page.link_type = OnSiteLink.TypeOutbound
            page.link = root_domain
            #print(" ready to output external:", str(page))
            if checker.output_all_external or ResponseCode.domain_might_be_expired(real_response_code):
                    # if checker.delegate is not None:
                    #     checker.delegate(new_page)
                if checker.output_queue is not None:
                    with checker._queue_lock:
                        checker.output_queue.put(page)
        except Exception as ex:
            PrintLogger.print(ex)
            ErrorLogger.log_error("PageChecker", ex, "check_external_page() " + page.link)
def check_whois(domain_data: OnSiteLink):
    root_domain = domain_data.link.lower()
    try:
        if root_domain.startswith("http"):
            root_domain = LinkChecker.get_root_domain(domain_data.link)[1]
        is_available, is_redemption = LinkChecker.is_domain_available_whois(root_domain)  # check whois record
        if is_available or is_redemption:
            if is_available:
                real_response_code = ResponseCode.Expired
            else:
                real_response_code = ResponseCode.MightBeExpired
            domain_data.link = root_domain
            domain_data.response_code = real_response_code
            #return_obj = OnSiteLink(root_domain, real_response_code, domain_data.link_level, OnSiteLink.TypeOutbound)
            # self._output_q.put((domain_data.link, domain_data.response_code))
    except Exception as ex:
        print(ex)
    finally:
        return domain_data.link, domain_data.response_code
    def _get_back_link_thread(account: MajesticCom, sub_domain: str, count_per_domain: int, fresh_data: bool,
                              sub_domains: [], temp_sub_domains: [], categories: [], callback, tf=20, bad_country_list=[]):
        temp = []
        print("doing backlinks of domain:", sub_domain, " domain len:", len(temp_sub_domains))
        try:

            temp = account.get_backlinks(sub_domain, count_per_domain, topic="", is_dev=False, fresh_data=fresh_data)
        except Exception as ex:
            print(ex)
        for item in temp:
            if isinstance(item, MajesticBacklinkDataStruct):

                # item_catagory = str(CategoryManager.decode_sub_category(item.src_topic, False))
                domain = LinkChecker.get_root_domain(item.backlink, use_www=False)[4]
                item.ref_domain = domain
                # if callback is not None:
                #     callback(item)
                # if len(target_catagories) > 0 and item_catagory not in target_catagories:
                #         continue
                if domain not in sub_domains and domain not in temp_sub_domains:
                    if len(categories) > 0:
                        is_in = False
                        if len(item.src_topic) > 0:
                            decoded = str(CategoryManager.decode_sub_category(item.src_topic, False))
                            for cate in categories:
                                if cate in decoded:
                                    is_in = True
                                    break
                            if is_in and item.src_tf >= tf:
                                temp_sub_domains.append(domain)
                    elif item.src_tf >= tf:
                        temp_sub_domains.append(domain)
                    item.ref_domain = domain
                    if callback is not None:
                        callback(item)
        time.sleep(1)
    def get_sites_by_seed_sites_muti_threads(account: MajesticCom, seed_domains: [], catagories: [], fresh_data=False, index=0,
                                iteration=1, loop_count=0, count_per_domain=100, callback=None, current_count=0,
                                max_count=-1, tf=20, thread_pool_size=20, get_backlinks=True, bad_country_list=[]):
        """

        :param account:
        :param seed_domains:
        :param catagories:
        :param fresh_data:
        :param index:
        :param iteration:
        :param loop_count:
        :param count_per_domain:
        :param callback:
        :param current_count:
        :param max_count:
        :param tf:
        :param thread_pool_size:
        :param get_backlinks: it will get backlinks of domains if True, else it will get ref domains instead, which is cheaper.
        :return:
        """
        target_func = GoogleMajestic._get_back_link_thread if get_backlinks else GoogleMajestic._get_ref_domain_thread
        if iteration < 0:
            raise ValueError("get_sites_by_seed_sites: iteration should be >= 0.")
        sub_domains = [LinkChecker.get_root_domain(x, use_www=False)[4] for x in seed_domains[index:]]
        if len(sub_domains) == 0:
            print("sub_domains is len 0.")
            return
        # counter = index
        process_len = len(sub_domains)
        if max_count > 0:
            if current_count >= max_count:
                print("exceeded seed len.")
                return
            elif current_count + process_len > max_count:
                process_len = max_count - current_count
        #target_catagories = []
        # for catagory in catagories:
        #     target_catagories.append(str(CategoryManager.decode_sub_category(catagory, False)))
        temp_sub_domains = []

        thread_pool = ThreadPool(processes=thread_pool_size)
        processes = [thread_pool.apply_async(target_func,
                                             args=(account, x, count_per_domain, fresh_data, sub_domains,
                                                   temp_sub_domains, catagories,  callback, tf, bad_country_list))
                     for x in sub_domains[0: process_len]]
        results = [y.get() for y in processes]
        thread_pool.terminate()
        current_count += process_len
        if loop_count >= iteration:
            return
        else:
            new_seeds = sub_domains + temp_sub_domains
            print("going to next level with seeds:", len(new_seeds))
            return GoogleMajestic.get_sites_by_seed_sites_muti_threads(account, new_seeds,
                                                                       catagories, fresh_data, len(seed_domains),
                                                                       iteration, loop_count+1, count_per_domain,
                                                                       callback, current_count, max_count,
                                                                       thread_pool_size=10, tf=tf,
                                                                       get_backlinks=get_backlinks,
                                                                       bad_country_list=bad_country_list)
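A hedged usage sketch of the multi-threaded variant, again assuming an authenticated MajesticCom instance named majestic; referring domains are requested instead of backlinks to keep API cost down (the referring-domain thread is not shown above, so the exact callback payload is assumed):

seen_items = []
GoogleMajestic.get_sites_by_seed_sites_muti_threads(majestic, ["example.com"], catagories=[],
                                                    iteration=2, count_per_domain=100,
                                                    callback=seen_items.append, max_count=500,
                                                    tf=25, thread_pool_size=10,
                                                    get_backlinks=False)
print("callback received", len(seen_items), "items")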
    def __init__(self, full_link: str="", data_source: SiteTempDataSrcInterface=None,
                 controller: SiteCheckerController=None,
                 max_level=10, max_page=1000, delegate=None, output_buff_size=2000,
                 output_queue=None, output_all_external=False, result_delegate=None,
                 memory_control_terminate_event=None, check_robot_text=True,
                 **kwargs):
        """
        :param full_link: The full link of a domain, e.g: https://www.google.co.uk
        :param domain: domain to crawl
        :param max_level: stop crawling if it reaches this level
        :param max_page: maximum pages to check within a site, also stop crawling
        :param delegate: if this is not None, then it will send the latest result of external domain of ResponseCode==404 or 999
        :param result_delegate: send site_info upon finish
        :param memory_control_terminate_event: if this is not None and being set, it will be able to terminate an external memory controlled process.
        :return:
        """
        FeedbackInterface.__init__(self, **kwargs)
        #super(SiteChecker, self).__init__(**kwargs)
        if full_link is None or len(full_link) == 0:
            raise ValueError("full_link is required.")

        original_path = ""
        try:
            paras = urlsplit(full_link)
            self.scheme, self.domain, original_path = paras[0], paras[1], paras[2]
        except:
            pass

        domain_data = LinkChecker.get_root_domain(full_link, False)
        self.root_domain = domain_data[1]
        self.sub_domain = domain_data[4]
        self.domain_suffix = domain_data[5]
        # note: str.strip() strips characters, not the suffix string; remove the suffix explicitly
        if self.sub_domain.endswith(self.domain_suffix):
            self.sub_domain_no_local = self.sub_domain[:-len(self.domain_suffix)].rstrip(".")
        else:
            self.sub_domain_no_local = self.sub_domain
        if self.scheme == "":
            self.scheme = "http"
        if self.domain == "":
            self.domain = self.root_domain
        self.orginal_link = full_link
        self.domain_link = LinkChecker.get_valid_link(self.root_domain, full_link, self.scheme)
        self.max_level = max_level
        self.max_page = max_page
        self.page_count = 0  # keep track of pages done
        self._page_count_shadow = 0  # track the previous count
        self._all_page_count_shadow = 0  # track the previous count in the data source
        self.internal_page_count = 0
        self.internal_page_last_count = 0
        self.page_allocated = 0
        self.current_level = 0  # if this = 0, it is root domain/home_page
        self._stop_event = Event()
        valid_file_name = SiteTempDataSrcInterface.get_valid_file_name(self.domain_link)
        self._external_db_buffer = ExternalTempDataDiskBuffer(valid_file_name+".ext.db", self,
                                                              stop_event=self._stop_event,
                                                              buf_size=int(output_buff_size/2),
                                                              dir_path=get_db_buffer_default_dir(),
                                                              convert_output=False)
        self._external_db_buffer.append_to_buffer([(self.root_domain, ResponseCode.DNSError),], convert_tuple=False)
        self._memory_control_terminate_event = memory_control_terminate_event
        self.task_control_lock = threading.RLock()
        if data_source is None:
            #self.data_source = SiteTempDataDisk(self.root_domain, ref_obj=self)
            self.data_source = SiteTempDataDiskWithBuff(ref=self.domain_link, output_buff_size=output_buff_size, ref_obj=self)
        else:
            self.data_source = data_source  # a list of OnSiteLink
        self.delegate = delegate
        if LinkChecker.might_be_link_html_page(original_path):
            self.data_source.append(OnSiteLink(self.domain_link, response_code=ResponseCode.LinkOK, link_level=1)) # add the root domain as a starting point
        self.data_source.append(OnSiteLink(self.scheme + "://www."+self.sub_domain, ResponseCode.LinkOK, link_level=1))
        self.data_source.append(OnSiteLink(self.scheme + "://" + self.domain, ResponseCode.LinkOK, link_level=1))
        self.cache_list = []  # internal page cache
        self.page_need_look_up_temp = 0
        self.cache_list.append(self.domain_link)
        if "www." not in self.sub_domain:
            self.cache_list.append(self.scheme + "://www."+self.sub_domain)
        self.cache_list.append(self.scheme + "://" + self.domain)
        self.page_need_look_up = self.data_source.count_all()
        self.cache_size = 500  # small cache to avoid checking links in the file system with lots of reads and writes
        self._double_check_cache_lock = threading.RLock()
        self._double_check_cache = deque(maxlen=self.cache_size)
        self.external_cache_list = []
        self.external_cache_size = 500  # cache that holds external sites
        self.external_links_checked = 0
        self.add_internal_page_OK_only = True
        self.output_queue = output_queue
        self.output_all_external = output_all_external
        self.controller = controller
        self.result_delegate = result_delegate
        self.page_count_lock = threading.RLock()
        self.internal_page_count_lock = threading.RLock()
        self.level_lock = threading.RLock()
        self.page_look_up_lock = threading.RLock()
        self.external_link_check_lock = threading.RLock()
        self._finihsed = False
        self.task_control_max = 1
        self.agent = "VegeBot (we follow your robots.txt settings before crawling, you can slow down the bot by change the Crawl-Delay parameter in the settings." \
                     "if you have an enquiry, please email to: [email protected])"
        self.agent_from = "*****@*****.**"
        if check_robot_text:
            self.robot_agent = LinkChecker.get_robot_agent(self.sub_domain, protocol=self.scheme)
        else:
            self.robot_agent = None
        self.site_crawl_delay = 0.60

        if isinstance(self.robot_agent, Rules):
            delay_temp = self.robot_agent.delay(self.agent)
            if delay_temp is not None and delay_temp != self.site_crawl_delay:
                self.site_crawl_delay = delay_temp

        self.task_control_counter = 1
        self._speed_penalty_count = 0
        self._speed_penalty_threshold = 10
        self._progress_logging_speed = 120
        self._output_period = 120
        self._output_batch_size = 100
        self._death_wish_sent = False
        SiteChecker._is_lxml_parser_exist()
        self._output_thread = None
        self._output_queue = None
        self.progress_logger = ProgressLogger(self._progress_logging_speed, self, self._stop_event)
        self._status = "Start"
        self._populate_with_state()  # restore last known state
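For reference, the first attributes the constructor derives come straight from urlsplit plus LinkChecker.get_root_domain; a small standalone illustration of the split (the get_root_domain values in the comments are assumed examples):

from urllib.parse import urlsplit

full_link = "https://www.google.co.uk/search"
scheme, domain, original_path = urlsplit(full_link)[0:3]
# scheme == "https", domain == "www.google.co.uk", original_path == "/search"
# LinkChecker.get_root_domain(full_link, False) would then supply root_domain
# (e.g. "google.co.uk"), sub_domain (e.g. "www.google.co.uk") and domain_suffix
# (e.g. "co.uk"), which the constructor stores on self.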