Code example #1
def creat_keypair_mulitthreading(how_many_pairs, paths):
    # Create the output .pem file; colons in the timestamp are replaced
    # because they are not valid in filenames on all platforms
    the_path = "keypair_file" + time.ctime().replace(":", ".") + ".pem"
    keypair_file = open(the_path, "w+")
    keypair_file.close()
    pools = thread_pool(16)
    # Each call to creat_keypair receives one (key_size, path) tuple
    pools.map(creat_keypair, [(2048, str(the_path))] * int(how_many_pairs))
    pools.close()
    pools.join()
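
Pool.map passes each element of the iterable to the worker as a single argument, so creat_keypair above receives a (2048, path) tuple. If the worker takes two separate parameters instead, starmap unpacks each tuple; a minimal sketch with a hypothetical worker:

from multiprocessing.dummy import Pool as thread_pool

def make_key(size, path):  # hypothetical two-argument worker
    print("would write a %d-bit key to %s" % (size, path))

pool = thread_pool(4)
pool.starmap(make_key, [(2048, "out.pem")] * 4)  # starmap unpacks each tuple
pool.close()
pool.join()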
Code example #2
def download_using_thread_pool(urls):
    pool = thread_pool(100)
    # First argument is the function to call; second is an iterable of its arguments
    resps = pool.map(session.get, urls)
    pool.close()
    pool.join()
    return resps
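
The snippet above assumes a module-level session object; a minimal sketch of how it might be wired up (the URLs are placeholders):

import requests
from multiprocessing.dummy import Pool as thread_pool

session = requests.Session()  # shared Session assumed by download_using_thread_pool

urls = ["https://example.com/a", "https://example.com/b"]
responses = download_using_thread_pool(urls)
print([r.status_code for r in responses])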
Code example #3
def tutorialspoint_all():
    url = "https://www.tutorialspoint.com/tutorialslibrary.htm"

    print("Connecting to Tutorialspoint")
    while True:
        try:
            page_response = requests.get(url,
                                         headers={'User-Agent': 'Chrome'},
                                         timeout=5)
            soup = BeautifulSoup(page_response.content, "html.parser")
            str_soup = str(soup)
        except requests.RequestException:
            print("Could not connect, trying again in 1 second!")
            time.sleep(1)
            continue
        else:
            break

    links = []
    for ul in soup.find_all("ul", attrs={"class": "menu"}):
        for li in ul.find_all("li"):
            for a in li.find_all("a"):
                links.append(url[:url.rfind("/")] + a['href'])

    with thread_pool(processes=3) as pool:
        pool.map(tutorialspoint, links)
Code example #4
    def __async_analyze(self, target_list: list, interrupt=False):
        def analyze(post):
            def _anl_():
                post_i = {}
                if self.schema['post_type'] == 'url':
                    post_i = self.__post_analyze(post, self.schema['domain'])
                else:
                    post_i = self.__post_analyze(post)
                if 'name' not in post_i or post_i['name'] in [None, '']:
                    # invalid datapack: name is missing or empty
                    print('error: datapack name cannot be null (may be 404)')
                    return
                self.info_list.append(post_i)

            if interrupt:
                _anl_()
            else:
                try:
                    _anl_()
                except Exception as e:
                    print(post, ':', 'got error :', e)
                    self.retry_list.append(post)
            self.current += 1
            print('done', self.current, '/', str(self.total))

        self.current = 0
        self.total = len(target_list)
        pool = thread_pool()
        pool.map(analyze, target_list)
        pool.close()
        pool.join()
        del self.current
        del self.total
Code example #5
 def __init__(self, bus_name):
     try:
         super().__init__(bus_name, "/InstallPkgs")
     except dbus.DBusException:
         raise PkgException("Exception in install pkgs")
     self._thread_pool = thread_pool(10)
     self._mp_pool = mp_pool(10)
     self._result = None
     self._lock = threading.Lock()
     config_dir = str(Path.home()) + '/' + CONFIG_DIR
     self.cfg = ConfigObj(config_dir + CONFIG_FILE)
Code example #6
File: bro_pdns.py Project: aeppert/bro-pdns
def process_fn(f):
    thread_count = int(os.getenv("BRO_PDNS_THREADS", "1"))
    processed = 0

    aggregated = list(aggregate_file(f))
    batches = window(aggregated, 10000)

    pool = thread_pool(thread_count)

    processed = sum(pool.imap(load_records, batches, chunksize=1))

    print "%d" % processed
Code example #7
File: bro_pdns.py Project: JustinAzoff/bro-pdns
def process_fn(f):
    thread_count = int(os.getenv("BRO_PDNS_THREADS", "1"))
    processed = 0

    aggregated = list(aggregate_file(f))
    batches = window(aggregated, 10000)

    pool = thread_pool(thread_count)

    processed = sum(pool.imap(load_records, batches, chunksize=1))

    print "%d" % processed
Code example #8
File: packages.py Project: millerthegorilla/fedkx
 def __init__(self, team, arch):
     super().__init__()
     self._lp_team = team
     self._launchpad = None
     self._lp_arch = arch
     self._lp_ppa = ""
     self._pkgs = []
     self.debs = []
     self._thread_pool = thread_pool(10)
     self._mp_pool = mp_pool(10)
     self._result = None
     self._lock = threading.Lock()
Code example #9
def mulit_threading_search_path(start_path):
    SPpool = thread_pool(8)
    templist = []
    templist2 = []
    sysinfo_path(start_path)
    while not diffQ.empty() or not diffQA.empty():
        templist.append(diffQ.get())
        try:
            SPpool.map(sysinfo_path, templist)
        except Exception:
            pass
        # Do not call pool.close() too early: closing while tasks are still
        # running can raise errors such as AssertionError. close() only
        # prevents new tasks from being submitted; the workers exit once all
        # tasks have completed, so close() is not needed here.

        templist = []
        templist2.append(diffQA.get())
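
As the comment above notes, close() only stops new submissions and join() then waits for the workers to exit; a minimal sketch of the usual lifecycle (the worker is illustrative):

from multiprocessing.dummy import Pool as thread_pool

def work(item):  # hypothetical worker
    return item * 2

pool = thread_pool(8)
results = pool.map(work, range(10))  # map blocks until every result is ready
pool.close()  # no further tasks may be submitted
pool.join()   # wait for the workers to finish
print(results)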
Code example #10
def tutorialspoint(url):
    tutorial = url.split("/")[-2]
    domain_name = url.split(tutorial)[0][:-1]
    while True:
        try:
            page_response = requests.get(url,
                                         headers={'User-Agent': 'Chrome'},
                                         timeout=1)
            soup = BeautifulSoup(page_response.content, "html.parser")
            str_soup = str(soup)
        except requests.RequestException:
            print(
                "Could not connect to tutorialspoint, trying again in 1 second!"
            )
            time.sleep(1)
            continue
        else:
            break

    print("Downloading " + tutorial)
    links = []

    for ul in soup.find_all("ul", attrs={"class": "toc chapters"}):
        for li in ul.find_all("li"):
            for a in li.find_all("a"):
                if ".htm" in a['href']:
                    links.append(domain_name + a['href'])
    pages = []
    with thread_pool(processes=2 * mp.cpu_count()) as pool:
        pages = pool.map(tutorialspoint_get_page, links)

    head = str_soup[:str_soup.find("<body")] + '\n<body>\n'
    head = head.replace(
        "<style>",
        '<style>\n.prettyprint{\nbackground-color:#D3D3D3;\nfont-size: 12px;}\n'
    )

    end = '\n</body>\n</html>'
    page = head + "".join(pages) + end
    with open('..' + os.sep + 'temp' + os.sep + tutorial + ".html", "w") as f:
        f.write(page)
    print(tutorial + " download completed")
    return
Code example #11
def javatpoint_all():
    url = "https://www.javatpoint.com/"

    print("Connecting to Javatpoint")
    while True:
        try:
            page_response = requests.get(url,
                                         headers={'User-Agent': 'Chrome'},
                                         timeout=5)
            soup = BeautifulSoup(page_response.content, "html.parser")
            str_soup = str(soup)
        except requests.RequestException:
            print("Could not connect, trying again in 1 second!")
            time.sleep(1)
            continue
        else:
            break

    tutorials = []
    links = []
    break_condition = False
    for div in soup.find_all("div", attrs={"class": "firsthomecontent"}):
        if break_condition:
            break
        for a in div.find_all("a"):
            if "forum" in a["href"]:
                break_condition = True
                break
            if "http" in a["href"]:
                links.append(a["href"])
            else:
                links.append("https://www.javatpoint.com/" + a["href"])
            for tutorial_name in a.find_all("p"):
                tutorials.append(tutorial_name.contents[0])

    with thread_pool(processes=3) as pool:
        pool.starmap(javatpoint, zip(links, tutorials))
    return
Code example #12
def javatpoint(url, tutorial=None):
    if tutorial is None:
        tutorial = url.split("/")[-1]
    print("Downloading " + tutorial)
    while True:
        try:
            page_response = requests.get(url,
                                         headers={'User-Agent': 'Chrome'},
                                         timeout=5)
            soup = BeautifulSoup(page_response.content, "html.parser")
            str_soup = str(soup)
        except requests.RequestException:
            print(
                "Could not connect to javatpoint, trying again in 1 second!")
            time.sleep(1)
            continue
        else:
            break
    links = []

    for div in soup.find_all("div", attrs={"class": "leftmenu"}):
        for a in div.find_all("a"):
            links.append(url[:url.rfind("/") + 1] + a["href"])

    pages = []
    with thread_pool(processes=2 * mp.cpu_count()) as pool:
        pages = pool.map(javatpoint_get_page, links)

    page = str_soup[:str_soup.find("<body")] + "\n<body>\n" + "".join(
        pages) + "\n</body>\n</html>"

    with open('..' + os.sep + 'temp' + os.sep + tutorial + ".html", "w") as f:
        f.write(page)
    # os.system('xdg-open page.html')
    # os.system('xdg-open page.pdf')
    print(tutorial + " download completed")
    return
Code example #13
File: cloudsql_importer.py Project: mkcmurthy/pontem
def restore_secondary_indexes():
    """Restores dropped secondary indexes.

  This is done to replicate the original databases' index configuration. Index
  commands are stored in files by table name so that each table's indexes can
  be loaded in parallel.

  Raises:
    RuntimeError: If restoring any secondary index failed.
  """
    logging.info("Restoring secondary indexes")

    restore_files = []
    commands_and_files = {}
    for restore_table_indexes_file in os.listdir(
            cloudsql_importer_constants.RESTORE_INDEXES_COMMANDS_FOLDER):
        restore_path = os.path.join(
            cloudsql_importer_constants.RESTORE_INDEXES_COMMANDS_FOLDER,
            restore_table_indexes_file)
        logging.debug("Reading restore commands from file '%s'", restore_path)

        restore_file = open(restore_path, "r")
        commands = restore_file.read()
        restore_files.append(restore_file)

        commands_and_files[commands] = restore_path

    pool = thread_pool(len(commands_and_files.keys()))
    failed_commands = pool.map(restore, commands_and_files.keys())
    failed_commands = [command for command in failed_commands if command]

    for restore_file in restore_files:
        restore_file.close()

    if failed_commands:
        handle_restore_secondary_indexes_failures(failed_commands,
                                                  commands_and_files)
        raise RuntimeError("Not all secondary indexes were properly restored.")
Code example #14
 def javatpoint_start():
     with thread_pool(processes=3) as pool:
         pool.map(javatpoint, javatpoint_list)
     return
Code example #15
from multiprocessing.dummy import Pool as thread_pool
import datetime
import time


def fun(tem):
    print(tem)
    time.sleep(1)


if __name__ == '__main__':
    begin = datetime.datetime.now()
    print(begin)
    async_pool = thread_pool(processes=5)
    results = []

    for i in range(10):
        result = async_pool.apply_async(fun, (i, ))
        print(result)
        results.append(result)

    for i in results:
        i.wait()
    end = datetime.datetime.now()
    used_time = end - begin
    print(used_time)
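
Here apply_async submits each call individually and returns an AsyncResult, which is why the loop collects the handles and waits on them. When per-call handles are not needed, map expresses the same work more compactly and blocks until every call completes; a minimal sketch reusing fun from above:

sync_pool = thread_pool(processes=5)
sync_pool.map(fun, range(10))  # blocks until all ten calls return
sync_pool.close()
sync_pool.join()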
Code example #16
def mulit_type_gather(filepath):
    typepool = thread_pool(16)
    try:
        typepool.map(file_type_gather, filepath)
    except Exception:
        pass
Code example #17
 def tutorialspoint_start():
     with thread_pool(processes=3) as pool:
         pool.map(tutorialspoint, tutorialspoint_list)
     return
Code example #18
def run_main(names):
    pool = thread_pool(3)
    # apply_async schedules a single call, so submit one task per name;
    # passing the whole list would call get_http(*names) once
    for name in names:
        pool.apply_async(get_http, (name,))
    pool.close()
    pool.join()
Code example #19
File: concurrent.py Project: winkar/python-utils
 def threadedFunc(iterable):
     pool = thread_pool(threads)
     results = pool.map(func, iterable)  # blocks until all results are ready
     pool.close()
     pool.join()
     for x in results:
         yield x