def _assert_all_load(resources, host, resource_checking_method='head', **extra):
    urls = []
    futures = []
    session = FuturesSession(executor=ThreadPoolExecutor(max_workers=2))
    for path in resources:
        if path is None:
            LOGGER.warning("empty path in resources: %s", resources)
            continue

        if path.startswith("data:"):
            LOGGER.debug("Skipping `data:` resource '%s'", path)
            continue

        url = _build_url(path, host)
        if url in RESOURCE_CACHE and resource_checking_method == 'head':
            LOGGER.debug("Cached HEAD %s: %s", url, RESOURCE_CACHE[url], extra=extra)
            continue
        urls.append(url)
        futures.append(getattr(session, resource_checking_method)(url))

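    # block until every queued request finishes or HTTP_TIMEOUT elapses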
    wait(futures, HTTP_TIMEOUT)

    for url, future in zip(urls, futures):
        response = future.result()
        LOGGER.debug("Loading (%s) resource %s", resource_checking_method, url, extra=extra)

        if retries.retry_request(response):
            LOGGER.warning("Loading (%s) resource %s again due to %s status code", resource_checking_method, url, response.status_code, extra=extra)
            response = requests.get(url)

        assert_status_code(response, 200, url)
        RESOURCE_CACHE[url] = response.status_code
Example #2
def add_list_new() -> None:
    requester = FuturesSession(executor=ProcessPoolExecutor(30), session=requests.session())
    api_key = settings.TBA_API_HEADERS

    team_list_get = lambda p: requester.get(team_by_page_url_template(page=p), headers=api_key)
    team_participation_get = lambda tn: requester.get(team_participation_url_template(team=tn), headers=api_key)

    page_range = get_page_range()

    print("\nStarting %d HTTP requests for team lists, split between %d processes..." % (
        page_range[1] - page_range[0], requester.executor._max_workers))
    team_list_futures = [team_list_get(p) for p in range(*page_range)]
    print("Waiting...")
    wait(team_list_futures)
    print("Done!\n")

    teams_lists = map(lambda f: f.result().json(), team_list_futures)
    teams_data = [item for page_data in teams_lists for item in page_data]
    team_numbers = [*map(lambda t: t['team_number'], teams_data)]

    print("Starting %d HTTP requests for team participation data, split between %d processes..." % (
        len(team_numbers), requester.executor._max_workers))
    team_participation_futures = [team_participation_get(tn) for tn in team_numbers]
    print("Waiting...")
    wait(team_participation_futures)
    print("Done!\n")

    team_participations = map(lambda f: f.result().json(), team_participation_futures)
    arg_list = zip(team_numbers, teams_data, team_participations)

    for args in arg_list:
        add_team(*args)
Example #3
 def _ProcessFutureCallback(self, future, user_callback, self_callback):
   if user_callback:
     future.add_done_callback(functools.partial(self_callback, user_callback))
     return future
   else:
     futures.wait([future])
     return self_callback(user_callback, future)
 def update_depths(self):
     depths = {}
     futures = []
     for market in self.markets:
         futures.append(self.threadpool.submit(self.__get_market_depth, market, depths))
     wait(futures, timeout=20)
     return depths
Example #5
    def wait(self):
        if not self.executor:
            return []
        try:
            futures.wait(self.futures, return_when=futures.FIRST_EXCEPTION)
        except KeyboardInterrupt:
            for future in self.futures:
                future.cancel()
            raise

        # If there was an exception, cancel all the rest of the jobs.
        # If there was no exception, we can "cancel" the jobs anyway, because cancelling
        # does nothing if a job is already done.
        for future in self.futures:
            future.cancel()
        results = []
        error_indices = []
        cancelled_indices = []
        for i, future in enumerate(self.futures):
            if future.cancelled():
                results.append(futures.CancelledError())
                cancelled_indices.append(i)
            else:
                exc = future.exception()
                if exc is not None:
                    results.append(exc)
                    error_indices.append(i)
                else:
                    results.append(future.result())
        self.futures = []
        was_error = len(error_indices) > 0 or len(cancelled_indices) > 0
        return results, was_error, error_indices, cancelled_indices
    def test_l2_agent_restart(self, agent_restart_timeout=20):
        # Environment preparation is effectively the same as connectivity test
        vms = self._prepare_vms_in_single_network()
        vms.ping_all()

        ns0 = vms[0].namespace
        ip1 = vms[1].ip
        agents = [host.l2_agent for host in self.environment.hosts]

        # Restart agents on all nodes simultaneously while pinging across
        # the hosts. The ping has to cross int and phys bridges and travels
        # via central bridge as the vms are on separate hosts.
        with net_helpers.async_ping(ns0, [ip1], timeout=2,
                                    count=agent_restart_timeout) as done:
            LOG.debug("Restarting agents")
            executor = futures.ThreadPoolExecutor(max_workers=len(agents))
            restarts = [agent.restart(executor=executor)
                        for agent in agents]

            futures.wait(restarts, timeout=agent_restart_timeout)

            self.assertTrue(all([r.done() for r in restarts]))
            LOG.debug("Restarting agents - done")

            # It is necessary to give agents time to initialize
            # because some crucial steps (e.g. setting up bridge flows)
            # happen only after RPC is established
            common_utils.wait_until_true(
                done,
                exception=RuntimeError("Could not ping the other VM, L2 agent "
                                       "restart leads to network disruption"))
Example #7
    def build_all(self, deps=None):
        """Function that browse containers dependencies and build them.

        :params deps: Dictionary defining the container images
            dependencies.
        """

        if deps is None:
            deps = self.deps
        if isinstance(deps, (list,)):
            # Only a list of images can be multi-processed because they
            # are the last layer to build. Otherwise we could end up
            # building the same layer multiple times.
            # Number of workers will be based on CPU count with a min 2,
            # max 8. Concurrency in Buildah isn't that great so it's not
            # useful to go above 8.
            workers = min(8, max(2, processutils.get_worker_count()))
            with futures.ThreadPoolExecutor(max_workers=workers) as executor:
                future_to_build = {executor.submit(self.build_all,
                                   container): container for container in
                                   deps}
                futures.wait(future_to_build, timeout=self.build_timeout,
                             return_when=futures.ALL_COMPLETED)
        elif isinstance(deps, (dict,)):
            for container in deps:
                self._generate_container(container)
                self.build_all(deps.get(container))
        elif isinstance(deps, six.string_types):
            self._generate_container(deps)
Example #8
 def test_list_plugins_in_familly_async_fails(self, mock_urlopen):
   mock_urlopen.side_effect = Exception('something went wrong')
   callback = mock.Mock(return_value=None)
   future = self._nessus.ListPluginsInFamily('General', callback)
   futures.wait([future])
   self.assertTrue(future.done())
   callback.assert_called_once_with(None, error=mock.ANY)
Example #9
 def warmup(self, num_warmup):
     futures = []
     for i in range(num_warmup):
         c = self.conn.cursor()
         stmt = self.stmt
         futures.append(executor.submit(lambda cur=c, sql=stmt: cur.execute(sql)))  # bind per-iteration values to avoid late binding
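     # block until every warm-up statement has executed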
     wait(futures)
Example #10
    def get_lengths(self, directories):
        """
        directories: list of paths to directories with questions

        returns input_length, query_length
        """
        futures = []

        for d_idx, directory in enumerate(directories):
            for i in os.listdir(directory):
                if i.endswith('.question'):
                    self.nb_samples_list[d_idx] += 1
                    futures.append(self.executor.submit(DataProcessor.get_file_lengths, directory, i))

        assert len(futures) > 0

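        # block until every per-file length computation has finished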
        wait(futures)

        for f in futures:
            assert f.done()
            entity_set, max_entity_id, input_length, query_length = f.result()
            self.entity_set |= entity_set
            self.max_entity_id = max(self.max_entity_id, max_entity_id)
            self.input_length = max(self.input_length, input_length)
            self.query_length = max(self.query_length, query_length)
Example #11
    def _build(self, command, threading_events, stack_statuses, dependencies):
        """
        Launches or deletes all stacks in the environment.

        Whether the stack is launched or deleted depends on the value of
        <command>. It does this by calling stack.<command>() for
        each stack in the environment. Stack.<command>() is blocking, because
        it waits for the stack to be built, so each command is run on a
        separate thread. As some stacks need to be built before others,
        depending on their dependencies, threading.Events() are used to notify
        the other stacks when a particular stack is done building.

        :param command: The stack command to run. Can be (launch | delete).
        :type command: str
        """
        if self.stacks:
            num_stacks = len(self.stacks)
            with ThreadPoolExecutor(max_workers=num_stacks) as executor:
                futures = [
                    executor.submit(
                        self._manage_stack_build, stack,
                        command, threading_events, stack_statuses, dependencies
                    )
                    for stack in self.stacks.values()
                ]
                wait(futures)
        else:
            self.logger.info(
                "No stacks found for environment: '%s'", self.path
            )
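
The docstring above describes coordinating dependent stack builds with threading.Event objects. A minimal sketch of that hand-off pattern, with hypothetical stack names and a placeholder build step standing in for the real _manage_stack_build:

from concurrent.futures import ThreadPoolExecutor, wait
import threading

stacks = {"vpc": [], "db": ["vpc"], "app": ["vpc", "db"]}  # stack -> dependencies
events = {name: threading.Event() for name in stacks}

def build(name):
    for dep in stacks[name]:
        events[dep].wait()       # block until every dependency has finished
    print("building", name)      # placeholder for the real launch/delete call
    events[name].set()           # unblock the stacks that depend on this one

with ThreadPoolExecutor(max_workers=len(stacks)) as executor:
    wait([executor.submit(build, name) for name in stacks])
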
def test_wait(client):
    with client.get_executor(pure=False) as e:
        N = 10
        fs = [e.submit(slowinc, i, delay=0.05) for i in range(N)]
        res = wait(fs, timeout=0.01)
        assert len(res.not_done) > 0
        res = wait(fs)
        assert len(res.not_done) == 0
        assert res.done == set(fs)

        fs = [e.submit(slowinc, i, delay=0.05) for i in range(N)]
        res = wait(fs, return_when=FIRST_COMPLETED)
        assert len(res.not_done) > 0
        assert len(res.done) >= 1
        res = wait(fs)
        assert len(res.not_done) == 0
        assert res.done == set(fs)

        fs = [e.submit(slowinc, i, delay=0.05) for i in range(N)]
        fs += [e.submit(throws, None)]
        fs += [e.submit(slowdec, i, delay=0.05) for i in range(N)]
        res = wait(fs, return_when=FIRST_EXCEPTION)
        assert any(f.exception() for f in res.done)
        assert res.not_done

        errors = []
        for fs in res.done:
            try:
                fs.result()
            except RuntimeError as e:
                errors.append(e)

        assert len(errors) == 1
        assert "hello" in str(errors[0])
Example #13
def flac2mp3(folders, concurrency):
    '''Convert all files in folders to mp3'''
    import atexit
    import concurrent.futures as cf
    from concurrent.futures.thread import _python_exit
    from pydub import AudioSegment
    flac_files = list(lib.find_files(folders, ['flac']))

    pbar = None
    if not config.quiet:
        pbar = click.progressbar(length=len(flac_files), label='Converting musics')

    def convert(flac_path):
        logger.debug('Converting %s', flac_path)
        flac_audio = AudioSegment.from_file(flac_path, "flac")
        mp3_path = flac_path.replace('.flac', '.mp3')
        if not config.dry:
            flac_audio.export(mp3_path, format="mp3")
        else:
            logger.info("[DRY-RUN] Exporting from %s to %s", flac_path, mp3_path)
        if pbar:
            pbar.update(1)
    # Permit CTRL+C to work as intended
    atexit.unregister(_python_exit)  # pylint: disable=protected-access
    with cf.ThreadPoolExecutor(max_workers=concurrency) as executor:
        executor.shutdown = lambda wait: None
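        # shutdown() becomes a no-op so exiting the with-block does not block; cf.wait() below does the waiting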
        futures = [executor.submit(convert, flac_path) for flac_path in flac_files]
        cf.wait(futures)
Example #14
def main():
    pt = 0
    with ProcessPoolExecutor(max_workers=usecpus) as executer:
        a = []
        for i, j in combinations_with_replacement(range(2,351), 2):
        #for i, j in combinations_with_replacement(range(2,20), 2):
            future = executer.submit(get_ij_mi, i, j, frms_num)
            a.append(future)
            pt += 1
            if pt%1000 == 0:
                print(pt)
        wait(a)
    #join small files together
    os.system('cat *.px > temp-all')
    f2 = open("temp-all")
    for line in f2.readlines():
        a = re.split('\t|\n',line)
        s0 = int(a[0])
        s1 = int(a[1])
        s2 = float(a[2])
        b[s0][s1] = s2
        b[s1][s0] = s2
    f2.close()

    for i in range(0,352):
        for j in range(0,352):
            p = str(i) + '\t' + str(j) + '\t' + str(b[i][j]) + '\n'
            f.write(p)
    f.close()
    os.system('rm *.px')
    os.system('rm temp-all')
Example #15
 def test_all_on_paired_peers(self):
     conns = connection_pair()
     rpc = RPC("json", async=False)
     def echo_tag(ch):
         obj, _ = ch.recv()
         obj["tag"] = True
         ch.send(obj)
     rpc.register("echo-tag", echo_tag)
     tasks = [
         self.spawn(rpc.accept, conns[0], False),
         self.spawn(rpc.handshake, conns[1], False),
     ]
     futures.wait(tasks)
     peer1 = tasks[0].result()
     peer2 = tasks[1].result()
     tasks = [
         self.spawn(peer1.call, "echo-tag", {"from": "peer1"}),
         self.spawn(peer2.call, "echo-tag", {"from": "peer2"}),
         self.spawn(peer1.route, 2),
         self.spawn(peer2.route, 2),
     ]
     futures.wait(tasks)
     conns[0].close()
     conns[1].close()
     self.assertEqual(tasks[0].result()["from"], "peer1")
     self.assertEqual(tasks[0].result()["tag"], True)
     self.assertEqual(tasks[1].result()["from"], "peer2")
     self.assertEqual(tasks[1].result()["tag"], True)
Example #16
    def _assert_ping_during_agents_restart(
            self, agents, src_namespace, ips, restart_timeout=10,
            ping_timeout=1, count=10):
        with net_helpers.async_ping(
                src_namespace, ips, timeout=ping_timeout,
                count=count) as done:
            LOG.debug("Restarting agents")
            executor = futures.ThreadPoolExecutor(max_workers=len(agents))
            restarts = [agent.restart(executor=executor)
                        for agent in agents]

            futures.wait(restarts, timeout=restart_timeout)

            self.assertTrue(all([r.done() for r in restarts]))
            LOG.debug("Restarting agents - done")

            # It is necessary to give agents time to initialize
            # because some crucial steps (e.g. setting up bridge flows)
            # happen only after RPC is established
            agent_names = ', '.join({agent.process_fixture.process_name
                                     for agent in agents})
            common_utils.wait_until_true(
                done,
                timeout=count * (ping_timeout + 1),
                exception=RuntimeError("Could not ping the other VM, "
                                       "re-starting %s leads to network "
                                       "disruption" % agent_names))
Example #17
    def time_sosfilt(self, n_samples, threads):
        pool = ThreadPoolExecutor(max_workers=threads)
        futures = []
        for i in range(threads):
            futures.append(pool.submit(sosfilt, self.filt, self.chunks[i]))

        wait(futures)
Example #18
def main():
    with ProcessPoolExecutor(max_workers=usecpus) as executer:
        a = []
        for i, j in permutations(range(2,8), 2):
            future = executer.submit(get_ij_ca, i, j, frms_num)
            a.append(future)
        wait(a)
    #join small files together
    os.system('cat *.ax > temp-all')
    f2 = open("temp-all")
    for line in f2.readlines():
        a = re.split('\t|\n',line)
        s0 = int(a[0])
        s1 = int(a[1])
        s2 = float(a[2])
        b[s0][s1] = s2
        #b[s1][s0] = s2
    f2.close()

    for i in range(0,352):
        for j in range(0,352):
            p = str(i) + '\t' + str(j) + '\t' + str(b[i][j]) + '\n'
            f.write(p)
    f.close()
    os.system('mv *.ax crap/')
    os.system('rm temp-all')
Example #19
    def schedule(self):
        gcurrent = proc.current()

        while True:
            if self.runq:
                if self.runq[0] == gcurrent:
                    self.runq.rotate(-1)

                gnext = self.runq[0]

            elif len(self.sleeping) > 0:
                # we don't have any proc running, but a future may come back.
                # just wait for the first one.
                futures.wait([fs for fs in self.sleeping], timeout=.2,
                        return_when=futures.FIRST_COMPLETED)
                continue
            elif self._run_calls:
                gnext = self._run_calls.pop()
            else:
                return

            if not gnext.is_alive():
                self.runq.popleft()
                continue

            self._last_task = gnext
            if gnext != gcurrent:
                gnext.switch()

            if gcurrent is self._last_task:
                return
    def spider_concurrent(self):
        self.login()
        self.get_units()

        # Do the initial scrapes for each unit
        for unit in self.units:
            if '[' not in unit.name:
                fut = self.thread_pool.submit(self._scrape_unit, unit)
                self.futures.append(fut)

        # Wait so that we have some sections in the queue initially
        wait(self.futures, return_when=FIRST_COMPLETED)

        while not self.sections.empty():
            section = self.sections.get()

            # Tell the thread pool to scrape that section then add the
            # Future to a list 
            fut = self.thread_pool.submit(self._scrape_section, section)

            callback = lambda future: self._requeue(self._scrape_section, future)
            fut.add_done_callback(callback)

            self.futures.append(fut)

        self.sections.join()
        logger.info('{} files found'.format(self.documents.qsize()))
Example #21
    def generate_batch_files(self, sources, targets, batch_size):
        """
        save np arrays with batch_size samples into a file for the
        entire source directory. Files will be .npy

        sources: list of paths to source directories
        targets: same thing but for targets

        files will be np.array([X, Xq, y])
        """
        assert(len(sources) == len(targets))

        futures = []
        for source, target in it.izip(sources, targets):
            all_files = os.listdir(source)
            batch_file_lists = [all_files[x:x+batch_size] for x in xrange(0, len(all_files), batch_size)]
            for i, batch_file_list in enumerate(batch_file_lists):
                futures.append(self.executor.submit(self.make_batch, batch_file_list, source, target, i))

        wait(futures)
        assert len(futures) > 0

        for f in futures:
            assert f.done()
Example #22
def async_ping(namespace, ips):
    with futures.ThreadPoolExecutor(max_workers=len(ips)) as executor:
        fs = [executor.submit(assert_ping, namespace, ip, count=10) for ip in ips]
        yield lambda: all(f.done() for f in fs)
        futures.wait(fs)
        for f in fs:
            f.result()
def start(config: Config):
    set_session_config(per_process_gpu_memory_fraction=1, allow_growth=True, device_list=config.opts.device_list)
    base_model = {'digest': 'd6fce85e040a63966fa7651d4a08a7cdba2ef0e5975fc16a6d178c96345547b3', 'elo': 0}
    m = Manager()
    base_weight_path = os.path.join(config.resource.next_generation_model_dir, base_model['digest'] + '.h5')
    model_base = load_model(config, config.resource.model_best_config_path, base_weight_path)
    modelbt_pipes = m.list([model_base.get_pipes(need_reload=False) for _ in range(config.play.max_processes)])
    
    while True:
        while not check_ng_model(config, exculds=[base_model['digest'] + '.h5']):
            logger.info(f"Next generation model is None, wait for 300s")
            sleep(300)

        logger.info(f"Loading next generation model!")
        digest = check_ng_model(config, exculds=[base_model['digest'] + '.h5'])
        ng_weight_path = os.path.join(config.resource.next_generation_model_dir, digest + '.h5')
        model_ng = load_model(config, config.resource.next_generation_config_path, ng_weight_path)
        modelng_pipes = m.list([model_ng.get_pipes(need_reload=False) for _ in range(config.play.max_processes)])

        # play_worker = EvaluateWorker(config, model1_pipes, model2_pipes)
        # play_worker.start()
        with ProcessPoolExecutor(max_workers=config.play.max_processes) as executor:
            futures = []
            for i in range(config.play.max_processes):
                eval_worker = EvaluateWorker(config, modelbt_pipes, modelng_pipes, pid=i)
                futures.append(executor.submit(eval_worker.start))
        
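        # exiting the ProcessPoolExecutor block above already waited for every worker, so this wait() is a no-op safeguard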
        wait(futures)
        model_base.close_pipes()
        model_ng.close_pipes()

        results = []
        for future in futures:
            results += future.result()
        base_elo = base_model['elo']
        ng_elo = base_elo
        for res in results:
            if res[1] == -1: # loss
                res[1] = 0
            elif res[1] != 1: # draw
                res[1] = 0.5
            if res[0] % 2 == 0:
                # red = base
                _, ng_elo = compute_elo(base_elo, ng_elo, res[1])
            else:
                # black = base
                ng_elo, _ = compute_elo(ng_elo, base_elo, 1 - res[1])
        logger.info(f"Evaluation finished, Next Generation's elo = {ng_elo}, base = {base_elo}")
        # send ng model to server
        logger.debug(f"Sending model to server")
        send_model(ng_weight_path)
        data = {'digest': digest, 'elo': ng_elo}
        http_request(config.internet.add_model_url, post=True, data=data)
        os.remove(base_weight_path)
        base_weight_path = ng_weight_path
        base_model['digest'] = digest
        base_model['elo'] = ng_elo
        model_base = model_ng
        modelbt_pipes = m.list([model_base.get_pipes(need_reload=False) for _ in range(config.play.max_processes)])
Example #24
 def test_login_async(self, mock_urlopen):
   mock_urlopen.return_value = self._ExpectResponseFromFile('login_ok')
   callback = mock.Mock(return_value=None)
   future = self._nessus.Login('test', 'pass', callback)
   futures.wait([future])
   self.assertTrue(self._nessus.is_logged_in)
   self.assertTrue(future.done())
   callback.assert_called_once_with(mock.ANY)
def test_workers(client, s, a, b):
    N = 10
    with client.get_executor(workers=[b['address']]) as e:
        fs = [e.submit(slowinc, i) for i in range(N)]
        wait(fs)
        has_what = client.has_what()
        assert not has_what.get(a['address'])
        assert len(has_what[b['address']]) == N
def _start_all_threads(executor, num_workers):

    counter = collections.Counter(count=0)
    futures_ = []
    for i in range(num_workers):
        future = executor.submit(_wait_for_counter, counter, num_workers)
        futures_.append(future)
    futures.wait(futures_)
Example #27
def main():
    """
    Run a thread pool to handle where one thread handles one work queue.
    """
    # Have 4 listeners on each queue
    max_workers = len(RUNNERS) * 4

    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(handle, runner) for runner in RUNNERS]  # map() yields results, not futures
        wait(futures)
Example #28
 def detect_new_question(self):
     """
     Crawl the topic pages, looking for new questions
     """
     self.new_questions = set()
     futures = []
     for topic in self.topics:
         futures.append(self.executor.submit(self.execute, topic))
     cf.wait(futures, timeout=60, return_when=cf.ALL_COMPLETED)
Example #29
    def run(self):
        # init listeners, add them to the event loop
        for s in self.sockets:
            s.setblocking(False)
            self.poller.register(s, selectors.EVENT_READ, self.accept)

        timeout = self.cfg.timeout or 0.5

        while self.alive:
            # If our parent changed then we shut down.
            if self.ppid != os.getppid():
                self.log.info("Parent changed, shutting down: %s", self)
                return

            # notify the arbiter we are alive
            self.notify()

            events = self.poller.select(0.2)
            for key, mask in events:
                callback = key.data
                callback(key.fileobj)

            # handle keepalive timeouts
            self.murder_keepalived()

            # if we have more connections than the max number of connections
            # accepted on a worker, wait until some complete or exit.
            if len(self.futures) >= self.worker_connections:
                res = futures.wait(self.futures, timeout=timeout)
                if not res:
                    self.alive = False
                    self.log.info("max requests achieved")
                    break

        # shutdown the pool
        self.poller.close()
        self.tpool.shutdown(False)

        # wait for the workers
        futures.wait(self.futures, timeout=self.cfg.graceful_timeout)

        # if we still have futures running, try to close them
        while True:
            try:
                fs = self.futures.popleft()
            except IndexError:
                break

            sock = fs.conn.sock

            # the future is not running, cancel it
            if not fs.done() and not fs.running():
                fs.cancel()

            # make sure we close the sockets after the graceful timeout
            util.close(sock)
    def wait_for_confirmation(self):
        """ If asked to wait for confirmation, waits for all external systems\
            to confirm that they are configured and have read the database

        :rtype: None
        """
        logger.info("** Awaiting for a response from an external source "
                    "to state its ready for the simulation to start **")
        wait(self._wait_futures)
        self._wait_futures = list()
Example #31
# print(task1.result())
# print(task2.result())
# print('Crawling finished!')

from concurrent.futures import ThreadPoolExecutor, as_completed, wait
import time
import random
from functools import partial


def get_html(sleep_time, num):
    time.sleep(sleep_time)
    print("num {},get page {} success".format(num, sleep_time))
    return num


executor = ThreadPoolExecutor(max_workers=2)
# submit tasks to the thread pool via submit()
tasks = []
for i in range(10):
    sleep_time = random.randint(2, 5)
    # treat the function call on the right as a single unit
    tasks.append(executor.submit(get_html, sleep_time, i))

wait(tasks, return_when='ALL_COMPLETED')
print('111111')
# blocks, waiting for the tasks to complete
for i in as_completed(tasks):
    data = i.result()

    print('num {} success'.format(data))
Example #32
    # Python 3.8 default: max_workers = min(32, os.cpu_count() + 4)
    _params_len = len(params)
    _ip_proxys_len = len(ip_proxys)
    with ThreadPoolExecutor(max_workers=max_workers) as t:
        fs = [
            t.submit(
                sec_kill_task, miao_miao, params[i % _params_len],
                None if not _ip_proxys_len else {
                    'http':
                    None if
                    (index := i % _ip_proxys_len) == 0 else ip_proxys[index]
                }) for i in range(max_workers + 5)
        ]
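        # proxies rotate through ip_proxys by task index; index 0 (or an empty ip_proxys) means no proxy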

        # stop waiting after 120 seconds
        wait(fs, 120, return_when=FIRST_COMPLETED)
        global KILL_FLAG
        KILL_FLAG = True
        print('>>>>>>>>>>>>>>>>> No vaccine slot grabbed this round <<<<<<<<<<<<<<<<<<<')

    print('-----DONE----')


def _get_arguments():
    """
    Parse command-line arguments
    :return:
    """
    def _valid_int_type(i):
        valid_int = int(i)
        if valid_int < 1:
Example #33
    def _executors_repro(
            self,
            executors: dict,
            jobs: Optional[int] = 1) -> Mapping[str, Mapping[str, str]]:
        """Run dvc repro for the specified BaseExecutors in parallel.

        Returns:
            dict mapping stash revs to the successfully executed experiments
            for each stash rev.
        """
        result: Dict[str, Dict[str, str]] = defaultdict(dict)

        manager = Manager()
        pid_q = manager.Queue()

        rel_cwd = relpath(os.getcwd(), self.repo.root_dir)
        with ProcessPoolExecutor(max_workers=jobs) as workers:
            futures = {}
            for rev, executor in executors.items():
                future = workers.submit(
                    executor.reproduce,
                    executor.dvc_dir,
                    rev,
                    queue=pid_q,
                    name=executor.name,
                    rel_cwd=rel_cwd,
                    log_level=logger.getEffectiveLevel(),
                )
                futures[future] = (rev, executor)

            try:
                wait(futures)
            except KeyboardInterrupt:
                # forward SIGINT to any running executor processes and
                # cancel any remaining futures
                pids = {}
                while not pid_q.empty():
                    rev, pid = pid_q.get()
                    pids[rev] = pid
                for future, (rev, _) in futures.items():
                    if future.running():
                        os.kill(pids[rev], signal.SIGINT)
                    elif not future.done():
                        future.cancel()

            for future, (rev, executor) in futures.items():
                rev, executor = futures[future]

                try:
                    exc = future.exception()
                    if exc is None:
                        exec_result = future.result()
                        result[rev].update(
                            self._collect_executor(executor, exec_result))
                    elif not isinstance(exc, CheckpointKilledError):
                        logger.error(
                            "Failed to reproduce experiment '%s'",
                            rev[:7],
                        )
                except CancelledError:
                    logger.error(
                        "Cancelled before attempting to reproduce experiment "
                        "'%s'",
                        rev[:7],
                    )
                finally:
                    executor.cleanup()

        return result
Example #34
import time
import concurrent.futures as future
import utils

WORKERS = 4

start = time.time()

with future.ThreadPoolExecutor(max_workers=4) as executor:
    futures = {
        executor.submit(utils.check_website, address)
        for address in utils.WEBSITE_LIST
    }
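    # block until every website check has completed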
    future.wait(futures)

end = time.time()

print("Time: %s sec" % (end - start))
Example #35
import time
import os
from concurrent.futures import ProcessPoolExecutor, as_completed, wait, ALL_COMPLETED


def process_job(var):
    time.sleep(2)
    print("process id: {}, parent process id: {}".format(
        os.getpid(), os.getppid()))
    print("process {} finish".format(os.getpid()))
    return "process_" + str(var)


if __name__ == "__main__":
    start_time = time.time()
    process_executor = ProcessPoolExecutor(max_workers=5)
    process_tasks = [
        process_executor.submit(process_job, var=variable)
        for variable in range(3)
    ]
    # process_executor.shutdown()
    # print("results are: {}".format([each.result() for each in as_completed(process_tasks)]))
    wait(process_tasks, return_when=ALL_COMPLETED)
    print("results are: {}".format([task.result() for task in process_tasks]))
    print("total cost {} seconds".format(time.time() - start_time))
    print("main process finished: {}".format(os.getpid()))
Example #36
    book = html.xpath(
        "//div[@class='content_read']/div/div[@id='content']/text()")
    book = "".join(book).replace(u"\xa0\xa0\xa0\xa0", "\r\n")
    # print(book)
    with open("./quanzhifas/'{}".format(title + '.txt'), 'w',
              encoding='gbk') as f:
        f.write(book)
        print(title, 'download finished')


if __name__ == '__main__':
    url = "https://www.booktxt.net/0_595/"
    UseAgent = [
        'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36',
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60",
        "Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16"
    ]
    header = {'User-Agent': random.choices(UseAgent)[0], 'Referer': url}
    start = time.time()
    urls = get_responce(url, header)
    futers = []
    with ThreadPoolExecutor(max_workers=300) as p:
        for url in urls:
            tast = p.submit(get_book, url=url, header=header)
            futers.append(tast)

    wait(futers)
    end = time.time()
    print('Download finished in {}s'.format(end - start))
Example #37
    def download_dir(self,
                     blob,
                     local_path,
                     container_name=None,
                     use_basename=True,
                     workers=0):
        """
        Download a directory from Google Cloud Storage.

        Args:
            blob: `str`. blob to download.
            local_path: `str`. the path to download to.
            container_name: `str`. the name of the container.
            use_basename: `bool`. whether or not to use the basename of the key.
            workers: number of worker threads to use for parallel execution.
        """
        if not container_name:
            container_name, _, blob = self.parse_wasbs_url(blob)

        local_path = os.path.abspath(local_path)

        if use_basename:
            local_path = append_basename(local_path, blob)

        try:
            check_dirname_exists(local_path, is_dir=True)
        except PolyaxonPathException:
            os.makedirs(local_path)

        results = self.list(container_name=container_name,
                            key=blob,
                            delimiter="/")

        # Create directories
        for prefix in sorted(results["prefixes"]):
            direname = os.path.join(local_path, prefix)
            prefix = os.path.join(blob, prefix)
            # Download files under
            self.download_dir(
                blob=prefix,
                local_path=direname,
                container_name=container_name,
                use_basename=False,
            )

        pool, future_results = self.init_pool(workers)

        # Download files
        for file_key in results["blobs"]:
            file_key = file_key[0]
            filename = os.path.join(local_path, file_key)
            file_key = os.path.join(blob, file_key)
            future_results = self.submit_pool(
                workers=workers,
                pool=pool,
                future_results=future_results,
                fn=self.download_file,
                blob=file_key,
                local_path=filename,
                container_name=container_name,
                use_basename=False,
            )
        if workers:
            futures.wait(future_results)
            self.close_pool(pool=pool)
    process2 = dataconfig[40:80]
    process3 = dataconfig[80:120]
    process4 = dataconfig[120:160]
    process5 = dataconfig[160:200]

    res = []
    failure_kind = 'fs'
    # simulation(failure_kind, process1, True, '../datanew/5wss/fs/p1/', 0)

    with ProcessPoolExecutor(5) as executor:

        res.append(
            executor.submit(simulation, failure_kind, process1, True,
                            '../datatest_guding/8wss/fs/', 0))
        res.append(
            executor.submit(simulation, failure_kind, process2, True,
                            '../datatest_guding/8wss/fs/', 40))

        res.append(
            executor.submit(simulation, failure_kind, process3, True,
                            '../datatest_guding/8wss/fs/', 80))

        res.append(
            executor.submit(simulation, failure_kind, process4, True,
                            '../datatest_guding/8wss/fs/', 120))
        res.append(
            executor.submit(simulation, failure_kind, process5, True,
                            '../datatest_guding/8wss/fs/', 160))

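        # block until all five simulation slices have finished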
        wait(res)
Example #39
def _compute_futures(futures: List[Future]):
    wait(futures)
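    # result() re-raises any exception raised inside a worker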
    for future in futures:
        future.result()
Example #40
    def _executors_repro(
        self, executors: dict, jobs: Optional[int] = 1
    ) -> Dict[str, Dict[str, str]]:
        """Run dvc repro for the specified BaseExecutors in parallel.

        Returns:
            dict mapping stash revs to the successfully executed experiments
            for each stash rev.
        """
        result: Dict[str, Dict[str, str]] = defaultdict(dict)

        manager = Manager()
        pid_q = manager.Queue()

        rel_cwd = relpath(os.getcwd(), self.repo.root_dir)
        with ProcessPoolExecutor(max_workers=jobs) as workers:
            futures = {}
            for rev, executor in executors.items():
                pidfile = os.path.join(
                    self.repo.tmp_dir,
                    self.EXEC_PID_DIR,
                    f"{rev}{executor.PIDFILE_EXT}",
                )
                future = workers.submit(
                    executor.reproduce,
                    executor.dvc_dir,
                    rev,
                    queue=pid_q,
                    name=executor.name,
                    rel_cwd=rel_cwd,
                    log_level=logger.getEffectiveLevel(),
                    pidfile=pidfile,
                    git_url=executor.git_url,
                )
                futures[future] = (rev, executor)

            try:
                wait(futures)
            except KeyboardInterrupt:
                # forward SIGINT to any running executor processes and
                # cancel any remaining futures
                workers.shutdown(wait=False)
                pids = {}
                for future, (rev, _) in futures.items():
                    if future.running():
                        # if future has already been started by the scheduler
                        # we still have to wait until it tells us its PID
                        while rev not in pids:
                            rev, pid = pid_q.get()
                            pids[rev] = pid
                        os.kill(pids[rev], signal.SIGINT)
                    elif not future.done():
                        future.cancel()

            for future, (rev, executor) in futures.items():
                rev, executor = futures[future]

                try:
                    exc = future.exception()
                    if exc is None:
                        exec_result = future.result()
                        result[rev].update(
                            self._collect_executor(executor, exec_result)
                        )
                    elif not isinstance(exc, CheckpointKilledError):
                        logger.error(
                            "Failed to reproduce experiment '%s'", rev[:7]
                        )
                except CancelledError:
                    logger.error(
                        "Cancelled before attempting to reproduce experiment "
                        "'%s'",
                        rev[:7],
                    )
                finally:
                    executor.cleanup()

        return result
def parallel_nodes_cleaning(conn, module):
    client = conn.baremetal
    node_timeout = module.params['timeout']
    nodes = module.params['node_uuid'] + module.params['node_name']
    clean_steps = module.params['clean_steps']
    result = {}

    if module.params['raid_config']:
        for node in nodes:
            try:
                node_info = client.update_node(
                    node,
                    target_raid_config=module.params['raid_config']
                )
                result.update({node: {
                    'msg': 'Setting the raid configuration'
                           ' for node {} succeeded.'.format(node),
                    'failed': False,
                    'info': node_info,
                }})
            except exceptions.BadRequestException as e:
                result.update({node: {
                    'msg': 'Setting raid configuration'
                           ' for node {} failed. Error: {}'.format(
                               node,
                               str(e)
                            ),
                    'failed': True,
                    'error': str(e),
                    'info': {},
                }})
                nodes.pop(nodes.index(node))

    workers = min(len(nodes), module.params['concurrency']) or 1
    with futures.ThreadPoolExecutor(max_workers=workers) as executor:
        future_to_build = {
            executor.submit(
                client.set_node_provision_state,
                node,
                "clean",
                clean_steps=clean_steps,
                wait=True
            ): node for node in nodes
        }

        done, not_done = futures.wait(
            future_to_build,
            timeout=node_timeout,
            return_when=futures.ALL_COMPLETED
        )

    nodes_wait = list()
    for job in done:
        if job._exception:
            result.update(
                {
                    future_to_build[job]: {
                        'msg': 'Cleaning failed for node {}: {}'.format(
                            future_to_build[job],
                            str(job._exception)
                        ),
                        'failed': True,
                        'info': {}
                    }
                }
            )
        else:
            nodes_wait.append(future_to_build[job])
    else:
        if not_done:
            for job in not_done:
                result.update(
                    {
                        future_to_build[job]: {
                            'msg': 'Cleaning incomplete for node {}'.format(
                                future_to_build[job],
                            ),
                            'failed': True,
                            'info': {}
                        }
                    }
                )

    nodes_to_delete = []
    for node in nodes_wait:
        node_info = client.get_node(
            node,
            fields=['provision_state', 'last_error']
        ).to_dict()
        state = node_info['provision_state']
        if state == 'manageable':
            nodes_to_delete.append(node)
            result.update({node: {
                'msg': 'Successful cleaning for node %s' % node,
                'failed': False,
                'error': '',
                'info': node_info,
            }})
        elif state not in [
                'manageable', 'cleaning', 'clean wait', 'available']:
            nodes_to_delete.append(node)
            result.update({node: {
                'msg': 'Failed cleaning for node %s: %s' % (
                    node,
                    node_info['last_error'] or 'state %s' % state),
                'failed': True,
                'info': node_info,
            }})

    for node in nodes_to_delete:
        nodes_wait.remove(node)

    if nodes_wait:
        for node in nodes_wait:
            node_info = client.get_node(
                node,
                fields=['provision_state', 'last_error']
            ).to_dict()
            state = node_info['provision_state']
            result.update({node: {
                'msg': 'Timeout exceeded for node %s: '
                       'node is in state %s' % (node, state),
                'failed': True,
                'info': node_info,
            }})

    return result
Example #42
        def waitWorkers(workers):
            wait(workers, return_when=FIRST_EXCEPTION)

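            # yield results as they complete; result() re-raises the first worker exception encountered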
            for future in as_completed(workers):
                yield future.result()
Example #43
    def get_results(self) -> Iterable[ServerScanResult]:
        """Return completed server scans.
        """
        ongoing_scan_jobs = set()
        for queued_server_scan in self._queued_server_scans:
            ongoing_scan_jobs.update(queued_server_scan.all_queued_scan_jobs)

        while ongoing_scan_jobs:
            # Every 0.3 seconds, check for completed jobs
            all_completed_scan_jobs, _ = wait(ongoing_scan_jobs, timeout=0.3)

            # Check if a server scan has been fully completed
            for queued_server_scan in self._queued_server_scans:
                if not queued_server_scan.all_queued_scan_jobs.issubset(all_completed_scan_jobs):
                    # This server scan still has jobs ongoing; check the next one
                    continue

                # If we get here, all the jobs for a specific server scan have been completed
                # Generate the result for each scan command
                server_scan_results: ScanCommandResultsDict = {}
                server_scan_errors: ScanCommandErrorsDict = {}
                for scan_cmd, completed_scan_jobs in queued_server_scan.queued_scan_jobs_per_scan_command.items():
                    server_info = queued_server_scan.server_scan_request.server_info
                    implementation_cls = ScanCommandsRepository.get_implementation_cls(scan_cmd)
                    try:
                        result = implementation_cls.result_for_completed_scan_jobs(
                            server_info, list(completed_scan_jobs)
                        )
                        server_scan_results[scan_cmd] = result

                    # Process exceptions that may have been raised while the jobs were being completed
                    except ClientCertificateRequested as e:
                        error = ScanCommandError(
                            reason=ScanCommandErrorReasonEnum.CLIENT_CERTIFICATE_NEEDED,
                            exception_trace=TracebackException.from_exception(e),
                        )
                        server_scan_errors[scan_cmd] = error
                    except ConnectionToServerTimedOut as e:
                        error = ScanCommandError(
                            reason=ScanCommandErrorReasonEnum.CONNECTIVITY_ISSUE,
                            exception_trace=TracebackException.from_exception(e),
                        )
                        server_scan_errors[scan_cmd] = error
                    except Exception as e:
                        error = ScanCommandError(
                            reason=ScanCommandErrorReasonEnum.BUG_IN_SSLYZE,
                            exception_trace=TracebackException.from_exception(e),
                        )
                        server_scan_errors[scan_cmd] = error

                # Discard the corresponding jobs
                ongoing_scan_jobs.difference_update(queued_server_scan.all_queued_scan_jobs)

                # Lastly, return the fully completed server scan
                server_scan_errors.update(queued_server_scan.scan_command_errors_during_queuing)
                server_scan_result = ServerScanResult(
                    scan_commands_results=server_scan_results,
                    scan_commands_errors=server_scan_errors,
                    server_info=queued_server_scan.server_scan_request.server_info,
                    scan_commands=queued_server_scan.server_scan_request.scan_commands,
                    scan_commands_extra_arguments=queued_server_scan.server_scan_request.scan_commands_extra_arguments,
                )
                yield server_scan_result

        self._shutdown_thread_pools()
Example #44
    def _exec_attached(self, repo: "Repo", jobs: Optional[int] = 1):
        import signal
        from concurrent.futures import (
            CancelledError,
            ProcessPoolExecutor,
            wait,
        )
        from multiprocessing import Manager

        from dvc.stage.monitor import CheckpointKilledError

        result: Dict[str, Dict[str, str]] = defaultdict(dict)

        manager = Manager()
        pid_q = manager.Queue()

        with ProcessPoolExecutor(max_workers=jobs) as workers:
            futures = {}
            while self._queue:
                rev, executor = self._queue.popleft()
                infofile = self.get_infofile_path(rev)
                future = workers.submit(
                    executor.reproduce,
                    info=executor.info,
                    rev=rev,
                    queue=pid_q,
                    infofile=infofile,
                    log_level=logger.getEffectiveLevel(),
                )
                futures[future] = (rev, executor)
                self._attached[rev] = executor

            try:
                wait(futures)
            except KeyboardInterrupt:
                # forward SIGINT to any running executor processes and
                # cancel any remaining futures
                workers.shutdown(wait=False)
                pids = {}
                for future, (rev, _) in futures.items():
                    if future.running():
                        # if future has already been started by the scheduler
                        # we still have to wait until it tells us its PID
                        while rev not in pids:
                            rev, pid = pid_q.get()
                            pids[rev] = pid
                        os.kill(pids[rev], signal.SIGINT)
                    elif not future.done():
                        future.cancel()

            for future, (rev, executor) in futures.items():
                rev, executor = futures[future]

                try:
                    exc = future.exception()
                    if exc is None:
                        exec_result = future.result()
                        result[rev].update(
                            self._collect_executor(repo, executor,
                                                   exec_result))
                    elif not isinstance(exc, CheckpointKilledError):
                        logger.error("Failed to reproduce experiment '%s'",
                                     rev[:7])
                except CancelledError:
                    logger.error(
                        "Cancelled before attempting to reproduce experiment "
                        "'%s'",
                        rev[:7],
                    )
                finally:
                    self.cleanup_executor(rev, executor)

        return result
Example #45
def get_html(times):
    time.sleep(times)
    print("get page {} success".format(times))
    return times


executor = ThreadPoolExecutor(max_workers=2)
# submit() hands the function to the thread pool and returns immediately
# task1 = executor.submit(get_html, (3))
# task2 = executor.submit(get_html, (2))

# retrieve the return values of tasks that have completed
urls = [3, 2, 4]
all_task = [executor.submit(get_html, (url)) for url in urls]
wait(all_task, return_when=FIRST_COMPLETED)
print("main")
# for future in as_completed(all_task):
#     data = future.result()
#     print("get {} page".format(data))
# executor.map can also be used to get the values of completed tasks
# for data in executor.map(get_html, urls):
#     print("get {} page".format(data))

# # done() checks whether a task has finished
# print(task1.done())
# print(task2.cancel())
# time.sleep(3)
# print(task1.done())
#
# # result() retrieves the task's return value
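
A short runnable sketch of the done()/cancel()/result() calls described in the commented-out lines above, reusing the executor and get_html defined in this example (and assuming time is imported):

task1 = executor.submit(get_html, 3)
task2 = executor.submit(get_html, 2)
print(task1.done())    # False while the task is still queued or running
print(task2.cancel())  # True only if the task had not started yet
time.sleep(4)
print(task1.done())    # True once get_html(3) has finished
print(task1.result())  # blocks if necessary, then returns 3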
Example #46
    def extract(
        self,
        extract_dir,
        compressed_dir,
        compression_level=DEFAULT_COMPRESSION_LEVEL,
        max_workers=max(os.cpu_count() - 2, 1),
        recompress=False,
        generate_string_hashes=True,
        create_entity_sheets=True,
        extract_sound_extensions=None,
        reuse_extracted=False,
    ):
        unextracted = []

        if not reuse_extracted:
            for asset in self.assets:
                if asset.filepath is None:
                    # No known filepaths matched this asset.
                    unextracted.append(asset)
                    continue

                asset.load_data(self.exe_handle)

            with ThreadPoolExecutor(max_workers=max_workers) as pool:
                futures = [
                    pool.submit(
                        self._extract_single,
                        asset,
                        extract_dir,
                        compressed_dir,
                        self.key,
                        compression_level,
                        recompress,
                    ) for asset in self.assets if asset.filepath
                ]
                wait(futures, timeout=300)

        if generate_string_hashes:
            self.hash_strings(extract_dir)

        if create_entity_sheets:
            logger.info("Creating entity sprite sheets...")

            with open(BASE_DIR / "static/game_data/entities.json",
                      encoding="utf-8") as entities_file:
                entities_json = json.loads(entities_file.read())
            with open(BASE_DIR / "static/game_data/textures.json",
                      encoding="utf-8") as textures_file:
                textures_json = json.loads(textures_file.read())

            sprite_loaders = get_all_sprite_loaders(entities_json,
                                                    textures_json, extract_dir)
            sprite_mergers = get_all_sprite_mergers(entities_json,
                                                    textures_json, extract_dir)

            with ThreadPoolExecutor(max_workers=max_workers) as pool:
                futures = [
                    pool.submit(
                        self._merge_single_entity,
                        sprite_merger,
                        sprite_loaders,
                    ) for sprite_merger in sprite_mergers
                ]
                wait(futures, timeout=300)

            logger.info("Done creating entity sprite sheets...")

        if extract_sound_extensions:
            extract_soundbank(
                extract_dir / "soundbank.bank",
                extract_dir / "soundbank",
                extract_sound_extensions,
            )

        return unextracted
Example #47
        if not os.path.exists(download_dir + group_name):
            os.mkdir(group_name)

        if flag_chk != 0:
            start_seqno = 0
        else:
            start_seqno = checkpoint[group_name][1]

        checkpoint[str(group_name)] = [group_id, end_seqno]
        print(f"Get session from group:{group_name}.")
        get_pic_ind(group_name, group_id, start_seqno, end_seqno)

    print("Get sessions complete.")


if __name__ == '__main__':
    init()
    get_group_status()
    wait(list_get_pic_content)
    with open('checkpoint.json', 'w') as f:
        json.dump(checkpoint, f)
        f.close()
    if len(unknown_msg_type_list) > 0:
        print(
            "Warning: Unknown message type found. Please send generated file 'unknown_msg_list' to author. Thank you."
        )
        with open("unknown_msg_list.txt", 'w') as f:
            for msg in unknown_msg_type_list:
                f.write(str(msg) + "\n")
            f.close()
Example #48
            print(url + "解析出现问题!")

    except Exception as e:
        print(url)
        print(e)

    # return PDB_list


if __name__ == '__main__':
    det_l = []
    if sys.platform == "win32":
        splits = "\\"
    else:
        splits = "/"

    t = ThreadPoolExecutor(8)
    t_l = []
    for sub in range(1, 14):
        print(sub)
        # url = "http://ccsipb.lnu.edu.cn/bio_data_bacteria/index.php/c_front_fungus/queryById/F%d"%sub
        url = "http://ccsipb.lnu.edu.cn/bio_data_bacteria/index.php/c_front_plasmid/queryById/LNUP%03d" % sub
        w = t.submit(parse_detail, url)
        w.done()
        t_l.append(w)
    wait(t_l)
    print("===完成===")
    cursor = handle_sql.ConnSql()
    cursor.insert_plasmid_detail(det_l)
    cursor.finished()
Exemple #49
0
import socket
import time
import binascii
import random
from concurrent.futures import ThreadPoolExecutor, wait

HOST = '127.0.0.1'
PORT = 50018


def tcp_client():
    time.sleep(random.randint(1, 5))
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((HOST, PORT))
    print("connected remote", (HOST, PORT), "local", s.getsockname())
    request = bytearray([0x31, 0x32, 0x33, 0x34])

    time.sleep(random.randint(1, 5))
    s.sendall(request)
    response = s.recv(1024)
    print('received', binascii.hexlify(response), 'local', s.getsockname())

    time.sleep(random.randint(1, 5))
    print('closed local', s.getsockname())
    s.close()


# Create a thread pool
executor = ThreadPoolExecutor(max_workers=5)
# Run tcp_client several times
# executor.map(tcp_client, range(5))
all_task = [executor.submit(tcp_client) for _ in range(10)]
wait(all_task)
Exemple #50
0
 def wait(self, timeout=None):
     self.logger.info("Waiting for %s jobs to complete" %
                      len(self.future_records))
     futures.wait(self.future_records.values())
Exemple #51
0
                    tv_type_count += 1
                    print('Type %s' % tv_type_count)
            tv_letter_count += 1
            tv_id += 1
            print(tv_letter_count)


create_table_header()
construct_tv_queue()

list_task = []
for i in range(1, 9):
    task = executor.submit(parse_tv_list)
    list_task.append(task)
tv_queue.join()
print(wait(list_task))
print('================= List page parsing finished =====================')
parse_tv_list()
tv_queue.join()

result_task = []

for i in range(0, 8):
    task = executor.submit(parse_result)
    result_task.append(task)

tv_list_queue.join()
print(wait(result_task))
print('========== TV series parsing finished ============')
print('======= Writing finished ======')
wordbook.close()
Exemple #52
0
    except:
        print('Download timed out!')
        pass


if __name__ == "__main__":
    while True:
        w = int(
            input(
                'Which resolution do you want to download:\n1:2560x1440\n2:2560x1080\n3:1280x800\n4:3840x3072\n5:3840x2160\nEnter a number: '
            ))
        i = input('Which page do you want to download: ')
        if w == 1:
            url = f'https://wallhaven.cc/search?categories=111&purity=111&resolutions=2560x1440&sorting=date_added&order=desc&page={i}'
        elif w == 2:
            url = f'https://wallhaven.cc/search?categories=111&purity=111&resolutions=2560x1080&sorting=date_added&order=desc&page={i}'
        elif w == 3:
            url = f'https://wallhaven.cc/search?categories=111&purity=111&resolutions=1280x800&sorting=date_added&order=desc&page={i}'
        elif w == 4:
            url = f'https://wallhaven.cc/search?categories=111&purity=111&resolutions=3840x3072&sorting=date_added&order=desc&page={i}'
        elif w == 5:
            url = f'https://wallhaven.cc/search?categories=111&purity=111&resolutions=3840x2160&sorting=date_added&order=desc&page={i}'
        else:
            print('Please enter a valid number')

        html = toplist(url)
        ex = ThreadPoolExecutor(max_workers=24)  # thread pool: download with 24 worker threads
        future = [ex.submit(down_pic, toplist(url)) for url in fenxi(html)]
        wait(future, return_when=ALL_COMPLETED)
        print('All images downloaded!')
Exemple #53
0
def wait_any_future_available(seq: Sequence[Future]) -> None:  # type: ignore
    """Wait until any future became available."""
    wait(fs=seq, timeout=None, return_when=FIRST_COMPLETED)
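A minimal usage sketch (not from the original source): it assumes the helper above is available together with `ThreadPoolExecutor` from `concurrent.futures`; the task function and delays below are made up for illustration.

from concurrent.futures import ThreadPoolExecutor
import time


def _task(delay):  # hypothetical task used only for this sketch
    time.sleep(delay)
    return delay


with ThreadPoolExecutor(max_workers=3) as pool:
    futs = [pool.submit(_task, d) for d in (3, 1, 2)]
    wait_any_future_available(futs)  # returns as soon as the fastest task finishes
    print([f.result() for f in futs if f.done()])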
Exemple #54
0
    classes = type.__subclasses__()
    for cls in classes:
        processors[cls.name()] = cls

# Load sources from config and create respective connectors
connectors = []
for source in config["connectors"]:
    connector_config = source["config"]
    if source["type"] == "mqtt":
        connector_class = MQTTConnector
    elif source["type"] == "mqtt-recording":
        connector_class = MQTTRecordingConnector
    else:
        continue

    processor_type = source["processor"]["type"]
    processor_config = {
        **source["processor"]["config"], "h3": {
            **config["h3"]
        }
    }
    processor = processors[processor_type](processor_config)

    connector = connector_class(connector_config, processor)
    connector.set_producer(producer_class, producer_config, protobuf_format)
    connectors.append(connector)

# Start all connectors
with ThreadPoolExecutor() as executor:
    wait([executor.submit(connector.start) for connector in connectors])
Exemple #55
0
    threads = ThreadPoolExecutor(numThread)
    futures = []
    thStart = datetime.now()

    with ThreadPoolExecutor(max_workers=numThread) as executor:
    
        for cate_key in cate_dict.keys():
            cate_value = int(cate_key.split("-")[0])
            content_list = []
            for url in cate_dict[cate_key]:
                futures.append(threads.submit(prod_crawler, url, cate_value))
            
            with open('gt_prod_%s_%s.json' % (cate_value, today), 'w') as f:
                f.write(json.dumps(content_list, ensure_ascii=False, indent=4))
            print(cate_key + " done")

        
    wait(futures)
    thEnd = datetime.now()
    timeSpent = str(thEnd - thStart).split('.')[0]
        
    print("執行緒:" + str(numThread))
    
    prod_num = ""
    for cate_key in cate_dict.keys():
        prod_num += len(cate_dict[cate_key])
        
    print("商品數:" + str(prod_num))
    print("耗時:" + timeSpent)

Exemple #56
0
 def wait(self, timeout=None):
     wait([self.f], timeout)
Exemple #57
0
 def test_wait(self):
     done, not_done = futures.wait([QUEUED_RESULT, FINISHED_RESULT],
                                   return_when=futures.FIRST_COMPLETED)
     self.assertEqual(set([FINISHED_RESULT]), done)
     self.assertEqual(set([QUEUED_RESULT]), not_done)
Exemple #58
0
 def wait(self, *args, **kwargs):
     return futures.wait(self.pending.values(), *args, **kwargs)
Exemple #59
0
import sys
from multiprocessing import cpu_count  # assumed import; the original may use os.cpu_count() instead
from concurrent.futures import (CancelledError, FIRST_COMPLETED,
                                ThreadPoolExecutor, wait)


def processInParallel(jobs):
    for package_name, job in jobs.items():
        job['depends'] = [n for n in job['depends'] if n in jobs.keys()]
    max_workers = cpu_count()
    threadpool = ThreadPoolExecutor(max_workers=max_workers)
    futures = {}
    finished_jobs = {}
    rc = 0
    while jobs or futures:
        # take "ready" jobs
        ready_jobs = []
        for package_name, job in jobs.items():
            if len(futures) + len(ready_jobs) >= max_workers:
                # don't schedule more jobs than workers
                # to prevent starting further jobs when a job fails
                break
            if not set(job['depends']) - set(finished_jobs.keys()):
                ready_jobs.append((package_name, job))
        for package_name, _ in ready_jobs:
            del jobs[package_name]

        # pass them to the executor
        for package_name, job in ready_jobs:
            future = threadpool.submit(job['callback'], job['opts'])
            futures[future] = package_name

        # wait for futures
        assert futures
        done_futures, _ = wait(futures.keys(),
                               timeout=60,
                               return_when=FIRST_COMPLETED)

        if not done_futures:  # timeout
            print('[Waiting for: %s]' % ', '.join(sorted(futures.values())))

        # check result of done futures
        for done_future in done_futures:
            package_name = futures[done_future]
            del futures[done_future]
            try:
                result = done_future.result()
            except CancelledError:
                # since the job hasn't been cancelled before completing
                continue
            except (Exception, SystemExit) as e:
                print('%s in %s: %s' % (type(e).__name__, package_name, e),
                      file=sys.stderr)
                import traceback
                traceback.print_exc()
                result = 1
            finished_jobs[package_name] = result
            if result and not rc:
                rc = result

        # if any job failed cancel pending futures
        if rc:
            for future in futures:
                future.cancel()
            break

    threadpool.shutdown()

    if any(finished_jobs.values()):
        failed_jobs = {
            package_name: result
            for (package_name, result) in finished_jobs.items() if result
        }
        print('Failed packages: ' + ', '.join(failed_jobs),
              file=sys.stderr)

    return rc
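A minimal usage sketch (not part of the original source): the job-dict shape is inferred from how processInParallel reads it ('depends', 'callback', 'opts'); the package names and the build callback below are hypothetical.

def _build(opts):  # hypothetical callback; return 0 for success, non-zero for failure
    print('building', opts['name'])
    return 0


jobs = {
    'libfoo': {'depends': [], 'callback': _build, 'opts': {'name': 'libfoo'}},
    'barapp': {'depends': ['libfoo'], 'callback': _build, 'opts': {'name': 'barapp'}},
}
rc = processInParallel(jobs)  # 'barapp' is only scheduled once 'libfoo' has finished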
Exemple #60
0
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED, FIRST_COMPLETED
import time, threading

# The parameter `times` simulates how long a network request takes
def get_html(times):
    print(time.strftime("%y-%m-%d %H:%M:%S"))
    print(str(threading.current_thread()) + "  start")
    time.sleep(times)
    print(str(threading.current_thread()) + "  end")

executor = ThreadPoolExecutor(max_workers=3)
urls = [3, 2, 4]  # not real URLs, just simulated request durations
all_task = [executor.submit(get_html, url) for url in urls]
wait(all_task, return_when=ALL_COMPLETED)
print("main")