Code Example #1
File: stats_poller.py Project: AsherBond/scalr
def farm_process(tasks):
    if not tasks:
        return

    try:
        servs_pool = pool.ThreadPool(processes=CONFIG['serv_thrds'])
        rrd_pool = pool.ThreadPool(processes=CONFIG['rrd_thrds'])
        results = servs_pool.map(server_thread, [(t, rrd_pool) for t in tasks])
        servs_pool.close()

        if not results:
            return

        ra, fa, rs, fs = post_processing(results)

        for k, v in ra.iteritems():
            rrd_pool.map_async(RRDWorker().work, [{'ra': {k: v}}])

        for k, v in fa.iteritems():
            rrd_pool.map_async(RRDWorker().work, [{'fa': {k: v}}])

        for k, v in rs.iteritems():
            rrd_pool.map_async(RRDWorker().work, [{'rs': {k: v}}])

        for k, v in fs.iteritems():
            rrd_pool.map_async(RRDWorker().work, [{'fs': {k: v}}])
    except:
        LOG.error(helper.exc_info())
    finally:
        servs_pool.close()
        servs_pool.join()
        rrd_pool.close()
        rrd_pool.join()
Code Example #2
def multithreaded_call(
        fn: Callable[..., Any],
        args: Iterable[Tuple[Any, ...]],
        worker_count: int = 0,
        thread_pool: Optional[pool.ThreadPool] = None) -> List[Any]:
    """Runs the given function in a thread pool and returns the results.

  Args:
    fn: Function to call in parallel.
    args: An iterable of argument list to call fn with.
    worker_count: Number of thread workers to launch.
    thread_pool: Optional thread pool to use instead of creating a new one.

  Returns:
    Function results in a list.
  """
    if not thread_pool:
        if worker_count:
            threads = pool.ThreadPool(worker_count)
        else:
            threads = pool.ThreadPool()
    else:
        threads = thread_pool
    results = threads.starmap(fn, args)
    if not thread_pool:
        threads.close()
        threads.join()
    return list(results)
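For reference, a minimal usage sketch of the helper above (the multiply worker and its argument tuples are illustrative, and pool is assumed to be multiprocessing.pool):

from multiprocessing import pool

def multiply(x, y):
    # Hypothetical worker: multiplies the two positional arguments.
    return x * y

# Each tuple in args is unpacked into fn's positional arguments via starmap.
results = multithreaded_call(multiply, [(1, 2), (3, 4)], worker_count=2)
assert results == [2, 12]

# Alternatively, reuse an externally managed pool; the helper then leaves it open.
shared = pool.ThreadPool(4)
results = multithreaded_call(multiply, [(5, 6)], thread_pool=shared)
shared.close()
shared.join()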
Code Example #3
File: diagnostics.py Project: zmoon111/universe
    def __init__(self, n, probe_key, ignore_clock_skew=False, metadata_encoding=None, disable_action_probes=False):
        # Each QR code takes about 1ms (and updates at 5fps). We do
        # our best to ensure the QR is processed in time for the next
        # step call (n/16 would put us right at the threshold).
        self.pool = pool.ThreadPool(max(int(n/4), 1))
        self.qr_pool = pool.ThreadPool(max(int(n/8), 1))
        self.lock = threading.RLock()

        self.instance_n = [None] * n
        self.ignore_clock_skew = ignore_clock_skew
        self.disable_action_probes = disable_action_probes

        self.metadata_encoding = metadata_encoding

        self.update(probe_key=probe_key, metadata_encoding=metadata_encoding)
Code Example #4
    def scaling_metrics(self):
        """
        :return: list of scaling metrics
        :rtype: list

        Example::
            [{
                'id': 101011,
                'name': 'jmx.scaling',
                'value': 1,
                'error': None
            }, {
                'id': 202020,
                'name': 'app.poller',
                'value': None,
                'error': 'Couldnt connect to host'
            }]
        """

        # Obtain scaling metrics from Scalr.
        scaling_metrics = bus.queryenv_service.get_scaling_metrics()
        if not scaling_metrics:
            return []

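        # Presumably a workaround for a known CPython bug (issue 10015):
        # creating a multiprocessing.pool.ThreadPool from a thread that the
        # threading module did not create (a "dummy" thread) raises
        # AttributeError because current_thread() has no _children attribute.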
        if not hasattr(threading.current_thread(), '_children'):
            threading.current_thread()._children = weakref.WeakKeyDictionary()

        wrk_pool = pool.ThreadPool(processes=10)

        try:
            return wrk_pool.map_async(_ScalingMetricStrategy.get,
                                      scaling_metrics).get()
        finally:
            wrk_pool.close()
            wrk_pool.join()
Code Example #5
File: parallelcc.py Project: mhahn0106/cisa
def parallelCCompiler(self,
                      sources,
                      output_dir=None,
                      macros=None,
                      include_dirs=None,
                      debug=0,
                      extra_preargs=None,
                      extra_postargs=None,
                      depends=None):
    """
	Monkey-patch for parallel compilation with distutils.
	"""

    # those lines are copied from distutils.ccompiler.CCompiler directly
    macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
        output_dir, macros, include_dirs, sources, depends, extra_postargs)
    cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)

    def _single_compile(obj):
        try:
            src, ext = build[obj]
        except KeyError:
            return
        self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)

    # convert to list, imap is evaluated on-demand
    list(pool.ThreadPool(cpu_count()).imap(_single_compile, objects))

    return objects
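This is the well-known distutils parallel-compile monkey-patch; a minimal sketch of how it is typically installed (not part of the snippet above, and assuming the project builds extensions through distutils' CCompiler):

import distutils.ccompiler

# Swap the stock serial compile() for the threaded version defined above so
# that extensions built afterwards compile their source files in parallel.
distutils.ccompiler.CCompiler.compile = parallelCCompiler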
Code Example #6
    def scaling_metrics(self):
        '''
        @return list of scaling metrics
        @rtype: list
        
        Sample: [{
            'id': 101011, 
            'name': 'jmx.scaling', 
            'value': 1, 
            'error': None
        }, {
            'id': 202020,
            'name': 'app.poller',
            'value': None,
            'error': 'Couldnt connect to host'
        }]
        '''

        # Obtain scaling metrics from Scalr.
        scaling_metrics = bus.queryenv_service.get_scaling_metrics()
        
        max_threads = 10
        wrk_pool = pool.ThreadPool(processes=max_threads)

        try:
            return wrk_pool.map_async(_ScalingMetricStrategy.get, scaling_metrics).get()
        finally:
            wrk_pool.close()
            wrk_pool.join()
Code Example #7
    def Execute(self, thread_count, urns=None):
        """Runs the migration with a given thread count."""

        if urns is None:
            blob_urns = list(aff4.FACTORY.ListChildren("aff4:/blobs"))
        else:
            blob_urns = [rdfvalue.RDFURN(urn) for urn in urns]

        sys.stdout.write("Blobs to migrate: {}\n".format(len(blob_urns)))
        sys.stdout.write("Threads to use: {}\n".format(thread_count))

        self._total_count = len(blob_urns)
        self._migrated_count = 0
        self._start_time = rdfvalue.RDFDatetime.Now()

        batches = collection.Batch(blob_urns, _BLOB_BATCH_SIZE)

        self._Progress()
        tp = pool.ThreadPool(processes=thread_count)
        tp.map(self._MigrateBatch, list(batches))
        self._Progress()

        if self._migrated_count == self._total_count:
            message = "\nMigration has been finished (migrated {} blobs).\n".format(
                self._migrated_count)
            sys.stdout.write(message)
        else:
            message = "Not all blobs have been migrated ({}/{})".format(
                self._migrated_count, self._total_count)
            raise AssertionError(message)
Code Example #8
    def test_multi_instance(self):
        """Validates that two instances of the env can run in the same thread."""
        tpool = pool.ThreadPool(processes=2)
        run1 = tpool.apply_async(self.check_determinism)
        run2 = tpool.apply_async(self.check_determinism)
        run1.get()
        run2.get()
Code Example #9
    def HandleEventsOnce(self, mv):
        """One turn through the loop.  Separated out for unit testing.

        @param mv: an instance of manifest_versions.ManifestVersions.
        @raise EnumeratorException if we can't enumerate any supported boards.
        """
        boards = self._enumerator.Enumerate()
        logging.info('%d boards currently in the lab: %r', len(boards), boards)
        thread_pool = pool.ThreadPool(POOL_SIZE)
        with contextlib.closing(thread_pool):
            for e in self._events.itervalues():
                if not e.ShouldHandle():
                    continue
                logging.info('Handling %s event for %d boards', e.keyword,
                             len(boards))
                args = []
                for board in boards:
                    args.append({
                        'scheduler': self._scheduler,
                        'event': e,
                        'board': board
                    })
                thread_pool.map(self.HandleBoard, args)
                logging.info('Finished handling %s event for %d boards',
                             e.keyword, len(boards))
                e.UpdateCriteria()
Code Example #10
File: periodic.py Project: Frostman/rally
    def _run_scenario(self, cls, method_name, context, args):

        times = self.config["times"]
        period = self.config["period"]
        timeout = self.config.get("timeout", 600)

        async_results = []

        for i in range(times):
            pool = multiprocessing_pool.ThreadPool(processes=1)
            scenario_args = ((i, cls, method_name,
                              base._get_scenario_context(context), args), )
            async_result = pool.apply_async(base._run_scenario_once,
                                            scenario_args)
            async_results.append(async_result)

            if i < times - 1:
                time.sleep(period)

        results = []
        for async_result in async_results:
            try:
                result = async_result.get(timeout=timeout)
            except multiprocessing.TimeoutError as e:
                result = {
                    "duration": timeout,
                    "idle_duration": 0,
                    "error": utils.format_exc(e)
                }
            results.append(result)

        return base.ScenarioRunnerResult(results)
Code Example #11
def main4():
    pool = mp.ThreadPool(5)
    for i in range(5):
        gen = simple_generator2(i)
        pool.apply_async(generator_caller, (gen,))
    pool.close()
    pool.join()
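The helpers used above are not included in the snippet (and mp is presumably multiprocessing.pool); purely illustrative stand-ins consistent with how they are called would be a generator factory plus a caller that drains the generator inside the worker thread:

def simple_generator2(i):
    # Illustrative stand-in: yield a handful of values derived from i.
    for j in range(3):
        yield (i, j)

def generator_caller(gen):
    # Illustrative stand-in: consume the generator inside the worker thread.
    return list(gen)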
Code Example #12
File: setup.py Project: yuguangyuan/OpenMOC
def parallel_compile(self,
                     sources,
                     output_dir=None,
                     macros=None,
                     include_dirs=None,
                     debug=0,
                     extra_preargs=None,
                     extra_postargs=None,
                     depends=None):
    """A parallel version of the Distutils compile method

  Note that this routine is modified from StackOverflow post #11013851
  """

    # Copy args from distutils.ccompiler.CCompiler directly
    macros, objects, extra_postargs, pp_opts, build = \
         self._setup_compile(output_dir, macros, include_dirs,
                             sources, depends, extra_postargs)
    cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
    num_cpus = multiprocessing.cpu_count()

    # Define routine for each thread to use to compile on its own
    def _single_compile(obj):
        try:
            src, ext = build[obj]
        except KeyError:
            return
        self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)

    # Convert thread mapping to C/C++/CUDA objects to a list and return
    list(pool.ThreadPool(num_cpus).map(_single_compile, objects))
    return objects
Code Example #13
    def __init__(self,
                 directory,
                 image_data_generator=None,
                 target_size=(256, 256),
                 color_mode='rgb',
                 classes=None,
                 class_mode='categorical',
                 batch_size=32,
                 shuffle=True,
                 data_format='channels_last',
                 follow_links=False,
                 subset=None,
                 interpolation='nearest',
                 tfrecord='dataset.tfr',
                 num_copies=3,
                 dtype='float32'):
        self.set_processing_attrs(image_data_generator, target_size,
                                  color_mode, data_format, interpolation,
                                  tfrecord, num_copies)

        self.directory = directory
        self.white_list_formats = 'png'

        if not classes:
            classes = []
            for subdir in sorted(os.listdir(directory)):
                if os.path.isdir(os.path.join(directory, subdir)):
                    classes.append(subdir)
        self.num_classes = len(classes)
        self.class_indices = dict(zip(classes, range(len(classes))))

        p = pool.ThreadPool()
        # Second, build an index of the images
        # in the different class subfolders.
        results = []
        self.filenames = []
        i = 0
        for dirpath in (os.path.join(directory, subdir) for subdir in classes):
            results.append(
                p.apply_async(_list_valid_filenames_in_directory,
                              (dirpath, self.white_list_formats,
                               self.class_indices, False)))
        classes_list = []
        for res in results:
            classes, filenames = res.get()
            classes_list.append(classes)
            self.filenames += filenames
        self.samples = len(self.filenames)
        self.classes = np.zeros((self.samples, ), dtype='int32')
        for classes in classes_list:
            self.classes[i:i + len(classes)] = classes
            i += len(classes)

        print('Found %d images belonging to %d classes.' %
              (self.samples, self.num_classes))
        p.close()
        p.join()
        self._filepaths = [
            os.path.join(self.directory, fname) for fname in self.filenames
        ]
Code Example #14
File: cluster.py Project: rstutsman/splinter
    def execute(self, user, cmd):
        tpool = pool.ThreadPool(processes=len(self.__hosts))
        async_results = []
        for host in self.__hosts:

            def wrapExecute(host, user, cmd):
                try:
                    host.execute(user, cmd)
                except ex.SubprocessException as e:
                    return e

            async_result = tpool.apply_async(wrapExecute, (host, user, cmd))
            async_results.append(async_result)

        for async_result in async_results:
            return_val = async_result.get()
            if return_val == None:
                continue
            elif isinstance(return_val, ex.SubprocessException):
                raise return_val
            elif isinstance(return_val, Exception):
                raise return_val
            else:
                raise Exception(
                    'Unrecognized return value from ssh: {0}'.format(
                        str(return_val)))
Code Example #15
def _file_per_document(exportfile):
    if not os.path.exists(exportfile):
        print "%s doesn't exist!" % exportfile
        return
    dirs, _ = os.path.split(exportfile)
    docspath = os.path.join(dirs, 'documents')
    ensure_dirpath(docspath)
    expfile = open(exportfile, 'r')

    def wat(ammapobject):
        x = True
        while x:
            ablob = ammapobject.readline()
            if ablob:
                yield ablob
            else:
                x = False

    tpool = pool.ThreadPool(pool.cpu_count() * 64)
    gettingweird = wat(mmap.mmap(expfile.fileno(), 0, prot=mmap.PROT_READ))
    job = tpool.imap_unordered(
        _fpd, itertools.izip_longest(gettingweird, (), fillvalue=docspath))
    while True:
        try:
            job.next()
        except Exception:
            return
Code Example #16
File: windows.py Project: papagalu/cloudbase-init-ci
    def _run_command(protocol_client, shell_id, command,
                     command_type=util.POWERSHELL,
                     upper_timeout=CONFIG.argus.upper_timeout):
        command_id = None
        bare_command = command
        thread_pool = pool.ThreadPool(processes=THREADS)

        command = util.get_command(command, command_type)

        try:
            command_id = protocol_client.run_command(shell_id, command)

            result = thread_pool.apply_async(
                protocol_client.get_command_output,
                args=(shell_id, command_id))
            stdout, stderr, exit_code = result.get(
                timeout=upper_timeout)
            if exit_code:
                output = "\n\n".join([out for out in (stdout, stderr) if out])
                raise exceptions.ArgusError(
                    "Executing command {command!r} with encoded Command"
                    "{encoded_command!r} failed with exit code {exit_code!r}"
                    " and output {output!r}."
                    .format(command=bare_command,
                            encoded_command=command,
                            exit_code=exit_code,
                            output=output))

            return util.sanitize_command_output(stdout), stderr, exit_code
        except multiprocessing.TimeoutError:
            raise exceptions.ArgusTimeoutError(
                "The command '{cmd}' has timed out.".format(cmd=bare_command))
        finally:
            thread_pool.terminate()
            protocol_client.cleanup_command(shell_id, command_id)
Code Example #17
File: test_mp_speed.py Project: vacancy/TensorArtist
def test_mt():
    pool = mppool.ThreadPool(4)
    start_time = time.time()
    lengths = pool.map(worker, range(4))
    finish_time = time.time()
    print('Multithreading: total_length={}, time={:.2f}s.'.format(
        sum(lengths), finish_time - start_time))
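The worker function is defined elsewhere in this benchmark; a plausible stand-in (an assumption, not the project's code) that keeps sum(lengths) meaningful is a function that builds some data and reports how much it produced:

def worker(index):
    # Illustrative stand-in: do some CPU-bound work and return how much was done.
    data = [x * x for x in range((index + 1) * 250000)]
    return len(data)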
Code Example #18
    def test_out_of_order_execution2(self):
        with self.test_session() as session:
            batcher = dynamic_batching._Batcher(minimum_batch_size=1,
                                                maximum_batch_size=1,
                                                timeout_ms=None)

            tp = pool.ThreadPool(10)
            r0 = tp.apply_async(session.run, batcher.compute([[1]],
                                                             [tf.int32]))
            (input0, ), computation_id0 = session.run(
                batcher.get_inputs([tf.int32]))
            r1 = tp.apply_async(session.run, batcher.compute([[2]],
                                                             [tf.int32]))
            (input1, ), computation_id1 = session.run(
                batcher.get_inputs([tf.int32]))

            self.assertAllEqual([1], input0)
            self.assertAllEqual([2], input1)

            # These two runs are switched from testOutOfOrderExecution1.
            session.run(batcher.set_outputs([input1 + 42], computation_id1))
            session.run(batcher.set_outputs([input0 + 42], computation_id0))

            self.assertAllEqual([43], r0.get())
            self.assertAllEqual([44], r1.get())
Code Example #19
def main():

  # Init database

  conn = sqlite3.connect('data.db')

  c = conn.cursor()
  c.execute('CREATE TABLE IF NOT EXISTS game_urls (url text, data text)')
  c.execute('CREATE UNIQUE INDEX IF NOT EXISTS  unique_url ON game_urls (url)')
  conn.commit()

  p = pool.ThreadPool(8)

  urls = sorted(url for (url,) in conn.execute('SELECT url FROM game_urls WHERE data = ""'))
  print('{} games to fetch'.format(len(urls)))


  writer = GameWriter(conn)
  try:
    for url, game_data in p.imap_unordered(RetrieveGameData, urls):
      if not game_data:
        print('Failed to update {}'.format(url))
        continue
      writer.Insert(url, game_data)
  finally:
    writer.Flush()
Code Example #20
def farm_process(tasks):
    servs_pool = pool.ThreadPool(processes=config['serv_thrds'])

    try:
        results = servs_pool.map_async(server_thread, tasks).get()
    except Exception:
        logger.exception('Exception')
    finally:
        servs_pool.close()
        servs_pool.join()

    try:
        ra, fa, rs, fs = post_processing(results)

        global rrd_queue
        for k, v in ra.iteritems():
            rrd_queue.put({'ra': {k: v}})

        for k, v in fa.iteritems():
            rrd_queue.put({'fa': {k: v}})

        for k, v in rs.iteritems():
            rrd_queue.put({'rs': {k: v}})

        for k, v in fs.iteritems():
            rrd_queue.put({'fs': {k: v}})
    except Exception:
        logger.exception('Exception')
Code Example #21
    def test_input_shapes_should_be_equal(self):
        with self.test_session() as session:

            @dynamic_batching.batch_fn
            def f(a, b):
                return a + b

            output0 = f(tf.constant([1]), tf.constant([2]))
            output1 = f(tf.constant([[2]]), tf.constant([3]))

            tp = pool.ThreadPool(2)
            f0 = tp.apply_async(session.run, [output0])
            f1 = tp.apply_async(session.run, [output1])

            time.sleep(_SLEEP_TIME)

            coord = tf.train.Coordinator()
            tf.train.start_queue_runners(coord=coord)

            with self.assertRaises(tf.errors.CancelledError):
                f0.get()
                f1.get()

            with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                         'Shapes of inputs much be equal'):
                coord.join()
Code Example #22
File: metadata.py Project: kenorb-contrib/scalarizr
    def _resolve_once_providers(self):
        if self._providers_resolved:
            return
        votes = VoteDict()
        for pvd in self.providers:
            votes[pvd] = VoteCapabilityDict.fromkeys(self.capabilities, 0)

        def vote(pvd):
            try:
                pvd.vote(votes)
            except:
                LOG.debug('{0}.vote raised: {1}'.format(
                    pvd.__class__.__name__,
                    sys.exc_info()[1]))

        pool = process_pool.ThreadPool(processes=len(self.providers))
        try:
            pool.map(vote, self.providers)
        finally:
            pool.close()
        for cap in self.capabilities:
            cap_votes = ((pvd, votes[pvd][cap]) for pvd in votes)
            cap_votes = sorted(cap_votes, key=operator.itemgetter(1))
            pvd, vote = cap_votes[-1]
            if not vote:
                pvd = self._nodata_pvd
            LOG.debug("provider for '{0}': {1}".format(cap, pvd))
            self.provider_for_capability[cap] = pvd
        self._providers_resolved = True
Code Example #23
    def test_two(self):
        with self.test_session() as session:

            @dynamic_batching.batch_fn
            def f(a, b):
                batch_size = tf.shape(a)[0]
                return a + b, tf.tile([batch_size], [batch_size])

            output0 = f(tf.constant([1]), tf.constant([2]))
            output1 = f(tf.constant([2]), tf.constant([3]))

            tp = pool.ThreadPool(2)
            f0 = tp.apply_async(session.run, [output0])
            f1 = tp.apply_async(session.run, [output1])

            # Make sure both inputs are in the batcher before starting it.
            time.sleep(_SLEEP_TIME)

            tf.train.start_queue_runners()

            result0, batch_size0 = f0.get()
            result1, batch_size1 = f1.get()

            self.assertAllEqual([3], result0)
            self.assertAllEqual([2], batch_size0)
            self.assertAllEqual([5], result1)
            self.assertAllEqual([2], batch_size1)
Code Example #24
def main():
    """ Main script """
    try:
        username, password, url, collection_name, cluster_id, ranker_id, relevance_path,\
            output_path, num_threads, fl, use_debug = parse_args()
        if use_debug:
            logger.info('Setting logger to level=DEBUG')
            logger.setLevel(logging.DEBUG)

        relevance_dict, (succ_rows, fail_rows) = read_relevance_file(relevance_path)
        logger.info('Total Number of Queries in Relevance File = %d' % (succ_rows + fail_rows))
        logger.info('Total Number of Queries being sent to re-rank API = %d' % succ_rows)
        if ranker_id:
            thread_obj = RetrieveAndRankQueryThread(username, password, url, \
                collection_name, cluster_id, ranker_id, fl=fl)
        else:
            thread_obj = SolrThread(username, password, url, collection_name, \
                cluster_id, fl=fl)
        thread_pool = multi_pool.ThreadPool(processes=num_threads)
        question_results = thread_pool.map_async(func=thread_obj,
                                                 iterable=[{'query': q} for (q) in relevance_dict.iteritems()]).get()
        print ('Responses retrieved from Retrieve and Rank')
        experiment_entries = create_experiment_object(question_results, relevance_dict)
        experiment_metadata = {'ranker_id': ranker_id, 'solr_collection': collection_name, 'solr_cluster_id': cluster_id,
                               'username': username, 'password': password, 'url': url, 'time':str(datetime.datetime.now())}
        experiment_obj = dict(experiment_entries=experiment_entries, experiment_metadata=experiment_metadata)
        print ('Writing results to output_path=%r' % output_path)
        with open(output_path, 'wt') as outfile:
            json.dump(experiment_obj, outfile)
        print ('Exiting with status code 0')
        sys.exit(0)
    except Exception as e:
        logging.warning('Exception %r in main thread' % e)
        print ('Exiting with status code 1')
        sys.exit(1)
Code Example #25
File: builder.py Project: stheid/fuzzbench
def retry_build_loop(build_func: Callable, inputs: List[Tuple],
                     num_concurrent_builds: int) -> List:
    """Calls |build_func| in parallel on |inputs|. Repeat on failures up to
    |NUM_BUILD_RETRIES| times. Returns the list of inputs that |build_func| was
    called successfully on."""
    successes = []
    logs.info('Concurrent builds: %d.', num_concurrent_builds)
    with mp_pool.ThreadPool(num_concurrent_builds) as pool:
        for _ in range(NUM_BUILD_RETRIES):
            logs.info('Building using (%s): %s', build_func, inputs)
            results = pool.starmap(build_func, inputs)
            curr_successes, curr_failures = split_successes_and_failures(
                inputs, results)

            logs.info('Build successes: %s', curr_successes)
            successes.extend(curr_successes)
            if not curr_failures:
                break

            logs.error('Build failures: %s', curr_failures)
            inputs = curr_failures
            sleep_interval = random.uniform(1, BUILD_FAIL_WAIT)
            logs.info('Sleeping for %d secs before retrying.', sleep_interval)
            time.sleep(sleep_interval)

    return successes
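split_successes_and_failures is imported from elsewhere in fuzzbench; a minimal sketch consistent with how it is used here (bucketing each input by the truthiness of its result) could look like:

from typing import List, Tuple

def split_successes_and_failures(inputs: List[Tuple],
                                 results: List) -> Tuple[List[Tuple], List[Tuple]]:
    # Sketch only: pair each input with its result and split by outcome.
    successes = [inp for inp, result in zip(inputs, results) if result]
    failures = [inp for inp, result in zip(inputs, results) if not result]
    return successes, failures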
Code Example #26
def solve(din):
    prog = list(din)
    tp, *mqs = pool.ThreadPool(processes=2), Queue(), Queue()
    tp.apply_async(run, (0, prog, mqs))
    p1 = tp.apply_async(run, (1, prog, mqs))

    return run(0, prog, None), p1.get()
Code Example #27
    def MigrateClients(self, client_urns):
        """Migrates entire VFS of given client list to the relational data store."""
        self._start_time = rdfvalue.RDFDatetime.Now()

        self._client_urns_to_migrate = client_urns
        self._client_urns_migrated = []
        self._client_urns_failed = []

        to_migrate_count = len(self._client_urns_to_migrate)
        sys.stdout.write("Clients to migrate: {}\n".format(to_migrate_count))

        batches = collection.Batch(client_urns, self.client_batch_size)

        tp = pool.ThreadPool(processes=self.thread_count)
        tp.map(self.MigrateClientBatch, list(batches))

        migrated_count = len(self._client_urns_migrated)
        sys.stdout.write("Migrated clients: {}\n".format(migrated_count))

        if to_migrate_count == migrated_count:
            sys.stdout.write("All clients migrated successfully!\n")
        else:
            message = "Not all clients have been migrated ({}/{})".format(
                migrated_count, to_migrate_count)
            raise RuntimeError(message)
Code Example #28
    def _invoke_prefetch(self):
        assert self._next is None
        if not self._repeat and self.epoch > 0:
            return
        if self._pool is None:
            self._pool = pool.ThreadPool(self.n_threads)
        n = len(self.dataset)
        i = self.current_position

        order = self._order
        args = []
        dataset = self.dataset
        epoch = self.epoch
        is_new_epoch = False
        for _ in six.moves.range(self.batch_size):
            index = i if order is None else order[i]
            args.append((dataset, index))
            i += 1
            if i >= n:
                epoch += 1
                is_new_epoch = True
                i = 0
                if not self._repeat:
                    break
                if order is not None:
                    # We cannot shuffle the order directly here, since the
                    # iterator may be serialized before the prefetched data are
                    # consumed by the user, in which case an inconsistency
                    # appears.
                    order = order.copy()
                    numpy.random.shuffle(order)

        self._next = self._pool.map_async(MultithreadIterator._read, args)
        self._next_state = (i, epoch, is_new_epoch, order)
Code Example #29
def main():
    blog_urls = load_blog_urls()
    pool = mpool.ThreadPool(THREAD_COUNT)
    for blog_url in blog_urls:
        pool.apply_async(parse_blog, args=(blog_url, ))
    pool.close()
    pool.join()
Code Example #30
File: sampling_pool.py Project: GeoMop/MLMC
    def __init__(self, n_thread, work_dir=None, debug=False):
        super().__init__(n_thread, work_dir=work_dir, debug=debug)
        self._pool = pool.ThreadPool(n_thread)
        self._failed_queues = {}
        self._queues = {}
        self._n_running = 0
        self.times = {}