Example #1
def GetSemaLock(useglobal=True):
	""" Return the semaphore and lock objects needed for multi threading."""
	global NCORES
	global MAXCORES
	global Global_Sema
	if min(NCORES, MAXCORES)>1:
		if useglobal: # use a global semaphore
			if Global_Sema is None:
				Global_Sema = mpthread.BoundedSemaphore(min(NCORES, MAXCORES))
		else: # use a local semaphore at each call to BranchPop
			sema = mpthread.BoundedSemaphore(min(NCORES, MAXCORES))
		lock = mpthread.Lock()
		Parallel = True
	else:   # If parallel execution is not possible (as just one core could be used), concurrent execution is performed using threads.
		if useglobal: 
			if Global_Sema is None:
				Global_Sema = threading.BoundedSemaphore(NCORES) 
		else:
			sema = threading.BoundedSemaphore(NCORES)
		lock = threading.Lock()
		Parallel = False
	if useglobal:
		return Global_Sema, lock , Parallel 
	else:
		return sema, lock , Parallel 
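A minimal usage sketch for the helper above, assuming GetSemaLock, NCORES, MAXCORES and Global_Sema are in scope and that mpthread refers to the multiprocessing module; the worker body and the items iterable are placeholders, not part of the original code:

import threading

def _bounded_worker(item, sema, lock, results):
    with sema:                   # at most min(NCORES, MAXCORES) workers run at once
        value = item * item      # placeholder for the real per-item work
    with lock:                   # serialize access to the shared results list
        results.append(value)

def run_items(items):
    sema, lock, parallel = GetSemaLock(useglobal=False)
    results = []
    threads = [threading.Thread(target=_bounded_worker, args=(i, sema, lock, results))
               for i in items]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return results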
Example #2
    def build_db(self, use_symbol):
        global pool_sem

        pool_sem = multiprocessing.BoundedSemaphore(value=1)

        self.create_db()
        """ The functions table is created with these columns: project, compiler, optimization, file_name, function_name, cfg """
        """ acfg table: id, acfg.
        lstm_cfg table: id, lstm_cfg """
        file_list = self.scan_for_file(self.root_path)  # walk the tree collecting files that end in .o

        print('Found ' + str(len(file_list)) + ' files during the scan')
        file_list = self.remove_override(
            file_list)  # drop duplicates (newly scanned files whose names already exist in the database)
        print('Found ' + str(len(file_list)) + ' files to analyze')
        random.shuffle(file_list)  # shuffle the file list into random order

        t_args = [(f, self.db_name, use_symbol)
                  for f in file_list]  # one tuple per file: (file name, database name, use_symbol flag)

        # Start a parallel pool to analyze files
        p = Pool(processes=1, maxtasksperchild=1)  # worker pool to process the files
        for _ in tqdm(p.imap_unordered(DatabaseFactory.worker, t_args),
                      total=len(file_list)):
            pass

        p.close()
        p.join()
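The DatabaseFactory.worker callable handed to the pool is not shown in this snippet. The standalone sketch below only illustrates why a value-1 semaphore is published as a global before the pool is created: each worker serializes its writes to the shared sqlite database. The table and column names come from the comments above; everything else (including the assumption that the functions table already exists) is hypothetical:

import multiprocessing
import sqlite3

pool_sem = multiprocessing.BoundedSemaphore(value=1)

def worker(args):
    file_name, db_name, use_symbol = args
    # only one process writes to the sqlite file at a time
    pool_sem.acquire()
    try:
        conn = sqlite3.connect(db_name)
        conn.execute('INSERT INTO functions (file_name) VALUES (?)', (file_name,))
        conn.commit()
        conn.close()
    finally:
        pool_sem.release()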
Example #3
def main():
  assert len(sys.argv) > 1
  in_path = sys.argv[1]
  assert in_path.endswith('.in')
  IN = open(in_path, 'rt')
  os.chdir(os.path.dirname(sys.argv[0]))
  T = int(IN.readline())
  data = []
  for t in range(T): data.append(get_single_test_data(IN))
  IN.close()
  
  out_dirname = in_path[:-3]
  if os.path.exists(out_dirname): shutil.rmtree(out_dirname)
  os.makedirs(out_dirname)
  
  test_filenames = [os.path.join(out_dirname, 'test%.5d.out' % (t + 1)) for t in range(T)]
  
  LOCK = mp.BoundedSemaphore(5)
  processes = []
  for t in range(T): processes.append(mp.Process(target=solve_single_test, args=(LOCK, t, data[t], test_filenames[t])))
  
  for p in processes: p.start()
  for p in processes: p.join()
  
  out_path = '%s.out' % (out_dirname)
  
  FULL_OUT = open(out_path, 'wt')
  
  for t in range(T):
    TEST_RESULT = open(test_filenames[t], 'rt')
    for l in TEST_RESULT:
      FULL_OUT.write(l)
    TEST_RESULT.close()
  
  FULL_OUT.close()
Example #4
    def simulate(self, srcPath, nbJobs, args):
        if args.count("-q") and nbJobs > 1:
            print "Cannot use -q option in simulations when using more than one job."
            return;

        if not os.path.exists(os.path.join(srcPath, "trace")):
            print "Simulation error : trace directory does not exist."
            return 2

        if not os.listdir(os.path.join(srcPath, "trace")):
            print "Simulation error : trace directory is empty."
            return 2

        self.sema = multiprocessing.BoundedSemaphore(value=nbJobs)
        results = multiprocessing.Manager().list()
        jobs = [multiprocessing.Process(target=self.simulateMulti, args=(srcPath, processor, results, args)) 
            for processor in self.processors]    
        for job in jobs: job.start()
        for job in jobs: job.join()
        retcode = max(results)

        if retcode == 2: 
            raise Exception("Problem during the simulation")

        return retcode
Example #5
def test_partially_pickleables_multiprocessing():
    '''
    "Partially-pickleable" means an object which is atomically pickleable but
    not pickleable.
    '''

    if not import_tools.exists('multiprocessing'):
        raise nose.SkipTest('`multiprocessing` is not installed.')

    import multiprocessing

    x = PickleableObject()
    x.lock = threading.Lock()

    partially_pickleables = [
        x, [multiprocessing.BoundedSemaphore()], {
            1: multiprocessing.Lock(),
            2: 3
        },
        set([multiprocessing.Queue(), x])
    ]

    for thing in partially_pickleables:
        assert pickle_tools.is_atomically_pickleable(thing)
        assert not is_pickle_successful(thing)
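A small self-contained check of the behaviour these tests rely on: multiprocessing synchronization primitives refuse to be pickled outside of process spawning. On CPython this raises RuntimeError, though the exact message may vary by version:

import multiprocessing
import pickle

if __name__ == '__main__':
    sem = multiprocessing.BoundedSemaphore(2)
    try:
        pickle.dumps(sem)
    except RuntimeError as exc:
        # e.g. "... should only be shared between processes through inheritance"
        print('pickling failed as expected:', exc)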
Example #6
    def __init__(self, log_name, experiments, cpu_limit=2**16):
        """
        :param experiments: A list of pypuf.experiments.experiment.base.Experiment
        :param log_name: A unique file path where to output should be logged.
        :param cpu_limit: Maximum number of parallel processes that run experiments.
        """

        # Store experiments list
        self.experiments = experiments

        # Setup logging to both file and console
        file_handler = logging.FileHandler(filename='%s.log' % log_name,
                                           mode='w')
        file_handler.setLevel(logging.INFO)

        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(logging.INFO)

        self.logger = logging.getLogger(log_name)
        self.logger.setLevel(logging.INFO)
        self.logger.addHandler(file_handler)
        self.logger.addHandler(stream_handler)

        # Setup parallel execution limit
        self.cpu_limit = min(cpu_limit, multiprocessing.cpu_count())
        self.semaphore = multiprocessing.BoundedSemaphore(self.cpu_limit)
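A hedged sketch of how a semaphore sized to cpu_limit might gate experiment processes: each child acquires one slot for the duration of its run. The worker body below is a placeholder, not pypuf's actual experiment code, and the semaphore is passed to the children through the Process arguments:

import multiprocessing

def _run_experiment(exp_id, semaphore):
    with semaphore:               # hold one CPU slot while the experiment runs
        _ = exp_id * exp_id       # placeholder for the real experiment work

def run_bounded(num_experiments=8, cpu_limit=2):
    semaphore = multiprocessing.BoundedSemaphore(min(cpu_limit, multiprocessing.cpu_count()))
    procs = [multiprocessing.Process(target=_run_experiment, args=(i, semaphore))
             for i in range(num_experiments)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()

if __name__ == '__main__':
    run_bounded()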
Example #7
    def run(self):
        if 'remotes' in self._config:
            self._remote_pipe(block=False)

        if self._server:
            threading.Thread(target=self._serve_forever).start()

        if self._server_mode == 'idle':
            print('[!] going idle mode')
            return

        # make sure all threads are initialized before start working
        init_barrier = multiprocessing.Barrier(len(self._stages) + 1)
        thread_sem = multiprocessing.BoundedSemaphore(
            multiprocessing.cpu_count())
        ps = []
        for s in self._stages:
            print('[+] initializing process:', s.name)
            target = pipeline_stage_worker
            if s.get_max_parallel() == 1:
                target = no_fork_pipeline_stage_worker
            kwargs = {
                'init_barrier': init_barrier,
                'thread_sem': thread_sem,
                'stage': s,
                'context': self._context,
            }
            p = multiprocessing.Process(target=target, kwargs=kwargs)
            ps.append(p)
        print('[+] starting processes')
        [p.start() for p in ps]
        init_barrier.wait()
Example #8
    def __init__(self,
                 interface: str,
                 wordlist='',
                 scan_time='7',
                 time_out='15',
                 hash_file='hashes.22000',
                 pot_file='found.potfile'):

        self._wlan_mac = ['cat', f'/sys/class/net/{interface}/address']
        self._commsupp = [
            'timeout', scan_time, 'wpa_supplicant', '-c', 'wpa_supp.conf',
            '-i', interface, '-dd'
        ]  # using timeout, just works...
        self._commsupp2 = [
            'timeout', time_out, 'wpa_supplicant', '-c', 'wpa_supp.conf', '-i',
            interface, '-dd'
        ]
        self.hash_file = hash_file
        self.pot_file = pot_file
        self.wordlist = wordlist
        self._ls = LineScrapper()
        self.sudo_require = elevator()
        try:
            self._sem = mp.BoundedSemaphore(value=mp.cpu_count())
        except FileNotFoundError:
            print(
                'Python multiprocessing BoundedSemaphore does not seem to work on your system. Starting '
                'capture-only mode ...')
            self.wordlist = ''
Example #9
    def build_db(self, use_symbol, depth):
        global pool_sem

        pool_sem = multiprocessing.BoundedSemaphore(value=1)

        instruction_converter = InstructionsConverter('data/i2v/word2id.json')
        self.create_db()
        file_list = self.scan_for_file(self.root_path)

        print('Found ' + str(len(file_list)) + ' files during the scan')
        file_list = self.remove_override(file_list)
        print('Found ' + str(len(file_list)) + ' files to analyze')
        random.shuffle(file_list)

        t_args = [(f, self.db_name, use_symbol, depth, instruction_converter)
                  for f in file_list]

        # Start a parallel pool to analyze files
        p = Pool(processes=None, maxtasksperchild=20)
        for _ in tqdm(p.imap_unordered(DatabaseFactory.worker, t_args),
                      total=len(file_list)):
            pass

        p.close()
        p.join()
Example #10
    def __init__(self, address, handler, bind_and_activate=True):
        """Create server."""
        ipv6 = ":" in address[0]

        if ipv6:
            self.address_family = socket.AF_INET6

        # Do not bind and activate, as we might change socket options
        super().__init__(address, handler, False)

        if ipv6:
            # Only allow IPv6 connections to the IPv6 socket
            self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)

        if self.max_connections:
            self.connections_guard = multiprocessing.BoundedSemaphore(
                self.max_connections)
        else:
            # use dummy context manager
            self.connections_guard = contextlib.ExitStack()

        if bind_and_activate:
            try:
                self.server_bind()
                self.server_activate()
            except BaseException:
                self.server_close()
                raise
Example #11
    def _execute(self, deadline):
        semaphores = {
            k: NopSemaphore()
            if v is None else multiprocessing.BoundedSemaphore(v)
            for k, v in self.lit_config.parallelism_groups.items()
        }

        self._increase_process_limit()

        # Start a process pool. Copy over the data shared between all test runs.
        # FIXME: Find a way to capture the worker process stderr. If the user
        # interrupts the workers before we make it into our task callback, they
        # will each raise a KeyboardInterrupt exception and print to stderr at
        # the same time.
        pool = multiprocessing.Pool(self.workers, lit.worker.initialize,
                                    (self.lit_config, semaphores))

        self._install_win32_signal_handler(pool)

        async_results = [
            pool.apply_async(lit.worker.execute,
                             args=[test],
                             callback=self._process_completed)
            for test in self.tests
        ]
        pool.close()

        try:
            self._wait_for(async_results, deadline)
        except:
            pool.terminate()
            raise
        finally:
            pool.join()
Example #12
    def _execute(self, deadline):
        self._increase_process_limit()

        semaphores = {
            k: multiprocessing.BoundedSemaphore(v)
            for k, v in self.lit_config.parallelism_groups.items()
            if v is not None
        }

        pool = multiprocessing.Pool(self.workers, lit.worker.initialize,
                                    (self.lit_config, semaphores))

        async_results = [
            pool.apply_async(lit.worker.execute,
                             args=[test],
                             callback=self.progress_callback)
            for test in self.tests
        ]
        pool.close()

        try:
            self._wait_for(async_results, deadline)
        except:
            pool.terminate()
            raise
        finally:
            pool.join()
Example #13
def test_non_atomically_pickleables_multiprocessing():
    '''
    Test `is_atomically_pickleable` on non-atomically pickleable objects.
    
    Not including `multiprocessing` objects.
    '''

    if not import_tools.exists('multiprocessing'):
        raise nose.SkipTest('`multiprocessing` is not installed.')

    import multiprocessing

    non_pickleables = [
        multiprocessing.Lock(),
        multiprocessing.BoundedSemaphore(),
        multiprocessing.Condition(),
        multiprocessing.JoinableQueue(),
        multiprocessing.Pool(),
        multiprocessing.Queue(),
        multiprocessing.RLock(),
        multiprocessing.Semaphore(),
    ]

    for thing in non_pickleables:
        assert not pickle_tools.is_atomically_pickleable(thing)
        assert not is_pickle_successful(thing)

    assert not pickle_tools.is_atomically_pickleable(NonPickleableObject())
Example #14
    def __init__(self):
        super(FSScanProcessor, self).__init__()

        self.eventManager = FSEventManager.instance()
        self.settings = Settings.instance()
        self.config = Config.instance()

        self._logger = logging.getLogger(__name__)
        self._logger.setLevel(logging.DEBUG)

        self._prefix = None
        self._resolution = 16
        self._number_of_pictures = 0
        self._total = 0
        self._laser_positions = 1
        self._progress = 0
        self._is_color_scan = True
        self.point_cloud = None
        self.image_task_q = multiprocessing.Queue(self.config.process_numbers +
                                                  1)
        self.current_position = 0
        self._laser_angle = 33.0
        self._stop_scan = False
        self._current_laser_position = 1

        self.semaphore = multiprocessing.BoundedSemaphore()
        self.event_q = self.eventManager.get_event_q()

        self._worker_pool = FSImageWorkerPool(self.image_task_q, self.event_q)
        self.hardwareController = HardwareController.instance()
        self.eventManager.subscribe(FSEvents.ON_IMAGE_PROCESSED,
                                    self.image_processed)
        self._scan_brightness = self.settings.camera.brightness
        self._scan_contrast = self.settings.camera.contrast
        self._scan_saturation = self.settings.camera.saturation
Example #15
    def __init__(
        self, maxsize=0, encode=pickle.dumps, decode=pickle.loads, bufsize=1024
    ):
        if maxsize <= 0:
            maxsize = multiprocessing.synchronize.SEM_VALUE_MAX

        self._encode = encode
        self._decode = decode
        self._bufsize = bufsize

        self._sem = multiprocessing.BoundedSemaphore(maxsize)

        # Items are enqueued in this buffer before the feeder thread sends them over the pipe
        self._buffer = collections.deque()

        self._reader, self._writer = Pipe(duplex=False)

        # Notify _feeder that an item is in the buffer
        self._notempty = threading.Condition()

        # _feeder thread
        self._thread = None

        self._writelock = multiprocessing.Lock()
        self._readlock = multiprocessing.Lock()
Example #16
 def __init__(self,
              max_workers=None,
              mp_context=None,
              initializer=None,
              initargs=()):
     super().__init__(max_workers, mp_context, initializer, initargs)
     self.semaphore = multiprocessing.BoundedSemaphore(max_workers)
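The snippet above only stores the semaphore; how it is consumed is not shown. One common pattern, sketched here under the assumption that submit() should block once max_workers tasks are outstanding (the class name and the handling of max_workers=None are mine, not the snippet's):

import multiprocessing
from concurrent.futures import ProcessPoolExecutor

class BoundedProcessPoolExecutor(ProcessPoolExecutor):
    def __init__(self, max_workers=None, mp_context=None, initializer=None, initargs=()):
        super().__init__(max_workers, mp_context, initializer, initargs)
        self.semaphore = multiprocessing.BoundedSemaphore(
            max_workers or multiprocessing.cpu_count())

    def submit(self, fn, *args, **kwargs):
        self.semaphore.acquire()          # blocks while all slots are taken
        try:
            future = super().submit(fn, *args, **kwargs)
        except BaseException:
            self.semaphore.release()
            raise
        future.add_done_callback(lambda _: self.semaphore.release())
        return future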
Example #17
def _get_worker_sidedata_adder(srcrepo, destrepo):
    """The parallel version of the sidedata computation

    This code spawns a pool of workers that precompute a buffer of sidedata
    before we actually need it"""
    # avoid circular import copies -> scmutil -> worker -> copies
    from . import worker

    nbworkers = worker._numworkers(srcrepo.ui)

    tokens = multiprocessing.BoundedSemaphore(nbworkers * BUFF_PER_WORKER)
    revsq = multiprocessing.Queue()
    sidedataq = multiprocessing.Queue()

    assert srcrepo.filtername is None
    # queue all tasks beforehand, revision numbers are small and it makes
    # synchronisation simpler
    #
    # Since the computation for each node can be quite expensive, the overhead
    # of using a single queue is not relevant. In practice, most computations
    # are fast but some are very expensive and dominate all the other smaller
    # costs.
    for r in srcrepo.changelog.revs():
        revsq.put(r)
    # queue the "no more tasks" markers
    for i in range(nbworkers):
        revsq.put(None)

    allworkers = []
    for i in range(nbworkers):
        args = (srcrepo, revsq, sidedataq, tokens)
        w = multiprocessing.Process(target=_sidedata_worker, args=args)
        allworkers.append(w)
        w.start()

    # dictionary to store results for revisions higher than the one we are
    # looking for. For example, if we need the sidedatamap for 42 and 43 is
    # received, we shelve 43 for later use.
    staging = {}

    def sidedata_companion(revlog, rev):
        data = {}, False
        if util.safehasattr(revlog, b'filteredrevs'):  # this is a changelog
            # Was the data previously shelved?
            data = staging.pop(rev, None)
            if data is None:
                # look at the queued results until we find the one we are
                # looking for (shelve the other ones)
                r, data = sidedataq.get()
                while r != rev:
                    staging[r] = data
                    r, data = sidedataq.get()
            tokens.release()
        sidedata, has_copies_info = data
        new_flag = 0
        if has_copies_info:
            new_flag = sidedataflag.REVIDX_HASCOPIESINFO
        return False, (), sidedata, new_flag, 0

    return sidedata_companion
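The _sidedata_worker referenced above is not part of this snippet. The hypothetical sketch below only illustrates the token protocol implied by tokens.release() in sidedata_companion: a worker takes one token per buffered result, so at most nbworkers * BUFF_PER_WORKER results can sit in sidedataq ahead of the consumer. _compute_sidedata is a placeholder name:

def _compute_sidedata(srcrepo, rev):
    # placeholder: the real code computes the copy-tracing sidedata for `rev`
    return {}, False

def _sidedata_worker(srcrepo, revsq, sidedataq, tokens):
    while True:
        rev = revsq.get()
        if rev is None:        # "no more tasks" marker queued by the parent
            return
        tokens.acquire()       # wait for buffer space; the consumer releases one token per result
        sidedataq.put((rev, _compute_sidedata(srcrepo, rev)))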
Example #18
 def __init__(self, lit_config, tests):
     self.lit_config = lit_config
     self.tests = tests
     # Set up semaphores to limit parallelism of certain classes of tests.
     self.parallelism_semaphores = {
             k : NopSemaphore() if v is None else
                 multiprocessing.BoundedSemaphore(v)
             for k, v in lit_config.parallelism_groups.items()}
Example #19
def read_data(path, var, tests, month, rslt):
    # allow at most four reader threads at a time; each thread is expected to
    # release the semaphore when it finishes
    threadlock = multiprocessing.BoundedSemaphore(4)
    threads = []
    for test in tests:
        threadlock.acquire()
        t = threading.Thread(target=read_data_thread,
                             args=(path, var, test, month, rslt, threadlock))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
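read_data_thread is not shown in this snippet; the hypothetical sketch below assumes each thread must release the semaphore when it finishes so read_data can start the next one, and that rslt is a dict-like container:

def read_data_thread(path, var, test, month, rslt, threadlock):
    try:
        # placeholder for the actual data read; the real loader is not shown above
        rslt[test] = (path, var, month)
    finally:
        threadlock.release()   # free a slot so read_data can launch another thread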
Example #20
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     if self.max_connections:
         self.connections_guard = multiprocessing.BoundedSemaphore(
             self.max_connections)
     else:
         # use dummy context manager
         self.connections_guard = contextlib.ExitStack()
Example #21
def run_clients(options, db_table_set):
    # Spawn one client for each db.table
    exit_event = multiprocessing.Event()
    processes = []
    error_queue = multiprocessing.queues.SimpleQueue()
    interrupt_event = multiprocessing.Event()
    stream_semaphore = multiprocessing.BoundedSemaphore(options["clients"])

    signal.signal(signal.SIGINT, lambda a,b: abort_export(a, b, exit_event, interrupt_event))

    try:
        progress_info = [ ]

        for (db, table) in db_table_set:
            progress_info.append((multiprocessing.Value(ctypes.c_longlong, -1),
                                  multiprocessing.Value(ctypes.c_longlong, 0)))
            processes.append(multiprocessing.Process(target=export_table,
                                                     args=(options["host"],
                                                           options["port"],
                                                           options["auth_key"],
                                                           db, table,
                                                           options["directory_partial"],
                                                           options["fields"],
                                                           options["format"],
                                                           error_queue,
                                                           progress_info[-1],
                                                           stream_semaphore,
                                                           exit_event)))
            processes[-1].start()

        # Wait for all tables to finish
        while len(processes) > 0:
            time.sleep(0.1)
            if not error_queue.empty():
                exit_event.set() # Stop immediately if an error occurs
            processes = [process for process in processes if process.is_alive()]
            update_progress(progress_info)

        # If we were successful, make sure 100% progress is reported
        # (rows could have been deleted which would result in being done at less than 100%)
        if error_queue.empty() and not interrupt_event.is_set():
            print_progress(1.0)

        # Continue past the progress output line
        print ""
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if not error_queue.empty():
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        while not error_queue.empty():
            error = error_queue.get()
            print >> sys.stderr, "Traceback: %s" % (error[2])
            print >> sys.stderr, "%s: %s" % (error[0].__name__, error[1])
            raise RuntimeError("Errors occurred during export")
Example #22
 def __init__(self, name, processors, memories, targetAltera, family, device, package):
     self.name = name
     self.family = family
     self.device = device
     self.package = package
     self.processors = processors
     self.memories = memories
     self.targetAltera = targetAltera
     self.sema = multiprocessing.BoundedSemaphore(value=1)
Example #23
 def __init__(self, wordlist_name: str, hash_file='hashes.22000', pot_file='found.potfile'):
     self.hash_file = fm.try_open_read_file(hash_file)
     self.pot_file = pot_file
     self._wordlist_name = wordlist_name
     try:
         self._sem = mp.BoundedSemaphore(value=mp.cpu_count())
     except FileNotFoundError:
         print('Python multiprocessing BoundedSemaphore does not seem to work on your system. Exiting...')
         exit(1)
Example #24
 def __init__(self, lit_config, tests):
     self.lit_config = lit_config
     self.tests = tests
     # Set up semaphores to limit parallelism of certain classes of tests.
     # For example, some ASan tests require lots of virtual memory and run
     # faster with less parallelism on OS X.
     self.parallelism_semaphores = \
             {k: multiprocessing.BoundedSemaphore(v) for k, v in
              self.lit_config.parallelism_groups.items()}
Example #25
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--bazel-config",
        help=
        ("Bazel --config flag values to use. "
         "Configs are sourced from the .bazelrc defined in the BuildBuddy repo at HEAD (on GitHub) "
         "as well as the local ~/.bazelrc."),
        action="append",
    )
    parser.add_argument(
        "-p",
        "--max-parallel-invocations",
        help="Max number of parallel bazel invocations.",
        type=int,
        default=2,
    )
    parser.add_argument(
        "-n",
        "--num-developers",
        help="Number of developers in the simulation.",
        type=int,
        default=5,
    )
    args = parser.parse_args()

    os.chdir(sys.path[0])

    data_dir = "/tmp/tmp_buildbuddy_simulate_traffic"
    print(f"Data directory: {data_dir}")

    if not os.path.exists(data_dir):
        sh(f"mkdir -p {data_dir}")
    os.chdir(data_dir)
    if not os.path.exists("buildbuddy"):
        sh("git clone https://github.com/buildbuddy-io/buildbuddy")

    bazel_lock = multiprocessing.BoundedSemaphore(
        args.max_parallel_invocations)
    bazel_configs = args.bazel_config

    def spawn_developer(data_dir):
        Developer(data_dir, bazel_configs, bazel_lock).develop()

    num_developers = args.num_developers
    procs = []
    for developer_id in range(1, num_developers + 1):
        developer_data_dir = os.path.join(data_dir,
                                          f"developer_{developer_id}")
        p = multiprocessing.Process(target=spawn_developer,
                                    args=(developer_data_dir, ))
        procs.append(p)
        p.start()
    for p in procs:
        p.join()
Example #26
File: gcmole.py Project: ljp2010/v8
def InvokeClangPluginForEachFile(
    filenames,
    plugin,
    plugin_args,
    arch_cfg,
    flags,
    clang_bin_dir,
    clang_plugins_dir,
):
    cmd_line = MakeClangCommandLine(plugin, plugin_args, arch_cfg,
                                    clang_bin_dir, clang_plugins_dir)
    verbose = flags["verbose"]
    outputs = {}
    if flags["sequential"]:
        log("** Sequential execution.")
        for filename in filenames:
            returncode, stdout, stderr = InvokeClangPluginForFile(
                filename, cmd_line, verbose)
            if returncode != 0:
                sys.stderr.write(stderr)
                sys.exit(returncode)
            outputs[filename] = (stdout, stderr)
    else:
        log("** Parallel execution.")
        cpus = multiprocessing.cpu_count()
        pool = multiprocessing.Pool(cpus)
        try:
            # Track active invokes with a semaphore, to prevent submitting too many
            # concurrent invokes to the pool.
            execution_slots = multiprocessing.BoundedSemaphore(cpus)

            async_outputs = {}
            for filename in filenames:
                execution_slots.acquire()

                def callback(output):
                    execution_slots.release()

                async_outputs[filename] = pool.apply_async(
                    InvokeClangPluginForFile, (filename, cmd_line, verbose),
                    callback=callback)

            for filename, output in async_outputs.items():
                returncode, stdout, stderr = output.get()
                if returncode != 0:
                    sys.stderr.write(stderr)
                    sys.exit(returncode)
                outputs[filename] = (stdout, stderr)
        except KeyboardInterrupt as e:
            pool.terminate()
            pool.join()
            raise e
        finally:
            pool.close()

    return outputs
Example #27
    def __init__(self, vecDim, directory, corpusPath, corpusName,
                 vocabInputFile, vectorInputFile, debug, funcWordFile):

        self.vecDim = vecDim
        self.corpusPath = directory + corpusPath
        self.corpusName = directory + corpusName
        self.vocabFile = directory + vocabInputFile
        self.vecFile = directory + vectorInputFile
        self.debug = debug

        self.readVocabFromFile()
        self.readVectorFromFile()
        self.readFuncWords(funcWordFile)

        if debug:
            self.sem = multiprocessing.BoundedSemaphore(1)
        else:
            self.sem = multiprocessing.BoundedSemaphore(
                multiprocessing.cpu_count() - 1)
Example #28
    def __init__(self, max_items, **kwargs):
        """Initialize a process pool.

        :param max_items: maximum number of simultaneous work items.
            Calls to `.submit` will block if there are too many unprocessed
            items.
        :param kwargs: key-word arguments to `ProcessPoolExecutor`.
        """
        super().__init__(**kwargs)
        self.semaphore = multiprocessing.BoundedSemaphore(max_items)
Example #29
    def _execute(self, deadline):
        semaphores = {
            k: NopSemaphore()
            if v is None else multiprocessing.BoundedSemaphore(v)
            for k, v in self.lit_config.parallelism_groups.items()
        }

        # Start a process pool. Copy over the data shared between all test runs.
        # FIXME: Find a way to capture the worker process stderr. If the user
        # interrupts the workers before we make it into our task callback, they
        # will each raise a KeyboardInterrupt exception and print to stderr at
        # the same time.
        pool = multiprocessing.Pool(self.workers, lit.worker.initializer,
                                    (self.lit_config, semaphores))

        # Install a console-control signal handler on Windows.
        if lit.util.win32api is not None:

            def console_ctrl_handler(type):
                print('\nCtrl-C detected, terminating.')
                pool.terminate()
                pool.join()
                lit.util.abort_now()
                return True

            lit.util.win32api.SetConsoleCtrlHandler(console_ctrl_handler, True)

        try:
            async_results = [
                pool.apply_async(
                    lit.worker.run_one_test,
                    args=(test, ),
                    callback=lambda r, t=test: self._consume_test_result(t, r))
                for test in self.tests
            ]
            pool.close()

            # Wait for all results to come in. The callback that runs in the
            # parent process will update the display.
            for a in async_results:
                timeout = deadline - time.time()
                a.wait(timeout)
                if not a.successful():
                    # TODO(yln): this also raises on a --max-time time
                    a.get()  # Exceptions raised here come from the worker.
                if self.hit_max_failures:
                    break
        except:
            # Stop the workers and wait for any straggling results to come in
            # if we exited without waiting on every async result.
            pool.terminate()
            raise
        finally:
            pool.join()
Example #30
    def __init__(self, port=9525):
        self._data_pipe_r, self._data_pipe_w = multiprocessing.Pipe(False)
        self._ack_pipe_r, self._ack_pipe_w = multiprocessing.Pipe(False)
        self._semaphore = multiprocessing.BoundedSemaphore(1)  # no concurrency allowed
        self._server_process = process.ServerProcess("metadata frame net socket reading process",
                                                     net_sock_server.frame_net_socket_reader_server,
                                                     self._data_pipe_w, self._ack_pipe_r, self._semaphore, port)
        ret = self._server_process.start()
        if ret:
            print("%s is running" % self._server_process.name())

        print("net socket based frame reader created, port = %d" % port)