Example #1
def run_using_threadpool(fn_to_execute, inputs, pool_size):
  """For internal use only; no backwards-compatibility guarantees.

  Runs the given function on given inputs using a thread pool.

  Args:
    fn_to_execute: Function to execute
    inputs: Inputs on which given function will be executed in parallel.
    pool_size: Size of thread pool.
  Returns:
    Results retrieved after executing the given function on given inputs.
  """

  # ThreadPool crashes in old versions of Python (< 2.7.5) if created
  # from a child thread. (http://bugs.python.org/issue10015)
  if not hasattr(threading.current_thread(), '_children'):
    threading.current_thread()._children = weakref.WeakKeyDictionary()
  pool = ThreadPool(min(pool_size, len(inputs)))
  try:
    # We record and reset the logging level here since the 'apitools' library
    # that Beam depends on updates the logging level when used with a
    # threadpool - https://github.com/google/apitools/issues/141
    # TODO: Remove this once above issue in 'apitools' is fixed.
    old_level = logging.getLogger().level
    return pool.map(fn_to_execute, inputs)
  finally:
    pool.terminate()
    logging.getLogger().setLevel(old_level)
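For reference, a minimal self-contained sketch of the same pattern (pool sized to the
smaller of a cap and the number of inputs, with the root logging level saved and
restored around the map); the worker function and inputs are placeholders:

from multiprocessing.pool import ThreadPool
import logging

def _work(item):
    # Placeholder worker; ThreadPool runs it in threads, so no pickling is involved.
    return item * 2

inputs = [1, 2, 3, 4]
old_level = logging.getLogger().level      # capture the root logger level up front
pool = ThreadPool(min(8, len(inputs)))     # never start more threads than inputs
try:
    results = pool.map(_work, inputs)
finally:
    pool.terminate()                         # stop the pool immediately
    logging.getLogger().setLevel(old_level)  # undo any level change made by libraries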
    def launch_parallel_tests(self):
        image_name = "django_parallel_tests/%s" % self.project_name
        if len(self.docker.images(name=image_name)) == 0:
            self.build_image()

        req_hash = hashlib.sha224(str(sorted(self.requirements))).hexdigest()
        try:
            last_req_hash = open(".last_requirements").read().strip()
        except:
            last_req_hash = None

        if req_hash != last_req_hash:
            self.build_image()
            with open(".last_requirements", "w") as f:
                f.write(req_hash)

        pool = ThreadPool()
        tests = [[test] for test in self.tests]
        run_tests = partial(run_tests_for_project, self.project_name)

        result = pool.map_async(run_tests, tests)
        try:
            while True:
                time.sleep(0.1)
                if result.ready():
                    print "got result", result.get()
                    return
        except KeyboardInterrupt:
            pool.terminate()
            pool.join()
        else:
            pool.close()
            pool.join()
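A minimal sketch of the map_async polling pattern used above, with the cleanup ordered
so that close()/join() actually run on the success path (the worker here is a placeholder):

from multiprocessing.pool import ThreadPool
import time

def run_one(args):
    # Placeholder for a per-item test runner.
    time.sleep(0.05)
    return args

pool = ThreadPool()
result = pool.map_async(run_one, [[1], [2], [3]])
try:
    while not result.ready():
        time.sleep(0.1)          # poll so the main thread stays responsive to Ctrl-C
    print("got result", result.get())
    pool.close()                 # no more work; let workers exit normally
except KeyboardInterrupt:
    pool.terminate()             # abandon in-flight work on Ctrl-C
finally:
    pool.join()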
Example #3
def execute_command(command, path, shell=True, env=None, print_output=True):
    """Execute command via thread"""

    cmd_env = os.environ.copy()
    if env:
        cmd_env.update(env)
    if print_output:
        pipe = None
    else:
        pipe = subprocess.PIPE
    pool = ThreadPool()
    try:
        result = pool.apply(execute_subprocess_command,
                            args=(command, path),
                            kwds={'shell': shell, 'env': cmd_env, 'stdout': pipe, 'stderr': pipe})
        pool.close()
        pool.join()
        return result
    except (KeyboardInterrupt, SystemExit):
        if pool:
            pool.close()
            pool.terminate()
        print()
        cprint(' - Command failed', 'red')
        print()
        return 1
    except Exception as err:
        if pool:
            pool.close()
            pool.terminate()
        print()
        cprint(' - Command failed', 'red')
        print(err)
        print()
        return 1
Example #4
 def run(self, max_number_of_live_tokens=None, group=None):
     group = Pool()
     try:
         stages = []
         
         in_q = _DummyQueue()
         end_in = Event()
         if self._filters[0].is_serial:
             serial = Lock()
         else:
             serial = _DummyLock()
         if self._filters[0].is_ordered:
             out_q = PriorityQueue()
         else:
             out_q = Queue()
         
         
         for i, f in enumerate(self._filters):
             pass
                 
         send_q, recv_q = Queue(), Queue()
         
         group.close()
     except:
         group.terminate()
     finally:
         group.join()
 def foreach_source_repository(self, call_function, *posargs, **kwargs):
     """Call the function on each of the SourceRepository instances."""
     worker = RepositoryWorker(call_function, *posargs, **kwargs)
     all_repos = self.__source_repositories
     num_threads = min(self.__max_threads, len(all_repos))
     if num_threads > 1:
         pool = ThreadPool(num_threads)
         logging.info('Mapping %d/%s', len(all_repos.keys()),
                      all_repos.keys())
         try:
             raw_list = pool.map(worker, all_repos.values())
             result = {name: value for name, value in raw_list}
         except Exception:
             logging.error('Map caught exception')
             raise
         logging.info('Finished mapping')
         pool.terminate()
     else:
         # If we have only one thread, skip the pool
         # this is primarily to make debugging easier.
         result = {
             name: worker(repository)[1]
             for name, repository in all_repos.items()
         }
     return result
Example #6
    def getMessagesBySource(self, source, batch_mode=False):
        """
        Returns the messages for the given source, including messages
        from the configured builder (if available) and static checks
        Extra arguments are
        """
        self._setupEnvIfNeeded()

        if self._USE_THREADS:
            records = []
            pool = ThreadPool()

            static_check = pool.apply_async(
                getStaticMessages, args=(source.getSourceContent().split('\n'), ))

            if self._isBuilderCallable():
                builder_check = pool.apply_async(self._getBuilderMessages,
                                                 args=[source, batch_mode])
                records += builder_check.get()

            records += static_check.get()

            pool.terminate()
            pool.join()
        else:
            records = getStaticMessages(source.getSourceContent().split('\n'))
            if self._isBuilderCallable():
                records += self._getBuilderMessages(source, batch_mode)

        self._saveCache()
        return records
Example #7
def get_for_genres(genres):
    genres = set(genres)
    playlists = {}
    new_genres = set()

    for page in xrange(5):
        args = []
        for g in genres:
            args.append((g, page))

        try:
            pool = ThreadPool(PROCESSES)
            pfunc = parse_page
            for i, res in enumerate(pool.imap_unordered(pfunc, args)):
                genre, page, pl, found = res
                print "%d/%d" % (i + 1, len(args))
                playlists.update(pl)
                new_genres |= found
                if not pl:
                    genres.remove(genre)
        except Exception as e:
            print e
            return playlists, []
        finally:
            pool.terminate()
            pool.join()

    return playlists, new_genres
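A sketch of the imap_unordered progress pattern above in isolation: results are consumed
as they complete, and the pool is torn down in a finally block (parse here is a placeholder
for parse_page):

from multiprocessing.pool import ThreadPool

def parse(arg):
    # Placeholder parser: tag the (genre, page) pair with an empty playlist dict.
    genre, page = arg
    return genre, page, {"%s-%s" % (genre, page): []}, set()

args = [("rock", 0), ("jazz", 0), ("folk", 1)]
pool = ThreadPool(4)
try:
    for i, res in enumerate(pool.imap_unordered(parse, args)):
        genre, page, playlists, found = res
        print("%d/%d" % (i + 1, len(args)))   # progress in completion order, not input order
finally:
    pool.terminate()
    pool.join()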
Example #8
def load_ks(ks_directory, verbose=False):
    NUM_THREADS = int(multiprocessing.cpu_count())

    if verbose:
        print(f"loading hotpotqa knowledge source with {NUM_THREADS} threads")
    pool = ThreadPool(NUM_THREADS)

    filenames = []
    directories = [
        os.path.join(ks_directory, o) for o in os.listdir(ks_directory)
        if os.path.isdir(os.path.join(ks_directory, o))
    ]
    for directory in directories:
        onlyfiles = [
            f for f in os.listdir(directory)
            if os.path.isfile(os.path.join(directory, f))
        ]
        for filetto in onlyfiles:
            filename = "{}/{}".format(directory, filetto)
            filenames.append(filename)

    arguments = [{
        "id": i,
        "filenames": chunk,
        "verbose": verbose
    } for i, chunk in enumerate(chunk_it(filenames, NUM_THREADS))]

    results = pool.map(run_thread, arguments)
    output_dict = {}
    for x in results:
        output_dict.update(x)
    pool.terminate()
    pool.join()

    return output_dict
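The chunking idea above, reduced to a self-contained sketch: the work list is split into
one chunk per thread, each thread returns a partial dict, and the partials are merged
(chunk() and work() are simplified stand-ins for chunk_it and run_thread):

from multiprocessing.pool import ThreadPool
import multiprocessing

def chunk(seq, n):
    # Split seq into roughly n equal slices.
    size = max(1, len(seq) // n)
    return [seq[i:i + size] for i in range(0, len(seq), size)]

def work(arg):
    # Placeholder worker: build a partial result dict for one chunk of filenames.
    return {name: len(name) for name in arg["filenames"]}

filenames = ["a.txt", "bb.txt", "ccc.txt", "dddd.txt"]
n_threads = multiprocessing.cpu_count()
arguments = [{"id": i, "filenames": c} for i, c in enumerate(chunk(filenames, n_threads))]

pool = ThreadPool(n_threads)
try:
    merged = {}
    for partial in pool.map(work, arguments):
        merged.update(partial)   # combine per-chunk dicts into a single mapping
finally:
    pool.terminate()
    pool.join()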
Example #9
def run_tidy(sha="HEAD", is_rev_range=False):
    diff_cmdline = ["git", "diff" if is_rev_range else "show", sha]

    # Figure out which paths changed in the given diff.
    changed_paths = subprocess.check_output(
        diff_cmdline + ["--name-only", "--pretty=format:"]).splitlines()
    changed_paths = [p for p in changed_paths if p]

    # Produce a separate diff for each file and run clang-tidy-diff on it
    # in parallel.
    #
    # Note: this will incorporate any configuration from .clang-tidy.
    def tidy_on_path(path):
        patch_file = tempfile.NamedTemporaryFile()
        cmd = diff_cmdline + [
            "--src-prefix=%s/" % ROOT,
            "--dst-prefix=%s/" % ROOT, "--", path
        ]
        subprocess.check_call(cmd, stdout=patch_file, cwd=ROOT)
        cmdline = [
            CLANG_TIDY_DIFF, "-clang-tidy-binary", CLANG_TIDY, "-p0", "-path",
            BUILD_PATH, "-extra-arg=-DCLANG_TIDY"
        ]
        return subprocess.check_output(cmdline,
                                       stdin=file(patch_file.name),
                                       cwd=ROOT)

    pool = ThreadPool(multiprocessing.cpu_count())
    try:
        return "".join(pool.imap(tidy_on_path, changed_paths))
    except KeyboardInterrupt as ki:
        sys.exit(1)
    finally:
        pool.terminate()
        pool.join()
Example #10
def write_feed_dangerously(feed: Feed,
                           outpath: str,
                           nodes: Optional[Collection[str]] = None) -> str:
    """Naively write a feed to a zipfile

    This function provides no sanity checks. Use it at
    your own risk.
    """
    nodes = DEFAULT_NODES if nodes is None else nodes
    try:
        tmpdir = tempfile.mkdtemp()

        def write_node(node):
            df = feed.get(node)
            if not df.empty:
                path = os.path.join(tmpdir, node)
                df.to_csv(path, index=False)

        pool = ThreadPool(len(nodes))
        try:
            pool.map(write_node, nodes)
        finally:
            pool.terminate()

        if outpath.endswith(".zip"):
            outpath, _ = os.path.splitext(outpath)

        outpath = shutil.make_archive(outpath, "zip", tmpdir)
    finally:
        shutil.rmtree(tmpdir)

    return outpath
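In Python 3 the terminate-in-finally idiom above can also be written with the pool as a
context manager, since Pool.__exit__ calls terminate(); a small sketch with a placeholder
writer:

from multiprocessing.pool import ThreadPool

def write_node(node):
    # Placeholder for the per-file CSV writer.
    print("writing", node)

nodes = ["agency", "routes", "trips"]
with ThreadPool(len(nodes)) as pool:   # exiting the block calls pool.terminate()
    pool.map(write_node, nodes)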
Example #11
 def from_dataloop(self, annotations, to_format, conversion_func=None, item=None):
     pool = ThreadPool(processes=6)
     for i_annotation, annotation in enumerate(annotations):
         if conversion_func is None:
             pool.apply_async(
                 func=self.converter_dict[to_format]["to"],
                 kwds={"annotation": annotation,
                       "i_annotation": i_annotation,
                       'annotations': annotations,
                       'item': item}
             )
         else:
             pool.apply_async(
                 func=self.custom_format,
                 kwds={
                     "annotation": annotation,
                     "i_annotation": i_annotation,
                     "conversion_func": conversion_func,
                     'annotations': annotations
                 },
             )
     pool.close()
     pool.join()
     pool.terminate()
     return annotations
    def test_concurrent_invalidate_metadata(self):
        """Test concurrent requests for INVALIDATE METADATA not hang"""
        test_self = self

        class ThreadLocalClient(threading.local):
            def __init__(self):
                self.client = test_self.create_impala_client()

        tls = ThreadLocalClient()

        def run_invalidate_metadata():
            # TODO(IMPALA-9123): Detect hangs here instead of using pytest.mark.timeout
            self.execute_query_expect_success(tls.client,
                                              "invalidate metadata")

        NUM_ITERS = 20
        pool = ThreadPool(processes=2)
        for i in xrange(NUM_ITERS):
            # Run two INVALIDATE METADATA commands in parallel
            r1 = pool.apply_async(run_invalidate_metadata)
            r2 = pool.apply_async(run_invalidate_metadata)
            try:
                r1.get(timeout=60)
                r2.get(timeout=60)
            except TimeoutError:
                assert False, "INVALIDATE METADATA timeout in 60s!"
        pool.terminate()
Example #13
def test_deadlock():
    db = SqliteDb(":memory:")
    mgr = MyOmen(db, cars=Cars)
    mgr.cars = mgr[Cars]
    car = mgr.cars.add(Car(gas_level=2))

    def insert(i):
        try:
            with car:
                car.gas_level = i
                if i == 0:
                    time.sleep(1)
                return True
        except OmenLockingError:
            return False

    num = 3
    pool = ThreadPool(num)

    ret = pool.map(insert, range(num))

    assert sorted(ret) == [False, False, True]

    pool.terminate()
    pool.join()
Example #14
def _synchronize_profiles_to_sites(logger, profiles_to_synchronize):
    if not profiles_to_synchronize:
        return

    remote_sites = [(site_id, config.site(site_id))
                    for site_id in config.get_login_slave_sites()]

    logger.info('Credentials changed for %s. Trying to sync to %d sites' %
                (", ".join(profiles_to_synchronize.keys()), len(remote_sites)))

    states = sites.states()

    pool = ThreadPool()
    jobs = []
    for site_id, site in remote_sites:
        jobs.append(
            pool.apply_async(_sychronize_profile_worker,
                             (states, site_id, site, profiles_to_synchronize)))

    results = []
    start_time = time.time()
    while time.time() - start_time < 30:
        for job in jobs[:]:
            try:
                results.append(job.get(timeout=0.5))
                jobs.remove(job)
            except mp_TimeoutError:
                pass
        if not jobs:
            break

    contacted_sites = {x[0] for x in remote_sites}
    working_sites = {result.site_id for result in results}
    for site_id in contacted_sites - working_sites:
        results.append(
            SynchronizationResult(
                site_id,
                error_text=_("No response from update thread"),
                failed=True))

    for result in results:
        if result.error_text:
            logger.info('  FAILED [%s]: %s' %
                        (result.site_id, result.error_text))
            if config.wato_enabled:
                add_change("edit-users",
                           _('Password changed (sync failed: %s)') %
                           result.error_text,
                           add_user=False,
                           sites=[result.site_id],
                           need_restart=False)

    pool.terminate()
    pool.join()

    num_failed = sum([1 for result in results if result.failed])
    num_disabled = sum([1 for result in results if result.disabled])
    num_succeeded = sum([1 for result in results if result.succeeded])
    logger.info('  Disabled: %d, Succeeded: %d, Failed: %d' %
                (num_disabled, num_succeeded, num_failed))
Example #15
def run_tidy(sha="HEAD", is_rev_range=False):
    diff_cmdline = ["git", "diff" if is_rev_range else "show", sha]

    # Figure out which paths changed in the given diff.
    changed_paths = subprocess.check_output(diff_cmdline + ["--name-only", "--pretty=format:"]).splitlines()
    changed_paths = [p for p in changed_paths if p]

    # Produce a separate diff for each file and run clang-tidy-diff on it
    # in parallel.
    def tidy_on_path(path):
        patch_file = tempfile.NamedTemporaryFile()
        cmd = diff_cmdline + [
            "--src-prefix=%s/" % ROOT,
            "--dst-prefix=%s/" % ROOT,
            "--",
            path]
        subprocess.check_call(cmd, stdout=patch_file, cwd=ROOT)
        cmdline = [CLANG_TIDY_DIFF,
                   "-clang-tidy-binary", CLANG_TIDY,
                   "-p0",
                   "--",
                   "-DCLANG_TIDY"] + compile_flags.get_flags()
        return subprocess.check_output(
            cmdline,
            stdin=file(patch_file.name),
            cwd=ROOT)
    pool = ThreadPool(multiprocessing.cpu_count())
    try:
        return "".join(pool.imap(tidy_on_path, changed_paths))
    except KeyboardInterrupt as ki:
        sys.exit(1)
    finally:
        pool.terminate()
        pool.join()
class Threaded:
    """An Abstract Class for providing instance-wide Multithreading Capability"""
    def __init__(self):
        self._threads = ThreadPool()

    def __del__(self):
        self._threaded_shutdown(hard=True)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._threaded_shutdown()

    def _threaded_shutdown(self, hard=False):
        """
        Clean up function to assure proper release of ThreadPool resources.
        :param hard: Forces running threads to join immediately if True; else allow them to finish their tasks before joining.
        :return: None
        """
        # print('Shutting down threadpool')
        try:
            if hard:
                self._threads.terminate()
            else:
                self._threads.close()
            self._threads.join()
        except Exception as e:
            print(e)
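A usage sketch for the Threaded base class above (Fetcher is a hypothetical subclass; it
assumes work is submitted through self._threads, as the class intends):

class Fetcher(Threaded):
    def measure_all(self, words):
        # Submit every word first, then collect; get() re-raises worker exceptions here.
        pending = [self._threads.apply_async(len, (w,)) for w in words]
        return [p.get() for p in pending]

with Fetcher() as fetcher:           # __exit__ -> close() + join(): queued tasks may finish
    print(fetcher.measure_all(["alpha", "beta"]))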
Example #17
    def _run_tests(self):
        "Runs the tests, produces no report."
        run_alone = []

        tests = self._tests
        pool = ThreadPool(self._worker_count)
        try:
            for cmd, options in tests:
                options = options or {}
                if matches(self._configured_run_alone_tests, cmd):
                    run_alone.append((cmd, options))
                else:
                    self._spawn(pool, cmd, options)
            pool.close()
            pool.join()

            if run_alone:
                util.log("Running tests marked standalone")
                for cmd, options in run_alone:
                    self._run_one(cmd, **options)
        except KeyboardInterrupt:
            try:
                util.log('Waiting for currently running to finish...')
                self._reap_all()
            except KeyboardInterrupt:
                pool.terminate()
                raise
        except:
            pool.terminate()
            raise
Example #18
    def getMessagesBySource(self, source, batch_mode=False):
        """
        Returns the messages for the given source, including messages
        from the configured builder (if available) and static checks
        Extra arguments are
        """
        self._setupEnvIfNeeded()

        if self._USE_THREADS:
            records = []
            pool = ThreadPool()

            static_check = pool.apply_async(
                getStaticMessages,
                args=(source.getRawSourceContent().split('\n'), ))

            if self._isBuilderCallable():
                builder_check = pool.apply_async(self._getBuilderMessages,
                                                 args=[source, batch_mode])
                records += builder_check.get()

            records += static_check.get()

            pool.terminate()
            pool.join()
        else:
            records = getStaticMessages(
                source.getRawSourceContent().split('\n'))
            if self._isBuilderCallable():
                records += self._getBuilderMessages(source, batch_mode)

        self._saveCache()
        return records
Example #19
    def run(self, progress_bar: bool = False, notebook_mode: bool = False):
        """
        When you run parallel in root, please use this function
        :progress_bar: Use tqdm to show the progress of calling Parallel
        :notebook_mode: Put it to true if run mlchain inside notebook
        """
        if self.threading:
            pool = ThreadPool(max(1, self.max_threads))
            if progress_bar:
                self.show_progress_bar = True
                self.progress_bar = TrioProgress(total=len(self.tasks),
                                                 notebook_mode=notebook_mode)

            async_result = [
                pool.apply_async(self.exec_task, args=[task, idx])
                for idx, task in enumerate(self.tasks)
            ]

            results = []
            for result in async_result:
                output = result.get()
                if isinstance(output, Exception):
                    pool.terminate()
                    pool.close()
                    raise output
                results.append(output)
            pool.close()
            return results
        if progress_bar:
            self.show_progress_bar = True
            self.progress_bar = TrioProgress(total=len(self.tasks),
                                             notebook_mode=notebook_mode)
        return trio.run(self.dispatch)
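The apply_async accumulation pattern above, as a stand-alone sketch: results are collected
in submission order, and the pool is terminated as soon as one task reports an exception
(exec_task is a placeholder that returns, rather than raises, its error):

from multiprocessing.pool import ThreadPool

def exec_task(task, idx):
    # Placeholder task body; return the exception instead of raising, as the code above does.
    try:
        return task * 2
    except Exception as ex:
        return ex

tasks = [1, 2, 3]
pool = ThreadPool(4)
async_results = [pool.apply_async(exec_task, args=(task, idx))
                 for idx, task in enumerate(tasks)]

results = []
for res in async_results:
    output = res.get()              # blocks until that particular task has finished
    if isinstance(output, Exception):
        pool.terminate()            # abandon the remaining tasks
        raise output
    results.append(output)
pool.close()
pool.join()
print(results)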
Example #20
 def reindex_items(self, items):
     try:
         pool = ThreadPool(50)
         pool.map(self.reindex_one, items)
         pool.terminate()
     except Exception:
         self.logger.exception('Unable to index items')
Example #21
 def monitor_worker(func, *args, **kwargs):
     """
     Worker process will start a thread to execute the task, and another to monitor whether it times out.
     :param func:
     :param args:
     :param kwargs:
     :return:
     """
     # print("===> now task get a worker, PID:", os.getpid())
     timeout = kwargs.get('timeout', None)
     # start a thread to monitor
     p = ThreadPool(1)
     res = p.apply_async(func, args=args)
     try:
         # wait result
         out = res.get(timeout)
         return out
     except multiprocessing.TimeoutError:
         # exit when timeout
         p.terminate()
         raise Exception("timeout", args)
     except Exception as e:
         # other error
         p.terminate()
         raise Exception(str(e), args)
Example #22
 def play_turn(self, turn, f=None):
     ''' Plays out a single turn of the game '''
     if self.done: return
     try:
         if self.clock[turn] < float('inf'):
             pool = ThreadPool(processes=1)
             self.start_time = time.time()
             async_result = pool.apply_async(self.players[turn].make_move)
             move, wall = async_result.get(timeout=self.clock[turn])
             end = time.time()
             self.clock[turn] -= end-self.start_time
             pool.terminate()
         else:
             move, wall = self.players[turn].make_move()
         if move not in self.get_adjacent(self.positions[turn]):
             return (turn+1)%2
         self.positions[turn] = move
         if wall in self.positions or not self.floor[wall[0]][wall[1]]:
             return (turn+1)%2
         self.floor[wall[0]][wall[1]] = False
         if f:
             f.write("{0[0]}, {0[1]}, {1[0]}, {1[1]}\n".format(move, wall))
     except:
         print(sys.exc_info())
         return (turn+1)%2
     return -1
  def collect_logs(self):
    """Collect all the microservice log files."""
    log_dir = os.path.join(self.options.log_dir, 'service_logs')
    if not os.path.exists(log_dir):
      os.makedirs(log_dir)

    def fetch_service_log(service):
      try:
        logging.debug('Fetching logs for "%s"...', service)
        deployer = (self if service in HALYARD_SERVICES
                    else self.__spinnaker_deployer)
        deployer.do_fetch_service_log_file(service, log_dir)
      except Exception as ex:
        message = 'Error fetching log for service "{service}": {ex}'.format(
            service=service, ex=ex)
        if ex.message.find('No such file') >= 0:
          message += '\n    Perhaps the service never started.'
          # dont log since the error was already captured.
        else:
          logging.error(message)
          message += '\n{trace}'.format(
              trace=traceback.format_exc())

        write_data_to_secure_path(
            message, os.path.join(log_dir, service + '.log'))

    logging.info('Collecting server log files into "%s"', log_dir)
    all_services = list(SPINNAKER_SERVICES)
    all_services.extend(HALYARD_SERVICES)
    thread_pool = ThreadPool(len(all_services))
    thread_pool.map(fetch_service_log, all_services)
    thread_pool.terminate()
def download(processes=16):
    if not os.path.exists(COUNTRIES_VALIDATION_DATA_DIR):
        os.mkdir(COUNTRIES_VALIDATION_DATA_DIR)
    data = manager.dict()
    countries = fetch(MAIN_URL)['countries'].split('~')
    countries = ['PL']
    for country in countries:
        work_queue.put(country)
    workers = ThreadPool(processes, worker, initargs=(data,))
    work_queue.join()
    workers.terminate()
    logger.debug('Queue finished')
    with io.open(COUNTRY_PATH % 'all', 'w', encoding='utf8') as all_output:
        all_output.write(u'{')
        for country in countries:
            country_dict = {}
            for key, address_data in data.items():
                if key[:2] == country:
                    country_dict[key] = address_data
            logger.debug('Saving %s', country)
            country_json = serialize(country_dict, COUNTRY_PATH % country.lower())
            all_output.write(country_json[1:-1])
            if country != countries[-1]:
                all_output.write(u',')
        all_output.write(u'}')
def fill_object_container(object_container, ids):
    def worker_task(id):
        directory_object = DirectoryObject()
        imdb_id = SharedCodeService.trakt.movies_fill_movie_object(
            directory_object, id)
        if imdb_id:
            directory_object.key = Callback(movie_menu,
                                            title=directory_object.title,
                                            imdb_id=imdb_id)
            return directory_object
        return -1

    if Platform.OS != 'Linux':
        thread_pool = ThreadPool(10)
        map_results = thread_pool.map(worker_task, ids)

        thread_pool.terminate()
        thread_pool.join()
    else:
        map_results = []
        for id in ids:
            map_results.append(worker_task(id))

    for map_result in map_results:
        if map_result and map_result != -1:
            object_container.add(map_result)
Example #26
def start_collecting():
    db = Data()
    while True:
        result_dict = {}
        _from = time.time()
        result_dict['time'] = time.strftime("%Y.%m.%d - %H:%M:%S")
        cur_setup = db.get_cur_setup()
        list_ = []
        for key, item in cur_setup.items():
            list_.append([key, item])
        pool = ThreadPool(5)
        ret = pool.map(do_func, list_)
        pool.terminate()
        pool.join()
        for jdx, item in enumerate(list_):
            tmp_dict = {}
            for idx, pair in enumerate(item[1]):
                tmp_dict[pair] = ret[jdx][idx]
            result_dict[item[0]] = tmp_dict

        #print (result_dict)
        db.insert_tick(result_dict)
        end_time = time.time() - _from
        if end_time < 1:
            time.sleep(1 - end_time)
def read_parallel_local(ids: List[int],
                        directory: str,
                        processes=20) -> List[str]:
    """
    Parallelize the reading of documents on the local volume.
    ids: List of ids of the texts to read.
    directory: Directory on the local volume that contains the texts.
    processes: Number of parallel worker threads to use.

    """
    def read_legislative_file(rev_id: int):
        try:
            with open(directory + "/" + str(rev_id) + ".txt") as f:
                doc = f.read()
                return doc
        except (OSError, IOError):
            return None

    start_time = time.time()
    tp = ThreadPool(processes=processes)
    docs = tp.map(read_legislative_file, ids)
    tp.terminate()
    tp.close()

    print(
        f"Took {(time.time()-start_time)/60.0} min ({time.time()-start_time} sec)to open {len(ids)} files with {processes} processes."
    )

    return docs
    def test_concurrent_create_kudu_table(self, unique_database):
        table_name = unique_database + "." + TBL_NAME
        test_self = self

        class ThreadLocalClient(threading.local):
            def __init__(self):
                self.client = test_self.create_impala_client()

        tls = ThreadLocalClient()

        def run_create_table_if_not_exists():
            self.execute_query_expect_success(
                tls.client, "create table if not exists %s "
                "(id int, primary key(id)) stored as kudu" % table_name)

        # Drop table before run test if exists
        self.execute_query("drop table if exists %s" % table_name)
        NUM_ITERS = 20
        pool = ThreadPool(processes=3)
        for i in xrange(NUM_ITERS):
            # Run several commands by specific time interval to reproduce this bug
            r1 = pool.apply_async(run_create_table_if_not_exists)
            r2 = pool.apply_async(run_create_table_if_not_exists)
            # Sleep to make race conflict happens in different places
            time.sleep(1)
            r3 = pool.apply_async(run_create_table_if_not_exists)
            r1.get()
            r2.get()
            r3.get()
            # If hit IMPALA-8984, this query would be failed due to table been deleted in kudu
            self.execute_query_expect_success(tls.client,
                                              "select * from %s" % table_name)
            self.execute_query("drop table if exists %s" % table_name)
        pool.terminate()
Example #29
    def run(self):
        if not self.platforms:
            raise RuntimeError("No enabled platform to build on")

        thread_pool = ThreadPool(len(self.platforms))
        result = thread_pool.map_async(self.select_and_start_cluster, self.platforms)

        try:
            result.get()
        # Always clean up worker builds on any error to avoid
        # runaway worker builds (includes orchestrator build cancellation)
        except Exception:
            thread_pool.terminate()
            self.log.info('build cancelled, cancelling worker builds')
            if self.worker_builds:
                ThreadPool(len(self.worker_builds)).map(
                    lambda bi: bi.cancel_build(), self.worker_builds)
            while not result.ready():
                result.wait(1)
            raise
        else:
            thread_pool.close()
            thread_pool.join()

        fail_reasons = {
            build_info.platform: build_info.get_fail_reason()
            for build_info in self.worker_builds
            if not build_info.build or not build_info.build.is_succeeded()
        }

        if fail_reasons:
            raise PluginFailedException(json.dumps(fail_reasons))
Example #30
def main(compilation_db_path, source_files, verbose, formatter, iwyu_args):
    """ Entry point. """
    # Canonicalize compilation database path
    if os.path.isdir(compilation_db_path):
        compilation_db_path = os.path.join(compilation_db_path,
                                           'compile_commands.json')

    compilation_db_path = os.path.realpath(compilation_db_path)
    if not os.path.isfile(compilation_db_path):
        print('ERROR: No such file or directory: \'%s\'' % compilation_db_path)
        return 1

    # Read compilation db from disk
    with open(compilation_db_path, 'r') as fileobj:
        compilation_db = json.load(fileobj)

    # expand symlinks
    for entry in compilation_db:
        entry['file'] = os.path.realpath(entry['file'])

    # Cross-reference source files with compilation database
    source_files = [os.path.realpath(s) for s in source_files]
    if not source_files:
        # No source files specified, analyze entire compilation database
        entries = compilation_db
    else:
        # Source files specified, analyze the ones appearing in compilation db,
        # warn for the rest.
        entries = []
        for source in source_files:
            matches = [e for e in compilation_db if e['file'] == source]
            if matches:
                entries.extend(matches)
            else:
                print('WARNING: \'%s\' not found in compilation database.' %
                      source)

    # Run analysis
    build_dir = os.path.dirname(compilation_db_path)

    def run_iwyu_task(entry):
        cwd, compile_command = entry['directory'], entry['command']
        compile_command = workaround_parent_dir_relative_includes(
            cwd, compile_command)
        compile_command = workaround_add_libcpp(build_dir, compile_command)
        return run_iwyu(cwd, compile_command, iwyu_args, verbose)

    pool = ThreadPool(multiprocessing.cpu_count())
    try:
        for iwyu_output in pool.imap_unordered(run_iwyu_task, entries):
            formatter(iwyu_output)
    except KeyboardInterrupt as ki:
        sys.exit(1)
    except OSError as why:
        print('ERROR: Failed to launch include-what-you-use: %s' % why)
        return 1
    finally:
        pool.terminate()
        pool.join()
    return 0
Example #31
def decrack(arch_file_path, arch_file_mime_type, arch_file_name, username,
            email):

    start_date = datetime.now()
    start_time = perf_counter()
    pool = ThreadPool(processes=1)
    pool_result = pool.apply_async(file_handler.handle_file,
                                   (arch_file_path, arch_file_mime_type))
    try:
        password = pool_result.get(timeout=DECRACK_TIMEOUT)
    except TimeoutError as e:
        pool.terminate()
        logging.info(
            f'[ARCH] Archive cannot be decrypted after given time of {format_timespan(DECRACK_TIMEOUT)}'
        )
        send_arch_message(user=username,
                          mail=email,
                          file_name=arch_file_name,
                          time=DECRACK_TIMEOUT)
        return
    elapsed_time = perf_counter() - start_time
    end_date = datetime.now()
    pool.terminate()
    duration_message = f"""
    <div>Cracking password started at: {start_date}</div>
    <div>Cracking password ended at: {end_date}</div>
    """
    file_handler.clear_file(arch_file_path)
    send_arch_message(user=username,
                      mail=email,
                      password=password,
                      file_name=arch_file_name,
                      time=elapsed_time,
                      additional_info=duration_message)
def main(rank, num_threads, folder, chunk_size):

    print("loading chunk {}".format(rank), flush=True)
    documents = load_chunk(rank)

    arguments = [{
        "rank": rank,
        "id": id,
        "documents": chunk,
        "nlp": spacy.load("en_core_web_sm"),
        "chunk_size": chunk_size,
    } for id, chunk in enumerate(utils.chunk_it(documents, num_threads))]

    print("starting {} threads in {}".format(num_threads, rank))
    pool = ThreadPool(num_threads)
    results = pool.map(run_thread, arguments)

    f = open(
        os.path.join(folder, "kilt_{}.jsonl".format(rank)),
        "w+",
    )

    i = 1
    for output in results:
        for msg in output:
            f.write("{}\t{}\n".format(i, json.dumps(msg)))
            i += 1
    f.close()
    pool.terminate()
    pool.join()
    print("done {}".format(rank))
    def open_pipe(self, pipe_name=None, num_allowed_instances=1):
        if pipe_name is None:
            pipe_name = CommunicationHandler.RLGYM_GLOBAL_PIPE_NAME

        if self.is_connected():
            self.close_pipe()

        self._connected = False

        pool = ThreadPool(processes=1)
        pool.apply_async(CommunicationHandler.handle_diemwin_potential,
                         args=[self.is_connected])

        #win32pipe.PIPE_UNLIMITED_INSTANCES
        self._pipe = win32pipe.CreateNamedPipe(
            pipe_name,
            win32pipe.PIPE_ACCESS_DUPLEX | win32file.FILE_FLAG_OVERLAPPED,
            win32pipe.PIPE_TYPE_MESSAGE | win32pipe.PIPE_READMODE_MESSAGE
            | win32pipe.PIPE_WAIT, num_allowed_instances,
            CommunicationHandler.RLGYM_DEFAULT_PIPE_SIZE,
            CommunicationHandler.RLGYM_DEFAULT_PIPE_SIZE, 0, None)

        win32pipe.ConnectNamedPipe(self._pipe)

        self._current_pipe_name = pipe_name
        self._connected = True

        pool.terminate()
        pool.join()
def threadify(f, arr):
    # Maps array with function f parallelized
    pool = ThreadPool(mp.cpu_count())
    result = pool.map(f, arr)
    pool.terminate()
    pool.join()
    return result
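A usage sketch, assuming threadify is defined as above (with multiprocessing imported as mp);
terminate() followed by join() is safe there because map() has already returned every result:

lengths = threadify(len, ["a", "bb", "ccc"])   # -> [1, 2, 3]
print(lengths)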
Example #35
def run_using_threadpool(fn_to_execute, inputs, pool_size):
    """For internal use only; no backwards-compatibility guarantees.

  Runs the given function on given inputs using a thread pool.

  Args:
    fn_to_execute: Function to execute
    inputs: Inputs on which given function will be executed in parallel.
    pool_size: Size of thread pool.
  Returns:
    Results retrieved after executing the given function on given inputs.
  """

    # ThreadPool crashes in old versions of Python (< 2.7.5) if created
    # from a child thread. (http://bugs.python.org/issue10015)
    if not hasattr(threading.current_thread(), '_children'):
        threading.current_thread()._children = weakref.WeakKeyDictionary()
    pool = ThreadPool(min(pool_size, len(inputs)))
    try:
        # We record and reset the logging level here since the 'apitools' library
        # that Beam depends on updates the logging level when used with a
        # threadpool - https://github.com/google/apitools/issues/141
        # TODO: Remove this once above issue in 'apitools' is fixed.
        old_level = logging.getLogger().level
        return pool.map(fn_to_execute, inputs)
    finally:
        pool.terminate()
        logging.getLogger().setLevel(old_level)
Example #36
def download(country=None, processes=16):
    if not os.path.exists(COUNTRIES_VALIDATION_DATA_DIR):
        os.mkdir(COUNTRIES_VALIDATION_DATA_DIR)
    data = manager.dict()
    countries = get_countries()
    if country:
        country = country.upper()
        if country not in countries:
            raise ValueError(
                    '%s is not supported country code' % country)
        countries = [country]
    for country in countries:
        work_queue.put(country)
    workers = ThreadPool(processes, worker, initargs=(data,))
    work_queue.join()
    workers.terminate()
    logger.debug('Queue finished')
    with io.open(COUNTRY_PATH % 'all', 'w', encoding='utf8') as all_output:
        all_output.write(u'{')
        for country in countries:
            country_dict = {}
            for key, address_data in data.items():
                if key[:2] == country:
                    country_dict[key] = address_data
            logger.debug('Saving %s', country)
            country_json = serialize(country_dict, COUNTRY_PATH % country.lower())
            all_output.write(country_json[1:-1])
            if country != countries[-1]:
                all_output.write(u',')
        all_output.write(u'}')
Example #37
    def _run_tests(self):
        "Runs the tests, produces no report."
        run_alone = []

        tests = self._tests
        pool = ThreadPool(self._worker_count)
        try:
            for cmd, options in tests:
                options = options or {}
                if matches(self._configured_run_alone_tests, cmd):
                    run_alone.append((cmd, options))
                else:
                    self._spawn(pool, cmd, options)
            pool.close()
            pool.join()

            if run_alone:
                util.log("Running tests marked standalone")
                for cmd, options in run_alone:
                    self._run_one(cmd, **options)
        except KeyboardInterrupt:
            try:
                util.log('Waiting for currently running to finish...')
                self._reap_all()
            except KeyboardInterrupt:
                pool.terminate()
                raise
        except:
            pool.terminate()
            raise
def run_in_parallel(worker, args, ncores=None):
    def args_unpack(worker, args):
        return worker(*args)

    count = multiprocessing.cpu_count()

    if ncores:
        count = min(ncores, count)

    pool = ThreadPool(processes=count)

    tasks = []

    for arg in args:
        task = pool.apply_async(
            args_unpack,
            args=(worker, arg),
        )
        tasks.append(task)

    pool.close()
    pool.join()

    [task.get() for task in tasks]

    pool.terminate()
    del pool
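A usage sketch for run_in_parallel above (add and the argument tuples are placeholders);
the list comprehension over task.get() exists to surface worker exceptions, since the
return values themselves are discarded:

def add(a, b):
    # Placeholder worker; each args tuple is unpacked into these parameters.
    return a + b

run_in_parallel(add, [(1, 2), (3, 4)], ncores=2)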
Example #39
 def wrapper(*args, **kwargs):
     pool = ThreadPool(processes=1)
     async_result = pool.apply_async(callback, args, kwargs)
     try:
         return async_result.get(self.get_timeout_sec(route))
     except TimeoutError:
         pool.terminate()
         raise bottle.HTTPError(503, 'Service Unavailable, process timeout')
Example #40
 def __do_proc(self, buf):
     pool = ThreadPool(processes=1)
     result = pool.apply_async(self.proc, args=(buf,))
     try:
         result.get(timeout=self.__timeout)
     except TimeoutError:
         self._log('timeout (%ss)' % str(self.__timeout))
     finally:
         pool.terminate()
Example #41
def pmap(f, iterable):
    """Map `f` over `iterable` in parallel using a ``ThreadPool``.
    """
    p = ThreadPool()
    try:
        result = p.map(f, iterable)
    finally:
        p.terminate()
    return result
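A usage sketch, assuming pmap as defined above; terminate() in its finally block is safe
because map() only returns after every result has been collected:

squares = pmap(lambda x: x * x, range(5))   # -> [0, 1, 4, 9, 16]
print(squares)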
Example #42
    def get_data(self, targets, start="", stop="", interval_str=""):
        if isinstance(targets, basestring):
            targets = [targets]
        current_handlers = {}
        for target in targets:
            handler = self.get_handler(target)
            if handler not in current_handlers:
                current_handlers[handler] = []
            current_handlers[handler].append(target)
        max_targets = 5  # max targets per http request
        started_at = time.time()
        run_args = []
        for handler, targets in current_handlers.items():
            while targets:
                run_args.append((handler.get_data, targets[:max_targets]))
                targets = targets[max_targets:]

        def _run((func, targets)):
            try:
                return func(targets, start=start, stop=stop,
                            interval_str=interval_str)
            except Exception as exc:
                log.warning("Multihandler got response: %r", exc)
                return []

        pool = ThreadPool(10)
        parts = pool.map(_run, run_args)
        data = reduce(lambda x, y: x + y, parts)
        pool.terminate()
        log.info("Multihandler get_data completed in: %.2f secs",
                 time.time() - started_at)

        # align start/stop
        starts = set()
        stops = set()
        for item in data:
            starts.add(item['datapoints'][0][1])
            stops.add(item['datapoints'][-1][1])
        start = max(starts) if len(starts) > 1 else 0
        stop = min(stops) if len(stops) > 1 else 0
        if start or stop:
            log.debug("%s %s %s %s", starts, start, stops, stop)
            for item in data:
                if start:
                    for i in range(len(item['datapoints'])):
                        if item['datapoints'][i][1] >= start:
                            if i:
                                item['datapoints'] = item['datapoints'][i:]
                            break
                if stop:
                    for i in range(len(item['datapoints'])):
                        if item['datapoints'][-(i+1)][1] <= stop:
                            if i:
                                item['datapoints'] = item['datapoints'][:-i]
                            break
        return data
Example #43
def main(compilation_db_path, source_files, verbose, formatter, iwyu_args):
    """ Entry point. """
    # Canonicalize compilation database path
    if os.path.isdir(compilation_db_path):
        compilation_db_path = os.path.join(compilation_db_path,
                                           'compile_commands.json')

    compilation_db_path = os.path.realpath(compilation_db_path)
    if not os.path.isfile(compilation_db_path):
        print('ERROR: No such file or directory: \'%s\'' % compilation_db_path)
        return 1

    # Read compilation db from disk
    with open(compilation_db_path, 'r') as fileobj:
        compilation_db = json.load(fileobj)

    # expand symlinks
    for entry in compilation_db:
        entry['file'] = os.path.realpath(entry['file'])

    # Cross-reference source files with compilation database
    source_files = [os.path.realpath(s) for s in source_files]
    if not source_files:
        # No source files specified, analyze entire compilation database
        entries = compilation_db
    else:
        # Source files specified, analyze the ones appearing in compilation db,
        # warn for the rest.
        entries = []
        for source in source_files:
            matches = [e for e in compilation_db if e['file'] == source]
            if matches:
                entries.extend(matches)
            else:
                print('WARNING: \'%s\' not found in compilation database.' %
                      source)

    # Run analysis
    def run_iwyu_task(entry):
        cwd, compile_command = entry['directory'], entry['command']
        compile_command = workaround_parent_dir_relative_includes(
            cwd, compile_command)
        return run_iwyu(cwd, compile_command, iwyu_args, verbose)
    pool = ThreadPool(multiprocessing.cpu_count())
    try:
        for iwyu_output in pool.imap_unordered(run_iwyu_task, entries):
            formatter(iwyu_output)
    except KeyboardInterrupt as ki:
        sys.exit(1)
    except OSError as why:
        print('ERROR: Failed to launch include-what-you-use: %s' % why)
        return 1
    finally:
        pool.terminate()
        pool.join()
    return 0
 def _test_wrapper(self, *args, **kwargs):
     pool = ThreadPool(processes=1)
     result = pool.apply_async(test, args=(self, ) + args, kwds=kwargs)
     now = time.time()
     while not result.ready():
         if (time.time() - now > timeout):
             pool.terminate()
             self.fail()
         time.sleep(0.1)
     return result.get()
Example #45
 def proc(self, sock, trig, op, args, timeout):
     pool = ThreadPool(processes=1)
     async_result = pool.apply_async(trig.proc, (op, args))
     try:
         res = async_result.get(timeout)
         stream_input(sock, res)
     except TimeoutError:
         log_err(self, 'failed to process (timeout)')
         pool.terminate()
     finally:
         sock.close()
Example #46
 def _proc_safe(self, target, args, timeout):
     ret = None
     pool = ThreadPool(processes=1)
     result = pool.apply_async(target, args=args)
     try:
         ret = result.get(timeout=timeout)
     except TimeoutError:
         log_debug(self, "timeout")
     finally:
         pool.terminate()
     return ret
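The single-worker timeout pattern shared by the last few examples, as a self-contained
sketch (slow() is a placeholder for a call that may block too long):

from multiprocessing.pool import ThreadPool
from multiprocessing import TimeoutError
import time

def slow(seconds):
    # Placeholder for a potentially blocking call.
    time.sleep(seconds)
    return "done"

pool = ThreadPool(processes=1)
result = pool.apply_async(slow, args=(5,))
try:
    value = result.get(timeout=0.5)   # raises TimeoutError if the call has not finished
except TimeoutError:
    value = None
finally:
    pool.terminate()   # the worker thread cannot be killed; a still-running call is abandoned
print(value)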
    def inspect(self, source, preset=None, **options):
        """https://github.com/frictionlessdata/goodtables-py#inspector
        """

        # Start timer
        start = datetime.datetime.now()

        # Prepare preset
        preset = self.__get_source_preset(source, preset)
        if preset == 'nested':
            options['presets'] = self.__presets
            for s in source:
                if s.get('preset') is None:
                    s['preset'] = self.__get_source_preset(s['source'])

        # Prepare tables
        preset_func = self.__get_preset(preset)['func']
        warnings, tables = preset_func(source, **options)
        if len(tables) > self.__table_limit:
            warnings.append(
                'Dataset inspection has reached %s table(s) limit' %
                (self.__table_limit))
            tables = tables[:self.__table_limit]

        # Collect table reports
        table_reports = []
        if tables:
            tasks = []
            pool = ThreadPool(processes=len(tables))
            try:
                for table in tables:
                    tasks.append(pool.apply_async(self.__inspect_table, (table,)))
                for task in tasks:
                    table_warnings, table_report = task.get()
                    warnings.extend(table_warnings)
                    table_reports.append(table_report)
            finally:
                pool.terminate()

        # Stop timer
        stop = datetime.datetime.now()

        # Compose report
        report = {
            'time': round((stop - start).total_seconds(), 3),
            'valid': all(item['valid'] for item in table_reports),
            'error-count': sum(len(item['errors']) for item in table_reports),
            'table-count': len(tables),
            'tables': table_reports,
            'warnings': warnings,
            'preset': preset,
        }

        return report
    def run(self):
        if not self.platforms:
            raise RuntimeError("No enabled platform to build on")
        self.set_build_image()

        thread_pool = ThreadPool(len(self.platforms))
        result = thread_pool.map_async(self.select_and_start_cluster, self.platforms)

        try:
            result.get()
        # Always clean up worker builds on any error to avoid
        # runaway worker builds (includes orchestrator build cancellation)
        except Exception:
            thread_pool.terminate()
            self.log.info('build cancelled, cancelling worker builds')
            if self.worker_builds:
                ThreadPool(len(self.worker_builds)).map(
                    lambda bi: bi.cancel_build(), self.worker_builds)
            while not result.ready():
                result.wait(1)
            raise
        else:
            thread_pool.close()
            thread_pool.join()

        annotations = {'worker-builds': {
            build_info.platform: build_info.get_annotations()
            for build_info in self.worker_builds if build_info.build
        }}

        self._apply_repositories(annotations)

        labels = self._make_labels()

        fail_reasons = {
            build_info.platform: build_info.get_fail_reason()
            for build_info in self.worker_builds
            if not build_info.build or not build_info.build.is_succeeded()
        }

        workspace = self.workflow.plugin_workspace.setdefault(self.key, {})
        workspace[WORKSPACE_KEY_UPLOAD_DIR] = self.koji_upload_dir
        workspace[WORKSPACE_KEY_BUILD_INFO] = {build_info.platform: build_info
                                               for build_info in self.worker_builds}

        if fail_reasons:
            return BuildResult(fail_reason=json.dumps(fail_reasons),
                               annotations=annotations, labels=labels)

        return BuildResult.make_remote_image_result(annotations, labels=labels)
Example #49
    def _check_devices(self, printer):
        # Printer objects aren't threadsafe, so we need to protect calls to them.
        lock = threading.Lock()
        pool = None

        # Push the executables and other files to the devices; doing this now
        # means we can do this in parallel in the manager process and not mix
        # this in with starting and stopping workers.
        def setup_device(worker_number):
            d = self.create_driver(worker_number)
            serial = d._device.serial  # pylint: disable=protected-access

            def log_safely(msg, throttled=True):
                if throttled:
                    callback = printer.write_throttled_update
                else:
                    callback = printer.write_update
                with lock:
                    callback("[%s] %s" % (serial, msg))

            log_safely("preparing device", throttled=False)
            try:
                d._setup_test(log_safely)
                log_safely("device prepared", throttled=False)
            except (ScriptError, driver.DeviceFailure) as e:
                with lock:
                    _log.warning("[%s] failed to prepare_device: %s", serial, str(e))
            except KeyboardInterrupt:
                if pool:
                    pool.terminate()

        # FIXME: It would be nice if we knew how many workers we needed.
        num_workers = self.default_child_processes()
        num_child_processes = int(self.get_option("child_processes"))
        if num_child_processes:
            num_workers = min(num_workers, num_child_processes)
        if num_workers > 1:
            pool = ThreadPool(num_workers)
            try:
                pool.map(setup_device, range(num_workers))
            except KeyboardInterrupt:
                pool.terminate()
                raise
        else:
            setup_device(0)

        if not self._devices.prepared_devices():
            _log.error("Could not prepare any devices for testing.")
            return test_run_results.NO_DEVICES_EXIT_STATUS
        return test_run_results.OK_EXIT_STATUS
Example #50
  def validate_test_requirements(self, test_name, spec):
    """Determine whether or not the test requirements are satisfied.

    If not, record the reason as a skip or failure.
    This may throw exceptions, which are immediate failure.

    Args:
      test_name: [string] The name of the test.
      spec: [dict] The profile specification containing requirements.
            This argument will be pruned as values are consumed from it.

    Returns:
      True if requirements are satisfied, False if not.
    """
    if not 'api' in spec:
      raise ValueError('Test "{name}" is missing an "api" spec.'.format(
          name=test_name))
    requires = spec.pop('requires', {})
    configuration = requires.pop('configuration', {})
    our_config = vars(self.options)
    for key, value in configuration.items():
      if key not in our_config:
        message = ('Unknown configuration key "{0}" for test "{1}"'
                   .format(key, test_name))
        raise KeyError(message)
      if value != our_config[key]:
        reason = ('Skipped test {name} because {key}={want} != {have}'
                  .format(name=test_name, key=key,
                          want=value, have=our_config[key]))
        logging.warning(reason)
        with self.__lock:
          self.__skipped.append((test_name, reason))
        return False

    services = set(requires.pop('services', []))
    services.add(spec.pop('api'))

    if requires:
      raise ValueError('Unexpected fields in {name}.requires: {remaining}'
                       .format(name=test_name, remaining=requires))
    if spec:
      raise ValueError('Unexpected fields in {name} specification: {remaining}'
                       .format(name=test_name, remaining=spec))

    thread_pool = ThreadPool(len(services))
    thread_pool.map(self.wait_on_service, services)
    thread_pool.terminate()
    return True
    def __run__(self, result):
        self._is_run = True

        pool = ThreadPool(
            get_pool_size_of_value(self.config.ASYNC_TESTS, in_two=True),
        )

        try:
            for case in self.objects:
                pool.apply_async(target, args=(case, result))

            pool.close()
            pool.join()
        except ALLOW_RAISED_EXCEPTIONS:
            pool.terminate()
            raise
Example #52
def parse_playlists(pl_dict):
    result = {}
    try:
        pool = ThreadPool(PROCESSES)
        pfunc = parse_playlist
        args = pl_dict.keys()
        for i, (uri, streams) in enumerate(pool.imap_unordered(pfunc, args)):
            print "%d/%d" % (i + 1, len(args))
            result[uri] = (pl_dict[uri], streams)
    except Exception as e:
        print e
        return {}
    finally:
        pool.terminate()
        pool.join()

    return result
def _pool(func, f):
    global fail_file
    pool = ThreadPool(processes=threads)
    try:
        pool.map(func, f)
        pool.close()
        pool.join()
    except TypeError as e:
        print "Type error: {0}".format(e.message)
    except:
        print "Unexpected error:", sys.exc_info()[0]
        for f in _files:
            log(f + ' | Pool map error')
            fail_file += 1
            print f.encode('utf-8') + ' | ERROR: Pool map'
        pool.close()
        pool.join()

    pool.terminate()
Example #54
 def proc(self, buf):
     try:
         args = json.loads(buf)
         tsk_name = args['task']
         tsk_args = args['args']
         tsk_timeout = args['timeout']
         if tsk_name not in self._tasks:
             log_err(self, 'no such task %s', tsk_name)
             return
         pool = ThreadPool(processes=1)
         async_result = pool.apply_async(self._tasks[tsk_name].proc, (), tsk_args)
         try:
             return async_result.get(tsk_timeout)
         except TimeoutError:
             log_err(self, 'failed to process (timeout)')
             pool.terminate()
             return
     except:
         log_err(self, 'failed to process')
def parallel_run(ga:callable, runs:int):
    #logging.basicConfig(level=logging.DEBUG, format='%(message)s')
    #my_log()

    solutions = []
    N_process = 7
    hits = 0
    tp = ThreadPool(processes=N_process)

    results = mp.Queue(2*N_process)
    completedTasks = 0

    debug = DebugExecutions()

    for i in range(min(N_process, runs)):
        population = CryptoArithmetic.makeSpecimen(100)
        tp.apply_async(ga, args= (population,), callback=results.put)

    while completedTasks < runs:
        res = results.get()
        completedTasks += 1
        GA.debug(res['population'], res['best'])
        solution = debug.inspect(res)
        if solution:
            solutions.append(solution)

        if completedTasks >= runs:
            break
        population = CryptoArithmetic.makeSpecimen(100)
        tp.apply_async(ga, args= (population,), callback=results.put)
    tp.close()
    tp.terminate()

    hits = debug.hits
    hits_rate = round((hits/runs)*100, 2)
    logging.info("   CONVERGENCE of {}% ; {} times in {}!".format(hits_rate, hits, runs))
    logging.info("   Needed, in general, {} Generations.".format(debug.generation))
    for s in solutions:
        logging.debug(s)
        logging.debug(CryptoArithmetic.CA_str_values(s.chromosome))
    #GA.init.first_time = True
    return hits_rate
Example #56
class TaskPool:
    def __init__(self, num_task=10):
        self.__num_task = num_task
        self.__pool = Pool(self.__num_task)

    def add_task(self, func, *args, **kwargs):
        self.__pool.apply_async(func, args, kwargs)

    def join(self):
        self.__pool.close()
        self.__pool.join()
        self.__pool = Pool(self.__num_task)

    def halt(self):
        self.__pool.terminate()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.halt()
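A usage sketch for TaskPool above, assuming Pool here refers to a multiprocessing pool
(process- or thread-based) and that tasks are intentionally fire-and-forget, since the
apply_async return values are discarded:

def work(x, scale=1):
    # Placeholder task; with a process pool it must live at module level to be picklable.
    return x * scale

with TaskPool(num_task=4) as tasks:
    for i in range(8):
        tasks.add_task(work, i, scale=10)
    tasks.join()      # close and join the current pool, then swap in a fresh one
# leaving the block calls halt(), which terminates the (fresh) pool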
Example #57
def main(args):

    # Prerequisites for monitoring
    if not args.input:
        return 2
    else:
        try:
            os.mkdir(args.output)
        except OSError:
            pass
        finally:
            if not os.path.isdir(args.output):
                return 1

    # Bug in ThreadPool means ^C ineffective. Use ^\ instead
    pool = ThreadPool(len(args.input))
    pool.map(monitor, [(args, n, src) for (n, src) in enumerate(args.input)])
    pool.close()
    pool.terminate()
    pool.join()
    return 0
def fill_object_container(object_container, ids):
    def worker_task(id):
        directory_object = DirectoryObject()
        imdb_id = SharedCodeService.trakt.movies_fill_movie_object(directory_object, id)
        if imdb_id:
            directory_object.key = Callback(movie_menu, title=directory_object.title, imdb_id=imdb_id)
            return directory_object
        return -1

    if Platform.OS != 'Linux':
        thread_pool = ThreadPool(10)
        map_results = thread_pool.map(worker_task, ids)

        thread_pool.terminate()
        thread_pool.join()
    else:
        map_results = []
        for id in ids:
            map_results.append(worker_task(id))

    for map_result in map_results:
        if map_result and map_result != -1:
            object_container.add(map_result)