Example #1
    def crawl(self, max_pages=None):
        '''start the crawler in motion'''
        crawl_pool = ThreadPool(self.num_threads)

        logger.info('│ ├ Starting {} crawler threads...'.format(
            self.num_threads))

        crawl_pool.apply(self._crawl_thread, args=(self.start_url, ))

        empty_count = 0
        while True:
            crawl_pool.apply_async(self._crawl_thread)

            # if the queue is empty, sleep; otherwise reset the empty counter
            if len(self._urls_to_crawl) == 0:
                empty_count += 1
                time.sleep(self.empty_wait)
            else:
                empty_count = 0

            # if queue is still empty after sleeping/retrying x many times then exit
            if empty_count >= self.empty_retry_count:
                break

        crawl_pool.close()
        crawl_pool.join()

        logger.info('│ └ crawled {} links!'.format(len(
            self._urls_been_crawled)))
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("out_dir")
    parser.add_argument("protocol_xmls", nargs="+")
    args = parser.parse_args()

    interface_names = set()
    interfaces = []
    for protocol_xml in args.protocol_xmls:
        xml = xmltodict.parse(Path(protocol_xml).read_text())
        for interface in xml["protocol"]["interface"]:
            name = interface["@name"]
            interface_names.add(name)
            interfaces.append(interface)

    out_dir = Path(args.out_dir)
    os.makedirs(out_dir / "common", exist_ok=True)
    os.makedirs(out_dir / "client", exist_ok=True)
    os.makedirs(out_dir / "server", exist_ok=True)
    thread_pool = ThreadPool()
    interface_names = list(sorted(interface_names))
    for interface in interfaces:
        name = interface["@name"]
        thread_pool.apply(generate_interface_file,
                          [interface_names, out_dir, interface])

    thread_pool.close()
    thread_pool.join()
    generate_common_mod_file(out_dir / "common" / "mod.rs", interface_names)
    generate_mod_file(out_dir / "client" / "mod.rs", interface_names)
    generate_mod_file(out_dir / "server" / "mod.rs", interface_names)
    generate_mod_file(out_dir / "mod.rs", ["common", "client", "server"])
    subprocess.check_output(["rustfmt"] + list(out_dir.glob("**/*.rs")))
Example #3
def check_all(files, channel):
    to_build = {}
    index = 0
    pool = ThreadPool(processes=cpu_count() - 1)
    results = []

    for package in PACKAGES:
        new_package = True
        for py in PYTHON:
            for np in NUMPY:
                args = (files, to_build, index, package, channel, py, np)
                if new_package:
                    # Do the first by itself or conda build may throw errors
                    pool.apply(func=_check_thread, args=args)
                    new_package = False
                else:
                    # Do the rest in parallel
                    res = pool.apply_async(func=_check_thread, args=args)
                    results.append(res)
                index += 1

    for res in results:
        res.wait()

    return to_build
Example #4
 def run(self):
     p = Pool(self.pool_num_)
     p.apply(self.__worker)
     logger.d('closing sound pool.')
     p.close()
     p.terminate()
     p.join()
     logger.d('closed sound pool.')
Example #5
def writeData():
    # redisClient = redis.Redis(host='10.3.47.20', port='10000')

    # file = FileReaderClass(setKey + '.xml')
    file = FileReaderClass('/Users/zhengwei/Desktop/test3.xml')  # test source data
    datas = file.readDatas()
    pool = ThreadPool(processes=10)
    print(datas)
    for key in datas:
        pool.apply(__writeData__, (__redisClient, key, datas[key]))
Example #6
class MainFrame(wx.Frame):
 """Main frame."""
 def __init__(self):
  self.pool = ThreadPool()
  super(MainFrame, self).__init__(None, title = ' Operational Status')
  p = wx.Panel(self)
  default_style = wx.TE_RICH | wx.TE_READONLY | wx.TE_MULTILINE
  sizer_sms = wx.BoxSizer(wx.VERTICAL)
  sizer_sms.Add(wx.StaticText(p, label = '&SMS Status'), 0, wx.GROW)
  self.status_sms = wx.TextCtrl(p, style = default_style)
  sizer_sms.Add(self.status_sms, 1, wx.GROW)
  sizer_3r = wx.BoxSizer(wx.VERTICAL)
  sizer_3r.Add(wx.StaticText(p, label = '&3R Status'), 0, wx.GROW)
  self.status_3r = wx.TextCtrl(p, style = default_style)
  sizer_3r.Add(self.status_3r, 1, wx.GROW)
  sizer_email = wx.BoxSizer(wx.VERTICAL)
  sizer_email.Add(wx.StaticText(p, label = '&Email Status'), 0, wx.GROW)
  self.status_email = wx.TextCtrl(p, style = default_style)
  sizer_email.Add(self.status_email, 1, wx.GROW)
  s = wx.BoxSizer(wx.HORIZONTAL)
  s.AddMany(
   [
    (sizer_sms, 1, wx.GROW),
    (sizer_3r, 1, wx.GROW),
    (sizer_email, 1, wx.GROW)
   ]
  )
  self.timer = wx.Timer(self)
  self.Bind(wx.EVT_SHOW, self.on_show)
  self.Bind(wx.EVT_TIMER, self.on_update, self.timer)
  self.Bind(wx.EVT_CLOSE, self.on_close)
 
 def on_show(self, event):
  """Window has been shown."""
  self.timer.Start(10000)
  self.on_update(event)
  event.Skip()
 
 def _get_sms(self):
  wx.CallAfter(self.status_sms.SetValue, get_sms())
 
 def _get_email(self):
  wx.CallAfter(self.status_email.SetValue, get_email())
 
 def on_update(self, event):
  self.pool.apply(self._get_sms)
  self.pool.apply(self._get_email)
  
 
 def on_close(self, event):
  """Window is closing."""
  self.timer.Stop()
  event.Skip()
Example #7
def __clean__():
    # redisClient = redis.Redis(host='10.3.47.20', port='10000')

    set = __redisClient.smembers(__setKey)

    pool = ThreadPool(processes=10)
    for key in set:
        print(key)
        pool.apply(__deletekey__, (__redisClient, key))
        # redisClient.delete(key)
    # redisClient.delete(__setKey)
    pool.apply(__deletekey__, (__redisClient, __setKey))
Example #8
    def consume_keys_asynchronous_threads(self):
        """
        Work through the keys to look up asynchronously using multiple threads
        """
        print("\nLooking up " + self.input_queue.qsize().__str__() + " keys from " + self.source_name + "\n")
        jobs = multiprocessing.cpu_count()*4 if (multiprocessing.cpu_count()*4 < self.input_queue.qsize()) \
            else self.input_queue.qsize()

        pool = ThreadPool(jobs)

        for x in range(jobs):
            pool.apply(self.data_worker, [], self.worker_args)

        pool.close()
        pool.join()
Example #9
def find_path(start_page, end_page):
    path = Manager().dict()
    path[start_page] = [start_page]
    Q = deque([start_page])
    results = []
    while len(Q) != 0:  # Run a loop as long as there are things on the queue
        current_page = Q.popleft()
        links = get_pages(current_page)
        if links:
            # Limit the number of workers to at most 16
            workers = min(len(links), 16)
            pool = ThreadPool(processes=workers)  # Generate a threadpool
            for link in links:  # Iterate over the links and assign the workers to the task
                results.append(
                    pool.apply(generate_path,
                               args=(path, current_page, link, end_page)))
            pool.terminate()
            for result in results:
                if type(result) == list:
                    return result
                if result:  # Just to make sure that None values won't get to the queue
                    Q.append(result)
        else:
            continue
Example #10
def get_exchange_prices():
    funcs = [
        load_se_prices, load_ts_prices, load_sx_prices, load_ct_prices,
        load_cb_prices, load_btc_price
    ]

    all_prices = [
        globalvars.se_prices, globalvars.ts_prices, globalvars.sx_prices,
        globalvars.ct_prices, globalvars.cb_prices, globalvars.btc_price
    ]

    pool = ThreadPool()

    new_prices = []

    for i, f in enumerate(funcs):
        new_prices.append(pool.apply(f, (all_prices[i], )))

    for i, price in enumerate(new_prices):
        if i == 0:
            globalvars.se_prices = price
        elif i == 1:
            globalvars.ts_prices = price
        elif i == 2:
            globalvars.sx_prices = price
        elif i == 3:
            globalvars.ct_prices = price
        elif i == 4:
            globalvars.cb_prices = price
        elif i == 5:
            globalvars.btc_price = price

    pool.close()
    pool.join()
Example #11
    def consume_keys_asynchronous_threads(self):
        """
        Work through the keys to look up asynchronously using multiple threads
        """
        print("\nLooking up " + self.input_queue.qsize().__str__() +
              " keys from " + self.source_name + "\n")
        jobs = multiprocessing.cpu_count()*4 if (multiprocessing.cpu_count()*4 < self.input_queue.qsize()) \
            else self.input_queue.qsize()

        pool = ThreadPool(jobs)

        for x in range(jobs):
            pool.apply(self.data_worker, [], self.worker_args)

        pool.close()
        pool.join()
Example #12
def execute_command(command, path, shell=True, env=None, print_output=True):
    """Execute command via thread"""

    cmd_env = os.environ.copy()
    if env:
        cmd_env.update(env)
    if print_output:
        pipe = None
    else:
        pipe = subprocess.PIPE
    pool = ThreadPool()
    try:
        result = pool.apply(execute_subprocess_command,
                            args=(command, path),
                            kwds={'shell': shell, 'env': cmd_env, 'stdout': pipe, 'stderr': pipe})
        pool.close()
        pool.join()
        return result
    except (KeyboardInterrupt, SystemExit):
        if pool:
            pool.close()
            pool.terminate()
        print()
        cprint(' - Command failed', 'red')
        print()
        return 1
    except Exception as err:
        if pool:
            pool.close()
            pool.terminate()
        print()
        cprint(' - Command failed', 'red')
        print(err)
        print()
        return 1
Example #13
    def serve_forever(self):
        try:
            pool = ThreadPool(self.pool_size)

            while True:
                conn, addr = self.sock.accept()
                logging.debug('Connected | P: {} | PID: {}'.format(
                    multiprocessing.current_process().name, os.getpid()))
                pool.apply(self.request_handler,
                           args=(conn, addr, self.root_dir, True))

        except (KeyboardInterrupt, SystemExit) as e:
            pass
        finally:
            self.sock.close()
            logging.debug('Stopped | P: {} | PID: {}'.format(
                multiprocessing.current_process().name, os.getpid()))
Example #14
class TCPServer(object):
    """
    Base Server class.  Listens for requests
    and queues them to be handled by a worker thread.
    """
    def __init__(self, host=None, port=None, **kwargs):
        self.host = host if host else SERVER_DEFAULT
        self.port = port if port else PORT_DEFAULT
        self.commands = kwargs.get("commands", {})
        threads = kwargs.get("threads", NUM_THREADS)
        self.request_queue = ThreadPool(threads)
        self.socket = None
        self.make_conn()
        self.start_signal_handler()

    def make_conn(self):
        """
        Open a socket and bind it to our address and port.
        """
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.bind((self.host, self.port))
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.listen(5)

    def signal_handler(self, signal, frame):
        self.request_queue.join()
        self.socket.close()

    def start_signal_handler(self):
        signal.signal(signal.SIGINT, self.signal_handler)

    def listen(self):
        print "TCPServer is listening at %s:%d!" % (self.host, self.port)
        hf = HandlerFactory(self.commands)
        while True:
            logging.debug("TCPServer accepting requests.")
            client_sock, client_addr = self.socket.accept()
            client_host, client_port = client_addr
            logging.debug("TCPServer handling request from %s:%s." % (client_host, client_port))
            handler = RequestHandler(hf,
                                     client_host,
                                     client_port,
                                     client_sock)
            self.request_queue.apply(handler.handle, ())
        self.socket.close()
Example #15
def execute_command(command, path, **kwargs):
    """Execute command via thread

    .. py:function:: execute_command(command, path, shell=True, env=None, print_output=True)

    :param command: Command to run
    :type command: str or list[str]
    :param str path: Path to set as ``cwd``

    Keyword Args:
        shell (bool): Whether to execute subprocess as ``shell``
        env (dict): Enviroment to set as ``env``
        print_output (bool): Whether to print output

    :return: Command return code
    :rtype: int
    :raise ClowderError:
    """

    shell = kwargs.get('shell', True)
    env = kwargs.get('env', None)
    print_output = kwargs.get('print_output', True)

    cmd_env = os.environ.copy()
    if env:
        cmd_env.update(env)

    if print_output:
        pipe = None
    else:
        pipe = subprocess.PIPE

    pool = ThreadPool()

    try:
        result = pool.apply(execute_subprocess_command,
                            args=(command, path),
                            kwds={
                                'shell': shell,
                                'env': cmd_env,
                                'stdout': pipe,
                                'stderr': pipe
                            })
        pool.close()
        pool.join()
        return result
    except (KeyboardInterrupt, SystemExit):
        if pool:
            pool.close()
            pool.terminate()
        raise ClowderError(colored('- Command interrupted', 'red'))
    except Exception as err:
        if pool:
            pool.close()
            pool.terminate()
        raise ClowderError(
            colored('\n - Command failed', 'red') + str(err) + '\n')
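A minimal usage sketch for the function above, assuming `execute_subprocess_command` and `ClowderError` are defined elsewhere in the project as the snippet implies; the command, path, and environment values below are illustrative placeholders only:

# Hypothetical call site: run `git status` in a repository directory,
# suppressing output and passing one extra environment variable.
try:
    return_code = execute_command(['git', 'status'],
                                  '/path/to/repo',
                                  shell=False,
                                  env={'GIT_PAGER': 'cat'},
                                  print_output=False)
except ClowderError as err:
    print(err)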
Example #16
class IndexSearcher:
    def __init__(self, searchThreadNum, indexManager):
        self._indexManager = indexManager
        self._logger = Logger.Get('IndexSearcher')
        self._searchThreads = Pool(searchThreadNum)

    def Search(self, termIdList):
        indexSearchRequest = IndexSearchRequest(termIdList)
        if self._logger.isEnabledFor(logging.DEBUG):
            self._logger.debug('SearchPrepare, termIdList = %s'
                               % str(termIdList))
        self._Search(indexSearchRequest)
        return indexSearchRequest.result

    def _Search(self, indexSearchRequest):
        termIdList = indexSearchRequest.termIdList
        for termId in termIdList:
            (ret, retCode) = self._indexManager.Fetch(termId)
            if retCode == True:
                if self._logger.isEnabledFor(logging.DEBUG):
                    self._logger.debug('fetch term %d success' % termId)
                indexSearchRequest.indexHandler.Add(ret)
            else:
                if ret == None:
                    if self._logger.isEnabledFor(logging.DEBUG):
                        self._logger.debug('term %d not exist' % termId)
                    indexSearchRequest.result = None
                    return
                else:
                    if self._logger.isEnabledFor(logging.DEBUG):
                        self._logger.debug('fetch term %d from diskio' % termId)
                    indexSearchRequest.waitingIORequests.append(ret)
        self._searchThreads.apply(self._Searching, (indexSearchRequest,))

    def _Searching(self, indexSearchRequest):
        waitingRequests = indexSearchRequest.waitingIORequests
        indexHandler = indexSearchRequest.indexHandler
        for readRequest in waitingRequests:
            readRequest.Wait()
            indexHandler.Add(readRequest.result)
        if self._logger.isEnabledFor(logging.DEBUG):
            self._logger.debug('all posting list are ready, request id: %s'
                               % indexSearchRequest.id)
        indexSearchRequest.result = indexHandler.Intersect()
Example #17
class MyThreadPool:
    """ Pool of threads consuming tasks from a queue """
    def __init__(self, num_threads):
        self._pool = ThreadPool(num_threads)
        self._ClientMap = {}
        self._db = DB.DB()

    def add_task(self, socket, id):
        """ Add a task to the queue """
        self._ClientMap[id] = socket
        self._pool.apply(self.search, (id, ))

    def search(self, id):
        try:
            ans = None
            self._db.connect()
            self._db.execute("UPDATE public.ex2 SET status = 1 WHERE id = %s",
                             (id, ))
            url = self._db.execute(
                "SELECT public.ex2.url FROM public.ex2 WHERE public.ex2.id= %s",
                (id, ))
            string = self._db.execute(
                "SELECT public.ex2.value FROM public.ex2 WHERE public.ex2.id= %s",
                (id, ))
            r = requests.get(url)
            html = r.text
            soup = BeautifulSoup(html)
            de_string = string.decode('UTF-8')
            found = soup.findAll(text=re.compile(de_string))
            if len(found) == 0:
                ans = False
            else:
                ans = True
        except Exception as exp:
            print(exp)
        self._ClientMap[id].send("ans: " + str(ans))
        self._db.execute(
            "UPDATE public.ex2 SET status = 2, finish_timestamp = current_timestamp WHERE id = %s",
            (id, ))
        self._ClientMap[id].shutdown()
        self._ClientMap[id].close()
        self._ClientMap.pop(id)
Example #18
class AccountsUpdaterDemon:
    def __init__(self,
                 logger,
                 collection: AccountsCollection,
                 interval_in_seconds: int = 10 * 60):
        self._logger = logger
        self._collection: AccountsCollection = collection
        self._subscribers_ids = []
        self._is_running = False
        self._interval_in_seconds = interval_in_seconds
        self._demon_thread = None
        self._pool = ThreadPool(4)

    def subscribe_id(self, account_id: str) -> None:
        self._subscribers_ids.append(account_id)

    def unsubscribe_id(self, account_id: str) -> None:
        self._subscribers_ids.remove(account_id)

    def start(self) -> None:
        if self._is_running:
            raise Exception('Demon is already running')
        self._is_running = True
        self._demon_thread = threading.Thread(target=self._run)
        self._demon_thread.setDaemon(True)
        self._logger.info('Starting AccountUpdaterDemon')
        self._demon_thread.start()

    def stop(self) -> None:
        self._is_running = False

    def _run(self) -> None:
        if not self._is_running:
            return
        self._logger.info('Starting update holds')
        self._update_balances()
        threading.Timer(self._interval_in_seconds, self._run).start()

    def _update_balances(self) -> None:
        for subscriber_id in self._subscribers_ids:
            self._pool.apply(
                lambda: self._collection.substract_hold(subscriber_id))
Example #19
def _run_processing_jobs(parameter_dict, reader, n_processes,
                         process_batch_size):
    """Creates document batches and dispatches them to processing nodes.

    :param parameter_dict: dataset import's parameters.
    :param reader: dataset importer's document reader.
    :param n_processes: size of the multiprocessing pool.
    :param process_batch_size: the number of documents to process at any given time by a node.
    :type parameter_dict: dict
    :type n_processes: int
    :type process_batch_size: int
    """
    from django import db
    db.connections.close_all()

    if parameter_dict.get('remove_existing_dataset', False):
        _remove_existing_dataset(parameter_dict)

    import_job_lock = Lock()

    process_pool = Pool(processes=n_processes,
                        initializer=_init_pool,
                        initargs=(import_job_lock, ))
    batch = []

    for document in reader.read_documents(**parameter_dict):
        batch.append(document)

        # Send documents when they reach their batch size and empty it.
        if len(batch) == process_batch_size:
            process_pool.apply(_processing_job, args=(batch, parameter_dict))
            batch = []

    # Send the final documents that did not reach the batch size.
    if batch:
        process_pool.apply(_processing_job, args=(batch, parameter_dict))

    process_pool.close()
    process_pool.join()

    _complete_import_job(parameter_dict)
Example #20
 def talkback(self):
     # Function that creates threads depending on the number of calls to say().
     # Each thread makes a call to the GetOutput class, where the actual execution occurs.
     if self.words == "":
         self.say()
         pool = ThreadPool(1)
         for n in range(self.no):
             if self.qu.empty():
                 break
             else:
                 self.status = pool.apply(GetOutput, (self.qu.get(), ))
                 self.status_output += self.status.output
                 self.status_error += self.status.error
     else:
         pool = ThreadPool(1)
         for n in range(self.no):
             if self.qu.empty():
                 break
             else:
                 self.status = pool.apply(GetOutput, (self.qu.get(), ))
                 self.status_output += self.status.output
                 self.status_error += self.status.error
Example #21
class TCPServer(object):
    def __init__(self, host=None, port=None, **kwargs):
        self.host = host if host else SERVER_DEFAULT
        self.port = port if port else PORT_DEFAULT
        self.commands = kwargs.get("commands", {})
        threads = kwargs.get("threads", NUM_THREADS)
        self.request_queue = ThreadPool(threads)
        self.socket = None
        self.make_conn()
        self.start_signal_handler()

    def make_conn(self):
        """
        Open a socket and bind it to the host and port.
        """
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.bind((self.host, self.port))
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.listen(5)

    def signal_handler(self, signal, frame):
        self.request_queue.join()
        self.socket.close()

    def start_signal_handler(self):
        signal.signal(signal.SIGINT, self.signal_handler)

    def listen(self):
        print "TCPServer is listening at %s:%d!" % (self.host, self.port)
        hf = HandlerFactory(self.commands)
        while True:
            logging.debug("TCPServer accepting requests.")
            client_sock, client_addr = self.socket.accept()
            client_host, client_port = client_addr
            logging.debug("TCPServer handling request from %s:%s." %
                          (client_host, client_port))
            handler = RequestHandler(hf, client_host, client_port, client_sock)
            self.request_queue.apply(handler.handle, ())
        self.socket.close()
Example #22
def _run_processing_jobs(parameter_dict, reader, n_processes, process_batch_size):
    """Creates document batches and dispatches them to processing nodes.

    :param parameter_dict: dataset import's parameters.
    :param reader: dataset importer's document reader.
    :param n_processes: size of the multiprocessing pool.
    :param process_batch_size: the number of documents to process at any given time by a node.
    :type parameter_dict: dict
    :type n_processes: int
    :type process_batch_size: int
    """
    from django import db
    db.connections.close_all()

    if parameter_dict.get('remove_existing_dataset', False):
        _remove_existing_dataset(parameter_dict)

    import_job_lock = Lock()

    process_pool = Pool(processes=n_processes, initializer=_init_pool, initargs=(import_job_lock,))
    batch = []

    for document in reader.read_documents(**parameter_dict):
        batch.append(document)

        # Send documents when they reach their batch size and empty it.
        if len(batch) == process_batch_size:
            process_pool.apply(_processing_job, args=(batch, parameter_dict))
            batch = []

    # Send the final documents that did not reach the batch size.
    if batch:
        process_pool.apply(_processing_job, args=(batch, parameter_dict))

    process_pool.close()
    process_pool.join()

    _complete_import_job(parameter_dict)
Example #23
def processVideoThreaded(video, rho, threshold):
    cap = cv2.VideoCapture(video)
    image_queue = deque()
    pool = ThreadPool(processes=cv2.getNumberOfCPUs() * 2)
    results = []
    total = 0
    while True:
        got_image, image = cap.read()
        if not (got_image):
            break
        total += 1
        results.append(pool.apply(processImage, args=(image, rho, threshold)))
    print results
    hazed = results.count(True)
    cap.release()
    hazed_total = float(hazed) / total * 100
    print total, hazed, hazed_total
    if hazed_total > 0:
        return True
    return False
Example #24
def download_image_thread(url_list,
                          our_dir,
                          num_processes,
                          remove_bad=False,
                          Async=True):
    '''
    Download images using multiple threads.
    :param url_list: image url list
    :param our_dir: directory in which to save the images
    :param num_processes: number of threads to start
    :param remove_bad: whether to drop images that failed to download
    :param Async: whether to download asynchronously
    :return: list of file paths of the downloaded images
    '''
    # start the thread pool
    if not os.path.exists(our_dir):
        os.makedirs(our_dir)
    pool = ThreadPool(processes=num_processes)
    thread_list = []
    for image_url in url_list:
        if Async:
            out = pool.apply_async(func=download_image,
                                   args=(image_url, our_dir))  # asynchronous
        else:
            out = pool.apply(func=download_image,
                             args=(image_url, our_dir))  # synchronous
        thread_list.append(out)

    pool.close()
    pool.join()
    # collect the results
    image_list = []
    if Async:
        for p in thread_list:
            image = p.get()  # get() blocks until the result is ready
            image_list.append(image)
    else:
        image_list = thread_list
    if remove_bad:
        image_list = [i for i in image_list if i is not None]
    return image_list
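A small usage sketch for the downloader above, assuming a `download_image(url, out_dir)` helper exists as the example references and returns the saved file path (or None on failure); the URLs and output directory are placeholders:

# Hypothetical call site: fetch a few images with four worker threads.
urls = [
    'https://example.com/a.jpg',
    'https://example.com/b.jpg',
]
saved_paths = download_image_thread(urls,
                                    './images',
                                    num_processes=4,
                                    remove_bad=True,
                                    Async=True)
print(saved_paths)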
Example #25
def find_shortest_path(start, end):
    path = Manager().dict()
    path[start] = [start]
    Q = deque([start])

    while len(Q) != 0:
        page = Q.popleft()

        links = get_links(page)
        pool = ThreadPool(processes=len(links))
        results = [
            pool.apply(thread_populate, args=(path, page, link, end))
            for link in links
        ]
        pool.terminate()
        for result in results:
            if type(result) == list:
                return result
            Q.append(result)

    return None
Example #26
def processVideoThreaded(video, lower, upper, threshold):
    cap = cv2.VideoCapture(video)
    image_queue = deque()
    threadn = cv2.getNumberOfCPUs()
    pool = ThreadPool(processes=threadn)
    results = []
    total = 0
    while True:
        got_image, image = cap.read()
        if not (got_image):
            break
        total += 1
        results.append(
            pool.apply(processImageVideoThreaded, (image, lower, upper)))
    #print results
    blurred = results.count(True)
    cap.release()
    blurred_total = float(blurred) / total * 100
    #print total,blurred,blurred_total
    if blurred_total > threshold:
        return True
    return False
Example #27
def add_exchange_prices(coin_name):
    buy_price_se = 0.0
    buy_price_ts = 0.0
    buy_price_sx = 0.0
    buy_price_ct = 0.0
    buy_price_cb = 0.0

    funcs = [
        add_se_prices, add_ts_prices, add_sx_prices, add_ct_prices,
        add_cb_prices
    ]

    all_prices = [
        globalvars.se_prices, globalvars.ts_prices, globalvars.sx_prices,
        globalvars.ct_prices, globalvars.cb_prices
    ]

    pool = ThreadPool()

    new_prices = []

    for i, f in enumerate(funcs):
        new_prices.append(pool.apply(f, (all_prices[i], coin_name)))

    for i, price in enumerate(new_prices):
        if i == 0:
            buy_price_se = price
        elif i == 1:
            buy_price_ts = price
        elif i == 2:
            buy_price_sx = price
        elif i == 3:
            buy_price_ct = price
        elif i == 4:
            buy_price_cb = price

    pool.close()
    pool.join()
    return buy_price_se, buy_price_ts, buy_price_sx, buy_price_ct, buy_price_cb
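Because pool.apply() blocks and returns each result directly, new_prices comes back in the same order as funcs, so the if/elif ladder above could arguably be replaced by tuple unpacking. A hedged sketch of that variant, keeping the same assumed add_* functions and globalvars module:

# Equivalent collection step, relying on apply() returning results in call order.
new_prices = [pool.apply(f, (all_prices[i], coin_name)) for i, f in enumerate(funcs)]
buy_price_se, buy_price_ts, buy_price_sx, buy_price_ct, buy_price_cb = new_prices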
Example #28
def key_generation_test(num_threads, num_users, file_size, policy_size,
                        num_attributes):
    with open(test_users_file_name.format(policy_size, num_attributes),
              'r') as users_file:
        users_meta = json.load(users_file)

    transaction_times = []
    users = []

    def save_user(user_meta):
        user_object = {
            'first_name': user_meta['attributes'][0],
            'last_name': user_meta['attributes'][1],
            'attributes': user_meta['attributes'][2:]
        }
        start = time.time()
        result = requests.post('{}/user'.format(il_upstream_url),
                               json=user_object,
                               headers=headers,
                               auth=auth)
        end = time.time()
        print(result.status_code)
        transaction_time = end - start
        contents = result.json()
        transaction_times.append(transaction_time)
        users.append({
            'user_id': contents['user_id'],
            'private_key': contents['private_key'],
            'policy': user_meta['policy'],
            'attributes': user_meta['attributes']
        })

    pool = ThreadPool(num_users)

    test_start_date = datetime.datetime.utcnow()

    for user_meta in users_meta:
        pool.apply(save_user, (user_meta, ))

    pool.close()
    pool.join()
    test_end_date = datetime.datetime.utcnow()

    with open(users_file_name.format(policy_size, num_attributes),
              'w') as users_file:
        print(len(users))
        json.dump(users, users_file)

    with open(
            keygen_file_name.format(num_threads, num_users, file_size,
                                    policy_size, num_attributes),
            'w') as transaction_times_file:
        total_time = sum(transaction_times)
        num_transactions = len(transaction_times)
        avg_txn_time = total_time / float(num_transactions)
        rate = 1 / avg_txn_time
        transaction_times_file.write(
            'Key Generation Test - {} Thread, {} User, {} kb, {} attributes in policy, {} attributes in key\n'
            .format(num_threads, num_users, file_size, policy_size,
                    num_attributes))
        transaction_times_file.write(
            'Test start: {}\n'.format(test_start_date))
        transaction_times_file.write('Test end: {}\n'.format(test_end_date))
        transaction_times_file.write(
            'Total number of transactions: {}\n'.format(num_transactions))
        transaction_times_file.write('Total time: {}\n'.format(total_time))
        transaction_times_file.write(
            'Average transaction time: {}\n'.format(avg_txn_time))
        transaction_times_file.write(
            'Transactions per second: {}\n'.format(rate))
        transaction_times_file.write('\n')
        transaction_times_file.writelines(
            ["{}\n".format(txn_time) for txn_time in transaction_times])
Example #29
def query_encounter_test(num_threads, num_users, file_size, policy_size,
                         num_attributes):
    with open(encounter_ids_file_name, 'r') as encounter_ids_file:
        encounter_ids = json.load(encounter_ids_file)
    with open(users_file_name.format(policy_size, num_attributes),
              'r') as users_file:
        users = json.load(users_file)

    transaction_times = []
    encounters = []

    def query(encounter_id, user):
        payload = {'private_key': user['private_key']}
        start = time.time()
        response = requests.post('{}/encounters/{}'.format(
            il_upstream_url, encounter_id),
                                 headers=headers,
                                 auth=auth,
                                 json=payload)
        end = time.time()
        print(response.status_code)
        transaction_time = end - start
        transaction_times.append(transaction_time)
        encounters.append(response.json())

    pool = ThreadPool(num_users)
    test_start_date = datetime.datetime.utcnow()
    for encounter_id, user in zip(encounter_ids, users):
        pool.apply(query, (encounter_id, user))

    pool.close()
    pool.join()
    test_end_date = datetime.datetime.utcnow()

    with open(encounters_file_name, 'w') as encounters_file:
        print(len(encounters))
        json.dump(encounters, encounters_file)

    with open(
            query_file_name.format(num_threads, num_users, file_size,
                                   policy_size, num_attributes),
            'w') as transaction_times_file:
        total_time = sum(transaction_times)
        num_transactions = len(transaction_times)
        avg_txn_time = total_time / float(num_transactions)
        rate = 1 / avg_txn_time
        transaction_times_file.write(
            'Query Encounter Test - {} Thread, {} User, {} kb, {} attributes in policy, {} attributes in key\n'
            .format(num_threads, num_users, file_size, policy_size,
                    num_attributes))
        transaction_times_file.write(
            'Test start: {}\n'.format(test_start_date))
        transaction_times_file.write('Test end: {}\n'.format(test_end_date))
        transaction_times_file.write(
            'Total number of transactions: {}\n'.format(num_transactions))
        transaction_times_file.write('Total time: {}\n'.format(total_time))
        transaction_times_file.write(
            'Average transaction time: {}\n'.format(avg_txn_time))
        transaction_times_file.write(
            'Transactions per second: {}\n'.format(rate))
        transaction_times_file.write('\n')
        transaction_times_file.writelines(
            ["{}\n".format(txn_time) for txn_time in transaction_times])
Example #30
from threading import current_thread


__author__ = 'zipee'

from multiprocessing.pool import ThreadPool

def work(a):
    thread_name = current_thread().name
    print(f"this is thread: {thread_name}")
    return f"thread: {thread_name} return {a}"

if __name__ == '__main__':
    pool = ThreadPool(3)
    for i in range(10):
        result = pool.apply(work, (i,))
        print(result)
    print("apply all done")

###########################################
    results = []
    for i in range(10):
        result = pool.apply_async(work, (i,))
        results.append(result)
    for result in results:
        print(result.get())
    print("apply all done")
###########################################
    results = pool.map(work, range(10))
    print(results)
###########################################
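The demo above contrasts apply, apply_async, and map. As a further hedged sketch (not part of the original snippet), the same pool API also supports use as a context manager and starmap() for multi-argument calls; the worker and variable names below are illustrative:

from multiprocessing.pool import ThreadPool

def add(a, b):
    return a + b

if __name__ == '__main__':
    # The with-block terminates the pool on exit; starmap unpacks each tuple
    # into positional arguments for add().
    with ThreadPool(3) as pool:
        sums = pool.starmap(add, [(i, i * 10) for i in range(10)])
    print(sums)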
Example #31
class AlgoGenetic:
    def __init__(self,
                 population_size: int,
                 genome_size: int,
                 mutate_ratio: float,
                 crossover_ratio: float,
                 factory: IndividualFactory,
                 range_min=-1,
                 range_max=1):
        assert genome_size != 0
        self.population_size = population_size
        self.genome_size = genome_size
        self.mutate_ratio = mutate_ratio
        self.crossover_ratio = crossover_ratio
        self.factory = factory
        self.range_min = range_min
        self.range_max = range_max

        self.pool = ThreadPool(processes=4)

    @abstractmethod
    def init_population(self) -> population:
        pass

    @abstractmethod
    def select_mates(self, potential_mates_pool) -> [Individual, Individual]:
        """
        Select two different mates based on their fitness
        :param potential_mates_pool: the population you want individual to be pick up from
        :return: a couple from potential_mates_pool, ready to reproduce
        """
        pass

    @abstractmethod
    def reproduction(self, mate1, mate2) -> [Individual, Individual]:
        """
        Simulate the reproduction from 2 mates
        no mutation is done, therefore no gene is lost
        :return: 2 children from the reproduction
        """
        pass

    @abstractmethod
    def mutation(self, population) -> population:
        """
        Mutate a population by changing his genome
        this method keep the population size
        :return: a mutated population
        """
        pass

    def step(self, previous_population: population) -> population:
        """
        Bring the population a step forward evolution
        """
        children = []
        while len(children) < self.population_size:
            mate1, mate2 = self.select_mates(previous_population)
            child1, child2 = self.reproduction(mate1, mate2)
            children.append(child1)
            children.append(child2)
        if len(children) == self.population_size + 1:
            children.pop()
        return self.mutation(children)

    def step_paralleled(self, previous_population: population) -> population:
        return self.pool.apply(self.step, [previous_population])
Example #32
class OrderedEnqueuer(SequenceEnqueuer):
    """Builds a Enqueuer from a Sequence.

    Used in `fit_generator`, `evaluate_generator`, `predict_generator`.

    # Arguments
        sequence: A `keras.utils.data_utils.Sequence` object.
        use_multiprocessing: use multiprocessing if True, otherwise threading
        shuffle: whether to shuffle the data at the beginning of each epoch
    """
    def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
        self.sequence = sequence
        self.use_multiprocessing = use_multiprocessing
        self.shuffle = shuffle
        self.workers = 0
        self.executor = None
        self.queue = None
        self.run_thread = None
        self.stop_signal = None

    def is_running(self):
        return self.stop_signal is not None and not self.stop_signal.is_set()

    def start(self, workers=1, max_queue_size=10):
        """Start the handler's workers.

        # Arguments
            workers: number of worker threads
            max_queue_size: queue size
                (when full, workers could block on `put()`)
        """
        if self.use_multiprocessing:
            self.executor = multiprocessing.Pool(workers)
        else:
            self.executor = ThreadPool(workers)
        self.workers = workers
        self.queue = queue.Queue(max_queue_size)
        self.stop_signal = threading.Event()
        self.run_thread = threading.Thread(target=self._run)
        self.run_thread.daemon = True
        self.run_thread.start()

    def _run(self):
        """Function to submit request to the executor and queue the `Future` objects."""
        sequence = list(range(len(self.sequence)))
        self._send_sequence()  # Share the initial sequence
        while True:
            if self.shuffle:
                random.shuffle(sequence)
            for i in sequence:
                if self.stop_signal.is_set():
                    return
                self.queue.put(self.executor.apply_async(get_index, (i, )),
                               block=True)
            # Call the internal on epoch end.
            self.sequence.on_epoch_end()
            self._send_sequence()  # Update the pool

    def get(self):
        """Creates a generator to extract data from the queue.

        Skip the data if it is `None`.

        # Returns
            Generator yielding tuples (inputs, targets)
                or (inputs, targets, sample_weights)
        """
        try:
            while self.is_running():
                inputs = self.queue.get(block=True).get()
                if inputs is not None:
                    yield inputs
        except Exception as e:
            self.stop()
            raise StopIteration(e)

    def _send_sequence(self):
        """Send current Sequence to all workers."""
        global _SHARED_SEQUENCE
        _SHARED_SEQUENCE = self.sequence  # For new processes that may spawn
        if not self.use_multiprocessing:
            # Threads are from the same process so they already share the sequence.
            return
        _SHARED_DICT.clear()
        while len(
                _SHARED_DICT) < self.workers and not self.stop_signal.is_set():
            # Ask the pool to update till everyone is updated.
            self.executor.apply(_update_sequence, args=(self.sequence, ))
        # We're done with the update

    def stop(self, timeout=None):
        """Stops running threads and wait for them to exit, if necessary.

        Should be called by the same thread which called `start()`.

        # Arguments
            timeout: maximum time to wait on `thread.join()`
        """
        self.stop_signal.set()
        with self.queue.mutex:
            self.queue.queue.clear()
            self.queue.unfinished_tasks = 0
            self.queue.not_full.notify()
        self.executor.close()
        self.executor.join()
        self.run_thread.join(timeout)
Example #33
#!/usr/bin/env python
"""
Parallel clusterized gridftp sync
"""

import os
import time
import subprocess
from functools import partial
from multiprocessing.pool import ThreadPool

source = '/mnt/lfs4/simprod/dagtemp2/20009'
dest = 'gsiftp://gridftp-scratch.icecube.wisc.edu/local/simprod/20009'

pool = ThreadPool(100)

for root,dirs,files in os.walk(source):
    for f in files:
        s = os.path.join(root,f)
        d = s.replace(source,dest)
        cmd = 'globus-url-copy -sync -v -cd -rst file://%s %s'%(s,d)
        pool.apply(partial(subprocess.call,cmd,shell=True))
pool.close()
pool.join()
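Worth noting: pool.apply() blocks until each copy finishes, so the script above effectively runs the transfers one at a time despite the 100-thread pool. A minimal variant using apply_async, under the same source/dest assumptions, that actually overlaps the transfers:

# Hedged sketch: submit every copy first, then wait for them all to finish.
async_pool = ThreadPool(100)
results = []
for root, dirs, files in os.walk(source):
    for f in files:
        s = os.path.join(root, f)
        d = s.replace(source, dest)
        cmd = 'globus-url-copy -sync -v -cd -rst file://%s %s' % (s, d)
        results.append(async_pool.apply_async(partial(subprocess.call, cmd, shell=True)))
async_pool.close()
async_pool.join()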
Example #34
def analyze_airfoil(x,
                    y_u,
                    y_l,
                    cl,
                    rey,
                    mach=0,
                    xf=None,
                    pool=None,
                    show_output=False):
    """
    Analyze an airfoil at a given lift coefficient for given Reynolds and Mach numbers using XFoil.

    Parameters
    ----------
    x : array_like
        Airfoil x-coordinates
    y_u, y_l : array_like
        Airfoil upper and lower curve y-coordinates
    cl : float
        Target lift coefficient
    rey, mach : float
        Reynolds and Mach numbers
    xf : XFoil, optional
        An instance of the XFoil class to use to perform the analysis. Will be created if not given
    pool : multiprocessing.pool.ThreadPool, optional
        An instance of the multiprocessing.pool.ThreadPool class used to run the xfoil_worker. Will be created if not given
    show_output : bool, optional
        If True, a debug string will be printed after analyses. False by default.

    Returns
    -------
    cd, cm : float or np.nan
        Drag and moment coefficients of the airfoil at specified conditions, or nan if XFoil did not run successfully
    """
    # If the lower and upper curves swap, this is a bad, self-intersecting airfoil. Return NaN immediately.
    if np.any(y_l > y_u):
        return np.nan
    else:
        clean_xf = False
        if xf is None:
            xf = XFoil()
            xf.print = show_output
            clean_xf = True

        clean_pool = False
        if pool is None:
            pool = ThreadPool(processes=1)
            clean_pool = True

        xf.airfoil = Airfoil(x=np.concatenate((x[-1:0:-1], x)),
                             y=np.concatenate((y_u[-1:0:-1], y_l)))
        xf.Re = rey
        xf.M = mach
        xf.max_iter = 100
        xf.n_crit = 0.1
        cd, cm = pool.apply(xfoil_worker, args=(xf, cl))

    if clean_xf:
        del xf
    if clean_pool:
        del pool

    return cd, cm, None if clean_xf else xf
Example #35
class HTTPFetchPool:
	_num_thread = 5
	_retry_thread = 50
	_retry_limit = 10
	_thread_pool = None
	_retry_pool = None
	_timeout = 3
	_retry_timeout = 10
	_retry_sleep = 3

	def __init__ (self, num_thread=5, retry_thread=50, retry_limit=10):
		self._num_thread = num_thread
		self._retry_thread = retry_thread
		self._retry_limit = retry_limit
		
	def start (self):
		self._thread_pool = ThreadPool(self._num_thread)
		self._retry_pool = ThreadPool(self._retry_thread)

	def addAsyncJob (self, url, headers=None, data=None, callback=None, *args,
		**kwargs): 
		kwargs['_fetcher'] = self
		kwargs['_url'] = url
		kwargs['_header'] = headers
		kwargs['_data'] = data
		kwargs['_callback'] = callback
		kwargs['_async'] = True
		return self._thread_pool.apply_async(self.download, args, kwargs
				, self.middleman) 
	
	def addSyncJob (self, url, headers=None, data=None, callback=None, *args,
		**kwargs): 
		kwargs['_fetcher'] = self
		kwargs['_url'] = url
		kwargs['_header'] = headers
		kwargs['_data'] = data
		kwargs['_callback'] = callback
		kwargs['_async'] = False
#		try:
		result = self._thread_pool.apply(self.download, args, kwargs)
#		except Exception as err:
#			result.status = -1
#			result.exception = err
#			print err.reason
#			raise
		return self.middleman(result)
	
	def addRetryJob (self, *args, **kwargs):
		if kwargs['_async']:
			return self._retry_pool.apply_async(self.retry, args, kwargs, self.middleman)
		else:
			return self._retry_pool.apply(self.retry, args, kwargs)
	
	@classmethod
	def middleman (cls, result):
		print "Middleman"
		callback = result.kwargs['_callback']
		if result.status == -1 and result.retry_asyncresult != None:
			return result

		if callback:
			result = callback(result)

		return result

	def stop (self):
		self._thread_pool.close()
		self._thread_pool.join()
		self._retry_pool.close()
		self._retry_pool.join()

	def retry (self, *args, **kwargs):
		url = kwargs['_url']
		headers = kwargs['_header']
		data = kwargs['_data']

		print "Start retry " + url

		result = HTTPFetchResult()
		
		retrycount = 0
		
		while retrycount < self._retry_limit:
			retrycount = retrycount + 1
			try:
				result = doDownload(url, headers, data, self._retry_timeout)
			except (HTTPError, URLError) as e:
				print "Error %d/%d" % (retrycount, self._retry_limit)
				print e.reason
				result.status = -1
				result.exception = e
				result.retry_asyncresult = None
			except Exception as err:
				print "Fatal Error " + url + " " + err.reason
				result.status = -1
				result.exception = err
				result.retry_asyncresult = None
				result.args = args
				result.kwargs = kwargs
				raise
#				return result
			else:
				result.status = 0
				result.exception = None
				result.retry_asyncresult = None
				break
			time.sleep(self._retry_sleep)
		
		if result.status < 0:
			print "Failed after %d Retries: %s" % (self._retry_limit, url)
			raise 

		result.args = args
		result.kwargs = kwargs
		return result

	def download (self, *args, **kwargs):
		url = kwargs['_url']
		headers = kwargs['_header']
		data = kwargs['_data']

		print "Start download " + url

		result = HTTPFetchResult()
		
		try:
			result = doDownload(url, headers, data, self._timeout)
		except Exception as err:
			print "Moved to Retry Pool"
			result.status = -1
			result.exception = err
			if kwargs['_async']:
				result.retry_asyncresult = self.addRetryJob(*args, **kwargs)
			else:
				result = self.addRetryJob(*args, **kwargs)
		
		result.args = args
		result.kwargs = kwargs
		return result
Example #36
def key_generation_test(num_threads, num_users, file_size, policy_size,
                        num_attributes):
    with open(test_users_file_name.format(policy_size, num_attributes),
              'r') as users_file:
        users_meta = json.load(users_file)

    transaction_times = []
    users = []

    def save_user_meta(user_meta):
        start = time.time()
        user_id, private_key, status_code = save_user(user_meta['attributes'])
        end = time.time()
        print(status_code)
        transaction_time = end - start
        transaction_times.append(transaction_time)
        users.append({
            'user_id': user_id,
            'private_key': private_key,
            'policy': user_meta['policy'],
            'attributes': user_meta['attributes']
        })

    pool = ThreadPool(num_users)

    test_start_date = datetime.datetime.utcnow()

    for user_meta in users_meta:
        pool.apply(save_user_meta, (user_meta, ))

    pool.close()
    pool.join()
    test_end_date = datetime.datetime.utcnow()

    with open(users_file_name.format(policy_size, num_attributes),
              'w') as users_file:
        print(len(users))
        json.dump(users, users_file)

    with open(
            keygen_file_name.format(num_threads, num_users, file_size,
                                    policy_size, num_attributes),
            'w') as transaction_times_file:
        total_time = sum(transaction_times)
        num_transactions = len(transaction_times)
        avg_txn_time = total_time / float(num_transactions)
        rate = 1 / avg_txn_time
        transaction_times_file.write(
            'Key Generation Test - {} Thread, {} User, {} kb, {} attributes in policy, {} attributes in key\n'
            .format(num_threads, num_users, file_size, policy_size,
                    num_attributes))
        transaction_times_file.write(
            'Test start: {}\n'.format(test_start_date))
        transaction_times_file.write('Test end: {}\n'.format(test_end_date))
        transaction_times_file.write(
            'Total number of transactions: {}\n'.format(num_transactions))
        transaction_times_file.write('Total time: {}\n'.format(total_time))
        transaction_times_file.write(
            'Average transaction time: {}\n'.format(avg_txn_time))
        transaction_times_file.write(
            'Transactions per second: {}\n'.format(rate))
        transaction_times_file.write('\n')
        transaction_times_file.writelines(
            ["{}\n".format(txn_time) for txn_time in transaction_times])
Example #37
def save_encounter_test(num_threads, num_users, file_size, policy_size,
                        num_attributes):
    with open(test_encounters_file_name.format(file_size),
              'r') as encounters_file:
        encounters = json.load(encounters_file)
    with open(users_file_name.format(policy_size, num_attributes),
              'r') as users_file:
        users = json.load(users_file)

    transaction_times = []
    encounter_ids = []

    # encounters = encounters[:20]
    def save(encounter, user):
        start = time.time()
        encounter_id, status_code = save_encounter(encounter, user['policy'],
                                                   user['user_id'])
        end = time.time()
        transaction_time = end - start
        transaction_times.append(transaction_time)
        encounter_ids.append(encounter_id)

    pool = ThreadPool(num_users)

    test_start_date = datetime.datetime.utcnow()
    for encounter, user in zip(encounters, users):
        pool.apply(save, (encounter, user))

    pool.close()
    pool.join()
    test_end_date = datetime.datetime.utcnow()

    with open(encounter_ids_file_name, 'w') as encounter_ids_file:
        print(len(encounter_ids))
        json.dump(encounter_ids, encounter_ids_file)

    with open(
            save_file_name.format(num_threads, num_users, file_size,
                                  policy_size, num_attributes),
            'w') as transaction_times_file:
        total_time = sum(transaction_times)
        num_transactions = len(transaction_times)
        avg_txn_time = total_time / float(num_transactions)
        rate = 1 / avg_txn_time
        transaction_times_file.write(
            'Save Encounter Test - {} Thread, {} User, {} kb, {} attributes in policy, {} attributes in key\n'
            .format(num_threads, num_users, file_size, policy_size,
                    num_attributes))
        transaction_times_file.write(
            'Test start: {}\n'.format(test_start_date))
        transaction_times_file.write('Test end: {}\n'.format(test_end_date))
        transaction_times_file.write(
            'Total number of transactions: {}\n'.format(num_transactions))
        transaction_times_file.write('Total time: {}\n'.format(total_time))
        transaction_times_file.write(
            'Average transaction time: {}\n'.format(avg_txn_time))
        transaction_times_file.write(
            'Transactions per second: {}\n'.format(rate))
        transaction_times_file.write('\n')
        transaction_times_file.writelines(
            ["{}\n".format(txn_time) for txn_time in transaction_times])