Example no. 1
    def __init__(self,
                 db_name,
                 proc_count,
                 site_base_url,
                 fUseCache=True,
                 fCacheSearchPages=True,
                 fUseCookies=False,
                 timeout=secHTTP_WAIT_TIMEOUT,
                 search_proc_count=2,
                 proxies=None):

        self.proxies = proxies
        self.queue = Queue()
        self.fSaveSearchPages = fCacheSearchPages
        self.site_base_url = site_base_url
        self.pool = Pool(processes=proc_count)

        self.search_queue = Queue()
        self.url_extract_pool = Pool(processes=search_proc_count)

        URL_Fetcher.__init__(self,
                             db_name,
                             fUseCache,
                             fUseCookies,
                             timeout=timeout,
                             proxies=proxies)
Example no. 2
def main():
    '''load video, process frames, display to user'''
    tque = Queue()  #(maxsize=120)
    framequeue = Queue()  #(maxsize=120)

    cthread = threading.Thread(target=cvworker, args=(tque, ))
    cthread.daemon = True
    cthread.start()

    tthread = threading.Thread(target=tfworker, args=(tque, framequeue))
    tthread.daemon = True  #terminate testloop when user closes window
    tthread.start()

    start = time.time()

    frame = 0
    videoend = False
    while True:
        cvw = cv2.waitKey(1)
        if cvw & 0xFF == ord('q'): break
        if not videoend:
            print('got', frame, time.time())
            frame += 1
            print('frame:', frame)
            f = framequeue.get()
            if f is None:
                videoend = True
            else:
                #time.sleep(1/30) #limit to realtime
                cv2.imshow('frame', f)

    print('new took:', time.time() - start)
    cv2.destroyAllWindows()
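
The loop above relies on a None item in framequeue as an end-of-video sentinel. cvworker and tfworker are defined elsewhere in the original project; a minimal sketch of what they might look like, with hypothetical bodies, an assumed input file, and Queue taken to be the thread-safe queue.Queue:

import cv2

def cvworker(tque):
    """Hypothetical capture worker: read frames from a video file and hand them on."""
    cap = cv2.VideoCapture('input.mp4')   # assumed input source
    while True:
        ok, frame = cap.read()
        if not ok:
            tque.put(None)                 # sentinel: no more frames
            break
        tque.put(frame)
    cap.release()

def tfworker(tque, framequeue):
    """Hypothetical inference worker: forward frames (a real model would annotate them)."""
    while True:
        frame = tque.get()
        framequeue.put(frame)              # the None sentinel is forwarded unchanged
        if frame is None:
            break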
Example no. 3
    def run(self, videosupport=False, debug=False, cpulimit=False):
        '''start the application!'''
        self.tque = Queue()  #(maxsize=120)
        self.framequeue = Queue()  #(maxsize=120)
        self.cvcommandqueue = Queue()
        self.videosupport = videosupport

        self.cvthread = None
        if self.videosupport:
            self.cvthread = threading.Thread(target=nnlib.cvworker,
                                             args=(self.tque,
                                                   self.cvcommandqueue,
                                                   cpulimit))
            self.cvthread.daemon = True
            self.cvthread.start()
        self.tthread = threading.Thread(target=nnlib.tfworker,
                                        args=(self.tque, self.framequeue))
        self.tthread.daemon = True
        self.tthread.start()

        #'''
        self.updatethread = threading.Thread(target=updatethread,
                                             args=(self, ))
        self.updatethread.daemon = True
        self.updatethread.start()
        #'''
        self.debug = debug
        if self.debug:
            self.root.after(100, self.set_image, 'demo5.jpg')
        self.root.mainloop()
Example no. 4
def parallel_apply_generator(
    func, iterable, workers, max_queue_size, dummy=False, random_seeds=True
):
    """多进程或多线程地将func应用到iterable的每个元素中。
    注意这个apply是异步且无序的,也就是说依次输入a,b,c,但是
    输出可能是func(c), func(a), func(b)。结果将作为一个
    generator返回,其中每个item是输入的序号以及该输入对应的
    处理结果。
    参数:
        dummy: False是多进程/线性,True则是多线程/线性;
        random_seeds: 每个进程的随机种子。
    """
    if dummy:
        from multiprocessing.dummy import Pool, Queue
    else:
        from multiprocessing import Pool, Queue

    in_queue, out_queue, seed_queue = Queue(max_queue_size), Queue(), Queue()
    if random_seeds is True:
        random_seeds = [None] * workers
    elif random_seeds is None or random_seeds is False:
        random_seeds = []
    for seed in random_seeds:
        seed_queue.put(seed)

    def worker_step(in_queue, out_queue):
        """单步函数包装成循环执行
        """
        if not seed_queue.empty():
            np.random.seed(seed_queue.get())
        while True:
            i, d = in_queue.get()
            r = func(d)
            out_queue.put((i, r))

    # start the worker processes/threads
    pool = Pool(workers, worker_step, (in_queue, out_queue))

    # feed inputs and pull results
    in_count, out_count = 0, 0
    for i, d in enumerate(iterable):
        in_count += 1
        while True:
            try:
                in_queue.put((i, d), block=False)
                break
            except six.moves.queue.Full:
                while out_queue.qsize() > max_queue_size:
                    yield out_queue.get()
                    out_count += 1
        if out_queue.qsize() > 0:
            yield out_queue.get()
            out_count += 1

    while out_count != in_count:
        yield out_queue.get()
        out_count += 1

    pool.terminate()
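
A minimal usage sketch of the generator above. It assumes numpy (as np) and six are imported at module level, as worker_step and the queue.Full handling require; dummy=True keeps everything in threads, so the nested worker_step never needs to be pickled:

import numpy as np   # worker_step calls np.random.seed
import six           # the put loop catches six.moves.queue.Full

def square(x):
    return x * x

if __name__ == '__main__':
    for index, value in parallel_apply_generator(square, range(20),
                                                 workers=4, max_queue_size=8,
                                                 dummy=True):
        print(index, value)   # results arrive in completion order, not input order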
Example no. 5
def parallel_apply(func,
                   iterable,
                   workers,
                   max_queue_size,
                   callback=None,
                   dummy=False):
    """多进程或多线程地将func应用到iterable的每个元素中。
    注意这个apply是异步且无序的,也就是说依次输入a,b,c,但是
    输出可能是func(c), func(a), func(b)。
    参数:
        dummy: False是多进程/线性,True则是多线程/线性;
        callback: 处理单个输出的回调函数;
    """
    if dummy:
        from multiprocessing.dummy import Pool, Queue
    else:
        from multiprocessing import Pool, Queue

    in_queue, out_queue = Queue(max_queue_size), Queue()

    # start the worker processes/threads
    pool = Pool(workers, worker_step, (func, in_queue, out_queue))

    if callback is None:
        results = []

    # post-processing helper
    def process_out_queue():
        out_count = 0
        for _ in range(out_queue.qsize()):
            d = out_queue.get()
            out_count += 1
            if callback is None:
                results.append(d)
            else:
                callback(d)
        return out_count

    # feed inputs and pull results
    in_count, out_count = 0, 0
    for d in iterable:
        in_count += 1
        while True:
            try:
                in_queue.put(d, block=False)
                break
            except six.moves.queue.Full:
                out_count += process_out_queue()
        if in_count % max_queue_size == 0:
            out_count += process_out_queue()

    while out_count != in_count:
        out_count += process_out_queue()

    pool.terminate()

    if callback is None:
        return results
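
worker_step here is a module-level helper that the snippet does not show; a plausible sketch, mirroring the nested worker_step of Example no. 4 but taking func explicitly, would be:

def worker_step(func, in_queue, out_queue):
    """Keep pulling inputs, apply func, and push the results back out."""
    while True:
        d = in_queue.get()
        out_queue.put(func(d))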
Example no. 6
 def __init__(self, model_dir, path_group_dict: Dict[str, int]):
     self.feed = Queue()
     self.mid = Queue()
     self.out = Queue()
     t = MtCNN(self.feed, self.mid)
     t.start()
     t1 = EmbeddingCmp(self.feed, self.mid, self.out, path_group_dict,
                       model_dir)
     t1.start()
Example no. 7
 def __init__(self):
     self.queue = list()
     self.queue_index = -1
     self.play_queue_order = list()
     self.play_modes = TizEnumeration(["NORMAL", "SHUFFLE"])
     self.current_play_mode = self.play_modes.NORMAL
     self.now_playing_stream = None
     # Create multiprocess queues
     self.task_queue = Queue()
     self.done_queue = Queue()
     # Workers
     self.workers = list()
Example no. 8
 def __init__(self, api_key=API_KEY):
     self.queue = list()
     self.queue_index = -1
     self.play_queue_order = list()
     self.play_modes = TizEnumeration(["NORMAL", "SHUFFLE"])
     self.current_play_mode = self.play_modes.NORMAL
     self.now_playing_stream = None
     # Create multiprocess queues
     self.task_queue = Queue()
     self.done_queue = Queue()
     # Workers
     self.workers = list()
     self.api_key = api_key if api_key != "" else API_KEY
     pafy.set_api_key(self.api_key)
Example no. 9
def get_stats():
	print 'Fetching NBA player stats...'
	stats_outfile = RUNDAY+'_nba_stats.csv'
	csvout = open(stats_outfile, 'wb')

	NUM_THREADS = 8

	in_queue = Queue()
	out_queue = Queue()
	queue_players(in_queue)

	while not in_queue.empty():	
		jobs = []

		for i in range(NUM_THREADS):
			if not in_queue.empty():
				thread = Process(target=get_stats_helper, args=(in_queue, out_queue))
				jobs.append(thread)
				thread.start()
		for thread in jobs:
			thread.join()	

		while not out_queue.empty():
			player = out_queue.get()
			del player['SUCCESS']
			try: 
				name = player['NAME']
			except KeyError as e:
				continue
			player['TIME'] = RUNDAY
			fieldnames = [
				'TIME',
				'NAME', 
				'JERSEY',
				'SPORT',
				'TEAM',
				'POSITION',
				'PTS',
				'REB',
				'AST',
				'URL'
			]
		
			csvwriter = csv.DictWriter(csvout, delimiter='|', fieldnames=fieldnames)
			csvwriter.writerow(player)
	csvout.close()

	print 'Finished fetching NBA player stats.'
	print 'Output saved in %s' % stats_outfile
Example no. 10
def _get_system_information_threaded(host):
    system_information_functions = [
        collect_win_application_stats, collect_win_bios_stats,
        collect_win_disk_stats, collect_win_local_account_stats,
        collect_win_local_group_stats, collect_win_mem_stats,
        collect_win_network_stats, collect_os_stats,
        collect_win_processes_stats, collect_win_cpu_stats,
        collect_win_services_stats
    ]
    system_information = {}
    queue = Queue()
    list_of_processes = []

    for hardware in system_information_functions:
        process = _Process(target=hardware, args=(
            host,
            1,
            queue,
        ))
        list_of_processes.append(process)
        process.start()

    for process in list_of_processes:
        process.join()
        system_information.update(queue.get())
    return system_information
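
Each collector function is expected to put exactly one dict onto the queue, which is why the parent calls queue.get() once per joined process. A hypothetical collector following that protocol (the real ones query the remote host; the second positional argument, passed as 1 above, is not used in this sketch):

import platform

def collect_os_stats(host, _level, queue):
    """Hypothetical collector: gather one category of stats and report a single dict."""
    queue.put({'os': {'host': host,
                      'system': platform.system(),
                      'release': platform.release()}})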
Example no. 11
    def run(self):
        in_queue, out_queue = Queue(), Queue()
        for i in self.a:
            in_queue.put(i)

        def f(in_queue, out_queue):
            while not in_queue.empty():
                time.sleep(1)
                out_queue.put(in_queue.get() + 1)

        pool = Pool(4, f, (in_queue, out_queue))
        self.b = []
        while len(self.b) < len(self.a):
            if not out_queue.empty():
                self.b.append(out_queue.get())
        pool.terminate()
Example no. 12
def send_emails(modeladmin, request, queryset):
    messages = Queue()
    for user in queryset:
        process = Process(target=send_email, args=(user, messages))
        process.start()
        messages.get().send()
        process.join()
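
send_email here only builds the message in the child process; the parent pulls it off the queue and calls .send() itself. A hypothetical worker compatible with that wiring, assuming Django's EmailMessage:

from django.core.mail import EmailMessage

def send_email(user, messages):
    """Hypothetical worker: build the e-mail in the child and hand it back for sending."""
    email = EmailMessage(
        subject='Hello',
        body='Hi %s' % user.get_username(),
        to=[user.email],
    )
    messages.put(email)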
Example no. 13
def main():
    queue = Queue()
    pool = ThreadPool()
    pool.apply_async(consumer, args=(queue, ))
    pool.apply_async(producer, args=(queue, ))
    pool.close()
    pool.join()
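
producer and consumer are not shown; a minimal sentinel-based pair that works with the ThreadPool wiring above (the names, bodies and the None sentinel are assumptions; Queue is taken to be the thread-safe queue.Queue):

def producer(queue):
    for i in range(10):
        queue.put(i)
    queue.put(None)          # sentinel: tell the consumer to stop

def consumer(queue):
    while True:
        item = queue.get()
        if item is None:     # stop on the sentinel
            break
        print('consumed', item)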
Example no. 14
 def __init__(self, feeder, fifo_path, end_nl_q=None, daemon=True, *args):
     os.mkfifo(fifo_path)
     super().__init__(daemon=daemon)
     self.feeder = feeder
     self.fifo_path = fifo_path
     self.end_nl_q = Queue() if end_nl_q is None else end_nl_q
     self._exception = None
     self._want_join = threading.Event()
Example no. 15
    def __init__(self, w3af_core):
        """
        :param w3af_core: The w3af core that we'll use for status reporting
        """
        super(seed, self).__init__(name='%sController' % self.get_name())

        self._w3af_core = w3af_core

        # See documentation in the property below
        self._out_queue = Queue()
Example no. 16
def main():
    q = Queue(10)  # cap the queue size for demonstration purposes
    pool = ThreadPool()
    # one producer
    pool.apply_async(producer, args=(q, ))
    # two consumers
    pool.apply_async(consumer, args=(q, 1))
    pool.apply_async(consumer, args=(q, 2))
    pool.close()
    pool.join()
Example no. 17
def scoreDuplicates(record_pairs: RecordPairs,
                    data_model,
                    classifier,
                    num_cores: int = 1):
    if num_cores < 2:
        from multiprocessing.dummy import Process, Queue
        SimpleQueue = Queue
    else:
        from .backport import Process, SimpleQueue, Queue  # type: ignore

    first, record_pairs = peek(record_pairs)
    if first is None:
        raise BlockingError("No records have been blocked together. "
                            "Is the data you are trying to match like "
                            "the data you trained on?")

    record_pairs_queue: _Queue = Queue(2)
    score_queue: _SimpleQueue = SimpleQueue()
    result_queue: _SimpleQueue = SimpleQueue()

    n_map_processes = max(num_cores, 1)
    score_records = ScoreDupes(data_model, classifier, record_pairs_queue,
                               score_queue)
    map_processes = [
        Process(target=score_records) for _ in range(n_map_processes)
    ]

    for process in map_processes:
        process.start()

    reduce_process = Process(target=mergeScores,
                             args=(score_queue, result_queue, n_map_processes))
    reduce_process.start()

    fillQueue(record_pairs_queue, record_pairs, n_map_processes)

    result = result_queue.get()
    if isinstance(result, Exception):
        raise ChildProcessError

    if result:
        scored_pairs_file, dtype, size = result
        scored_pairs = numpy.memmap(scored_pairs_file,
                                    dtype=dtype,
                                    shape=(size, ))
    else:
        dtype = numpy.dtype([('pairs', object, 2), ('score', 'f4', 1)])
        scored_pairs = numpy.array([], dtype=dtype)

    reduce_process.join()

    for process in map_processes:
        process.join()

    return scored_pairs
Example no. 18
File: seed.py Project: weisst/w3af
    def __init__(self, w3af_core):
        '''
        :param w3af_core: The w3af core that we'll use for status reporting
        '''
        super(seed, self).__init__()
        self.name = 'Seed'

        self._w3af_core = w3af_core

        # See documentation in the property below
        self._out_queue = Queue()
Example no. 19
    def __init__(self, base_path, callback=None):

        if not os.path.isdir(base_path):
            raise IOError("Base path not found: " + base_path)

        self.base_path = base_path
        self.unsearched = Manager().Queue()
        self.dirpath_queue = Queue()
        self.cpu_count = multiprocessing.cpu_count()
        self.pool = Pool(self.cpu_count)
        self.first_level_dirs = ""
        self.callback = callback
Example no. 20
 def __init__(self):
     self.starturl = 'http://www.mzitu.com/all/'
     self.ua = UserAgent()
     self.qu = Queue(1000)
     self.qu2 = Queue(1000)
     self.suburls = []
     self.oldmurls = []
     conno = sqlite3.connect('mzituoldu.db')
     try:
         conno.execute('create table oldmurls(url text primary key)')
     except Exception as e:
         print('creating table oldmurls: {}'.format(e))
     conno.close()
     conns = sqlite3.connect('mzitusubu.db')
     try:
         conns.execute(
             'create table suburls(url text primary key,status int default 1)'
         )
     except Exception as e:
         print('creating table suburls: {}'.format(e))
     conns.close()
Example no. 21
    class Runner(QObject):
        """Runner for run in main thread.  """

        result = Queue(1)

        def event(self, event):
            if event.type() == Event.event_type:
                try:
                    self.result.put(event.func(*event.args, **event.kwargs))
                    return True
                except AttributeError:
                    return False
            return super(Runner, self).event(event)
Example no. 22
def find_words(start_words,
               center_words=None,
               neg_words=None,
               min_sim=0.6,
               max_sim=1.,
               alpha=0.25):
    if center_words is None and neg_words is None:
        min_sim = max(min_sim, 0.6)
    center_vec, neg_vec = np.zeros([word_size]), np.zeros([word_size])
    if center_words:  # the center vector is the average of all center seed word vectors
        _ = 0
        for w in center_words:
            if w in word2vec.wv.vocab:
                center_vec += word2vec[w]
                _ += 1
        if _ > 0:
            center_vec /= _
    if neg_words:  # the negative vector is the average of the negative seed word vectors (not used here)
        _ = 0
        for w in neg_words:
            if w in word2vec.wv.vocab:
                neg_vec += word2vec[w]
                _ += 1
        if _ > 0:
            neg_vec /= _
    queue_count = 1
    task_count = 0
    cluster = []
    queue = Queue()  # create the work queue
    for w in start_words:
        queue.put((0, w))
        if w not in cluster:
            cluster.append(w)
    while not queue.empty():
        idx, word = queue.get()
        queue_count -= 1
        task_count += 1
        sims = most_similar(word, center_vec, neg_vec)
        min_sim_ = min_sim + (max_sim - min_sim) * (1 - np.exp(-alpha * idx))
        if task_count % 10 == 0:
            log = '%s in cluster, %s in queue, %s tasks done, %s min_sim' % (
                len(cluster), queue_count, task_count, min_sim_)
            print(log)
        for i, j in sims:
            if j >= min_sim_:
                if i not in cluster and is_good(i):  # is_good is a hand-written filtering rule
                    queue.put((idx + 1, i))
                    cluster.append(i)
                    queue_count += 1
    return cluster
Example no. 23
def run_tasks(tasks, context=None, nb_threads=1, watchdog=None):
    got_keyboard_interrupt = False
    watchdogs = [
        lambda _: _KEYBOARD_INTERRUPT_ERROR_MESSAGE
        if got_keyboard_interrupt else None
    ]
    if watchdog:
        watchdogs.append(watchdog)

    for task in tasks:
        check_task_dependencies(task)

    remaining_tasks = list(tasks)
    completed_tasks = list()

    pool = Pool(nb_threads)
    completed_tasks_queue = Queue()

    try:
        schedule_tasks_to_be_run(
            pop_runnable_tasks(remaining_tasks, completed_tasks, nb_threads),
            watchdogs, context, pool, completed_tasks_queue)

        while len(completed_tasks) != len(tasks):
            # wait for one task to complete
            completed_task = completed_tasks_queue.get()
            completed_tasks.append(completed_task)

            # schedule tasks to be run waiting for task success or simple completion
            tasks_to_be_run = pop_runnable_tasks(remaining_tasks,
                                                 completed_tasks, nb_threads)
            schedule_tasks_to_be_run(tasks_to_be_run, watchdogs, context, pool,
                                     completed_tasks_queue)

    except KeyboardInterrupt:
        got_keyboard_interrupt = True
        skip_all_tasks(tasks, remaining_tasks, completed_tasks, context, pool,
                       completed_tasks_queue,
                       _KEYBOARD_INTERRUPT_ERROR_MESSAGE)

    finally:
        pool.close()

    exceptions = [
        task.result.stacktrace for task in tasks
        if isinstance(task.result, TaskResultException)
    ]
    if exceptions:
        raise TasksExecutionFailure("Caught exceptions:\n%s" %
                                    "\n".join(exceptions))
Example no. 24
    def __init__(self, count=0):
        # mark the pool as running
        self.running = True

        from os import cpu_count  # import the module only where it is needed
        # default to the number of CPU cores, with at least one thread
        if count <= 0:
            count = cpu_count() or 1
        # set the number of worker threads
        self.queue = Queue(count)

        # start that many worker threads
        for _ in range(count):
            Task(self.queue)  # must not run the task loop here directly, or it would block the Pool
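
Task is defined elsewhere; the last comment implies it starts its own thread rather than running the loop in the caller. A plausible sketch under that assumption:

import threading

class Task(threading.Thread):
    """Hypothetical worker: pull callables off the shared queue and run them."""

    def __init__(self, queue):
        super().__init__(daemon=True)
        self.queue = queue
        self.start()              # start immediately; the loop runs in this thread, not the caller's

    def run(self):
        while True:
            func, args, kwargs = self.queue.get()
            try:
                func(*args, **kwargs)
            finally:
                self.queue.task_done()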
Example no. 25
def scoreDuplicates(records, data_model, classifier, num_cores=1, threshold=0) :
    if num_cores < 2 :
        from multiprocessing.dummy import Process, Queue
        SimpleQueue = Queue
    else :
        from .backport import Process, SimpleQueue, Queue

    first, records = peek(records)
    if first is None:
        raise ValueError("No records have been blocked together. "
                         "Is the data you are trying to match like "
                         "the data you trained on?")

    record_pairs_queue = Queue(2)
    score_queue =  SimpleQueue()
    result_queue = SimpleQueue()

    n_map_processes = max(num_cores-1, 1)
    score_records = ScoreRecords(data_model, classifier, threshold) 
    map_processes = [Process(target=score_records,
                             args=(record_pairs_queue,
                                   score_queue))
                     for _ in range(n_map_processes)]
    [process.start() for process in map_processes]

    reduce_process = Process(target=mergeScores,
                             args=(score_queue,
                                   result_queue,
                                   n_map_processes))
    reduce_process.start()

    fillQueue(record_pairs_queue, records, n_map_processes)

    result = result_queue.get()
    if isinstance(result, Exception) :
        raise ChildProcessError

    if result :
        scored_pairs_file, dtype, size = result
        scored_pairs = numpy.memmap(scored_pairs_file,
                                    mode='r',
                                    dtype=dtype,
                                    shape=(size,))
    else:
        scored_pairs = []

    reduce_process.join()
    [process.join() for process in map_processes]

    return scored_pairs
Example no. 26
def main(q=None):
    """
    Try to detect relics on the screen and show if they can be detected in a window.

    For some weird reason, Qt and tesseract cannot run in the same thread.
    Doing so leads to a crash and I'm unable to figure out the cause.
    We workaround this by creating a thread for Qt and putting all ocr result into a queue.
    :param q: A queue for communication with the Qt thread
    """
    if q is not None:
        app = QApplication(sys.argv)
        widget = Widget(q)
        sys.exit(app.exec_())

    q = Queue(1)
    p = Process(target=main, args=(q, ))
    p.start()

    tessdata_dir = 'tessdata/'
    with TesserocrPool(tessdata_dir,
                       'Roboto',
                       psm=PSM.SINGLE_BLOCK,
                       oem=OEM.LSTM_ONLY) as pool, mss.mss() as sct:
        s = Screenshots(sct)
        while p.is_alive():
            begin = time.time()
            image_input = next(s)
            end = time.time()
            delta = end - begin
            print(f'screenshot took {delta}s')

            try:
                ocr_data = do_ocr(pool, image_input)
            except Exception:
                ocr_data = None

            if ocr_data is None:
                ocr_data = itertools.repeat(('ocrerror', ) * 4, 20)

            try:
                q.put(tuple(ocr_data), block=True, timeout=0.5)
            except QueueFullException:
                if not p.is_alive():
                    break
            except ValueError as e:
                pass
            except AssertionError as e:
                pass

    p.join()
Example no. 27
 def __init__(self):
     self.ua = UserAgent()
     self.headers = {}
     self.q1 = Queue(300)
     self.q2 = Queue(1000)
     self.lock = Lock()
     # self.path = 'D:/IMG/'
     self.main_page_urls = []
     self.subpageurls = []
     conn = sqlite3.connect('mmonly.db')
     conn.isolation_level = None
     try:
         conn.execute(
             '''create table subpageurl(url text primary key not null)''')
         conn.execute(
             '''create table imgurl(url text primary key not null)''')
     except Exception as e:
         print('creating tables: {}'.format(e))
     finally:
         conn.close()
     self.rootpath = os.getcwd().replace('\\', '/')
     self.path = os.path.join(self.rootpath, 'imges/')
     if not os.path.exists(self.path):
         os.mkdir(self.path)
Example no. 28
def main():
    queue = Queue()
    # start the producer and consumer thread tasks
    t_list = [
        threading.Thread(target=func, args=(queue, ))
        for func in (producer, consumer)
    ]
    # start the two threads
    for t in t_list:
        # daemon threads exit with the main thread even if they loop forever
        t.daemon = True  # setDaemon() is deprecated; the daemon attribute is preferred
        t.start()
    # wait for every queued task to be marked done
    queue.join()  # try commenting this line out and compare the output
    print(f"unfinished tasks left in the queue: {queue.unfinished_tasks}")
Example no. 29
    def __init__(self):

        pool = Pool(processes=2)
        self.graph = getGraph()

        files = findFiles(opts)

        self.progressQueue = Queue()
        reporter = Process(target=ProgressReport,
                           args=(self.progressQueue, len(files)))
        reporter.start()
        result = pool.map(self.cacheFile, enumerate(files), chunksize=5)
        self.progressQueue.put('END')
        log.info("finished, %s results", len(result))
        reporter.join()
Example no. 30
    def __init__(self):
        '''Initialize'''

        # create the event queue
        self._queue = Queue()

        # whether the engine is active (dispatching events)
        self._active = False

        # worker pool
        self._thread = [Process(target=self._run) for _ in range(5)]
        self._workers_n = 5

        # registered handlers
        self._handlers = {}
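
The _run target is not shown; a minimal sketch of the dispatch loop such an event engine typically runs (the (event_type, data) format and the handler registry layout are assumptions):

    def _run(self):
        """Hypothetical worker loop: pop (event_type, data) pairs and call each registered handler."""
        from queue import Empty   # Queue.get raises queue.Empty on timeout
        while True:
            try:
                event_type, data = self._queue.get(timeout=1)
            except Empty:
                if not self._active:
                    break          # stop once the engine has been switched off
                continue
            for handler in self._handlers.get(event_type, []):
                handler(data)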