Example #1
def main():
    file_changes_q = Queue()
    # Start the file-watching process.
    p = Process(target=start_watch_file_changes, args=(file_changes_q,".",))
    p.start()
    while 1:
        print file_changes_q.get()
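A minimal sketch of the pieces this example assumes: the imports and a hypothetical start_watch_file_changes that polls a directory and pushes changed paths onto the queue (the real project's watcher may work differently).

import os
import time
from multiprocessing import Process, Queue

def start_watch_file_changes(q, path):
    # Hypothetical watcher: poll modification times and report changed files.
    seen = {}
    while True:
        for name in os.listdir(path):
            full = os.path.join(path, name)
            try:
                mtime = os.path.getmtime(full)
            except OSError:
                continue  # file vanished between listdir and stat
            if seen.get(full) != mtime:
                seen[full] = mtime
                q.put(full)
        time.sleep(1)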
Example #2
def test():
    NUMBER_OF_PROCESSES = 4
    TASKS1 = [(mul, (i, 7)) for i in range(20)]
    TASKS2 = [(plus, (i, 8)) for i in range(10)]

    # Create queues
    task_queue = Queue()
    done_queue = Queue()

    # Submit tasks
    for task in TASKS1:
        task_queue.put(task)

    # Start worker processes
    for i in range(NUMBER_OF_PROCESSES):
        Process(target=worker, args=(task_queue, done_queue)).start()

    # Get and print results
    print 'Unordered results:'
    for i in range(len(TASKS1)):
        print '\t', done_queue.get()

    # Add more tasks using `put()`
    for task in TASKS2:
        task_queue.put(task)

    # Get and print some more results
    for i in range(len(TASKS2)):
        print '\t', done_queue.get()

    # Tell child processes to stop
    for i in range(NUMBER_OF_PROCESSES):
        task_queue.put('STOP')
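This example follows the worker-pool pattern from the multiprocessing docs but omits its helpers; a minimal sketch of what mul, plus, and worker might look like, assuming worker consumes (function, args) tasks until it sees the 'STOP' sentinel:

from multiprocessing import Process, Queue

def mul(a, b):
    return a * b

def plus(a, b):
    return a + b

def worker(input_queue, output_queue):
    # Pull (func, args) tuples until the 'STOP' sentinel arrives.
    for func, args in iter(input_queue.get, 'STOP'):
        output_queue.put(func(*args))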
Example #3
def test_transfert_queue():
    t1 = "testTopic"
    topic = Topics()
    q = Queue()

    topic.process(t1,123)
    topic.process(t1,456)
    topic.process(t1,789)

    assert q.empty()

    topic.transfer(t1,q)

    assert q.qsize() > 0

    assert q.get() == [0, 123]
    assert q.get() == [1, 456]
    assert q.get() == [2, 789]

    topic.process(t1,111)
    topic.process(t1,222)

    assert q.qsize() > 0

    assert q.get() == [3, 111]
    assert q.get() == [4, 222]
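A sketch of a Topics class consistent with this test (an assumption, not necessarily the project's real implementation): process() buffers [index, value] messages per topic, and transfer() flushes the buffer into the given queue and keeps forwarding later messages to it.

class Topics(object):
    def __init__(self):
        self._buffers = {}   # topic -> buffered [index, value] messages
        self._queues = {}    # topic -> queue to forward new messages to
        self._counters = {}  # topic -> next message index

    def process(self, topic, value):
        idx = self._counters.get(topic, 0)
        self._counters[topic] = idx + 1
        msg = [idx, value]
        if topic in self._queues:
            self._queues[topic].put(msg)
        else:
            self._buffers.setdefault(topic, []).append(msg)

    def transfer(self, topic, q):
        for msg in self._buffers.pop(topic, []):
            q.put(msg)
        self._queues[topic] = q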
Example #4
File: paths.py  Project: pytrip/pytrip
 def calculate_quality_list(self, voi, gantry, couch, calculate_from=0, stepsize=1.0, avoid=[], gradient=True):
     """ TODO: Documentation
     """
     q = Queue(32767)
     process = []
     d = voi.get_voi_cube()
     d.cube = np.array(d.cube, dtype=np.float32)
     voi_cube = DensityProjections(d)
     result = []
     for gantry_angle in gantry:
         p = Process(
             target=self.calculate_angle_quality_thread,
             args=(voi, gantry_angle, couch, calculate_from, stepsize, q, avoid, voi_cube, gradient))
         p.daemon = True  # mark the worker as a daemon before it starts
         p.start()
         process.append(p)
         if len(process) > 2:
             tmp = q.get()
             result.append(tmp)
             for p in process:
                 if not p.is_alive():
                     process.remove(p)
     while not len(result) == len(gantry) * len(couch):
         tmp = q.get()
         result.append(tmp)
     return result
Example #5
def genPairs(PNGMaps, compareMaps):
    pairA = []
    pairB = []
    # Maximum possible fitness
    totalFitness = len(compareMaps)
    threadsA = []
    threadsB = []
    # Thread safe way to get parent PNGMaps
    queueA = Queue()
    queueB = Queue()
    # Create a list of threads to get a PNGMap
    for listMap in PNGMaps:
        threadA = Process(target=randPair , args=(PNGMaps, compareMaps, totalFitness, queueA))
        threadB = Process(target=randPair , args=(PNGMaps, compareMaps, totalFitness, queueB))
        threadA.start()
        threadB.start()
        threadsA.append(threadA)
        threadsB.append(threadB)
    # Get the parents from the queues
    while not len(pairA) == len(PNGMaps):
        pairA.append(queueA.get())
    while not len(pairB) == len(PNGMaps):
        pairB.append(queueB.get())
    # Join the threads with the current one
    for thread in threadsA:
        thread.join()
    for thread in threadsB:
        thread.join()
    # Return the pair of PNGMaps
    return pairA, pairB
Example #6
def likelihood_mp_simple(seqlens,fss,uon,bon,theta,seqnum,K,ufnum,bfnum,regtype,sigma):
    global _gradient
    grad = numpy.array(fss,copy=True)  # data distribution
    likelihood = numpy.dot(fss,theta)
    que1 = Queue() # for the likelihood output
    que2 = Queue() # for the gradient output
    np = 0
    subprocesses = []
    corenum=multiprocessing.cpu_count()
    #corenum=1
    if corenum>1:
        chunk=seqnum/corenum+1
    else:
        chunk=seqnum
    starti=0
    while starti < (seqnum):
        endi=starti+chunk
        if endi>seqnum:
            endi=seqnum    
        p = Process(target=likelihoodthread_simple, 
            args=(seqlens[starti:endi],uon[starti:endi],bon[starti:endi],theta,K,ufnum,bfnum,que1,que2))
        p.start()
        np+=1
        #print 'delegated %s:%s to subprocess %s' % (starti, endi, np)
        subprocesses.append(p)
        starti += chunk
    for i in range(np):
        likelihood += que1.get()
    for i in range(np):
        grad += que2.get()
    while subprocesses:
        subprocesses.pop().join()
    grad -= regularity_deriv(theta,regtype,sigma)
    _gradient = grad
    return likelihood - regularity(theta,regtype,sigma)
Example #7
def parallel_precompute(global_conf_file=None):
    # Define queues
    queueIn = Queue(nb_workers+2)
    queueOut = Queue(nb_workers+8)
    queueProducer = Queue()
    queueFinalizer = Queue()
    queueConsumer = Queue(nb_workers)

    # Start finalizer
    t = Process(target=finalizer, args=(global_conf_file, queueOut, queueFinalizer))
    t.daemon = True
    t.start()
    # Start consumers
    for i in range(nb_workers):
        t = Process(target=consumer, args=(global_conf_file, queueIn, queueOut, queueConsumer))
        t.daemon = True
        t.start()
    # Start producer
    t = Process(target=producer, args=(global_conf_file, queueIn, queueProducer))
    t.daemon = True
    t.start()

    # Wait for everything to be started properly
    producerOK = queueProducer.get()
    finalizerOK = queueFinalizer.get()
    for i in range(nb_workers):
        consumerOK = queueConsumer.get()
    print "[parallel_precompute: log] All workers are ready."
    sys.stdout.flush()
    # Wait for everything to be finished
    finalizerEnded = queueFinalizer.get()
    print "[parallel_precompute: log] Done at {}".format(get_now())
    return
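The producer, consumer, and finalizer targets and the nb_workers global are defined elsewhere in the project; a hypothetical set of stubs that matches the ready/finished handshake used above (each role first signals readiness on its status queue, then moves items between queueIn and queueOut):

def producer(global_conf_file, queueIn, queueProducer):
    queueProducer.put(True)            # signal "ready"
    for item in range(100):            # stand-in for the real work source
        queueIn.put(item)
    for _ in range(nb_workers):
        queueIn.put(None)              # one end marker per consumer

def consumer(global_conf_file, queueIn, queueOut, queueConsumer):
    queueConsumer.put(True)            # signal "ready"
    for item in iter(queueIn.get, None):
        queueOut.put(item)             # stand-in for the real processing
    queueOut.put(None)

def finalizer(global_conf_file, queueOut, queueFinalizer):
    queueFinalizer.put(True)           # signal "ready"
    ended = 0
    while ended < nb_workers:          # wait for every consumer's end marker
        if queueOut.get() is None:
            ended += 1
    queueFinalizer.put(True)           # signal "finished"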
Example #8
class CommunicationQueues(object):
    """Queues to handle communication between the threads.

    On the bc side, this is also a logging handler sending
    log messages to the node side."""
    def __init__(self):
        self.bc_to_node = Queue()
        self.node_to_bc = Queue()

    def set(self, bc_to_node=None, node_to_bc=None, queues=None):
        if bc_to_node:
            self.bc_to_node = bc_to_node
            return
        if node_to_bc:
            self.node_to_bc = node_to_bc
            return
        assert queues.bc_to_node
        self.bc_to_node = queues.bc_to_node
        assert queues.node_to_bc
        self.node_to_bc = queues.node_to_bc

    def empty_queues(self):
        print "Emptying queues:"
        while not self.bc_to_node.empty():
            print "BC to node:", self.bc_to_node.get()
        while not self.node_to_bc.empty():
            print "Node to BC:", self.node_to_bc.get()
        print "Emptying queues done."

    def get_handler(self):
        return _CommQueueHandler(self.bc_to_node)
Example #9
 def runBacktracking(self, clauseList, partialAssignment,  literalList, queue):
     result = self.partialInterp(clauseList, partialAssignment)
     if result == 'true':
         queue.put(True)
         return
     if result == 'false':
         queue.put(False)
         return
     
     chosenLiteral = self.chooseLiteral(partialAssignment, literalList)
     
     posDict = dict(partialAssignment)
     posDict[chosenLiteral] = 'true'
     negDict = dict(partialAssignment)
     negDict[chosenLiteral] = 'false'
     
     q1 = Queue()
     q2 = Queue()
     thread1 = Thread(target=self.runBacktracking, args=(copy.deepcopy(clauseList), posDict, list(literalList), q1))
     thread1.start()
     thread2 = Thread(target=self.runBacktracking, args=(copy.deepcopy(clauseList), negDict, list(literalList), q2))
     thread2.start()
     thread1.join()
     thread2.join()
     
     if(q1.get()):
         queue.put(True)
         return
     if(q2.get()):
         queue.put(True)
         return
     queue.put(False)
     return
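Hypothetical sketches of the two helpers this recursive solver calls, written as methods of the same class (shown without the class wrapper): partialInterp evaluates the clause list under a partial assignment, returning 'true' once every clause is satisfied, 'false' once some clause is falsified, and anything else while the outcome is still open; chooseLiteral returns a literal that has not been assigned yet.

 def partialInterp(self, clauseList, partialAssignment):
     undecided = False
     for clause in clauseList:
         satisfied = False
         open_literal = False
         for lit in clause:
             var = abs(lit)
             want = 'true' if lit > 0 else 'false'
             if var not in partialAssignment:
                 open_literal = True
             elif partialAssignment[var] == want:
                 satisfied = True
                 break
         if not satisfied:
             if open_literal:
                 undecided = True       # clause may still become true
             else:
                 return 'false'         # clause is falsified
     return 'undecided' if undecided else 'true'

 def chooseLiteral(self, partialAssignment, literalList):
     for lit in literalList:
         if lit not in partialAssignment:
             return lit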
Example #10
def main():
    if __name__ == '__main__':
        queue = Queue()
        p1 = Process(target=sum, args=(queue, 0, 2500000))
        p2 = Process(target=sum, args=(queue, 2500000, 5000000))
        p3 = Process(target=sum, args=(queue, 5000000, 7500000))
        p4 = Process(target=sum, args=(queue, 7500000, 10000001))

        now_1 = time.time()
        p1.start()
        now_2 = time.time()

        now_3 = time.time()
        p2.start()
        now_4 = time.time()

        now_5 = time.time()
        p3.start()
        now_6 = time.time()

        now_7 = time.time()
        p4.start()
        now_8 = time.time()

        p1.join()
        p2.join()
        p3.join()
        p4.join()

        total_result = queue.get() + queue.get() + queue.get() + queue.get()
        print(total_result)
        print(now_2 - now_1)
        print(now_4 - now_3)
        print(now_6 - now_5)
        print(now_8 - now_7)
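A sketch of the worker this example assumes: a user-defined sum (which shadows the builtin) that computes a partial sum over [start, end) and puts it on the queue. Since four worker processes each put one partial result, all four have to be read back, as done above.

import time
from multiprocessing import Process, Queue

def sum(queue, start, end):
    # Partial sum of the integers in [start, end).
    total = 0
    for i in range(start, end):
        total += i
    queue.put(total)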
Example #11
File: precession.py  Project: RAPD/RAPD
    def process_labelit(self, inp=False):
        """
        Initiate Labelit runs.
        """
        if self.verbose:
            self.logger.debug("LabelitPP::process_labelit")

        try:
            queue = Queue()
            params = {}
            params["test"] = self.test
            params["cluster"] = False # self.cluster_use # TODO
            params["verbose"] = self.verbose
            args1 = {}
            if inp:
                args1["input"] = inp
            else:
                args1["input"] = self.images
            args1["output"] = queue
            args1["params"] = params
            args1["logger"] = self.logger

            # Import the RunLabelit class
            agent = load_module(seek_module="rapd_agent_index+strategy",
                                directories=self.agent_directories,
                                logger=self.logger)

            Process(target=agent.RunLabelit, kwargs=args1).start()
            self.labelit_results = queue.get()
            self.labelit_log = queue.get()

        except:
            self.logger.exception("**Error in LabelitPP.process_labelit**")
Example #12
class DataLoaderOnTheFly():
    def __init__(self, config):
        default_config = Config(proc_count = 4, limit_batch_count = None)
        self.config = default_config(**config)
        self.exit = Event()
        self.batch_queue = Queue(maxsize = 10)
        if self.config.limit_batch_count is None:
            self.limited = False
        else:
            self.limited = True
            self.batch_list = []
            self.index = -1
        self.workers = []
        for _ in range(self.config.proc_count):
            self.workers.append(Process(target = config.worker, args = (self,)))
        for w in self.workers:
            w.daemon = True
            w.start()
    def next_batch(self):
        if self.limited:
            if len(self.batch_list) < self.config.limit_batch_count:
                self.batch_list.append(Config(self.batch_queue.get()))
            self.index = (self.index + 1) % self.config.limit_batch_count
            return Config(self.batch_list[self.index])
        else:
            return Config(self.batch_queue.get())
    def __del__(self):
        self.exit.set()
        for w in self.workers:
            w.join()
Example #13
def split_messages(d, n, nPrime, r, bit,data):
	""" Splits a data set based on the subtraction in montgomery exponentiation."""
	mlist = data
	q_t = Queue()
	q_f = Queue()
	processes = []
	start = 0
	numProcs = 8
	NP = 0
	chunk = len(mlist)//numProcs
	while start < len(mlist):
		p = Process(target=do_sim, args=(q_t, q_f, mlist[start:start+chunk], d, n, nPrime, r, bit))
		NP += 1
		p.start()
		start += chunk
		processes.append(p)
	
	m_true = []
	m_false = []
	for i in range(NP):
		m_true += q_t.get()
		m_false += q_f.get()

	while processes:
		processes.pop().join()
	return (m_true, m_false)
Example #14
 def testBackTracking(self):
     testData = [[-1, -2], [1, -2], [-1, -3]]
     testLiterals = [1, 2, 3]
     queue0 = Queue()
     self.dpllTester.runBacktracking(testData, {}, testLiterals, queue0)
     if queue0.get() == False:
         print "Error"
     else:
         print "Passed Test 4"
     
     queue1 = Queue()    
     testData = [[-1, -2], [-1, 2], [1, -2], [2, -3], [1, 3]]
     self.dpllTester.runBacktracking(testData, {}, testLiterals, queue1)
     if queue1.get() == True:
         print "Error"
     else:
         print "Passed Test 5"
     
     queue2 = Queue()    
     testData = [[2, 1], [-1], [-2, -3], [3, 1]]
     self.dpllTester.runBacktracking(testData, {}, testLiterals, queue2)
     if queue2.get() == True:
         print "Error"
     else:
         print "Passed Test 6"
Example #15
def parallel_return_function(function, args_list, threads):
	""" Run function using multiple threads """
	values = []
	processes = []
	queue = Queue()
	for arguments in args_list: # run function for each set of args in args_list
		arguments['queue'] = queue # append queue to list of args
		p = Process(target=function, kwargs=arguments)
		processes.append(p)
		processes[-1].start()
		while len(processes) >= threads: # control number of active processes
			indexes = []
			for index, process in enumerate(processes):
				if process.is_alive(): # keep processes that are still alive
					indexes.append(index)
				else: # return values from processes that are finished
					values.append(queue.get())
			processes = [processes[i] for i in indexes] # update list of processes
	# wait until there are no active processes
	while len(processes) > 0:
		indexes = []
		for index, process in enumerate(processes):
			if process.is_alive(): # keep processes that are still alive
				indexes.append(index)
			else: # return values from processes that are finished
				values.append(queue.get())
		processes = [processes[i] for i in indexes] # update list of processes
	return values
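A hypothetical usage sketch. The target function must accept a queue keyword argument and put its return value on it; results come back in completion order, not submission order.

from multiprocessing import Process, Queue

def square(x, queue):
    queue.put(x * x)

args_list = [{'x': i} for i in range(8)]
print(parallel_return_function(square, args_list, threads=4))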
Example #16
File: draft.py  Project: Hareric/DM
def createMatrix(messageList,vocabList):  # takes an SMS list and a vocabulary; returns the message-vector matrix
    t1 = time.time()
    print "开始构建向量矩阵..."

    num = len(messageList)
    n = num/2
    def Pro(N1,N2,getReturn=None,isP=False):
        returnMatrix = []
        if isP:
            for i in range(num)[N1:N2]:
                print "    构建中...   %i%%"%(i/float(n)*100),
                sys.stdout.write("\r")
                returnMatrix.append(createVecInVocabList(messageList[i],vocabList=vocabList))
        else:
            for i in range(num)[N1:N2]:
                returnMatrix.append(createVecInVocabList(messageList[i],vocabList=vocabList))
        getReturn.put(returnMatrix)
        # return returnMatrix
    getReturn1 = Queue();getReturn2 = Queue()
    P1 = Process(target=Pro,args=(0,n,getReturn1,True))
    P2 = Process(target=Pro,args=(n,2*n,getReturn2))
    # P3 = Process(target=Pro,args=(2*n,3*n,getReturn3))
    # P4 = Process(target=Pro,args=(3*n,4*n,getReturn4))
    P1.start()
    P2.start()
    matrix = getReturn1.get() + getReturn2.get()
    # matrix = Pro(0,num,None,True)

    t2 = time.time()
    print "矩阵构建完毕,花费时间:%is\n"%(t2-t1)
    return matrix
Example #17
File: gomut.py  Project: rjeli/gomut
class chan:
    def __init__(self):
        self._queue = Queue()
        self._closed = False

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return self.get()
        except queue.Empty:
            raise StopIteration

    def put(self, x):
        self._queue.put(x)

    def get(self):
        if self._closed:
            return self._queue.get(False)
        else:
            return self._queue.get()

    def close(self):
        self._closed = True
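A hypothetical usage sketch (the snippet assumes import queue and from multiprocessing import Queue). After close(), get() becomes non-blocking, so iterating the channel stops once it is drained; the short sleep gives the queue's feeder thread time to flush the items.

import time

c = chan()
for x in (1, 2, 3):
    c.put(x)
time.sleep(0.1)
c.close()
print(list(c))  # expected: [1, 2, 3]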
Example #18
File: test.py  Project: xiaoge56/lol
def main():
    l=Lock()
    dat = Queue()
    pool=Pool(processes=2)
    # pool.apply_async(produce_number, [l,dat])
    p1 = Process(target=produce_number, args=(l,dat,))
    p2 = Process(target=produce_number, args=(l,dat,))
    p3 = Process(target=produce_number, args=(l,dat,))
    p4 = Process(target=produce_number, args=(l,dat,))
    p5 = Process(target=produce_number, args=(l,dat,))
    p1.start()
    p2.start()
    p3.start()
    p4.start()
    p5.start()
    # p1.join()
    # p1.join()
    # p1.join()
    # p1.join()
    # Drain results as they arrive. Note: this loop never exits, so the
    # joins below are never reached in the original example.
    while True:
        if dat.empty():
            pass
        else:
            print dat.get()
    p1.join()
    p2.join()
    p3.join()
    p4.join()
Example #19
class CommunicationChannels(object):
    '''Bi directional communication channel
    '''
    def __init__(self):
        self.qin = Queue()
        self.qout = Queue()

    def set_child(self):
        q = self.qin
        self.qin = self.qout
        self.qout = q

    def close(self):
        self.qin.close()
        self.qout.close()

    def dump(self, obj):
        self.qout.put(obj, block=True)
        confirm = self.qin.get()
        assert confirm

    def load(self, conn=None):
        res = self.qin.get()
        self.qout.put(True)
        return res
Example #20
    def test_worker(self):
        tournament = axelrod.Tournament(
            name=self.test_name,
            players=self.players,
            game=self.game,
            turns=200,
            repetitions=self.test_repetitions)

        work_queue = Queue()
        chunks = tournament.match_generator.build_match_chunks()
        count = 0
        for chunk in chunks:
            work_queue.put(chunk)
            count += 1
        work_queue.put('STOP')

        done_queue = Queue()
        tournament._worker(work_queue, done_queue)
        for r in range(count):
            new_matches = done_queue.get()
            for index_pair, matches in new_matches.items():
                self.assertIsInstance(index_pair, tuple)
                self.assertEqual(len(matches), self.test_repetitions)
        queue_stop = done_queue.get()
        self.assertEqual(queue_stop, 'STOP')
Example #21
def multi_threaded_generator(generator, num_cached=10, num_threads=4):
    queue = MPQueue(maxsize=num_cached)
    # define producer (putting items into queue)
    def producer():
        for item in generator:
            queue.put(item)
            # pretend we are doing some calculations
            # sleep(0.5)
        queue.put("end")

    # start producers (as background processes)
    threads = []
    for _ in xrange(num_threads):
        np.random.seed()
        threads.append(Process(target=producer))
        threads[-1].daemon = True
        threads[-1].start()

    # run as consumer (read items from queue, in current thread)
    # print "starting while"
    item = queue.get()
    while item != "end":
        # print len(item)
        yield item
        item = queue.get()
    queue.close()
Example #22
File: pwork.py  Project: Scaurus/grab
def make_work(callback, tasks, limit, ignore_exceptions=True,
              taskq_size=50):
    """
    Run up to "limit" processes, do tasks and yield results.

    :param callback: the function that will process a single task
    :param tasks: a sequence, iterator or queue of tasks; each task is itself
        a sequence of arguments (a task consisting of a single argument
        should be wrapped in a list or tuple)
    :param limit: the maximum number of processes
    """
    
    # If tasks is a number, convert it to a sequence of that many tasks
    if isinstance(tasks, int):
        tasks = xrange(tasks)

    # Ensure the tasks sequence is an iterator
    tasks = iter(tasks)    

    taskq = Queue(taskq_size)

    # Here results of task processing will be saved
    resultq = Queue()

    # Prepare and run up to "limit" processes
    processes = []
    for x in xrange(limit):
        process = Worker(callback, taskq, resultq, ignore_exceptions)
        process.daemon = True
        process.start()
        processes.append(process)

    # Put tasks from tasks iterator to taskq queue
    # until tasks iterator ends
    # Do it in separate process
    def task_processor(task_iter, task_queue, limit):
        try:
            for task in task_iter:
                task_queue.put(task)
        finally:
            for x in xrange(limit):
                task_queue.put(STOP)

    processor = Process(target=task_processor, args=[tasks, taskq, limit])
    processor.daemon = True
    processor.start()

    while True:
        try:
            yield resultq.get(True, 0.2)
        except Empty:
            pass
        if not any(x.is_alive() for x in processes):
            break

    while True:
        try:
            yield resultq.get(False)
        except Empty:
            break
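A sketch of the pieces this helper assumes (not grab's actual implementation): a STOP sentinel and a Worker process that applies the callback to each task pulled from taskq and pushes the result onto resultq.

from multiprocessing import Process

STOP = 'STOP'  # assumed sentinel value

class Worker(Process):
    def __init__(self, callback, taskq, resultq, ignore_exceptions):
        Process.__init__(self)
        self.callback = callback
        self.taskq = taskq
        self.resultq = resultq
        self.ignore_exceptions = ignore_exceptions

    def run(self):
        for task in iter(self.taskq.get, STOP):
            # A task is normally a sequence of arguments; wrap bare values.
            args = task if isinstance(task, (list, tuple)) else (task,)
            try:
                self.resultq.put(self.callback(*args))
            except Exception:
                if not self.ignore_exceptions:
                    raise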
Example #23
def test_warc_writer_locking(tmpdir):
    """Test if WarcWriter is locking WARC files.
    When we don't have the .open suffix, WarcWriter locks the file and the
    external process trying to ``lock_file`` fails (result=0).
    """
    recorder = ProxyingRecorder(None, None, 'sha1', url='http://example.com')
    recorded_url = RecordedUrl(
            url='http://example.com', content_type='text/plain', status=200,
            client_ip='127.0.0.2', request_data=b'abc',
            response_recorder=recorder, remote_ip='127.0.0.3',
            timestamp=datetime.utcnow(), payload_digest=hashlib.sha1())

    dirname = os.path.dirname(str(tmpdir.mkdir('test-warc-writer')))
    wwriter = WarcWriter(Options(
        directory=dirname, no_warc_open_suffix=True))
    wwriter.write_records(recorded_url)
    warcs = [fn for fn in os.listdir(dirname) if fn.endswith('.warc')]
    assert warcs
    target_warc = os.path.join(dirname, warcs[0])
    # launch another process and try to lock WARC file
    q = Queue()
    p = Process(target=lock_file, args=(q, target_warc))
    p.start()
    p.join()
    assert q.get() == 'FAILED TO OBTAIN LOCK'
    wwriter.close()

    # locking must succeed after writer has closed the WARC file.
    p = Process(target=lock_file, args=(q, target_warc))
    p.start()
    p.join()
    assert q.get() == 'OBTAINED LOCK'
Example #24
class TestSetTemp(OpengbTestCase): 

    def setUp(self):
        self.to_printer = Queue()
        self.message_handler = server.MessageHandler(to_printer=self.to_printer)

    def test_pass_set_temps_method_to_printer(self):
        """Valid set temperatures result in a 'set_temp' message on the to_printer queue."""
        mh = self.message_handler.set_temp(bed=100, nozzle1=200, nozzle2=200)
        self.assertEqual(json.loads(self.to_printer.get())["method"], "set_temp")

    def test_valid_set_temps_passed_to_printer(self):
        """Valid set temperatures are added as a message on the to_printer queue."""
        mh = self.message_handler.set_temp(bed=100, nozzle1=200, nozzle2=200)
        self.assertDictEqual(json.loads(self.to_printer.get()), {
            "method": "set_temp",
            "params": {"bed": 100, "nozzle2": 200, "nozzle1": 200}})

    def test_set_bed_temp_defaults_to_none(self):
        """Unspecified bed_temperature is passed to_the printer as None."""
        mh = self.message_handler.set_temp(nozzle1=200, nozzle2=200)
        self.assertEqual(
            json.loads(self.to_printer.get())["params"]["bed"], None)
    
    def test_set_nozzle1_temp_defaults_to_none(self):
        """Unspecified nozzle1_temperature is passed to_the printer as None."""
        mh = self.message_handler.set_temp(bed=100, nozzle2=200)
        self.assertEqual(
            json.loads(self.to_printer.get())["params"]["nozzle1"], None)

    def test_set_nozzle2_temp_defaults_to_none(self):
        """Unspecified nozzle2_temperature is passed to_the printer as None."""
        mh = self.message_handler.set_temp(bed=100, nozzle1=200)
        self.assertEqual(
            json.loads(self.to_printer.get())["params"]["nozzle2"], None)
Example #25
File: 10.py  Project: tenghuanhe/PyScript
def test():
    NUMBER_OF_PROCESSES = 4
    TASK1 = [(mul, (i, 7)) for i in range(20)]
    TASK2 = [(plus, (i, 8)) for i in range(10)]

    task_queue = Queue()
    done_queue = Queue()

    for task in TASK1:
        task_queue.put(task)

    for i in range(NUMBER_OF_PROCESSES):
        Process(target=worker, args=(task_queue, done_queue)).start()

    print 'Unordered results:'
    for i in range(len(TASK1)):
        print '\t', done_queue.get()

    for task in TASK2:
        task_queue.put(task)

    for i in range(len(TASK2)):
        print '\t', done_queue.get()

    for i in range(NUMBER_OF_PROCESSES):
        task_queue.put('STOP')
Example #26
File: test_worker.py  Project: spulec/PyQS
def test_worker_fills_internal_queue_only_until_maximum_queue_size():
    """
    Test read workers fill internal queue only to maximum size
    """
    conn = boto3.client('sqs', region_name='us-east-1')
    # Set visibility timeout low to improve test speed
    queue_url = conn.create_queue(
        QueueName="tester", Attributes={'VisibilityTimeout': '1'})['QueueUrl']

    message = json.dumps({
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 'Test message',
        },
    })
    for i in range(3):
        conn.send_message(QueueUrl=queue_url, MessageBody=message)

    internal_queue = Queue(maxsize=2)
    worker = ReadWorker(queue_url, internal_queue, BATCHSIZE)
    worker.read_message()

    # The internal queue should only have two messages on it
    internal_queue.get(timeout=1)
    internal_queue.get(timeout=1)

    try:
        internal_queue.get(timeout=1)
    except Empty:
        pass
    else:
        raise AssertionError("The internal queue should be empty")
Example #27
File: fio_iostat.py  Project: agsha/pyfk
def driver():
    for ioengine in "sync libaio".split():
        for readwrite in "read write randread randwrite rw randrw".split():
            for sync in "0 1".split():
                for direct in "0 1".split():
                    name = "/home/sharath.g/fio2/{}-{}-o_sync{}-o_direct{}".format(ioengine, readwrite, sync, direct)

                    bs = 32*1024
                    time = 60
                    experiments = [hostname]
                    q1 = Queue()
                    q2 = Queue()
                    # cc('> {}'.format(name))
                    while bs <= 1024*1024*16:
                        pass
                        if direct=="1" and bs % 4096 != 0:
                            bs *= 2
                            continue
                        fio_cmd = "sudo fio --name=global --bs={bs} --ioengine={ioengine} --iodepth=10 --runtime={runtime} --time_based --size=3G --group_reporting --disable_lat=1 --disable_clat=1 --disable_slat=1 --clat_percentiles=0  --filename=/mnt/vdb1/fio/myfile --name={name} --rw={readwrite}  --sync={sync} --direct={direct} --minimal".format(bs=bs, ioengine=ioengine, runtime=time, name=name, readwrite=readwrite, sync=sync, direct=direct)
                        iostat_cmd = "/usr/bin/iostat -k -x -d 1 {} /dev/vdb".format(time)

                        t1 = Process(target=fio, args=(fio_cmd, q1))
                        t2 = Process(target=iostat, args=(iostat_cmd, q2))
                        t1.start()
                        t2.start()
                        t1.join()
                        t2.join()
                        experiments.append([bs, fio_cmd, q1.get(), iostat_cmd, q2.get()])
                        l(json.dumps(experiments, indent=2))
                        bs*=2
                    with open(name, "w") as output_file:
                        json.dump(experiments, output_file, indent=2)
                    log.debug("======================")
                    log.debug(json.dumps(experiments, indent=2))
Example #28
  def processLabelit(self,inp=False):
    """
    Initiate Labelit runs.
    """
    if self.verbose:
      self.logger.debug('LabelitPP::processLabelit')
    try:
      queue = Queue()
      params = {}
      params['test'] = self.test
      params['cluster'] = self.cluster_use
      params['verbose'] = self.verbose
      args1 = {}
      if inp:
        args1['input'] = inp
      else:
        args1['input'] = self.images
      args1['output'] = queue
      args1['params'] = params
      args1['logger'] = self.logger
      Process(target=RunLabelit,kwargs=args1).start()
      self.labelit_results = queue.get()
      self.labelit_log = queue.get()

    except:
      self.logger.exception('**Error in LabelitPP.processLabelit**')
Example #29
	def create_lists(self, copy_list, new_loc):
		# Used to create the call lists
		
		to_q= Queue()
		resp_q = Queue()

		p = Process(target=self.render_thing, args=(to_q,resp_q,self.initalize,new_loc,self.OFFSET, self.FACTOR, self.use_old))
		p.start()
		
		to_q.put(self.diamonds, False)
		to_q.put(copy_list, False)
		to_q.put(self.OFFSET, False)
		to_q.put(self.rw.convert, False)
		
		print "process....."
		print "get pos_list"
		self.pos_list = resp_q.get()
		self.diamonds = resp_q.get()
		print self.pos_list
		new_dic = resp_q.get()
		dic = self.combine(self.pos_list, new_dic, self.trans.location_var)
		self.trans.location_var = dic
		
		p.join()
		
		print "updating...."
		self.rw.need_lists = True
		self.initalize = False
Example #30
def test_processor():

    pathq = Queue()
    pathq.put(io.StringIO(SAMPLE_XML))

    outputq = Queue()

    def process_dump(dump, path):
        for page in dump:
            yield page.id


    processor = Processor(pathq, outputq, process_dump)
    processor.start()

    error, item = outputq.get()
    assert not error
    eq_(item, 1)

    error, item = outputq.get()
    assert not error
    eq_(item, 2)

    error, item = outputq.get()
    assert not error
    eq_(item, DONE)
Example #31
        producers = []
        no_of_producers = min(2, len(dirs))
        per_prod = len(dirs) // no_of_producers
        for i in range(no_of_producers):
            p = Process(target=producer,
                        args=(dirs[i * per_prod:i * per_prod + per_prod],
                              shared_q, count_q))
            p.start()
            producers.append(p)

        for p in producers:
            p.join()

        count = 0
        for i in range(len(processes)):
            count += count_q.get()
    else:
        count = producer(dirs, shared_q)

    # wait for all processes to exit
    print("Waiting for processes to terminate...")
    for i in range(len(processes)):
        shared_q.put(None)
    tcount = 0
    for i in range(len(processes)):
        tcount += res_q.get()
    for p in processes:
        p.join()

    print("total files which had only 1 face:", count)
    print("total_written", tcount)
Example #32
class DataSet(object):
    def __init__(self, images, labels):
        assert images.shape[0] == labels.shape[0], (
            'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
        self._num_examples = images.shape[0]
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    def image_worker(self, worker_num, q, e, work_queue):
        while True:
            try:
                image, label = work_queue.get(timeout=0.1)
                image = random_crop(image)
                image = random_HLS(image)
                image = random_flip(image)

                q.put([image, label])
            except:
                if e.is_set():
                    return

    def loop_fill(self, epoch_num, batch_size, q, e):
        self.workers = []
        self.pipequeue = Queue()

        for k in range(NUM_PROCESSES):
            worker = Process(target=self.image_worker,
                             args=(k, q, e, self.pipequeue))
            worker.daemon = True
            worker.start()
            self.workers += [worker]

        for i in xrange(epoch_num * (self.num_examples / batch_size)):
            #for i in range(1):
            #print 'processing batch#', i
            while (self.pipequeue.qsize() > MAX_QUEUE) or (q.qsize() >
                                                           MAX_QUEUE):
                time.sleep(0.2)
            self.batch_images, self.batch_labels = self.next_batch(batch_size)
            for image, labels in zip(self.batch_images, self.batch_labels):
                self.pipequeue.put([image, labels])

        for worker in self.workers:
            worker.join()

    def start_fill(self, epoch_num, batch_size):
        print 'start filling...'
        self._resource_pool = Queue()
        self._stop_event = Event()
        self._filler = Process(target=self.loop_fill,
                               args=(epoch_num, batch_size,
                                     self._resource_pool, self._stop_event))
        self._filler.start()
        print 'start_fill finish.'

    def stop_fill(self):
        print 'stop filling.'
        self._stop_event.set()
        self._filler.join()
        print 'filling stopped'

    def next_batch_from_queue(self, batch_size):
        ready_images = []
        ready_labels = []
        for i in range(batch_size):
            image, label = self._resource_pool.get()
            ready_images += [image]
            ready_labels += [label]
        return ready_images, ready_labels

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, distort=False):
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Shuffle the data
            perm = np.arange(self._num_examples)
            np.random.shuffle(perm)
            self._images = self._images[perm]
            self._labels = self._labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._num_examples
        end = self._index_in_epoch
        result_images = self._images[start:end]
        #if distort == True:
        #    result_images = random_crop(result_images)
        #    result_images = random_HLS(result_images)
        #    result_images = random_flip(result_images)

        return result_images, self._labels[start:end]
Example #33
File: lidar.py  Project: shonxg/gunna
class LidarParser:
    def __init__(self, server):
        assert isinstance(server, CharStream)
        self.lidarData = [
            []
        ] * 360  # A list of 360 elements Angle, Distance, quality
        self.dataArrs = Queue()
        self.init_level = 0
        self.server = server
        self.rpm = 0

    def __len__(self):
        return int(self.dataArrs.qsize())

    def pop(self):
        return self.dataArrs.get()

    def savePacketQuarter(self, angle, data):
        """Save a sample.

        Takes the angle (an int, from 0 to 359) and the list of four bytes of data in the order they arrived.
        """
        logging.debug("Saving quarter-packet of sum %s at angle %s." %
                      (sum(data), angle))
        # unpack data using the denomination used during the discussions
        x = data[0]
        x1 = data[1]
        x2 = data[2]
        x3 = data[3]

        dist_mm = x | (
            (x1 & 0x3f) << 8)  # distance is coded on 13 bits ? 14 bits ?
        quality = x2 | (x3 << 8)  # quality is on 16 bits
        self.lidarData[angle] = [dist_mm, quality]

    def packageScan(self):
        try:
            ragged = False
            for angle in self.lidarData:
                if len(angle) != 2:
                    ragged = True
                    break
            if not ragged:
                dataArr = np.vstack(self.lidarData)
                if dataArr.size > 0:
                    return dataArr
        except ValueError as e:  # Data is likely ragged (not all filled).
            logging.debug("Got a ValueError.")
            raise e

    def parse(self):
        while True:
            time.sleep(0.00001)  # do not hog the processor power
            nb_errors = 0

            if self.init_level == 0:  # We're searching for a start byte.
                logging.debug("Looking for a start byte now.")
                char = self.server.getChar()
                if char is None:
                    continue
                b = ord(char)
                # start byte
                if b == 0xFA:
                    logging.debug("Got a start byte, 0xFA!")
                    self.init_level = 1
                else:
                    self.init_level = 0
            elif self.init_level == 1:  # We found a start byte; now we're parsing.
                logging.debug("Looking for an index byte now.")
                # position index
                b = ord(self.server.getChar(1))
                if b >= 0xA0 and b <= 0xF9:  # The angle indices range from 160 to 249
                    if b == 0xA0:  # We're at the first index; save the previous scan.
                        #######################
                        #### YIELD/RETURN #####
                        #######################
                        yield self.packageScan(), self.rpm
                        #######################
                        #### YIELD/RETURN #####
                        #######################
                    index = b - 0xA0
                    logging.debug("At index %d." % index)
                    self.init_level = 2
                elif b != 0xFA:  # If the next byte after the start byte isn't in the index range, go back to searching for a start byte.
                    self.init_level = 0
            elif self.init_level == 2:
                logging.debug("Getting two speed bytes.")
                # speed
                b_speed = [ord(b) for b in self.server.getChar(2)]

                # data
                #
                # (Data description from
                # https://xv11hacking.wikispaces.com/LIDAR+Sensor .)
                #
                # Each packet is organized as follows:
                # <start> <index> <speed_L> <speed_H>
                # [Data 0] [Data 1] [Data 2] [Data 3] <checksum_L> <checksum_H>
                #
                # where:
                #
                # start is always 0xFA=250
                # index is the index byte in the 90 packets, going from 0xA0
                # (packet 0, readings 0 to 3) to 0xF9 (packet 89, readings 356 to 359).
                # speed is a two-byte information, little-endian. It represents
                # the speed, in 64th of RPM (aka value in RPM represented in
                # fixed point, with 6 bits used for the decimal part).
                # [Data 0] to [Data 3] are the 4 readings. Each one is 4 bytes
                # long, and organized as follows :
                #
                # `byte 0 : <distance 7:0>`
                # `byte 1 : <"invalid data" flag> <"strength warning" flag> <distance 13:8>`
                # `byte 2 : <signal strength 7:0>`
                # `byte 3 : <signal strength 15:8>`

                logging.debug("Getting four data0 bytes.")
                b_data0 = [ord(b) for b in self.server.getChar(4)]
                logging.debug("Getting four data1 bytes.")
                b_data1 = [ord(b) for b in self.server.getChar(4)]
                logging.debug("Getting four data2 bytes.")
                b_data2 = [ord(b) for b in self.server.getChar(4)]
                logging.debug("Getting four data3 bytes.")
                b_data3 = [ord(b) for b in self.server.getChar(4)]

                # for the checksum, we need all the data of the packet...
                # this could be collected in a more elegant fashion...
                all_data = [0xFA, index + 0xA0
                            ] + b_speed + b_data0 + b_data1 + b_data2 + b_data3

                # checksum
                logging.debug("Getting two checksum bytes.")
                b_checksum = [ord(b) for b in self.server.getChar(2)]
                incoming_checksum = int(
                    b_checksum[0]) + (int(b_checksum[1]) << 8)
                logging.debug("incoming_checksum is %s" % incoming_checksum)

                # verify that the received checksum is equal to the one computed from the data
                if checksum(all_data) == incoming_checksum:
                    self.rpm = compute_speed(b_speed)
                    self.savePacketQuarter(index * 4 + 0, b_data0)
                    self.savePacketQuarter(index * 4 + 1, b_data1)
                    self.savePacketQuarter(index * 4 + 2, b_data2)
                    self.savePacketQuarter(index * 4 + 3, b_data3)
                else:
                    # the checksum does not match, something went wrong...
                    nb_errors += 1
                    logging.debug("Checksum errors!")

                    # "saveScan" data in error state.
                    self.savePacketQuarter(index * 4 + 0, [0, 0x80, 0, 0])
                    self.savePacketQuarter(index * 4 + 1, [0, 0x80, 0, 0])
                    self.savePacketQuarter(index * 4 + 2, [0, 0x80, 0, 0])
                    self.savePacketQuarter(index * 4 + 3, [0, 0x80, 0, 0])
                self.init_level = 0  # reset and wait for the next packet

            else:  # default, should never happen...
                raise RuntimeError("You shouldn't be here!")
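Sketches of the two helpers the parser assumes, following the XV-11 packet description quoted in the comments above (assumptions based on that description, not necessarily the project's own code): the speed is little-endian in 64ths of an RPM, and the checksum folds the 20 bytes preceding it into 15 bits.

def compute_speed(data):
    # data is the two speed bytes, little-endian, in 64ths of an RPM.
    return (data[0] | (data[1] << 8)) / 64.0

def checksum(data):
    # data is the 20 packet bytes before the checksum field.
    words = [data[2 * t] + (data[2 * t + 1] << 8) for t in range(10)]
    chk32 = 0
    for w in words:
        chk32 = (chk32 << 1) + w
    folded = (chk32 & 0x7FFF) + (chk32 >> 15)  # wrap around to 15 bits
    return folded & 0x7FFF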
Example #34
class AgentCtrl:
    '''
    Maintains the agent child process.
    '''
    def __init__(self, agent_name, size_x, size_y, detector_num, fighter_num,
                 gpu_num):
        self.agent_name = agent_name
        self.size_x = size_x
        self.size_y = size_y
        self.detector_num = detector_num
        self.fighter_num = fighter_num
        self.send_q = None
        self.recv_q = None
        self.agent = None
        self.gpu_num = gpu_num

    def agent_init(self):
        self.send_q = Queue(1)
        self.recv_q = Queue(1)
        self.agent = AgentProc(self.agent_name, self.size_x, self.size_y,
                               self.detector_num, self.fighter_num,
                               self.send_q, self.recv_q, self.gpu_num)
        self.agent.start()
        try:
            agent_msg = self.recv_q.get(True, AGENT_INIT_TIMEOUT)
        except:
            self.terminate()
            return False
        else:
            return True

    def terminate(self):
        if self.agent:
            if self.agent.is_alive():
                self.send_q.put('done')
                self.agent.terminate()
        self.agent = None
        if self.send_q:
            self.send_q.close()
        self.send_q = None
        if self.recv_q:
            self.recv_q.close()
        self.recv_q = None

    def get_action(self, obs_raw_dict, step_cnt):
        '''
        Get an action.
        :param obs_raw_dict: raw obs
        :param step_cnt: step counter, starting from 1
        :return: action: the action information
        :return: result: 0 = normal; 1 = crashed; 2 = timed out
        '''
        action = []
        result = 0
        self.send_q.put({'obs_raw_dict': obs_raw_dict, 'step_cnt': step_cnt})
        try:
            action = self.recv_q.get(True, AGENT_RESP_TIMEOUT)
        except:
            if not self.agent.is_alive():
                # the child process has died: crashed
                result = 1
            else:
                # the child process is alive but unresponsive: hung
                result = 2
            self.__agent_restart()
        return action, result

    def __agent_restart(self):
        '''
        Restart the agent. Since a restart implies the agent started successfully before, the restart is assumed to succeed as well; if it does not, the program will hang, so investigate here first if such problems appear.
        :return:
        '''
        self.terminate()
        self.send_q = Queue(1)
        self.recv_q = Queue(1)
        self.agent = AgentProc(self.agent_name, self.size_x, self.size_y,
                               self.detector_num, self.fighter_num,
                               self.send_q, self.recv_q, self.gpu_num)
        self.agent.start()
        self.recv_q.get()
Example #35
class ProcessStats(Process):
    def __init__(self):
        super(ProcessStats, self).__init__()
        self.episode_log_q = Queue(
            maxsize=100
        )  # Queue for saving episode logs (multiprocessing queue)
        self.episode_count = Value('i', 0)
        self.training_count = Value('i', 0)
        self.should_save_model = Value('i', 0)
        self.trainer_count = Value('i', 0)
        self.predictor_count = Value('i', 0)
        self.agent_count = Value('i', 0)
        self.total_frame_count = 0

    def FPS(self):
        # average FPS from the beginning of the training (not current FPS)
        return np.ceil(self.total_frame_count /
                       (time.time() - self.start_time))

    def TPS(self):
        # average TPS from the beginning of the training (not current TPS)
        return np.ceil(self.training_count.value /
                       (time.time() - self.start_time))

    def run(self):
        with open(Config.RESULTS_FILENAME,
                  'a') as results_logger, open(Config.EPISDOES_LOG_FILENAME,
                                               'a') as eval_logger:
            rolling_frame_count = 0
            rolling_reward = 0
            results_q = queueQueue(maxsize=Config.STAT_ROLLING_MEAN_WINDOW)
            episode_scores = []

            self.start_time = time.time()
            first_time = datetime.now()
            while True:
                episode_time, reward, length = self.episode_log_q.get(
                )  # get episode log from queue
                results_logger.write(
                    '%s, %d, %d\n' %
                    (episode_time.strftime("%Y-%m-%d %H:%M:%S"), reward,
                     length))
                results_logger.flush()

                self.total_frame_count += length
                self.episode_count.value += 1

                rolling_frame_count += length
                rolling_reward += reward

                if results_q.full():
                    old_episode_time, old_reward, old_length = results_q.get()
                    rolling_frame_count -= old_length
                    rolling_reward -= old_reward
                    first_time = old_episode_time

                results_q.put((episode_time, reward, length))

                episode_scores.append(reward)
                if len(episode_scores) >= 1000:
                    eval_logger.write("{} {} {} {}\n".format(
                        self.episode_count.value,
                        np.max(episode_scores[-100:]),
                        np.mean(episode_scores[-100:]),
                        np.min(episode_scores[-100:])))
                    eval_logger.flush()
                    episode_scores = []

                if self.episode_count.value % Config.SAVE_FREQUENCY == 0:
                    self.should_save_model.value = 1

                if self.episode_count.value % Config.PRINT_STATS_FREQUENCY == 0:
                    print('[Time: %8d] '
                          '[Episode: %8d Score: %10.4f] '
                          '[RScore: %10.4f RPPS: %5d] '
                          '[PPS: %5d TPS: %5d] '
                          '[NT: %2d NP: %2d NA: %2d]' %
                          (int(time.time() - self.start_time),
                           self.episode_count.value, reward, rolling_reward /
                           results_q.qsize(), rolling_frame_count /
                           (datetime.now() - first_time).total_seconds(),
                           self.FPS(), self.TPS(), self.trainer_count.value,
                           self.predictor_count.value, self.agent_count.value))
                    sys.stdout.flush()
Example #36
def main():
    global parent_pid
    parent_pid = os.getpid()
    args_info = ArgsProcessing(sys.argv[1:])  # parse the command-line arguments
    work_mode = args_info.current_mode  # working mode
    log_file_list = args_info.log_file_list  # all log files
    signal.signal(signal.SIGINT, signal_handler)  # SIGINT is the signal sent by Ctrl+C (value 2)
    start_time = time.time()
    # Mode 1: analyze a single transaction; Mode 2: all transactions; Mode 3: a single block; Mode 4: all blocks
    if work_mode == 1:
        tx_hash = args_info.tx_hash  # transaction hash
        # Collect all transaction data as dictionaries
        all_tx_dict_list = get_all_log_dict(log_file_list, 'transaction')
        overall_earliest_msg, overall_latest_msg = retrieve_earliest_latest_msg(
            all_tx_dict_list, tx_hash)
        if overall_earliest_msg and overall_latest_msg:
            print('Earliest: %s' % overall_earliest_msg)
            print('Latest:   %s' % overall_latest_msg)
            interval_time = millisecond2time_format(
                calc_millisecond_interval(overall_latest_msg[0],
                                          overall_earliest_msg[0]))
            print('Interval: %s' % interval_time)
        else:
            print('The transaction %s was not found in log file!' % tx_hash)
    elif work_mode == 2:
        all_tx_dict_list = get_all_log_dict(log_file_list, 'transaction')
        all_tx_hash = []  # all transaction hashes
        for tx_dict in all_tx_dict_list:
            all_tx_hash.extend(list(tx_dict.keys()))
        # Remove duplicates
        all_tx_hash = list(set(all_tx_hash))
        broadcasting_time_queue = Queue()  # Queue holding the broadcast times
        processes = []
        # Get the number of CPU cores
        processor_num = cpu_count()
        # Split all transaction hashes into roughly equal chunks, one per CPU core
        split_all_tx_hash = split_list(all_tx_hash, processor_num)
        for work_list in split_all_tx_hash:
            # Start one child process per chunk (CPU core) to compute broadcast times
            p = Process(target=calc_broadcasting_time,
                        args=(work_list, broadcasting_time_queue,
                              all_tx_dict_list))
            p.start()
            processes.append(p)
        for process in processes:
            # Wait for all child processes to finish
            process.join()
        broadcasting_time_list = []
        while True:
            # Collect each child process's results into a list in the parent
            broadcasting_time_list.append(broadcasting_time_queue.get())
            if broadcasting_time_queue.empty():
                break
        # Merge the sorted lists produced by the child processes using a min-heap
        broadcasting_time_list = list(heapq.merge(*broadcasting_time_list))
        # Compute the mean and median of the broadcast-time list
        average, median = get_average_median(broadcasting_time_list)
        print('Shortest: %s' % millisecond2time_format(broadcasting_time_list[0]))
        print('Longest:  %s' % millisecond2time_format(broadcasting_time_list[-1]))
        print('Mean:     %s' % millisecond2time_format(average))
        print('Median:   %s' % millisecond2time_format(median))
    elif work_mode == 3:
        height = args_info.height  # block height
        all_block_dict_list = get_all_log_dict(log_file_list, 'block')
        overall_earliest_msg, overall_latest_msg = retrieve_earliest_latest_msg(
            all_block_dict_list, height)
        if overall_earliest_msg and overall_latest_msg:
            print('Earliest: %s' % overall_earliest_msg)
            print('Latest:   %s' % overall_latest_msg)
            interval_time = millisecond2time_format(
                calc_millisecond_interval(overall_latest_msg[0],
                                          overall_earliest_msg[0]))
            print('Interval: %s' % interval_time)
        else:
            print('The block height %s was not found in log file!' % height)
    elif work_mode == 4:
        all_block_dict_list = get_all_log_dict(log_file_list, 'block')
        all_block_height = []
        for block_dict in all_block_dict_list:
            all_block_height.extend(list(block_dict.keys()))
        all_block_height = list(set(all_block_height))
        broadcasting_time_queue = Queue()
        processes = []
        processor_num = cpu_count()
        split_all_block_height = split_list(all_block_height, processor_num)
        for work_list in split_all_block_height:
            p = Process(target=calc_broadcasting_time,
                        args=(work_list, broadcasting_time_queue,
                              all_block_dict_list))
            p.start()
            processes.append(p)
        for process in processes:
            process.join()
        broadcasting_time_list = []
        while True:
            broadcasting_time_list.append(broadcasting_time_queue.get())
            if broadcasting_time_queue.empty():
                break
        broadcasting_time_list = list(heapq.merge(*broadcasting_time_list))
        average, median = get_average_median(broadcasting_time_list)
        print('Shortest: %s' % millisecond2time_format(broadcasting_time_list[0]))
        print('Longest:  %s' % millisecond2time_format(broadcasting_time_list[-1]))
        print('Mean:     %s' % millisecond2time_format(average))
        print('Median:   %s' % millisecond2time_format(median))

    print('Analysis time:', time.time() - start_time)
Example #37
class Main(QMainWindow):
    def __init__(self):
        # Initialize the super class
        super().__init__()
        # self.setMinimumSize(QSize(640, 480))
        self.setMinimumWidth(480)
        self.setWindowTitle("pyJulieScanner - A Kodi Database Scanner")

        centralWidget = QWidget(self)
        self.setCentralWidget(centralWidget)
        gridLayout = QGridLayout(self)
        centralWidget.setLayout(gridLayout)
        self.allOptions = {}

        # title.setAlignment(QtCore.Qt.AlignCenter)
        # gridLayout.addWidget(title, 0, 0)

        # Setup checkboxes for possible tasks
        checkboxLabel = QLabel("Perform the following actions:", self)
        option1 = QCheckBox(
            "Produce a file containing all movies Kodi does not see", self)
        option2 = QCheckBox("Produce a file containing all movies Kodi sees",
                            self)
        option3 = QCheckBox("Check TV shows for missing seasons", self)

        self.fileOutputDir = "./"
        self.fileOutputDirText = "Output Files to: "
        self.fileOutputDirButton = QPushButton(self.fileOutputDirText, self)
        self.fileOutputDirLabel = QLabel(self.fileOutputDir, self)
        # Connect it to setting the outputdir
        self.fileOutputDirButton.clicked.connect(self.setOutputDir)

        self.allOptions["option1"] = option1
        self.allOptions["option2"] = option2
        self.allOptions["option3"] = option3

        # The log area
        self.logText = QTextEdit("", self)
        self.logText.setReadOnly(True)

        # Action button!
        self.doButton = QPushButton("Do Checked Tasks", self)
        # Connect a function to the button
        self.doButton.clicked.connect(self.doAllPossibleActions)

        self.option1ProgressLabel = QLabel("", self)
        self.option1ProgressLabel.hide()

        gridLayout.addWidget(checkboxLabel, 0, 0, 1, 3)
        gridLayout.addWidget(option1, 1, 0, 1, 3)
        gridLayout.addWidget(option2, 2, 0, 1, 3)
        gridLayout.addWidget(option3, 3, 0, 1, 3)
        gridLayout.addWidget(self.fileOutputDirButton, 4, 0, 1, 1)
        gridLayout.addWidget(self.fileOutputDirLabel, 4, 1, 1, 3)
        # gridLayout.addWidget(self.fileOutputDirText, 4, 1)
        gridLayout.addWidget(QLabel("Log:"), 5, 0)
        gridLayout.addWidget(self.logText, 6, 0, 1, 4)
        gridLayout.addWidget(self.doButton, 5, 3)
        gridLayout.addWidget(self.option1ProgressLabel, 7, 0, 1, 4)
        # Find Kodi database
        # self.databases = self.getDatabases()

        self.getDatabases()
        self.activeProcess = None
        self.latest = None
        self.option1Timer = None
        # Find all media that Kodi doesn't see
        # Find all media that Kodi sees
        # Find missing TV Show seasons

    def doAllPossibleActions(self):
        if not self.fileOutputDir:
            # Get a file output location
            self.setOutputDir()
            if self.fileOutputDir is None or not self.fileOutputDir:
                msg = QMessageBox()
                msg.setWindowTitle("Error")
                msg.setText("No Output Directory Selected!")
                msg.exec()
                self.updateText("No actions selected!")
                return None
        p = 0

        r = re.compile("[\\:\\w&.\\-\\/]+MyVideos\\d+.db$")
        videoDatabase = list(filter(r.match, self.databases))[0]
        if self.allOptions['option1'].isChecked():
            # If a current job is running, kill it!
            if self.option1Timer is not None and self.option1Timer.isActive():
                self.option1Timer.stop()
            if self.activeProcess is not None and self.activeProcess.is_alive(
            ):
                self.updateText("Killing active task...")
                self.activeProcess.terminate()
                self.updateText("Success! Starting new requested tasks...")
            # Establish a connection to the movie database
            connection = sqlite3.connect(videoDatabase)
            cursor = connection.cursor()
            # Get the movie name, movie path, movie filename
            cursor.execute("""SELECT DISTINCT strFilename,strPath FROM files
                            INNER JOIN movie
                            ON files.idFile = movie.idFile
                            INNER JOIN path 
                            ON files.idPath = path.idPath;""")
            allKnownMovies = cursor.fetchall()
            self.updateText("Found {} files in Kodi...".format(
                len(allKnownMovies)))
            connection.close()
            p += 1
            mediaDirectory = QFileDialog.getExistingDirectory(
                self, "Select Media Directory To Scan")
            if mediaDirectory == "":
                self.updateText(
                    "A media directory must be selected for option 1! Skipping..."
                )
            else:
                # Definitely broken!
                # Start a thread safe queue
                self.queue = Queue()
                # Setup a timer
                self.option1Timer = QTimer(self)
                self.option1Timer.setInterval(1000)
                self.option1Timer.timeout.connect(
                    self.updateMovieDirectoriesScanned)
                # Spin up a process
                self.activeProcess = VideoWorker(
                    self.queue, mediaDirectory, allKnownMovies,
                    self.fileOutputDir + "/Missing Movies.csv")
                # Start a timer for checking and updating the files found marker
                self.activeProcess.start()
                self.option1Timer.start()

        if self.allOptions['option2'].isChecked():
            # Establish a connection to the movie database
            connection = sqlite3.connect(videoDatabase)
            cursor = connection.cursor()
            # Get the movie name, movie path, movie filename
            cursor.execute("SELECT DISTINCT c00 FROM movie;")
            allKnownMovies = sorted(cursor.fetchall())
            allKnownMovies = list(
                map(lambda nameTup: '"{}"'.format(nameTup[0]), allKnownMovies))
            # Point out possible duplicates?
            cursor.execute("""SELECT movie.c00,cnt,strPath FROM movie
                            INNER JOIN files
                            ON files.idFile = movie.idFile
                            INNER JOIN path
                            ON path.idPath = files.idPath
                            LEFT JOIN (
                            SELECT c00, COUNT(c00) AS cnt FROM movie 
                            GROUP BY c00
                            ) AS temptable
                            ON movie.c00 = temptable.c00
                            WHERE cnt > 1;""")
            duplicates = sorted(cursor.fetchall())
            duplicates = list(
                map(
                    lambda entry: ",".join(
                        list(map(lambda item: '"{}"'.format(item), entry))),
                    duplicates))
            self.updateText("Writing out a CSV of movie names from Kodi...")
            with open("{}/All Movies.csv".format(self.fileOutputDir),
                      "w") as outfile:
                outfile.write("Title\n")
                outfile.write("\n".join(allKnownMovies))
            self.updateText("{}/All Movies.csv".format(self.fileOutputDir))
            with open("{}/All Duplicate Movies.csv".format(self.fileOutputDir),
                      "w") as outfile:
                outfile.write("Title,Total,Path\n")
                outfile.write("\n".join(duplicates))
            self.updateText("{}/All Duplicate Movies.csv".format(
                self.fileOutputDir))
            # Get the movie name, movie path, movie filename
            p += 1
            # self.updateText("Found {} movies logged by Kodi...".format(numFound))
            # Write out the file
        if self.allOptions['option3'].isChecked():
            connection = sqlite3.connect(videoDatabase)
            cursor = connection.cursor()
            # cursor.execute("SELECT idMovie, idFile, c00 FROM movie")
            # allKnownShows = cursor.fetchall()
            connection.close()
            self.updateText("Option 3 is not yet available")
            p += 1
            # self.updateText("Found {} missing seasons".format(numMissing))
            # Write out the file

        if p == 0:
            msg = QMessageBox()
            msg.setWindowTitle("Error")
            msg.setText("No Actions Selected!")
            msg.exec()
            self.updateText("No actions selected!")
        return None

    # Possibly broken
    def updateMovieDirectoriesScanned(self):
        # Check if active
        qsize = self.queue.qsize()
        totalActive = len(active_children())
        print("Active children: {}".format(totalActive))
        # Check the queue
        for num in range(qsize):
            self.latest = self.queue.get()

        if totalActive != 0:
            if self.latest is not None:
                self.option1ProgressLabel.show()
                self.option1ProgressLabel.setText("Currently in {}...".format(
                    str(self.latest)))
        else:
            self.updateText("Cleaning up...")
            if self.latest is not None:
                self.option1ProgressLabel.hide()
                self.updateText(
                    "Safe finish: {} files not imported into Kodi".format(
                        self.latest))
                self.updateText("{}Missing Movies.csv".format(
                    self.fileOutputDir))
                self.option1Timer.stop()
        print("Tick!")

    def setOutputDir(self):
        self.fileOutputDir = QFileDialog.getExistingDirectory(
            self, "Select Media Location")
        self.fileOutputDirLabel.setText(self.fileOutputDir)
        self.updateText("Output directory selected as:\n{}".format(
            self.fileOutputDir))
        return None

    def updateText(self, moreText):
        self.logText.setText("{}{}{}".format(self.logText.toPlainText(),
                                             moreText, "\n"))
        self.logText.verticalScrollBar().setValue(
            self.logText.verticalScrollBar().maximum())
        return None

    def getDatabases(self, databaseRoot=None):
        # For windows only
        if databaseRoot is None:
            databaseRoot = "C:/Users/{}/AppData/Roaming/Kodi/userdata/Database".format(
                getuser())

        databases = []
        if exists(databaseRoot):
            allfiles = listdir(databaseRoot)
            for afile in allfiles:
                if afile[-3:] == ".db":
                    databases.append(afile)
            if databases == []:
                self.updateText(
                    "Directory exists but no databases found? Program will crash if ran further"
                )
            else:
                databases = list(
                    map(
                        lambda database: "{}/{}".format(
                            databaseRoot, database), databases))
                self.updateText("Databases found: {}".format(databases))
            self.databases = databases
        else:
            # Need a better work around
            if databaseRoot == "" or databaseRoot != "C:/Users/{}/AppData/Roaming/Kodi/userdata/Database".format(
                    getuser()):
                exit()
            self.updateText("No databases found at {}".format(databaseRoot))
            self.updateText(
                "A new location must be selected and must contain the userdata folder for kodi or the application will crash"
            )
            # Allow the user to pick a directory but see files
            # databaseRoot = QFileDialog.getExistingDirectory(self, "Select Database Location",QFileDialog.)
            databaseRoot = QFileDialog.getExistingDirectory(
                self, "Select Database Location")
            self.getDatabases(databaseRoot=databaseRoot)
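
VideoWorker is not shown in the example above; the following is a minimal sketch of
what the GUI appears to expect from it (an assumption, not the original class): a
Process that walks the chosen media directory, reports the directory it is currently
scanning through the queue, writes files Kodi does not know about to the given CSV
path, and finally puts the count of missing files on the queue.

import csv
import os
from multiprocessing import Process


class VideoWorkerSketch(Process):
    # Hypothetical stand-in for VideoWorker; names and behaviour are assumptions.
    def __init__(self, queue, media_dir, known_movies, csv_path):
        super().__init__()
        self.queue = queue
        self.media_dir = media_dir
        self.known = {name for name, _ in known_movies}  # (strFilename, strPath) rows
        self.csv_path = csv_path

    def run(self):
        missing = []
        for root, _dirs, files in os.walk(self.media_dir):
            self.queue.put(root)  # progress update: directory currently being scanned
            for name in files:
                if name not in self.known:
                    missing.append(os.path.join(root, name))
        with open(self.csv_path, 'w', newline='') as outfile:
            writer = csv.writer(outfile)
            writer.writerow(['Missing File'])
            writer.writerows([m] for m in missing)
        self.queue.put(len(missing))  # final item: how many files Kodi did not have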
예제 #38
0
    def search_single_record(self,
                             rec,
                             n_parallel_words=1,
                             word_limit=None,
                             process_timeout=None,
                             maximum_matches=1000,
                             filter=None):
        if n_parallel_words is None:
            n_parallel_words = cpu_count()

        if word_limit is None:
            word_limit = self.N

        initial_q = managerQueue.Queue()

        for field_name in self.index_names[:word_limit]:
            initial_q.put({field_name: rec[field_name]})

        # enqueue a sentinel value so we know we have reached the end of the queue
        initial_q.put('STOP')
        queue_empty = False

        # create an empty queue for results
        results_q = Queue()

        # create a set of unique results, using MongoDB _id field
        unique_results = set()

        l = list()

        while True:

            # build children processes, taking cursors from in_process queue first, then initial queue
            p = list()
            while len(p) < n_parallel_words:
                word_pair = initial_q.get()
                if word_pair == 'STOP':
                    # if we reach the sentinel value, set the flag and stop queuing processes
                    queue_empty = True
                    break
                if not initial_q.empty():
                    p.append(
                        Process(target=get_next_match,
                                args=(results_q, word_pair, self.collection,
                                      np.array(rec['signature']),
                                      self.distance_cutoff, maximum_matches)))

            if len(p) > 0:
                for process in p:
                    process.start()
            else:
                break

            # collect results, taking care not to return the same result twice

            num_processes = len(p)

            while num_processes:
                results = results_q.get()
                if results == 'STOP':
                    num_processes -= 1
                else:
                    for key in results.keys():
                        if key not in unique_results:
                            unique_results.add(key)
                            l.append(results[key])

            for process in p:
                process.join()

            # yield a set of results
            if queue_empty:
                break

        return l
예제 #39
0
class SketchDataHandler(DataHandler):
    def __init__(self, root_path, paths_file, batch_size, target_size):
        super(SketchDataHandler, self).__init__(batch_size, target_size)
        self._index = 0
        self.root_path = root_path
        self._image_paths = self._get_image_paths(
            os.path.join(root_path, paths_file))
        if (len(self._image_paths) < batch_size * 100):
            self._image_paths = self._image_paths * 100
        self._shuffle_image_paths()
        self._total_num = len(self._image_paths)

        self.queue = Queue(40)
        self.msg_queue = Queue(4)
        self.procs = []
        self.start_threads()

    def _get_image_paths(self, paths_file):
        with open(paths_file) as f:
            return [
                os.path.join(self.root_path, line.rstrip('\n')) for line in f
            ]

    def _shuffle_image_paths(self):
        random.shuffle(self._image_paths)

    def _random_preprocessing(self, image, size):
        # rotate image
        rand_degree = np.random.randint(0, 180)
        rand_flip = np.random.randint(0, 2)
        image = scipy.ndimage.interpolation.rotate(image,
                                                   rand_degree,
                                                   cval=255)
        if rand_flip == 1:
            image = np.flip(image, 1)

        # Select cropping range between (target_size/2 ~ original_size)
        original_h, original_w = image.shape
        crop_width = np.random.randint(self.target_size / 2, original_w)
        crop_height = np.random.randint(self.target_size / 2, original_h)
        topleft_x = np.random.randint(0, original_w - crop_width)
        topleft_y = np.random.randint(0, original_h - crop_height)
        cropped_img = image[topleft_y:topleft_y + crop_height,
                            topleft_x:topleft_x + crop_width]
        output = scipy.misc.imresize(cropped_img,
                                     [self.target_size, self.target_size])

        output = (output - 128.0) / 128.0
        return output

    def next(self):
        output = self.queue.get()
        return output

    def _enqueue_op(self, queue, msg_queue):
        while msg_queue.qsize() == 0:
            # randomly select index
            indexes = np.random.randint(0, self._total_num, self.batch_size)
            sz = self.target_size
            output = np.zeros([self.batch_size, sz, sz, 1])
            for i in range(len(indexes)):
                index = indexes[i]
                output[i] = self._random_preprocessing(
                    scipy.misc.imread(self._image_paths[index],
                                      mode='L').astype(np.float),
                    self.target_size).reshape([sz, sz, 1])
                while np.amin(output[i]) == np.amax(
                        output[i]):  # some data are strange..
                    output[i] = self._random_preprocessing(
                        scipy.misc.imread(self._image_paths[index],
                                          mode='L').astype(np.float32),
                        self.target_size).reshape([sz, sz, 1])

            queue.put(output)

    def start_threads(self):
        print("start threads called")
        for i in range(2):
            proc = Process(target=self._enqueue_op,
                           args=(self.queue, self.msg_queue))
            self.procs.append(proc)
            proc.daemon = True
            proc.start()
        print("enqueue thread started!")

    def get_batch_shape(self):
        return (self.batch_size, self.target_size, self.target_size, 1)

    def kill(self):
        self.msg_queue.put("illkillyou")
        for proc in self.procs:
            proc.terminate()
            proc.join()
        print('sketch data killed')
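
Typical use of the handler above, as a sketch (the constructor arguments here are
assumptions): two producer processes keep the internal queue filled, next() blocks
on get(), and kill() signals the producers through msg_queue before terminating them.

# handler = SketchDataHandler('data/sketches', 'paths.txt', batch_size=16, target_size=128)
# batch = handler.next()   # ndarray of shape (16, 128, 128, 1), scaled to [-1, 1]
# handler.kill()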
예제 #40
0
def test_net(imdb,
             output_dir,
             target_test,
             thresh=0.05,
             no_cache=False,
             step=0):
    # Run demo
    if imdb is None:
        assert cfg.TEST.DEMO.ENABLE, "check your config and stderr!"
        return demo(target_test, thresh)
    # Initializing the timers
    logger.info('Evaluating {} on {}'.format(cfg.NAME, imdb.name))

    run_inference = True
    if not no_cache:
        det_file = os.path.join(output_dir, 'detections.pkl')
        if os.path.exists(det_file):
            try:
                with open(det_file, 'rb') as f:
                    dets = cPickle.load(f)
                    run_inference = False
                    logger.info(
                        'Loading detections from cache: {}'.format(det_file))
            except Exception:
                logger.warning(
                    'Could not load the cached detections file, detecting from scratch!'
                )

    # Perform inference on images if necessary
    if run_inference:
        pyramid = True if len(cfg.TEST.SCALES) > 1 else False
        if isinstance(cfg.TEST.GPU_ID, int):
            cfg.TEST.GPU_ID = [cfg.TEST.GPU_ID]
        assert len(cfg.TEST.GPU_ID) >= 1, "You must specify at least one GPU"
        if len(cfg.TEST.GPU_ID) == 1:
            dets = inference_worker(0, imdb, target_test, 0, len(imdb), thresh)
        else:
            result_queue = Queue()
            procs = []
            len_per_gpu = int(np.ceil(1. * len(imdb) / len(cfg.TEST.GPU_ID)))
            for rank in range(len(cfg.TEST.GPU_ID)):
                p = Process(target=inference_worker,
                            args=(rank, imdb, target_test, len_per_gpu * rank,
                                  min(len_per_gpu * (rank + 1),
                                      len(imdb)), thresh, result_queue))
                p.daemon = True
                p.start()
                procs.append(p)
            dets = [result_queue.get() for _ in procs]
            for p in procs:
                p.join()
            dets = [det[1] for det in sorted(dets, key=lambda x: x[0])]
            dets = [[_ for det in dets for _ in det[i]]
                    for i in range(imdb.num_classes)]
        assert len(dets[0]) == len(imdb), "Detection result compromised"
        det_file = os.path.join(output_dir, 'detections.pkl')
        if not no_cache:
            with open(det_file, 'wb') as f:
                cPickle.dump(dets, f, cPickle.HIGHEST_PROTOCOL)

    # Evaluate the detections
    logger.info('Evaluating detections')
    result = imdb.evaluate_detections(all_boxes=dets,
                                      output_dir=output_dir,
                                      method_name=cfg.NAME,
                                      step=step)
    logger.info(result)
    logger.info('All Done!')
예제 #41
0
# 	if np.any(bool_list):
# 		time.sleep(.1)
# 	else:
# 		break
# else:
# 	print("Timed out, killing all processes")
# 	for p in procs:
# 		p.terminate()

# Keep draining results while workers are alive: a child process cannot exit
# cleanly while its queue still holds unconsumed data.
liveprocs = list(procs)
results = []
while liveprocs:
    try:
        # Pull everything currently available without blocking
        while 1:
            results.append(output.get(False))
    except queue.Empty:
        pass

    time.sleep(0.5)
    if not output.empty():
        continue

    # Keep only the workers that are still running
    liveprocs = [p for p in liveprocs if p.is_alive()]

for proc in procs:
    proc.join()

# results = []
# for proc in procs:
# 	while proc.is_alive():
예제 #42
0
# coding:utf-8
from multiprocessing import Queue
from queue import Empty

try:
    q = Queue()
    # get() blocks for up to 5 seconds, then raises queue.Empty
    q.get(timeout=5)
except Empty as e:
    print('--' + repr(e))

예제 #43
0
def await_solution(puzzle: np.ndarray, q: Queue) -> None:
    solved = q.get()
    print("\nOriginal:\n")
    display_grid(puzzle)
    print("\nSolved:\n")
    display_grid(solved)
예제 #44
0
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 13 17:35:23 2018

@author: Administrator
"""

from multiprocessing import Process, Queue


def f(q, n):
    q.put([42, n, 'hello'])


if __name__ == '__main__':
    q = Queue()
    p_list = []
    for i in range(3):
        p = Process(target=f, args=(q, i))
        p_list.append(p)
        p.start()
    print(q.get())
    print(q.get())
    print(q.get())
    for i in p_list:
        i.join()
예제 #45
0
    with ThreadPoolExecutor() as executor:
        results = executor.map(long_running_search, url_list)
        q.put([x for x in results])  # Send results to the shared queue.
    print(f'Worker {name} done.')


def long_running_search(url):
    try:
        r = requests.get(url, timeout=2, verify=False)
        return r.status_code
    except Exception as e:
        print(f'{e}')


if __name__ == '__main__':

    jobs = []
    for root_name in root_instances:
        p = Process(target=search_instances,
                    args=(root_name, q, url_dict[root_name]))
        jobs.append(p)
        p.start()

    for job in jobs:
        job.join()  # Ensure each process exits normally.

    while not q.empty():
        print(q.get())  # Retrieve each item from the queue.

    print('Fin')
예제 #46
0
#   Created by Elshad Karimov on 31/05/2020.
#   Copyright © 2020 AppMillers. All rights reserved.

# How to use multiprocessing.Queue as a FIFO queue:

from multiprocessing import Queue

customQueue = Queue(maxsize=3)
customQueue.put(1)
print(customQueue.get())
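
A short continuation of the snippet above (added for illustration, not part of the
original example): with maxsize=3 the queue holds at most three items, and get()
returns them in first-in, first-out order.

customQueue.put(2)
customQueue.put(3)
customQueue.put(4)        # queue is now full (maxsize=3); another put() would block
print(customQueue.get())  # -> 2 (FIFO order)
print(customQueue.get())  # -> 3
print(customQueue.get())  # -> 4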
예제 #47
0
def flush_worker(queue: Queue):
    while True:
        q_contents = queue.get()
        tracker_session.add(q_contents)
        tracker_session.flush()
예제 #48
0
class NoverScratch:

    # TODO list
    # 1. write txt
    # 2. multi threat to handle
    # 3. how to write txt. write once or more times. if one time, string would overflow
    #    more times include more txt join on txt or append text in order

    # initialize variables
    # @param {string} chromeDriver Local Path eg: C:\Program Files (x86)\Google\Chrome\Application\chromedriver
    def __init__(self, chromeDriverPath):
        # chrome browser
        # defaultChromeDriverPath = 'C:\Program Files (x86)\Google\Chrome\Application\chromedriver'
        # self.browser = webdriver.Chrome(defaultChromeDriverPath)
        # phantomjs
        defaultPhantomjsPath = './phantomjs'
        if platform.system() == 'Linux':
            # specific path related to phantomjs.sh
            defaultPhantomjsPath = '/usr/local/src/phantomjs/bin/phantomjs'
            self.browser = webdriver.PhantomJS(service_args=[
                '--ignore-ssl-errors=true', '--ssl-protocol=any'
            ])
        else:
            self.browser = webdriver.PhantomJS(
                executable_path=defaultPhantomjsPath)
        # headless chrome
        # chrome_options = Options()
        # chrome_options.add_argument('--headless')
        # self.browser = webdriver.Chrome(chrome_options=chrome_options, executable_path=r'C:\Program Files (x86)\Google\Chrome\Application\chrome.exe')
        if chromeDriverPath:
            self.browser = webdriver.Chrome(chromeDriverPath)
        # web driver browser wait timeout
        self.wait = WebDriverWait(self.browser, 10)
        # multi thread lock
        self.threadLock = threading.Lock()
        # Queue
        self.queue = Queue()

    # get each chapter link by menu link
    # @param {string} url eg:http:// or https://www.biquyun.com/1_1559/
    # @returns {chapterName} chapterName
    def getEachChapterLink(self, menuUrl):
        # check param validity; str.index() raises ValueError when the substring is
        # missing, so use startswith() instead
        if menuUrl.startswith(('http://', 'https://')):
            # begin browser load
            try:
                self.browser.get(menuUrl)
                # add chapter link when content load
                chapterList = self.wait.until(
                    EC.presence_of_element_located((By.CSS_SELECTOR, '#list')))
                html = self.browser.page_source
                doc = pq(html)
                chapterName = doc('#info > h1').text()
                items = doc('#list > dl > dd > a').items()
                # add each link
                for item in items:
                    link = item.attr('href')
                    # eg /1_1559/952222.html
                    # using re to join link TODO
                    # current use split to join
                    linkArr = link.split('/')
                    linkSuffix = linkArr[len(linkArr) - 1]
                    self.queue.put(menuUrl + linkSuffix)
                return chapterName
            except TimeoutException:
                # todo
                # print('error')
                return ''

    # get specific text by each chapter link
    # @param {string} chapter link eg:https://www.biquyun.com/1_1559/9986611.html
    # @param {chapterName} chapterName
    # @param {thread_id} thread id
    # @returns {string} novel text or other info (TODO)
    def getSpecificTextByChapterLink(self, chapterLink, chapterName):
        try:
            self.browser.get(chapterLink)
            chapterContent = self.wait.until(
                EC.presence_of_element_located((By.CSS_SELECTOR, '#content')))
            html = self.browser.page_source
            doc = pq(html)
            chapterTitle = doc(
                '#wrapper > div.content_read > div > div.bookname > h1').text(
                )
            chapterText = doc('#content').text()
            # chapterText may include chapterTitle (the title is written twice)
            # parse chapterText because it may look like this, e.g.: &nbsp;&nbsp;&nbsp;&nbsp;第一五二章风雨初平<br>
            # write chaptertext to txt
            # get lock
            self.threadLock.acquire()
            self.writeTextToLocalTXT((' \r\n ' + chapterTitle + ' \r\n ' +
                                      chapterText).encode('utf-8'),
                                     chapterName)
            # release lock
            self.threadLock.release()
        except TimeoutException:
            return

    # parse chapterText eg: &nbsp;&nbsp;&nbsp;&nbsp;第一五二章风雨初平<br>
    # @param {string} chapterText to be parsed
    # @param {chapterName} chapterName
    # @returns {array} content after parsing
    def parseChapterContent(self, chapterContent):
        # parse &nbsp; and <br/> by re
        parseContent = re.sub(r'(<br>+)\s*(<br>)*', '<br>\r\n<br>',
                              chapterContent)
        p = re.compile(r'<br>')
        return p.split(parseContent)

    # write content to local txt
    # @params {array} chapterContent array
    # @param {thread_id} thread id
    # @returns {boolean} write success or false
    def writeTextToLocalTXT(self, chapterContent, chapterName):
        file = None
        try:
            # file = open('static/' + chapterName + '_' + str(thread_id) + '.txt', 'ab+')
            file = open('static/' + chapterName + '.txt', 'ab+')
            if isinstance(chapterContent, list):
                # '\r\n' represents \n normally
                file.writelines(chapterContent)
            else:
                file.write(chapterContent)
        except IOError:
            pass
        finally:
            # guard against file being None when open() itself failed
            if file:
                file.close()

    # join several txt file to one
    # @param {txtFileArr}
    # @param {savePath} saved txt path
    def joinTxtFile(self, txtFileArr, savePath):
        with open(savePath, 'w') as outfile:
            for fname in txtFileArr:
                with open(fname) as infile:
                    outfile.write(infile.read())

    # multi thread to write txt
    # @param {linkArr} linkArr to handle
    # @param {thread_num} thread num
    # @param {chapterName} chapterName
    def multiThreadGetChapter(self, thread_num, chapterName):
        efficientWorker = EfficientWorker(self.batchWriteChapterToTxt,
                                          [chapterName], 'thread', 4, 5)
        efficientWorker.start()

    # batch write chapter to txt
    def batchWriteChapterToTxt(self, chapterName):
        while not self.queue.empty():
            chapterLink = self.queue.get()
            self.getSpecificTextByChapterLink(chapterLink, chapterName)

    # write each chapter to txt
    # @param {link} eg.https://www.biquyun.com/1_1559/
    def writeTotalChapterToTxt(self, link):
        chapterName = self.getEachChapterLink(link)
        thread_num = 5
        try:
            self.multiThreadGetChapter(thread_num, chapterName)
            # thread_txt = ['static/' + chapterName + '_' + str(i) + '.txt' for i in range(thread_num)]
            # self.joinTxtFile(thread_txt, 'static/' + chapterName + '.txt')
            # for tfile in thread_txt:
            #   if os.path.exists(tfile):
            #     os.remove(tfile)
            return chapterName + '.txt'
        except IOError:
            return -1
예제 #49
0
파일: train.py 프로젝트: bic4907/mario_rl
        pass

    for idx in range(num_worker):
        parent_conn, child_conn = Pipe()
        worker = MarioEnv(env_id, idx, child_conn, queue, n_step, is_render)
        worker.start()
        workers.append(worker)
        parent_conns.append(parent_conn)

    while model.g_episode < max_episode:

        while queue.empty():  # Wait for worker's state
            continue

        # Received some data
        idx, command, parameter = queue.get()
        if command == "OnStep":
            transition, reward, done = parameter

            action = model.get_action(transition, is_random=False)

            buffer_state[idx].append(np.array(transition))
            buffer_action[idx].append(action)
            buffer_reward[idx].append(reward)

            # once all the data needed for an n-step update has been collected
            if len(buffer_state[idx]) > n_step:
                model.train(buffer_state[idx], buffer_action[idx],
                            buffer_reward[idx], done)

                buffer_state[idx].pop()
예제 #50
0
    video_capture = WebcamVideoStream(src=cam_url, width=640,
                                      height=480).start()
    #video_capture=cv2.VideoCapture(0)

    fps = FPS().start()
    t_start = time.time()
    out = None
    while True:  # fps._numFrames < 120
        if time.time() - t_start > 1.2:  # delay between output frames
            t_start = time.time()
            frame = video_capture.read()
            frame = cv2.resize(frame, (int(640), int(480)))
            input_q.put(frame)

            t = time.time()
            out = mid_q.get()
            output_q.put(out)
            cv2.imshow('Video', out)
            fps.update()
            print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))
        else:
            frame = video_capture.read()
            if out is not None:
                cv2.imshow("Video", out)
            else:
                cv2.imshow("Video", frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
# time.sleep(0.3)
예제 #51
0
p.start()

while not SHOULD_EXIT:
    # print("processing GUI events in CALLBACK")
    sleep(0.01)
    # check if encoders have changed
    pedal_hardware.process_input()

pedal_hardware.EXIT_THREADS = True
send_ui_message("exit", ("exit", ))
p.terminate()
p.join()

while True:
    try:
        ui_messages.get(block=False)
    except queue.Empty:
        break

while True:
    try:
        core_messages.get(block=False)
    except queue.Empty:
        break

# Close the queues and join their feeder threads so they can flush remaining data
ui_messages.close()
ui_messages.join_thread()
core_messages.close()
core_messages.join_thread()
# print("exiting core")
print("Normal exit")
예제 #52
0
            print(m[i][j], end="")
        print(end='\n')
    print(end='\n\n')

    # multiprocessing
    for _ in range(args.processes):
        p = Process(target=next_step, args=(x, q))
        processes.append(p)
        p.start()

    for process in processes:
        process.join()

    # drain the queue; the last item retrieved is the final matrix after the nth step,
    # where n is the number of processes
    while not q.empty():
        final = q.get()

    finish = time.perf_counter()  # record the time once all processes have finished

    o = args.output

    # write the resulting matrix to the output file in form of string
    for i in range(len(final[0])):
        for j in range(len(final[0][i])):
            o.write(final[0][i][j])
        o.write('\n')

    print(
        f'Simulation complete. Final result stored in the output file "{args.output.name}"',
        end="\n\n")
예제 #53
0
파일: match.py 프로젝트: parksang21/RL
    right_p.start()

    l_cnt = 0
    r_cnt = 0
    for ep_i in range(args.episodes):
        done_n = [False for _ in range(env.n_agents)]
        ep_reward = 0
        env.seed(ep_i)
        obs_n = env.reset()

        frame = []
        while not all(done_n):

            left_q1.put(obs_n)
            right_q1.put(obs_n)
            l_action = left_q2.get()
            r_action = right_q2.get()
            obs_n, reward_n, done_n, info = env.step([l_action, r_action])
            # frame.append(env.render(mode='rgb_array'))
            #action_n = env.action_space.sample()
            #obs_n, reward_n, done_n, info = env.step(action_n)

            l_reward = reward_n[0]
            r_reward = reward_n[1]
            l_cnt += l_reward
            r_cnt += r_reward
            env.render()
        print('Episode #{} left: {} right: {} '.format(ep_i, l_cnt, r_cnt))

    left_q1.put(None)
    right_q1.put(None)
예제 #54
0
from multiprocessing import Process, Queue
import numpy as np

Size = 127


def producer(mp_queue):
    print("Child entered")
    window1 = np.arange(Size * Size).reshape((Size, Size))
    print(window1.dtype)
    print("Array created")
    mp_queue.put(window1)
    print("Data added to Pipe")
    return


if __name__ == '__main__':
    q = Queue()
    window_reader = Process(target=producer, args=(q, ))
    window_reader.start()
    # Consume the array before join(): a process that has put a large object on a
    # Queue may not exit until that data has been drained, so joining first can deadlock.
    a = q.get()
    window_reader.join()
    print("Child process exited, entered parent")
    print(a)
예제 #55
0
def main():

    parser = argparse.ArgumentParser(description="Perform workflow for scan")
    parser.add_argument('-d', '--directory', help='scan directory')
    parser.add_argument('-r',
                        '--refine',
                        action='store_false',
                        help='refine lattice parameters')
    parser.add_argument('-t',
                        '--transform',
                        action='store_true',
                        help='perform CCTW transforms')
    parser.add_argument('-c',
                        '--cwd',
                        default='/data/user6idd',
                        help='directory containing GUP directories')
    parser.add_argument('-g',
                        '--gup',
                        default='GUP-57342',
                        help='GUP number, e.g., GUP-57342')

    args = parser.parse_args()

    directory = args.directory.rstrip('/')
    sample = os.path.basename(os.path.dirname(directory))
    label = os.path.basename(directory)
    if args.refine:
        refine = '-r'
    else:
        refine = ''
    if args.transform:
        transform = '-t'
    else:
        transform = ''

    print("Processing directory '%s'" % directory)

    path = os.path.join(args.cwd, args.gup)
    if 'tasks' not in os.listdir(path):
        os.mkdir(os.path.join(path, 'tasks'))

    scans = sorted([
        scan for scan in os.listdir(directory)
        if (os.path.isdir(os.path.join(directory, scan))
            and not scan.endswith('_1'))
    ],
                   key=natural_sort)
    parent = os.path.join(sample, label, '%s_%s.nxs' % (sample, scans[0]))

    commands = [
        'nxtask -d %s -p %s %s %s' %
        (os.path.join(directory, scan), parent, refine, transform)
        for scan in scans
    ]

    tasks = JoinableQueue()
    results = Queue()

    nodes = [ProcessNode(node, tasks, results) for node in orthros_nodes]
    for node in nodes:
        node.start()

    # Enqueue jobs
    for command in commands:
        print(command)
        tasks.put(Task(path, command))

    # Add a poison pill for each node
    for node in nodes:
        tasks.put(None)

    # Wait for all of the tasks to finish
    tasks.join()

    # Start printing results
    num_jobs = len(commands)
    while num_jobs:
        result = results.get()
        print('Completed:', result)
        num_jobs -= 1
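
The poison-pill pattern above assumes that each ProcessNode keeps pulling from the
JoinableQueue until it sees None, acknowledges every item with task_done() so that
tasks.join() can return, and reports through the results queue. A minimal sketch of
such a worker follows (an assumption for illustration, not the original ProcessNode;
it also assumes the task objects are callable).

from multiprocessing import JoinableQueue, Process, Queue


class SketchNode(Process):
    def __init__(self, tasks, results):
        super().__init__()
        self.tasks = tasks
        self.results = results

    def run(self):
        while True:
            task = self.tasks.get()
            if task is None:              # poison pill -> shut down
                self.tasks.task_done()
                break
            self.results.put(task())      # run the task and report its result
            self.tasks.task_done()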
예제 #56
0
def main(workers=0, limit=None):
    globs = Globs()

    # Configure logging
    logging.basicConfig(
        level=logging.ERROR,
        filename='main.log',
        format='%(relativeCreated)6d %(threadName)s %(message)s')
    logging.error('STARTING NEW ENTRY AT %s' % str(dt.now()))

    # Load settings
    with open('settings.txt', 'r') as f:
        try:
            filename = f.readline().split('=')[1].strip()
            print('Loading', filename)
        except Exception as e:
            print('Failed to read filename from settings.txt'.upper(), e)
            sys.exit()
        try:
            globs.set('minmatch', int(f.readline().split('=')[1].strip()))
            print('Minmatch set to', globs.get('minmatch'))
        except Exception as e:
            print('Failed to read minimum from settings.txt'.upper(), e)
            sys.exit()

    # Load csv
    try:
        df = pd.read_csv(filename, header=0)
    except Exception as e:
        print('Failed to read csv file with filename', filename,
              'from settings.txt', e)
        sys.exit()

    # Limit as needed
    if limit:
        df = df.iloc[:limit]

    # Determine number of workers(threads) to use
    if workers == 0:
        workers = cpu_count()

    # Create jobs
    st = time()
    jobs = []
    q = Queue()
    for n in range(workers):
        batch_size = len(df) // workers + 1
        start = n * batch_size
        end = start + batch_size
        p = Process(target=start_worker,
                    args=(n, df.iloc[start:end], globs, q))
        jobs.append((n, p))
        print('Starting worker', n)
        p.start()

    # Wait until all links are scraped
    while q.qsize() < len(df):
        print('Main process is waiting for workers to finish...')
        sleep(10)

    for n, p in jobs:
        p.join(timeout=.1)
        print('Worker %s process has joined' % n)

    # Get worker results and save as a csv called results.csv
    results = pd.DataFrame(columns=['Email', 'Results'])
    i = 0
    while q.qsize() > 0:
        r = q.get(timeout=.2)
        # print(i, r)
        results.loc[len(results)] = r
        i += 1

    results.to_csv('results.csv')
    print('Results written to results.csv...')

    print("Time Elapsed: {:.2f}s".format(time() - st))
예제 #57
0
        p_mels = []
        for i in range(num_part):
            p_mels.append(
                Process(target=preprocess_audio,
                        args=(args, 'mel', metalist_list[i], q)))

        for i in range(num_part):
            p_mels[i].daemon = True
            p_mels[i].start()

        filtered_list = []
        error_list = []

        for _ in range(num_part):
            _, _filtered, _error = q.get()
            filtered_list.extend(_filtered)
            error_list.extend(_error)

        filtered_list.sort()
        print(len(filtered_list))

        with open('MSD_audio_filtered_track_file_path_list.p', 'wb') as handle:
            pickle.dump(filtered_list,
                        handle,
                        protocol=pickle.HIGHEST_PROTOCOL)

        with open('MSD_audio_error_list.p', 'wb') as handle:
            pickle.dump(error_list, handle, protocol=pickle.HIGHEST_PROTOCOL)

        for i in range(num_part):
예제 #58
0
def MultiProcess(nproc,
                 config,
                 job_func,
                 tasks,
                 item,
                 logger=None,
                 done_func=None,
                 except_func=None,
                 except_abort=True):
    """A helper function for performing a task using multiprocessing.

    A note about the nomenclature here.  We use the term "job" to mean the job of building a single
    file or image or stamp.  The output of each job is gathered into the list of results that
    is returned.  A task is a collection of one or more jobs that are all done by the same
    processor.  For simple cases, each task is just a single job, but for things like a Ring
    test, the task needs to have the jobs for a full ring.

    The tasks argument is a list of tasks.
    Each task in that list is a list of jobs.
    Each job is a tuple consisting of (kwargs, k), where kwargs is the dict of kwargs to pass to
    the job_func and k is the index of this job in the full list of jobs.

    Parameters:
        nproc:          How many processes to use.
        config:         The configuration dict.
        job_func:       The function to run for each job.  It will be called as::

                            result = job_func(**kwargs)

                        where kwargs is from one of the jobs in the task list.
        tasks:          A list of tasks to run.  Each task is a list of jobs, each of which is
                        a tuple (kwargs, k).
        item:           A string indicating what is being worked on.
        logger:         If given, a logger object to log progress. [default: None]
        done_func:      A function to run upon completion of each job.  It will be called as::

                            done_func(logger, proc, k, result, t)

                        where proc is the process name, k is the index of the job, result is
                        the return value of that job, and t is the time taken. [default: None]
        except_func:    A function to run if an exception is encountered.  It will be called as::

                            except_func(logger, proc, k, ex, tr)

                        where proc is the process name, k is the index of the job that failed,
                        ex is the exception caught, and tr is the traceback. [default: None]
        except_abort:   Whether an exception should abort the rest of the processing.
                        If False, then the returned results list will not include anything
                        for the jobs that failed.  [default: True]

    Returns:
        a list of the outputs from job_func for each job
    """
    import time
    import traceback

    # The worker function will be run once in each process.
    # It pulls tasks off the task_queue, runs them, and puts the results onto the results_queue
    # to send them back to the main process.
    # The *tasks* can be made up of more than one *job*.  Each job involves calling job_func
    # with the kwargs from the list of jobs.
    # Each job also carries with it its index in the original list of all jobs.
    def worker(task_queue, results_queue, config, logger):
        proc = current_process().name

        # The logger object passed in here is a proxy object.  This means that all the arguments
        # to any logging commands are passed through the pipe to the real Logger object on the
        # other end of the pipe.  This tends to produce a lot of unnecessary communication, since
        # most of those commands don't actually produce any output (e.g. logger.debug(..) commands
        # when the logging level is not DEBUG).  So it is helpful to wrap this object in a
        # LoggerWrapper that checks whether it is worth sending the arguments back to the original
        # Logger before calling the functions.
        logger = LoggerWrapper(logger)

        if 'profile' in config and config['profile']:
            import cProfile, pstats, io
            pr = cProfile.Profile()
            pr.enable()
        else:
            pr = None

        for task in iter(task_queue.get, 'STOP'):
            try:
                logger.debug('%s: Received job to do %d %ss, starting with %s',
                             proc, len(task), item, task[0][1])
                for kwargs, k in task:
                    t1 = time.time()
                    kwargs['config'] = config
                    kwargs['logger'] = logger
                    result = job_func(**kwargs)
                    t2 = time.time()
                    results_queue.put((result, k, t2 - t1, proc))
            except KeyboardInterrupt:
                raise
            except Exception as e:
                tr = traceback.format_exc()
                logger.debug('%s: Caught exception: %s\n%s', proc, str(e), tr)
                results_queue.put((e, k, tr, proc))
        logger.debug('%s: Received STOP', proc)
        if pr is not None:
            pr.disable()
            try:
                from StringIO import StringIO
            except ImportError:
                from io import StringIO
            s = StringIO()
            sortby = 'time'  # Note: This is now called tottime, but time seems to be a valid
            # alias for this that is backwards compatible to older versions
            # of pstats.
            ps = pstats.Stats(pr, stream=s).sort_stats(sortby).reverse_order()
            ps.print_stats()
            logger.error(
                "*** Start profile for %s ***\n%s\n*** End profile for %s ***",
                proc, s.getvalue(), proc)

    njobs = sum([len(task) for task in tasks])

    if nproc > 1:
        logger.warning("Using %d processes for %s processing", nproc, item)

        from multiprocessing import Process, Queue, current_process
        from multiprocessing.managers import BaseManager

        # Send the tasks to the task_queue.
        task_queue = Queue()
        for task in tasks:
            task_queue.put(task)

        # Temporarily mark that we are multiprocessing, so we know not to start another
        # round of multiprocessing later.
        config['current_nproc'] = nproc

        if 'profile' in config and config['profile']:
            logger.info(
                "Starting separate profiling for each of the %d processes.",
                nproc)

        # The logger is not picklable, so we need to make a proxy for it so all the
        # processes can emit logging information safely.
        logger_proxy = GetLoggerProxy(logger)

        # Run the tasks.
        # Each Process command starts up a parallel process that will keep checking the queue
        # for a new task. If there is one there, it grabs it and does it. If not, it waits
        # until there is one to grab. When it finds a 'STOP', it shuts down.
        results_queue = Queue()
        p_list = []
        for j in range(nproc):
            # The process name is actually the default name that Process would generate on its
            # own for the first time we do this. But after that, if we start another round of
            # multiprocessing, then it just keeps incrementing the numbers, rather than starting
            # over at Process-1.  As far as I can tell, it's not actually spawning more
            # processes, so for the sake of the logging output, we name the processes explicitly.
            p = Process(target=worker,
                        args=(task_queue, results_queue, config, logger_proxy),
                        name='Process-%d' % (j + 1))
            p.start()
            p_list.append(p)

        raise_error = None

        try:
            # In the meanwhile, the main process keeps going.  We pull each set of images off of the
            # results_queue and put them in the appropriate place in the lists.
            # This loop is happening while the other processes are still working on their tasks.
            results = [None for k in range(njobs)]
            for kk in range(njobs):
                res, k, t, proc = results_queue.get()
                if isinstance(res, Exception):
                    # res is really the exception, e
                    # t is really the traceback
                    # k is the index for the job that failed
                    if except_func is not None:  # pragma: no branch
                        except_func(logger, proc, k, res, t)
                    if except_abort or isinstance(res, KeyboardInterrupt):
                        for j in range(nproc):
                            p_list[j].terminate()
                        raise_error = res
                        break
                else:
                    # The normal case
                    if done_func is not None:  # pragma: no branch
                        done_func(logger, proc, k, res, t)
                    results[k] = res

        except BaseException as e:  # pragma: no cover
            # I'm actually not sure how to make this happen.  We do a good job of catching
            # all the normal kinds of exceptions that might occur and dealing with them cleanly.
            # However, if something happens that we didn't anticipate, this should make an
            # attempt to clean up the task queue and worker before raising the error further.
            logger.error(
                "Caught a fatal exception during multiprocessing:\n%r", e)
            logger.error("%s", traceback.format_exc())
            # Clear any unclaimed jobs that are still in the queue
            while not task_queue.empty():
                task_queue.get()
            # And terminate any jobs that might still be running.
            for j in range(nproc):
                p_list[j].terminate()
            raise_error = e

        finally:
            # Stop the processes
            # Once you are done with the processes, putting nproc 'STOP's will stop them all.
            # This is important, because the program will keep running as long as there are running
            # processes, even if the main process gets to the end.  So you do want to make sure to
            # add those 'STOP's at some point!
            for j in range(nproc):
                task_queue.put('STOP')
            for j in range(nproc):
                p_list[j].join()
            task_queue.close()

            del config['current_nproc']

        if raise_error is not None:
            raise raise_error

    else:  # nproc == 1
        results = [None] * njobs
        for task in tasks:
            for kwargs, k in task:
                try:
                    t1 = time.time()
                    kwargs['config'] = config
                    kwargs['logger'] = logger
                    result = job_func(**kwargs)
                    t2 = time.time()
                    if done_func is not None:  # pragma: no branch
                        done_func(logger, None, k, result, t2 - t1)
                    results[k] = result
                except KeyboardInterrupt:
                    raise
                except Exception as e:
                    tr = traceback.format_exc()
                    if except_func is not None:  # pragma: no branch
                        except_func(logger, None, k, e, tr)
                    if except_abort or isinstance(e, KeyboardInterrupt):
                        raise

    # If there are any failures, then there will still be some Nones in the results list.
    # Remove them.
    results = [r for r in results if r is not None]

    return results
예제 #59
0
            print('ROOT: ')
            cmd = stdin.readline()
            q.put(cmd)

    def root(char):
        assert isinstance(char, str), "Argument must be string!"
        ROOT.gROOT.ProcessLine(char)

    if __name__ == '__main__':
        ___queue___ = Queue()
        ___newstdin___ = os.fdopen(os.dup(sys.stdin.fileno()))
        ___input_p___ = Process(target=input_thread,
                                args=(___queue___, ___newstdin___))
        ___input_p___.daemon = True
        ___input_p___.start()
        ___g___ = ROOT.gSystem.ProcessEvents
        try:
            while 1:
                if not ___queue___.empty():
                    ___cmd___ = ___queue___.get()
                    try:
                        exec(___cmd___, globals())
                    except:
                        print(sys.exc_info())
                time.sleep(0.01)
                ___g___()
        except (KeyboardInterrupt, SystemExit):
            ___input_p___.terminate()

#
예제 #60
0
파일: video.py 프로젝트: Ojas-Singh/oOo
    fvs = FileVideoStream('sample.mp4').start()
    while quit == False and frame_count < 500:

        frame = fvs.read()
        input_q.put([
            time.time(), frame[int(roi[1]):int(roi[1] + roi[3]),
                               int(roi[0]):int(roi[0] + roi[2])]
        ])

        if output_q.empty():
            pass  # fill up queue
        else:
            frame_count += 1
            dummylist = []
            for i in range(output_q.qsize()):
                dummylist.append((quit, output_q.get()))
            dummylist.sort()
            for i in dummylist:
                display_q.put(i)
            fps.update()
            print(frame_count)

    fps.stop()
    quit = True

    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
    display_q.put((quit, output_q.get()))
    time.sleep(4)
    D.terminate()
    R.terminate()