Example #1
def start(self):
    print('starting')
    z = Thread(target=do_parent, args=(self.pa, self.user))
    x = Process(target=video_s, args=(self.vi,))
    y = Process(target=audio_s, args=(self.au,))
    t = Process(target=desk_s, args=(self.de,))
    p = Thread(target=chat_s, args=(self.ch, self.user))
    # mark every worker as a daemon (the flag is the lowercase `daemon`
    # attribute on both Thread and Process) so they exit with the parent
    z.daemon = True
    x.daemon = True
    y.daemon = True
    t.daemon = True
    p.daemon = True
    z.start()
    x.start()
    y.start()
    t.start()
    p.start()
def assign_blocks(centroids, data, processes):
    '''
    Inputs: list of centroids
            numpy array of blocks
            number of processes (integer)
    Outputs: calls the graph function, which plots every block

    This function assigns each block to one of the centroids.
    It is almost identical to using pool.map in linear_search_pool.py,
    but since pool.map turned out to slow down the algorithm,
    after consulting with Professor Wachs we manually split the data
    into chunks and fed one chunk to each process, which turned out to be
    approximately 'processes' times faster.
    A minimal standalone sketch of this chunk-and-join pattern follows the function.
    '''
    Districts = dc.create_districts(centroids)
    data = rm_centroids_from_data(centroids, data)

    q = Queue()

    colors_dict = get_colors(Districts)

    # used for the stopping condition with EPSILON
    unassigned_blocks = data.shape[0]

    while data.shape[0] != 0:
        data_splitted = split_data(data, processes)
        priority_district = dc.return_low_pop(Districts)

        procs = []
        for subdata in data_splitted:
            p = Process(target=find_nearest_block, args=(subdata, priority_district.centroid, q))
            p.daemon = True
            p.start()
            procs.append(p)

        # join every worker, not just the last one started
        for p in procs:
            p.join()

        blocks = []
        while not q.empty():
            blocks.append(q.get())
        # the [1:] slice drops the distance
        nearest_block = list(min(blocks)[1:])

        plt.scatter(nearest_block[2], nearest_block[1], color=colors_dict[priority_district.id])

        priority_district.add_block(nearest_block, Districts)  # should the distance be dropped first?
        idx = np.where(data[:, 0] == nearest_block[0])
        data = np.delete(data, idx, 0)

        if (unassigned_blocks - EPSILON) == data.shape[0]:
            break

    graph(Districts, data)
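A minimal, self-contained sketch of the chunk-and-join pattern described in the docstring above. It is only an illustration under assumed names: square_chunk, process_in_chunks and the toy data are invented here and are not part of the project.

import numpy as np
from multiprocessing import Process, Queue

def square_chunk(idx, chunk, q):
    # each worker transforms its own slice and reports (chunk index, result)
    q.put((idx, chunk ** 2))

def process_in_chunks(data, processes):
    q = Queue()
    procs = []
    for idx, chunk in enumerate(np.array_split(data, processes)):
        p = Process(target=square_chunk, args=(idx, chunk, q))
        p.daemon = True
        p.start()
        procs.append(p)
    parts = dict(q.get() for _ in procs)  # drain the queue before joining
    for p in procs:
        p.join()
    return np.concatenate([parts[i] for i in range(len(procs))])

if __name__ == '__main__':
    print(process_in_chunks(np.arange(12), processes=4))

Draining the queue before joining avoids the classic deadlock where a child blocks on a full queue while the parent blocks in join().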
def multips(function_in_use, file_path, parallel, fa_files):
    """ Run function_in_use over fa_files using several processes. """
    from multiprocessing import Process
    fa_files_len_part = len(fa_files) // parallel
    start = time.time()
    procs = []

    for i in range(0, parallel):
        if i != parallel - 1:
            p = Process(target=function_in_use,
                        args=(i, file_path, fa_files[i * fa_files_len_part:(i + 1) * fa_files_len_part]))
        else:  # the last chunk also takes the left-overs
            p = Process(target=function_in_use,
                        args=(i, file_path, fa_files[i * fa_files_len_part:len(fa_files)]))
        p.daemon = True
        p.start()
        procs.append(p)
    for p in procs:
        p.join()

    print('time-SF06*py:', times(start))
Example #4
def jiemian(self):
    self.sockfd.listen()
    print('waiting for client connections')
    while True:
        try:
            connfd, addr = self.sockfd.accept()
            print('client connected', addr)
        except KeyboardInterrupt:
            self.sockfd.close()
            sys.exit('server exiting')
        except Exception as e:
            print(e)
            continue
        # handle each client in its own daemon process
        t = Process(target=self.handler, args=(connfd, ))
        t.daemon = True
        t.start()
def searching_all(filename, number):
    Grid, data, dim, lat, lon = build_grid(filename, number)

    Districts = dc.create_districts(CENTROID_L)
    unassigned_blocks = data.shape[0]

    q = Queue()
    processes = 5
    colors_dict = get_colors(Districts)

    while unassigned_blocks != 0:
        tol = 1
        priority_district = dc.return_low_pop(Districts)

        # widen the search tolerance until at least one candidate block is found
        while True:
            subset = searching_neighborhood(priority_district, tol, Grid, dim, lat, lon)
            print(subset.shape)

            split_subset = np.array_split(subset, processes)

            procs = []
            for subdata in split_subset:
                p = Process(target=find_nearest_block, args=(subdata, priority_district.centroid, q))
                p.daemon = True
                p.start()
                procs.append(p)

            for p in procs:
                p.join()

            if not q.empty():
                break
            tol += 1
            print("changed tolerance.")

        blocks = []
        while not q.empty():
            blocks.append(q.get())
        nearest_block = list(min(blocks)[1:])

        priority_district.add_block(nearest_block[:-2], Districts)

        Grid[int(nearest_block[-2])][int(nearest_block[-1])].remove(nearest_block[:-2])
        plt.scatter(nearest_block[2], nearest_block[1], color=colors_dict[priority_district.id])
        unassigned_blocks -= 1

    graph(Districts, data)
    def proc_map_reduce(self, iterable, partition=False):
        """
        Parallel implementation of mapReduce using multiprocessing.Process.
        (A standalone sketch of this Manager-dict pattern follows this method.)
        """
        if partition:
            iterable = self.partition(iterable)
        manager = Manager()
        return_dict = manager.dict()
        proc_queue = []
        for proc_id, chunk in enumerate(iterable):
            p = Process(target=self.map_func_wrap, args=(proc_id, return_dict, chunk))
            p.daemon = True
            p.start()
            proc_queue.append(p)
        # join() already blocks until every worker has finished, so no extra polling is needed
        for p in proc_queue:
            p.join()
        return self.reduce_func(return_dict.values())
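A self-contained sketch of the same Manager-dict pattern outside the class, with invented names (square_worker, sum_reduce, and the sample chunks); it is an illustration, not the class's actual map/reduce functions.

from multiprocessing import Manager, Process

def square_worker(proc_id, return_dict, chunk):
    # "map" step: each process writes its partial result under its own key
    return_dict[proc_id] = [x * x for x in chunk]

def sum_reduce(partials):
    # "reduce" step: combine the partial results
    return sum(sum(part) for part in partials)

if __name__ == '__main__':
    manager = Manager()
    return_dict = manager.dict()
    chunks = [[1, 2, 3], [4, 5], [6]]
    procs = []
    for proc_id, chunk in enumerate(chunks):
        p = Process(target=square_worker, args=(proc_id, return_dict, chunk))
        p.daemon = True
        p.start()
        procs.append(p)
    for p in procs:
        p.join()
    print(sum_reduce(return_dict.values()))  # 91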
Example #7
from socket import *
from multiprocessing import Process
import sys

def handler(c):
    while True:
        data = c.recv(1024)
        if not data:
            break
        print(data.decode())
        c.send(b'Receive')
    c.close()
    sys.exit(0)


# create the listening socket
s = socket()
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
s.bind(ADDR)  # ADDR (host, port) is defined elsewhere in the original script
s.listen(3)

# accept client requests
while True:
    try:
        c, addr = s.accept()
    except KeyboardInterrupt:
        s.close()
        sys.exit("server exiting")
    except Exception as e:
        print(e)
        continue

    # handle each client in its own daemon process
    t = Process(target=handler, args=(c, ))
    t.daemon = True
    t.start()
Example #8
def calc_sequential_2opt_tour(tsp):
    THREADS = 1
    MAX_ITER = 10

    cities = tsp["CITIES"]
    chunk_sz = (len(cities) + 1) // THREADS
    tour = list(range(len(cities)))
    # tour = nearest_neighbor(cities, random.randint(0, len(cities) - 1))
    tour.append(tour[0])  # close the path into a tour

    dist = tour_distance(cities, tour)
    best_dist = dist
    best_tour = tour
    print("initial distance")
    print(best_dist)

    print("search iteration:")
    for i in range(MAX_ITER):

        new_tour = best_tour

        # rotate new_tour by chunk_sz/2 or chunk_sz/3, chosen randomly
        new_tour = new_tour[:len(new_tour) - 1]
        cut_point = random.randint(2, 3)
        new_tour = new_tour[chunk_sz // cut_point:] + new_tour[:chunk_sz // cut_point]
        new_tour.append(new_tour[0])

        # split new_tour into THREADS chunks
        splits = rough_chunk(new_tour, THREADS)
        if THREADS == 1:  # change the tour into a path because local_search() expects a path
            splits[0] = splits[0][:len(splits[0]) - 1]

        # pass each chunk to local_search()
        queue = Queue()  # queue shared by all worker processes
        procs = []
        for m in range(len(splits)):
            # mark the subtour with the worker id, m
            p = Process(target=local_search,
                        args=(splits[m], m, cities, queue))
            p.daemon = True  # a dying parent will terminate this child
            procs.append(p)
            p.start()

        # merge the collected paths from the queue
        for p in procs:
            p.join()
        queue.put('QUEUE_END')
        new_tour = [None] * THREADS
        for s in iter(queue.get, 'QUEUE_END'):
            new_tour[s[0]] = s[1]
        new_tour = [city for subt in new_tour for city in subt]  # flatten the list
        if THREADS == 1:  # change the path back into a tour because local_search() returns a path
            new_tour.append(new_tour[0])

        # replace the best solution with the current one if it is better
        dist = tour_distance(cities, new_tour)
        print([i, dist])
        if dist < best_dist:
            best_dist = dist
            best_tour = new_tour

    return (best_dist, best_tour)
Example #9
def spectral_overlapper(n, colors, lasers, c=0.2, time_out=100):

    # Start the run timer
    run_time = time.time()


    # Connect to database
    db = dataDB()

    """Developer comment:

    The reason we initialise the list here is so that we can check which lasers are used. We can then sort out
    the lasers that are not in use and thereby get a higher chance of the result already being in the db!
    (A small sketch of this cache-before-compute idea follows this function.)
    """


    # The db side has already taken care of clamping emissions below 0 to 0
    fluorochromes_all = db.fetch_fluorchromes_data(colors)

    # Init the list
    fc_list = []

    # If the fluorochrome is valid at the given laser then add it to the list
    for fc in fluorochromes_all:
        fc_obj = fluorochrome_analyzed(fc, fluorochromes_all[fc], 'clone', lasers)

        if fc_obj.valid:
            fc_list.append(fc_obj)
        else:
            # Todo: return list?
            # Report that it has been omitted
            print("{0} omitted. Relative emission intensity is below {1} %".format(fc_obj.name, c * 100))


    lasers = sorted(list(set([fc.l_max_laser for fc in fc_list])))

    # Check whether this specific combination of lasers and colors has been evaluated before
    pre_data_check = db.extended_check_basic_comb_log(n, lasers, colors)

    # If the combination has been evaluated before, simply return that stored result
    if pre_data_check is not None:

        # Create a list of fc objects

        return json.loads(pre_data_check)


    # If it hasn't been evaluated yet, evaluate it
    else:


        ## Sort the list of fluorochromes
        fc_list.sort()


        # Calculate the overlap
        auc_overlaps = auc_overlaps_fun(fc_list)

        # Get the number of rows
        r = auc_overlaps.shape[0]

        # Get the expected size of the generator
        size = choose(r, n)

        proc = 4  # run 4 processes
        splitter = size // proc  # divide the size by proc, keeping the integer part
        rest_split = size % proc  # get the remainder

        comb = itertools.combinations(range(r), n) # Create the generator


        #main_list = [] # Create an empty list to append to

        # Chunkify the list
        """
        for i in range(proc):
            if i == proc - 1:
                # If it is the last "list"-element, then add the remainder to the index splice
                itertools.islice(comb, splitter * i, splitter * (i + 1) + rest_split)

            # Add the chunk
            main_list.append(itertools.islice(comb, splitter * i, splitter * (i + 1)))
        """

        # Same as above
        main_list =  [itertools.islice(comb, splitter * i, splitter * (i + 1) + rest_split) if i == proc - 1 else
                      itertools.islice(comb, splitter * i, splitter * (i + 1)) for i in range(proc)]

        # Create the queue
        q = Queue()

        # Start the worker processes
        procs = []
        for i, sub_list in enumerate(main_list):
            # Run process_search on each chunk in the background
            p = Process(target=process_search, args=(auc_overlaps, sub_list, q, "sub {0}".format(i)))
            p.daemon = True
            p.start()
            procs.append(p)

        # Init a list containing the answers
        res = []

        ## Todo: check res length instead, or add timeout

        while True:
            # Append a result from the queue to the result list
            res.append(q.get())

            # If all the processes are done
            if len(res) == proc:
                break

            # If it is running too slowly, break out
            if run_time + time_out <= time.time():
                print(time_out)
                # Todo: what to return?
                return

        # Reap the workers now that every result has been collected
        for p in procs:
            p.join()

        # Find the smallest combination - each process returns the smallest list of the chunk it evaluated
        min_list = min(res, key=lambda t: t[0])[1]

        # Add the time and result to the database
        db.speed_test(time.time()-run_time,size,'Macbook pro')

        # Add the combination and result to the database
        db.add_basic_comb_log(n, lasers, colors, [fc_list[i] for i in min_list])

        # Return the names of the optimal colors
        return [fc_list[i].name for i in min_list]
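The developer comment near the top of spectral_overlapper motivates normalising the laser list before consulting the database, so that equivalent queries can reuse a stored result. A dict-based sketch of that cache-before-compute idea, with invented stand-ins (expensive_search, cached_search, _cache) in place of the real dataDB calls:

_cache = {}

def expensive_search(n, lasers, colors):
    # stand-in for the real combinatorial search over fluorochrome overlaps
    return sorted(colors)[:n]

def cached_search(n, lasers, colors):
    # normalise the laser set so equivalent queries share one cache entry
    key = (n, tuple(sorted(set(lasers))), tuple(sorted(colors)))
    if key not in _cache:  # cf. db.extended_check_basic_comb_log(...)
        _cache[key] = expensive_search(n, lasers, colors)  # cf. db.add_basic_comb_log(...)
    return _cache[key]

print(cached_search(2, ['488', '640', '488'], ['FITC', 'PE', 'APC']))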
Example #10
def main():
    # Read parameters
    config = Config()
    
    #Parse the inputs args/options
    parser = argparse.ArgumentParser(usage="target_fasta query_fastq [options]") # , version="%prog 0.1")

    parser.add_argument("target_fasta", type=str,
                        help="The target genome fasta file.")
    parser.add_argument("query_fastq", type=str,
                        help="The query sequences.")
    parser.add_argument("--g", dest="g", help="Use Numba cuda.jit kernel to parallelize MinimizerIndexer on GPU", action='store_true')
    parser.add_argument("--w", dest="w", type=int, help="Length of minimizer window. Default=%s" % config.w, default=config.w)
    parser.add_argument("--k", dest="k", type=int, help="Length of k-mer. Default=%s" % config.k, default=config.k)
    parser.add_argument("--t", dest="t", type=int, help="Discard minmers that occur more frequently " 
                                            "in the target than t. Default=%s" % config.t, default=config.t)
    parser.add_argument("--l", dest="l", type=int, help="Cluster two minmers into the same cluster if within l bases of"
                                            " each other in both target and query. Default=%s" % config.l, default=config.l)
    parser.add_argument("--c", dest="c", type=int, help="Add this many bases to the prefix and suffix of a seed cluster in the"
                                            " target and query sequence. Default=%s" % config.c, default=config.c)
    parser.add_argument("--gapScore", type=float, dest="gapScore", help="Smith-Waterman gap-score. Default=%s" % 
                      config.gapScore, default=config.gapScore)
    parser.add_argument("--matchScore", type=float, dest="matchScore", help="Smith-Waterman match-score. Default=%s" %
                      config.matchScore, default=config.matchScore)
    parser.add_argument("--mismatchScore", type=float, dest="mismatchScore", help="Smith-Waterman mismatch-score. Default=%s" % 
                      config.mismatchScore, default=config.mismatchScore)
    parser.add_argument("--log", dest="logLevel", help="Logging level. Default=%s" % 
                      config.logLevel, default=config.logLevel)
    
    options = parser.parse_args()
    
    # Parse the log level
    numeric_level = getattr(logging, options.logLevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % options.logLevel)
    
    # Setup a logger
    logger.setLevel(numeric_level)
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(numeric_level)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    logger.debug("Established logger")
    
    startTime = time.time()
    global targetString
    
    # Parse the target sequence and read the first sequence
    with pysam.FastaFile(options.target_fasta) as targetFasta:
        targetString = targetFasta.fetch(targetFasta.references[0])
    logger.info("Parsed target string. Length: %s in %s seconds" % (len(targetString), time.time()-startTime))
    
    # Build minimizer index
    minimizerIndex = MinimizerIndexer(targetString.upper(), w=options.w, k=options.k, t=options.t)
    # print("minimizerIndex attributes", list(minimizerIndex.minimizerMap.items())[:10], 
    #       list(minimizerIndex.minmerOccurrences.items())[:10])
    
    # Only seeing this many minmers for the target DNA sequence:
    print(len(minimizerIndex.minimizerMap.keys()), "minimizerMap keys", list(minimizerIndex.minimizerMap.keys())[:20],
          "\n", len(minimizerIndex.minmerOccurrences.keys()), "minmerOccurrences keys", list(minimizerIndex.minmerOccurrences.keys())[:20])
    
    minmerInstances = sum(map(len, minimizerIndex.minimizerMap.values()))
    logger.info("Built minimizer index in %s seconds. #minmers: %s, #minmer instances: %s" %
                 ((time.time()-startTime), len(minimizerIndex.minimizerMap), minmerInstances))
    
    # Open the query files
    alignmentScores = [] # Array storing the alignment scores found
    threads = []
    with pysam.FastqFile(options.query_fastq) as queryFastq: #, Pool(10) as p:
        # For each query string build alignment
        if options.g:
            # For each query string build alignment
            for query, queryIndex in zip(queryFastq, range(sys.maxsize)): # xrange(sys.maxint)):
                ## print queryIndex
                print(queryIndex)
                alignment = simpleMap(minimizerIndex, query.sequence.upper(), config, None, options.g)
                alignmentScore = 0 if alignment is None else alignment.getMaxAlignmentScore()
                alignmentScores.append(alignmentScore)
                logger.info("Mapped query sequence #%i, length: %s alignment_found?: %s "
                            "max_alignment_score: %s" % 
                            (queryIndex, len(query.sequence), alignment is not None, alignmentScore)) 
        else:
            results = list()
            q = Queue()
            for query, queryIndex in zip(queryFastq, range(sys.maxsize)): # xrange(sys.maxint)):
                print("Reading query", queryIndex)
                results.append((queryIndex, query.sequence))
                p = Process(target=simpleMap, args=(minimizerIndex, query.sequence.upper(), config, q, options.g))
                p.daemon = True
                p.start()
                threads.append(p)
            for r in results:
                queryIndex = r[0]
                querySeq = r[1]
                alignment = q.get()
                try:
                    alignmentScore = alignment.getMaxAlignmentScore()
                except AttributeError:
                    print("None type, continue")
                    continue
                # print("Query joined", queryIndex)
                alignmentScores.append(alignmentScore)
                logger.info("Mapped query sequence #%i, length: %s alignment_found?: %s "
                            "max_alignment_score: %s" %
                            (queryIndex, len(querySeq), alignment is not None, alignmentScore))

            # join every worker process, not just the last one started
            for p in threads:
                p.join()
                   
    logger.info("Finished alignments in %s total seconds, average alignment score: %s" % 
                    (time.time()-startTime, float(sum(alignmentScores))/len(alignmentScores)))
def speed_results(n, lasers, colors):

    time_out = 11000  # approx 3 hours
    run_time = time.time()

    # The db side has already taken care of clamping emissions below 0 to 0
    fluorochromes_all = db.fetch_fluorchromes_data(colors)

    # Init the list
    fc_list = []

    # If the fluorochrome is valid at the given laser then add it to the list
    for fc in fluorochromes_all:
        fc_obj = fluorochrome_analyzed(fc, fluorochromes_all[fc], 'clone', lasers)

        if fc_obj.valid:
            fc_list.append(fc_obj)
        else:

            # Report that it has been omitted
            #print("{0} omitted. Relative emission intensity is below {1} %".format(fc_obj.name, c * 100))
            pass

    lasers = sorted(list(set([fc.l_max_laser for fc in fc_list])))

    # Check whether this specific combination of lasers and colors has been evaluated before
    pre_data_check = db.extended_check_basic_comb_log(n, lasers, colors)

    # If the combination has been evaluated before, simply return that stored result
    if pre_data_check is not None:
        # Don't do anything
        return json.loads(pre_data_check),0


    # If it hasn't been evaluated yet, evaluate it
    else:

        ## Sort the list of fluorochromes
        fc_list.sort()

        # Calculate the overlap
        auc_overlaps = auc_overlaps_fun(fc_list)

        # Get number of rows
        r = auc_overlaps.shape[0]

        # Get the expected size of the generator
        size = choose(r, n)

        splitter = size // proc  # divide the size by proc, keeping the integer part
        rest_split = size % proc  # get the remainder

        comb = itertools.combinations(range(r), n)  # Create the generator


        # Same as above
        main_list = [itertools.islice(comb, splitter * i, splitter * (i + 1) + rest_split) if i == proc - 1 else
                     itertools.islice(comb, splitter * i, splitter * (i + 1)) for i in range(proc)]

        # Create the queue
        q = Queue()

        # Start the worker processes
        procs = []
        for i, sub_list in enumerate(main_list):
            # Run process_search on each chunk in the background
            p = Process(target=process_search, args=(auc_overlaps, sub_list, q, "sub {0}".format(i)))
            p.daemon = True
            p.start()
            procs.append(p)

        # Init a list containing the answers
        res = []

        while True:
            # Append a result from the queue to the result list
            res.append(q.get())

            # If all the processes are done
            if len(res) == proc:
                break

            # If it is running too slowly, note it
            if run_time + time_out <= time.time():
                print("time_out")
                # Todo: what to return?

        # Reap the workers now that every result has been collected
        for p in procs:
            p.join()

        # Find the smallest combination - each process returns the smallest list of the chunk it evaluated
        min_list = min(res, key=lambda t: t[0])[1]


        # Add the combination and result to the database
        #print(n, lasers, colors, [fc_list[i] for i in min_list])
        db.add_basic_comb_log(n, lasers, colors, [fc_list[i] for i in min_list])
        return [fc_list[i].name for i in min_list],1
Example #12
    def train(self, corpus):

        if self.trained:
            sys.exit(
                'A trained voice exists for this language/speaker/recipe combination.'
            )

        ## For a while, all_corpus included both text and speech-and-text utterances,
        ## but this has been reverted to original set-up, text_corpus has been restored
        ## to train calls at positional (not kw as before) arg:
        #all_corpus = corpus.make_utterances(self.res.make_dir(c.TRAIN, "utt"), \
        #                                                clear_old_data=self.clear_old_data)
        speech_corpus = corpus.make_utterances(self.res.make_dir(c.TRAIN, "utt"), \
                                                    clear_old_data=self.clear_old_data)
        text_corpus = corpus.all_text_files()

        ## temporary fix for this error (see the pickling sketch after this method):
        '''
        Traceback (most recent call last):
          File "./scripts/train.py", line 115, in <module>
            main_work()
          File "./scripts/train.py", line 110, in main_work
            voice.train(corpus)
          File "/afs/inf.ed.ac.uk/group/cstr/projects/simple4all_2/alessandra_dissertation/tool/Ossian/scripts/main/Voice.py", line 291, in train
            result = pool.apply_async(processor, args=(utterance_file, self.res.make_dir(c.TRAIN, "utt"), self.run_mode))
          File "<string>", line 2, in apply_async
          File "/afs/inf.ed.ac.uk/user/o/owatts/tool/python/ActivePython-2.7/lib/python2.7/multiprocessing/managers.py", line 763, in _callmethod
            conn.send((self._id, methodname, args, kwds))
        cPickle.PicklingError: Can't pickle <type '_sre.SRE_Match'>: attribute lookup _sre.SRE_Match failed
        '''
        unparallelisable_classes = ['BasicStanfordCoreNLP', 'Lexicon'
                                    ]  ## lexicon parallelises very slowly --
        ## see: http://stackoverflow.com/questions/20727375/multiprocessing-pool-slower-than-just-using-ordinary-functions
        ### ^---- TODO: this is now unused

        t = time.time()

        i = 1
        for processor in self.processors:

            print("\n\n== Train voice (proc no. %s (%s))  ==" %
                  (i, processor.processor_name))

            if not processor.trained:
                ## has a suitable component already been trained?
                if os.path.isdir(processor.component_path):
                    print("Copy existing component for processor " +
                          processor.processor_name)
                    processor.reuse_component(self.res)
                else:
                    print("Train processor " + processor.processor_name)
                    processor.train(speech_corpus, text_corpus)

            print("          Applying processor " + processor.processor_name)

            if self.max_cores > 1 and processor.parallelisable:
                # Split the utterances into n chunks
                chunks = self.split_corpus(speech_corpus)
                # Create processes, each supplied with one chunk of the corpus
                # (iterate over the chunks directly so the processor counter i is not clobbered)
                processes = []
                for chunk in chunks:
                    p = Process(target=chunk_processor,
                                args=(processor, chunk,
                                      self.res.make_dir(c.TRAIN, "utt"),
                                      self.run_mode))
                    p.daemon = True
                    p.start()
                    processes.append(p)
                # Join on every process (the main thread cannot continue until every process has terminated)
                for pr in processes:
                    pr.join()
            else:
                for utterance_file in speech_corpus:
                    utterance = Utterance(utterance_file,
                                          utterance_location=self.res.make_dir(
                                              c.TRAIN, "utt"))
                    processor.apply_to_utt(utterance, voice_mode=self.run_mode)
                    utterance.save()

            # if self.max_cores > 1:
            #     pool = Manager().Pool(self.max_cores)
            # for utterance_file in speech_corpus:
            #     if self.max_cores > 1 and processor.parallelisable:
            #         result = pool.apply_async(processor, args=(utterance_file, self.res.make_dir(c.TRAIN, "utt"), self.run_mode))
            #     else:
            #         utterance = Utterance(utterance_file, utterance_location=self.res.make_dir(c.TRAIN, "utt"))
            #         processor.apply_to_utt(utterance, voice_mode=self.run_mode)
            #         utterance.save()
            # if self.max_cores > 1:
            #     pool.close()
            #     pool.join()

            i += 1

        print('\nTIME : %s ' % (time.time() - t))
        self.save()
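The traceback quoted in train() above is the usual multiprocessing pickling failure: compiled-regex match objects cannot be serialised, so any utterance object that holds one cannot be shipped through a manager or pool. A tiny sketch reproducing that class of error (the exact exception message varies across Python versions):

import pickle
import re

match = re.match(r'(\w+)', 'hello world')
try:
    pickle.dumps(match)
except (TypeError, pickle.PicklingError) as e:
    # e.g. "cannot pickle 're.Match' object"
    print('pickling failed:', e)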
    d = manager.dict()
    d2 = manager.dict()
    # d = {}
    # The parent process creates the Queue and passes it to each child process:
    # q1 = Queue(maxsize=1)
    # q2 = Queue(maxsize=1)

    # parent_conn, child_conn = Pipe()

    # ppre = Process(target=prepost, args=(300, 300, 30, cam, timers))
    ppre = Process(target=preProcessing, args=(300, 300, 30, cam, timers))
    pdet = threading.Thread(target=detInference, args=(model, timers))

    ppost = threading.Thread(target=postProcessing, args=(timers, ))
    # Start ppre, which reads frames from the camera and prepares the network input:
    # preProcessing(q1, 300, 300, 30, cam)
    # ppre.setDaemon(True)
    ppre.daemon = True
    pdet.daemon = True
    ppre.start()
    # Start pdet, which runs the detection network:
    pdet.start()
    # Start ppost, which post-processes and displays the output:
    ppost.start()
    # Wait for the post-processing to finish:
    ppost.join()
    # ppre.join()
    # ppre runs an infinite loop, so it cannot be waited on and must be terminated forcibly:
    # ppre.terminate()
    os._exit(0)
Example #14
import client
import server
import time
from multiprocessing import Process

if __name__ == "__main__":
    S = input('Enter the number of server threads (0 = run 1-5 automatically): ')
    C = input('Enter the number of client threads (0 = run 1-5 automatically): ')
    Server = Process(target=server.serverMain, args=(int(S),))
    Client = Process(target=client.clientMain, args=(int(C),))
    Server.daemon = True
    Client.daemon = True
    Server.start()
    time.sleep(1)
    Client.start()
    Server.join()
    Client.join()
Example #15
def go(scores, s, i):
    t = Process(target=worker, args=(scores, s, i))
    t.daemon = True
    t.start()
Example #16
from time import sleep
from multiprocessing import JoinableQueue, Process

# q is the job queue
# NUM is the total number of concurrent worker processes
# JOBS is how many jobs there are
q = JoinableQueue()
NUM = 12
JOBS = 100

# the actual handler, responsible for processing a single job
def do_somthing_using(arguments):
    i = 0
    while i < 100000:
        i += 1
    print(arguments)

# the worker process: keeps pulling jobs from the queue and processing them
def working():
    while True:
        arguments = q.get()
        do_somthing_using(arguments)
#       sleep(1)
        q.task_done()

# fork NUM worker processes that wait on the queue
for i in range(NUM):
    t = Process(target=working)
    t.daemon = True
    t.start()

# enqueue the JOBS jobs
for i in range(JOBS):
    q.put(i)

# wait for all jobs to finish
q.join()
Example #17
    def get_action(self):

        begin = time.time()
        # Causes the AI to calculate the best action from the
        # current game state and return it.

        self.max_depth = 0
        self.data = {}
        self.stats.clear()

        state = self.history[-1]
        player = self.board.current_player(state)
        legal = self.board.legal_actions(self.history[:])

        # Bail out early if there is no real choice to be made.
        if not legal:
            return
        if len(legal) == 1:
            return self.board.unpack_action(legal[0])

        games = 0
        # @TODO multithreading here
        queue = Queue()
        processes_num = 9
        processes = []
        result = []
        for i in range(processes_num):
            p = Process(target=self.simulation_worker, args=(queue,))
            p.daemon = True
            p.start()
            processes.append(p)

        for i in range(processes_num):
            result.append(queue.get())

        for i in range(processes_num):
            processes[i].join()

        voting = {}
        for p in legal:
            voting[p] = {'votes': 0, 'wins': 0, 'visits': 0}


        for stats, game_times, max_depth in result:
            # @DEBUG
            print('games:', game_times)
            games += game_times
            self.max_depth = max(self.max_depth, max_depth)

            actions = self.calculate_action_values(state, player, legal, stats)
            # highest_score = actions[0]['percent']
            # print(actions[0]['wins'])
            for action in actions:
                print(self.action_template.format(**action))
            print('\n')

            action = actions[0]
            voting[action['action']]['votes'] += 1
            voting[action['action']]['wins'] += action['wins']
            voting[action['action']]['visits'] += action['plays']

            # for action in actions:
            #     #tmp = voting[action['action']]
            #     #print tmp, tmp['wins']
            #     if action['percent'] == highest_score:
            #         voting[action['action']]['votes'] += 1
            #         voting[action['action']]['wins'] += action['wins']
            #         voting[action['action']]['visits'] += action['plays']
            #     else:
            #         break

            # for key in stats.keys():
            #     # @DEBUG
            #     #print stats[key].value, stats[key].visits
            #     S = self.stats.setdefault(key, Stat())
            #     S.value += stats[key].value
            #     S.visits += stats[key].visits
        for key in list(voting.keys()):
            if voting[key]['votes'] == 0:
                del voting[key]  # a bad move
                continue
            print('action: {0}, votes: {1}, average: {2:.1f}% ({3}/{4})'.format(
                self.board.unpack_action(key), voting[key]['votes'],
                100 * voting[key]['wins'] / voting[key]['visits'],
                voting[key]['wins'], voting[key]['visits']))  # @DEBUG

        # Display the number of calls of `run_simulation` and the
        # time elapsed.
        self.data.update(games=games, max_depth=self.max_depth,
                         time=str(time.time() - begin))
        print('games: {0}, time elapsed: {1}'.format(self.data['games'], self.data['time']))
        print("Maximum depth searched:", self.max_depth)

        # Store and display the stats for each possible action.
        self.data['actions'] = sorted(
            voting.items(),
            key = lambda x: (x[1]['votes'], x[1]['wins']/x[1]['visits']),
            reverse=True
        )
        # for m in self.data['actions']:
        #     print self.action_template.format(**m)

        # Pick the action with the highest average value.
        return self.board.unpack_action(self.data['actions'][0][0])