Code Example #1
File: IGN_scraper.py  Project: akshayka/video-games
def launch_workers(outfile, start_index, end_index, score_flag, force, verbose):
	BASE_URL = "http://www.ign.com/games/all-ajax?startIndex="
	
	
	# Synchronization Tools
	num_workers = Semaphore(MAX_NUM_PROCESSES)
	outfile_lock = Lock()
	urlopen_lock = Lock()
	stderr_lock = Lock()
	print_lock = Lock()
	
	# Write the categories
	if (outfile != None):
		outfile.write("title,link,platform,publisher,score,date\n")

	# Launch the workers
	processes = []
	curr_index = start_index
	while curr_index <= end_index:
		curr_url = BASE_URL + str(curr_index)
		worker = Process(target=open_url_and_parse,
			args=(outfile, curr_url, score_flag, force, verbose,
				outfile_lock, urlopen_lock, stderr_lock, print_lock,
				num_workers))
		processes.append(worker)
		if verbose:
			print_lock.acquire()
			print "Launching worker for url: %s" % curr_url
			print_lock.release()
		num_workers.acquire()
		worker.start()
		curr_index += INDEX_INCREMENT
	for p in processes:
		p.join()
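The loop above acquires num_workers before each start() but never releases it, so the release is presumably the worker's responsibility. A minimal sketch of that counterpart, assuming open_url_and_parse takes the arguments shown above (illustrative only, not the project's actual implementation):

def open_url_and_parse(outfile, url, score_flag, force, verbose,
		outfile_lock, urlopen_lock, stderr_lock, print_lock, num_workers):
	try:
		# fetch the page under urlopen_lock, parse it, and append rows to
		# outfile while holding outfile_lock
		pass
	finally:
		# freeing the slot lets launch_workers start the next Process
		num_workers.release()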
Code Example #2
class Thread_Pool_Manager(object):
    def __init__(self, thread_num=cpu_count()):
        self.thread_num = thread_num
        print(thread_num)
        self.work_queue = JoinableQueue()
        self.work_num = Semaphore(0)
        self.mutex = Lock()

    def start_threads(self):
        for i in range(self.thread_num):
            thread = Process(target=self.do_job)
            thread.daemon = True  # set thread as daemon
            thread.start()

    def do_job(self):
        global Numbers
        while True:
            # print(1)
            self.work_num.acquire()
            with self.mutex:
                print(1, self.work_queue.qsize())
                thread_job = self.work_queue.get()
                print(0, self.work_queue.qsize())
            thread_job.do_job(self.work_queue, self.work_num)
            print(self.work_queue.qsize())
            self.work_queue.task_done()

    def join(self):
        self.work_queue.join()

    def add_job(self, job):
        self.work_queue.put(job)
        self.work_num.release()
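Jobs handed to add_job are expected to expose a do_job(work_queue, work_num) method, because that is what the worker loop calls. A minimal usage sketch under that assumption (PrintJob is hypothetical and not part of the original code; it relies on the Thread_Pool_Manager defined above together with the usual multiprocessing imports):

class PrintJob(object):
    def __init__(self, n):
        self.n = n

    def do_job(self, work_queue, work_num):
        # a real job could enqueue follow-up work here via work_queue.put()
        # plus work_num.release()
        print(self.n * self.n)

if __name__ == '__main__':
    pool = Thread_Pool_Manager()
    pool.start_threads()
    for i in range(10):
        pool.add_job(PrintJob(i))
    pool.join()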
Code Example #3
class Budget(object):
    """Device budget (for lack of a better term)

    Details
    -------
    Keeps a semaphore for the total number of resources available,
    and a device-specific atomic counter of available slots.
    """
    def __init__(self, devices, n_per_device):
        self.devices, self.n_per_device = devices, n_per_device
        self.total = Semaphore(len(devices) * n_per_device)
        self.alloc = Array("i", len(devices) * [n_per_device])

    def acquire(self):
        self.total.acquire()
        with self.alloc.get_lock():
            # get the largest counter
            index = max(range(len(self.alloc)), key=self.alloc.__getitem__)
            # assert self.alloc[index] > 0

            # acquire the index and decrease the counter
            self.alloc[index] -= 1
        return index

    def release(self, index):
        with self.alloc.get_lock():
            self.alloc[index] += 1
        self.total.release()

    def __getitem__(self, index):
        return self.devices[index]
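A minimal sketch of how a Budget like this might cap per-device concurrency; the device names and the job callable are placeholders, and the class is assumed to be constructed with the multiprocessing Semaphore and Array imports it uses above:

from multiprocessing import Process

def run_on_device(budget, job):
    index = budget.acquire()      # blocks until some device has a free slot
    try:
        job(budget[index])        # do the work on that device, e.g. 'cuda:0'
    finally:
        budget.release(index)     # hand the slot back

if __name__ == '__main__':
    budget = Budget(devices=['cuda:0', 'cuda:1'], n_per_device=2)
    workers = [Process(target=run_on_device, args=(budget, print)) for _ in range(8)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()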
Code Example #4
def relay(semaphore: mp.Semaphore, queue: mp.Queue, output_lock: mp.Lock,
          bmsg: bytes, addr: tuple, relay_dict, recv_time: datetime):
    semaphore.acquire()
    bmsg = bytearray(bmsg)
    header = DNSHeader(bmsg[:12])
    header.aa = 1
    bmsg = header.bmsg + bmsg[12:]
    assert header.qdcount == 1
    question = DNSQuestion(bmsg, offset=12)
    with output_lock:
        cprint(f'[{recv_time}][recv query {bytes_to_int(bmsg[:2])}]: {bmsg} from {addr}', fore='green', style='reverse')
        cprint_header(header, fore='green')
        cprint_question(question, fore='green')
    if question.qname in relay_dict:
        if relay_dict[question.qname] == '0.0.0.0':
            header.rcode = 3
            answer = header.bmsg + bmsg[12:]
            mode = 'intercept  '
        elif question.qtype == 1:
            answer = fake_bmsg(bmsg, relay_dict[question.qname])
            mode = 'local resolve '
        else:
            answer = forward(bmsg)
            if answer is None:
                # release the semaphore before the early return so it is not leaked
                semaphore.release()
                return
            mode = 'relay msg  '
    else:
        answer = forward(bmsg)
        mode = 'relay msg  '
    queue.put((answer, addr, recv_time, mode))
    semaphore.release()
Code Example #5
class EmbeddingWorker(Process):
    def __init__(self, queue, X, y, transformation_method, embedding_args):
        super().__init__()
        self.pause_lock = Semaphore(value=1)  # one permit: the lock starts free
        self.embedding_args = embedding_args
        self.X = X
        self.y = y
        self.transformation_method = transformation_method
        self.queue = queue

    def callback(self, command, iteration, payload):
        # pausing acquires pause_lock and the following code only runs if
        # pause_lock is free
        with self.pause_lock:
            self.queue.put((command, iteration, payload))

    def run(self):
        self.transformation_method(self.X, self.y, self.embedding_args,
                                   self.callback)

    def pause(self):
        self.pause_lock.acquire()

    def resume(self):
        self.pause_lock.release()

    def is_paused(self):
        return not self.pause_lock.get_value()
Code Example #6
def identify_similar_regions_for_vntrs_using_blat():
    from multiprocessing import Process, Semaphore, Manager
    reference_vntrs = load_unique_vntrs_data()

    records = []
    for ref_vntr in reference_vntrs:
        record = SeqRecord.SeqRecord('')
        sequence = ref_vntr.left_flanking_region[
            -30:] + ref_vntr.pattern + ref_vntr.right_flanking_region[:30]
        record.seq = Seq.Seq(sequence)
        record.id = str(ref_vntr.id)
        records.append(record)
    vntr_structures_file = 'reference_vntr_structures.fa'
    with open(vntr_structures_file, 'w') as output_handle:
        SeqIO.write(records, output_handle, 'fasta')

    sema = Semaphore(7)
    manager = Manager()
    result_list = manager.list()
    process_list = []
    for ref_vntr in reference_vntrs:
        sema.acquire()
        p = Process(target=find_similar_region_for_vntr,
                    args=(sema, ref_vntr, vntr_structures_file, result_list))
        process_list.append(p)
        p.start()

    for p in process_list:
        p.join()
    result_list = list(result_list)
    with open('similar_vntrs.txt', 'a') as out:
        for vntr_id in result_list:
            out.write('%s\n' % vntr_id)
Code Example #7
File: benchmark.py  Project: domoritz/SoSAT
def run():
    algo = parameters["algo"]
    files = [open(x) for x in parameters["files"]]
    configs = []
    p = parameters["params"]
    max_processes = 3
    semaphore = Semaphore(max_processes)

    # generate configurations as combinations of possible
    # keys and a product of their values
    for keys in it.combinations(p.keys(), len(p.keys())):
        v = [p[k] for k in keys]
        for values in it.product(*v):
            config = {}
            for i, k in enumerate(keys):
                config[k] = values[i]
            configs.append(config)
    for f in files:
        for conf in configs:
            config = {"FILENAME": f.name}
            config.update(conf)

            f.seek(0)
            num_vars, clauses = parser.parse(f)

            p = MyProcess(target=run_algorithm, args=(algo, num_vars, clauses, config, semaphore))

            semaphore.acquire()
            p.start()
Code Example #8
File: tasks.py  Project: project-ncl/pnc-cli
    def run(self, tasks, build_config, parallel_threads):
        semaphore = Semaphore(parallel_threads)
        process_finished_notify = Condition(Lock())
        while tasks.count_buildable_tasks() > 0:
            task = tasks.get_next()

            if task is None:
                self.wait_tasks_to_complete(parallel_threads, process_finished_notify, semaphore)
                continue

            semaphore.acquire()
            task.state = Task.State.RUNNING
            logging.debug("Starting task %s", task.name)
            self.start_new_process(process_finished_notify, semaphore, self.process_job, task, build_config)

        self.wait_tasks_to_complete(parallel_threads, process_finished_notify, semaphore)

        if tasks.count(Task.State.FAILED) > 0:
            logging.error('Some packages failed to build.')
            logging.error("  %s", tasks.print_name(Task.State.FAILED))
            return 1
        if tasks.count(Task.State.RUNNING) > 0:
            logging.error('Something went wrong, there are still some running tasks.')
            return 1
        if tasks.count(Task.State.NEW) > 0:
            logging.error('Something went wrong, there are still unprocessed tasks.')
            return 1

        logging.info("Build completed successfully.")
        return 0
Code Example #9
File: cam_model.py  Project: USnark772/RSCompanionV3
 def _img_processor(self, sh_img_arr: Array, sem1: Semaphore,
                    sem2: Semaphore, ovl_arr: Array) -> None:
     """
     Process images as needed.
     :param sh_img_arr: The array containing the frame to work with.
     :param sem1: The entrance lock.
     :param sem2: The exit lock.
     :param ovl_arr: The array containing the overlay work with.
     :return None:
     """
     img_dim = (EDIT_HEIGHT, self._cur_arr_shape[1], self._cur_arr_shape[2])
     img_size = int(EDIT_HEIGHT * img_dim[1] * img_dim[2])
     img_arr = frombuffer(sh_img_arr.get_obj(), count=img_size,
                          dtype=DTYPE).reshape(img_dim)
     while self._process_imgs:
         sem1.acquire()
         if self._use_overlay:
             img_pil = Image.fromarray(img_arr)
             draw = ImageDraw.Draw(img_pil)
             draw.text(OVL_POS,
                       text=ovl_arr.value.decode(),
                       font=OVL_FONT,
                       fill=OVL_CLR)
             processed_img = asarray(img_pil)
             copyto(img_arr, processed_img)
         sem2.release()
Code Example #10
File: image.py  Project: rupertsmall/neon
class Msg(object):
    """
    TODO: Not documenting this class because it may go away.
    """

    def __init__(self, size):
        self.s_e = Semaphore(1)
        self.s_f = Semaphore(0)
        self.s_buf = Array(ct.c_ubyte, size)

    def send(self, func):
        self.s_e.acquire()
        self.s_buf.acquire()
        send_result = func(self.s_buf._obj)
        self.s_buf.release()
        self.s_f.release()
        return send_result

    def recv(self, func):
        self.s_f.acquire()
        self.s_buf.acquire()
        recv_result = func(self.s_buf._obj)
        self.s_buf.release()
        self.s_e.release()
        return recv_result
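The two semaphores implement an empty/full handshake over the shared buffer: s_e admits the next writer, s_f admits the next reader. A minimal sketch of passing one value between two processes with this class (illustrative; assumes the ctypes and multiprocessing imports the class itself needs):

from multiprocessing import Process

def producer(msg):
    def fill(buf):
        buf[0] = 42               # write into the shared ctypes buffer
        return True
    msg.send(fill)

def consumer(msg):
    def read(buf):
        return buf[0]
    print(msg.recv(read))         # prints 42

if __name__ == '__main__':
    msg = Msg(size=16)
    p = Process(target=producer, args=(msg,))
    c = Process(target=consumer, args=(msg,))
    p.start()
    c.start()
    p.join()
    c.join()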
Code Example #11
def _build_single_scenario_proc(clean: bool, allow_offset_map: bool,
                                scenario: str, semaphore: Semaphore):
    semaphore.acquire()
    try:
        _build_single_scenario(clean, allow_offset_map, scenario)
    finally:
        semaphore.release()
Code Example #12
File: topology.py  Project: krohan100/pydssim
 def getNeighbors(self, peer):
             
     semaphore = Semaphore()
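     # Note: this Semaphore is created anew on every call, so the acquire/release
     # pair below does not actually serialize concurrent access to self.__layout.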
     semaphore.acquire()
     neighbors = self.__layout[peer.getId()].getNeighbors()
     semaphore.release()
     return neighbors
Code Example #13
 def setCurrentSimulationTime(self, currentSimulationTime):
    
     semaphore = Semaphore()
     semaphore.acquire()
     self.__currentSimulationTime = currentSimulationTime
     semaphore.release()
     return self.__currentSimulationTime
Code Example #14
def run():
    algo = parameters['algo']
    files = [open(x) for x in parameters['files']]
    configs = []
    p = parameters['params']
    max_processes = 3
    semaphore = Semaphore(max_processes)

    # generate configurations as combinations of possible
    # keys and a product of their values
    for keys in it.combinations(p.keys(), len(p.keys())):
        v = [p[k] for k in keys]
        for values in it.product(*v):
            config = {}
            for i, k in enumerate(keys):
                config[k] = values[i]
            configs.append(config)
    for f in files:
        for conf in configs:
            config = {'FILENAME': f.name}
            config.update(conf)

            f.seek(0)
            num_vars, clauses = parser.parse(f)

            p = MyProcess(target=run_algorithm,
                          args=(algo, num_vars, clauses, config, semaphore))

            semaphore.acquire()
            p.start()
Code Example #15
File: abstract_network.py  Project: krohan100/pydssim
 def getPeerID(self, peerId):
     
     semaphore = Semaphore()
     semaphore.acquire()
     peer = self.__layout[peerId]
     semaphore.release()
     return peer
Code Example #16
File: image.py  Project: huhoo/neon
class Msg(object):
    """
    Data structure encapsulating a message.
    """

    def __init__(self, size):
        self.s_e = Semaphore(1)
        self.s_f = Semaphore(0)
        self.s_buf = Array(ct.c_ubyte, size)

    def send(self, func):
        self.s_e.acquire()
        self.s_buf.acquire()
        send_result = func(self.s_buf._obj)
        self.s_buf.release()
        self.s_f.release()
        return send_result

    def recv(self, func):
        self.s_f.acquire()
        self.s_buf.acquire()
        recv_result = func(self.s_buf._obj)
        self.s_buf.release()
        self.s_e.release()
        return recv_result
Code Example #17
    def serve(self):
        """Start workers and put into queue"""
        # this is a shared state that can tell the workers to exit when False
        self.isRunning.value = True

        # first bind and listen to the port
        self.serverTransport.listen()

        # fork the children
        semaphore = Semaphore(0)
        for _ in range(self.numWorkers):
            try:
                w = Process(target=self.workerProcess, args=(semaphore, ))
                w.daemon = True
                w.start()
                self.workers.append(w)
            except Exception as x:
                logger.exception(x)

        # wait until all workers init finish
        for _ in range(self.numWorkers):
            semaphore.acquire()

        # wait until the condition is set by stop()
        while True:
            try:
                gevent.sleep(1)
                if not self.isRunning.value:
                    break
            except (SystemExit, KeyboardInterrupt):
                break
            except Exception as x:
                logger.exception(x)

        self.isRunning.value = False
Code Example #18
def main():
    tic = time.perf_counter()
    parser = make_parser()
    parse(parser.parse_args())

    # os.chdir(source)
    get_files()
    match_files()
    pages = list(chunks(matches, lines))
    pages = name_pages(pages)
    i = 0
    processes = []
    concurrency = thread_count
    print("Currently using " + str(concurrency) + " Thread(s)")
    sema = Semaphore(concurrency)
    for page in pages:
        sema.acquire()
        progress(i + 1, len(pages) * 2, "Processing page" + str(i + 1) + " of " + str(len(pages)))
        process = multiprocessing.Process(target=make_page, args=(page, sema,))
        processes.append(process)
        process.start()
        i += 1

    for process in processes:
        progress(i + 1, len(pages) * 2, "Finishing page " + str((i + 1) - len(pages)) + " of " + str(len(pages)))
        process.join()
        i += 1

    toc = time.perf_counter()
    print(f"\nFinished merging in {toc - tic:0.4f} seconds")
    print("\nPages have been stored at ", dest)
Code Example #19
File: tasks.py  Project: vmarekrh/pnc-cli
    def run(self, tasks, build_config, parallel_threads):
        semaphore = Semaphore(parallel_threads)
        process_finished_notify = Condition(Lock())
        while tasks.count_buildable_tasks() > 0:
            task = tasks.get_next()

            if task is None:
                self.wait_tasks_to_complete(parallel_threads,
                                            process_finished_notify, semaphore)
                continue

            semaphore.acquire()
            task.state = Task.State.RUNNING
            logging.debug("Starting task %s", task.name)
            self.start_new_process(process_finished_notify, semaphore,
                                   self.process_job, task, build_config)

        self.wait_tasks_to_complete(parallel_threads, process_finished_notify,
                                    semaphore)

        if tasks.count(Task.State.FAILED) > 0:
            logging.error('Some packages failed to build.')
            logging.error("  %s", tasks.print_name(Task.State.FAILED))
            return 1
        if tasks.count(Task.State.RUNNING) > 0:
            logging.error(
                'Something went wrong, there are still some running tasks.')
            return 1
        if tasks.count(Task.State.NEW) > 0:
            logging.error(
                'Something went wrong, there are still unprocessed tasks.')
            return 1

        logging.info("Build completed successfully.")
        return 0
Code Example #20
File: _helpers.py  Project: wtsi-hgi/cookie-monster
def block_until_processed(cookie_jar: CookieJar, cookie_paths: Sequence[str],
                          expected_number_of_calls_to_mark_as_complete: int):
    """
    Puts the given cookies into the cookie jar and wait until they have been completed/marked for reprocessing.
    :param cookie_jar: the cookie jar to put cookies to process into
    :param cookie_paths: the cookie paths to process
    :param expected_number_of_calls_to_mark_as_complete: the number of calls expected to the Cookie jar's
    `mark_as_complete` method
    """
    if cookie_jar.queue_length() != 0:
        raise RuntimeError("Already cookies in the jar")

    mark_as_complete_semaphore = Semaphore(0)
    original_mark_as_complete = cookie_jar.mark_as_complete

    def mark_as_complete(path: str):
        mark_as_complete_semaphore.release()
        original_mark_as_complete(path)

    cookie_jar.mark_as_complete = MagicMock(side_effect=mark_as_complete)

    for cookie_path in cookie_paths:
        cookie_jar.mark_for_processing(cookie_path)

    calls_to_mark_as_complete = 0
    while calls_to_mark_as_complete != expected_number_of_calls_to_mark_as_complete:
        mark_as_complete_semaphore.acquire()
        assert cookie_jar.mark_as_complete.call_count <= expected_number_of_calls_to_mark_as_complete
        calls_to_mark_as_complete += 1

    assert calls_to_mark_as_complete == cookie_jar.mark_as_complete.call_count
Code Example #21
    def generator(self, *args, **kwargs):
        """ This function warp generator to ParaWrapper's generator
            which is capable of multi-processing
            Once the generator function was settled, we can send worker with the task then
            work with full-load until meet the buff_size limit

            The worker's job is to feed the list and keep it contains more than <buff_size> batches
        """
        #   Initialization semaphores and numbering
        buff_count = Semaphore(value=0)
        target_remain = Semaphore(value=self.buff_size)
        number = str(self.gen_num)
        self.gen_num += 1

        #   Initializing list
        self.batch_list[number] = self.manager.list()

        #   Assign work and send worker
        gen = self.datagen.generator(*args, **kwargs)
        worker = Process(target=self.task,
                         args=(gen, number, target_remain, buff_count))
        worker.start()

        while True:
            buff_count.acquire(block=True)
            ret = self.batch_list[number].pop()
            target_remain.release()
            yield ret
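The worker target self.task is not shown here; it presumably mirrors the handshake by waiting on target_remain before producing a batch and releasing buff_count after appending it. A minimal sketch of such a worker under that assumption (not the project's actual code):

    def task(self, gen, number, target_remain, buff_count):
        # producer side of the buff_count / target_remain handshake
        for batch in gen:
            target_remain.acquire(block=True)   # wait for a free buffer slot
            self.batch_list[number].append(batch)
            buff_count.release()                # announce one more ready batch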
Code Example #22
def identify_similar_regions_for_vntrs_using_blat():
    from multiprocessing import Process, Semaphore, Manager

    reference_vntrs = load_unique_vntrs_data()
    sema = Semaphore(24)
    manager = Manager()
    result_list = manager.list()
    process_list = []
    # os.system('cp hg19_chromosomes/CombinedHG19_Reference.fa /tmp/CombinedHG19_Reference.fa')
    for i in range(len(reference_vntrs)):
        if not reference_vntrs[i].is_non_overlapping(
        ) or reference_vntrs[i].has_homologous_vntr():
            continue
        sema.acquire()
        p = Process(target=find_similar_region_for_vntr,
                    args=(sema, reference_vntrs[i], i, result_list))
        process_list.append(p)
        p.start()

    for p in process_list:
        p.join()
    result_list = list(result_list)
    with open('similar_vntrs.txt', 'a') as out:
        for vntr_id in result_list:
            out.write('%s\n' % vntr_id)
Code Example #23
    def get_spanning_reads_of_aligned_pacbio_reads(self, alignment_file):
        sema = Semaphore(settings.CORES)
        manager = Manager()
        length_distribution = manager.list()
        mapped_spanning_reads = manager.list()

        vntr_start = self.reference_vntr.start_point
        vntr_end = self.reference_vntr.start_point + self.reference_vntr.get_length(
        )
        region_start = vntr_start
        region_end = vntr_end
        read_mode = 'r' if alignment_file.endswith('sam') else 'rb'
        samfile = pysam.AlignmentFile(alignment_file, read_mode)
        reference = get_reference_genome_of_alignment_file(samfile)
        chromosome = self.reference_vntr.chromosome if reference == 'HG19' else self.reference_vntr.chromosome[
            3:]
        process_list = []
        for read in samfile.fetch(chromosome, region_start, region_end):
            sema.acquire()
            p = Process(target=self.check_if_pacbio_read_spans_vntr,
                        args=(sema, read, length_distribution,
                              mapped_spanning_reads))
            process_list.append(p)
            p.start()

        for p in process_list:
            p.join()

        logging.info('length_distribution of mapped spanning reads: %s' %
                     list(length_distribution))
        return list(mapped_spanning_reads)
Code Example #24
File: topology.py  Project: krohan100/pydssim
 def countNeighbors(self, peer):
                    
     semaphore = Semaphore()
     semaphore.acquire()
     
     count = peer.countNeighbors()
     semaphore.release()
     return count
Code Example #25
File: topology.py  Project: krohan100/pydssim
 def getNeighborIt(self, peer):
             
     semaphore = Semaphore()
     semaphore.acquire()
     neighbors = []
     for neighbor in self.__layout[peer.getId()].getNeighbors():
         neighbors.append(neighbor.getTargetPeer())
     neighborIt = neighbors.__iter__()
     semaphore.release()
     return neighborIt
Code Example #26
def get_measurement_ids(ip_addr: str, ripe_slow_down_sema: mp.Semaphore,
                        allowed_measurement_age: int) -> [int]:
    """
    Get ripe measurements for ip_addr
    """
    def next_batch(measurement):
        loc_retries = 0
        while True:
            try:
                measurement.next_batch()
            except ripe_atlas.exceptions.APIResponseError:
                logging.exception(
                    'MeasurementRequest APIResponseError next_batch')
                pass
            else:
                break
            time.sleep(5)
            loc_retries += 1

            if loc_retries % 5 == 0:
                logging.error('Ripe next_batch error! {}'.format(ip_addr))

    max_age = int(time.time()) - allowed_measurement_age
    params = {
        'status__in': '2,4,5',
        'target': ip_addr,
        'type': 'ping',
        'stop_time__gte': max_age
    }
    ripe_slow_down_sema.acquire()
    retries = 0

    while True:
        try:
            measurements = ripe_atlas.MeasurementRequest(**params)
        except ripe_atlas.exceptions.APIResponseError:
            logging.exception('MeasurementRequest APIResponseError')
        else:
            break

        time.sleep(5)
        retries += 1

        if retries % 5 == 0:
            logging.error('Ripe MeasurementRequest error! {}'.format(ip_addr))
            time.sleep(30)

    next_batch(measurements)
    if measurements.total_count > 500:
        skip = int(measurements.total_count / 100) - 5

        for _ in range(0, skip):
            next_batch(measurements)

    return [measurement['id'] for measurement in measurements]
Code Example #27
File: abstract_network.py  Project: krohan100/pydssim
 def addPeer(self, peer):
     
     if self.__layout.has_key(peer.getPID()):
         return False
     
     semaphore = Semaphore()
     semaphore.acquire()
     
     self.__layout[peer.getPID()] = peer
     semaphore.release()
     NetworkLogger().resgiterLoggingInfo("Add peer %s in Layout Network "%(peer.getPID()))
     return self.__layout.has_key(peer.getPID())
Code Example #28
class Runner:
    def __init__(self):
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGUSR1, signal.SIG_IGN)

        # Create the pool without special signal handling
        self.pool = Pool(processes=NUM_WORKERS)
        self.worker_semaphore = Semaphore(NUM_WORKERS)

        self.has_received_termination_signal = False

        for sig in termination_signals:
            signal.signal(sig, self.handle_termination_signal)

        self.units_of_work = itertools.cycle(range(100))

        logging.info("Main PID {0}".format(os.getpid()))

    def do_work(self):
        while True:
            if self.has_received_termination_signal:
                logging.info(
                    "Stopping loop because termination signal received")
                return

            self.acquire_worker_semaphore()
            work = (next(self.units_of_work), )

            logging.info("Sending work {0}".format(work))
            self.pool.apply_async(
                worker_function,
                work,
                callback=self.release_worker_semaphore,
                error_callback=self.release_worker_semaphore,
            )
            time.sleep(2)

    def start(self):
        self.do_work()
        self.pool.close()
        self.pool.join()

    def handle_termination_signal(self, signum, frame):
        my_pid = os.getpid()
        logging.info("PID {0} received termination signal {1!r}".format(
            my_pid, signum))
        self.has_received_termination_signal = True

    def acquire_worker_semaphore(self):
        self.worker_semaphore.acquire()

    def release_worker_semaphore(self, exception=None):
        self.worker_semaphore.release()
Code Example #29
def cv_evaluation(hyperparameters, verbose=True):
    start_time = time.time()

    n_splits = 6

    current_epochs = int(hyperparameters[:, 0])
    current_learning_rate = float(hyperparameters[:, 1])
    current_activation_function = activation_functions[int(hyperparameters[:,
                                                                           2])]
    current_batch_size = 14
    current_optimizer = tf.keras.optimizers.Adamax(
        learning_rate=current_learning_rate)

    cv_split = KFold(n_splits=n_splits)

    test_index = np.linspace(
        n_partitions * (n_individuals_train + n_individuals_val),
        n_partitions *
        (n_individuals_train + n_individuals_val + n_individuals_test) - 1,
        n_partitions * n_individuals_test,
        dtype='int32')

    with Manager() as manager:
        loss = manager.list()
        processes = []

        semaphore = Semaphore(min(concurrency, n_splits))

        for train_index, val_index in cv_split.split(
                _bold_set[:24 * (n_individuals_train + n_individuals_val)]):
            semaphore.acquire()

            p = Process(target=loop_cv,
                        args=(train_index, val_index, test_index,
                              current_epochs, current_learning_rate,
                              current_activation_function, current_batch_size,
                              semaphore, loss))
            p.start()
            processes.append(p)

        for p in processes:
            p.join()

        _loss = np.mean(list(loss))

    if (verbose):
        print("Evaluation with val loss:", _loss)
        print("Evaluation took", (time.time() - start_time) / (60 * 60),
              "hours",
              end='\n\n')

    return _loss
Code Example #30
def __get_measurements_for_nodes(measurement_ids: [int],
                                 ripe_slow_down_sema: mp.Semaphore,
                                 near_nodes: [RipeAtlasProbe],
                                 allowed_measurement_age: int) \
        -> typing.Generator[typing.Tuple[int, typing.List[RipeMeasurementResult]], None, None]:
    """Loads all results for all measurements if they are less than a year ago"""

    node_dct = {}
    for node in near_nodes:
        node_dct[node.probe_id] = node.id

    for measurement_id in measurement_ids:
        allowed_start_time = int(time.time()) - allowed_measurement_age

        params = {
            'msm_id': measurement_id,
            'start': allowed_start_time,
            'probe_ids': [node.probe_id for node in near_nodes][:1000]
        }

        ripe_slow_down_sema.acquire()
        success, result_list = ripe_atlas.AtlasResultsRequest(
            **params).create()
        retries = 0

        while not success and retries < 5:
            if not success and 'error' in result_list and 'status' in result_list[
                    'error'] and 'code' in result_list['error'] and result_list[
                        'error']['status'] == 406 and result_list['error'][
                            'code'] == 104:
                retries = 5
                break
            logging.debug('AtlasResultsRequest error! {}'.format(result_list))
            time.sleep(10 + (random.randrange(0, 500) / 100))
            ripe_slow_down_sema.acquire()
            success, result_list = ripe_atlas.AtlasResultsRequest(
                **params).create()
            if not success:
                retries += 1

        if retries > 4:
            logging.error('AtlasResultsRequest error! {}'.format(result_list))
            continue

        measurements = []
        for res in result_list:
            ripe_measurement = RipeMeasurementResult.create_from_dict(res)
            ripe_measurement.probe_id = node_dct[str(res['prb_id'])]

            measurements.append(ripe_measurement)

        yield measurement_id, measurements
Code Example #31
class Barrier:
    def __init__(self, n):
        self.n = n
        self.counter = SharedCounter(0)
        self.barrier = Semaphore(0)

    def wait(self):
        with self.counter.lock:
            self.counter.val.value += 1
            if self.counter.val.value == self.n:
                self.barrier.release()
        self.barrier.acquire()
        self.barrier.release()
Code Example #32
File: shared_utils.py  Project: traai/async-deep-rl
class Barrier:
    def __init__(self, n):
        self.n = n
        self.counter = SharedCounter(0)
        self.barrier = Semaphore(0)

    def wait(self):
        with self.counter.lock:
            self.counter.val.value += 1
            if self.counter.val.value == self.n: 
                self.barrier.release()
        self.barrier.acquire()
        self.barrier.release()
Code Example #33
File: ipc.py  Project: gattis/magnum-py
class WorkQueue(object):
    def __init__(self):
        self.request_rfd, self.request_wfd = os.pipe()
        self.response_rfd, self.response_wfd = os.pipe()
        self.response_reader = ResponseReader(self.response_rfd)
        self.request_sem = Semaphore()
        self.response_sem = Semaphore()

    def submit_request(self, id, address, head, body):
        try:
            ip_str, port = address
            ipa, ipb, ipc, ipd = map(int, ip_str.split("."))
        except:
            port = ipa = ipb = ipc = ipd = 0
        os.write(
            self.request_wfd, REQUEST_HEADER.pack(id, ipa, ipb, ipc, ipd, port, len(head), len(body)) + head + body
        )

    def get_request(self):
        self.request_sem.acquire()
        header = ""
        bytes_to_read = REQUEST_HEADER.size
        while bytes_to_read:
            header += os.read(self.request_rfd, bytes_to_read)
            bytes_to_read = REQUEST_HEADER.size - len(header)
        id, ipa, ipb, ipc, ipd, port, head_len, body_len = REQUEST_HEADER.unpack(header)

        head = StringIO()
        bytes_to_read = head_len
        while bytes_to_read:
            head.write(os.read(self.request_rfd, bytes_to_read))
            bytes_to_read = head_len - head.tell()

        body = StringIO()
        bytes_to_read = body_len
        while bytes_to_read:
            body.write(os.read(self.request_rfd, bytes_to_read))
            bytes_to_read = body_len - body.tell()

        self.request_sem.release()
        return id, (".".join(map(str, [ipa, ipb, ipc, ipd])), port), head.getvalue(), body.getvalue()

    def submit_response(self, id, response):
        self.response_sem.acquire()
        response_output = response.output()
        keep_alive = "\x01" if response.headers.get("Connection") == "Keep-Alive" else "\x00"
        os.write(self.response_wfd, RESPONSE_HEADER.pack(id, len(response_output)) + response_output + keep_alive)
        self.response_sem.release()

    def get_response(self):
        return self.response_reader.read()
Code Example #34
File: forking.py  Project: nvie/worker-experiment
class ForkingWorker(BaseWorker):

    def __init__(self, num_processes=1):
        # Set up sync primitives, to communicate with the spawned children
        self._semaphore = Semaphore(num_processes)
        self._slots = Array('i', [0] * num_processes)

    def spawn_child(self):
        """Forks and executes the job."""
        self._semaphore.acquire()    # responsible for the blocking

        # Select an empty slot from self._slots (the first 0 value is picked)
        # The implementation guarantees there will always be at least one empty slot
        for slot, value in enumerate(self._slots):
            if value == 0:
                break

        # The usual hardcore forking action
        child_pid = os.fork()
        if child_pid == 0:
            # Within child

            # Disable signal handlers
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            signal.signal(signal.SIGTERM, signal.SIG_IGN)

            random.seed()
            try:
                self.fake_work()
            finally:
                # This is the new stuff.  Remember, we're in the child process
                # currently. When all work is done here, free up the current
                # slot (by writing a 0 in the slot position).  This
                # communicates to the parent that the current child has died
                # (so can safely be forgotten about).
                self._slots[slot] = 0
                self._semaphore.release()
                os._exit(0)
        else:
            # Within parent, keep track of the new child by writing its PID
            # into the first free slot index.
            self._slots[slot] = child_pid

    def wait_for_children(self):
        for child_pid in self._slots:
            if child_pid != 0:
                os.waitpid(child_pid, 0)

    def get_id(self):
        return os.getpid()
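A minimal sketch of driving this worker, assuming a subclass that supplies the fake_work() the child calls (SleepyWorker is hypothetical, and the reliance on os.fork makes this POSIX-only):

import random
import time

class SleepyWorker(ForkingWorker):
    def fake_work(self):
        time.sleep(random.random())   # stand-in for a real job

if __name__ == '__main__':
    worker = SleepyWorker(num_processes=4)
    for _ in range(10):
        worker.spawn_child()          # blocks once all four slots are busy
    worker.wait_for_children()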
Code Example #35
 def disconnect(self, priority):
     sem = Semaphore()
     sem.acquire()
     if not self.getPeer().isConnected():
         return
     
     network = self.getPeer().getNetwork()
     neighbors = network.getNeighbors(self.getPeer())
     if len(neighbors) > 0:
         for n in neighbors:
             network.removeConnection(self.getPeer(), n)
             self.getPeer().disconnected()
     else:
         self.getPeer().disconnected()
Code Example #36
class ProcessControl():
    def __init__(self, forks_number):
        self.forks_number = forks_number
        self.semaphore = Semaphore(self.forks_number)

    def execute(self, obj, function_to_execute, data):
        self.semaphore.acquire()
        #print("Launching new process")
        p = Process(target=processCall,
                    args=(self.semaphore, obj, function_to_execute, data))
        p.start()

    def wait(self):
        for i in range(self.forks_number):
            self.semaphore.acquire()
Code Example #37
class ProcessControl():
    def __init__(self,forks_number):
        self.forks_number=forks_number
        self.semaphore=Semaphore(self.forks_number)
        
    def execute(self,obj,function_to_execute,data):
        self.semaphore.acquire()
        #print("Launching new process")
        p=Process(target=processCall, args=(self.semaphore,obj,function_to_execute,data))
        p.start()
            
    
    def wait(self):
        for i in range(self.forks_number):
            self.semaphore.acquire()
Code Example #38
File: parallel.py  Project: mgroth0/mlib
class CheckableSem:
    def __init__(self):
        self._sem = Semaphore()
        self._num_permits = 1

    def acquire(self):
        self._sem.acquire()
        self._num_permits -= 1

    def release(self):
        self._sem.release()
        self._num_permits += 1

    def almost_perfect_get_num_permits(self):
        return self._num_permits
Code Example #39
File: abstract_trading.py  Project: lglmoura/PyDDSIM
 def definyPeerTrading(self):
     
     value = 0
     peerAux = ""
     semaphore = Semaphore()
     semaphore.acquire()
     for peer,trust in self.__peersTrading.iteritems():
         
         if trust >= value:
             value = trust
             peerAux = peer 
         
         
     semaphore.release()
     return (peerAux,value)
Code Example #40
    def start_multiprocess(self, csv_file_name):
        self.get_download_task_from_csv(csv_file_name)
        semaphores = Semaphore(self._thread_number)
        for video_url in self._video_url:
            semaphores.acquire()
            process = multiprocessing.Process(
                target=self.run_downloader,
                args=(semaphores, video_url, self._MAX_VIDEO_DURATION_DIFF))
            process.daemon = True
            process.start()


#            semaphores.release()
        for _ in range(0, self._thread_number):
            semaphores.acquire()
        semaphores.release()
Code Example #41
File: enc_manager.py  Project: gonzygonz/enc_files
    def _enc_dec_list(self, paths, enc: bool, remove_old):
        sema = Semaphore(self.workers)
        all_processes = []
        for path in paths:
            sema.acquire()
            recv_end, send_end = Pipe(False)
            p = Process(target=self._launch_single_enc_dec,
                        args=(enc, remove_old, path, sema, send_end))
            all_processes.append((p, recv_end, path))
            p.start()
        for p, r, path in all_processes:
            p.join()
            res = r.recv()
            path.update_new_path(res)

        print("done files")
Code Example #42
class DbWriter(Process):

	def __init__(self, queue, stop_flag):
		super(DbWriter, self).__init__()
		self.worker_control = Semaphore(MAX_WRITER_WORKERS)
		self.result_queue = queue
		self.stop_flag = stop_flag

	def run(self):

		print(" *** DB Writer online")

		while self.stop_flag.value != 1:

			if self.worker_control.acquire(False):

				task = self.result_queue.get()

				if task:
					try:
						worker = WriterWorker(task, self.worker_control)
						worker.start()

					except Exception as err:
						print(err)
						print("Invalid task %s" % task)
						self.worker_control.release()
				else:
					self.worker_control.release()

			time.sleep(0.3)

		print("stop flag: %s" % self.stop_flag.value)
Code Example #43
File: topology.py  Project: krohan100/pydssim
 def removeNeighbor(self, source, target):
     
     if (not self.__layout.has_key(source.getId())) and (not self.__layout.has_key(target.getId())) :
         return False
     if source.hasNeighbor(target):
         return False 
     if target.hasNeighbor(source):
         return False
     
     semaphore = Semaphore()
     semaphore.acquire()
     flag = source.removeNeighbor(target)
     target.removeNeighbor(source)
     semaphore.release()
     
     return flag
Code Example #44
def process_vntrseek_data(unprocessed_vntrs_file,
                          output_file='vntr_data/VNTRs.txt',
                          chrom=None):
    process_list = []
    unprocessed_vntrs = load_unprocessed_vntrseek_data(unprocessed_vntrs_file,
                                                       chrom)
    sema = Semaphore(settings.CORES)
    manager = Manager()
    partial_vntrs = manager.list([])
    for i in range(settings.CORES):
        sema.acquire()
        partial_vntrs.append(manager.list())
        q = len(unprocessed_vntrs) // settings.CORES  # integer division keeps the slice bounds ints
        start = i * q
        end = (i + 1) * q if i + 1 < settings.CORES else len(unprocessed_vntrs)
        partial_input = unprocessed_vntrs[start:end]
        p = Process(target=find_non_overlapping_vntrs,
                    args=(partial_input, partial_vntrs[i], chrom, sema))
        process_list.append(p)
        p.start()
    for p in process_list:
        p.join()
    vntrs = []
    for partial_list in partial_vntrs:
        vntrs.extend(list(partial_list))
    print(chrom, len(vntrs))

    for vntr in vntrs:
        if not vntr.is_non_overlapping():
            continue
        repeat_segments = ','.join(vntr.get_repeat_segments())
        with open(output_file, 'a') as out:
            end_point = vntr.start_point + vntr.get_length()
            gene_name, annotation = get_gene_name_and_annotation_of_vntr(
                vntr.chromosome, vntr.start_point, end_point)
            out.write('%s %s %s %s %s %s %s %s %s %s\n' % (
                vntr.id,
                vntr.is_non_overlapping(),
                vntr.chromosome,
                vntr.start_point,
                gene_name,
                annotation,
                vntr.pattern,
                vntr.left_flanking_region,
                vntr.right_flanking_region,
                repeat_segments,
            ))
Code Example #45
File: hack.py  Project: juanigsrz/Gladiabot
class Barrier:
    def __init__(self, n):
        self.n = n
        self.count = Value('i', 0)
        self.mutex = Semaphore(1)
        self.barrier = Semaphore(0)

    def wait(self):
        self.mutex.acquire()
        self.count.value += 1
        self.mutex.release()

        if self.count.value == self.n:
            self.barrier.release()

        self.barrier.acquire()
        self.barrier.release()
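A minimal sketch of exercising this barrier with a few processes (the worker function is hypothetical); the lines printed after the barrier should only appear once all n processes have reached wait():

from multiprocessing import Process
import os

def worker(barrier):
    print('%d before barrier' % os.getpid())
    barrier.wait()
    print('%d after barrier' % os.getpid())

if __name__ == '__main__':
    n = 4
    barrier = Barrier(n)
    procs = [Process(target=worker, args=(barrier,)) for _ in range(n)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()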
Code Example #46
 def find_hmm_score_of_simulated_reads(self, hmm, reads):
     initial_recruitment_score = -10000
     process_list = []
     sema = Semaphore(settings.CORES)
     manager = Manager()
     processed_reads = manager.list([])
     vntr_bp_in_reads = Value('d', 0.0)
     for read_segment in reads:
         sema.acquire()
         p = Process(target=self.process_unmapped_read,
                     args=(sema, read_segment, hmm,
                           initial_recruitment_score, vntr_bp_in_reads,
                           processed_reads, False))
         process_list.append(p)
         p.start()
     for p in process_list:
         p.join()
     return processed_reads
Code Example #47
File: test_shm.py  Project: USnark772/RSCompanionV3
def image_processor(index: int, shared_array: Array, shared_dim: tuple,
                    sem1: Semaphore, sem2: Semaphore, line: Array,
                    running: Value):
    shm_size = EDIT_HEIGHT * shared_dim[1] * shared_dim[2]
    np_arr = np.frombuffer(shared_array.get_obj(),
                           count=int(shm_size),
                           dtype=DTYPE).reshape(
                               (EDIT_HEIGHT, shared_dim[1], shared_dim[2]))
    out_name = profile_outdir + "test_shm_proc_" + str(
        index) + "_image_processor" + ".prof"
    pflr = cProfile.Profile()
    pflr.enable()
    while running.value != 0:
        sem1.acquire()
        np.copyto(np_arr, add_overlay(np_arr, line.value.decode(STR_ENCODING)))
        sem2.release()
    pflr.disable()
    pflr.dump_stats(out_name)
Code Example #48
File: worker.py  Project: nak/pytest_mproc
    def run(index: int, host: str, port: int, start_sem: Semaphore,
            fixture_sem: Semaphore, test_q: JoinableQueue, result_q: Queue,
            node_port: int) -> None:
        start_sem.acquire()

        worker = WorkerSession(index, host, port, test_q, result_q)
        worker._node_fixture_manager = Node.Manager(as_main=False,
                                                    port=node_port,
                                                    name=f"Worker-{index}")
        worker._fixture_sem = fixture_sem
        args = sys.argv[1:]
        # remove coverage args, as pytest_cov handles multiprocessing already and will apply coverage to the worker
        # as a proc that was launched from the main thread, which itself has coverage (otherwise it will attempt
        # duplicate coverage processing and cause file conflicts galore)
        args = [arg for arg in args if not arg.startswith("--cov=")]
        config = _prepareconfig(args, plugins=[])
        # unregister terminal (don't want to output to stdout from worker)
        # as well as xdist (don't want to invoke any plugin hooks from another distribute testing plugin if present)
        config.pluginmanager.unregister(name="terminal")
        config.pluginmanager.register(worker, "mproc_worker")
        config.option.mproc_worker = worker
        from pytest_mproc.main import Orchestrator
        worker._client = Orchestrator.Manager(addr=(worker._host,
                                                    worker._port))
        workerinput = {
            'slaveid':
            "worker-%d" % worker._index,
            'workerid':
            "worker-%d" % worker._index,
            'cov_master_host':
            socket.gethostname(),
            'cov_slave_output':
            os.path.join(os.getcwd(), "worker-%d" % worker._index),
            'cov_master_topdir':
            os.getcwd()
        }
        config.slaveinput = workerinput
        config.slaveoutput = workerinput
        try:
            # and away we go....
            config.hook.pytest_cmdline_main(config=config)
        finally:
            config._ensure_unconfigure()
            worker._reporter.write(f"\nWorker-{index} finished\n")
Code Example #49
    def basic_call(self, f, req):
        free_threads = Semaphore(5)

        def g(): raise RuntimeError("oaiwhgfoia")
        mock_proc_inter_instance = MagicMock(free_threads=Semaphore(5),
                                             Lock=[Lock()
                                                   for _ in range(0, 3)],
                                             NL_L=Semaphore(2),
                                             pid_ptid_mapping=MagicMock(
                                                 has_key=lambda *args,
                                                 **kwargs: False))
        with patch("fsspmnl_processing_interface.path_constants_webinterface.jobs", self.jobs), patch("fsspmnl_processing_interface.LongProcess.LongProcess", self.MockLP), patch("fsspmnl_processing_interface.processing_interface.ProcessingInterface.Instance", mock_proc_inter_instance), patch("fsspmnl_processing_interface.path_constants_webinterface.dicom_dir", self.basedicomdir), patch("path_constants.jobs", self.jobs):
            ret = f(req)

        for _ in xrange(0, 5):
            assert(free_threads.acquire(False))

        assert(not free_threads.acquire(False))
        return ret
Code Example #50
File: Filtering.py  Project: Harry73/IGVC2017
def main():
	gps_n = Semaphore(0)
	gps_s = Semaphore(1)
	gps_coords_stack = Manager().list()

	gps = GPS(gps_coords_stack, gps_n, gps_s)

	gps.start()

	# Get the first position
	z = gps.getPosition()

	dt = 0.05
	range_std = 5. # Means meters

	# Instantiate the filter
	filterk = ExtendedKalmanFilter(2, 1, 0) # 1 type of value of position, but in 2 dimensions. sensor provides position in (x,y) so use 2

	# Insert first position
	filterk.x = array(z)
	# Pretty sure this sets up the taylor series
	filterk.F = eye(2) + array([[0,1], [0,0]])*dt
	# Sets the uncertainty
	filterk.R = np.diag([range_std**2])
	# Trains it using white noise?
	filterk.Q[0:2, 0:2] = Q_discrete_white_noise(2, dt=dt, var=0.1)
	filterk.Q[2, 2] = 0.1
	# Covariance matrix
	filterk.P *= 50

	for i in range(10):
		# Pull a value from the GPS stack
		gps_n.acquire()
		gps_s.acquire()
		result = gps_coords_stack.pop()
		gps_s.release()

		# Put new z value in
		filterk.predict_update(array(result), HJacobian_at, hx)  # this may need to be formatted differently; otherwise just put the longitude and latitude as an array [x, y]

		# Get the predicted value
		np.append(xs, filterk.x)
		print(filterk.x)
Code Example #51
File: subprocess.py  Project: ZSAIm/EventDriven
class QueueifyPipeConnection:
    """ 管道Pipe队列化的连接器。
    主要是为了实现与队列使用方法一致的接口。
    """
    __slots__ = '_p1', '_p2', '_empty', '_unfinished_tasks', '_lock'

    def __init__(self, p1, p2):
        self._p1 = p1
        self._p2 = p2
        # Because this count must be synchronized across processes, a process
        # semaphore is the only way to share it; here it simply counts the
        # difference between put() and get() calls.
        self._unfinished_tasks = Semaphore(0)
        # Empty-queue event; this is used to implement join().
        self._empty = ProcessEvent()
        self._lock = ProcessLock()

    def put(self, value):
        with self._lock:
            # queued-item count +1
            self._unfinished_tasks.release()
            self._empty.clear()
        self._p1.send(value)

    def get(self):
        return self._p2.recv()

    def task_done(self):
        """ 完成一项任务。"""
        with self._lock:
            # queued-item count -1
            self._unfinished_tasks.acquire(False)
            # To support join(): if the queue count is zero after this get, set the empty event.
            if not self._unfinished_tasks.get_value():
                self._empty.set()

    def join(self):
        """ 等待队列化管道被取完。"""
        self._empty.wait()

    def empty(self):
        """ 返回队列未完成任务是否为空。"""
        return self._empty.is_set()
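A minimal usage sketch, assuming ProcessEvent and ProcessLock are aliases for multiprocessing.Event and multiprocessing.Lock and that the caller creates the two Pipe ends:

from multiprocessing import Pipe

p1, p2 = Pipe()
conn = QueueifyPipeConnection(p1, p2)

conn.put('hello')
print(conn.get())   # 'hello'
conn.task_done()    # the count drops back to zero, so the empty event is set
conn.join()         # returns immediately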
Code Example #52
File: abstract_network.py  Project: krohan100/pydssim
 def removePeer(self, peer):
     
     flag = True
     
     if not self.__layout.has_key(peer.getPID()):
         return False
     
     semaphore = Semaphore()
     semaphore.acquire()
     
     '''
     Could this deadlock, since one semaphore is being acquired inside another?
     '''
     
     
     del self.__layout[peer.getPID()]
    
     
     flag = not self.__layout.has_key(peer.getPID())
     semaphore.release()
     
     return flag
Code Example #53
 def connect(self, priority):
     sem = Semaphore()
     sem.acquire()
     if self.getPeer().isConnected():
         return
     network = self.getPeer().getNetwork()
     
     node = None
     if network.countNodes() > 0:
         idx = randint(0, network.countNodes() - 1)
         graph = network.getGraph()
         node = graph.keys()[idx]
     
     network.addNode(self.getPeer())
     if node:
         network.createConnection(self.getPeer(), node)
         
     self.getPeer().connected()
     # random time before disconnecting
     disconnectionTime = randint(3600, 28800)
     self.getPeer().setDisconnectionTime(disconnectionTime)
     
     sem.release()
Code Example #54
File: _helpers.py  Project: wtsi-hgi/cookie-monster
def add_data_files(source: SynchronisedFilesDataSource, data_files: Sequence[str]):
    """
    Copies the given data files to the folder monitored by the given synchronised files data source. Blocks until
    all the files have been processed by the data source. Assumes all data files register one item of data.
    :param source: the data source monitoring a folder
    :param data_files: the data files to copy
    """
    load_semaphore = Semaphore(0)

    def on_load(change: FileSystemChange):
        if change == FileSystemChange.CREATE:
            load_semaphore.release()

    source.add_listener(on_load)

    for data_file in data_files:
        shutil.copy(data_file, source._directory_location)

    loaded = 0
    while loaded != len(data_files):
        load_semaphore.acquire()
        loaded += 1

    source.remove_listener(on_load)
Code Example #55
File: processes.py  Project: zielmicha/satori
class EventMasterProcess(SatoriProcess):
    def __init__(self):
        super(EventMasterProcess, self).__init__('event master')
        self.sem = Semaphore(0)

    def do_run(self):
        listener = Listener(address=(settings.EVENT_HOST, settings.EVENT_PORT))
        master = Master(mapper=TrivialMapper())
        master.listen(listener)
        self.sem.release()
        master.run()

    def start(self, *args, **kwargs):
        super(EventMasterProcess, self).start(*args, **kwargs)
        while True:
            if self.sem.acquire(False):
                return
            if not self.is_alive():
                raise RuntimeError('Event master failed to start')
            sleep(0)
Code Example #56
File: multiproc.py  Project: Exteris/spack
class Barrier:
    """Simple reusable semaphore barrier.

    Python 2.6 doesn't have multiprocessing barriers so we implement this.

    See http://greenteapress.com/semaphores/downey08semaphores.pdf, p. 41.
    """
    def __init__(self, n, timeout=None):
        self.n = n
        self.to = timeout
        self.count = Value('i', 0)
        self.mutex = Semaphore(1)
        self.turnstile1 = Semaphore(0)
        self.turnstile2 = Semaphore(1)


    def wait(self):
        if not self.mutex.acquire(timeout=self.to):
            raise BarrierTimeoutError()
        self.count.value += 1
        if self.count.value == self.n:
            if not self.turnstile2.acquire(timeout=self.to):
                raise BarrierTimeoutError()
            self.turnstile1.release()
        self.mutex.release()

        if not self.turnstile1.acquire(timeout=self.to):
            raise BarrierTimeoutError()
        self.turnstile1.release()

        if not self.mutex.acquire(timeout=self.to):
            raise BarrierTimeoutError()
        self.count.value -= 1
        if self.count.value == 0:
            if not self.turnstile1.acquire(timeout=self.to):
                raise BarrierTimeoutError()
            self.turnstile2.release()
        self.mutex.release()

        if not self.turnstile2.acquire(timeout=self.to):
            raise BarrierTimeoutError()
        self.turnstile2.release()
Code Example #57
class Barrier(object):
    def __init__(self, total = 2):
        self.waiting = 0
        self.total = total
        self.waitSem = Semaphore()
        self.waitSem.acquire()
        self.mutex = Semaphore()
    
    def sync(self):
        self.mutex.acquire()
        if self.waiting == self.total - 1:
            self.waitSem.release()
        else:
            self.waiting += 1
            self.mutex.release()
            self.waitSem.acquire()
            self.waiting -= 1
            if self.waiting == 0:
                self.mutex.release()
            else:
                self.waitSem.release()
Code Example #58
class PooledProcessMixIn:
    """
A mix-in added by inheritance to any socket server (such as BaseHTTPServer) to provide concurrency through
a pool of forked processes, each having a pool of threads.
    """
    def _handle_request_noblock(self):
        if not getattr(self, '_pool_initialized', False): self._init_pool()
        self._event.clear()
        self._semaphore.release()
        self._event.wait()

    def _real_handle_request_noblock(self):
        try:
            # next line will do self.socket.accept()
            request, client_address = self.get_request()
        except socket.error:
            self._event.set()
            return
        self._event.set()
        if self.verify_request(request, client_address):
            try:
                self.process_request(request, client_address)
                self.shutdown_request(request)
            except:
                self.handle_error(request, client_address)
                self.shutdown_request(request)


    def _init_pool(self):
        self._pool_initialized = True
        self._process_n = getattr(self, '_process_n', max(2, cpu_count()))
        self._thread_n = getattr(self, '_thread_n', 64)
        self._keep_running = Value('i', 1)
        self._shutdown_event = Event()
        self._shutdown_event.clear()
        self._event = Event()
        self._semaphore = Semaphore(1)
        self._semaphore.acquire()
        self._maintain_pool()
    
    def _maintain_pool(self):
        self._processes = []
        for i in range(self._process_n):
            t = Process(target=self._process_loop)
            t.start()
            self._processes.append(t)

    def _process_loop(self):
        threads = []
        for i in range(self._thread_n):
            t = Thread(target=self._thread_loop)
            t.setDaemon(0)
            t.start()
            threads.append(t)
        # we don't need this because they are non-daemon threads
        # but this did not work for me
        # FIXME: replace this with event
        self._shutdown_event.wait()
        #for t in threads: t.join()

    def _thread_loop(self):
        while(self._keep_running.value):
            self._semaphore.acquire() # wait for resource
            self._real_handle_request_noblock()

    def pool_shutdown(self):
        self._keep_running.value = 0
        self._shutdown_event.set()

    def shutdown(self):
        self.pool_shutdown()
        BaseServer.shutdown(self)  # then stop the underlying socketserver loop
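A rough usage sketch, not from the original project: the mix-in combined with Python 3's http.server (the server class name, address and pool sizes below are assumptions, and the mix-in above is assumed importable with its own imports in place). Because the pool is initialised lazily on the first request, no special cooperation in __init__ is needed.

from http.server import HTTPServer, SimpleHTTPRequestHandler

class PooledHTTPServer(PooledProcessMixIn, HTTPServer):
    _process_n = 4    # forked worker processes
    _thread_n = 16    # threads per worker process

if __name__ == '__main__':
    server = PooledHTTPServer(('127.0.0.1', 8000), SimpleHTTPRequestHandler)
    try:
        server.serve_forever()    # accepted requests are dispatched to the pool
    except KeyboardInterrupt:
        server.shutdown()         # pool_shutdown() + BaseServer.shutdown()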
Code example #59
File: part2_1.py Project: qisaw/se730_assignment_1
class Scheduler():

    def __init__(self):
        self.ready_list = []
        self.last_run = None
        self.semaphore = Semaphore(1)

    # Add a process to the ready list, keeping the list sorted by descending priority
    def add_process(self, process):
        '''
        Without the semaphore there is a race condition here.
        Say a process with priority 1 arrives while the ready_list holds five
        processes whose priorities are {10, 8, 8, 6, 4}.  The line
        index = len(self.ready_list) executes and yields index = 5.  If, before
        the next line runs, another process with priority 3 is added, the
        priorities become {10, 8, 8, 6, 4, 3}, but index is still 5, so the new
        process is inserted there instead of at the end, giving
        {10, 8, 8, 6, 4, 1, 3}, which breaks the ordering.  The method must
        therefore be serialised so that only one caller modifies the list at a
        time, which is what the semaphore does.
        '''
        self.semaphore.acquire()
        # default to appending at the end of the list
        index = len(self.ready_list)
        # find the first (leftmost) entry whose priority is lower than the new process's
        for i in range(len(self.ready_list) - 1, -1, -1):
            if self.ready_list[i].priority < process.priority:
                index = i
        # insert the new process there
        self.ready_list.insert(index, process)
        self.semaphore.release()
        return


    def remove_process(self, process):
        # NOTE: unlike add_process, this is not guarded by the semaphore
        self.ready_list.remove(process)

    # Selects the process with the best priority.
    # If more than one have the same priority these are selected in round-robin fashion.
    def select_process(self):
        # return None if the ready list is empty
        if len(self.ready_list) == 0:
            return None

        # otherwise check whether the last-run process is still at the head of the list
        if self.last_run == self.ready_list[0]:
            # make sure the list does not contain only one process
            if len(self.ready_list) > 1:

                # check whether the next process has the same or a higher priority;
                # because ready_list is kept sorted, only the next element needs to be checked
                if self.last_run.priority <= self.ready_list[1].priority:
                    # if so, remove this process and re-insert it, which places it behind
                    # all processes with the same priority level (round-robin)
                    self.remove_process(self.last_run)
                    self.add_process(self.last_run)

        # return the process now at the head of the queue
        self.last_run = self.ready_list[0]
        return self.ready_list[0]

    # Suspends the currently running process by sending it a STOP signal.
    @staticmethod
    def suspend(process):
        os.kill(process.pid, signal.SIGSTOP)

    # Resumes a process by sending it a CONT signal.
    @staticmethod
    def resume(process):
        if process.pid: # if the process has a pid it has started
            os.kill(process.pid, signal.SIGCONT)
        else:
            process.run()
    
    def run(self):
        current_process = None
        while True:
            #print('length of ready_list:', len(self.ready_list))
            next_process = self.select_process()
            if next_process is None:  # no more processes
                controller_write.write('terminate\n')
                sys.exit()
            if next_process != current_process:
                if current_process:
                    self.suspend(current_process)
                current_process = next_process
                self.resume(current_process)
            time.sleep(1)
            # need to remove dead processes from the list
            try:
                current_process_finished = (
                    os.waitpid(current_process.pid, os.WNOHANG) != (0, 0)
                )
            except ChildProcessError:
                current_process_finished = True
            if current_process_finished:
                print('remove process', current_process.pid, 'from ready list')
                self.remove_process(current_process)
                current_process = None
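A small sketch of the priority ordering and round-robin selection, using a hypothetical stand-in for the project's process wrapper that stubs only the attributes the scheduler touches (nothing is forked or signalled), and assuming the Scheduler above is importable with its Semaphore import in place:

class FakeProcess:
    def __init__(self, name, priority):
        self.name = name
        self.priority = priority
        self.pid = None

scheduler = Scheduler()
for name, prio in [("a", 8), ("b", 8), ("c", 4)]:
    scheduler.add_process(FakeProcess(name, prio))

# The two priority-8 processes alternate while "c" waits: prints a, b, a, b
for _ in range(4):
    print(scheduler.select_process().name)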
Code example #60
File: server.py Project: oldpeculier/oldpeculier
class PooledProcessMixIn:
    def _handle_request_noblock(self):
        self._event.clear()
        self._semaphore.release()
        self._event.wait()

    def _real_handle_request_noblock(self):
        try:
            # next line will do self.socket.accept()
            request, client_address = self.get_request()
        except socket.error:
            self._event.set()
            return
        self._event.set()
        if self.verify_request(request, client_address):
            try:
                self.process_request(request, client_address)
                self.shutdown_request(request)
            except:
                self.handle_error(request, client_address)
                self.shutdown_request(request)


    def __init__(self):
        self._process_n = getattr(self, '_process_n', max(2, cpu_count()))
        self._thread_n = getattr(self, '_thread_n', 63)
        self._keep_running = Value('i', 1)
        self._event = Event()
        self._semaphore = Semaphore(1)
        self._semaphore.acquire()
        self._maintain_pool()
    
    def _maintain_pool(self):
        self._processes = []
        for i in range(self._process_n):
            t = Process(target=self._process_loop)
            t.start()
            self._processes.append(t)

    def _process_loop(self):
        threads = []
        for i in range(self._thread_n):
            t = Thread(target=self._thread_loop)
            t.daemon = False  # non-daemon: keep the worker process alive while threads run
            t.start()
            threads.append(t)
        # non-daemon threads would keep the process alive on their own, but joining
        # them explicitly keeps this worker process around until its threads exit
        # FIXME: replace the join with a shared shutdown event
        #self._shutdown_event.wait()
        for t in threads: t.join()

    def _thread_loop(self):
        while self._keep_running.value:
            self._semaphore.acquire() # wait for resource
            self._real_handle_request_noblock()

    def shutdown(self, signal=None, frame=None):
        print ""
        for p in self._processes:
            self.logger.info("shutting down process %s" %(p.pid))
            p.terminate()
        exit(0)
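Because this variant's shutdown() accepts (signal, frame), it can be registered directly as a signal handler. Below is a hedged Python 2 sketch (matching the print statement above); the server class, address and logger are assumptions, and it presumes the mix-in above is importable with its own imports in place. Note that the mix-in's __init__ must be called explicitly after the server's, and that shutdown() expects a self.logger.

import signal
import logging
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler

logging.basicConfig(level=logging.INFO)

class PooledHTTPServer(PooledProcessMixIn, HTTPServer):
    def __init__(self, addr, handler):
        HTTPServer.__init__(self, addr, handler)         # bind and listen first
        self.logger = logging.getLogger('pooled-http')   # shutdown() logs through self.logger
        PooledProcessMixIn.__init__(self)                 # then fork the worker pool

server = PooledHTTPServer(('127.0.0.1', 8000), SimpleHTTPRequestHandler)
signal.signal(signal.SIGINT, server.shutdown)   # Ctrl-C terminates the pool and exits
signal.signal(signal.SIGTERM, server.shutdown)
server.serve_forever()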