Example #1
File: multiproc.py Project: Exteris/spack
 def __init__(self, n, timeout=None):
     self.n = n
     self.to = timeout
     self.count = Value('i', 0)
     self.mutex = Semaphore(1)
     self.turnstile1 = Semaphore(0)
     self.turnstile2 = Semaphore(1)
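This constructor looks like the classic reusable double-turnstile barrier. The project's wait() method is not shown above; the sketch below is only an assumption of how such a barrier is typically completed using the mutex, the shared counter, and the two turnstiles (timeout handling omitted for brevity):

from multiprocessing import Semaphore, Value

class Barrier(object):
    def __init__(self, n, timeout=None):
        self.n = n
        self.to = timeout
        self.count = Value('i', 0)
        self.mutex = Semaphore(1)
        self.turnstile1 = Semaphore(0)
        self.turnstile2 = Semaphore(1)

    def wait(self):
        # Phase 1: the last process to arrive locks turnstile2 and opens turnstile1.
        with self.mutex:
            self.count.value += 1
            if self.count.value == self.n:
                self.turnstile2.acquire()
                self.turnstile1.release()
        self.turnstile1.acquire()   # pass through the first turnstile...
        self.turnstile1.release()   # ...and leave it open for the next process

        # Phase 2: the last process to leave re-locks turnstile1 and opens
        # turnstile2, so the barrier can be reused.
        with self.mutex:
            self.count.value -= 1
            if self.count.value == 0:
                self.turnstile1.acquire()
                self.turnstile2.release()
        self.turnstile2.acquire()
        self.turnstile2.release()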
Example #2
    def call_START(self, mock_pid_ptid_mapping=MagicMock(has_key=lambda *args, **kwargs: False)):
        self.Locks = [Lock() for _ in range(0, 3)]
        self.NL_L = Semaphore(2)
        self.free_threads = Semaphore(5)
        mock_proc_inter_instance = MagicMock(free_threads=self.free_threads,
                                             Locks=self.Locks,
                                             NL_L=self.NL_L,
                                             pid_ptid_mapping=MagicMock(
                                                 has_key=lambda *args, **kwargs: False)
                                             )

        with patch("processing_interface.ProcessingInterface.Instance", side_effect=lambda *args, **kwargs: mock_proc_inter_instance):
            LP = LongProcess.LongProcess(self.test_dicoms_dirs,
                                         [os.path.split(dicom_dir)[-1] for dicom_dir in self.test_dicoms_dirs])

        self.__test_Locks()

        LP._LongProcess__START()

        self.__test_Locks()

        print LP._Process__success
        assert(LP._Process__success == "SUCCESS")
        assert(LP._Process__state == STATES.LongFS1)

        return LP
Example #3
 def setCurrentSimulationTime(self, currentSimulationTime):
    
     semaphore = Semaphore()
     semaphore.acquire()
     self.__currentSimulationTime = currentSimulationTime
     semaphore.release()
     return self.__currentSimulationTime
Example #4
 def _readtimestepsbond(self):
     # added on 2018-12-15
     stepatomfiles = {}
     self._mkdir(self.trajatom_dir)
     with Pool(self.nproc, maxtasksperchild=10000) as pool:
         semaphore = Semaphore(360)
         results = pool.imap_unordered(
             self.bonddetector.readatombondtype,
             self._produce(
                 semaphore,
                 enumerate(
                     zip(self.lineiter(self.bonddetector), self.erroriter()
                         ) if self.errorfilename is not None else self.
                     lineiter(self.bonddetector)),
                 (self.errorfilename is not None)), 100)
         nstep = 0
         for d, step in tqdm(results,
                             desc="Read trajectory",
                             unit="timestep"):
             for bondtypebytes, atomids in d.items():
                 bondtype = self._bondtype(bondtypebytes)
                 if bondtype not in self.atombondtype:
                     self.atombondtype.append(bondtype)
                     stepatomfiles[bondtype] = open(
                         os.path.join(self.trajatom_dir,
                                      f'stepatom.{bondtype}'), 'wb')
                 stepatomfiles[bondtype].write(
                     self.listtobytes([step, atomids]))
             semaphore.release()
             nstep += 1
     pool.close()
     self._nstep = nstep
     for stepatomfile in stepatomfiles.values():
         stepatomfile.close()
     pool.join()
Example #5
def block_until_processed(cookie_jar: CookieJar, cookie_paths: Sequence[str],
                          expected_number_of_calls_to_mark_as_complete: int):
    """
    Puts the given cookies into the cookie jar and waits until they have been completed/marked for reprocessing.
    :param cookie_jar: the cookie jar to put cookies to process into
    :param cookie_paths: the cookie paths to process
    :param expected_number_of_calls_to_mark_as_complete: the number of calls expected to the Cookie jar's
    `mark_as_complete` method
    """
    if cookie_jar.queue_length() != 0:
        raise RuntimeError("Already cookies in the jar")

    mark_as_complete_semaphore = Semaphore(0)
    original_mark_as_complete = cookie_jar.mark_as_complete

    def mark_as_complete(path: str):
        mark_as_complete_semaphore.release()
        original_mark_as_complete(path)

    cookie_jar.mark_as_complete = MagicMock(side_effect=mark_as_complete)

    for cookie_path in cookie_paths:
        cookie_jar.mark_for_processing(cookie_path)

    calls_to_mark_as_complete = 0
    while calls_to_mark_as_complete != expected_number_of_calls_to_mark_as_complete:
        mark_as_complete_semaphore.acquire()
        assert cookie_jar.mark_as_complete.call_count <= expected_number_of_calls_to_mark_as_complete
        calls_to_mark_as_complete += 1

    assert calls_to_mark_as_complete == cookie_jar.mark_as_complete.call_count
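The helper above wraps mark_as_complete so that every completion releases a Semaphore(0), and the caller blocks by acquiring it once per expected call. The CookieJar type belongs to the surrounding project; the self-contained sketch below only demonstrates that counting pattern (threads are used instead of processes purely for brevity):

from multiprocessing import Semaphore
from threading import Thread
import time

def wait_for_n_calls(n, fire):
    """Block until the callback handed to `fire` has been invoked n times."""
    done = Semaphore(0)                # starts at 0, so acquire() blocks

    def callback():
        done.release()                 # one release per completed call

    fire(callback)                     # hand the instrumented callback out
    for _ in range(n):
        done.acquire()                 # consume exactly n completions

def producer(callback):
    # stand-in for asynchronous work that eventually calls back three times
    for _ in range(3):
        Thread(target=lambda: (time.sleep(0.1), callback())).start()

wait_for_n_calls(3, producer)
print("all callbacks observed")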
Example #6
    def _init_pool(self):
        """

        Pool initialization

        Worker with parameters:
            - self._process_n (processes pool length)
            - self._thread_n (threads pool length for process)
            - self._daemon (True if daemon threads)
            - self._kill (True if kill main process when shutdown)
            - self._debug (True if debug mode)
            - self._logger (logger)

        """
        self._pool_initialized = True
        self._process_n = getattr(self, '_process_n', max(2, cpu_count()))
        self._thread_n = getattr(self, '_thread_n', 64)
        self._daemon = getattr(self, '_daemon', False)
        self._kill = getattr(self, '_kill', True)
        self._debug = getattr(self, '_debug', False)
        self._logger = getattr(self, '_logger', None)
        self._keep_running = Value('i', 1)
        self._shutdown_event = Event()
        self._shutdown_event.clear()
        self._event = Event()
        self._semaphore = Semaphore(1)
        self._semaphore.acquire()
        self._closed = False
        self._maintain_pool()
Example #7
def identify_similar_regions_for_vntrs_using_blat():
    from multiprocessing import Process, Semaphore, Manager
    reference_vntrs = load_unique_vntrs_data()

    records = []
    for ref_vntr in reference_vntrs:
        record = SeqRecord.SeqRecord('')
        sequence = ref_vntr.left_flanking_region[
            -30:] + ref_vntr.pattern + ref_vntr.right_flanking_region[:30]
        record.seq = Seq.Seq(sequence)
        record.id = str(ref_vntr.id)
        records.append(record)
    vntr_structures_file = 'reference_vntr_structures.fa'
    with open(vntr_structures_file, 'w') as output_handle:
        SeqIO.write(records, output_handle, 'fasta')

    sema = Semaphore(7)
    manager = Manager()
    result_list = manager.list()
    process_list = []
    for ref_vntr in reference_vntrs:
        sema.acquire()
        p = Process(target=find_similar_region_for_vntr,
                    args=(sema, ref_vntr, vntr_structures_file, result_list))
        process_list.append(p)
        p.start()

    for p in process_list:
        p.join()
    result_list = list(result_list)
    with open('similar_vntrs.txt', 'a') as out:
        for vntr_id in result_list:
            out.write('%s\n' % vntr_id)
Example #8
    def __init__(self,
                 dataset,
                 batch_size,
                 batchifier,
                 pool,
                 shuffle=False,
                 use_padding=False,
                 no_semaphore=20):

        # Filtered games
        games = dataset.get_data()
        games = batchifier.filter(games)

        if shuffle:
            random.shuffle(games)

        self.n_examples = len(games)
        self.batch_size = batch_size

        self.n_batches = int(math.ceil(1. * self.n_examples / self.batch_size))
        batch = split_batch(games, batch_size, use_padding)

        # no proc
        # self.it = (batchifier.apply(b )for b in batch)

        # Multi_proc
        self.semaphores = Semaphore(no_semaphore)
        it_batch = sem_iterator(l=batch, sem=self.semaphores)
        self.process_iterator = pool.imap(batchifier.apply, it_batch)
Example #9
 def __init__(self, f=nothing, g=nothing, options=None):
     import os
     from datetime import datetime
     self._result_list = []
     self._experiements = []
     self._penalty_func = []
     self.set_metric_func(f)
     self.set_penatly_funct(g)
     self._evals = 0
     self._options = options
     self.__eval_processes = 1
     self.__evalSem = Semaphore(self.__eval_processes)
     if os.path.exists(
             self._options.dataPath) and (not self._options.noSimulation):
         if self._options.dataPath[-1] == '/':
             newDir = self._options.dataPath[:-1] + "_" + str(
                 datetime.now().strftime('%Y-%m-%d-%M-%S.%f'))
         else:
             newDir = self._options.dataPath + "_" + str(
                 datetime.now().strftime('%Y-%m-%d-%M-%S.%f'))
         print "Moving old simulation data dir: " + str(
             self._options.dataPath) + " to " + newDir
         os.rename(self._options.dataPath, newDir)
     self._post_process_events = []
     # Used to normalize results (make it possible for functions to be converted to minimizations)
     self._control_result = None
Example #10
def relay(semaphore: mp.Semaphore, queue: mp.Queue, output_lock: mp.Lock,
          bmsg: bytes, addr: tuple, relay_dict, recv_time: datetime):
    semaphore.acquire()
    bmsg = bytearray(bmsg)
    header = DNSHeader(bmsg[:12])
    header.aa = 1
    bmsg = header.bmsg + bmsg[12:]
    assert header.qdcount == 1
    question = DNSQuestion(bmsg, offset=12)
    with output_lock:
        cprint(f'[{recv_time}][recv query {bytes_to_int(bmsg[:2])}]: {bmsg} from {addr}', fore='green', style='reverse')
        cprint_header(header, fore='green')
        cprint_question(question, fore='green')
    if question.qname in relay_dict:
        if relay_dict[question.qname] == '0.0.0.0':
            header.rcode = 3
            answer = header.bmsg + bmsg[12:]
            mode = 'intercept  '
        elif question.qtype == 1:
            answer = fake_bmsg(bmsg, relay_dict[question.qname])
            mode = 'local resolve '
        else:
            answer = forward(bmsg)
            if answer is None:
                return
            mode = 'relay msg  '
    else:
        answer = forward(bmsg)
        mode = 'relay msg  '
    queue.put((answer, addr, recv_time, mode))
    semaphore.release()
Example #11
class EmbeddingWorker(Process):
    def __init__(self, queue, X, y, transformation_method, embedding_args):
        super().__init__()
        self.pause_lock = Semaphore(value=True)  # lock is free
        self.embedding_args = embedding_args
        self.X = X
        self.y = y
        self.transformation_method = transformation_method
        self.queue = queue

    def callback(self, command, iteration, payload):
        # pausing acquires pause_lock and the following code only runs if
        # pause_lock is free
        with self.pause_lock:
            self.queue.put((command, iteration, payload))

    def run(self):
        self.transformation_method(self.X, self.y, self.embedding_args,
                                   self.callback)

    def pause(self):
        self.pause_lock.acquire()

    def resume(self):
        self.pause_lock.release()

    def is_paused(self):
        return not self.pause_lock.get_value()
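The worker above treats a one-slot Semaphore as a pause switch: callbacks run inside `with self.pause_lock`, so holding the semaphore from the parent stalls the worker at its next callback. A minimal self-contained sketch of that pattern (not the project's code; the worker body is a stand-in):

from multiprocessing import Process, Queue, Semaphore
import time

def worker(pause_lock, queue):
    for i in range(5):
        with pause_lock:        # blocks here while the parent holds the lock
            queue.put(i)
        time.sleep(0.1)

if __name__ == "__main__":
    pause_lock = Semaphore(1)   # free slot = running, held = paused
    q = Queue()
    p = Process(target=worker, args=(pause_lock, q))
    p.start()
    time.sleep(0.15)
    pause_lock.acquire()        # pause: the worker stalls at its next callback
    time.sleep(0.3)
    pause_lock.release()        # resume
    results = [q.get() for _ in range(5)]
    p.join()
    print(results)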
Example #12
File: benchmark.py Project: domoritz/SoSAT
def run():
    algo = parameters["algo"]
    files = [open(x) for x in parameters["files"]]
    configs = []
    p = parameters["params"]
    max_processes = 3
    semaphore = Semaphore(max_processes)

    # generate configurations as combination of possible
    # keys and product of values
    for keys in it.combinations(p.keys(), len(p.keys())):
        v = [p[k] for k in keys]
        for values in it.product(*v):
            config = {}
            for i, k in enumerate(keys):
                config[k] = values[i]
            configs.append(config)
    for f in files:
        for conf in configs:
            config = {"FILENAME": f.name}
            config.update(conf)

            f.seek(0)
            num_vars, clauses = parser.parse(f)

            p = MyProcess(target=run_algorithm, args=(algo, num_vars, clauses, config, semaphore))

            semaphore.acquire()
            p.start()
Example #13
    def run(self, tasks, build_config, parallel_threads):
        semaphore = Semaphore(parallel_threads)
        process_finished_notify = Condition(Lock())
        while tasks.count_buildable_tasks() > 0:
            task = tasks.get_next()

            if task is None:
                self.wait_tasks_to_complete(parallel_threads, process_finished_notify, semaphore)
                continue

            semaphore.acquire()
            task.state = Task.State.RUNNING
            logging.debug("Starting task %s", task.name)
            self.start_new_process(process_finished_notify, semaphore, self.process_job, task, build_config)

        self.wait_tasks_to_complete(parallel_threads, process_finished_notify, semaphore)

        if tasks.count(Task.State.FAILED) > 0:
            logging.error('Some packages failed to build.')
            logging.error("  %s", tasks.print_name(Task.State.FAILED))
            return 1
        if tasks.count(Task.State.RUNNING) > 0:
            logging.error('Something went wrong, there are still some running tasks.')
            return 1
        if tasks.count(Task.State.NEW) > 0:
            logging.error('Something went wrong, there are still unprocessed tasks.')
            return 1

        logging.info("Build completed successfully.")
        return 0
Example #14
 def getNeighbors(self, peer):
             
     semaphore = Semaphore()
     semaphore.acquire()
     neighbors = self.__layout[peer.getId()].getNeighbors()
     semaphore.release()
     return neighbors
Example #15
class Thread_Pool_Manager(object):
    def __init__(self, thread_num=cpu_count()):
        self.thread_num = thread_num
        print(thread_num)
        self.work_queue = JoinableQueue()
        self.work_num = Semaphore(0)
        self.mutex = Lock()

    def start_threads(self):
        for i in range(self.thread_num):
            thread = Process(target=self.do_job)
            thread.daemon = True  # set thread as daemon
            thread.start()

    def do_job(self):
        global Numbers
        while True:
            # print(1)
            self.work_num.acquire()
            with self.mutex:
                print(1, self.work_queue.qsize())
                thread_job = self.work_queue.get()
                print(0, self.work_queue.qsize())
            thread_job.do_job(self.work_queue, self.work_num)
            print(self.work_queue.qsize())
            self.work_queue.task_done()

    def join(self):
        self.work_queue.join()

    def add_job(self, job):
        self.work_queue.put(job)
        self.work_num.release()
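Here `work_num` is a counting semaphore that mirrors the queue length: `add_job` releases it and each worker acquires it before taking an item. A hypothetical driver for the pool above (the PrintJob class and its `do_job(work_queue, work_num)` signature are assumptions inferred from the `thread_job.do_job(...)` call above):

class PrintJob:
    def __init__(self, n):
        self.n = n

    def do_job(self, work_queue, work_num):
        # a real job could enqueue follow-up work via work_queue/work_num
        print("processed", self.n)

if __name__ == "__main__":
    pool = Thread_Pool_Manager(thread_num=4)
    pool.start_threads()
    for n in range(10):
        pool.add_job(PrintJob(n))
    pool.join()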
Example #16
    def get_spanning_reads_of_aligned_pacbio_reads(self, alignment_file):
        sema = Semaphore(settings.CORES)
        manager = Manager()
        length_distribution = manager.list()
        mapped_spanning_reads = manager.list()

        vntr_start = self.reference_vntr.start_point
        vntr_end = self.reference_vntr.start_point + self.reference_vntr.get_length(
        )
        region_start = vntr_start
        region_end = vntr_end
        read_mode = 'r' if alignment_file.endswith('sam') else 'rb'
        samfile = pysam.AlignmentFile(alignment_file, read_mode)
        reference = get_reference_genome_of_alignment_file(samfile)
        chromosome = self.reference_vntr.chromosome if reference == 'HG19' else self.reference_vntr.chromosome[
            3:]
        process_list = []
        for read in samfile.fetch(chromosome, region_start, region_end):
            sema.acquire()
            p = Process(target=self.check_if_pacbio_read_spans_vntr,
                        args=(sema, read, length_distribution,
                              mapped_spanning_reads))
            process_list.append(p)
            p.start()

        for p in process_list:
            p.join()

        logging.info('length_distribution of mapped spanning reads: %s' %
                     list(length_distribution))
        return list(mapped_spanning_reads)
Example #17
    def __init__(self, dataset, batch_size, batchifier, pool,
                 shuffle= False, use_padding = False, no_semaphore= 20):
                 
        print("----------------- Iterator",batch_size)

        # Filtered games
        games = dataset.get_data()
        # print("games = {}".format(games))
        # print("dataset = {} ".format(dataset))

        # exit()

        games = batchifier.filter(games)

        if shuffle:
            random.shuffle(games)

        self.n_examples = len(games)
        self.batch_size = batch_size

        self.n_batches = int(math.ceil(1. * self.n_examples / self.batch_size))
        batch = split_batch(games, batch_size, use_padding)
        
        print("++++ Iterator | n_examples = {},batch_size={},n_batches={}".format(self.n_examples,self.batch_size,self.n_batches))
        print("shape({},{})".format(len(batch),len(batch[0])))
        # no proc
        # self.it = (batchifier.apply(b )for b in batch)

        # Multi_proc
        self.semaphores = Semaphore(no_semaphore)
        it_batch = sem_iterator(l=batch, sem=self.semaphores)
        self.process_iterator = pool.imap(batchifier.apply, it_batch)
Example #18
def identify_similar_regions_for_vntrs_using_blat():
    from multiprocessing import Process, Semaphore, Manager

    reference_vntrs = load_unique_vntrs_data()
    sema = Semaphore(24)
    manager = Manager()
    result_list = manager.list()
    process_list = []
    # os.system('cp hg19_chromosomes/CombinedHG19_Reference.fa /tmp/CombinedHG19_Reference.fa')
    for i in range(len(reference_vntrs)):
        if not reference_vntrs[i].is_non_overlapping(
        ) or reference_vntrs[i].has_homologous_vntr():
            continue
        sema.acquire()
        p = Process(target=find_similar_region_for_vntr,
                    args=(sema, reference_vntrs[i], i, result_list))
        process_list.append(p)
        p.start()

    for p in process_list:
        p.join()
    result_list = list(result_list)
    with open('similar_vntrs.txt', 'a') as out:
        for vntr_id in result_list:
            out.write('%s\n' % vntr_id)
Example #19
    def __init__(self, no_cuda, model_dir, model_file, decider, threshold,
                 entities, decider_processes, classifier_processes,
                 batch_size):

        self._process_queue = []
        self._process_queue_sem = Semaphore(0)
        self._main_sem = Semaphore(1)

        self._no_cuda = no_cuda
        self._model_dir = model_dir
        self._model_file = model_file

        self._decider = decider
        self._threshold = threshold

        self._entities = entities
        self._decider_processes = decider_processes
        self._classifier_processes = classifier_processes
        self._batch_size = batch_size

        self._rank_intervalls = np.linspace(0.001, 0.1, 100)
        self._quantiles = np.linspace(0.1, 1, 10)
        self._return_full = False

        self._sequence = self.process_sequence()
Example #20
    def start(self):
        if hasattr(self.__generator, 'init'):
            self.__generator.init()
        self.__status.value = True
        self.__inputSM = {
            "q": Queue(),
            "emptySemaphore": Semaphore(value=0),
            "fullSemaphore": Semaphore(value=self.__size),
            "mutex": Lock()
        }
        self.__batchSM = {
            "q": Queue(),
            "emptySemaphore": Semaphore(value=0),
            "fullSemaphore": Semaphore(value=self.__size),
            "mutex": Lock()
        }

        childCount = self.__childCount if self.__childCount != -1 else cpu_count(
        )
        self.__childProcess = []
        self.__mProcess = Process(target=self.monitor,
                                  args=(self.__status, self.__inputSM))
        self.__mProcess.start()
        for i in range(childCount):
            p = Process(target=self.loadBatch,
                        args=(self.__status, self.__inputSM, self.__batchSM))
            p.start()
            self.__childProcess.append(p)
Example #21
    def start(self, test_q: JoinableQueue, result_q: Queue) -> None:
        """
        Start all worker processes

        :return: None
        """
        local_test_q = JoinableQueue()

        self._node_manager = Node.Manager(as_main=True,
                                          port=self.__class__._node_port)
        start_sem = Semaphore(self._max_simultaneous_connections)
        # This will be used to throttle the number of connections made when making distributed calls to get
        # node-level and global-level fixtures; otherwise multiprocessing can hang on these calls if
        # overwhelmed
        fixture_sem = Semaphore(self._max_simultaneous_connections)
        for index in range(self._num_processes):
            proc = WorkerSession.start(
                index,
                self._host,
                self._port,
                start_sem,
                fixture_sem,
                local_test_q,
                result_q,
                self._node_port,
            )
            self._worker_procs.append(proc)
            start_sem.release()
        self._test_q_process = Process(target=self._process_test_q,
                                       args=(test_q, local_test_q))
        self._test_q_process.start()
Example #22
    def __init__(self, num_processes=1):
        # Set up sync primitives, to communicate with the spawned children
        self.num_processes = num_processes

        # This semaphore is used as a "worker pool guard" to keep the number
        # of spawned workers in the pool to the specified maximum (and block
        # the .spawn_child() call after that)
        self._semaphore = Semaphore(num_processes)

        # This array of integers represents a slot per worker and holds the
        # actual pids (process ids) of the worker's children.  Initially, the
        # array-of-pids is all zeroes.  When a new child is spawned, the pid
        # is written into the slot.  When a child finishes, it resets its own
        # slot to 0 again, effectively freeing up the slot (and allowing new
        # children to be spawned).
        self._pids = Array('i', [0] * num_processes)

        # This array of integers also represents a slot per worker and also
        # holds the actual pids of the worker's children.  The difference with
        # _pids, however, is that this array's slots don't get reset
        # immediately when the children end.  In order for Unix subprocesses
        # to actually disappear from the process list (and freeing up the
        # memory), they need to be waitpid()'ed for by the parent process.
        # When each new child is spawned, it waitpid()'s for the (finished)
        # child that was previously in that slot before it claims the new
        # slot.  This mainly avoids ever-growing process lists and slowly
        # growing the memory footprint.
        self._waitfor = Array('i', [0] * num_processes)

        # This array of booleans represents workers that are in their idle
        # state (i.e. they are waiting for work).  During this time, it is
        # safe to terminate them when the user requests so.  Once they start
        # processing work, they flip their idle state and won't be terminated
        # while they're still doing work.
        self._idle = Array('b', [False] * num_processes)
Example #23
    def __init__(self, obu_idx, total_obu_num, GLOBAL_ID_OBU_i_PK_i_map):
        super().__init__()
        self.ID_OBU_i = get_ID_obu_i(obu_idx)
        self.total_num = total_obu_num
        self.idx = str(obu_idx)

        self.logger = get_logger("OBU_" + self.idx)

        # ========= generate attributes =========
        self.r_i = random.randint(1, self.P)
        self.x_i, self.y_i, self.PK_i = get_x_i_y_i_PK_i(
            self.P, self.ID_OBU_i, self.s)
        GLOBAL_ID_OBU_i_PK_i_map[self.ID_OBU_i] = self.PK_i
        self.GLOBAL_ID_OBU_i_PK_i_map = GLOBAL_ID_OBU_i_PK_i_map
        self.R_i = self.r_i * self.P
        self.Ri_map = dict()
        self.R = 0
        self.logger.debug("x_i: {}, y_i: {}, PK_i: {}, R_i: {}".format(
            self.x_i, self.y_i, self.PK_i, self.R_i))

        # ========= semaphore definitions =======
        self.sem_r_receive = Semaphore(0)
        self.sem_rsp_msg = Semaphore(0)

        # ========= network setup ========
        self.send_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.send_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        self.send_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)

        # ========= response message =========
        self.rsp_msg = None
Example #24
def prog_proc(cam_index: int, running: Value):
    shm_arr_dim = (1080, 1920, 3)
    image_dim = (IMG_SIZE[1], IMG_SIZE[0], 3)
    image_processors = list()
    sems1 = list()
    sems2 = list()
    sems3 = list()
    shm_arrs = list()
    shm_ovl_arrs = list()
    out_name = profile_outdir + "test_shm_proc_" + str(cam_index) + ".prof"
    pflr = cProfile.Profile()
    pflr.enable()
    for i in range(NUM_CAM_PROCS):
        sems1.append(Semaphore(0))
        sems2.append(Semaphore(0))
        sems3.append(Semaphore(1))
        shm_ovl_arrs.append(Array(c_char, BYTESTR_SIZE))
        shm_arrs.append(
            Array('i', shm_arr_dim[0] * shm_arr_dim[1] * shm_arr_dim[2]))
        image_processors.append(
            Thread(target=image_processor,
                   daemon=True,
                   args=(
                       cam_index,
                       shm_arrs[i],
                       image_dim,
                       sems1[i],
                       sems2[i],
                       shm_ovl_arrs[i],
                       running,
                   )))
        image_processors[i].start()
    cam = Thread(target=cam_reader,
                 daemon=True,
                 args=(
                     cam_index,
                     shm_arrs,
                     image_dim,
                     sems1,
                     sems3,
                     shm_ovl_arrs,
                     running,
                 ))
    cam.start()
    disp = Thread(target=vid_disp,
                  daemon=True,
                  args=(
                      cam_index,
                      shm_arrs,
                      image_dim,
                      sems2,
                      sems3,
                      running,
                  ))
    disp.start()
    while running.value != 0:
        sleep(1)
    sleep(5)
    pflr.disable()
    pflr.dump_stats(out_name)
Example #25
class Budget(object):
    """Device budget (for lack of a better term)

    Details
    -------
    Keeps a semaphore for the total number of resources available,
    and a device-specific atomic counter of available slots.
    """
    def __init__(self, devices, n_per_device):
        self.devices, self.n_per_device = devices, n_per_device
        self.total = Semaphore(len(devices) * n_per_device)
        self.alloc = Array("i", len(devices) * [n_per_device])

    def acquire(self):
        self.total.acquire()
        with self.alloc.get_lock():
            # get the largest counter
            index = max(range(len(self.alloc)), key=self.alloc.__getitem__)
            # assert self.alloc[index] > 0

            # acquire the index and decrease the counter
            self.alloc[index] -= 1
        return index

    def release(self, index):
        with self.alloc.get_lock():
            self.alloc[index] += 1
        self.total.release()

    def __getitem__(self, index):
        return self.devices[index]
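A hypothetical driver for Budget, showing the intended round trip: a worker borrows the device index with the most free slots, uses that device, and returns the slot (do_work and the device names are illustrative, not project code):

from multiprocessing import Process

def do_work(budget, job):
    index = budget.acquire()            # blocks while every slot is taken
    try:
        print(f"job {job} running on {budget[index]}")
    finally:
        budget.release(index)           # free the slot for the next worker

if __name__ == "__main__":
    budget = Budget(devices=["cuda:0", "cuda:1"], n_per_device=2)
    procs = [Process(target=do_work, args=(budget, j)) for j in range(8)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()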
Example #26
    def __init__(self, *args, **kwargs):
        self.url = kwargs.get("url")

        if not self.url:
            raise Exception("No URL to gather")

        self.max_depth = kwargs.get("depth", 1)
        self.workers = kwargs.get("workers", 1)
        self.max_errors = kwargs.get("acceptable_errors", None)

        self.out = kwargs.get("out", "/tmp/")
        if not self.out.endswith("/"):
            self.out += "/"
        self.out += "url_gather/"
        if not os.path.exists(self.out):
            os.makedirs(self.out)

        self.collector_file = kwargs.get("collector_file")
        self.collector_class = kwargs.get("collector_class")
        self._load_collector()
        self._gathered_urls = set()

        # initiate multiprocessing resources
        self._pool = Pool(self.workers)
        self._semaphore = Semaphore(self.workers)
        self._manager = Manager()
        self._url_children = self._manager.dict()
        self._url_errors = self._manager.dict()
        self._url_events = {}
Example #27
class DbWriter(Process):

	def __init__(self, queue, stop_flag):
		super(DbWriter, self).__init__()
		self.worker_control = Semaphore(MAX_WRITER_WORKERS)
		self.result_queue = queue
		self.stop_flag = stop_flag

	def run(self):

		print(" *** DB Writer online")

		while True and self.stop_flag.value != 1:

			if self.worker_control.acquire(False):

				task = self.result_queue.get()

				if task:
					try:
						worker = WriterWorker(task, self.worker_control)
						worker.start()

					except Exception as err:
						print(err)
						print("Invalid task %s" % task)
						self.worker_control.release()
				else:
					self.worker_control.release()

			time.sleep(0.3)

		print("stop flag: %s" % self.stop_flag.value)
Example #28
    def __init__(self, size, max_seq_length, tokenizer,
                 ned_sql_file, entities_file, embeddings, n_trees, distance_measure, entity_index_path, search_k, max_dist,
                 sentence_subset=None, bad_count=10, lookup_processes=0, pairing_processes=0):

        self._size = size
        self._max_seq_length = max_seq_length
        self._tokenizer = tokenizer
        self._ned_sql_file = ned_sql_file
        self._entities_file = entities_file
        self._entities = pd.read_pickle(entities_file)
        self._embeddings = embeddings
        self._n_trees = n_trees
        self._distance_measure = distance_measure
        self._entity_index_path = entity_index_path
        self._search_k = search_k
        self._max_dist = max_dist
        self._sentence_subset = sentence_subset

        self._bad_count = bad_count
        self._max_bad_count = 50

        self._sequence = self.get_features()
        self._counter = 0
        self._lookup_processes = lookup_processes
        self._pairing_processes = pairing_processes
        self._lookup_sem = Semaphore(100)
        self._convert_sem = Semaphore(1000)
Example #29
def main():
    tic = time.perf_counter()
    parser = make_parser()
    parse(parser.parse_args())

    # os.chdir(source)
    get_files()
    match_files()
    pages = list(chunks(matches, lines))
    pages = name_pages(pages)
    i = 0
    processes = []
    concurrency = thread_count
    print("Currently using " + str(concurrency) + " Thread(s)")
    sema = Semaphore(concurrency)
    for page in pages:
        sema.acquire()
        progress(i + 1, len(pages) * 2, "Processing page " + str(i + 1) + " of " + str(len(pages)))
        process = multiprocessing.Process(target=make_page, args=(page, sema,))
        processes.append(process)
        process.start()
        i += 1

    for process in processes:
        progress(i + 1, len(pages) * 2, "Finishing page " + str((i + 1) - len(pages)) + " of " + str(len(pages)))
        process.join()
        i += 1

    toc = time.perf_counter()
    print(f"\nFinished merging in {toc - tic:0.4f} seconds")
    print("\nPages have been stored at ", dest)
Example #30
 def _img_processor(self, sh_img_arr: Array, sem1: Semaphore,
                    sem2: Semaphore, ovl_arr: Array) -> None:
     """
     Process images as needed.
     :param sh_img_arr: The array containing the frame to work with.
     :param sem1: The entrance lock.
     :param sem2: The exit lock.
     :param ovl_arr: The array containing the overlay work with.
     :return None:
     """
     img_dim = (EDIT_HEIGHT, self._cur_arr_shape[1], self._cur_arr_shape[2])
     img_size = int(EDIT_HEIGHT * img_dim[1] * img_dim[2])
     img_arr = frombuffer(sh_img_arr.get_obj(), count=img_size,
                          dtype=DTYPE).reshape(img_dim)
     while self._process_imgs:
         sem1.acquire()
         if self._use_overlay:
             img_pil = Image.fromarray(img_arr)
             draw = ImageDraw.Draw(img_pil)
             draw.text(OVL_POS,
                       text=ovl_arr.value.decode(),
                       font=OVL_FONT,
                       fill=OVL_CLR)
             processed_img = asarray(img_pil)
             copyto(img_arr, processed_img)
         sem2.release()
Example #31
File: tasks.py Project: vmarekrh/pnc-cli
    def run(self, tasks, build_config, parallel_threads):
        semaphore = Semaphore(parallel_threads)
        process_finished_notify = Condition(Lock())
        while tasks.count_buildable_tasks() > 0:
            task = tasks.get_next()

            if task is None:
                self.wait_tasks_to_complete(parallel_threads,
                                            process_finished_notify, semaphore)
                continue

            semaphore.acquire()
            task.state = Task.State.RUNNING
            logging.debug("Starting task %s", task.name)
            self.start_new_process(process_finished_notify, semaphore,
                                   self.process_job, task, build_config)

        self.wait_tasks_to_complete(parallel_threads, process_finished_notify,
                                    semaphore)

        if tasks.count(Task.State.FAILED) > 0:
            logging.error('Some packages failed to build.')
            logging.error("  %s", tasks.print_name(Task.State.FAILED))
            return 1
        if tasks.count(Task.State.RUNNING) > 0:
            logging.error(
                'Something went wrong, there are still some running tasks.')
            return 1
        if tasks.count(Task.State.NEW) > 0:
            logging.error(
                'Something went wrong, there are still unprocessed tasks.')
            return 1

        logging.info("Build completed successfully.")
        return 0
Example #32
    def _spawn(self):
        """
        Spawn spider processes.
        """

        # create a unique result file name
        def create_result_file_name(dd):
            clean_domain = "".join(c if c in string.ascii_letters +
                                   string.digits else "_" for c in dd)
            return clean_domain + time.strftime("__%Y%m%d_%H%M%S.txt")

        self._sema = Semaphore(self.max_processes)

        # the spiders can crawl independently and have no common resources
        for u in self._urls:
            d = urlsplit(u).netloc.lower()
            s = Spider(u, d, self.limit, self.limit_param,
                       os.path.join(os.getcwd(), create_result_file_name(d)),
                       self.max_threads, self._sema, self.verbose)

            p = Process(target=Spider.crawl, args=(s, ))
            self._sema.acquire(True)  # acquire semaphore for next spider
            p.start()

            if self.verbose:
                print("[+] Spawned spider for: {}".format(u))
Example #33
 def getPeerID(self, peerId):
     
     semaphore = Semaphore()
     semaphore.acquire()
     peer = self.__layout[peerId]
     semaphore.release()
     return peer
Example #34
def run():
    algo = parameters['algo']
    files = [open(x) for x in parameters['files']]
    configs = []
    p = parameters['params']
    max_processes = 3
    semaphore = Semaphore(max_processes)

    # generate configurations as combination of possible
    # keys and product of values
    for keys in it.combinations(p.keys(), len(p.keys())):
        v = [p[k] for k in keys]
        for values in it.product(*v):
            config = {}
            for i, k in enumerate(keys):
                config[k] = values[i]
            configs.append(config)
    for f in files:
        for conf in configs:
            config = {'FILENAME': f.name}
            config.update(conf)

            f.seek(0)
            num_vars, clauses = parser.parse(f)

            p = MyProcess(target=run_algorithm,
                          args=(algo, num_vars, clauses, config, semaphore))

            semaphore.acquire()
            p.start()
Example #35
def launch_workers(outfile, start_index, end_index, score_flag, force, verbose):
	BASE_URL = "http://www.ign.com/games/all-ajax?startIndex="
	
	
	# Synchronization Tools
	num_workers = Semaphore(MAX_NUM_PROCESSES)
	outfile_lock = Lock()
	urlopen_lock = Lock()
	stderr_lock = Lock()
	print_lock = Lock()
	
	# Write the categories
	if (outfile != None):
		outfile.write("title,link,platform,publisher,score,date\n")

	# Launch the workers
	processes = []
	curr_index = start_index;
	while curr_index <= end_index:
		curr_url = BASE_URL + str(curr_index)
	 	worker = Process(target=open_url_and_parse,
	 		args=(outfile, curr_url, score_flag, force, verbose,
	 			outfile_lock, urlopen_lock, stderr_lock, print_lock,
	 			num_workers))
	 	processes.append(worker)
	 	if verbose:
			print_lock.acquire()
			print "Launching worker for url: %s" % curr_url
			print_lock.release()
	 	num_workers.acquire()
	 	worker.start()
	 	curr_index += INDEX_INCREMENT; 
	for p in processes:
	 	p.join()
Example #36
    def _get_all(embeddings,
                 data_sequence,
                 start_iteration,
                 ent_type,
                 w_size,
                 batch_size,
                 processes,
                 evalutation_semaphore=None):

        # The embed semaphore makes sure that the EmbedWithContext will not overproduce results in relation
        # to the LookUpBySurfaceAndContext creation
        embed_semaphore = Semaphore(100)

        for it, link_result in \
                enumerate(
                    EmbedWithContext.run(embeddings, data_sequence, ent_type, w_size, batch_size,
                                         processes, embed_semaphore, start_iteration=start_iteration)):
            try:
                if evalutation_semaphore is not None:
                    evalutation_semaphore.acquire(timeout=10)

                yield LookUpBySurfaceAndContext(link_result)

            except Exception as ex:
                print(type(ex))
                print("Error: ", link_result)
                raise

            if it % batch_size == 0:
                embed_semaphore.release()
Example #37
def _build_single_scenario_proc(clean: bool, allow_offset_map: bool,
                                scenario: str, semaphore: Semaphore):
    semaphore.acquire()
    try:
        _build_single_scenario(clean, allow_offset_map, scenario)
    finally:
        semaphore.release()
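The worker above acquires the shared semaphore before building and releases it in the `finally` block, so the semaphore's initial value caps how many builds run at once. A hypothetical parent loop that would pair with it (the build_all name and the max_parallel default are assumptions):

from multiprocessing import Process, Semaphore

def build_all(scenarios, clean=False, allow_offset_map=False, max_parallel=4):
    semaphore = Semaphore(max_parallel)     # at most max_parallel builds at a time
    procs = []
    for scenario in scenarios:
        p = Process(target=_build_single_scenario_proc,
                    args=(clean, allow_offset_map, scenario, semaphore))
        procs.append(p)
        p.start()
    for p in procs:
        p.join()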
Example #38
    def serve(self):
        """Start workers and put into queue"""
        # this is a shared state that can tell the workers to exit when False
        self.isRunning.value = True

        # first bind and listen to the port
        self.serverTransport.listen()

        # fork the children
        semaphore = Semaphore(0)
        for _ in range(self.numWorkers):
            try:
                w = Process(target=self.workerProcess, args=(semaphore, ))
                w.daemon = True
                w.start()
                self.workers.append(w)
            except Exception as x:
                logger.exception(x)

        # wait until all workers init finish
        for _ in range(self.numWorkers):
            semaphore.acquire()

        # wait until the condition is set by stop()
        while True:
            try:
                gevent.sleep(1)
                if not self.isRunning.value:
                    break
            except (SystemExit, KeyboardInterrupt):
                break
            except Exception as x:
                logger.exception(x)

        self.isRunning.value = False
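The Semaphore(0) here acts as a start-up rendezvous: each worker is expected to release it once its initialization finishes, and the parent acquires it numWorkers times before considering the pool ready. A minimal self-contained sketch of that handshake (not the server's actual workerProcess):

from multiprocessing import Process, Semaphore
import time

def worker(ready, worker_id):
    time.sleep(0.2)             # stand-in for per-worker initialization
    ready.release()             # signal the parent that this worker is ready
    # ... the real worker would start serving requests here ...

if __name__ == "__main__":
    num_workers = 4
    ready = Semaphore(0)
    procs = [Process(target=worker, args=(ready, i), daemon=True)
             for i in range(num_workers)]
    for p in procs:
        p.start()
    for _ in range(num_workers):
        ready.acquire()         # returns only once every worker has signalled
    print("all workers initialized")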
Example #39
def _spawnmain(
    testid: TestId,
    exts: List[str],
    # pyre-fixme[11]: Annotation `Semaphore` is not defined as a type.
    sem: multiprocessing.Semaphore,
    resultqueue: multiprocessing.Queue,
):
    """run a test and report progress back
    intended to be spawned via multiprocessing.Process.
    """

    hasmismatch = False

    def mismatchcb(mismatch: Mismatch):
        nonlocal hasmismatch
        hasmismatch = True
        mismatch.testname = testid.name
        resultqueue.put(mismatch)

    result = TestResult(testid=testid)
    try:
        runtest(testid, exts, mismatchcb)
    except TestNotFoundError as e:
        result.exc = e
    except Exception as e:
        result.exc = e
        result.tb = traceback.format_exc(limit=-1)
    finally:
        if result.exc is None and hasmismatch:
            result.exc = MismatchError("output mismatch")
        resultqueue.put(result)
        resultqueue.close()
        sem.release()
Example #40
 def countNeighbors(self, peer):
                    
     semaphore = Semaphore()
     semaphore.acquire()
     
     count = peer.countNeighbors()
     semaphore.release()
     return count
Example #41
 def getNeighborIt(self, peer):
             
     semaphore = Semaphore()
     semaphore.acquire()
     neighbors = []
     for neighbor in self.__layout[peer.getId()].getNeighbors():
         neighbors.append(neighbor.getTargetPeer())
     neighborIt = neighbors.__iter__()
     semaphore.release()
     return neighborIt
Example #42
 def addPeer(self, peer):
     
     if self.__layout.has_key(peer.getPID()):
         return False
     
     semaphore = Semaphore()
     semaphore.acquire()
     
     self.__layout[peer.getPID()] = peer
     semaphore.release()
     NetworkLogger().resgiterLoggingInfo("Add peer %s in Layout Network "%(peer.getPID()))
     return self.__layout.has_key(peer.getPID())
Example #43
class ForkingWorker(BaseWorker):

    def __init__(self, num_processes=1):
        # Set up sync primitives, to communicate with the spawned children
        self._semaphore = Semaphore(num_processes)
        self._slots = Array('i', [0] * num_processes)

    def spawn_child(self):
        """Forks and executes the job."""
        self._semaphore.acquire()    # responsible for the blocking

        # Select an empty slot from self._slots (the first 0 value is picked)
        # The implementation guarantees there will always be at least one empty slot
        for slot, value in enumerate(self._slots):
            if value == 0:
                break

        # The usual hardcore forking action
        child_pid = os.fork()
        if child_pid == 0:
            # Within child

            # Disable signal handlers
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            signal.signal(signal.SIGTERM, signal.SIG_IGN)

            random.seed()
            try:
                self.fake_work()
            finally:
                # This is the new stuff.  Remember, we're in the child process
                # currently. When all work is done here, free up the current
                # slot (by writing a 0 in the slot position).  This
                # communicates to the parent that the current child has died
                # (so can safely be forgotten about).
                self._slots[slot] = 0
                self._semaphore.release()
                os._exit(0)
        else:
            # Within parent, keep track of the new child by writing its PID
            # into the first free slot index.
            self._slots[slot] = child_pid

    def wait_for_children(self):
        for child_pid in self._slots:
            if child_pid != 0:
                os.waitpid(child_pid, 0)

    def get_id(self):
        return os.getpid()
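A hypothetical driver for ForkingWorker: the semaphore makes spawn_child block once num_processes children are alive, and wait_for_children reaps whatever is still running (fake_work comes from BaseWorker in the original project and is not shown here):

if __name__ == "__main__":
    worker = ForkingWorker(num_processes=4)
    for _ in range(10):
        worker.spawn_child()        # blocks while four children are still busy
    worker.wait_for_children()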
Example #44
File: image.py Project: rupertsmall/neon
class Msg(object):
    """
    TODO: Not documenting this class because it may go away.
    """

    def __init__(self, size):
        self.s_e = Semaphore(1)
        self.s_f = Semaphore(0)
        self.s_buf = Array(ct.c_ubyte, size)

    def send(self, func):
        self.s_e.acquire()
        self.s_buf.acquire()
        send_result = func(self.s_buf._obj)
        self.s_buf.release()
        self.s_f.release()
        return send_result

    def recv(self, func):
        self.s_f.acquire()
        self.s_buf.acquire()
        recv_result = func(self.s_buf._obj)
        self.s_buf.release()
        self.s_e.release()
        return recv_result
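Msg is a one-slot mailbox: s_e ("empty") starts at 1 and s_f ("full") at 0, so send() and recv() strictly alternate, and both serialize access to the shared buffer. A hypothetical round trip between two processes (write_hello and read_all are illustrative callbacks, not project code):

from multiprocessing import Process

def write_hello(buf):
    buf[:5] = b"hello"              # copy into the shared ctypes buffer
    return 5

def read_all(buf):
    return bytes(buf[:5])

def reader(msg):
    print("received", msg.recv(read_all))

if __name__ == "__main__":
    msg = Msg(size=5)
    p = Process(target=reader, args=(msg,))
    p.start()
    msg.send(write_hello)
    p.join()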
Example #45
File: image.py Project: huhoo/neon
class Msg(object):
    """
    Data structure encapsulating a message.
    """

    def __init__(self, size):
        self.s_e = Semaphore(1)
        self.s_f = Semaphore(0)
        self.s_buf = Array(ct.c_ubyte, size)

    def send(self, func):
        self.s_e.acquire()
        self.s_buf.acquire()
        send_result = func(self.s_buf._obj)
        self.s_buf.release()
        self.s_f.release()
        return send_result

    def recv(self, func):
        self.s_f.acquire()
        self.s_buf.acquire()
        recv_result = func(self.s_buf._obj)
        self.s_buf.release()
        self.s_e.release()
        return recv_result
Example #46
 def disconnect(self, priority):
     sem = Semaphore()
     sem.acquire()
     if not self.getPeer().isConnected():
         return
     
     network = self.getPeer().getNetwork()
     neighbors = network.getNeighbors(self.getPeer())
     if len(neighbors) > 0:
         for n in neighbors:
             network.removeConnection(self.getPeer(), n)
             self.getPeer().disconnected()
     else:
         self.getPeer().disconnected()
Example #47
class ProcessControl():
    def __init__(self,forks_number):
        self.forks_number=forks_number
        self.semaphore=Semaphore(self.forks_number)
        
    def execute(self,obj,function_to_execute,data):
        self.semaphore.acquire()
        #print("Launching new process")
        p=Process(target=processCall, args=(self.semaphore,obj,function_to_execute,data))
        p.start()
            
    
    def wait(self):
        for i in range(self.forks_number):
            self.semaphore.acquire()
Example #48
 def definyPeerTrading(self):
     
     value =0;
     peerAux =""
     semaphore = Semaphore()
     semaphore.acquire()
     for peer,trust in self.__peersTrading.iteritems():
         
         if trust >= value:
             value = trust
             peerAux = peer 
         
         
     semaphore.release()
     return (peerAux,value)
Example #49
    def __init__(self, num_processes=1):
        # Set up sync primitives, to communicate with the spawned children
        self.num_processes = num_processes

        # This semaphore is used as a "worker pool guard" to keep the number
        # of spawned workers in the pool to the specified maximum (and block
        # the .spawn_child() call after that)
        self._semaphore = Semaphore(num_processes)

        # This array of integers represents a slot per worker and holds the
        # actual pids (process ids) of the worker's children.  Initially, the
        # array-of-pids is all zeroes.  When a new child is spawned, the pid
        # is written into the slot.  When a child finishes, it resets its own
        # slot to 0 again, effectively freeing up the slot (and allowing new
        # children to be spawned).
        self._pids = Array('i', [0] * num_processes)

        # This array of integers also represents a slot per worker and also
        # holds the actual pids of the worker's children.  The difference with
        # _pids, however, is that this array's slots don't get reset
        # immediately when the children end.  In order for Unix subprocesses
        # to actually disappear from the process list (and freeing up the
        # memory), they need to be waitpid()'ed for by the parent process.
        # When each new child is spawned, it waitpid()'s for the (finished)
        # child that was previously in that slot before it claims the new
        # slot.  This mainly avoids ever-growing process lists and slowly
        # growing the memory footprint.
        self._waitfor = Array('i', [0] * num_processes)

        # This array of booleans represents workers that are in their idle
        # state (i.e. they are waiting for work).  During this time, it is
        # safe to terminate them when the user requests so.  Once they start
        # processing work, they flip their idle state and won't be terminated
        # while they're still doing work.
        self._idle = Array('b', [False] * num_processes)
Example #50
 def removeNeighbor(self, source, target):
     
     if (not self.__layout.has_key(source.getId())) and (not self.__layout.has_key(target.getId())) :
         return False
     if source.hasNeighbor(target):
         return False 
     if target.hasNeighbor(source):
         return False
     
     semaphore = Semaphore()
     semaphore.acquire()
     flag = source.removeNeighbor(target)
     target.removeNeighbor(source)
     semaphore.release()
     
     return flag
Example #51
    def __init__(self, *args, **kwargs):
        self.url = kwargs.get("url")

        if not self.url:
            raise Exception("No URL to gather")

        self.max_depth = kwargs.get("depth", 1)
        self.workers = kwargs.get("workers", 1)
        self.max_errors = kwargs.get("acceptable_errors", None)

        self.out = kwargs.get("out", "/tmp/")
        if not self.out.endswith("/"):
            self.out += "/"
        self.out += "url_gather/"
        if not os.path.exists(self.out):
            os.makedirs(self.out)

        self.collector_file = kwargs.get("collector_file")
        self.collector_class = kwargs.get("collector_class")
        self._load_collector()
        self._gathered_urls = set()

        # initiate multiprocessing resources
        self._pool = Pool(self.workers)
        self._semaphore = Semaphore(self.workers)
        self._manager = Manager()
        self._url_children = self._manager.dict()
        self._url_errors = self._manager.dict()
        self._url_events = {}
Example #52
 def __init__(self):
     self._process_n = getattr(self, '_process_n', max(2, cpu_count()))
     self._thread_n = getattr(self, '_thread_n', 63)
     self._keep_running = Value('i', 1)
     self._event = Event()
     self._semaphore = Semaphore(1)
     self._semaphore.acquire()
     self._maintain_pool()
Example #53
    def __init__(self, maxreaders=120):
        # Linux max semaphore sets is 120
        self.max = 120
        self._reader = Semaphore(120)
        self._writer = Semaphore(1)
        self._sleeping = Event()

        # Does this process hold the write?
        self.localwrite = False
        self.thread_id = currentThread()

        self.create_methods()

        def after_fork(obj):
            obj._reader._after_fork()
            obj._writer._after_fork()
            obj._sleeping._after_fork()

        register_after_fork(self, after_fork)
Example #54
    def basic_call(self, f, req):
        free_threads = Semaphore(5)

        def g(): raise RuntimeError("oaiwhgfoia")
        mock_proc_inter_instance = MagicMock(free_threads=Semaphore(5),
                                             Lock=[Lock()
                                                   for _ in range(0, 3)],
                                             NL_L=Semaphore(2),
                                             pid_ptid_mapping=MagicMock(
                                                 has_key=lambda *args,
                                                 **kwargs: False))
        with patch("fsspmnl_processing_interface.path_constants_webinterface.jobs", self.jobs), patch("fsspmnl_processing_interface.LongProcess.LongProcess", self.MockLP), patch("fsspmnl_processing_interface.processing_interface.ProcessingInterface.Instance", mock_proc_inter_instance), patch("fsspmnl_processing_interface.path_constants_webinterface.dicom_dir", self.basedicomdir), patch("path_constants.jobs", self.jobs):
            ret = f(req)

        for _ in xrange(0, 5):
            assert(free_threads.acquire(False))

        assert(not free_threads.acquire(False))
        return ret
Example #55
 def _init_pool(self):
     self._pool_initialized = True
     self._process_n = getattr(self, '_process_n', max(2, cpu_count()))
     self._thread_n = getattr(self, '_thread_n', 64)
     self._keep_running = Value('i', 1)
     self._shutdown_event = Event()
     self._shutdown_event.clear()
     self._event = Event()
     self._semaphore = Semaphore(1)
     self._semaphore.acquire()
     self._maintain_pool()
Example #56
class EventMasterProcess(SatoriProcess):
    def __init__(self):
        super(EventMasterProcess, self).__init__('event master')
        self.sem = Semaphore(0)

    def do_run(self):
        listener = Listener(address=(settings.EVENT_HOST, settings.EVENT_PORT))
        master = Master(mapper=TrivialMapper())
        master.listen(listener)
        self.sem.release()
        master.run()

    def start(self, *args, **kwargs):
        super(EventMasterProcess, self).start(*args, **kwargs)
        while True:
            if self.sem.acquire(False):
                return
            if not self.is_alive():
                raise RuntimeError('Event master failed to start')
            sleep(0)
Example #57
File: ipc.py Project: gattis/magnum-py
class WorkQueue(object):
    def __init__(self):
        self.request_rfd, self.request_wfd = os.pipe()
        self.response_rfd, self.response_wfd = os.pipe()
        self.response_reader = ResponseReader(self.response_rfd)
        self.request_sem = Semaphore()
        self.response_sem = Semaphore()

    def submit_request(self, id, address, head, body):
        try:
            ip_str, port = address
            ipa, ipb, ipc, ipd = map(int, ip_str.split("."))
        except:
            port = ipa = ipb = ipc = ipd = 0
        os.write(
            self.request_wfd, REQUEST_HEADER.pack(id, ipa, ipb, ipc, ipd, port, len(head), len(body)) + head + body
        )

    def get_request(self):
        self.request_sem.acquire()
        header = ""
        bytes_to_read = REQUEST_HEADER.size
        while bytes_to_read:
            header += os.read(self.request_rfd, bytes_to_read)
            bytes_to_read = REQUEST_HEADER.size - len(header)
        id, ipa, ipb, ipc, ipd, port, head_len, body_len = REQUEST_HEADER.unpack(header)

        head = StringIO()
        bytes_to_read = head_len
        while bytes_to_read:
            head.write(os.read(self.request_rfd, bytes_to_read))
            bytes_to_read = head_len - head.tell()

        body = StringIO()
        bytes_to_read = body_len
        while bytes_to_read:
            body.write(os.read(self.request_rfd, bytes_to_read))
            bytes_to_read = body_len - body.tell()

        self.request_sem.release()
        return id, (".".join(map(str, [ipa, ipb, ipc, ipd])), port), head.getvalue(), body.getvalue()

    def submit_response(self, id, response):
        self.response_sem.acquire()
        response_output = response.output()
        keep_alive = "\x01" if response.headers.get("Connection") == "Keep-Alive" else "\x00"
        os.write(self.response_wfd, RESPONSE_HEADER.pack(id, len(response_output)) + response_output + keep_alive)
        self.response_sem.release()

    def get_response(self):
        return self.response_reader.read()
Example #58
    def __init__(self, api):
        self.api = api

        self.config = self.api.session.config

        self.download_progress = dict()

        self.download_progress['active'] = dict()
        self.download_progress['completed'] = list()
        self.download_progress['failed'] = list()

        self.pool = Pool(CORES_TO_USE, initializer=initialize_download, maxtasksperchild=1)
        self.sema = Semaphore()
Example #59
 def removePeer(self, peer):
     
     flag = True
     
     if not self.__layout.has_key(peer.getPID()):
         return False
     
     semaphore = Semaphore()
     semaphore.acquire()
     
     '''
     could this deadlock, since one semaphore is being acquired inside another?
     '''
     
     
     del self.__layout[peer.getPID()]
    
     
     flag = not self.__layout.has_key(peer.getPID())
     semaphore.release()
     
     return flag
Example #60
 def connect(self, priority):
     sem = Semaphore()
     sem.acquire()
     if self.getPeer().isConnected():
         return
     network = self.getPeer().getNetwork()
     
     node = None
     if network.countNodes() > 0:
         idx = randint(0, network.countNodes() - 1)
         graph = network.getGraph()
         node = graph.keys()[idx]
     
     network.addNode(self.getPeer())
     if node:
         network.createConnection(self.getPeer(), node)
         
     self.getPeer().connected()
     # random time for disconnect
     disconnectionTime = randint(3600, 28800)
     self.getPeer().setDisconnectionTime(disconnectionTime)
     
     sem.release()