Example 1
def run():
    multiprocessing.set_start_method('spawn')
    values = []
    num_vertex = [100, 1000, 2000, 4000]
    for n in num_vertex:
        for p in range(1, 11):
            # process_count is defined elsewhere in the source module
            work_start = Barrier(process_count + 1)
            work_complete = Barrier(process_count + 1)
            start = time.time()
            adj_list = []
            n_vertex, n_edges, adj_list = read_values(adj_list, n, p)
            vertex_set, weight_list = init_values(adj_list, n_vertex)
            color_list = multiprocessing.Array('i', [-1] * n_vertex,
                                               lock=False)
            print("start with ", n, " vertex and ", p, "probability")
            main_process = Process(target=luby_jones,
                                   args=([
                                       color_list, weight_list, adj_list,
                                       work_start, work_complete
                                   ]))
            main_process.start()
            main_process.join()
            print("end")
            colors = set(color_list)
            print("the chromatic number is", len(colors),
                  "and the probability is", p / 10)
            end = time.time()
            values.append([len(colors), p / 10, end - start])
    write_values(values)
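The work_start/work_complete pair above is a common two-barrier handshake: a coordinator releases all workers into a round of work, then waits until every worker reports back. A minimal self-contained sketch of the pattern, assuming nothing beyond the standard library (luby_jones itself is not shown in the source):

from multiprocessing import Barrier, Process

def worker(work_start, work_complete):
    work_start.wait()      # block until the coordinator opens the round
    # ... one round of work goes here ...
    work_complete.wait()   # tell the coordinator this worker is done

def coordinate(process_count=4):
    work_start = Barrier(process_count + 1)     # +1 for the coordinator
    work_complete = Barrier(process_count + 1)
    procs = [Process(target=worker, args=(work_start, work_complete))
             for _ in range(process_count)]
    for p in procs:
        p.start()
    work_start.wait()      # release every worker at once
    work_complete.wait()   # block until every worker has finished
    for p in procs:
        p.join()

if __name__ == '__main__':
    coordinate()

Example 2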
    def __init__(self,
                 dataname_tuples,
                 pdgIDs,
                 nWorkers,
                 num_loaders,
                 filters=[]):
        self.dataname_tuples = sorted(dataname_tuples)
        self.nClasses = len(dataname_tuples[0])
        self.total_files = len(dataname_tuples)  # per class
        self.num_per_file = len(dataname_tuples) * [0]
        self.num_loaders = num_loaders
        self.lock = RLock()
        self.fileInMemory = Value('i', 0, lock=self.lock)
        self.fileInMemoryFirstIndex = Value('i', 0, lock=self.lock)
        self.fileInMemoryLastIndex = Value('i', -1, lock=self.lock)
        self.mem_index = Value('i', 1)  # either 0 or 1; used for memory management
        self.loadNext = Event()
        self.loadFile = Event()
        self.load_barrier = Barrier(self.num_loaders + 1)
        self.batch_barrier = Barrier(nWorkers - (self.num_loaders + 1))
        self.worker_files = [
            RawArray(ctypes.c_char,
                     len(dataname_tuples[0][0]) + 50)
            for _ in range(self.num_loaders)
        ]
        self.data = {}
        ###########################################
        # Prepare memory to share with the workers:
        # take a sample file, read its keys, and over-allocate
        # buffers for every class. If the user runs into memory
        # problems, use a smaller num_loaders.
        with h5py.File(dataname_tuples[0][0], 'r') as sample:
            for key in sample.keys():
                old_shape = sample[key].shape
                size = self.nClasses * self.num_loaders
                self.new_shape = list(old_shape)
                for dim in old_shape:
                    size *= dim
                self.new_shape[0] = self.nClasses * self.num_loaders * old_shape[0]
                buff = RawArray(ctypes.c_float, size)  # prepare mem for num_loaders
                self.data[key] = np.frombuffer(buff, dtype=np.float32).reshape(
                    self.new_shape)  # map numpy array onto the buffer
            classID_buff = RawArray(
                ctypes.c_int, (2 * self.nClasses * self.num_loaders * 200))
            self.data['classID'] = np.frombuffer(
                classID_buff, dtype=np.int32)  # ctypes.c_int maps to 32-bit ints
        ###########################################
        self.pdgIDs = {}
        self.filters = filters
        for i, ID in enumerate(pdgIDs):
            self.pdgIDs[ID] = i
        self.countEvents()
Example 3
def mock_mpiexec(nproc, target, *args):
    """Run a function, given as target, as though it were an MPI session using mpiexec -n nproc
    but using multiprocessing instead of mpi.
    """
    from multiprocessing import Pipe, Process, Barrier, set_start_method
    set_start_method('spawn', force=True)

    # Make the message passing pipes
    all_pipes = [{} for _ in range(nproc)]
    for i in range(nproc):
        for j in range(i + 1, nproc):
            p1, p2 = Pipe()
            all_pipes[i][j] = p1
            all_pipes[j][i] = p2

    # Make a barrier
    barrier = Barrier(nproc)

    # Make fake MPI-like comm object
    comms = [
        MockComm(rank, nproc, pipes, barrier)
        for rank, pipes in enumerate(all_pipes)
    ]

    # Make processes
    procs = [Process(target=target, args=(comm, ) + args) for comm in comms]

    for p in procs:
        p.start()

    for p in procs:
        p.join()
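MockComm is defined elsewhere in the source project. From its use above it needs the rank, the total process count, the per-rank pipe dictionary, and the shared barrier; a minimal sketch of such an interface (the method names and semantics here are assumptions, not the project's actual API):

class MockComm:
    """Minimal MPI-like communicator backed by multiprocessing pipes."""

    def __init__(self, rank, size, pipes, barrier):
        self.rank = rank        # this process's index
        self.size = size        # total number of processes
        self.pipes = pipes      # dict: other rank -> Connection
        self.barrier = barrier  # shared Barrier(size)

    def send(self, obj, dest):
        self.pipes[dest].send(obj)

    def recv(self, source):
        return self.pipes[source].recv()

    def Barrier(self):
        self.barrier.wait()

Example 4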
    def __init__(self,
                 mat_vehicle: '_MatplotlibVehicle',
                 dashboard: '_MatplotlibSimpleDashboard',
                 opponent_mat_vehicles: list,
                 asynchronous: bool = False,
                 record: bool = False,
                 record_folder: str = "OUTPUT/",
                 **kwargs):
        super().__init__(mat_vehicle, dashboard, opponent_mat_vehicles, record,
                         record_folder, **kwargs)

        self._asynchronous = asynchronous

        self._events['close_event'] = self._close

        # Initialize multiprocessing objects
        if plt.get_backend() == "MacOSX":
            set_start_method("spawn", force=True)

        # Communication pipelines between processes
        self._event_sender, self._event_receiver = Pipe()
        self._queue = Queue(1)

        # Barrier that will wait for initialization to occur
        self._barrier = Barrier(2)

        self._p = Process(target=self.run)
        self._p.start()

        # Wait for the matplotlib window to initialize
        self._barrier.wait()
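The run() method started in the child process is not shown; whatever it does, it must eventually call wait() on the same Barrier(2), otherwise the parent blocks forever at self._barrier.wait(). A hypothetical minimal counterpart:

    def run(self):
        # Hypothetical sketch: create the figure first, then release the
        # parent, which is blocked on the same Barrier(2) in __init__.
        import matplotlib.pyplot as plt
        self._figure = plt.figure()
        self._barrier.wait()  # the parent's __init__ can now return
        plt.show()            # enter the matplotlib event loop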
Example 5
    def __init__(self, port, responder):
        Process.__init__(self, daemon=True)
        timeout_s = 5

        self.__listening_port = port
        self.__responder = responder
        self.__barrier = Barrier(parties=2, timeout=timeout_s)
Example 6
def simulate_repel_parallel(area, subgrid, pPos, dt, bPos, bType, b_scale, p_scale, figname, ndt0, n):
    particlePos = mp.sharedctypes.RawArray(c_double, pPos.flatten())
    boundPos = mp.sharedctypes.RawArray(c_double, bPos.flatten())
    boundType = mp.sharedctypes.RawArray(c_uint, bType)
    nParticle = pPos.shape[1]
    nBoundary = bPos.shape[0]

    update_barrier = Barrier(n)
    output_lock = Lock()
    procs = []
    for i in range(n):
        index = n_in_m(nParticle, n, i)
        print(f'patch size = {index.size}', flush=True)
        proc = Process(target=parallel_repel,
                       args=(area, subgrid, nParticle, particlePos, p_scale,
                             nBoundary, boundPos, boundType, b_scale, dt, i,
                             index, update_barrier, output_lock, 1.0,
                             figname, ndt0))
        procs.append(proc)
        proc.start()

    for proc in procs:
        proc.join()
    with open(figname+'_final.bin', 'wb') as f:
        np.array([nParticle]).tofile(f)        
        np.frombuffer(particlePos, dtype=float).tofile(f)
        np.array([nBoundary]).tofile(f)        
        np.frombuffer(boundPos, dtype=float).tofile(f)
        np.frombuffer(boundType, dtype='u4').tofile(f)
        subgrid.tofile(f)
        np.array([area]).tofile(f)
    return np.frombuffer(particlePos, dtype = float).copy().reshape(2,nParticle)
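n_in_m is a project helper not shown here; from its use it appears to split the nParticle indices across the n worker processes. A plausible sketch, assuming an even contiguous split:

import numpy as np

def n_in_m(total, m, i):
    # Assumed behavior: partition range(total) into m nearly equal
    # contiguous chunks and return the index array for chunk i.
    bounds = np.linspace(0, total, m + 1).astype(int)
    return np.arange(bounds[i], bounds[i + 1])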
Example 7
def aio_basic_multiprocessing(args, read_op):
    b = Barrier(args.threads)
    pool_params = [(args, p, read_op) for p in range(args.threads)]
    with Pool(processes=args.threads, initializer=_init_tasklet, initargs=(b, )) as p:
        pool_results = p.map(_aio_handle_tasklet, pool_params)

    report_results(args, read_op, pool_results)
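_init_tasklet is not shown in the source; the usual Pool-initializer idiom is to stash the barrier in a module-level global so each worker can line up before the timed I/O begins. A sketch under that assumption (the global's name is hypothetical):

_aio_barrier = None

def _init_tasklet(b):
    # Runs once in each pool worker; keep the barrier in a module-level
    # global so _aio_handle_tasklet can call _aio_barrier.wait().
    global _aio_barrier
    _aio_barrier = b

Note that the barrier has args.threads parties, so this only works if each of the args.threads pool workers picks up exactly one of the args.threads tasks.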
Example 8
 def setUp(self):
     try:
         from pytest_cov.embed import cleanup_on_sigterm
     except ImportError:
         pass
     else:
         cleanup_on_sigterm()
     self.barrier = Barrier(2)
     MockNodeHandler.BARRIER = self.barrier
     self.node_handler = node_handler_process.NodeHandlerProcess
     node_handler_process.NodeHandlerProcess = MockNodeHandler
     from src.backup_scheduler.backup_scheduler import BackupScheduler
     shutil.rmtree('/tmp/disk_db_concus', ignore_errors=True)
     os.mkdir('/tmp/disk_db_concus')
     shutil.rmtree('/tmp/data_for_backup', ignore_errors=True)
     os.mkdir('/tmp/data_for_backup')
     with open('/tmp/data_for_backup/data', 'w') as data_file:
         data_file.write("adasdsa")
     database = DiskDatabase('/tmp/disk_db_concus')
     shutil.rmtree('/tmp/backup_scheduler_path', ignore_errors=True)
     os.mkdir('/tmp/backup_scheduler_path')
     with open('/tmp/backup_scheduler_path/trash', 'w') as trash_file:
         trash_file.write("trash")
     backup_scheduler_recv, self.client_listener_send = Pipe(False)
     self.client_listener_recv, backup_scheduler_send = Pipe(False)
     backup_scheduler = BackupScheduler('/tmp/backup_scheduler_path', database,
                                        backup_scheduler_recv, backup_scheduler_send, 10)
     self.p = Process(target=backup_scheduler)
     self.p.start()
Example 9
    def init(self):
        # Barrier Initialize for all sub-processes and main process
        self.barrier = Barrier(len(self.nodes) + 1)

        # Add edges from VIPPipeline to starting nodes
        for node in self.nodes:
            if len(node.in_edges) == 0:
                self.add_edges([self], [node])

        for node in self.nodes:
            if len(node.out_edges) == 0:
                self.add_edges([node], [self])

        # acquire locks in head node
        for out_edge in self.out_edges:
            out_edge.lock.acquire()

        # run all the nodes in parallel
        for node in self.nodes:
            p = mp.Process(target=node.start_process, args=())
            self.processes.append(p)

        [x.start() for x in self.processes]

        self.barrier.wait()
Example 10
    def test_concurrency(self, procedure, resource):
        # In this test we are going to start several processes trying to
        # handle the same job at the same time. Only one of those processes
        # should succeed, the others should fail with 409 Conflict.
        proc_count = 64
        job = procedure.exec(target=resource.id, wait=False)

        # Barrier is used to make the processes call handle() at the same
        # time
        barrier = Barrier(proc_count)
        # Queue is used to collect the results from the processes: either
        # 'success' or 'conflict'
        queue = Queue()

        def inner():
            agent = create_agent()

            # Wait for all other processes to start and complete agent
            # creation
            barrier.wait()

            try:
                job.handle(owner=agent.id)
            except StormConflictError:
                queue.put((agent.id, 'conflict'))
            else:
                queue.put((agent.id, 'success'))

        # Start all processes
        procs = [Process(target=inner, daemon=True) for i in range(proc_count)]

        for proc in procs:
            proc.start()

        # Join the procs and get their results, waiting at most 10
        # seconds
        max_time = time.time() + 10

        for proc in procs:
            proc.join(timeout=max_time - time.time())

        results = dict(
            queue.get(timeout=max_time - time.time())
            for i in range(proc_count))

        # Check that the results contain exactly 1 'success' and all the
        # others are 'conflict'
        assert len(results) == proc_count

        statuses = list(results.values())
        assert statuses.count('success') == 1
        assert statuses.count('conflict') == proc_count - 1

        # Check that the agent that reported 'success' is effectively
        # the owner of the job
        expected_owner, = (owner for owner, status in results.items()
                           if status == 'success')

        job.reload()
        assert job.owner == expected_owner
Example 11
    def test_send_message_to_unexistent_user(self):
        def escritor(barrera):
            connector = ChatServerConnector('localhost', 6500, 11)
            connector.send_message(TextMessage(11, 999, "Hola amiwi"))
            barrera.wait()
            message = TextMessage(11, 12, "Hola don pepito")
            connector.send_message(message)
            news = connector.get_news()
            while not news:
                news = connector.get_news()
            assert len(news) == 1
            assert news[0].message_id == message.message_id
            exit(0)

        def receptor(barrera):
            connector = ChatServerConnector('localhost', 6500, 12)
            barrera.wait()
            news = connector.get_news()
            while not news:
                news = connector.get_news()
            assert len(news) == 1
            assert news[0].message.sender == 11
            exit(0)

        barrera = Barrier(2)
        p1 = Process(target=escritor, args=(barrera, ))
        p2 = Process(target=receptor, args=(barrera, ))
        p1.start()
        p2.start()
        p1.join()
        p2.join()
        self.assertEqual(p1.exitcode, 0)
        self.assertEqual(p2.exitcode, 0)
Example 12
def connectProcedure():
    connectButton.config(state="disabled")
    disconnectButton.config(state="normal")
    identifyActivityButton.config(state="normal")
    identifyDevicesButton.config(state="disabled")
    print("Connecting the devices...")
    # Create dir to save data
    cwd = os.getcwd()
    # Build the path once: calling strftime('%c') twice could return two
    # different timestamps, making chdir target a different directory.
    recordings_dir = cwd + "/Recordings - " + dt.datetime.now().strftime('%c')
    os.mkdir(recordings_dir)
    os.chdir(recordings_dir)
    # Create peripheral objects
    peripherals = [
        ACM(macAdresses[i], i, LOCATIONS[i]) for i in range(5)
        if macAdresses[i] != ''
    ]
    # Create barrier object
    barrier = Barrier(len(peripherals))
    # Configure and start logging processes
    queue = Queue(-1)
    process = Process(target=runLogger, args=(queue, ))
    process.start()
    # Start processes
    for peripheral in peripherals:
        process = Process(target=runProcess, args=(peripheral, barrier, queue))
        process.start()
Example 13
    def test_simple_receipt_message(self):
        def escritor(barrera):
            connector = ChatServerConnector('localhost', 6500, 6)
            barrera.wait()
            message = TextMessage(6, 5, "Hola don pepito")
            connector.send_message(message)
            news = connector.get_news()
            while not news:
                news = connector.get_news()
            assert len(news) == 1
            assert news[0].message_id == message.message_id
            exit(0)

        def receptor(barrera):
            connector = ChatServerConnector('localhost', 6500, 5)
            barrera.wait()
            news = connector.get_news()
            while not news:
                news = connector.get_news()
            assert len(news) == 1
            assert news[0].message.sender == 6
            exit(0)

        barrera = Barrier(2)
        p1 = Process(target=escritor, args=(barrera, ))
        p2 = Process(target=receptor, args=(barrera, ))
        p1.start()
        p2.start()
        p1.join()
        p2.join()
        self.assertEqual(p1.exitcode, 0)
        self.assertEqual(p2.exitcode, 0)
Example 14
def run(test, count, concurrency, *, loop, verbose, profile):
    if verbose:
        print("Prepare")
    else:
        print('.', end='', flush=True)
    host, port = find_port()
    barrier = Barrier(2)
    server = Process(target=test, args=(host, port, barrier, profile))
    server.start()
    barrier.wait()

    url = 'http://{}:{}'.format(host, port)

    connector = aiohttp.TCPConnector(loop=loop)
    with aiohttp.ClientSession(connector=connector) as client:

        for i in range(10):
            # make server hot
            resp = yield from client.get(url+'/prepare')
            assert resp.status == 200, resp.status
            yield from resp.release()

        if verbose:
            test_name = test.__name__
            print("Attack", test_name)
        rps, data = yield from attack(count, concurrency, client, loop, url)
        if verbose:
            print("Done")

        resp = yield from client.get(url+'/stop')
        assert resp.status == 200, resp.status
        yield from resp.release()

    server.join()
    return rps, data
Example 15
def benchmark_throughput(workload_path,
                         workload_off,
                         client_builder,
                         n_ops,
                         n_procs,
                         log_interval=100000):
    barrier = Barrier(n_procs)
    logging.info(
        "[Master] Creating processes with workload_path=%s, workload_off=%d, n_ops=%d, n_procs=%d..."
        % (workload_path, workload_off, n_ops, n_procs))
    benchmark = [
        Process(target=_load_and_run_workload,
                args=(
                    barrier,
                    workload_path,
                    workload_off + i * (n_ops / n_procs),
                    client_builder,
                    int(n_ops / n_procs),
                    log_interval,
                )) for i in range(n_procs)
    ]

    for b in benchmark:
        b.start()

    for b in benchmark:
        b.join()
    logging.info("[Master] Benchmark complete.")
Example 16
class economic:
    evenement_E = 3421
    crash_fincancier = False
    inflation = False
    anarchie = False
    # One could write a function that reads this list in a loop and acts as soon as it sees True
    listEvenementEconomics = [crash_fincancier, inflation, anarchie]
    b = Barrier(1, timeout=10)
    # Create the pipe
    parent_conn, child_conn = Pipe()

    def Event(self, evenement_E, child_conn, barrier):
        # There are 3 possible events; at each synchronization we draw a randint.
        # If the randint matches one of the events, that event becomes true.
        while True:
            evenement_E = randint(1, 4000)  # each event has a one in 4000 chance of occurring
            if evenement_E == 921:
                crash_fincancier = True
                child_conn.send("crash_fincancier")  # notify the parent
            elif evenement_E == 123:
                inflation = True
                child_conn.send("inflation")  # notify the parent
            elif evenement_E == 3900:
                anarchie = True
                child_conn.send("anarchie")  # notify the parent
            barrier.wait()
Example 17
def run_async_kv_benchmark(d_host,
                           d_port,
                           l_port,
                           data_path,
                           workload_path,
                           workload_off=0,
                           n_ops=100000,
                           n_procs=1,
                           max_async=10000):
    barrier = Barrier(n_procs)
    benchmark = [
        Process(target=load_and_run_workload,
                args=(
                    barrier,
                    workload_path,
                    workload_off + i * (n_ops / n_procs),
                    d_host,
                    d_port,
                    l_port,
                    data_path,
                    int(n_ops / n_procs),
                    max_async,
                )) for i in range(n_procs)
    ]

    for b in benchmark:
        b.start()

    for b in benchmark:
        b.join()

    logging.info("[Master] Benchmark complete.")
Example 18
 def setUp(self) -> None:
     try:
         from pytest_cov.embed import cleanup_on_sigterm
     except ImportError:
         pass
     else:
         cleanup_on_sigterm()
     self.barrier = Barrier(2)
     self.p = None
Example 19
def main():
    # Pick a random UUID
    seed = str(uuid.uuid4())

    # Run start and stop, simultaneously
    synchronizer = Barrier(2)
    Process(target=start, args=(seed, synchronizer)).start()
    Process(target=stop, args=(seed, synchronizer)).start()

    print("Image is at https://" + domain + "/images/scores/" + seed + ".jpg")
Example 20
 def training_one_step(self):
     lock_model = Lock()
     p_list = []
     T = Value('I', 0, lock=True)
     b = Barrier(self.process_num)
     for i in range(self.process_num):
         p_list.append(Process(target=self.each_actor_learner, args=(lock_model, self.share_model_pv, T, b)))
         p_list[i].start()
     for p in p_list:
         p.join()
     self.model = self.share_model_pv.get_model_p()
Example 21
    def startBackgroundStreaming(self,
                                 isMaster,
                                 intervalInMicroseconds,
                                 filePath,
                                 bufferSize=2 << 20,
                                 axis0=False,
                                 axis1=False,
                                 axis2=False):
        """
        Starts concurrent and permanent position streaming to file in background.
        Programm must run in a main function:

        def main():
            do_everything()

        if __name__ == '__main__':
            main()

        Parameters
        ----------
        isMaster : bool
            Master
        intervalInMicroseconds : int
            Sample interval (in us) of the position samples
        filePath : str
            target file
        bufferSize : int
            Size of each buffer in bytes
        axis0 : bool, default: False
            Should Axis 0 be recorded?
        axis1 : bool, default: False
            Should Axis 1 be recorded?
        axis2 : bool, default: False
            Should Axis 2 be recorded?
        """

        if self.background_process is not None:
            raise Exception("Stream recording already started")

        barrier = Barrier(2)
        stopped = Value('b', False)

        self.background_process = Process(
            target=fileWriter,
            args=(self.device.address, isMaster, intervalInMicroseconds,
                  filePath, bufferSize, axis0, axis1, axis2, barrier, stopped))
        self.background_process.daemon = True
        self.background_process.stopped = stopped
        self.background_process.start()

        barrier.wait()
        sleep(0.1)
        return
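fileWriter runs in the daemon process; the Barrier(2) handshake means it must call wait() once its stream is set up, and the shared 'stopped' flag is how the recording would be ended. A simplified hypothetical counterpart (the real writer also talks to the device):

def fileWriter(address, isMaster, intervalInMicroseconds, filePath,
               bufferSize, axis0, axis1, axis2, barrier, stopped):
    # Hypothetical sketch: open the target file, release the parent,
    # then record until the parent sets stopped.value = True.
    with open(filePath, 'wb') as f:
        barrier.wait()            # the parent's barrier.wait() returns here
        while not stopped.value:
            f.write(b'\x00' * 8)  # placeholder for one position sample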
Example 22
def runProcess(peripheral, barrier, queue):
    # Logging configuration
    h = logging.handlers.QueueHandler(queue)  # Just the one handler needed
    logger = logging.getLogger()
    logger.addHandler(h)
    logger.setLevel(logging.DEBUG)

    while True:
        try:
            time.sleep((peripheral.index + 1) * 2.02 + random.random())
            # Connections
            print("Connecting to BlueNRG - " + peripheral.location +
                  " Device...")

            BlueNRG = btle.Peripheral(peripheral.address,
                                      btle.ADDR_TYPE_RANDOM)
            BlueNRG.setDelegate(peripheral)

            # Service retrieval
            BlueNRGService = BlueNRG.getServiceByUUID(SENSOR_SERVICE_UUID)

            # Char
            BlueNRGAccChar = BlueNRGService.getCharacteristics(
                ACC_SERVICE_UUID)[0]
            BlueNRGStartChar = BlueNRGService.getCharacteristics(
                START_SERVICE_UUID)[0]

            print("Connection successfull for BlueNRG - " +
                  peripheral.location + " Device...")

            # Wait for connection update
            time.sleep(5)

            # Waiting to start (only for the initial sync)
            barrier.wait()
            barrier = Barrier(1)

            # Set timer to the right value
            BlueNRG.writeCharacteristic(BlueNRGStartChar.valHandle,
                                        (masterClock.value + 40).to_bytes(
                                            4, byteorder='little'))

            # Setting the notifications on
            BlueNRG.writeCharacteristic(BlueNRGAccChar.valHandle + 1,
                                        b'\x01\x00')

            while True:
                BlueNRG.waitForNotifications(1.0)

        except btle.BTLEDisconnectError:
            print("A disconnection occured for BlueNRG - " +
                  peripheral.location + " Device. Retrying...")
            time.sleep(1)
Example 23
def main():
    (PROC1_COUNT, PROC2_COUNT, PROC3_COUNT) = (int(sys.argv[i]) for i in range(1, 4))
    r = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, db=0)
    r.flushall()
    b = Barrier(PROC1_COUNT + PROC2_COUNT + PROC3_COUNT)
    end_1 = Value('b', False)
    processes_1 = [Process(target=process1.execute, 
                              args=(REDIS_HOST, REDIS_PORT, b, end_1, 0)) 
                        for _ in range(PROC1_COUNT)]
    end_2 = Value('b', False)
    processes_2 = [Process(target=process2.execute, 
                              args=(REDIS_HOST, REDIS_PORT, b, end_2)) 
                        for _ in range(PROC2_COUNT)]
    end_3 = Value('b', False)
    processes_3 = [Process(target=process3.execute, 
                              args=(REDIS_HOST, REDIS_PORT, b, end_3)) 
                        for _ in range(PROC3_COUNT)]
    for p in processes_1:
        p.start()
    for p in processes_2:
        p.start()
    for p in processes_3:
        p.start()

    time.sleep(5)
    
    print('Stopping workers')
    
    end_3.value = True
    for p in processes_3:
        p.join()
    end_2.value = True
    for p in processes_2:
        p.join()
    end_1.value = True
    for p in processes_1:
        p.join()
    
    results = []
    all_requests = 0
    for key in r.keys('*'):
        if not key.decode().startswith('request-'):
            continue
        all_requests += 1
        (time_1, time_2, time_3, is_shown) = r.hmget(key, 'time_1', 'time_2', 'time_3', 'shown')
        (time_1, time_2, time_3) = (float(time_1 or 0), float(time_2 or 0), float(time_3 or 0))
        if is_shown is not None:
            results.append((time_1, time_2, time_3, int(is_shown.decode())))
    results = sorted(results, key=lambda el: el[0])
    
    analyze.show_results(results, all_requests)
Example 24
 def setUp(self):
     try:
         from pytest_cov.embed import cleanup_on_sigterm
     except ImportError:
         pass
     else:
         cleanup_on_sigterm()
     self.barrier = Barrier(2)
     self.client_listener = None
     self.backup_scheduler_recv, client_listener_send = Pipe(False)
     client_listener_recv, self.backup_scheduler_send = Pipe(False)
     self.p = Process(target=self._launch_process,
                      args=(client_listener_send, client_listener_recv))
     self.p.start()
Example 25
def main(args):
	logger.debug("CONFIGURATION : {}".format(args))

	# Global shared counter allocated in shared memory ('i' = signed int)
	args.global_step = Value('i', 0)

	# Barrier used to synchronize the processes
	args.barrier = Barrier(args.num_actor_learners)

	# Process-safe queue used to communicate between the processes
	args.queue = Queue()

	# Number of actions available at each step of the game
	args.nb_actions = atari_environment.get_num_actions(args.game)

	if args.visualize == 0:
		args.visualize = False
	else:
		args.visualize = True
	
	actor_learners = []

	# n-1 pipes are needed: process 0 holds one end of each.
	pipes = [Pipe() for _ in range(args.num_actor_learners - 1)]

	# Loop launching all the learners in different processes
	for i in range(args.num_actor_learners):

		if i == 0:
			# A pipe to each other process
			args.pipes = [pipe[0] for pipe in pipes]
		else:
			# A pipe to process 0
			args.pipes = [pipes[i-1][1]]

		# Process id
		args.actor_id = i

		# Random seed for each process
		rng = np.random.RandomState(int(time.time()))
		args.random_seed = rng.randint(1000)

		actor_learners.append(A3C_Learner(args))
		actor_learners[-1].start()

	# Waiting for the processes to finish
	for t in actor_learners:
		t.join()

	logger.debug("All processes are over")
def publish_a_file(dir, port) -> Process:

    barrier = Barrier(2)

    def func(dir, port, barrier):
        with TCPServer(("", port), simple_http_handler(dir)) as server:
            barrier.wait()
            server.handle_request()

    proc = Process(target=func, args=(dir, port, barrier))
    proc.start()
    barrier.wait()
    time.sleep(0.05)

    return proc
Example 27
    def test_simple_receive_message(self):
        def jorgito(barrera):
            connector = ChatServerConnector('localhost', 6500, 8)
            barrera.wait()
            connector.send_message(TextMessage(8, 7, "Hola don pepito"))
            news = [
                new for new in connector.get_news()
                if isinstance(new, NewMessage)
            ]
            while not news:
                news = [
                    new for new in connector.get_news()
                    if isinstance(new, NewMessage)
                ]
            assert len(news) == 1
            assert news[0].message.sender == 7
            assert news[0].message.content == "Hola jorgito"
            exit(0)

        def pepito(barrera):
            connector = ChatServerConnector('localhost', 6500, 7)
            barrera.wait()
            connector.send_message(TextMessage(7, 8, "Hola jorgito"))
            news = [
                new for new in connector.get_news()
                if isinstance(new, NewMessage)
            ]
            while not news:
                news = [
                    new for new in connector.get_news()
                    if isinstance(new, NewMessage)
                ]
            assert len(news) == 1
            assert news[0].message.sender == 8
            assert news[0].message.content == "Hola don pepito"
            exit(0)

        barrera = Barrier(2)
        p_jorgito = Process(target=jorgito, args=(barrera, ))
        p_pepito = Process(target=pepito, args=(barrera, ))
        p_jorgito.start()
        p_pepito.start()
        p_jorgito.join()
        p_pepito.join()
        self.assertEqual(p_pepito.exitcode, 0)
        self.assertEqual(p_jorgito.exitcode, 0)
Example 28
def main():
    (PROC1_COUNT, PROC2_COUNT, PROC3_COUNT) = (int(sys.argv[i])
                                               for i in range(1, 4))
    setup_tables()
    b = Barrier(PROC1_COUNT + PROC2_COUNT + PROC3_COUNT)
    end_1 = Value('b', False)
    processes_1 = [
        Process(target=process1.execute,
                args=(PGSQL_HOST, PGSQL_USER, PGSQL_PWD, b, end_1))
        for _ in range(PROC1_COUNT)
    ]
    end_2 = Value('b', False)
    processes_2 = [
        Process(target=process2.execute,
                args=(PGSQL_HOST, PGSQL_USER, PGSQL_PWD, b, end_2))
        for _ in range(PROC2_COUNT)
    ]
    end_3 = Value('b', False)
    processes_3 = [
        Process(target=process3.execute,
                args=(PGSQL_HOST, PGSQL_USER, PGSQL_PWD, b, end_3))
        for _ in range(PROC3_COUNT)
    ]
    for p in processes_1:
        p.start()
    for p in processes_2:
        p.start()
    for p in processes_3:
        p.start()

    time.sleep(5)
    print('Stopping workers')

    end_3.value = True
    for p in processes_3:
        p.join()
    end_2.value = True
    for p in processes_2:
        p.join()
    end_1.value = True
    for p in processes_1:
        p.join()

    results, allnum = fetch_results()
    analyze.show_results(results, allnum)
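Example 29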
    def __init__(self, bully_connections_config: Dict[int, Tuple],
                 workers_config: Dict, lowest_port: int, host_id: int):
        """
        Initializes connections and bully
        :param bully_connections_config: Dictionary mapping a host's numerical
        id to the (ip, port) tuple of its listening socket for receiving messages.
        :param workers_config: Dictionary with the configuration of every worker running in the system.
        :param lowest_port: Integer representing the lowest port on which to listen to other nodes.
        :param host_id: Numerical id of this host for the bully algorithm.
        """
        self._workers_config = workers_config
        self._bully_connections_config = bully_connections_config
        self._host_id = host_id
        self._sockets_to_send_messages = {}

        bully_leader_election = BullyLeaderElection(
            host_id,
            list(bully_connections_config.keys()) + [host_id])
        manager = Manager()
        concurrent_dict = manager.dict()
        concurrent_dict['bully'] = bully_leader_election
        self._bully_leader_election_dict = concurrent_dict
        self._bully_leader_election_lock = Lock()

        self._sending_connections = manager.dict()

        open_sockets_barrier = Barrier(len(bully_connections_config) + 1)

        for i in range(len(bully_connections_config)):
            bully_message_receiver = BullyMessageReceiver(
                host_id, lowest_port + i, self._bully_leader_election_dict,
                self._bully_leader_election_lock, self._sending_connections,
                open_sockets_barrier)
            listening_process = Process(
                target=bully_message_receiver.start_listening)
            listening_process.start()

        for h_id, host_and_port in bully_connections_config.items():
            self._sending_connections[h_id] = open_sending_socket_connection(
                host_and_port[0], host_and_port[1])

        # This barrier exists because a listening process could try to access
        # the sending connections before they are all initialized.
        open_sockets_barrier.wait()
Example 30
def main():
    initialize_log()
    logging.info("Starting sink node. Waiting for metrics.")
    config_params = parse_config_params()
    metrics_barrier = Barrier(5)
    sorted_cities_process = Process(target=launch_sink_direct_queue,
                                    args=(config_params['sorted_cities_queue'],
                                          config_params['final_results_queue'],
                                          'top_10_cities_by_review_quantity',
                                          metrics_barrier))
    sorted_cities_process.start()

    weekday_count_process = Process(target=launch_sink_direct_queue,
                                    args=(config_params['weekday_count_queue'],
                                          config_params['final_results_queue'],
                                          'weekday_count_for_reviews',
                                          metrics_barrier))

    weekday_count_process.start()

    user_fifty_reviews_process = Process(
        target=launch_sink_fanout_exchange,
        args=(config_params['users_fifty_reviews_exchange'],
              config_params['final_results_queue'],
              'users_with_fifty_or_more_reviews', metrics_barrier))
    user_fifty_reviews_process.start()

    user_fifty_reviews_5_stars_process = Process(
        target=launch_sink_fanout_exchange,
        args=(config_params['five_stars_exchange'],
              config_params['final_results_queue'],
              'users_with_fifty_or_more_reviews_and_only_five_stars',
              metrics_barrier))
    user_fifty_reviews_5_stars_process.start()

    user_five_reviews_and_same_text = Process(
        target=launch_sink_fanout_exchange,
        args=(config_params['exchange_same_text_and_five_reviews'],
              config_params['final_results_queue'],
              'users_with_five_or_more_reviews_and_always_same_text',
              metrics_barrier))
    user_five_reviews_and_same_text.start()