def proc_run(sync_barrier: multiprocessing.Barrier,
             fin_event: multiprocessing.Event, args: argparse.Namespace):
    """Per-process experiment runner: pins a CUDA device, redirects logs and
    runs the selected experiments in lockstep with sibling processes.

    Args:
        sync_barrier: Barrier shared by all worker processes; its wait()
            return value is reused as this process's logical pid / device
            index.
        fin_event: Event set on exit so the parent can detect completion.
        args: Parsed CLI namespace; reads args.e (experiment names, or None
            for all), args.cuda_device (device list) and args.dir (log dir).
    """
    if args.e is None:
        experiment_names = list(_EXPERIMENTS.keys())
    else:
        experiment_names = args.e

    try:
        # Barrier.wait() returns a unique index per participant; use it to
        # pick this process's CUDA device.
        pid = sync_barrier.wait()
        device_string = str(args.cuda_device[pid])
        # Must be set before torch touches CUDA for the restriction to apply.
        os.environ['CUDA_VISIBLE_DEVICES'] = device_string
        # Init torch in order to occupy GPU.
        torch.cuda.init()
        for experiment_name in experiment_names:
            # Keep all workers on the same experiment at the same time.
            sync_barrier.wait()
            out_log_path = os.path.join(
                args.dir, f'pid{os.getpid()}-{pid}_{experiment_name}.log')
            err_log_path = os.path.join(
                args.dir, f'pid{os.getpid()}-{pid}_{experiment_name}.err.log')
            # Redirect this process's stdout/stderr to per-experiment files.
            sys.stdout = utils_io.LogFile(out_log_path, lazy_create=True)
            sys.stderr = utils_io.LogFile(err_log_path, lazy_create=True)
            print(f'CUDA_VISIBLE_DEVICES = {device_string}')
            experiment = _EXPERIMENTS[experiment_name]
            experiment(sync_barrier, pid, sync_barrier.parties, args)
    except threading.BrokenBarrierError:
        # Another process aborted the shared barrier.
        print('Aborted from outside!')
    finally:
        fin_event.set()
class TestBlockingSocketTransferer(unittest.TestCase):
    TEST_PORT = 8000

    def setUp(self) -> None:
        try:
            from pytest_cov.embed import cleanup_on_sigterm
        except ImportError:
            pass
        else:
            cleanup_on_sigterm()
        self.barrier = Barrier(2)
        self.p = None

    def tearDown(self) -> None:
        self.p.join()
        TestBlockingSocketTransferer.TEST_PORT += 1

    def test_send_text(self):
        self.p = Process(target=message_sender, args=(self.barrier, TestBlockingSocketTransferer.TEST_PORT))
        self.p.start()
        self.barrier.wait()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect(('localhost', TestBlockingSocketTransferer.TEST_PORT))
        socket_transferer = BlockingSocketTransferer(sock)
        self.assertEqual(socket_transferer.receive_plain_text(), "Hola uacho")
        socket_transferer.close()

    def test_send_file(self):
        with open('/tmp/big_dummy_file_test', 'wb') as dummy_file:
            for i in range(100000):
                dummy_file.write(("%d%d%d" % (i, i, i)).encode('utf-8'))
        sha256 = hashlib.sha256()
        with open('/tmp/big_dummy_file_test', 'rb') as dummy_file:
            while True:
                data = dummy_file.read(2048)
                if not data:
                    break
                sha256.update(data)
        original_hash = sha256.hexdigest()

        self.p = Process(target=file_sender, args=(self.barrier, TestBlockingSocketTransferer.TEST_PORT,
                                                   '/tmp/big_dummy_file_test'))
        self.p.start()
        self.barrier.wait()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect(('localhost', TestBlockingSocketTransferer.TEST_PORT))
        socket_transferer = BlockingSocketTransferer(sock)
        with open('/tmp/big_dummy_file_test_out', 'wb') as write_file:
            socket_transferer.receive_file_data(write_file)
        sha256 = hashlib.sha256()
        with open('/tmp/big_dummy_file_test_out', 'rb') as dummy_file:
            while True:
                data = dummy_file.read(2048)
                if not data:
                    break
                sha256.update(data)
        self.assertEqual(sha256.hexdigest(), original_hash)
        os.remove('/tmp/big_dummy_file_test')
        os.remove('/tmp/big_dummy_file_test_out')
        socket_transferer.close()
Example #3
0
def run(test, count, concurrency, *, loop, verbose, profile):
    """Benchmark driver (old-style asyncio coroutine using ``yield from``).

    Starts the server under test in a child process, warms it up with ten
    /prepare requests, runs attack(), asks the server to stop and returns
    (requests-per-second, data).

    Args:
        test: Server entry point run in the child; names the run when verbose.
        count: Total request count forwarded to attack().
        concurrency: Concurrent client count forwarded to attack().
        loop: asyncio event loop used by the aiohttp client.
        verbose: Print progress text instead of a single dot.
        profile: Forwarded to the server process.
    """
    if verbose:
        print("Prepare")
    else:
        print('.', end='', flush=True)
    host, port = find_port()
    barrier = Barrier(2)
    server = Process(target=test, args=(host, port, barrier, profile))
    server.start()
    # Block until the server process signals it is listening.
    barrier.wait()

    url = 'http://{}:{}'.format(host, port)

    connector = aiohttp.TCPConnector(loop=loop)
    with aiohttp.ClientSession(connector=connector) as client:

        for i in range(10):
            # make server hot
            resp = yield from client.get(url+'/prepare')
            assert resp.status == 200, resp.status
            yield from resp.release()

        if verbose:
            test_name = test.__name__
            print("Attack", test_name)
        rps, data = yield from attack(count, concurrency, client, loop, url)
        if verbose:
            print("Done")

        # Ask the server process to shut itself down.
        resp = yield from client.get(url+'/stop')
        assert resp.status == 200, resp.status
        yield from resp.release()

    server.join()
    return rps, data
Example #4
0
def run():
    """Run the Luby-Jones graph-coloring benchmark.

    For each vertex count in {100, 1000, 2000, 4000} and each edge
    probability p/10 (p = 1..10), spawns a driver process, measures wall
    time and records [chromatic number, probability, elapsed seconds] via
    write_values().
    """
    multiprocessing.set_start_method('spawn')
    results = []
    for vertex_count in [100, 1000, 2000, 4000]:
        for prob in range(1, 11):
            # Barriers shared with the process_count workers plus the driver.
            work_start = Barrier(process_count + 1)
            work_complete = Barrier(process_count + 1)
            started_at = time.time()
            n_vertex, n_edges, adj_list = read_values([], vertex_count, prob)
            vertex_set, weight_list = init_values(adj_list, n_vertex)
            # Lock-free shared array: one colour slot per vertex, -1 = unset.
            color_list = multiprocessing.Array('i', [-1] * n_vertex,
                                               lock=False)
            print("start with ", vertex_count, " vertex and ", prob, "probability")
            driver = Process(target=luby_jones,
                             args=[color_list, weight_list, adj_list,
                                   work_start, work_complete])
            driver.start()
            driver.join()
            print("end")
            distinct_colors = set(color_list)
            print("the chromatic number is", len(distinct_colors), "the probability is",
                  prob)
            finished_at = time.time()
            results.append([len(distinct_colors), prob / 10, finished_at - started_at])
    write_values(results)
Example #5
0
    def __init__(self, port, responder):
        """Create the agent process.

        Args:
            port: UDP port the agent will listen on.
            responder: Management-instrumentation object answering requests.
        """
        Process.__init__(self, daemon=True)
        timeout_s = 5

        self.__listening_port = port
        self.__responder = responder
        # Two parties rendezvous here (this process and its controller);
        # wait() gives up after timeout_s seconds.
        self.__barrier = Barrier(parties=2, timeout=timeout_s)
Example #6
0
    def _start(self, initialization_barrier: multiprocessing.Barrier):
        """Bridge-process entry point: set up file logging, open the serial
        port, start the two pump threads, then sleep until self.dying is set.

        Args:
            initialization_barrier: Barrier the reader/writer threads wait on
                during their own setup; aborted here on any setup error so
                peers are not left blocked forever.
        """
        try:
            log_dir = os.path.join(get_pros_dir(), 'logs')
            os.makedirs(log_dir, exist_ok=True)
            pros_logger = logging.getLogger(pros.__name__)
            pros_logger.setLevel(logging.DEBUG)
            log_file_name = os.path.join(get_pros_dir(), 'logs', 'serial-share-bridge.log')
            handler = logging.handlers.TimedRotatingFileHandler(log_file_name, backupCount=1)
            handler.setLevel(logging.DEBUG)
            fmt_str = '%(name)s.%(funcName)s:%(levelname)s - %(asctime)s - %(message)s (%(process)d) ({})' \
                .format(self._serial_port_name)
            handler.setFormatter(logging.Formatter(fmt_str))
            pros_logger.addHandler(handler)

            self.zmq_ctx = zmq.Context()
            # timeout is none, so blocks indefinitely. Helps reduce CPU usage when there's nothing being recv
            self.port = DirectPort(self._serial_port_name, timeout=None)
            self.from_device_thread = threading.Thread(target=self._from_device_loop, name='From Device Reader',
                                                       daemon=False, args=(initialization_barrier,))
            self.to_device_thread = threading.Thread(target=self._to_device_loop, name='To Device Reader',
                                                     daemon=False, args=(initialization_barrier,))
            self.dying = threading.Event()  # type: threading.Event
            self.from_device_thread.start()
            self.to_device_thread.start()

            # Park in long (10000 s) waits until kill() sets the dying event.
            while not self.dying.wait(10000):
                pass

            logger(__name__).info('Main serial share bridge thread is dying. Everything else should be dead: {}'.format(
                threading.active_count() - 1))
            self.kill(do_join=True)
        except Exception as e:
            # Unblock any thread parked on the barrier before reporting.
            initialization_barrier.abort()
            logger(__name__).exception(e)
class BarrierNameFilter(Filter):
    """Filter that ignores its input items and yields one process-name tag.

    A two-party barrier makes each filter() call rendezvous with a peer
    before the single "pid-<pid>" item is produced.
    """

    def __init__(self):
        self._barrier = Barrier(2)

    def filter(self, items: Iterable[Any]) -> Iterable[Any]:
        # Rendezvous with the second participant, then emit our tag.
        self._barrier.wait()
        tag = f"pid-{current_process().pid}"
        yield tag
Example #8
0
    def init(self):
        """Wire the pipeline graph, start every node process and wait until
        all of them (plus this process) reach the shared start barrier.
        """
        # Barrier Initialize for all sub-processes and main process
        self.barrier = Barrier(len(self.nodes) + 1)

        # Add edges from VIPPipeline to starting nodes
        for node in self.nodes:
            if len(node.in_edges) == 0:
                self.add_edges([self], [node])

        # Route terminal nodes (no outgoing edges) back into the pipeline.
        for node in self.nodes:
            if len(node.out_edges) == 0:
                self.add_edges([node], [self])

        # acquire locks in head node
        for out_edge in self.out_edges:
            out_edge.lock.acquire()

        # run all the nodes in parallel
        for node in self.nodes:
            p = mp.Process(target=node.start_process, args=())
            self.processes.append(p)

        # Fix: use a plain loop for the start() side effect instead of
        # building a throwaway list with a comprehension.
        for proc in self.processes:
            proc.start()

        self.barrier.wait()
Example #9
0
 def _to_device_loop(self, initialization_barrier: multiprocessing.Barrier):
     """Pump messages from the ZMQ 'to device' SUB socket to the serial port.

     Binds and subscribes, then loops until self.dying is set: b'kick'
     messages re-arm the watchdog timer, b'send' messages are written to the
     serial port. Any error aborts the initialization barrier so the peer
     thread is not left blocked.
     """
     try:
         to_ser_sock = self.zmq_ctx.socket(zmq.SUB)
         addr = 'tcp://{}:{}'.format(self._base_addr, self._to_port_num)
         to_ser_sock.bind(addr)
         to_ser_sock.setsockopt(zmq.SUBSCRIBE, b'')
         logger(__name__).info('Bound to device broadcaster as a subscriber to {}'.format(addr))
         # Watchdog kills the bridge if clients stop kicking within 10 s.
         watchdog = threading.Timer(10, self.kill)
         initialization_barrier.wait()
         watchdog.start()
         while not self.dying.is_set():
             msg = to_ser_sock.recv_multipart()
             if not msg or self.dying.is_set():
                 continue
             if msg[0] == b'kick':
                 logger(__name__).debug('Kicking watchdog on server {}'.format(threading.current_thread()))
                 watchdog.cancel()
                 # NOTE(review): msg[1][1] is a raw byte value interpreted as
                 # seconds for the re-armed timer — confirm the protocol.
                 watchdog = threading.Timer(msg[1][1] if len(msg) > 1 and len(msg[1]) > 0 else 5, self.kill)
                 watchdog.start()
             elif msg[0] == b'send':
                 # Fix: logger() takes a name string; every other call site in
                 # this module uses logger(__name__), not the instance.
                 logger(__name__).debug('Writing {} to {}'.format(bytes_to_str(msg[1]), self.port.port_name))
                 self.port.write(msg[1])
     except Exception as e:
         initialization_barrier.abort()
         logger(__name__).exception(e)
     logger(__name__).warning('To Device Broadcaster is dying now.')
     try:
         self.kill(do_join=False)
     except:
         # kill() may raise during teardown; exit quietly in that case.
         sys.exit(0)
    def __init__(self,
                 dataname_tuples,
                 pdgIDs,
                 nWorkers,
                 num_loaders,
                 filters=None):
        """Shared-memory dataset coordinator for multi-process loading.

        Args:
            dataname_tuples: List of per-class HDF5 file-path tuples (sorted
                on entry); one tuple holds one file path per class.
            pdgIDs: Particle IDs, mapped to dense indices in self.pdgIDs.
            nWorkers: Total worker count; batch_barrier covers the workers
                that are not loaders.
            num_loaders: Number of loader processes sharing self.data.
            filters: Optional event filters. Fix: default changed from a
                mutable [] to None (resolved to a fresh list below).
        """
        self.dataname_tuples = sorted(dataname_tuples)
        self.nClasses = len(dataname_tuples[0])
        self.total_files = len(dataname_tuples)  # per class
        self.num_per_file = len(dataname_tuples) * [0]
        self.num_loaders = num_loaders
        self.lock = RLock()
        # Bookkeeping for which file (and which index range of it) is loaded.
        self.fileInMemory = Value('i', 0, lock=self.lock)
        self.fileInMemoryFirstIndex = Value('i', 0, lock=self.lock)
        self.fileInMemoryLastIndex = Value('i', -1, lock=self.lock)
        self.mem_index = Value('i',
                               1)  # either 0 or 1. used for mem management.
        self.loadNext = Event()
        self.loadFile = Event()
        self.load_barrier = Barrier(self.num_loaders + 1)
        self.batch_barrier = Barrier(nWorkers - (self.num_loaders + 1))
        # One raw char buffer per loader (file path + slack for bookkeeping).
        self.worker_files = [
            RawArray(ctypes.c_char,
                     len(dataname_tuples[0][0]) + 50)
            for _ in range(self.num_loaders)
        ]
        self.data = {}
        ###########################################
        # prepare memory to share with workers #
        # take a sample file and get keys and over allocate
        # what if we overallocate for both classes?
        # we should overallocate for both classes
        # if user runs into memory problems, use fewer num_loaders.
        with h5py.File(dataname_tuples[0][0]) as sample:
            for key in sample.keys():
                old_shape = sample[key].shape
                size = self.nClasses * self.num_loaders
                self.new_shape = list(old_shape)
                for dim in old_shape:
                    size *= dim
                self.new_shape[
                    0] = self.nClasses * self.num_loaders * old_shape[0]
                buff = RawArray(ctypes.c_float,
                                size)  # prepare mem for num_loaders
                self.data[key] = np.frombuffer(buff, dtype=np.float32).reshape(
                    self.new_shape)  # map numpy array on buffer
            classID_buff = RawArray(
                ctypes.c_int, (2 * self.nClasses * self.num_loaders * 200))
            # Fix: np.int was removed in NumPy 1.24; the buffer holds C ints,
            # which np.intc matches exactly.
            self.data['classID'] = np.frombuffer(
                classID_buff,
                dtype=np.intc)  #.reshape(self.nClasses*self.num_loaders*200)
        ###########################################
        self.pdgIDs = {}
        self.filters = [] if filters is None else filters
        for i, ID in enumerate(pdgIDs):
            self.pdgIDs[ID] = i
        self.countEvents()
    def request_concurrent_wrapper(self, method_name:str, method_args:dict, thread_num:int, num_threads:int, barrier:mp.Barrier, recieve_pipe:mp.Pipe):
        """Fire one request in lockstep with sibling threads; pipe back the result.

        Args:
            method_name: One of 'Getter', 'FeedGenerator', 'FeedWebView', 'Putter'.
            method_args: Arguments for the selected request method.
            thread_num: Index of this thread, recorded on the response.
            num_threads: Total concurrent threads, recorded on the response.
            barrier: Barrier so all threads issue their requests together.
            recieve_pipe: Pipe end the response object is sent through.

        Raises:
            NotImplementedError: 'Putter' is not implemented yet.
            ValueError: Unknown method_name (fix: previously fell through and
                died with a confusing NameError on `response`).
        """
        # wait for all threads
        barrier.wait()

        # call the appropriate request method
        if method_name == 'Getter':
            if method_args['command'] == 'list':
                response = self.request_getter(command='list')
            else:
                response = self.request_getter(command='get_file_url', filename=method_args['filename'])
        elif method_name == 'FeedGenerator':
            response = self.request_feed_generator(num_items=method_args['num_items'])
        elif method_name == 'FeedWebView':
            response = self.request_feed_webview(num_items=method_args['num_items'])
        elif method_name == 'Putter':
            # TODO
            raise NotImplementedError()
        else:
            raise ValueError(f'Unknown method_name: {method_name!r}')

        # add concurrent metadata
        response.concurrent = True
        response.thread_num = thread_num
        response.num_threads = num_threads

        # return TestData
        recieve_pipe.send(response)
    def test(self):
        """Spawn PROCESS_COUNT processes that each log one near-FILE_SIZE line
        through MPRotatingFileHandler, then check exactly one line per process
        landed across the rolled files.
        """
        filename = str(tempfile.mktemp())

        def process(cur_count: int, barrier: Barrier):
            # Child body: isolated logger + rotating handler, one padded line,
            # then release the parent via the barrier.
            try:
                logger = logging.Logger('a logger')
                handler = rolling.MPRotatingFileHandler(
                    filename, 'a', self.FILE_SIZE, self.FILE_COUNT
                )
                logger.setLevel(20)
                logger.addHandler(handler)
                sleep(1)  # This is just to simulate presence of handlers
                s = 'Proc {}, Pid {}'.format(cur_count, os.getpid())
                s += '*' * (self.FILE_SIZE - len(s) - 2)
                logger.info(s)
            finally:
                # Always release the parent, even if logging failed.
                barrier.wait()

        b = Barrier(self.PROCESS_COUNT + 1)
        processes = [Process(target=process, args=(i, b,)) for i in range(self.PROCESS_COUNT)]

        for p in processes:
            p.start()

        # Parent is the extra party: returns once every child has logged.
        b.wait()

        # Sum line counts over every rolled file sharing the base name.
        # NOTE(review): the trailing -1 presumably offsets _file_len's
        # counting convention — confirm against its implementation.
        base_filename = os.path.basename(filename)
        count = sum([_file_len('{}/{}'.format(os.path.dirname(filename), x))
                     for x in os.listdir(os.path.dirname(filename)) if base_filename in x]) - 1
        self.assertEqual(self.PROCESS_COUNT, count)
Example #13
0
    def train(self,
              num_episodes: int,
              batch_size: int,
              decay: float,
              n_steps: int,
              experience_queue: Queue,
              queue_barrier: Barrier,
              exit_condition: Optional[Callable[[], bool]] = None) -> None:
        """Train on the environment for the given number of episodes.

        Delegates the training loop to ``self.om`` and then joins the queue
        barrier so that no process moves on before every process has finished
        its queue work.

        Args:
            num_episodes: The number of episodes to train for.
            batch_size: The number of ready experiences to train on at a time.
            decay: The decay of the next.
            n_steps: The number of steps.
            experience_queue: The queue to send experiences to.
            queue_barrier: A barrier to use when all queue tasks are complete
                on all processes.
            exit_condition: An alternative exit condition to num episodes
                which will be used if given.
        """
        self.om.train(num_episodes, batch_size, decay, n_steps,
                      experience_queue, exit_condition=exit_condition)

        # Rendezvous: wait for all processes to finish using the queues.
        queue_barrier.wait()
Example #14
0
 def setUp(self):
     """Build an isolated on-disk fixture and start the BackupScheduler under
     test in a child process, with NodeHandlerProcess mocked out.
     """
     try:
         from pytest_cov.embed import cleanup_on_sigterm
     except ImportError:
         pass
     else:
         # Let pytest-cov flush coverage data if the child is terminated.
         cleanup_on_sigterm()
     self.barrier = Barrier(2)
     MockNodeHandler.BARRIER = self.barrier
     # Swap in the mock handler; keep the original so it can be restored.
     self.node_handler = node_handler_process.NodeHandlerProcess
     node_handler_process.NodeHandlerProcess = MockNodeHandler
     # Imported here so the module sees the patched NodeHandlerProcess.
     from src.backup_scheduler.backup_scheduler import BackupScheduler
     shutil.rmtree('/tmp/disk_db_concus', ignore_errors=True)
     os.mkdir('/tmp/disk_db_concus')
     shutil.rmtree('/tmp/data_for_backup', ignore_errors=True)
     os.mkdir('/tmp/data_for_backup')
     with open('/tmp/data_for_backup/data', 'w') as data_file:
         data_file.write("adasdsa")
     database = DiskDatabase('/tmp/disk_db_concus')
     shutil.rmtree('/tmp/backup_scheduler_path', ignore_errors=True)
     os.mkdir('/tmp/backup_scheduler_path')
     with open('/tmp/backup_scheduler_path/trash', 'w') as trash_file:
         trash_file.write("trash")
     # One-way pipes: scheduler reads commands from the first, the test reads
     # replies from the second.
     backup_scheduler_recv, self.client_listener_send = Pipe(False)
     self.client_listener_recv, backup_scheduler_send = Pipe(False)
     backup_scheduler = BackupScheduler('/tmp/backup_scheduler_path', database,
                                        backup_scheduler_recv, backup_scheduler_send, 10)
     self.p = Process(target=backup_scheduler)
     self.p.start()
Example #15
0
def run(test, count, concurrency, *, loop, verbose, profile):
    """Benchmark driver (old-style asyncio coroutine using ``yield from``).

    Starts the server under test in a child process, warms it up with ten
    /prepare requests, runs attack(), asks the server to stop and returns
    (requests-per-second, data).

    Args:
        test: Server entry point run in the child; names the run when verbose.
        count: Total request count forwarded to attack().
        concurrency: Concurrent client count forwarded to attack().
        loop: asyncio event loop used by the aiohttp client.
        verbose: Print progress text instead of a single dot.
        profile: Forwarded to the server process.
    """
    if verbose:
        print("Prepare")
    else:
        print('.', end='', flush=True)
    host, port = find_port()
    barrier = Barrier(2)
    server = Process(target=test, args=(host, port, barrier, profile))
    server.start()
    # Block until the server process signals it is listening.
    barrier.wait()

    url = 'http://{}:{}'.format(host, port)

    connector = aiohttp.TCPConnector(loop=loop)
    with aiohttp.ClientSession(connector=connector) as client:

        for i in range(10):
            # make server hot
            resp = yield from client.get(url+'/prepare')
            assert resp.status == 200, resp.status
            yield from resp.release()

        if verbose:
            test_name = test.__name__
            print("Attack", test_name)
        rps, data = yield from attack(count, concurrency, client, loop, url)
        if verbose:
            print("Done")

        # Ask the server process to shut itself down.
        resp = yield from client.get(url+'/stop')
        assert resp.status == 200, resp.status
        yield from resp.release()

    server.join()
    return rps, data
    def __init__(self,
                 mat_vehicle: '_MatplotlibVehicle',
                 dashboard: '_MatplotlibSimpleDashboard',
                 opponent_mat_vehicles: list,
                 asynchronous: bool = False,
                 record: bool = False,
                 record_folder: str = "OUTPUT/",
                 **kwargs):
        """Create the plotting front-end and spawn its worker process.

        Args:
            mat_vehicle: Vehicle artist to draw.
            dashboard: Dashboard artist to draw.
            opponent_mat_vehicles: Opponent vehicle artists.
            asynchronous: If True, run in asynchronous mode.
            record: Whether to record output.
            record_folder: Output directory for recordings.
            **kwargs: Forwarded to the parent class.
        """
        super().__init__(mat_vehicle, dashboard, opponent_mat_vehicles, record,
                         record_folder, **kwargs)

        self._asynchronous = asynchronous

        self._events['close_event'] = self._close

        # Initialize multiprocessing objects
        # The MacOSX backend cannot run in a forked child; force spawn.
        if plt.get_backend() == "MacOSX":
            set_start_method("spawn", force=True)

        # Communication pipelines between processes
        self._event_sender, self._event_receiver = Pipe()
        # Capacity-1 queue (presumably only the latest state matters; confirm)
        self._queue = Queue(1)

        # Barrier that will wait for initialization to occur
        self._barrier = Barrier(2)

        self._p = Process(target=self.run)
        self._p.start()

        # Wait for the matplotlib window to initialize
        self._barrier.wait()
Example #17
0
    def _start_single_replay(
        self,
        replay_index: int,
        offset: int,
        skip_offset_barrier: multiprocessing.Barrier,
        latest_timestamp: ValueProxy,
    ):
        """Replay one recording shard, preserving the original message timing.

        Args:
            replay_index: Index of this replay among
                skip_offset_barrier.parties parallel replays.
            offset: Number of recorded messages to skip before starting.
            skip_offset_barrier: Barrier so all replays begin together after
                fast-forwarding.
            latest_timestamp: Shared value handed to the processor.
        """
        setup_logger("info")
        self._create_producer()
        self.processor.set_latest_timestamp_valueproxy(latest_timestamp)

        _, lines = open_recording(self.bucket, self.key)

        # Skip header
        for _ in range(4):
            next(lines)

        # "Fast-forward" recording to offset
        for _ in range(offset):
            next(lines)

        logger.info(
            f"Replay {replay_index+1} of {skip_offset_barrier.parties} ready")
        skip_offset_barrier.wait()

        # Scheduler is used to replay messages with original relative timing
        scheduler = MultithreadingScheduler()
        scheduler.start()

        # Only the first replay emits latency markers.
        if replay_index == 0:
            _start_latency_marker_generator(
                self.config, self.processor.generate_latency_markers,
                self._ingest)

        # Necessary to start immediately despite "fast-forwarding"
        recording_start_offset = None

        # Line layout per the regex: t_offset, quoted topic, two digits,
        # base64 payload.
        message_regex = re.compile(r'(\S+) "(.+)" (\d) (\d) (\S*)')
        for i, line in enumerate(lines):
            # Prevent queue from growing too fast
            # Only check every 100 iterations for performance reasons
            if i % 100 == 0 and scheduler.is_queue_full():
                time.sleep(0.2)
                continue

            t_offset, topic, _, _, payload = message_regex.match(line).groups()
            t_offset = float(t_offset)
            payload = json.loads(base64.b64decode(payload))

            # First replayed message defines time zero.
            if recording_start_offset is None:
                recording_start_offset = t_offset

            scheduler.schedule(
                t_offset - recording_start_offset,
                functools.partial(self._process_and_ingest, topic, payload,
                                  replay_index),
            )

        scheduler.stop()
Example #18
0
def count_down_m(cnt: int, b: multiprocessing.Barrier):
    """CPU-bound benchmark body: after passing the barrier, busy-loop ``cnt``
    down to zero and print the start time and the elapsed wall-clock seconds.
    """
    b.wait()
    print(f'Starting {time.ctime()}')
    started = time.time()
    remaining = cnt
    while remaining > 0:
        remaining -= 1
    delta = time.time() - started
    print(f"time taken {delta}")
Example #19
0
 def setUp(self) -> None:
     """Reset per-test fixtures; hook pytest-cov's SIGTERM cleanup when the
     plugin is installed so coverage from terminated children is kept.
     """
     try:
         from pytest_cov.embed import cleanup_on_sigterm
     except ImportError:
         pass
     else:
         cleanup_on_sigterm()
     self.barrier = Barrier(2)
     self.p = None
Example #20
0
class SNMPAgent(Process):
    ''' Execute a SNMP agent Process'''
    def __init__(self, port, responder):
        """Store configuration; the barrier synchronises run() with __enter__()."""
        Process.__init__(self, daemon=True)
        timeout_s = 5

        self.__listening_port = port
        self.__responder = responder
        # Two parties: the agent process (run) and the owner (__enter__).
        self.__barrier = Barrier(parties=2, timeout=timeout_s)

    def run(self):
        """Child-process body: configure pysnmp and serve requests forever."""
        snmpEngine = engine.SnmpEngine()

        # Listen for SNMP requests on localhost:<port> over UDP.
        config.addSocketTransport(
            snmpEngine,
            udp.domainName,
            udp.UdpTransport().openServerMode(('127.0.0.1',
                                               self.__listening_port))
        )

        config.addV1System(
                     snmpEngine, 'my-area', 'public', contextName='my-context')

        config.addVacmUser(snmpEngine=snmpEngine,
                           securityModel=2,
                           securityName='my-area',
                           securityLevel='noAuthNoPriv',
                           readSubTree=SNMPAgentResponder.OID_PREFIX,
                           writeSubTree=(),
                           notifySubTree=())

        snmpContext = context.SnmpContext(snmpEngine)

        # Route 'my-context' queries to the injected responder object.
        snmpContext.registerContextName(
            v2c.OctetString('my-context'),         # Context Name
            self.__responder                       # Management Instrumentation
        )

        cmdrsp.GetCommandResponder(snmpEngine, snmpContext)

        snmpEngine.transportDispatcher.jobStarted(1)
        # Signal __enter__ that the agent is ready to serve.
        self.__barrier.wait()

        # TODO with statement here!
        try:
            snmpEngine.transportDispatcher.runDispatcher()
        except:
            snmpEngine.transportDispatcher.closeDispatcher()
            raise

    def __enter__(self):
        """Start the agent process and block until it is ready."""
        self.start()
        self.__barrier.wait()
        return self

    def __exit__(self, type, value, traceback):
        self.terminate()
Example #21
0
class RabbitConnectionExample:
    """
    RabbitMQ operations
    """

    def __init__(self):
        """
        Initializes the class

        Reads the server URL from the RABBITMQ_URL environment variable; the
        barrier synchronises channel_callback with run().
        """
        self._url = os.environ['RABBITMQ_URL']
        self._barrier = Barrier(2, timeout=120)

    def connection_callback(self, conn):
        """
        Run on connecting to the server

        :param conn: The connection created in the previous step
        """
        self._connection.channel(on_open_callback=self.channel_callback)

    def channel_callback(self, ch):
        """
        Publish on the channel. You can use other methods with callbacks but only the channel
        creation method provides a channel. Other methods provide a frame you can choose to
        discard.

        :param ch: The channel established
        """
        properties = pika.BasicProperties(content_type='application/json')
        ch.basic_publish(exchange='test_exchange',
                                    routing_key='tests',
                                    properties=properties,
                                    body='Hello CloudAMQP!')
        # Meet run() at the barrier so it knows publishing is done.
        self._barrier.wait(timeout=1)
        ch.close()
        self._connection.close()

    def run(self):
        """
        Runs the example

        Opens a SelectConnection, pumps its ioloop on a worker thread, waits
        at the barrier for channel_callback, then stops the loop.
        """
        print("Running")
        def run_io_loop(conn):
            conn.ioloop.start()

        params = pika.URLParameters(self._url)
        self._connection = pika.SelectConnection(
            params, on_open_callback=self.connection_callback)
        if self._connection:
            t = threading.Thread(target=run_io_loop, args=(self._connection, ))
            t.start()
            self._barrier.wait(timeout=30)
            print("Waiting on Barrier")
            self._connection.ioloop.stop()
        else:
            raise ValueError
Example #22
0
def manage_data_m(b: multiprocessing.Barrier):
    """Benchmark one Celery 'tasks.data' round trip.

    Waits at the barrier so all sibling processes fire together, then sends
    the task, blocks on its result and logs the elapsed seconds.
    """
    celery_app = new_app()
    name = multiprocessing.process.current_process().name
    b.wait()
    started = time.time()
    async_result = celery_app.send_task('tasks.data', args=())
    data = async_result.get()
    elapsed = time.time() - started
    logging.info(f'The Overall time taken is {elapsed}')
Example #23
0
def ext_pot_init(ext_model, ext_kwargs, Potential_Q_list: list,
                 barrier: Barrier, N_NEURON: int, ticks: int):
    """Generate external potentials each tick and fan them out per thread.

    Every tick, ext_model(**ext_kwargs) produces events whose last element is
    a neuron id. Events are bucketed by id // N_NEURON; after the barrier,
    each bucket is put on the matching queue in Potential_Q_list.
    """
    bucket_count = len(Potential_Q_list)
    for _ in range(ticks):
        buckets = [[] for _ in range(bucket_count)]
        for event in ext_model(**ext_kwargs):
            buckets[event[-1] // N_NEURON].append(event)
        # Synchronise with the consumers before publishing this tick.
        barrier.wait()
        for target_queue, bucket in zip(Potential_Q_list, buckets):
            target_queue.put(bucket)
Example #24
0
    def startBackgroundStreaming(self,
                                 isMaster,
                                 intervalInMicroseconds,
                                 filePath,
                                 bufferSize=2 << 20,
                                 axis0=False,
                                 axis1=False,
                                 axis2=False):
        """
        Starts concurrent and permanent position streaming to file in background.
        Programm must run in a main function:

        def main():
            do_everything()

        if __name__ == '__main__':
            main()

        Parameters
        ----------
        isMaster : bool
            Master
        intervalInMicroseconds : int
            Sample interval (in us) of the position samples
        filePath : str
            target file
        bufferSize : int
            Size of each buffer in bytes
        axis0 : bool, default: False
            Should Axis 0 be recorded?
        axis1 : bool, default: False
            Should Axis 1 be recorded?
        axis2 : bool, default: False
            Should Axis 2 be recorded?

        Raises
        ------
        Exception
            If a background stream recording has already been started.
        """

        if self.background_process is not None:
            raise Exception("Stream recording already started")

        # Rendezvous with the writer process once it has started up.
        barrier = Barrier(2)
        # Shared flag (presumably polled by fileWriter to stop — confirm).
        stopped = Value('b', False)

        self.background_process = Process(
            target=fileWriter,
            args=(self.device.address, isMaster, intervalInMicroseconds,
                  filePath, bufferSize, axis0, axis1, axis2, barrier, stopped))
        # Daemonize so the writer dies with the main program.
        self.background_process.daemon = True
        self.background_process.stopped = stopped
        self.background_process.start()

        barrier.wait()
        # Brief pause after the rendezvous before returning to the caller.
        sleep(0.1)
        return
Example #25
0
 def setUp(self):
     """Start the client-listener process under test with fresh pipes."""
     try:
         from pytest_cov.embed import cleanup_on_sigterm
     except ImportError:
         pass
     else:
         # Let pytest-cov flush coverage data if the child is terminated.
         cleanup_on_sigterm()
     self.barrier = Barrier(2)
     self.client_listener = None
     # One-way pipe halves shared with the child process.
     self.backup_scheduler_recv, client_listener_send = Pipe(False)
     client_listener_recv, self.backup_scheduler_send = Pipe(False)
     self.p = Process(target=self._launch_process,
                      args=(client_listener_send, client_listener_recv))
     self.p.start()
 def process(cur_count: int, barrier: Barrier):
     """Child-process body: log exactly one padded line through the rotating
     handler, then release the parent via the barrier.

     NOTE(review): references ``self`` and ``filename`` that are not in this
     signature — presumably this was defined inside a test method whose
     enclosing scope is not shown here; confirm before reuse.
     """
     try:
         logger = logging.Logger('a logger')
         handler = rolling.MPRotatingFileHandler(
             filename, 'a', self.FILE_SIZE, self.FILE_COUNT
         )
         logger.setLevel(20)
         logger.addHandler(handler)
         sleep(1)  # This is just to simulate presence of handlers
         s = 'Proc {}, Pid {}'.format(cur_count, os.getpid())
         s += '*' * (self.FILE_SIZE - len(s) - 2)
         logger.info(s)
     finally:
         # Always release the parent, even if logging failed.
         barrier.wait()
def publish_a_file(dir, port) -> Process:
    """Serve ``dir`` over HTTP on ``port`` for exactly one request.

    Spawns a child process that binds a TCPServer, waits until it is
    listening, and returns the already-started Process handle.
    """
    ready = Barrier(2)

    def serve_once(dir, port, barrier):
        # Bind first, signal readiness, then answer a single request.
        with TCPServer(("", port), simple_http_handler(dir)) as server:
            barrier.wait()
            server.handle_request()

    proc = Process(target=serve_once, args=(dir, port, ready))
    proc.start()
    ready.wait()
    # Brief pause before returning (presumably lets the server settle).
    time.sleep(0.05)

    return proc
Example #28
0
def benchmark_throughput(workload_path,
                         workload_off,
                         client_builder,
                         n_ops,
                         n_procs,
                         log_interval=100000):
    """Run a workload across n_procs processes and wait for completion.

    Each process receives an equal slice of n_ops operations starting at its
    own offset into the workload; a shared barrier lets them begin together.
    """
    start_barrier = Barrier(n_procs)
    logging.info(
        "[Master] Creating processes with workload_path=%s, workload_off=%d, n_ops=%d, n_procs=%d..."
        % (workload_path, workload_off, n_ops, n_procs))
    ops_per_proc = n_ops / n_procs
    # NOTE(review): the per-process offset can be fractional when n_ops is
    # not divisible by n_procs — confirm _load_and_run_workload handles it.
    workers = [
        Process(target=_load_and_run_workload,
                args=(
                    start_barrier,
                    workload_path,
                    workload_off + i * ops_per_proc,
                    client_builder,
                    int(ops_per_proc),
                    log_interval,
                )) for i in range(n_procs)
    ]

    for worker in workers:
        worker.start()

    for worker in workers:
        worker.join()
    logging.info("[Master] Benchmark complete.")
Example #29
0
class economic:
    """Random economic-event generator intended to run in a child process.

    Flags are class attributes so they act as shared template state;
    ``Event`` now records events on the class itself (see bug note below).
    """
    evenement_E = 3421
    crash_fincancier = False
    inflation = False
    anarchie = False
    # NOTE(review): this list captures the flag VALUES at class-creation
    # time (all False); it does not track later flag updates. A reader
    # polling it in a loop will never see an event.
    listEvenementEconomics = [crash_fincancier, inflation, anarchie]
    b = Barrier(1, timeout=10)
    # Pipe used to notify the parent process when an event fires.
    parent_conn, child_conn = Pipe()

    def Event(self, evenement_E, child_conn, barrier):
        # Three possible events; on each synchronization round draw a
        # random number and fire the matching event (~1/4000 chance each).
        while True:
            evenement_E = randint(1, 4000)
            if evenement_E == 921:
                # Bug fix: assign on the class, not a method-local —
                # the original `crash_fincancier = True` bound a local
                # variable and recorded nothing.
                economic.crash_fincancier = True
                child_conn.send("crash_fincancier")  # notify the parent
            elif evenement_E == 123:
                economic.inflation = True
                child_conn.send("inflation")  # notify the parent
            elif evenement_E == 3900:
                economic.anarchie = True
                child_conn.send("anarchie")  # notify the parent
            barrier.wait()
Example #30
0
    def test_send_message_to_unexistent_user(self):
        """A send to a nonexistent user (999) must not break later delivery."""

        def writer(sync):
            conn = ChatServerConnector('localhost', 6500, 11)
            # First, message a user that does not exist.
            conn.send_message(TextMessage(11, 999, "Hola amiwi"))
            sync.wait()
            msg = TextMessage(11, 12, "Hola don pepito")
            conn.send_message(msg)
            # Poll until the delivery receipt arrives.
            updates = conn.get_news()
            while not updates:
                updates = conn.get_news()
            assert len(updates) == 1
            assert updates[0].message_id == msg.message_id
            exit(0)

        def reader(sync):
            conn = ChatServerConnector('localhost', 6500, 12)
            sync.wait()
            updates = conn.get_news()
            while not updates:
                updates = conn.get_news()
            assert len(updates) == 1
            assert updates[0].message.sender == 11
            exit(0)

        sync = Barrier(2)
        writer_proc = Process(target=writer, args=(sync, ))
        reader_proc = Process(target=reader, args=(sync, ))
        writer_proc.start()
        reader_proc.start()
        writer_proc.join()
        reader_proc.join()
        # exit(0) in each child means every assert inside it passed.
        self.assertEqual(writer_proc.exitcode, 0)
        self.assertEqual(reader_proc.exitcode, 0)
Example #31
0
    def test_simple_receipt_message(self):
        """Sender (6) gets a receipt and receiver (5) gets the message."""

        def writer(sync):
            conn = ChatServerConnector('localhost', 6500, 6)
            sync.wait()
            msg = TextMessage(6, 5, "Hola don pepito")
            conn.send_message(msg)
            # Poll until the delivery receipt arrives.
            updates = conn.get_news()
            while not updates:
                updates = conn.get_news()
            assert len(updates) == 1
            assert updates[0].message_id == msg.message_id
            exit(0)

        def reader(sync):
            conn = ChatServerConnector('localhost', 6500, 5)
            sync.wait()
            updates = conn.get_news()
            while not updates:
                updates = conn.get_news()
            assert len(updates) == 1
            assert updates[0].message.sender == 6
            exit(0)

        sync = Barrier(2)
        writer_proc = Process(target=writer, args=(sync, ))
        reader_proc = Process(target=reader, args=(sync, ))
        writer_proc.start()
        reader_proc.start()
        writer_proc.join()
        reader_proc.join()
        # exit(0) in each child means every assert inside it passed.
        self.assertEqual(writer_proc.exitcode, 0)
        self.assertEqual(reader_proc.exitcode, 0)
Example #32
0
def connectProcedure():
    """Connect the configured peripherals and start logging.

    Uses module-level GUI widgets and config (connectButton, macAdresses,
    LOCATIONS, runLogger, runProcess, ...) — must run in the GUI thread.
    Side effects: toggles button states, creates a timestamped recordings
    directory, chdir()s into it, and spawns one logger process plus one
    acquisition process per configured peripheral.
    """
    connectButton.config(state="disabled")
    disconnectButton.config(state="normal")
    identifyActivityButton.config(state="normal")
    identifyDevicesButton.config(state="disabled")
    print("Connecting the devices...")
    # Bug fix: compute the timestamp ONCE. The original called
    # datetime.now().strftime('%c') separately for mkdir and chdir; if the
    # two calls straddled a second boundary, chdir targeted a directory
    # that was never created.
    recordings_dir = os.path.join(
        os.getcwd(), "Recordings - " + dt.datetime.now().strftime('%c'))
    os.mkdir(recordings_dir)
    os.chdir(recordings_dir)
    # One peripheral per configured (non-empty) MAC address slot.
    peripherals = [
        ACM(macAdresses[i], i, LOCATIONS[i]) for i in range(5)
        if macAdresses[i] != ''
    ]
    # Barrier synchronizes the per-peripheral acquisition processes.
    barrier = Barrier(len(peripherals))
    # Dedicated process drains the shared logging queue.
    queue = Queue(-1)
    process = Process(target=runLogger, args=(queue, ))
    process.start()
    # Start one acquisition process per peripheral.
    for peripheral in peripherals:
        process = Process(target=runProcess, args=(peripheral, barrier, queue))
        process.start()
Example #33
0
def run_async_kv_benchmark(d_host,
                           d_port,
                           l_port,
                           data_path,
                           workload_path,
                           workload_off=0,
                           n_ops=100000,
                           n_procs=1,
                           max_async=10000):
    """Run the async KV workload split evenly across ``n_procs`` processes.

    Args:
        d_host, d_port, l_port: Directory host/port and lease port.
        data_path: Path of the data being benchmarked.
        workload_path: Path to the workload file each worker loads.
        workload_off: Base offset; worker ``i`` starts at
            ``workload_off + i * (n_ops // n_procs)``.
        n_ops: Total operation count, divided evenly among workers.
        n_procs: Number of worker processes (also the barrier party count).
        max_async: Cap on in-flight async operations per worker.

    Blocks until every worker process has finished.
    """
    barrier = Barrier(n_procs)
    # Integer division: the original `n_ops / n_procs` produced FLOAT
    # offsets for every worker but i == 0, while the per-worker count was
    # truncated with int(). The n_ops % n_procs remainder is dropped,
    # matching the original truncation.
    ops_per_proc = n_ops // n_procs
    benchmark = [
        Process(target=load_and_run_workload,
                args=(
                    barrier,
                    workload_path,
                    workload_off + i * ops_per_proc,
                    d_host,
                    d_port,
                    l_port,
                    data_path,
                    ops_per_proc,
                    max_async,
                )) for i in range(n_procs)
    ]

    for b in benchmark:
        b.start()

    for b in benchmark:
        b.join()

    logging.info("[Master] Benchmark complete.")
Example #34
0
def main():
	"""Race two workers inserting the same primary key and report outcomes."""
	print("Doing setup...")
	conn = psycopg2.connect(dsn)
	curs = conn.cursor()
	curs.execute("CREATE TABLE IF NOT EXISTS concurrent_tx(id integer primary key);")
	conn.commit()
	print("done")

	print("Creating workers...")
	# 3 parties: the two workers plus this thread, so the barrier releases
	# only once every worker has its connection and is also waiting.
	run_barrier = Barrier(3, timeout=10)
	# Both workers race to insert the same row.
	workers = [
		ConcurrentRunner("runner1", "INSERT INTO concurrent_tx(id) VALUES (1);", run_barrier, delay_seconds=0),
		ConcurrentRunner("runner2", "INSERT INTO concurrent_tx(id) VALUES (1);", run_barrier, delay_seconds=0.01),
	]
	print("Starting workers...")
	for worker in workers:
		worker.start()
	print("Releasing barrier...")
	run_barrier.wait()
	print("Waiting for results...")
	# Collect each worker's outcome, then wait for termination.
	for worker in workers:
		worker.get_result()
	for worker in workers:
		worker.join()
	# Report per-worker results.
	for worker in workers:
		if worker.exception is not None:
			print("Worker {0} got exception: {1}".format(worker.name, worker.exception))
		elif worker.result is not None:
			print("Worker {0} got result: {1}".format(worker.name, worker.result))
		else:
			print("Worker {0} succeeded silently.".format(worker.name))