Example no. 1
    def add_unit_and_extend_linear_order(self, U):
        '''
        Add a (compliant) unit to the poset, try to find a new timing unit and, if successful, extend the linear order.
        :param Unit U: the unit to be added to the poset
        '''
        #NOTE: it is assumed at this point that U is not yet in the poset
        assert U.hash() not in self.poset.units, "A duplicate unit is being added to the poset."
        self.poset.add_unit(U)
        self.unordered_units.append(U)
        if self.poset.is_prime(U):

            with timer(self.process_id, 'attempt_timing'):
                if consts.PRECOMPUTE_POPULARITY:
                    self.poset.precompute_popularity_proof(U)
                new_timing_units = self.poset.attempt_timing_decision()
            timer.write_summary(where=self.logger, groups=[self.process_id])
            timer.reset(self.process_id)

            self.logger.info(
                f'prime_unit {self.process_id} | New prime unit at level {U.level} : {U.short_name()}'
            )

            for U_timing in new_timing_units:
                self.logger.info(
                    f'timing_new {self.process_id} | Timing unit at level {U_timing.level} established.'
                )
            for U_timing in new_timing_units:
                with timer(self.process_id, f'linear_order_{U_timing.level}'):
                    units_to_order = []
                    updated_unordered_units = []
                    for V in self.unordered_units:
                        if self.poset.below(V, U_timing):
                            units_to_order.append(V)
                        else:
                            updated_unordered_units.append(V)

                    ordered_units = self.poset.break_ties(units_to_order)
                    self.linear_order += [W.hash() for W in ordered_units]
                    self.unordered_units = updated_unordered_units

                    printable_unit_hashes = ' '.join(W.short_name() for W in ordered_units)
                    n_txs = self.process_txs_in_unit_list(ordered_units)

                self.logger.info(
                    f'add_linear_order {self.process_id} | At lvl {U_timing.level} added {len(units_to_order)} units and {n_txs} txs to the linear order {printable_unit_hashes}'
                )
                timer.write_summary(where=self.logger,
                                    groups=[self.process_id])
                timer.reset(self.process_id)
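
The timer object used above (called as a context manager with a group and a label, plus write_summary and reset) is assumed rather than shown. The following is a minimal sketch of a helper with that interface; it is an illustrative stand-in, not the project's actual implementation.

import time
from collections import defaultdict
from contextlib import contextmanager

class SketchTimer:
    # Accumulates elapsed wall-clock time per (group, label) pair.
    def __init__(self):
        self._totals = defaultdict(lambda: defaultdict(float))

    @contextmanager
    def __call__(self, group, label):
        # Usage: with timer(group, 'attempt_timing'): ...
        start = time.perf_counter()
        try:
            yield
        finally:
            self._totals[group][label] += time.perf_counter() - start

    def write_summary(self, where, groups):
        # Write the accumulated totals for the given groups to a logger-like object.
        for group in groups:
            for label, total in self._totals[group].items():
                where.info(f'timer {group} | {label}: {total:.6f} s')

    def reset(self, group):
        # Forget everything recorded for this group.
        self._totals.pop(group, None)

timer = SketchTimer()
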
Example no. 2
    def _verify_signatures_and_add_units(self, units_received, peer_id, mode,
                                         ids):
        with timer(ids, 'verify_signatures'):
            successful = self._verify_signatures(units_received, mode, ids)
        if not successful:
            self.logger.error(
                f'{mode}_invalid_sign {ids} | Got a unit from {peer_id} with invalid signature; aborting'
            )
            return False

        with timer(ids, 'add_units'):
            successful = self._add_units(units_received, peer_id, mode, ids)
        if not successful:
            self.logger.error(
                f'{mode}_not_compliant {ids} | Got a unit from {peer_id} that does not comply with the rules; aborting'
            )
            return False
        return True
Example no. 3
    def create_unit(self, txs):
        '''
        Attempts to create a new unit in the poset.
        :param list txs: the transactions to include in the unit
        :returns: A new unit if creation was successful, None otherwise
        '''
        with timer(self.process_id, 'create_unit'):
            U = create_unit(self.poset, self.process_id, txs)

        return U
Example no. 4
    async def _receive_units(self, channel, mode, ids):
        self.logger.info(
            f'receive_units_start_{mode} {ids} | Receiving units from {channel.peer_id}'
        )
        data = await channel.read()
        n_bytes = len(data)
        self.logger.info(
            f'receive_units_bytes_{mode} {ids} | Received {n_bytes} bytes from {channel.peer_id}'
        )
        with timer(ids, 'unpickle_units'):
            units_received = pickle.loads(data)
        self.logger.info(
            f'receive_units_done_{mode} {ids} | Received {n_bytes} bytes and {len(units_received)} units'
        )
        return units_received
Example no. 5
    async def _send_units(self, to_send, channel, mode, ids):
        self.logger.info(
            f'send_units_start_{mode} {ids} | Sending units to {channel.peer_id}'
        )
        with timer(ids, 'pickle_units'):
            data = pickle.dumps(to_send)
        self.logger.info(
            f'send_units_wait_{mode} {ids} | Sending {len(to_send)} units and {len(data)} bytes to {channel.peer_id}'
        )
        await channel.write(data)
        self.logger.info(
            f'send_units_sent_{mode} {ids} | Sent {len(to_send)} units and {len(data)} bytes to {channel.peer_id}'
        )
        self.logger.info(
            f'send_units_done_{mode} {ids} | Units sent to {channel.peer_id}')
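
Both helpers above rely on a channel object exposing peer_id plus awaitable read() and write() methods. The sketch below shows one plausible way such a channel could work, using length-prefixed frames over asyncio streams; the project's real Channel class may differ, so treat the framing scheme and class name as assumptions.

import asyncio
import struct

class SketchChannel:
    # Hypothetical length-prefixed channel with the read()/write() interface assumed above.
    def __init__(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter, peer_id):
        self.reader = reader
        self.writer = writer
        self.peer_id = peer_id

    async def write(self, data):
        # Prefix each message with a 4-byte big-endian length so the peer knows how much to read.
        self.writer.write(struct.pack('>I', len(data)) + data)
        await self.writer.drain()

    async def read(self):
        # Read the length header, then exactly that many payload bytes.
        (length,) = struct.unpack('>I', await self.reader.readexactly(4))
        return await self.reader.readexactly(length)
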
Example no. 6
    async def listener(self, peer_id):
        '''
        Listen indefinitely for incoming syncs from process peer_id.
        This is the counterpart of sync; it follows the same 3-exchange protocol.
        '''
        channel = self.listen_channels[peer_id]
        while True:
            # step 1
            their_poset_info, ids = await self._receive_poset_info(
                channel, 'listener', None)

            self.n_recv_syncs += 1
            self.logger.info(
                f'listener_sync_no {ids} | Number of syncs is {self.n_recv_syncs}'
            )

            if self.n_recv_syncs > consts.N_RECV_SYNC:
                self.logger.info(
                    f'listener_too_many_syncs {ids} | Too many syncs, rejecting {peer_id}'
                )
                await channel.reject()
                await self.maybe_close(channel)
                self.n_recv_syncs -= 1
                continue

            self.logger.info(
                f'listener_establish {ids} | Connection established with {peer_id}'
            )

            # step 2
            await self._send_poset_info(channel, 'listener', ids)
            with timer(ids, 'prepare_units'):
                to_send, to_request = units_to_send(self.process.poset,
                                                    their_poset_info)
            await self._send_units(to_send, channel, 'listener', ids)
            await self._send_requests(to_request, channel, 'listener', ids)

            # step 3
            units_received = await self._receive_units(channel, 'listener',
                                                       ids)
            their_requests = await self._receive_requests(
                channel, 'listener', ids)

            # step 4 (only if they requested something)
            if any(their_requests):
                self.logger.info(
                    f'listener_extended {ids} | Sync with {peer_id} extended due to forks'
                )
                with timer(ids, 'prepare_units'):
                    to_send, _ = units_to_send(self.process.poset,
                                               their_poset_info,
                                               their_requests)
                await self._send_units(to_send, channel, 'listener', ids)

            if self._verify_signatures_and_add_units(units_received, peer_id,
                                                     'listener', ids):
                self.logger.info(
                    f'listener_succ {ids} | Syncing with {peer_id} successful')
                timer.write_summary(where=self.logger, groups=[ids])
            else:
                self.logger.info(
                    f'listener_fail {ids} | Syncing with {peer_id} failed')

            await self.maybe_close(channel)
            self.n_recv_syncs -= 1
Example no. 7
    async def sync(self, peer_id):
        '''
        Sync with process peer_id. This version uses a 3-exchange "pull-push" protocol: send heights; receive heights, units, and
        requests; send units and requests. If we sent some requests, there is a 4th exchange in which we once again receive units.
        This should only happen due to forks.
        '''

        self.n_init_syncs += 1
        self.logger.info(
            f'sync_sync_no | Number of syncs is {self.n_init_syncs}')
        if self.n_init_syncs > consts.N_INIT_SYNC:
            self.logger.info(
                f'sync_too_many_syncs | Too many syncs, not initiating a new one with {peer_id}'
            )
            self.n_init_syncs -= 1
            return
        channel = self.sync_channels[peer_id]
        if channel.in_use.locked():
            self.logger.info(
                f'sync_canceled {self.process.process_id} | Previous sync with {peer_id} still in progress'
            )
            self.n_init_syncs -= 1
            return

        async with channel.in_use:
            ids = self._new_sync_id(peer_id)
            self.logger.info(
                f'sync_establish_try {ids} | Establishing connection to {peer_id}'
            )
            self.logger.info(
                f'sync_establish {ids} | Established connection to {peer_id}')

            # step 1
            await self._send_poset_info(channel, 'sync', ids)

            # step 2
            try:
                their_poset_info, _ = await self._receive_poset_info(
                    channel, 'sync', ids)
                units_received = await self._receive_units(
                    channel, 'sync', ids)
                their_requests = await self._receive_requests(
                    channel, 'sync', ids)
            except RejectException:
                self.logger.info(
                    f'sync_rejected {ids} | Process {peer_id} rejected sync attempt'
                )
                self.n_init_syncs -= 1
                await self.maybe_close(channel)
                return

            # step 3
            with timer(ids, 'prepare_units'):
                to_send, to_request = units_to_send(self.process.poset,
                                                    their_poset_info,
                                                    their_requests)
            await self._send_units(to_send, channel, 'sync', ids)
            received_hashes = [U.hash() for U in units_received]
            to_request = [[r for r in reqs if r not in received_hashes]
                          for reqs in to_request]
            await self._send_requests(to_request, channel, 'sync', ids)

            # step 4 (only if we requested something)
            if any(to_request):
                self.logger.info(
                    f'sync_extended {ids} | Sync with {peer_id} extended due to forks'
                )
                units_received = await self._receive_units(
                    channel, 'sync', ids)

        await self.maybe_close(channel)

        if self._verify_signatures_and_add_units(units_received, peer_id,
                                                 'sync', ids):
            self.logger.info(
                f'sync_succ {ids} | Syncing with {peer_id} successful')
            timer.write_summary(where=self.logger, groups=[ids])
        else:
            self.logger.info(
                f'sync_fail {ids} | Syncing with {peer_id} failed')
        self.n_init_syncs -= 1
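
The selection done by units_to_send is only used above, not shown. Based on the docstring (heights are exchanged first, then each side sends the units the other lacks), the toy function below illustrates the basic height-based selection; the function name, the parameter layout, and the omission of fork handling and explicit requests are assumptions made for illustration only.

def toy_units_to_send(my_units_by_creator, their_heights):
    '''
    Toy model of the pull-push selection: send every unit strictly above the
    height the peer already reported for its creator.
    :param list my_units_by_creator: my_units_by_creator[pid][h] is our unit created by process pid at height h
    :param list their_heights: their_heights[pid] is the highest height the peer reported for pid (-1 if none)
    :returns: the units the peer appears to be missing
    '''
    to_send = []
    for pid, units in enumerate(my_units_by_creator):
        to_send.extend(units[their_heights[pid] + 1:])
    return to_send
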
Example no. 8
    async def create_add(self, txs_queue, server_started):
        '''
        A task that will keep creating new units.
        :param multiprocessing.Queue txs_queue: a queue of transactions to be added to units
        :param asyncio.Event server_started: an event used to ensure that the basic connection server has started before we commence unit creation
        '''
        await server_started.wait()
        created_count, max_level_reached = 0, False
        while created_count != consts.UNITS_LIMIT and not max_level_reached:

            # log current memory consumption
            memory_usage_in_mib = psutil.Process(os.getpid()).memory_info().rss / (2**20)
            self.logger.info(
                f'memory_usage {self.process_id} | {memory_usage_in_mib:.4f} MiB'
            )
            self.logger.info(
                f'max_units {self.process_id} | There are {len(self.poset.max_units)} maximal units just before create_unit'
            )

            txs = self.prepared_txs

            new_unit = self.create_unit(txs)
            created_count += 1

            if new_unit is not None:

                with timer(self.process_id, 'create_unit'):
                    self.poset.prepare_unit(new_unit)
                    assert self.poset.check_compliance(new_unit), \
                        "A unit created by our process is not passing the compliance test!"
                    self.sign_unit(new_unit)

                self.add_unit_to_poset(new_unit)

                n_parents = len(new_unit.parents)
                self.logger.info(
                    f"create_add {self.process_id} | Created a new unit {new_unit.short_name()} with {n_parents} parents"
                )

                self.our_units.append(new_unit)
                if consts.ADAPTIVE_DELAY:
                    self.adjust_create_delay()

                if new_unit.level == consts.LEVEL_LIMIT:
                    max_level_reached = True

                if not txs_queue.empty():
                    self.prepared_txs = txs_queue.get()
                else:
                    self.prepared_txs = []
            else:
                self.logger.info(
                    f'create_fail {self.process_id} | Failed to create a new unit'
                )

            timer.write_summary(where=self.logger, groups=[self.process_id])
            timer.reset(self.process_id)

            await asyncio.sleep(self.create_delay)

        self.keep_syncing = False
        logger = logging.getLogger(consts.LOGGER_NAME)
        if max_level_reached:
            logger.info(
                f'create_stop {self.process_id} | process reached max_level {consts.LEVEL_LIMIT}'
            )
        elif created_count == consts.UNITS_LIMIT:
            logger.info(
                f'create_stop {self.process_id} | process created {consts.UNITS_LIMIT} units'
            )

        # dump the final poset to disc
        self.poset.dump_to_file('poset.dag')
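
For context, the sketch below shows one way the server_started event could tie the connection server and the creator task together, as create_add's docstring describes. The run_node and run_server names and the process object are placeholders, not the project's actual API.

import asyncio

async def run_node(process, txs_queue):
    server_started = asyncio.Event()

    async def run_server():
        # ... start listening for incoming syncs here ...
        server_started.set()  # signal that unit creation may begin

    server_task = asyncio.create_task(run_server())
    # create_add waits on server_started before producing its first unit.
    await process.create_add(txs_queue, server_started)
    server_task.cancel()
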