Example #1
def _prepare_blockchain_repo(self, config=None):
    blockchain_outbox_repo_conf = env_postgres_config(
        'PROC_BCH_OUTBOX_REPO')
    if config:
        blockchain_outbox_repo_conf.update(config)
    self.blockchain_outbox_repo = ApiOutboxRepo(
        blockchain_outbox_repo_conf)
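
The helper above layers an explicit config on top of the environment-derived defaults via dict.update(). A minimal sketch of that merge behaviour, using hypothetical values (only the merge pattern comes from the snippet; the keys shown are illustrative):

def _merge_conf(base_conf, override=None):
    # start from the environment-derived defaults and let the explicit
    # override win on key collisions, mirroring _prepare_blockchain_repo above
    merged = dict(base_conf)
    if override:
        merged.update(override)
    return merged

# hypothetical values, for illustration only
assert _merge_conf(
    {"host": "postgres", "dbname": "outbox"},
    {"dbname": "outbox_test"},
) == {"host": "postgres", "dbname": "outbox_test"}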
Example #2
def test():

    api_outbox_repo = ApiOutboxRepo(TEST_API_OUTBOX_REPO_CONF)
    rejected_messages_repo = RejectedMessagesRepo(
        TEST_REJECTED_MESSAGES_REPO_CONF)
    api_outbox_repo._unsafe_clear_for_test()
    rejected_messages_repo._unsafe_clear_for_test()

    # check that repos are really empty
    assert not api_outbox_repo.get_next_pending_message()
    assert not rejected_messages_repo.get()

    worker = LoopbackBlockchainWorker(
        blockchain_outbox_repo_conf=TEST_API_OUTBOX_REPO_CONF,
        rejected_messages_repo_conf=TEST_REJECTED_MESSAGES_REPO_CONF)

    # testing that iter returns worker
    assert worker is iter(worker)

    worker.REJECT_EACH = 3

    # check that worker has nothing to do
    assert not next(worker)

    # posting messages/filling outbox
    msg_ids = []
    for i in range(worker.REJECT_EACH * 3):
        message = _generate_msg(sender_ref=_generate_sender_ref(i),
                                status='pending')
        msg_id = api_outbox_repo.post(message)
        assert msg_id
        msg_ids.append(msg_id)

    # checking that the worker received all messages and rejected some of them
    for i in range(worker.REJECT_EACH * 3):
        assert next(worker)
        # testing that rejected messages are posted into the rejected queue
        if (i + 1) % worker.REJECT_EACH == 0:
            rejected_message = rejected_messages_repo.get()
            assert rejected_message
            rejected_message_queue_id, rejected_message_obj = rejected_message
            assert rejected_messages_repo.delete(rejected_message_queue_id)
    # checking that worker completed all tasks
    assert not next(worker)

    # we don't know the order of the messages, so we just check the number of accepted/rejected ones
    accepted = 0
    rejected = 0
    for msg_id in msg_ids:
        api_outbox_message = api_outbox_repo.get(msg_id)
        assert api_outbox_message
        if api_outbox_message.status == 'accepted':
            accepted += 1
        elif api_outbox_message.status == 'rejected':
            rejected += 1

    expect_rejected = len(msg_ids) // worker.REJECT_EACH
    expect_accepted = len(msg_ids) - expect_rejected
    assert accepted == expect_accepted
    assert rejected == expect_rejected
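
A note on the modulo check inside the loop above: without parentheses, % binds tighter than +, so i + 1 % worker.REJECT_EACH evaluates as i + (1 % worker.REJECT_EACH) and never expresses "every N-th message". A standalone sketch of the difference (values are illustrative):

REJECT_EACH = 3
for i in range(6):
    # without parentheses: i + (1 % 3) == i + 1, which is never the intended test
    assert i + 1 % REJECT_EACH == i + 1
# with parentheses: the check fires on every third message
assert [(i + 1) % REJECT_EACH == 0 for i in range(6)] == [
    False, False, True, False, False, True]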
Example #3
def test():
    # creating test versions of all required repos
    message_lake_repo = MessageLakeRepo(MESSAGE_LAKE_REPO_CONF)
    object_acl_repo = ObjectACLRepo(OBJECT_ACL_REPO_CONF)

    bc_inbox_repo = BCInboxRepo(BC_INBOX_REPO_CONF)
    object_retrieval_repo = ObjectRetrievalRepo(OBJECT_RETRIEVAL_REPO_CONF)
    notifications_repo = NotificationsRepo(NOTIFICATIONS_REPO_CONF)

    blockchain_outbox_repo = ApiOutboxRepo(BLOCKCHAIN_OUTBOX_REPO_CONF)

    def clear():
        # clearing repos
        message_lake_repo._unsafe_method__clear()
        object_acl_repo._unsafe_method__clear()

        bc_inbox_repo._unsafe_method__clear()
        object_retrieval_repo._unsafe_method__clear()
        notifications_repo._unsafe_method__clear()

        blockchain_outbox_repo._unsafe_method__clear()

        # test repos are empty
        assert message_lake_repo.is_empty()
        assert object_acl_repo.is_empty()

        assert bc_inbox_repo.is_empty()
        assert object_retrieval_repo.is_empty()
        assert notifications_repo.is_empty()

        assert blockchain_outbox_repo.is_empty()

    clear()

    processor = InboundMessageProcessor(
        bc_inbox_repo_conf=BC_INBOX_REPO_CONF,
        message_lake_repo_conf=MESSAGE_LAKE_REPO_CONF,
        object_acl_repo_conf=OBJECT_ACL_REPO_CONF,
        object_retrieval_repo_conf=OBJECT_RETRIEVAL_REPO_CONF,
        notifications_repo_conf=NOTIFICATIONS_REPO_CONF,
        blockchain_outbox_repo_conf=BLOCKCHAIN_OUTBOX_REPO_CONF)

    # test that iter(processor) returns the processor itself
    assert iter(processor) is processor
    # test that the processor has no jobs yet
    assert next(processor) is None

    sender_ref = "AU:xxxx-xxxx-xxxx"
    status = 'received'
    message = _generate_msg_object(sender_ref=sender_ref, status=status)
    message.sender = "CN"

    assert bc_inbox_repo.post(message)

    # testing normal processing of a received message with a sender ref
    assert next(processor) is True
    assert next(processor) is None
    # testing that message is deleted
    assert bc_inbox_repo.is_empty()
    # testing message posted to related repos
    assert not message_lake_repo.is_empty()
    assert not object_acl_repo.is_empty()
    # we can't assert this repo is non-empty, because a worker consumes values from it
    # assert not object_retrieval_repo.is_empty()
    # a message with 'received' status should not be posted to the blockchain
    assert blockchain_outbox_repo.is_empty()

    clear()

    sender_ref = "AU:xxxx-xxxx-xxxx"
    # this one should go to blockchain outbox
    status = 'pending'
    message = _generate_msg_object(sender_ref=sender_ref, status=status)
    message.sender = OUR_JRD
    message.receiver = 'CN'

    assert bc_inbox_repo.post(message)

    # testing normal processing of a pending message with a sender ref
    assert next(processor) is True
    assert next(processor) is None
    # testing that message is deleted
    assert bc_inbox_repo.is_empty()
    # testing message posted to related repos
    assert not message_lake_repo.is_empty()
    assert not object_acl_repo.is_empty()

    clear()

    # message without sender ref should fail
    message = _generate_msg_object()
    assert bc_inbox_repo.post(message)

    assert next(processor) is False
    assert next(processor) is None
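
The assertions above rely on a three-valued contract for next(processor)/next(worker): True when a job was processed, False when processing failed, and None when there is nothing to do. A minimal sketch of a worker honouring that contract (the class, its queue, and the job shape are hypothetical):

class SketchWorker:
    def __init__(self, jobs):
        # hypothetical in-memory queue standing in for the real repos
        self.jobs = list(jobs)

    def __iter__(self):
        return self

    def __next__(self):
        if not self.jobs:
            return None  # nothing to do
        job = self.jobs.pop(0)
        # True on success, False on a handled failure (e.g. no sender ref)
        return bool(job.get("sender_ref"))

worker = SketchWorker([{"sender_ref": "AU:xxxx"}, {}])
assert iter(worker) is worker
assert next(worker) is True
assert next(worker) is False
assert next(worker) is None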
Example #4
def _prepare_outbox_repo(self, conf):
    outbox_repo_conf = env_postgres_config('PROC_BCH_OUTBOX')
    if conf:
        outbox_repo_conf.update(conf)
    self.outbox_repo = ApiOutboxRepo(outbox_repo_conf)
Example #5
class MultichannelWorker(object):
    """
    Iterate over the RouteToChannelUseCase.
    """

    ROUTING_TABLE = env_json("IGL_MCHR_ROUTING_TABLE", default=[])

    def _prepare_outbox_repo(self, conf):
        outbox_repo_conf = env_postgres_config('PROC_BCH_OUTBOX')
        if conf:
            outbox_repo_conf.update(conf)
        self.outbox_repo = ApiOutboxRepo(outbox_repo_conf)

    def _prepare_message_updates_repo(self, conf):
        # This repo is used to talk to the message-updater microservice,
        # which just changes statuses in the message lake
        repo_conf = env_queue_config('MCHR_MESSAGE_UPDATES_REPO',
                                     use_default=False)
        if not repo_conf:
            repo_conf = env_queue_config('BCH_MESSAGE_UPDATES')
        if conf:
            repo_conf.update(conf)
        self.message_updates_repo = MessageUpdatesRepo(repo_conf)

    def _prepare_use_cases(self):
        self.uc = RouteToChannelUseCase(self.ROUTING_TABLE)

    def _prepare_channels(self):
        """
        For each routing rule we create a channel object
        and put it into the routing table, so the underlying use cases
        don't have to think about it at all and just use the object.
        """
        for routing_rule in self.ROUTING_TABLE:
            routing_rule["ChannelInstance"] = HttpApiChannel(
                routing_rule.copy())
        return

    def _update_message_status(self,
                               msg,
                               new_status,
                               channel_id=None,
                               channel_msg_id=None):
        # In the message lake
        # if channel_id == DiscreteGenericMemoryChannel.ID:
        #     channel_response = json.loads(channel_response)
        #     channel_txn_id = channel_response['link'].split('=')[1]
        # else:
        #     return False
        patch_data = {
            gd.STATUS_KEY: new_status,
        }
        if channel_id and channel_msg_id:
            patch_data.update({
                gd.CHANNEL_ID_KEY: channel_id,
                gd.CHANNEL_TXN_ID_KEY: channel_msg_id,
            })
        return self.message_updates_repo.post_job(
            {
                'message': msg.to_dict(),
                'patch': patch_data
            },
            delay_seconds=random.randint(2, 7))

    def __init__(self,
                 outbox_repo_conf=None,
                 channel_pending_message_repo_conf=None,
                 message_updates_repo_conf=None,
                 config=None):
        # self._prepare_config(config)
        self._prepare_outbox_repo(outbox_repo_conf)
        # self._prepare_channel_pending_message_repo(channel_pending_message_repo_conf)
        self._prepare_message_updates_repo(message_updates_repo_conf)
        self._prepare_use_cases()
        self._prepare_channels()

    def __iter__(self):
        logger.info("Starting the multichannel worker with channels %s",
                    [ch["Name"] for ch in self.ROUTING_TABLE])
        return self

    def __next__(self):
        try:
            pg_msg = self.outbox_repo.get_next_pending_message()
            if not pg_msg:
                return None
            logger.info("Processing message %s (%s)", pg_msg, pg_msg.id)
            self.outbox_repo.patch(pg_msg.id, {'status': 'sending'})

            # If there is no result, the message wasn't posted to any channel.
            # From the use-case point of view that is an acceptable situation,
            # so we just silently return None,
            # BUT we probably want to change the status of the message in
            # outbox_repo

            # first we convert the message from
            # intergov.repos.api_outbox.postgres_objects.Message
            # to
            # intergov.domain.wire_protocols.generic_discrete.Message
            # (relevant while we use postgres as the storage for the outbox repo)
            assert isinstance(pg_msg, PostgresMessageRepr)
            gd_msg = gd.Message.from_dict(pg_msg.to_dict())

            try:
                result = self.uc.execute(gd_msg)
            except Exception as e:
                # sleep a few seconds after a failure
                logger.error("[%s] Rejecting due to use-case exception %s",
                             gd_msg.sender_ref, str(e))
                self.outbox_repo.patch(pg_msg.id, {'status': 'rejected'})
                for i in range(random.randint(30, 100)):
                    time.sleep(0.1)
                return False

            if result:
                # message has been sent somewhere
                recipient_channel_id, recipient_channel_message_id = result
                logger.info("[%s] The message has been sent to channel %s",
                            gd_msg.sender_ref, recipient_channel_id)
                self._update_message_status(
                    gd_msg,
                    new_status="accepted",
                    channel_id=recipient_channel_id,
                    channel_msg_id=recipient_channel_message_id)
                if not self.outbox_repo.patch(pg_msg.id,
                                              {'status': 'accepted'}):
                    logger.warning("[%s] Failed to update msg in outbox",
                                   gd_msg.sender_ref)
                    result = False
                else:
                    result = True
            else:
                # no channel accepted the message, or there was some other error
                logger.warning("[%s] Message has NOT been sent",
                               gd_msg.sender_ref)
                self._update_message_status(gd_msg, "rejected")
                self.outbox_repo.patch(pg_msg.id, {'status': 'rejected'})
                result = False
            return result

        except Exception as e:
            logger.exception(e)
            return None
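
Workers like this one are meant to be driven as iterators. A minimal sketch of the surrounding run loop, assuming the worker is configured from the environment and using a hypothetical back-off delay:

import time

worker = MultichannelWorker()
for result in worker:
    if result is None:
        # nothing pending (or an unexpected error); back off before polling again
        time.sleep(1)
    # True means the message was routed to a channel, False means it was
    # rejected; in both cases we immediately ask for the next message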
Example #6
class LoopbackBlockchainWorker(object):

    REJECT_EACH = int(
        env('IGL_PROC_LOOPBACK_BCH_WORKER_REJECT_EACH', default=0))

    MESSAGE_PATCH_API_ENDPOINT = env(
        'IGL_PROC_BCH_MESSAGE_API_ENDPOINT',
        default='http://message_api:5101/message/{sender}:{sender_ref}')

    MESSAGE_RX_API_ENDPOINT = env(
        'IGL_PROC_BCH_MESSAGE_RX_API_URL',
        default='http://message_rx_api:5100/messages')

    # default for all instances
    rejected_each_counter = 0

    def __init__(self,
                 blockchain_outbox_repo_conf=None,
                 rejected_messages_repo_conf=None):
        self._prepare_blockchain_repo(blockchain_outbox_repo_conf)
        self._prepare_rejected_messages_repo(rejected_messages_repo_conf)

    def _prepare_blockchain_repo(self, config=None):
        blockchain_outbox_repo_conf = env_postgres_config(
            'PROC_BCH_OUTBOX_REPO')
        if config:
            blockchain_outbox_repo_conf.update(config)
        self.blockchain_outbox_repo = ApiOutboxRepo(
            blockchain_outbox_repo_conf)

    def _prepare_rejected_messages_repo(self, config=None):
        rejected_messages_repo_conf = env_queue_config(
            'PROC_REJECTED_MESSAGES_REPO')
        if config:
            rejected_messages_repo_conf.update(config)
        self.rejected_messages_repo = RejectedMessagesRepo(
            rejected_messages_repo_conf)

    def __iter__(self):
        logger.info("Starting the loopback blockchain worker")
        return self

    def __next__(self):
        try:
            result = self._process_next_message()
        except Exception as e:
            logger.exception(e)
            result = None
        return result

    def _create_message_payload(self, msg):
        return {
            'sender': msg.sender,
            'receiver': msg.receiver,
            'subject': msg.subject,
            'obj': msg.obj,
            'sender_ref': msg.sender_ref,
            'predicate': msg.predicate,
        }

    def _patch_message_status(self, msg, status):
        url = self.MESSAGE_PATCH_API_ENDPOINT.format(sender=msg.sender,
                                                     sender_ref=msg.sender_ref)
        logger.info("Patching message status to %s by url %s", status, url)
        resp = requests.patch(url, json={'status': status})
        if resp.status_code not in (200, 201):
            raise RuntimeError(
                "Unable to patch message status, resp code {} body {}".format(
                    resp.status_code, resp.content))

    def _post_message_rx(self, payload):
        resp = requests.post(self.MESSAGE_RX_API_ENDPOINT, json=payload)
        if resp.status_code not in (200, 201):
            raise RuntimeError(
                "Unable to post message, code {}, resp {}".format(
                    resp.status_code, resp.content))
        return resp

    def _process_next_message(self):
        msg = self.blockchain_outbox_repo.get_next_pending_message()
        if msg is None:
            return None
        # ensure the message got to the repo as expected; this won't be a
        # problem in prod (remove it there)
        # time.sleep(3)
        logger.info("[Loopback] Processing message %s (%s) to the blockchain",
                    msg, msg.id)
        # dummy rejection, controlled by the env variable "IGL_PROC_LOOPBACK_BCH_WORKER_REJECT_EACH"
        if self.REJECT_EACH > 0:
            self.rejected_each_counter += 1
            if self.rejected_each_counter == self.REJECT_EACH:
                logger.info(
                    "Rejecting the message (because we reject one of %s, and this is %s)",
                    self.REJECT_EACH, self.rejected_each_counter)
                self.rejected_each_counter = 0
                self.blockchain_outbox_repo.patch(msg.id,
                                                  {'status': 'rejected'})
                self.rejected_messages_repo.post(msg)
                return True

        self.blockchain_outbox_repo.patch(msg.id, {'status': 'sending'})
        # time.sleep(1)  # for the realistic debug

        # please note that logically we don't forward the original message;
        # we send a new message about the message, which happens to be equal
        # to it. So we don't re-send the same object, but another object
        # which (wow!) has all the same fields. It is still not the same
        # message, because logically we got it from the blockchain, where it
        # was encrypted/compressed and transformed by other methods.
        message_to_be_sent = self._create_message_payload(msg)
        # it's a slightly silly situation when the importer app in the same
        # intergov setup gets both messages, but in a real deployment the
        # remote importer_app will get only the blockchain one.

        logger.info("message_to_be_sent %s", message_to_be_sent)
        # we behave as if this message has been received from the remote party
        self._post_message_rx(message_to_be_sent)
        self.blockchain_outbox_repo.patch(msg.id, {'status': 'accepted'})
        # now we update the original message status
        self._patch_message_status(msg, 'accepted')

        logger.info("[Loopback] The message has been sent to blockchain and "
                    "immediately retrieved from it as a received")
        return True
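
The REJECT_EACH logic above rejects every N-th pending message by counting and resetting. A standalone sketch of just that counter behaviour, detached from the repos (names are hypothetical):

class RejectCounter:
    # mirrors rejected_each_counter / REJECT_EACH from the worker above
    def __init__(self, reject_each):
        self.reject_each = reject_each
        self.counter = 0

    def should_reject(self):
        if self.reject_each <= 0:
            return False
        self.counter += 1
        if self.counter == self.reject_each:
            self.counter = 0
            return True
        return False

counter = RejectCounter(3)
assert [counter.should_reject() for _ in range(6)] == [
    False, False, True, False, False, True]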
Example #7
class MultiChannelBlockchainWorker(object):
    """
    Iterate over the RouteToChannelUseCase.
    """
    def _prepare_config(self, config):
        if config:
            self.config = config
        else:
            self.config = copy.deepcopy(DEFAULT_CONFIG)

    def _prepare_blockchain_outbox_repo(self, conf):
        blockchain_outbox_repo_conf = env_postgres_config('PROC_BCH_OUTBOX')
        if conf:
            blockchain_outbox_repo_conf.update(conf)
        self.blockchain_outbox_repo = ApiOutboxRepo(
            blockchain_outbox_repo_conf)

    def _prepare_message_updates_repo(self, conf):
        message_updates_repo_conf = env_queue_config('BCH_MESSAGE_UPDATES')
        if conf:
            message_updates_repo_conf.update(conf)
        self.message_updates_repo = MessageUpdatesRepo(
            message_updates_repo_conf)

    def _prepare_channel_pending_message_repo(self, conf):
        channel_pending_message_repo_conf = env_queue_config(
            'PROC_BCH_CHANNEL_PENDING_MESSAGE')
        if conf:
            channel_pending_message_repo_conf.update(conf)
        self.channel_pending_message_repo = ChannelPendingMessageRepo(
            channel_pending_message_repo_conf)

    def _prepare_use_cases(self):
        self.uc = RouteToChannelUseCase(self.config)

    def _message_to_dict(self, msg):
        return {
            gd.SENDER_KEY: msg.sender,
            gd.RECEIVER_KEY: msg.receiver,
            gd.SUBJECT_KEY: msg.subject,
            gd.OBJ_KEY: msg.obj,
            gd.PREDICATE_KEY: msg.predicate,
            gd.SENDER_REF_KEY: msg.sender_ref
        }

    def _push_message_to_channel_pending_message_repo(self, channel_id,
                                                      channel_response, msg):
        if channel_id == DiscreteGenericMemoryChannel.ID:
            return self.channel_pending_message_repo.post_job({
                'channel_id': channel_id,
                'channel_response': json.loads(channel_response),
                'message': self._message_to_dict(msg),
            })
        return False

    def _push_message_to_channel_message_updater(self, channel_id,
                                                 channel_response, msg):
        if channel_id == DiscreteGenericMemoryChannel.ID:
            channel_response = json.loads(channel_response)
            channel_txn_id = channel_response['link'].split('=')[1]
        else:
            return False
        return self.message_updates_repo.post_job(
            {
                'message': self._message_to_dict(msg),
                'patch': {
                    gd.CHANNEL_ID_KEY: channel_id,
                    gd.CHANNEL_TXN_ID_KEY: channel_txn_id
                }
            },
            delay_seconds=10)

    def __init__(self,
                 blockchain_outbox_repo_conf=None,
                 channel_pending_message_repo_conf=None,
                 message_updates_repo_conf=None,
                 config=None):
        self._prepare_config(config)
        self._prepare_blockchain_outbox_repo(blockchain_outbox_repo_conf)
        self._prepare_channel_pending_message_repo(
            channel_pending_message_repo_conf)
        self._prepare_message_updates_repo(message_updates_repo_conf)
        self._prepare_use_cases()

    def __iter__(self):
        logger.info("Starting the multichannel blockchain worker")
        return self

    def __next__(self):
        try:

            msg = self.blockchain_outbox_repo.get_next_pending_message()
            if not msg:
                return None
            logger.info("Processing message %s (%s)", msg, msg.id)
            self.blockchain_outbox_repo.patch(msg.id, {'status': 'sending'})

            # If there is no result, the message wasn't posted to any channel.
            # From the use-case point of view that is an acceptable situation,
            # so we just silently return None,
            # BUT we probably want to change the status of the message in
            # blockchain_outbox_repo

            try:
                result = self.uc.execute(msg)
            except Exception:
                time.sleep(random.randint(1, 6))
                raise
            if not result:
                return None
            channel_id, channel_response = result
            logger.info(f'Channel[{channel_id}]: {channel_response}')

            self._push_message_to_channel_pending_message_repo(
                channel_id, channel_response, msg)
            self._push_message_to_channel_message_updater(
                channel_id, channel_response, msg)

            if self.blockchain_outbox_repo.patch(msg.id,
                                                 {'status': 'accepted'}):
                logger.info("The message has been sent to channel")
            else:
                logger.info(
                    "The message has been sent but failed to update msg in outbox"
                )
                return False

        except Exception as e:
            logger.exception(e)
            return None
        return True
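
_push_message_to_channel_message_updater above assumes the memory channel's JSON response carries the channel transaction id after an '=' in its 'link' field. A minimal sketch of that extraction with a hypothetical response value:

import json

# hypothetical channel response, shaped like the one parsed above
channel_response = '{"link": "http://channel.example/messages?id=txn-123"}'
channel_txn_id = json.loads(channel_response)['link'].split('=')[1]
assert channel_txn_id == 'txn-123'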