Example #1
def test_patch_success(NotificationsRepoMock, MessageLakeRepoMock, client):

    notifications_repo = NotificationsRepoMock.return_value
    message_lake_repo = MessageLakeRepoMock.return_value

    # testing an update without a status field: allowed, but it
    # doesn't do anything useful yet
    data = {**VALID_MESSAGE_DATA_DICT}
    data.pop('status', None)
    message = Message.from_dict(data)

    message_lake_repo.get.return_value = message

    resp = client.patch(
        PATCH_URL.format(MESSAGE_REFERENCE),
        json=VALID_MESSAGE_DATA_DICT
    )

    assert resp.status_code == HTTPStatus.OK
    assert resp.content_type == VALID_RESPONSE_MIMETYPE
    assert resp.get_json() == message.to_dict()

    assert message_lake_repo.get.call_count == 1
    message_lake_repo.update_metadata.assert_not_called()
    notifications_repo.post_job.assert_not_called()

    # testing status update
    sender, sender_ref = MESSAGE_REFERENCE.split(':', 1)
    for status in VALID_PATCH_STATUSES:
        notifications_repo.reset_mock()
        message_lake_repo.reset_mock()

        data = {**VALID_MESSAGE_DATA_DICT}
        message_old = Message.from_dict(data)
        data['status'] = status
        message_patched = Message.from_dict(data)

        message_lake_repo.get.side_effect = (message_old, message_patched,)

        resp = client.patch(
            PATCH_URL.format(MESSAGE_REFERENCE),
            json=data
        )

        assert resp.status_code == HTTPStatus.OK
        assert resp.content_type == VALID_RESPONSE_MIMETYPE
        assert resp.get_json() == message_patched.to_dict()

        assert message_lake_repo.get.call_count == 2
        message_lake_repo.update_metadata.assert_called_once_with(sender, sender_ref, {'status': status})
        notifications_repo.post_job.assert_called_once()
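The mock parameters in the signature above are normally injected by stacked mock.patch decorators, and the decorator closest to the function fills the first parameter, which explains the NotificationsRepoMock / MessageLakeRepoMock ordering. A minimal, self-contained sketch of that ordering (the stand-in classes below are placeholders, not the project's repositories):

from unittest import mock


class NotificationsRepo:  # placeholder stand-ins for the real repo classes
    pass


class MessageLakeRepo:
    pass


# The bottom decorator supplies the first mock argument, so the signature
# order mirrors Example #1 above.
@mock.patch(f'{__name__}.MessageLakeRepo')
@mock.patch(f'{__name__}.NotificationsRepo')
def test_decorator_order(NotificationsRepoMock, MessageLakeRepoMock):
    assert NotificationsRepoMock is not MessageLakeRepoMock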
Example #2
    def _get_message(self, channel, message_id):
        request_channel_use_case = RequestChannelAPIUseCase(channel)
        message_payload = request_channel_use_case.get_message(message_id)
        message = Message.from_dict(message_payload['message'])
        message.status = "received"
        message.sender_ref = str(uuid.uuid4())
        return message
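Every snippet in this listing goes through Message.from_dict / to_dict plus free attribute access such as message.status, message.sender_ref and message.kwargs[...]. The real domain class is not part of the listing; the rough, standalone sketch below only mirrors the shape these examples appear to rely on (field names and validation rules are guesses, not the project's code):

class Message:
    """Toy stand-in for the domain object used throughout these examples."""

    # assumed set of mandatory fields; the real list lives in the project
    required_attrs = ['sender', 'receiver', 'subject', 'obj', 'predicate']

    def __init__(self, **kwargs):
        # keep the raw payload around; callers mutate it via message.kwargs[...]
        self.kwargs = dict(kwargs)

    @classmethod
    def from_dict(cls, data, require_allowed=None):
        # require_allowed is accepted only for signature parity with Example #7
        return cls(**data)

    def to_dict(self, exclude=None):
        exclude = set(exclude or [])
        return {k: v for k, v in self.kwargs.items() if k not in exclude}

    def __getattr__(self, name):
        # fall back to the payload for fields like .status or .sender_ref
        return self.kwargs.get(name)

    def __setattr__(self, name, value):
        if name == 'kwargs':
            super().__setattr__(name, value)
        else:
            self.kwargs[name] = value

    def __eq__(self, other):
        return isinstance(other, Message) and self.to_dict() == other.to_dict()

    def is_valid(self):
        return all(self.kwargs.get(attr) for attr in self.required_attrs)

    def validation_errors(self):
        return ['missing ' + attr for attr in self.required_attrs
                if not self.kwargs.get(attr)]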
Example #3
def message_post():
    """
    Puts message to the message lake and into the processing queue
    """
    body = request.get_json(silent=True)
    if not body or not isinstance(body, dict):
        raise MessageDataEmptyError()

    try:
        message = Message.from_dict(body)
    except Exception as e:
        raise MessageDeserializationError(source=[str(e)])
    if not message.is_valid():
        raise MessageValidationError(source=message.validation_errors())

    # we fill it for message_api but not for message_rx_api
    if not message.sender_ref:
        message.kwargs["sender_ref"] = str(uuid.uuid4())

    # because we are the first to see this message
    message.kwargs["status"] = "pending"

    repo = BCInboxRepo(Config.BC_INBOX_CONF)
    use_case = EnqueueMessageUseCase(repo)

    if use_case.execute(message):
        return Response(
            json.dumps(message, cls=ser.MessageJSONEncoder),
            status=HTTPStatus.CREATED,
            mimetype='application/json',
            # headers={'Location': message_url}
        )
    else:
        raise UnableWriteToInboxError()
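This endpoint (and its RX counterpart in Example #7) serialises the response with json.dumps(message, cls=ser.MessageJSONEncoder). The encoder itself is not included in this listing; a plausible minimal version (an assumption, not the project's actual serializer) simply delegates to the object's to_dict():

import json


class MessageJSONEncoder(json.JSONEncoder):
    """Hypothetical sketch: serialise objects that expose a to_dict() method."""

    def default(self, o):
        to_dict = getattr(o, 'to_dict', None)
        if callable(to_dict):
            return to_dict()
        return super().default(o)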
Example #4
def test(docker_setup):

    message_lake_repo = MessageLakeRepo(MESSAGE_LAKE_REPO_CONF)
    rejected_message_repo = RejectedMessagesRepo(REJECTED_MESSAGES_REPO_CONF)

    # ensuring that repos are empty
    message_lake_repo._unsafe_clear_for_test()
    rejected_message_repo._unsafe_clear_for_test()

    updater = RejectedStatusUpdater(
        rejected_message_repo_conf=REJECTED_MESSAGES_REPO_CONF,
        message_lake_repo_conf=MESSAGE_LAKE_REPO_CONF)

    # checking that iter returns updater
    assert updater is iter(updater)

    # test no rejected messages in the queue
    assert not next(updater)

    # testing single message in the queue
    sender, sender_ref = MESSAGES_DATA[0]
    message = Message.from_dict(
        _generate_msg_dict(**{
            SENDER_KEY: sender,
            SENDER_REF_KEY: sender_ref
        }))
    rejected_message_repo.post(message)
    message_lake_repo.post(message)

    assert next(updater)
    assert not next(updater)

    # testing several messages in queue
    for _ in range(2):
        for sender, sender_ref in MESSAGES_DATA:
            message = Message.from_dict(
                _generate_msg_dict(**{
                    SENDER_KEY: sender,
                    SENDER_REF_KEY: sender_ref
                }))
            rejected_message_repo.post(message)
            message_lake_repo.post(message)
        for _ in range(len(MESSAGES_DATA)):
            assert next(updater)
        assert not next(updater)
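The assertions above drive RejectedStatusUpdater purely through the iterator protocol: iter(updater) must return the updater itself, and each next() call processes at most one rejected message, returning something truthy on success and something falsy (rather than raising StopIteration) when the queue is empty. A toy illustration of that contract, with a stand-in class rather than the real implementation:

import collections


class ToyUpdater:
    """Stand-in that mimics the iterator contract asserted in the test above."""

    def __init__(self, rejected_queue):
        self.rejected_queue = collections.deque(rejected_queue)

    def __iter__(self):
        return self  # iter(updater) is updater

    def __next__(self):
        if not self.rejected_queue:
            return False  # empty queue: falsy result, not StopIteration
        item = self.rejected_queue.popleft()
        # ... the real updater would mark the message as rejected here ...
        return item


updater = ToyUpdater(['msg-1', 'msg-2'])
assert iter(updater) is updater
assert next(updater)
assert next(updater)
assert not next(updater)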
Example #5
def test_post_success(RepoMock, client):

    message = Message.from_dict(VALID_MESSAGE_DATA_DICT)
    post = RepoMock.return_value.post
    post.return_value = message

    resp = client.post(POST_URL, json=VALID_MESSAGE_DATA_DICT)

    post.assert_called_once()

    assert resp.status_code == HTTPStatus.CREATED
    assert resp.content_type == VALID_RESPONSE_MIMETYPE
    # checks all required attrs + sender_ref
    resp_json = resp.get_json()
    diff = diff_message_dicts(resp_json,
                              VALID_MESSAGE_DATA_DICT,
                              keys=Message.required_attrs + ['sender_ref'])
    assert not diff
    assert 'status' in resp_json
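diff_message_dicts is a test helper defined elsewhere in the suite; judging by how it is used, it returns the keys (out of those requested) whose values differ between the response and the expected payload, so an empty result means they match. A short sketch under that assumption:

def diff_message_dicts(actual, expected, keys):
    """Hypothetical helper: list the requested keys whose values differ."""
    return [key for key in keys if actual.get(key) != expected.get(key)]


assert not diff_message_dicts(
    {'sender': 'AU', 'status': 'pending'},
    {'sender': 'AU'},
    keys=['sender'],
)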
Example #6
def test_post_success(EnqueueMessageUseCaseMock, BCInboxRepoMock, client):

    execute = EnqueueMessageUseCaseMock.return_value.execute
    execute.return_value = True

    msg_data = generate_message()
    msg_data['sender_ref'] = uuid.uuid4()
    msg_data['status'] = 'received'
    m = Message.from_dict(msg_data)

    http_response = client.post(POST_URL,
                                data=encode_message(m),
                                content_type='application/json')
    assert http_response.mimetype == VALID_RESPONSE_MIMETYPE
    assert not m.spurious_attrs_errors()
    assert int(http_response.status_code) == HTTPStatus.CREATED, \
        http_response.get_json()
    # the payload is re-cast into a domain object
    execute.assert_called_with(m)
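encode_message is another helper that is not shown here; since the test posts its output with content_type='application/json', it presumably renders the Message as a JSON body (including the uuid.UUID sender_ref set above). A sketch under those assumptions:

import json
import uuid


def encode_message(message):
    """Hypothetical helper: dump a Message to a JSON request body."""
    def _default(value):
        if isinstance(value, uuid.UUID):
            return str(value)
        raise TypeError('%r is not JSON serialisable' % (value,))

    return json.dumps(message.to_dict(), default=_default)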
Example #7
def message():
    """
    Usage:
        curl -XPOST http://127.0.0.1:5000/messages \
            -H 'Content-type: application/json' \
            -d '{"adf": "ee"}'
    """
    # silent=True suppresses the default BadRequest error on malformed JSON
    body = request.get_json(silent=True)
    if not body or not isinstance(body, dict):
        raise MessageDataEmptyError()

    try:
        message = Message.from_dict(body, require_allowed=["sender_ref"])
    except Exception as e:
        raise MessageDeserializationError(source=[str(e)])
    if not message.is_valid():
        raise MessageValidationError(source=message.validation_errors())

    message_url = message.absolute_object_uri()
    if not message_url:
        raise MessageAbsoluteURLError()

    message.kwargs["status"] = "received"  # because it's RX api

    repo = BCInboxRepo(
        Config.BC_INBOX_CONF
    )

    use_case = EnqueueMessageUseCase(repo)

    if use_case.execute(message):
        return Response(
            json.dumps(message, cls=ser.MessageJSONEncoder),
            status=201,
            mimetype='application/json',
            headers={'Location': message_url}
        )
    else:
        raise UnableWriteToInboxError()
Example #8
def test_patch_only_status_and_update_skip_for_equal_status():

    # testing a status-only update / ignoring other payload fields
    payload = _generate_msg_dict()
    payload[STATUS_KEY] = STATUS_PENDING
    message = Message.from_dict(payload)

    notifications_repo = mock.MagicMock()
    message_lake_repo = mock.MagicMock()
    message_lake_repo.get.return_value = message

    new_payload = _generate_msg_dict()
    new_payload[STATUS_KEY] = STATUS_ACCEPTED
    uc = PatchMessageMetadataUseCase(message_lake_repo, notifications_repo)
    uc.execute(
        MESSAGE_REF,
        new_payload
    )

    message_lake_repo.get.assert_called()
    message_lake_repo.update_metadata.assert_called_once_with(
        SENDER,
        SENDER_REF,
        {STATUS_KEY: new_payload[STATUS_KEY]}
    )
    notifications_repo.post_job.assert_called()

    # testing that the update is skipped when the status value is unchanged
    notifications_repo.reset_mock()
    message_lake_repo.reset_mock()

    uc.execute(
        MESSAGE_REF,
        payload
    )

    message_lake_repo.get.assert_called()
    message_lake_repo.update_metadata.assert_not_called()
    notifications_repo.post_job.assert_not_called()
Example #9
def _generate_msg(**kwargs):
    return Message.from_dict(_generate_msg_dict(**kwargs))
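_generate_msg_dict is the payload factory behind _generate_msg and several of the tests above; it is not part of this listing. A minimal sketch, assuming it builds a valid-looking message payload with random defaults that keyword overrides can replace (all field names and values below are placeholders):

import uuid


def _generate_msg_dict(**overrides):
    """Hypothetical fixture helper: a plausible message payload plus overrides."""
    payload = {
        'sender': 'AU',
        'receiver': 'CN',
        'subject': 'AU.' + str(uuid.uuid4()),
        'obj': str(uuid.uuid4()),
        'predicate': 'namespace.object.created',  # placeholder predicate
        'sender_ref': str(uuid.uuid4()),
    }
    payload.update(overrides)
    return payload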
Example #10
    def process(self, queue_message_id, message):
        # let it be processed
        logger.info("Received message to process: %s", message)

        # TODO: if some steps succeed and others fail, the successful steps
        # will be repeated on the next attempt; that's fine for object storage
        # but not for queues
        try:
            ml_OK = self.message_lake_repo.post(message)
        except Exception as e:
            logger.exception(e)
            ml_OK = False
        try:
            acl_OK = self.object_acl_repo.post(message)
        except Exception as e:
            logger.exception(e)
            acl_OK = False

        try:
            # delay it a little so the message has had time to reach the repo,
            # and drop the status field because notifications don't need it
            message_without_status = Message.from_dict(
                message.to_dict(exclude=['status']))
            # 'fat' ping: the full message, for consumers that understand its format
            pub_OK = self.notifications_repo.post(message_without_status,
                                                  delay_seconds=3)
            # 'light' ping: a reference-only notification, for consumers that
            # want to hear about every event
            self.notifications_repo.post_job({
                "predicate": f'message.{message.sender_ref}.received',
                "sender_ref": f"{message.sender}:{message.sender_ref}"
            })
        except Exception as e:
            logger.exception(e)
            pub_OK = False

        # blockchain part - pass the message to the blockchain worker
        # so it can be shared to the foreign parties
        outbox_OK = True
        ret_OK = True
        if str(message.sender) == str(
                self.country) and message.status == 'pending':
            # our jurisdiction -> remote
            logger.info("Sending message to the channels: %s", message.subject)
            try:
                outbox_OK = self.blockchain_outbox_repo.post(message)
            except Exception as e:
                logger.exception(e)
                outbox_OK = False
        elif str(message.sender) != str(
                self.country) and message.status == 'received':
            # Incoming message from a remote jurisdiction
            # might need to download remote documents using the
            # Documents Spider
            logger.info(
                "Received message from remote jurisdiction %s with subject %s",
                message.sender, message.subject)
            logger.info("Scheduling download remote documents for: %s",
                        message.subject)
            try:
                ret_OK = self.object_retreval_repo.post_job({
                    "action": "download-object",
                    "sender": message.sender,
                    "object": message.obj
                })
            except Exception as e:
                logger.exception(e)
                ret_OK = False
        else:
            # strange situation
            logger.warning(
                "Message sender is %s and we are %s and the status is %s - strange",
                message.sender, self.country, message.status)
            return False

        if ml_OK and acl_OK and ret_OK and pub_OK and outbox_OK:
            self.bc_inbox_repo.delete(queue_message_id)
            return True
        else:
            logger.error("Task processing failed, will try again later")
            # TODO: what to do with the failed ones?
            # the problem is that the message has been submitted to some repos
            # while other submissions failed, which means we must retry just the
            # failed ones; that can leave a tricky window where some external
            # message processors see the message from one source but not yet
            # from another, and they should wait in that case.
            return False
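In both variants of process() the boolean return value acts as the acknowledgement: only a fully successful run deletes the inbox entry via self.bc_inbox_repo.delete(queue_message_id), while returning False leaves the queue message in place so it is redelivered and retried later. A toy loop illustrating that at-least-once contract (the names below are illustrative, not the project's worker API):

import collections


class FlakyProcessor:
    """Toy processor: fails on the first attempt, succeeds on the second."""

    def __init__(self):
        self.attempts = collections.Counter()

    def process(self, queue_message_id, message):
        self.attempts[queue_message_id] += 1
        return self.attempts[queue_message_id] > 1


queue = collections.deque([('id-1', 'message-1')])
processor = FlakyProcessor()

while queue:
    queue_message_id, message = queue.popleft()
    if not processor.process(queue_message_id, message):
        # a False return keeps the message around for another delivery
        queue.append((queue_message_id, message))

assert processor.attempts['id-1'] == 2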
Example #11
    def process(self, queue_message_id, message):
        # let it be processed
        logger.info("Received message to process: %s", message)

        # TODO: if some steps succeed and others fail, the successful steps
        # will be repeated on the next attempt; that's fine for object storage
        # but not for queues
        try:
            ml_OK = self.message_lake_repo.post(message)
        except Exception as e:
            logger.exception(e)
            ml_OK = False
        try:
            acl_OK = self.object_acl_repo.post(message)
        except Exception as e:
            logger.exception(e)
            acl_OK = False

        # pub_OK defaults to True so the success check below still works when
        # loopback notifications are switched off
        pub_OK = True
        if ENV_SEND_LOOPBACK_MESSAGES:
            # Normally disabled: it is pointless to notify ourselves about our
            # own messages at this stage, and there are currently no consumers
            # for such notifications. The publish outbox feeds notifications to
            # internal clients and is really only useful for received messages,
            # not sent ones; unless local apps turn out to want these events.
            try:
                # delay it a little so the message has had time to reach the repo,
                # and drop the status field because notifications don't need it
                message_without_status = Message.from_dict(
                    message.to_dict(exclude=['status'])
                )
                pub_OK = self.notifications_repo.post(
                    message_without_status,
                    delay_seconds=3
                )
            except Exception as e:
                logger.exception(e)
                pub_OK = False

        # blockchain part - pass the message to the blockchain worker
        # so it can be shared to the foreign parties
        if message.status == 'pending':
            # not received from the foreign party = must be sent
            logger.info("Sending this message out to the world")
            try:
                outbox_OK = self.blockchain_outbox_repo.post(message)
            except Exception as e:
                logger.exception(e)
                outbox_OK = False
        else:
            outbox_OK = True

        ret_OK = True
        if message.status == 'received':
            # might need to download remote documents using the
            # Documents Spider
            if message.sender != self.country:
                # if it's not a loopback message (test installations only)
                logger.info(
                    "Scheduling download remote documents for %s", message
                )
                try:
                    ret_OK = self.object_retreval_repo.post_job({
                        "action": "download-object",
                        "sender": message.sender,
                        "object": message.obj
                    })
                except Exception as e:
                    logger.exception(e)
                    ret_OK = False
            else:
                logger.info(
                    "Seems that this message is loopback (sent by us back to us)"
                )

        if ml_OK and acl_OK and ret_OK and pub_OK and outbox_OK:
            self.bc_inbox_repo.delete(queue_message_id)
            return True
        else:
            logger.error("Task processing failed, will try again later")
            # TODO: what to do with the failed ones?
            # the problem is that the message has been submitted to some repos
            # while other submissions failed, which means we must retry just the
            # failed ones; that can leave a tricky window where some external
            # message processors see the message from one source but not yet
            # from another, and they should wait in that case.
            return False