Example 1
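This variant drives GroupAssigneeProcessor directly with the newer Snuba API: process_message returns an InsertBatch, the batch is written through write_processed_messages, and the resulting row is read back from groupassignee_local via the cluster's query connection.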
    def test_messages(self) -> None:
        processor = GroupAssigneeProcessor("sentry_groupasignee")

        metadata = KafkaMessageMetadata(offset=42,
                                        partition=0,
                                        timestamp=datetime(1970, 1, 1))

        ret = processor.process_message(self.INSERT_MSG, metadata)
        assert ret == InsertBatch([self.PROCESSED])
        self.write_processed_messages([ret])
        ret = (get_cluster(StorageSetKey.EVENTS).get_query_connection(
            ClickhouseClientSettings.QUERY).execute(
                "SELECT * FROM groupassignee_local;"))
        assert ret[0] == (
            42,  # offset
            0,  # deleted
            2,  # project_id
            1359,  # group_id
            datetime(2019, 9, 19, 0, 17, 55),
            1,  # user_id
            None,  # team_id
        )

        ret = processor.process_message(self.UPDATE_MSG_NO_KEY_CHANGE,
                                        metadata)
        assert ret == InsertBatch([self.PROCESSED])

        # Tests an update with a key change, which becomes two inserts:
        # one deleting the old row and one inserting the new one.
        ret = processor.process_message(self.UPDATE_MSG_WITH_KEY_CHANGE,
                                        metadata)
        assert ret == InsertBatch([self.DELETED, self.PROCESSED_UPDATE])

        ret = processor.process_message(self.DELETE_MSG, metadata)
        assert ret == InsertBatch([self.DELETED])
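For reference, a plausible set of imports behind this snippet. The exact module paths moved around between Snuba versions, so treat these as assumptions rather than the canonical layout:

# Assumed imports for the snippet above; module paths are a best guess
# against the Snuba source of this era and may differ in your checkout.
from datetime import datetime

from snuba.clusters.cluster import ClickhouseClientSettings, get_cluster
from snuba.clusters.storage_sets import StorageSetKey
from snuba.consumers.types import KafkaMessageMetadata
from snuba.datasets.cdc.groupassignee_processor import GroupAssigneeProcessor
from snuba.processor import InsertBatch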
Example 2
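This is an apparently older variant of the same test: process_message returns an object exposing .data rather than an InsertBatch, BEGIN/COMMIT payloads are decoded with json.loads and yield None, KafkaMessageMetadata takes no timestamp, and the read-back goes through a bare ClickhousePool against test_groupassignee_local.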
    def test_messages(self):
        processor = GroupAssigneeProcessor('sentry_groupasignee')

        metadata = KafkaMessageMetadata(
            offset=42,
            partition=0,
        )

        begin_msg = json.loads(self.BEGIN_MSG)
        ret = processor.process_message(begin_msg, metadata)
        assert ret is None

        commit_msg = json.loads(self.COMMIT_MSG)
        ret = processor.process_message(commit_msg, metadata)
        assert ret is None

        insert_msg = json.loads(self.INSERT_MSG)
        ret = processor.process_message(insert_msg, metadata)
        assert ret.data == [self.PROCESSED]
        self.write_processed_records(ret.data)
        cp = ClickhousePool()
        ret = cp.execute("SELECT * FROM test_groupassignee_local;")
        assert ret[0] == (
            42,  # offset
            0,  # deleted
            2,  # project_id
            1359,  # group_id
            datetime(2019, 9, 19, 0, 17, 55),
            1,  # user_id
            None,  # team_id
        )

        update_msg = json.loads(self.UPDATE_MSG_NO_KEY_CHANGE)
        ret = processor.process_message(update_msg, metadata)
        assert ret.data == [self.PROCESSED]

        # Tests an update with a key change, which becomes two inserts:
        # one deleting the old row and one inserting the new one.
        update_msg = json.loads(self.UPDATE_MSG_WITH_KEY_CHANGE)
        ret = processor.process_message(update_msg, metadata)
        assert ret.data == [self.DELETED, self.PROCESSED_UPDATE]

        delete_msg = json.loads(self.DELETE_MSG)
        ret = processor.process_message(delete_msg, metadata)
        assert ret.data == [self.DELETED]
Example 3
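This variant puts a CdcTableNameMessageFilter in front of the processor: BEGIN/COMMIT messages carry no "table" header and are dropped by the filter before they ever reach process_message, while INSERT/UPDATE/DELETE messages pass only when the header matches POSTGRES_TABLE.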
    def test_messages(self):
        processor = GroupAssigneeProcessor("sentry_groupasignee")
        message_filter = CdcTableNameMessageFilter(
            postgres_table=POSTGRES_TABLE)

        metadata = KafkaMessageMetadata(offset=42,
                                        partition=0,
                                        timestamp=datetime(1970, 1, 1))

        assert message_filter.should_drop(
            self.__make_msg(0, 42, self.BEGIN_MSG, []))

        assert message_filter.should_drop(
            self.__make_msg(0, 42, self.COMMIT_MSG, []))

        assert not message_filter.should_drop(
            self.__make_msg(0, 42, self.INSERT_MSG,
                            [("table", POSTGRES_TABLE.encode())]))
        insert_msg = json.loads(self.INSERT_MSG)
        ret = processor.process_message(insert_msg, metadata)
        assert ret == InsertBatch([self.PROCESSED])
        self.write_processed_messages([ret])
        ret = (get_cluster(StorageSetKey.EVENTS).get_query_connection(
            ClickhouseClientSettings.QUERY).execute(
                "SELECT * FROM groupassignee_local;"))
        assert ret[0] == (
            42,  # offset
            0,  # deleted
            2,  # project_id
            1359,  # group_id
            datetime(2019, 9, 19, 0, 17, 55),
            1,  # user_id
            None,  # team_id
        )

        assert not message_filter.should_drop(
            self.__make_msg(
                0,
                42,
                self.UPDATE_MSG_NO_KEY_CHANGE,
                [("table", POSTGRES_TABLE.encode())],
            ))
        update_msg = json.loads(self.UPDATE_MSG_NO_KEY_CHANGE)
        ret = processor.process_message(update_msg, metadata)
        assert ret == InsertBatch([self.PROCESSED])

        # Tests an update with a key change, which becomes two inserts:
        # one deleting the old row and one inserting the new one.
        assert not message_filter.should_drop(
            self.__make_msg(
                0,
                42,
                self.UPDATE_MSG_WITH_KEY_CHANGE,
                [("table", POSTGRES_TABLE.encode())],
            ))
        update_msg = json.loads(self.UPDATE_MSG_WITH_KEY_CHANGE)
        ret = processor.process_message(update_msg, metadata)
        assert ret == InsertBatch([self.DELETED, self.PROCESSED_UPDATE])

        assert not message_filter.should_drop(
            self.__make_msg(
                0,
                42,
                self.DELETE_MSG,
                [("table", POSTGRES_TABLE.encode())],
            ))
        delete_msg = json.loads(self.DELETE_MSG)
        ret = processor.process_message(delete_msg, metadata)
        assert ret == InsertBatch([self.DELETED])
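A minimal sketch of the drop rule these assertions imply (a missing or mismatched "table" header means drop); this illustrates the behavior and is not the actual CdcTableNameMessageFilter source:

from typing import Optional, Sequence, Tuple

def should_drop(headers: Sequence[Tuple[str, bytes]], postgres_table: str) -> bool:
    # BEGIN/COMMIT messages carry no "table" header, so they are dropped
    # here without ever being JSON-decoded.
    table: Optional[bytes] = next(
        (value for key, value in headers if key == "table"), None
    )
    return table is None or table.decode("utf-8") != postgres_table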
Example 4
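This variant combines the older processor API of Example 2 with the table-name filter of Example 3, but with the opposite convention for header-less messages: BEGIN, COMMIT, and the header-less DELETE are kept by the filter, and the processor itself returns None for the transaction markers.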
    def test_messages(self):
        processor = GroupAssigneeProcessor("sentry_groupasignee")
        message_filter = CdcTableNameMessageFilter(
            postgres_table=POSTGRES_TABLE)

        metadata = KafkaMessageMetadata(
            offset=42,
            partition=0,
        )

        assert not message_filter.should_drop(
            self.__make_msg(0, 42, self.BEGIN_MSG, []))
        begin_msg = json.loads(self.BEGIN_MSG)
        ret = processor.process_message(begin_msg, metadata)
        assert ret is None

        assert not message_filter.should_drop(
            self.__make_msg(0, 42, self.COMMIT_MSG, []))
        commit_msg = json.loads(self.COMMIT_MSG)
        ret = processor.process_message(commit_msg, metadata)
        assert ret is None

        assert not message_filter.should_drop(
            self.__make_msg(0, 42, self.INSERT_MSG,
                            [("table", POSTGRES_TABLE.encode())]))
        insert_msg = json.loads(self.INSERT_MSG)
        ret = processor.process_message(insert_msg, metadata)
        assert ret.data == [self.PROCESSED]
        self.write_processed_records(ret.data)
        ret = clickhouse_ro.execute("SELECT * FROM test_groupassignee_local;")
        assert ret[0] == (
            42,  # offset
            0,  # deleted
            2,  # project_id
            1359,  # group_id
            datetime(2019, 9, 19, 0, 17, 55),
            1,  # user_id
            None,  # team_id
        )

        assert not message_filter.should_drop(
            self.__make_msg(
                0,
                42,
                self.UPDATE_MSG_NO_KEY_CHANGE,
                [("table", POSTGRES_TABLE.encode())],
            ))
        update_msg = json.loads(self.UPDATE_MSG_NO_KEY_CHANGE)
        ret = processor.process_message(update_msg, metadata)
        assert ret.data == [self.PROCESSED]

        # Tests an update with a key change, which becomes two inserts:
        # one deleting the old row and one inserting the new one.
        assert not message_filter.should_drop(
            self.__make_msg(
                0,
                42,
                self.UPDATE_MSG_WITH_KEY_CHANGE,
                [("table", POSTGRES_TABLE.encode())],
            ))
        update_msg = json.loads(self.UPDATE_MSG_WITH_KEY_CHANGE)
        ret = processor.process_message(update_msg, metadata)
        assert ret.data == [self.DELETED, self.PROCESSED_UPDATE]

        assert not message_filter.should_drop(
            self.__make_msg(0, 42, self.DELETE_MSG, []))
        delete_msg = json.loads(self.DELETE_MSG)
        ret = processor.process_message(delete_msg, metadata)
        assert ret.data == [self.DELETED]
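The assertions above imply the opposite convention from Example 3: a message without a "table" header is kept. A hedged sketch of that older rule, illustrative only and not the Snuba source:

from typing import Optional, Sequence, Tuple

def should_drop_legacy(headers: Sequence[Tuple[str, bytes]], postgres_table: str) -> bool:
    table: Optional[bytes] = next(
        (value for key, value in headers if key == "table"), None
    )
    # Header-less messages (BEGIN, COMMIT, the DELETE above) pass through;
    # the processor then returns None for the transaction markers.
    return table is not None and table.decode("utf-8") != postgres_table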