Exemplo n.º 1
0
    def copy_immutable_expected_data(self):
        """Snapshot rows that must stay immutable into the expected-data table.

        Copies the "not updated" materialized view into
        ``self.expected_data_table_name`` so that, after the stress run,
        ``validate_range_not_expected_to_change`` can compare the live view
        against this snapshot.  On any failure ``_validate_not_updated_data``
        is cleared so the later validation step is skipped instead of failing
        on missing expected data.
        """
        if not self._validate_not_updated_data:
            return

        if not (self.view_name_for_not_updated_data
                and self.expected_data_table_name):
            # Without both a source view and a destination table there is
            # nothing to copy; disable the validation and report why.
            self._validate_not_updated_data = False
            DataValidatorEvent.DataValidator(
                severity=Severity.WARNING,
                message=
                f"Problem during copying expected data: view not found. "
                f"View name for not updated_data: {self.view_name_for_not_updated_data}; "
                f"Expected data table name {self.expected_data_table_name}. "
                f"Data validation of not updated rows won't be performed"
            ).publish()
            return

        LOGGER.debug('Copy expected data for immutable rows: %s -> %s',
                     self.view_name_for_not_updated_data,
                     self.expected_data_table_name)
        if not self.longevity_self_object.copy_view(
                node=self.longevity_self_object.db_cluster.nodes[0],
                src_keyspace=self.keyspace_name,
                src_view=self.view_name_for_not_updated_data,
                dest_keyspace=self.keyspace_name,
                dest_table=self.expected_data_table_name,
                copy_data=True):
            self._validate_not_updated_data = False
            DataValidatorEvent.DataValidator(
                severity=Severity.ERROR,
                error=
                f"Problem during copying expected data from {self.view_name_for_not_updated_data} "
                f"to {self.expected_data_table_name}. "
                f"Data validation of not updated rows won't be performed"
            ).publish()
    def test_data_validator_event_msgfmt(self):
        """Verify ``str()`` formatting and pickle round-trip for each validator event type."""
        cases = [
            (DataValidatorEvent.DataValidator(severity=Severity.ERROR, error="e1"),
             "(DataValidatorEvent Severity.ERROR): type=DataValidator error=e1"),
            (DataValidatorEvent.ImmutableRowsValidator(severity=Severity.ERROR, error="e2"),
             "(DataValidatorEvent Severity.ERROR): type=ImmutableRowsValidator error=e2"),
            (DataValidatorEvent.UpdatedRowsValidator(severity=Severity.WARNING, message="m3"),
             "(DataValidatorEvent Severity.WARNING): type=UpdatedRowsValidator message=m3"),
            (DataValidatorEvent.DeletedRowsValidator(severity=Severity.NORMAL, message="m4"),
             "(DataValidatorEvent Severity.NORMAL): type=DeletedRowsValidator message=m4"),
        ]
        for event, expected_msg in cases:
            self.assertEqual(str(event), expected_msg)
            # The event must survive a pickle round-trip unchanged.
            self.assertEqual(event, pickle.loads(pickle.dumps(event)))
Exemplo n.º 3
0
    def validate_deleted_rows(self, session, during_nemesis=False):
        """Verify that LWT DELETE statements actually removed rows.

        Part of the data in the user profile table is deleted using LWT.
        Those rows are tracked by the materialized view whose name contains
        "_deletions".  After the prepare phase, the row count of that view is
        saved in ``self.rows_before_deletion`` as the expected upper bound.
        During stress (after prepare) LWT delete statements run for a few
        hours; once stress is finished this method verifies the current row
        count in the "_deletions" MV is smaller than the saved count.

        :param session: CQL session used to query the materialized view
        :param during_nemesis: when True, only log results quietly; events
            are raised at the end of the test only
        """
        if not self.rows_before_deletion:
            LOGGER.debug(
                'Verify deleted rows can\'t be performed as expected rows count had not been saved'
            )
            return

        pk_name = self.base_table_partition_keys[0]
        if not during_nemesis:
            LOGGER.debug('Verify deleted rows')

        actual_result = self.longevity_self_object.fetch_all_rows(
            session=session,
            default_fetch_size=self.DEFAULT_FETCH_SIZE,
            statement=
            f"SELECT {pk_name} FROM {self.view_name_for_deletion_data}",
            verbose=not during_nemesis)
        if not actual_result:
            DataValidatorEvent.DeletedRowsValidator(
                severity=Severity.ERROR,
                error=
                f"Can't validate deleted rows. Fetch all rows from {self.view_name_for_deletion_data} failed. "
                f"See error above in the sct.log").publish()
            return

        if len(actual_result) < self.rows_before_deletion:
            if not during_nemesis:
                # raise info event in the end of test only
                DataValidatorEvent.DeletedRowsValidator(
                    severity=Severity.NORMAL,
                    message="Validation deleted rows finished successfully"
                ).publish()
            else:
                LOGGER.debug('Validation deleted rows finished successfully')
        else:
            # Known upstream issue: deletions may not be reflected in the MV.
            # Use lazy %-style args so formatting is skipped if the level is off.
            LOGGER.warning(
                'Deleted rows were not found. May be issue #6181. '
                'Actual dataset length: %s, Expected dataset length: %s',
                len(actual_result), self.rows_before_deletion)
Exemplo n.º 4
0
    def run_prepare_write_cmd(self):
        """Run the prepare-write phase, then set up the LWT data validator.

        After the base-class prepare step, nemesis is stopped and the
        materialized views are given time to settle so the expected-data
        snapshots (immutable rows, updated rows, rows-for-deletion count)
        are taken against a stable cluster.  With more than one nemesis the
        validator is replaced by a no-op mock.
        """
        # `mutation_write_*' errors are thrown when system is overloaded and got timeout on
        # operations on system.paxos table.
        #
        # Decrease severity of this event during prepare.  Shouldn't impact on test result.
        with ignore_mutation_write_errors():
            super().run_prepare_write_cmd()

        # Stop nemesis. Prefer all nodes will be run before collect data for validation
        # Increase timeout to wait for nemesis finish
        if self.db_cluster.nemesis_threads:
            self.db_cluster.stop_nemesis(timeout=300)

        # Wait for MVs data will be fully inserted (running on background)
        time.sleep(300)

        if self.db_cluster.nemesis_count > 1:
            # Parallel nemeses make data validation unreliable; replace the
            # validator with a MagicMock so the calls below become no-ops.
            self.data_validator = MagicMock()
            DataValidatorEvent.DataValidator(severity=Severity.WARNING,
                                             message="Test runs with parallel nemesis. Data validator is disabled."
                                             ).publish()
        else:
            self.data_validator = LongevityDataValidator(longevity_self_object=self,
                                                         user_profile_name='c-s_lwt',
                                                         base_table_partition_keys=self.BASE_TABLE_PARTITION_KEYS)

        # Take the expected-data snapshots used by the validate_* methods later.
        self.data_validator.copy_immutable_expected_data()
        self.data_validator.copy_updated_expected_data()
        self.data_validator.save_count_rows_for_deletion()

        # Run nemesis during stress as it was stopped before copy expected data
        if self.params.get('nemesis_during_prepare'):
            self.start_nemesis()
Exemplo n.º 5
0
    def save_count_rows_for_deletion(self):
        """Save the pre-stress row count of the "_deletions" materialized view.

        The count is stored in ``self.rows_before_deletion`` and later used by
        ``validate_deleted_rows`` as the expected upper bound.  If the deletion
        view name is unknown, a warning event is published and nothing is
        saved.
        """
        if not self.view_name_for_deletion_data:
            DataValidatorEvent.DataValidator(
                severity=Severity.WARNING,
                message=f"Problem during copying expected data: not found. "
                f"View name for deletion data: {self.view_name_for_deletion_data}. "
                f"Data validation of deleted rows won't be performed").publish(
                )
            return

        # Lazy %-style args: formatting is skipped when debug logging is off.
        LOGGER.debug('Get rows count in %s MV before stress',
                     self.view_name_for_deletion_data)
        pk_name = self.base_table_partition_keys[0]
        with self.longevity_self_object.db_cluster.cql_connection_patient(
                self.longevity_self_object.db_cluster.nodes[0],
                keyspace=self.keyspace_name) as session:
            rows_before_deletion = self.longevity_self_object.fetch_all_rows(
                session=session,
                default_fetch_size=self.DEFAULT_FETCH_SIZE,
                statement=
                f"SELECT {pk_name} FROM {self.view_name_for_deletion_data}")
            if rows_before_deletion:
                self.rows_before_deletion = len(rows_before_deletion)
                LOGGER.debug('%s rows for deletion', self.rows_before_deletion)
            else:
                # Previously this case was silent; make the skip visible.
                LOGGER.debug('No rows found in %s MV; deletion row count not saved',
                             self.view_name_for_deletion_data)
Exemplo n.º 6
0
    def test_data_validator_event_msgfmt(self):
        """Verify ``str()`` formatting (with period/event_id) and pickle round-trip."""
        fixed_event_id = "3916da00-643c-4886-bdd0-963d3ebac536"
        cases = [
            (DataValidatorEvent.DataValidator(severity=Severity.ERROR, error="e1"),
             "Severity.ERROR", "type=DataValidator error=e1"),
            (DataValidatorEvent.ImmutableRowsValidator(severity=Severity.ERROR, error="e2"),
             "Severity.ERROR", "type=ImmutableRowsValidator error=e2"),
            (DataValidatorEvent.UpdatedRowsValidator(severity=Severity.WARNING, message="m3"),
             "Severity.WARNING", "type=UpdatedRowsValidator message=m3"),
            (DataValidatorEvent.DeletedRowsValidator(severity=Severity.NORMAL, message="m4"),
             "Severity.NORMAL", "type=DeletedRowsValidator message=m4"),
        ]
        for event, severity_repr, tail in cases:
            # Pin the event_id so the formatted string is deterministic.
            event.event_id = fixed_event_id
            self.assertEqual(
                str(event),
                f"(DataValidatorEvent {severity_repr}) period_type=one-time "
                f"event_id={fixed_event_id}: {tail}")
            # The event must survive a pickle round-trip unchanged.
            self.assertEqual(event, pickle.loads(pickle.dumps(event)))
Exemplo n.º 7
0
    def copy_updated_expected_data(self):
        """Snapshot expected data for rows that LWT is allowed to update.

        For every "updated data" view, copy its partition keys into the
        matching expected-data table.  ``self._validate_updated_per_view``
        receives exactly one boolean per view recording whether the copy
        succeeded; it is later zipped against the view list by
        ``validate_range_expected_to_change``, so the counts must align.
        """
        if not self._validate_updated_data:
            return

        if not self.view_names_for_updated_data:
            self._validate_updated_per_view = [False]
            DataValidatorEvent.DataValidator(
                severity=Severity.WARNING,
                message=
                f"Problem during copying expected data: view not found. "
                f"View names for updated data: {self.view_names_for_updated_data}. "
                f"Data validation of updated rows won't be performed"
            ).publish()
            return

        LOGGER.debug('Copy expected data for updated rows. %s',
                     self.view_names_for_updated_data)
        for src_view in self.view_names_for_updated_data:
            expected_data_table_name = self.set_expected_data_table_name(
                src_view)
            LOGGER.debug('Expected data table name %s',
                         expected_data_table_name)
            if not self.longevity_self_object.copy_view(
                    node=self.longevity_self_object.db_cluster.nodes[0],
                    src_keyspace=self.keyspace_name,
                    src_view=src_view,
                    dest_keyspace=self.keyspace_name,
                    dest_table=expected_data_table_name,
                    columns_list=self.base_table_partition_keys,
                    copy_data=True):
                self._validate_updated_per_view.append(False)
                DataValidatorEvent.DataValidator(
                    severity=Severity.ERROR,
                    error=
                    f"Problem during copying expected data from {src_view} to {expected_data_table_name}. "
                    f"Data validation of updated rows won't be performed"
                ).publish()
                # BUG FIX: without this `continue`, the failure branch also
                # fell through and appended True, producing two flags for one
                # view and misaligning _validate_updated_per_view.
                continue
            self._validate_updated_per_view.append(True)
Exemplo n.º 8
0
    def validate_range_expected_to_change(self, session, during_nemesis=False):
        """
        In user profile 'data_dir/c-s_lwt_basic.yaml' LWT updates the lwt_indicator and author columns with hard coded
        values.

        Two more materialized views are added. The first one holds rows that are candidates for the update
        (i.e. all rows before the update).
        The second one holds rows with lwt_indicator=30000000 (i.e. only the updated rows)

        After prepare, all primary keys from the first materialized view are saved in a separate table as the
        expected result.

        After the updates are finished, two types of validation are performed:
        1. All primary key values saved in the expected-result table should be found in both views.
        2. The combined row count of both views is validated against the expected-result table's count.

        :param session: CQL session used to query the views and expected-data tables
        :param during_nemesis: when True, keep output quiet; events are raised at the end of the test only
        """
        if not (self._validate_updated_data
                and self.view_names_for_updated_data):
            LOGGER.debug(
                'Verify updated rows can\'t be performed as expected data has not been saved. '
                'See error above in the sct.log')
            return

        if not during_nemesis:
            LOGGER.debug('Verify updated rows')

        partition_keys = ', '.join(self.base_table_partition_keys)

        # List of tuples of correlated  view names for validation: before update, after update, expected data
        views_list = list(
            zip(
                self.view_names_for_updated_data,
                self.view_names_after_updated_data,
                [
                    self.set_expected_data_table_name(view)
                    for view in self.view_names_for_updated_data
                ],
                self._validate_updated_per_view,
            ))
        for views_set in views_list:
            # views_set[0] - view name with rows before update
            # views_set[1] - view name with rows after update
            # views_set[2] - expected-data table with all expected partition keys
            # views_set[3] - do perform validation for the view or not
            if not during_nemesis:
                LOGGER.debug('Verify updated row. View %s', views_set[0])
            if not views_set[3]:
                # Copying expected data failed for this view earlier; the
                # remaining views are skipped too (NOTE(review): `return`
                # rather than `continue` — confirm this is intended).
                DataValidatorEvent.UpdatedRowsValidator(
                    severity=Severity.WARNING,
                    message=
                    f"Can't start validation for {views_set[0]}. Copying expected data failed. "
                    f"See error above in the sct.log").publish()
                return

            before_update_rows = self.longevity_self_object.fetch_all_rows(
                session=session,
                default_fetch_size=self.DEFAULT_FETCH_SIZE,
                statement=f"SELECT {partition_keys} FROM {views_set[0]}",
                verbose=not during_nemesis)
            if not before_update_rows:
                DataValidatorEvent.UpdatedRowsValidator(
                    severity=Severity.WARNING,
                    message=
                    f"Can't validate updated rows. Fetch all rows from {views_set[0]} failed. "
                    f"See error above in the sct.log").publish()
                return

            after_update_rows = self.longevity_self_object.fetch_all_rows(
                session=session,
                default_fetch_size=self.DEFAULT_FETCH_SIZE,
                statement=f"SELECT {partition_keys} FROM {views_set[1]}",
                verbose=not during_nemesis)
            if not after_update_rows:
                DataValidatorEvent.UpdatedRowsValidator(
                    severity=Severity.WARNING,
                    message=
                    f"Can't validate updated rows. Fetch all rows from {views_set[1]} failed. "
                    f"See error above in the sct.log").publish()
                return

            expected_rows = self.longevity_self_object.fetch_all_rows(
                session=session,
                default_fetch_size=self.DEFAULT_FETCH_SIZE,
                statement=f"SELECT {partition_keys} FROM {views_set[2]}",
                verbose=not during_nemesis)
            if not expected_rows:
                DataValidatorEvent.UpdatedRowsValidator(
                    severity=Severity.WARNING,
                    message=
                    f"Can't validate updated row. Fetch all rows from {views_set[2]} failed. "
                    f"See error above in the sct.log").publish()
                return

            # Issue https://github.com/scylladb/scylla/issues/6181
            # Not fail the test if unexpected additional rows were found in actual result table
            if len(before_update_rows) + len(after_update_rows) > len(
                    expected_rows):
                DataValidatorEvent.UpdatedRowsValidator(
                    severity=Severity.WARNING,
                    message=f"View {views_set[0]}. "
                    f"Actual dataset length {len(before_update_rows) + len(after_update_rows)} "
                    f"more then expected dataset length: {len(expected_rows)}. "
                    f"Issue #6181").publish()
            else:
                # Every expected partition key must appear in exactly one of
                # the two views, so the sorted union must equal the expected set.
                actual_data = sorted(before_update_rows + after_update_rows)
                expected_data = sorted(expected_rows)
                if not during_nemesis:
                    assert actual_data == expected_data,\
                        'One or more rows are not as expected, suspected LWT wrong update'

                    assert len(before_update_rows) + len(after_update_rows) == len(expected_rows), \
                        'One or more rows are not as expected, suspected LWT wrong update. '\
                        f'Actual dataset length: {len(before_update_rows) + len(after_update_rows)}, ' \
                        f'Expected dataset length: {len(expected_rows)}'

                    # raise info event in the end of test only
                    DataValidatorEvent.UpdatedRowsValidator(
                        severity=Severity.NORMAL,
                        message=
                        f"Validation updated rows finished successfully. View {views_set[0]}"
                    ).publish()
                else:
                    LOGGER.debug(
                        'Validation updated rows.  View %s. Actual dataset length %s, '
                        'Expected dataset length: %s.', views_set[0],
                        len(before_update_rows) + len(after_update_rows),
                        len(expected_rows))
Exemplo n.º 9
0
    def validate_range_not_expected_to_change(self,
                                              session,
                                              during_nemesis=False):
        """
        Part of the data in the user profile table shouldn't be updated using LWT.
        That data is tracked by the materialized view with the "not_updated" substring in its name.
        After the prepare-write phase, all data from this materialized view is saved in a separate table as the
        expected result.
        During stress (after prepare) LWT update statements run for a few hours.
        When the updates are finished, this function verifies that the data in the "not_updated" MV and the
        expected-result table is the same.

        :param session: CQL session used to query the view and expected-data table
        :param during_nemesis: when True, keep output quiet; info/assert checks run at the end of the test only
        """

        if not (self._validate_not_updated_data
                and self.view_name_for_not_updated_data
                and self.expected_data_table_name):
            LOGGER.debug(
                'Verify immutable rows can\'t be performed as expected data has not been saved. '
                'See error above in the sct.log')
            return

        if not during_nemesis:
            LOGGER.debug('Verify immutable rows')

        actual_result = self.longevity_self_object.fetch_all_rows(
            session=session,
            default_fetch_size=self.DEFAULT_FETCH_SIZE,
            statement=f"SELECT * FROM {self.view_name_for_not_updated_data}",
            verbose=not during_nemesis)
        if not actual_result:
            DataValidatorEvent.ImmutableRowsValidator(
                severity=Severity.WARNING,
                message=f"Can't validate immutable rows. "
                f"Fetch all rows from {self.view_name_for_not_updated_data} failed. "
                f"See error above in the sct.log").publish()
            return

        expected_result = self.longevity_self_object.fetch_all_rows(
            session=session,
            default_fetch_size=self.DEFAULT_FETCH_SIZE,
            statement=f"SELECT * FROM {self.expected_data_table_name}",
            verbose=not during_nemesis)
        if not expected_result:
            DataValidatorEvent.ImmutableRowsValidator(
                severity=Severity.WARNING,
                message=
                f"Can't validate immutable rows. Fetch all rows from {self.expected_data_table_name} failed. "
                f"See error above in the sct.log").publish()
            return

        # Issue https://github.com/scylladb/scylla/issues/6181
        # Not fail the test if unexpected additional rows were found in actual result table
        if len(actual_result) > len(expected_result):
            DataValidatorEvent.ImmutableRowsValidator(
                severity=Severity.WARNING,
                message=
                f"Actual dataset length more then expected ({len(actual_result)} > {len(expected_result)}). "
                f"Issue #6181").publish()
        else:
            if not during_nemesis:
                # End-of-test path: hard-assert that the view still matches
                # the snapshot taken before the stress run.
                assert len(actual_result) == len(expected_result), \
                    'One or more rows are not as expected, suspected LWT wrong update. ' \
                    'Actual dataset length: {}, Expected dataset length: {}'.format(len(actual_result),
                                                                                    len(expected_result))

                assert actual_result == expected_result, \
                    'One or more rows are not as expected, suspected LWT wrong update'

                # Raise info event at the end of the test only.
                DataValidatorEvent.ImmutableRowsValidator(
                    severity=Severity.NORMAL,
                    message="Validation immutable rows finished successfully"
                ).publish()
            else:
                # Mid-nemesis path: report via event/log instead of asserting,
                # so an in-flight nemesis doesn't abort the test.
                if len(actual_result) < len(expected_result):
                    DataValidatorEvent.ImmutableRowsValidator(
                        severity=Severity.ERROR,
                        error=f"Verify immutable rows. "
                        f"One or more rows not found as expected, suspected LWT wrong update. "
                        f"Actual dataset length: {len(actual_result)}, "
                        f"Expected dataset length: {len(expected_result)}"
                    ).publish()
                else:
                    LOGGER.debug(
                        'Verify immutable rows. Actual dataset length: %s, Expected dataset length: %s',
                        len(actual_result), len(expected_result))