Example #1
    def __create__random_result(self):
        def rnd():
            return randint(1, 1000)

        suffix = rnd()

        env = Env(name=f'Test Env {suffix}', short_name=f'TE{suffix}')
        component = Component(name=f'Test Component {suffix}')
        item = Item(name=f'Test Item {suffix}', args=f'args {suffix}')
        status = Status(test_status='Passed', priority=suffix)
        platform = Platform(name=f'Test Platform {suffix}')
        os = Os(name=f'Test Os {suffix}')
        run = Run(name=f'Test Run {suffix}', session=f'Test Session {suffix}')

        return Result(
            validation=Validation(
                name=f'Test Validation {suffix}',
                env=env,
                platform=platform,
                os=os,
            ),
            env=env,
            platform=platform,
            os=os,
            component=component,
            item=item,
            status=status,
            run=run,
        )
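The helper above builds a Result object graph whose names all share one random suffix, so repeated calls rarely collide. Below is a minimal, self-contained sketch of that pattern; SimpleNamespace stands in for the real model classes (Env, Run, Result, ...), which the snippet does not define.

from random import randint
from types import SimpleNamespace


def create_random_result():
    """Sketch of the random-suffix pattern; SimpleNamespace replaces the real models."""
    suffix = randint(1, 1000)
    env = SimpleNamespace(name=f'Test Env {suffix}', short_name=f'TE{suffix}')
    run = SimpleNamespace(name=f'Test Run {suffix}', session=f'Test Session {suffix}')
    return SimpleNamespace(env=env, run=run)


result = create_random_result()
print(result.env.name)  # e.g. 'Test Env 537'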
Example #2
def heroes_create():
    hero_json = request.get_json()

    status = Status.query.filter_by(status=hero_json['status']).first()

    if not status:
        status = Status(status=hero_json['status'])
        db.session.add(status)
        db.session.commit()

    # https://docs.sqlalchemy.org/en/latest/orm/tutorial.html#common-filter-operators
    hero = db.session.query(SuperHero).filter(
        or_(SuperHero.superhero_alias == hero_json.get('superhero_alias'),
            SuperHero.email_address == hero_json.get('email_address'))
    ).first()

    if not hero:
        new_hero = SuperHero(superhero_alias=hero_json.get('superhero_alias'),
                             email_address=hero_json.get('email_address'),
                             first_name=hero_json.get('first_name'),
                             last_name=hero_json.get('last_name'),
                             started_on=hero_json.get('started_on',
                                                      datetime.utcnow()),
                             finished_on=hero_json.get('finished_on'),
                             income=hero_json.get('income'),
                             status=status)

        db.session.add(new_hero)
        db.session.commit()

        return 'KER-SPLOOSH! New hero added to the data trust.', 201
    else:
        # https://httpstatuses.com/409
        return 'ZOWIE! A hero with the same superhero_alias or email_address already exists.', 409
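A hedged client-side sketch of exercising this view. The /heroes URL, host, and port are hypothetical, since the snippet defines only the view function and not its route registration; the payload keys mirror the fields the view reads from request.get_json().

import requests

payload = {
    'superhero_alias': 'Night Owl',
    'email_address': 'night.owl@example.com',
    'first_name': 'Nina',
    'last_name': 'Okafor',
    'status': 'active',
}
# POST the JSON body the view reads via request.get_json()
response = requests.post('http://localhost:5000/heroes', json=payload)
print(response.status_code)  # 201 on first insert, 409 for a duplicate alias or email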
Example #3
    def get_oldest_object_to_retry(self):
        """Grab the oldest report or report slice object to retry.

        returns: object to retry or None.
        """
        status_info = Status()
        current_time = datetime.now(pytz.utc)
        objects_count = self.calculate_queued_objects(current_time,
                                                      status_info)
        if self.object_class == Report:
            QUEUED_REPORTS.set(objects_count)
        else:
            QUEUED_REPORT_SLICES.set(objects_count)
        LOG.info(
            format_message(
                self.prefix, 'Number of %s waiting to be processed: %s' %
                (self.object_prefix.lower() + 's', objects_count)))
        # first we have to query for all objects with commit retries
        commit_retry_query = self.object_class.objects.filter(
            retry_type=self.object_class.GIT_COMMIT)
        # then we grab the oldest object from the query
        oldest_commit_object = self.return_queryset_object(
            queryset=commit_retry_query)
        if oldest_commit_object:
            same_commit = oldest_commit_object.git_commit == status_info.git_commit
            if not same_commit:
                return oldest_commit_object
        # If the above doesn't return, we should query for all time retries
        time_retry_query = self.object_class.objects.filter(
            retry_type=self.object_class.TIME)
        oldest_time_object = self.return_queryset_object(
            queryset=time_retry_query)
        if oldest_time_object:
            minutes_passed = int(
                (current_time -
                 oldest_time_object.last_update_time).total_seconds() / 60)
            if minutes_passed >= RETRY_TIME:
                return oldest_time_object
        # if we haven't returned a retry object, return None
        return None
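A hedged sketch of how a polling loop might consume get_oldest_object_to_retry. The processor argument and its assign_object hand-off method are hypothetical stand-ins for the surrounding processor class, which the snippet does not show.

import time


def run_retry_loop(processor, poll_seconds=60):
    """Poll for the oldest retryable object and hand it off for processing."""
    while processor.should_run:
        retry_object = processor.get_oldest_object_to_retry()
        if retry_object is None:
            # nothing is due for retry yet; wait before polling again
            time.sleep(poll_seconds)
            continue
        processor.assign_object(retry_object)  # hypothetical method, not in the snippet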
Example #4
    def setUpClass(cls):
        """Test Class setup."""
        # remove filters on logging
        logging.disable(logging.NOTSET)
        cls.status_info = Status()
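A hedged, self-contained sketch of the same setup pattern together with its usual counterpart: setUpClass lifts log filtering for the class, and tearDownClass restores it so other suites stay quiet. The Status() construction from the snippet is left as a comment because that class is not importable here.

import logging
import unittest


class StatusTestBase(unittest.TestCase):
    """Sketch only; the real test class and Status model are not shown in the snippet."""

    @classmethod
    def setUpClass(cls):
        logging.disable(logging.NOTSET)   # remove filters on logging
        # cls.status_info = Status()      # as in the original snippet

    @classmethod
    def tearDownClass(cls):
        logging.disable(logging.CRITICAL)  # silence logs again after this class runs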
Example #5
    def update_slice_state(self, options,
                           report_slice):  # noqa: C901 (too-complex)
        """
        Update the report slice state and save.

        :param options: <dict> dictionary potentially containing the following:
            state: <str> the state to update to
            retry: <enum> RETRY.clear=clear count, RETRY.increment=increase count
            retry_type: <str> either time=retry after time,
                git_commit=retry after new commit
            report_json: <dict> dictionary containing the report json
            report_platform_id: <str> string containing the report_platform_id
            candidate_hosts: <dict> dictionary containing hosts that were
                successfully verified and uploaded
            failed_hosts: <dict> dictionary containing hosts that failed
                verification or upload
            ready_to_archive: <bool> whether or not the slice should be archived
        :param report_slice: <ReportSlice> the report slice to update
        """
        try:
            state = options.get('state')
            retry_type = options.get('retry_type')
            retry = options.get('retry', RETRY.clear)
            candidate_hosts = options.get('candidate_hosts')
            failed_hosts = options.get('failed_hosts')
            ready_to_archive = options.get('ready_to_archive')
            status_info = Status()
            report_slice.last_update_time = datetime.now(pytz.utc)
            report_slice.state = state
            report_slice.git_commit = status_info.git_commit
            report_slice_data = {
                'last_update_time': datetime.now(pytz.utc),
                'state': state,
                'git_commit': status_info.git_commit
            }
            if not retry_type:
                retry_type = ReportSlice.TIME
            if retry == RETRY.clear:
                # After a successful transaction when we have reached the update
                # point, we want to set the retry count back to 0 because
                # any future failures should be unrelated
                report_slice_data['retry_count'] = 0
                report_slice_data['retry_type'] = ReportSlice.TIME
            elif retry == RETRY.increment:
                current_count = report_slice.retry_count
                report_slice_data['retry_count'] = current_count + 1
                report_slice_data['retry_type'] = ReportSlice.TIME
            # the other choice for retry is RETRY.keep_same, in which case we don't
            # touch the retry count because we want to preserve it as is
            if candidate_hosts is not None:
                # candidate_hosts shrinks until it is (hopefully) empty once all of
                # the candidates have been handled, so we rewrite it each time
                report_slice_data['candidate_hosts'] = json.dumps(
                    candidate_hosts)
            if failed_hosts:
                # for failed hosts this list can keep growing, so we add the
                # newly failed hosts to the previous value
                failed = json.loads(report_slice.failed_hosts)
                for host in failed_hosts:
                    failed.append(host)
                report_slice_data['failed_hosts'] = json.dumps(failed)
            if ready_to_archive:
                report_slice_data['ready_to_archive'] = ready_to_archive
            state_info = json.loads(report_slice.state_info)
            state_info.append(state)
            report_slice_data['state_info'] = json.dumps(state_info)
            serializer = ReportSliceSerializer(instance=report_slice,
                                               data=report_slice_data,
                                               partial=True)
            serializer.is_valid(raise_exception=True)
            serializer.save()
            LOG.info(
                format_message(self.prefix,
                               'Successfully updated report slice %s' %
                               report_slice.report_slice_id,
                               account_number=self.account_number,
                               report_platform_id=self.report_platform_id))
        except Exception as error:  # pylint: disable=broad-except
            DB_ERRORS.inc()
            self.should_run = False
            LOG.error(
                format_message(
                    self.prefix,
                    'Could not update report slice record due to the following error %s.'
                    % str(error),
                    account_number=self.account_number,
                    report_platform_id=self.report_platform_id))
            print_error_loop_event()
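A hedged sketch of a call site for update_slice_state. RETRY.clear and ReportSlice.TIME appear in the snippet itself; ReportSlice.VALIDATED, the import paths, and the mark_slice_validated wrapper are hypothetical and stand in for whatever state constant and module layout the caller actually uses.

# Hypothetical import paths; the snippet does not show where these names live.
# from api.models import ReportSlice
# from processor.report_processor import RETRY


def mark_slice_validated(processor, report_slice):
    """Sketch: clear the retry counter and flag the slice for archival."""
    options = {
        'state': ReportSlice.VALIDATED,   # hypothetical state constant
        'retry': RETRY.clear,             # reset retry_count after a successful update
        'retry_type': ReportSlice.TIME,
        'candidate_hosts': {},            # every candidate has been handled
        'ready_to_archive': True,
    }
    processor.update_slice_state(options, report_slice)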
Example #6
    def update_object_state(self, options):  # noqa: C901 (too-complex)
        """
        Update the report processor state and save.

        :param options: <dict> potentially containing the following:
            retry: <enum> RETRY.clear=clear count, RETRY.increment=increase count
            retry_type: <str> either time=retry after time,
                git_commit=retry after new commit
            report_json: <dict> dictionary containing the report json
            report_platform_id: <str> string containing report_platform_id
            candidate_hosts: <dict> dictionary containing hosts that were
                successfully verified and uploaded
            failed_hosts: <dict> dictionary containing hosts that failed
                verification or upload
            status: <str> either success or failure based on the report
            host_inventory_api_version: <str> the inventory api version
            source: <str> containing either qpc or satellite
            source_metadata: <dict> containing metadata info about the source
            ready_to_archive: <bool> whether or not the object should be archived
        """
        try:
            status_info = Status()
            self.state = self.next_state

            # grab all of the potential options
            retry_type = options.get('retry_type', self.object_class.TIME)
            retry = options.get('retry', RETRY.clear)
            report_json = options.get('report_json')
            report_platform_id = options.get('report_platform_id')
            candidate_hosts = options.get('candidate_hosts')
            failed_hosts = options.get('failed_hosts')
            status = options.get('status')
            host_inventory_api_version = options.get('host_inventory_api_version')
            source = options.get('source')
            source_metadata = options.get('source_metadata')
            ready_to_archive = options.get('ready_to_archive')
            start_processing = options.get('start_processing')

            update_data = {
                'last_update_time': datetime.now(pytz.utc),
                'state': self.next_state,
                'git_commit': status_info.git_commit
            }
            # if this is the start of the processing, update the processing
            # start time
            if start_processing:
                update_data['processing_start_time'] = datetime.now(pytz.utc)

            if retry == RETRY.clear:
                # After a successful transaction when we have reached the update
                # point, we want to set the Retry count back to 0 because
                # any future failures should be unrelated
                update_data['retry_count'] = 0
                update_data['retry_type'] = self.object_class.TIME
            elif retry == RETRY.increment:
                retry_count = self.report_or_slice.retry_count
                update_data['retry_count'] = retry_count + 1
                update_data['retry_type'] = retry_type

            # the other choice for retry is RETRY.keep_same, in which case we don't
            # touch the retry count because we want to preserve it as is
            if report_json:
                update_data['report_json'] = json.dumps(report_json)
            if report_platform_id:
                update_data['report_platform_id'] = report_platform_id
            if candidate_hosts is not None:
                # candidate_hosts shrinks until it is (hopefully) empty once all of
                # the candidates have been handled, so we rewrite it each time
                update_data['candidate_hosts'] = json.dumps(candidate_hosts)
            if failed_hosts:
                # for failed hosts this list can keep growing, so we add the
                # newly failed hosts to the previous value
                failed = json.loads(self.report_or_slice.failed_hosts)
                for host in failed_hosts:
                    failed.append(host)
                update_data['failed_hosts'] = json.dumps(failed)
            if status:
                update_data['upload_ack_status'] = status
            if host_inventory_api_version:
                update_data['host_inventory_api_version'] = \
                    host_inventory_api_version
            if source:
                update_data['source'] = source
            if source_metadata:
                update_data['source_metadata'] = json.dumps(source_metadata)
            if ready_to_archive:
                update_data['ready_to_archive'] = ready_to_archive

            state_info = json.loads(self.report_or_slice.state_info)
            state_info.append(self.next_state)
            update_data['state_info'] = json.dumps(state_info)

            serializer = self.object_serializer(
                instance=self.report_or_slice,
                data=update_data,
                partial=True)

            serializer.is_valid(raise_exception=True)
            serializer.save()

        except Exception as error:
            DB_ERRORS.inc()
            self.should_run = False
            LOG.error(format_message(
                self.prefix,
                'Could not update %s record due to the following error %s.' % (
                    self.object_prefix.lower(), str(error)),
                account_number=self.account_number, report_platform_id=self.report_platform_id))
            print_error_loop_event()
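The retry directives referenced throughout these processors (RETRY.clear, RETRY.increment, RETRY.keep_same) imply a small enum along the following lines. This is a sketch inferred from how the snippets use those names, not the project's actual definition.

from enum import Enum


class RETRY(Enum):
    """Inferred sketch of the retry directives; the values are arbitrary."""
    clear = 0       # reset retry_count to 0 after a successful update
    increment = 1   # bump retry_count after a failure
    keep_same = 2   # leave retry_count untouched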
Example #7
    def test_calculating_queued_reports(self):
        """Test the calculate_queued_reports method."""
        status_info = Status()
        current_time = datetime.now(pytz.utc)
        self.report_record.state = Report.NEW
        self.report_record.save()
        reports_to_process = self.processor.calculate_queued_objects(
            current_time, status_info)
        self.assertEqual(reports_to_process, 1)

        min_old_time = current_time - timedelta(hours=8)
        older_report = Report(upload_srv_kafka_msg=json.dumps(self.msg),
                              account='4321',
                              report_platform_id=self.uuid2,
                              state=Report.STARTED,
                              state_info=json.dumps([Report.NEW]),
                              last_update_time=min_old_time,
                              retry_count=1,
                              retry_type=Report.TIME)
        older_report.save()

        retry_commit_report = Report(upload_srv_kafka_msg=json.dumps(self.msg),
                                     account='4321',
                                     report_platform_id=self.uuid2,
                                     state=Report.DOWNLOADED,
                                     state_info=json.dumps([Report.NEW]),
                                     last_update_time=min_old_time,
                                     git_commit='3948384729',
                                     retry_type=Report.GIT_COMMIT,
                                     retry_count=1)
        retry_commit_report.save()

        # create some reports that should not be counted
        not_old_enough = current_time - timedelta(hours=1)
        too_young_report = Report(upload_srv_kafka_msg=json.dumps(self.msg),
                                  account='4321',
                                  report_platform_id=self.uuid2,
                                  state=Report.DOWNLOADED,
                                  state_info=json.dumps([Report.NEW]),
                                  last_update_time=not_old_enough,
                                  git_commit='3948384729',
                                  retry_type=Report.TIME,
                                  retry_count=1)
        too_young_report.save()

        same_commit_report = Report(upload_srv_kafka_msg=json.dumps(self.msg),
                                    account='4321',
                                    report_platform_id=self.uuid2,
                                    state=Report.DOWNLOADED,
                                    state_info=json.dumps([Report.NEW]),
                                    last_update_time=min_old_time,
                                    git_commit=status_info.git_commit,
                                    retry_type=Report.GIT_COMMIT,
                                    retry_count=1)
        same_commit_report.save()

        reports_to_process = self.processor.calculate_queued_objects(
            current_time, status_info)
        self.assertEqual(reports_to_process, 3)

        # delete the older report object
        Report.objects.get(id=older_report.id).delete()
        Report.objects.get(id=retry_commit_report.id).delete()
        Report.objects.get(id=too_young_report.id).delete()
        Report.objects.get(id=same_commit_report.id).delete()
Example #8
    def update_slice_state(self, options,
                           report_slice):  # noqa: C901 (too-complex)
        """
        Update the report slice state and save.

        :param options: <dict> dictionary potentially containing the following:
            state: <str> the state to update to
            retry: <enum> RETRY.clear=clear count, RETRY.increment=increase count
            retry_type: <str> either time=retry after time,
                git_commit=retry after new commit
            report_json: <dict> dictionary containing the report json
            report_platform_id: <str> string containing the report_platform_id
            ready_to_archive: <bool> whether or not the slice should be archived
        :param report_slice: <ReportSlice> the report slice to update
        """
        try:
            state = options.get('state')
            retry_type = options.get('retry_type')
            retry = options.get('retry', RETRY.clear)
            ready_to_archive = options.get('ready_to_archive')
            status_info = Status()
            report_slice.last_update_time = datetime.now(pytz.utc)
            report_slice.state = state
            report_slice.git_commit = status_info.git_commit
            report_slice_data = {
                'last_update_time': datetime.now(pytz.utc),
                'state': state,
                'git_commit': status_info.git_commit
            }
            if not retry_type:
                retry_type = ReportSlice.TIME
            if retry == RETRY.clear:
                # After a successful transaction when we have reached the update
                # point, we want to set the retry count back to 0 because
                # any future failures should be unrelated
                report_slice_data['retry_count'] = 0
                report_slice_data['retry_type'] = ReportSlice.TIME
            elif retry == RETRY.increment:
                current_count = report_slice.retry_count
                report_slice_data['retry_count'] = current_count + 1
                report_slice_data['retry_type'] = ReportSlice.TIME
            # the other choice for retry is RETRY.keep_same, in which case we don't
            # touch the retry count because we want to preserve it as is
            if ready_to_archive:
                report_slice_data['ready_to_archive'] = ready_to_archive
            state_info = json.loads(report_slice.state_info)
            state_info.append(state)
            report_slice_data['state_info'] = json.dumps(state_info)
            serializer = ReportSliceSerializer(instance=report_slice,
                                               data=report_slice_data,
                                               partial=True)
            serializer.is_valid(raise_exception=True)
            serializer.save()
            LOG.info(
                format_message(self.prefix,
                               'Successfully updated report slice %s' %
                               report_slice.report_slice_id,
                               account_number=self.account_number,
                               report_platform_id=self.report_platform_id))
        except Exception as error:  # pylint: disable=broad-except
            DB_ERRORS.inc()
            self.should_run = False
            LOG.error(
                format_message(
                    self.prefix,
                    'Could not update report slice record due to the following error %s.'
                    % str(error),
                    account_number=self.account_number,
                    report_platform_id=self.report_platform_id))
            stop_all_event_loops()
Example #9
    def update_object_state(self, options):  # noqa: C901 (too-complex)
        """
        Update the report processor state and save.

        :param options: <dict> potentially containing the following:
            retry: <enum> RETRY.clear=clear count, RETRY.increment=increase count
            retry_type: <str> either time=retry after time,
                git_commit=retry after new commit
            report_json: <dict> dictionary containing the report json
            report_platform_id: <str> string containing report_platform_id
            status: <str> either success or failure based on the report
            source: <str> containing OpenShift cluster ID
            source_metadata: <dict> containing metadata info about the source
            ready_to_archive: <bool> whether or not the object should be archived
        """
        try:
            status_info = Status()
            self.state = self.next_state

            # grab all of the potential options
            retry_type = options.get('retry_type', self.object_class.TIME)
            retry = options.get('retry', RETRY.clear)
            report_json = options.get('report_json')
            report_platform_id = options.get('report_platform_id')
            status = options.get('status')
            source = options.get('source')
            source_metadata = options.get('source_metadata')
            ready_to_archive = options.get('ready_to_archive')
            start_processing = options.get('start_processing')

            update_data = {
                'last_update_time': datetime.now(pytz.utc),
                'state': self.next_state,
                'git_commit': status_info.git_commit
            }
            # if this is the start of the processing, update the processing
            # start time
            if start_processing:
                update_data['processing_start_time'] = datetime.now(pytz.utc)

            if retry == RETRY.clear:
                # After a successful transaction when we have reached the update
                # point, we want to set the Retry count back to 0 because
                # any future failures should be unrelated
                update_data['retry_count'] = 0
                update_data['retry_type'] = self.object_class.TIME
            elif retry == RETRY.increment:
                retry_count = self.report_or_slice.retry_count
                update_data['retry_count'] = retry_count + 1
                update_data['retry_type'] = retry_type

            # the other choice for retry is RETRY.keep_same, in which case we don't
            # touch the retry count because we want to preserve it as is
            if report_json:
                update_data['report_json'] = json.dumps(report_json)
            if report_platform_id:
                update_data['report_platform_id'] = report_platform_id
            if status:
                update_data['upload_ack_status'] = status
            if source:
                update_data['source'] = source
            if source_metadata:
                update_data['source_metadata'] = json.dumps(source_metadata)
            if ready_to_archive:
                update_data['ready_to_archive'] = ready_to_archive

            state_info = json.loads(self.report_or_slice.state_info)
            state_info.append(self.next_state)
            update_data['state_info'] = json.dumps(state_info)

            serializer = self.object_serializer(instance=self.report_or_slice,
                                                data=update_data,
                                                partial=True)

            serializer.is_valid(raise_exception=True)
            serializer.save()

        except Exception as error:
            DB_ERRORS.inc()
            self.should_run = False
            LOG.error(
                format_message(
                    self.prefix,
                    'Could not update %s record due to the following error %s.'
                    % (self.object_prefix.lower(), str(error)),
                    account_number=self.account_number,
                    report_platform_id=self.report_platform_id))
            stop_all_event_loops()