Example #1
0
    def get_events(
        self,
        gt: Optional[int] = None,
        gte: Optional[int] = None,
        lt: Optional[int] = None,
        lte: Optional[int] = None,
        limit: Optional[int] = None,
        is_ascending: bool = False,
        page_size: Optional[int] = None,
    ) -> Iterable[MessageLogged]:
        """Yield MessageLogged events by walking the log's time buckets.

        The search window is clipped to [log start, now] and further
        narrowed by the gt/gte/lt/lte bounds. Buckets are visited in
        ascending or descending order, and iteration stops after
        ``limit`` events when a limit is given.
        """
        assert limit is None or limit > 0

        # Clip the window to [log start, now] and the caller's bounds.
        now = time.time()
        earliest = max(float(self.log.started_on), gt or 0, gte or 0)
        latest = min(float(now), lt or now, lte or now)

        # Begin at whichever end of the window matches iteration order.
        bucket_position = earliest if is_ascending else latest

        yielded = 0
        while True:
            bucket_id = make_timebucket_id(
                self.log.name, bucket_position, self.log.bucket_size
            )
            events = self.event_store.iter_events(
                originator_id=bucket_id,
                gt=gt,
                gte=gte,
                lt=lt,
                lte=lte,
                limit=limit,
                is_ascending=is_ascending,
                page_size=page_size,
            )
            for event in events:
                yield event
                if limit is None:
                    continue
                yielded += 1
                if yielded >= limit:
                    return

            # Move to the adjacent bucket, stopping at the window edge.
            if is_ascending:
                next_start = next_bucket_starts(bucket_position, self.log.bucket_size)
                if next_start > latest:
                    return
                bucket_position = next_start
            else:
                if bucket_position < earliest:
                    return
                bucket_position = previous_bucket_starts(
                    bucket_position, self.log.bucket_size
                )
Example #2
0
    def get_events(self,
                   gt=None,
                   gte=None,
                   lt=None,
                   lte=None,
                   limit=None,
                   is_ascending=False,
                   page_size=None):
        """Yield MessageLogged events by walking the log's time buckets.

        The window is clipped to [log start, now] and narrowed by the
        gt/gte/lt/lte bounds; buckets are visited in ascending or
        descending order. Stops after ``limit`` events when given.
        """
        assert limit is None or limit > 0

        # Identify the first time bucket.
        now = time()
        started_on = self.log.started_on
        absolute_latest = min(now, lt or now, lte or now)
        absolute_earlyist = max(started_on, gt or 0, gte or 0)
        if is_ascending:
            position = absolute_earlyist
        else:
            position = absolute_latest

        # Start counting events.
        count_events = 0

        while True:
            bucket_id = make_timebucket_id(self.log.name, position,
                                           self.log.bucket_size)
            for message_logged_event in self.event_store.get_domain_events(
                    entity_id=bucket_id,
                    gt=gt,
                    gte=gte,
                    lt=lt,
                    lte=lte,
                    limit=limit,
                    is_ascending=is_ascending,
                    page_size=page_size,
            ):
                yield message_logged_event

                if limit is not None:
                    count_events += 1
                    if count_events >= limit:
                        # PEP 479: raising StopIteration inside a generator
                        # becomes a RuntimeError on Python 3.7+; a plain
                        # return is the correct way to finish a generator.
                        return

            # See if there's another bucket.
            if is_ascending:
                next_timestamp = next_bucket_starts(position,
                                                    self.log.bucket_size)
                if next_timestamp > absolute_latest:
                    return
                else:
                    position = next_timestamp
            else:
                if position < absolute_earlyist:
                    return
                else:
                    position = previous_bucket_starts(position,
                                                      self.log.bucket_size)
    def get_events(self, gt=None, gte=None, lt=None, lte=None, limit=None, is_ascending=False, page_size=None):
        """Yield MessageLogged events from the log's time buckets.

        Walks buckets between the log's start and now (narrowed by the
        gt/gte/lt/lte bounds) in the requested order, and stops once
        ``limit`` events have been yielded when a limit is supplied.
        """
        assert limit is None or limit > 0

        # Clip the window to [log start, now] and the caller's bounds.
        now = time()
        lower_bound = max(self.log.started_on, gt or 0, gte or 0)
        upper_bound = min(now, lt or now, lte or now)

        # Begin at whichever end of the window matches iteration order.
        position = lower_bound if is_ascending else upper_bound

        num_yielded = 0
        while True:
            bucket_id = make_timebucket_id(self.log.name, position, self.log.bucket_size)
            for event in self.event_store.get_domain_events(
                originator_id=bucket_id,
                gt=gt,
                gte=gte,
                lt=lt,
                lte=lte,
                limit=limit,
                is_ascending=is_ascending,
                page_size=page_size,
            ):
                yield event
                if limit is None:
                    continue
                num_yielded += 1
                if num_yielded >= limit:
                    return

            # Move to the adjacent bucket, stopping at the window edge.
            if is_ascending:
                following = next_bucket_starts(position, self.log.bucket_size)
                if following > upper_bound:
                    return
                position = following
            else:
                if position < lower_bound:
                    return
                position = previous_bucket_starts(position, self.log.bucket_size)
Example #4
0
    def test_buckets_of_all_sizes(self):
        """Check every valid bucket size (and the default) round-trips a
        message, and that invalid sizes raise ValueError everywhere."""
        # The six copy/paste stanzas in the original differ only in the
        # bucket size, so exercise them in one loop; None means "use the
        # default bucket size" (no keyword passed).
        for bucket_size in ['second', 'minute', 'hour', 'day', 'month',
                            'year', None]:
            kwargs = {} if bucket_size is None else {'bucket_size': bucket_size}
            log = start_new_timebucketedlog(name=uuid4(), **kwargs)
            log.append_message('message')

            # Get the messages.
            reader = TimebucketedlogReader(log, self.log_event_store)
            self.assertTrue(len(list(reader.get_messages())))

        # Start new invalid sized log.
        with self.assertRaises(ValueError):
            start_new_timebucketedlog(name=uuid4(), bucket_size='invalid')

        # Check the helper methods are protected against invalid bucket sizes.
        with self.assertRaises(ValueError):
            make_timebucket_id(uuid4(), decimaltimestamp(),
                               bucket_size='invalid')

        with self.assertRaises(ValueError):
            bucket_starts(decimaltimestamp(), bucket_size='invalid')

        with self.assertRaises(ValueError):
            bucket_duration(bucket_size='invalid')
    def test_buckets_of_all_sizes(self):
        """Check every valid bucket size (and the default) round-trips a
        message, and that invalid sizes raise ValueError everywhere."""
        # The six copy/paste stanzas in the original differ only in the
        # bucket size, so exercise them in one loop; None means "use the
        # default bucket size" (no keyword passed).
        for bucket_size in ['second', 'minute', 'hour', 'day', 'month',
                            'year', None]:
            kwargs = {} if bucket_size is None else {'bucket_size': bucket_size}
            log = start_new_timebucketedlog(name=uuid4(), **kwargs)
            log.append_message('message')

            # Get the messages.
            reader = TimebucketedlogReader(log, self.log_event_store)
            self.assertTrue(len(list(reader.get_messages())))

        # Start new invalid sized log.
        with self.assertRaises(ValueError):
            start_new_timebucketedlog(name=uuid4(), bucket_size='invalid')

        # Check the helper methods are protected against invalid bucket sizes.
        with self.assertRaises(ValueError):
            make_timebucket_id(uuid4(), time.time(), bucket_size='invalid')

        with self.assertRaises(ValueError):
            bucket_starts(time.time(), bucket_size='invalid')

        with self.assertRaises(ValueError):
            bucket_duration(bucket_size='invalid')