def _TimestampRangeFromTuple(self, ts_tuple):
  """Builds a bigtable TimestampRange from a (start, end) timestamp tuple.

  Args:
    ts_tuple: (start, end) pair. Either element may be None; values are
      RDFDatetime-compatible microsecond timestamps.

  Returns:
    A row_filters.TimestampRange covering the requested interval.
  """
  lower, upper = ts_tuple

  if lower is not None and lower == 0:
    # A zero lower bound means "unbounded below".
    lower = None
  if lower is not None:
    # RDFDatetime -> microseconds as float.
    lower = float(lower)
    # Bigtable only handles millisecond precision; usec-granular filters
    # fail the RPC with "Timestamp granularity mismatch", so truncate to ms.
    # https://github.com/GoogleCloudPlatform/google-cloud-python/issues/2626
    lower -= lower % 1000
    lower = self.DatetimeFromMicroseconds(lower)

  if upper is not None:
    # RDFDatetime -> microseconds as float.
    upper = float(upper)
    if upper >= (2 ** 64) / 2:
      # Some searches pass max signed int64 to mean "no upper limit"; the
      # API expresses that more cleanly with None.
      upper = None
    else:
      # Truncate to ms, same granularity constraint as the lower bound.
      upper -= upper % 1000
      # GRR expects inclusive bounds on both ends, while TimestampRange is
      # exclusive on the end — so bump by 1ms, the smallest increment
      # bigtable will accept.
      # https://github.com/GoogleCloudPlatform/google-cloud-python/issues/2608
      upper += 1000
      upper = self.DatetimeFromMicroseconds(upper)

  return row_filters.TimestampRange(start=lower, end=upper)
def test_timestamp_filter_millisecond_granularity(self):
  """Smoke-tests that a TimestampRangeFilter built from datetimes is accepted.

  Reads rows from the test table with a one-hour timestamp window and
  drains the result, verifying the read completes without an RPC error.
  """
  from google.cloud.bigtable import row_filters

  upper = datetime.datetime.now()
  lower = upper - datetime.timedelta(minutes=60)
  ts_range = row_filters.TimestampRange(start=lower, end=upper)
  read_filter = row_filters.TimestampRangeFilter(ts_range)
  rows = self._table.read_rows(filter_=read_filter)
  rows.consume_all()
def filter_limit_timestamp_range(project_id, instance_id, table_id):
    """Prints rows containing cells timestamped before 2019-05-01.

    Uses a TimestampRangeFilter with only an end bound (the range end is
    exclusive per the Bigtable API).

    Args:
        project_id: GCP project that owns the Bigtable instance.
        instance_id: Bigtable instance id.
        table_id: Table to read from.
    """
    client = bigtable.Client(project=project_id, admin=True)
    table = client.instance(instance_id).table(table_id)

    cutoff = datetime.datetime(2019, 5, 1)
    ts_filter = row_filters.TimestampRangeFilter(
        row_filters.TimestampRange(end=cutoff))
    for row in table.read_rows(filter_=ts_filter):
        print_row(row)