Example #1
    def test_read_rows(self):
        # Setup Expected Response
        last_scanned_row_key = b"-126"
        expected_response = {"last_scanned_row_key": last_scanned_row_key}
        expected_response = bigtable_pb2.ReadRowsResponse(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[iter([expected_response])])
        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_v2.BigtableClient()

        # Setup Request
        table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]")

        response = client.read_rows(table_name)
        resources = list(response)
        assert len(resources) == 1
        assert expected_response == resources[0]

        assert len(channel.requests) == 1
        expected_request = bigtable_pb2.ReadRowsRequest(table_name=table_name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
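
Example #1 builds on a ChannelStub test double that is not shown in the listing. A minimal sketch of such a stub, assuming it only needs to record each request and replay canned responses (the real test helper in google-cloud-python has more surface area), might look like this:

# Minimal ChannelStub sketch (an assumption for illustration, not the
# library's actual test helper). It records every (method, request) pair in
# `self.requests` and replays the canned responses in order.
class ChannelStub(object):
    def __init__(self, responses=()):
        self.responses = list(responses)
        self.requests = []

    def _make_callable(self, method):
        def call(request, timeout=None, metadata=None, credentials=None):
            self.requests.append((method, request))
            return self.responses.pop(0)

        return call

    # Factory methods a grpc.Channel exposes; generated gRPC stubs call these
    # to obtain their per-method callables.
    def unary_unary(self, method, request_serializer=None, response_deserializer=None):
        return self._make_callable(method)

    def unary_stream(self, method, request_serializer=None, response_deserializer=None):
        return self._make_callable(method)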
Example #2
    def build_updated_request(self):
        """ Updates the given message request as per last scanned key
        """
        r_kwargs = {
            "table_name": self.message.table_name,
            "filter": self.message.filter,
        }

        if self.message.rows_limit != 0:
            r_kwargs["rows_limit"] = max(
                1, self.message.rows_limit - self.rows_read_so_far
            )

        # if neither RowSet.row_keys nor RowSet.row_ranges currently exist,
        # add row_range that starts with last_scanned_key as start_key_open
        # to request only rows that have not been returned yet
        if not self.message.HasField("rows"):
            row_range = data_v2_pb2.RowRange(start_key_open=self.last_scanned_key)
            r_kwargs["rows"] = data_v2_pb2.RowSet(row_ranges=[row_range])
        else:
            row_keys = self._filter_rows_keys()
            row_ranges = self._filter_row_ranges()
            r_kwargs["rows"] = data_v2_pb2.RowSet(
                row_keys=row_keys, row_ranges=row_ranges
            )
        return data_messages_v2_pb2.ReadRowsRequest(**r_kwargs)
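
For context, a hedged sketch of how a request manager like this might be driven from a resuming read loop; the `manager` and `read_method` names and the surrounding wiring are assumptions, while `message`, `last_scanned_key`, and `build_updated_request` follow Example #2:

from google.api_core.exceptions import ServiceUnavailable


def read_rows_with_resume(manager, read_method):
    # `manager` is assumed to hold the original ReadRowsRequest in
    # `manager.message` plus the bookkeeping fields used in Example #2.
    # Per-row accounting for `rows_read_so_far` is omitted from this sketch.
    request = manager.message
    while True:
        try:
            for response in read_method(request):
                manager.last_scanned_key = response.last_scanned_row_key
                yield response
            return
        except ServiceUnavailable:
            # Resume after the last scanned key instead of re-reading rows
            # that were already returned to the caller.
            request = manager.build_updated_request()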
Example #3
    def build_updated_request(self):
        """ Updates the given message request as per last scanned key
        """
        r_kwargs = {
            'table_name': self.message.table_name,
            'filter': self.message.filter
        }

        if self.message.rows_limit != 0:
            r_kwargs['rows_limit'] = max(
                1, self.message.rows_limit - self.rows_read_so_far)

        row_keys = self._filter_rows_keys()
        row_ranges = self._filter_row_ranges()
        r_kwargs['rows'] = data_v2_pb2.RowSet(row_keys=row_keys,
                                              row_ranges=row_ranges)

        return data_messages_v2_pb2.ReadRowsRequest(**r_kwargs)
Example #4
    def test_read_rows(self):
        # Setup Expected Response
        last_scanned_row_key = b'-126'
        expected_response = {'last_scanned_row_key': last_scanned_row_key}
        expected_response = bigtable_pb2.ReadRowsResponse(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[iter([expected_response])])
        client = bigtable_v2.BigtableClient(channel=channel)

        # Setup Request
        table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')

        response = client.read_rows(table_name)
        resources = list(response)
        assert len(resources) == 1
        assert expected_response == resources[0]

        assert len(channel.requests) == 1
        expected_request = bigtable_pb2.ReadRowsRequest(table_name=table_name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
Example #5
def _ReadRowsRequestPB(*args, **kw):
    from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2

    return messages_v2_pb2.ReadRowsRequest(*args, **kw)
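
A hypothetical use of this factory when asserting against a captured request (the table name and limit are placeholders):

expected_request = _ReadRowsRequestPB(
    table_name="projects/my-project/instances/my-instance/tables/my-table",
    rows_limit=10,
)
# Protobuf messages compare by field value, so this can be asserted directly
# against a request captured by a stub channel, as in Examples #1 and #4.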
Example #6
def _create_row_request(
    table_name,
    start_key=None,
    end_key=None,
    filter_=None,
    limit=None,
    end_inclusive=False,
    app_profile_id=None,
    row_set=None,
):
    """Creates a request to read rows in a table.

    :type table_name: str
    :param table_name: The name of the table to read from.

    :type start_key: bytes
    :param start_key: (Optional) The beginning of a range of row keys to
                      read from. The range will include ``start_key``. If
                      left empty, will be interpreted as the empty string.

    :type end_key: bytes
    :param end_key: (Optional) The end of a range of row keys to read from.
                    The range will not include ``end_key``. If left empty,
                    will be interpreted as an infinite string.

    :type filter_: :class:`.RowFilter`
    :param filter_: (Optional) The filter to apply to the contents of the
                    specified row(s). If unset, reads the entire table.

    :type limit: int
    :param limit: (Optional) The read will terminate after committing to N
                  rows' worth of results. The default (zero) is to return
                  all results.

    :type end_inclusive: bool
    :param end_inclusive: (Optional) Whether the ``end_key`` should be
                  considered inclusive. The default is False (exclusive).

    :type app_profile_id: str
    :param app_profile_id: (Optional) The unique name of the AppProfile.

    :type row_set: :class:`row_set.RowSet`
    :param row_set: (Optional) The row set containing multiple row keys and
                    row_ranges.

    :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest`
    :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs.
    :raises: :class:`ValueError <exceptions.ValueError>` if both
             ``row_set`` and one of ``start_key`` or ``end_key`` are set
    """
    request_kwargs = {"table_name": table_name}
    if (start_key is not None or end_key is not None) and row_set is not None:
        raise ValueError("Row range and row set cannot be "
                         "set simultaneously")

    if filter_ is not None:
        request_kwargs["filter"] = filter_.to_pb()
    if limit is not None:
        request_kwargs["rows_limit"] = limit
    if app_profile_id is not None:
        request_kwargs["app_profile_id"] = app_profile_id

    message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs)

    if start_key is not None or end_key is not None:
        row_set = RowSet()
        row_set.add_row_range(
            RowRange(start_key, end_key, end_inclusive=end_inclusive)
        )

    if row_set is not None:
        row_set._update_message_request(message)

    return message
Example #7
    def read_rows(
        self,
        table_name,
        app_profile_id=None,
        rows=None,
        filter_=None,
        rows_limit=None,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        metadata=None,
    ):
        """
        Streams back the contents of all requested rows in key order, optionally
        applying the same Reader filter to each. Depending on their size,
        rows and cells may be broken up across multiple responses, but
        atomicity of each row will still be preserved. See the
        ReadRowsResponse documentation for details.

        Example:
            >>> from google.cloud import bigtable_v2
            >>>
            >>> client = bigtable_v2.BigtableClient()
            >>>
            >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
            >>>
            >>> for element in client.read_rows(table_name):
            ...     # process element
            ...     pass

        Args:
            table_name (str): The unique name of the table from which to read. Values are of the form
                ``projects/<project>/instances/<instance>/tables/<table>``.
            app_profile_id (str): This value specifies routing for replication. If not specified, the
                "default" application profile will be used.
            rows (Union[dict, ~google.cloud.bigtable_v2.types.RowSet]): The row keys and/or ranges to read. If not specified, reads from all rows.

                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.bigtable_v2.types.RowSet`
            filter_ (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to apply to the contents of the specified row(s). If unset,
                reads the entirety of each row.

                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.bigtable_v2.types.RowFilter`
            rows_limit (long): The read will terminate after committing to N rows' worth of results. The
                default (zero) is to return all results.
            retry (Optional[google.api_core.retry.Retry]):  A retry object used
                to retry requests. If ``None`` is specified, requests will
                be retried using a default configuration.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.

        Returns:
            Iterable[~google.cloud.bigtable_v2.types.ReadRowsResponse].

        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                    failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                    to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        # Wrap the transport method to add retry and timeout logic.
        if "read_rows" not in self._inner_api_calls:
            self._inner_api_calls[
                "read_rows"] = google.api_core.gapic_v1.method.wrap_method(
                    self.transport.read_rows,
                    default_retry=self._method_configs["ReadRows"].retry,
                    default_timeout=self._method_configs["ReadRows"].timeout,
                    client_info=self._client_info,
                )

        request = bigtable_pb2.ReadRowsRequest(
            table_name=table_name,
            app_profile_id=app_profile_id,
            rows=rows,
            filter=filter_,
            rows_limit=rows_limit,
        )
        if metadata is None:
            metadata = []
        metadata = list(metadata)
        try:
            routing_header = [("table_name", table_name)]
        except AttributeError:
            pass
        else:
            routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
                routing_header)
            metadata.append(routing_metadata)

        return self._inner_api_calls["read_rows"](
            request, retry=retry, timeout=timeout, metadata=metadata
        )
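
As the docstring above notes, `rows` and `filter_` also accept plain dicts shaped like their protobuf messages. A hypothetical call (the key values and field choices are placeholders):

rows = {"row_keys": [b"user#0001", b"user#0002"]}  # RowSet-shaped dict
filter_ = {"cells_per_column_limit_filter": 1}     # RowFilter-shaped dict

for response in client.read_rows(
    table_name, rows=rows, filter_=filter_, rows_limit=100
):
    for chunk in response.chunks:
        print(chunk.row_key, chunk.value)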
Example #8
def _create_row_request(table_name,
                        row_key=None,
                        start_key=None,
                        end_key=None,
                        filter_=None,
                        limit=None,
                        end_inclusive=False,
                        app_profile_id=None):
    """Creates a request to read rows in a table.

    :type table_name: str
    :param table_name: The name of the table to read from.

    :type row_key: bytes
    :param row_key: (Optional) The key of a specific row to read from.

    :type start_key: bytes
    :param start_key: (Optional) The beginning of a range of row keys to
                      read from. The range will include ``start_key``. If
                      left empty, will be interpreted as the empty string.

    :type end_key: bytes
    :param end_key: (Optional) The end of a range of row keys to read from.
                    The range will not include ``end_key``. If left empty,
                    will be interpreted as an infinite string.

    :type filter_: :class:`.RowFilter`
    :param filter_: (Optional) The filter to apply to the contents of the
                    specified row(s). If unset, reads the entire table.

    :type limit: int
    :param limit: (Optional) The read will terminate after committing to N
                  rows' worth of results. The default (zero) is to return
                  all results.

    :type end_inclusive: bool
    :param end_inclusive: (Optional) Whether the ``end_key`` should be
                  considered inclusive. The default is False (exclusive).

    :type app_profile_id: str
    :param app_profile_id: (Optional) The unique name of the AppProfile.

    :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest`
    :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs.
    :raises: :class:`ValueError <exceptions.ValueError>` if both
             ``row_key`` and one of ``start_key`` and ``end_key`` are set
    """
    request_kwargs = {'table_name': table_name}
    if (row_key is not None
            and (start_key is not None or end_key is not None)):
        raise ValueError('Row key and row range cannot be '
                         'set simultaneously')
    range_kwargs = {}
    if start_key is not None or end_key is not None:
        if start_key is not None:
            range_kwargs['start_key_closed'] = _to_bytes(start_key)
        if end_key is not None:
            end_key_key = 'end_key_open'
            if end_inclusive:
                end_key_key = 'end_key_closed'
            range_kwargs[end_key_key] = _to_bytes(end_key)
    if filter_ is not None:
        request_kwargs['filter'] = filter_.to_pb()
    if limit is not None:
        request_kwargs['rows_limit'] = limit
    if app_profile_id is not None:
        request_kwargs['app_profile_id'] = app_profile_id

    message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs)

    if row_key is not None:
        message.rows.row_keys.append(_to_bytes(row_key))

    if range_kwargs:
        message.rows.row_ranges.add(**range_kwargs)

    return message
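
Hypothetical calls to this helper, one for a single row and one for a half-open key range (the table name and keys are placeholders):

table = "projects/my-project/instances/my-instance/tables/my-table"

single_row = _create_row_request(table, row_key=b"user#0042")
key_range = _create_row_request(
    table, start_key=b"user#", end_key=b"user$", limit=100
)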
Example #9
    def read_rows(self,
                  table_name,
                  app_profile_id=None,
                  rows=None,
                  filter_=None,
                  rows_limit=None,
                  retry=google.api_core.gapic_v1.method.DEFAULT,
                  timeout=google.api_core.gapic_v1.method.DEFAULT):
        """
        Streams back the contents of all requested rows in key order, optionally
        applying the same Reader filter to each. Depending on their size,
        rows and cells may be broken up across multiple responses, but
        atomicity of each row will still be preserved. See the
        ReadRowsResponse documentation for details.

        Example:
            >>> from google.cloud import bigtable_v2
            >>>
            >>> client = bigtable_v2.BigtableClient()
            >>>
            >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
            >>>
            >>> for element in client.read_rows(table_name):
            ...     # process element
            ...     pass

        Args:
            table_name (str): The unique name of the table from which to read.
                Values are of the form
                ``projects/<project>/instances/<instance>/tables/<table>``.
            app_profile_id (str): This is a private alpha release of Cloud Bigtable replication. This feature
                is not currently available to most Cloud Bigtable customers. This feature
                might be changed in backward-incompatible ways and is not recommended for
                production use. It is not subject to any SLA or deprecation policy.

                This value specifies routing for replication. If not specified, the
                \"default\" application profile will be used.
            rows (Union[dict, ~google.cloud.bigtable_v2.types.RowSet]): The row keys and/or ranges to read. If not specified, reads from all rows.
                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.bigtable_v2.types.RowSet`
            filter_ (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to apply to the contents of the specified row(s). If unset,
                reads the entirety of each row.
                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.bigtable_v2.types.RowFilter`
            rows_limit (long): The read will terminate after committing to N rows' worth of results. The
                default (zero) is to return all results.
            retry (Optional[google.api_core.retry.Retry]):  A retry object used
                to retry requests. If ``None`` is specified, requests will not
                be retried.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.

        Returns:
            Iterable[~google.cloud.bigtable_v2.types.ReadRowsResponse].

        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                    failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                    to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        request = bigtable_pb2.ReadRowsRequest(
            table_name=table_name,
            app_profile_id=app_profile_id,
            rows=rows,
            filter=filter_,
            rows_limit=rows_limit,
        )
        return self._read_rows(request, retry=retry, timeout=timeout)
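
Because `retry` and `timeout` default to the GAPIC method configuration, callers can override them per call. A hypothetical override (the predicate and deadline values below are illustrative, not the library's defaults):

from google.api_core import exceptions, retry

custom_retry = retry.Retry(
    predicate=retry.if_exception_type(exceptions.ServiceUnavailable),
    deadline=60.0,  # overall deadline across attempts, in seconds
)

for response in client.read_rows(table_name, retry=custom_retry, timeout=20.0):
    pass  # process each streamed ReadRowsResponse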