def test_mutate_rows(self):
    from google.cloud.bigtable._generated.bigtable_pb2 import (
        MutateRowsResponse)
    from google.cloud.bigtable.row import DirectRow
    from google.rpc.status_pb2 import Status
    from tests.unit._testing import _FakeStub

    client = _Client()
    instance = _Instance(self.INSTANCE_NAME, client=client)
    table = self._make_one(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b'row_key', table=table)
    row_1.set_cell('cf', b'col', b'value1')
    row_2 = DirectRow(row_key=b'row_key_2', table=table)
    row_2.set_cell('cf', b'col', b'value2')

    response = MutateRowsResponse(
        entries=[
            MutateRowsResponse.Entry(
                index=0,
                status=Status(code=0),
            ),
            MutateRowsResponse.Entry(
                index=1,
                status=Status(code=1),
            ),
        ],
    )

    # Patch the stub used by the API method.
    client._data_stub = _FakeStub([response])

    statuses = table.mutate_rows([row_1, row_2])
    result = [status.code for status in statuses]
    expected_result = [0, 1]

    self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_retry(self):
    from google.api_core.exceptions import ServiceUnavailable
    from google.cloud.bigtable._generated.bigtable_pb2 import (
        MutateRowsResponse)
    from google.cloud.bigtable.row import DirectRow
    from google.rpc.status_pb2 import Status
    from tests.unit._testing import _FakeStub

    # Setup:
    #   - Mutate 3 rows.
    # Action:
    #   - Initial attempt will mutate all 3 rows.
    # Expectation:
    #   - Second row returns retryable error code, so expect a raise.
    #   - State of responses_statuses should be
    #     [success, retryable, non-retryable]

    client = _Client()
    instance = _Instance(self.INSTANCE_NAME, client=client)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b'row_key', table=table)
    row_1.set_cell('cf', b'col', b'value1')
    row_2 = DirectRow(row_key=b'row_key_2', table=table)
    row_2.set_cell('cf', b'col', b'value2')
    row_3 = DirectRow(row_key=b'row_key_3', table=table)
    row_3.set_cell('cf', b'col', b'value3')

    response = MutateRowsResponse(
        entries=[
            MutateRowsResponse.Entry(
                index=0,
                status=Status(code=0),
            ),
            MutateRowsResponse.Entry(
                index=1,
                status=Status(code=4),  # DEADLINE_EXCEEDED: retryable
            ),
            MutateRowsResponse.Entry(
                index=2,
                status=Status(code=1),  # CANCELLED: not retryable
            ),
        ],
    )

    # Patch the stub used by the API method.
    client._data_stub = _FakeStub([response])

    worker = self._make_worker(
        table._instance._client, table.name, [row_1, row_2, row_3])

    with self.assertRaises(ServiceUnavailable):
        worker._do_mutate_retryable_rows()

    statuses = worker.responses_statuses
    result = [status.code for status in statuses]
    expected_result = [0, 4, 1]

    self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_second_try(self):
    from google.cloud.bigtable._generated.bigtable_pb2 import (
        MutateRowsResponse)
    from google.cloud.bigtable.row import DirectRow
    from google.rpc.status_pb2 import Status
    from tests.unit._testing import _FakeStub

    # Setup:
    #   - Mutate 4 rows.
    #   - First try results:
    #     [success, retryable, non-retryable, retryable]
    # Action:
    #   - Second try should re-attempt the 'retryable' rows.
    # Expectation:
    #   - After second try:
    #     [success, non-retryable, non-retryable, success]

    client = _Client()
    instance = _Instance(self.INSTANCE_NAME, client=client)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b'row_key', table=table)
    row_1.set_cell('cf', b'col', b'value1')
    row_2 = DirectRow(row_key=b'row_key_2', table=table)
    row_2.set_cell('cf', b'col', b'value2')
    row_3 = DirectRow(row_key=b'row_key_3', table=table)
    row_3.set_cell('cf', b'col', b'value3')
    row_4 = DirectRow(row_key=b'row_key_4', table=table)
    row_4.set_cell('cf', b'col', b'value4')

    # Only the two retryable rows are re-sent, so the second response
    # covers just those two entries.
    response = MutateRowsResponse(
        entries=[
            MutateRowsResponse.Entry(
                index=0,
                status=Status(code=1),  # row_2: now fails permanently
            ),
            MutateRowsResponse.Entry(
                index=1,
                status=Status(code=0),  # row_4: now succeeds
            ),
        ],
    )

    # Patch the stub used by the API method.
    client._data_stub = _FakeStub([response])

    worker = self._make_worker(
        table._instance._client, table.name, [row_1, row_2, row_3, row_4])
    worker.responses_statuses = self._make_responses_statuses(
        [0, 4, 1, 10])

    statuses = worker._do_mutate_retryable_rows()
    result = [status.code for status in statuses]
    expected_result = [0, 1, 1, 0]

    self.assertEqual(result, expected_result)
def test_callable_retry_timeout(self):
    from google.cloud.bigtable._generated.bigtable_pb2 import (
        MutateRowsResponse)
    from google.cloud.bigtable.row import DirectRow
    from google.cloud.bigtable.table import DEFAULT_RETRY
    from google.rpc.status_pb2 import Status

    # Setup:
    #   - Mutate 2 rows.
    # Action:
    #   - Initial attempt will mutate both rows.
    # Expectation:
    #   - Both rows always return retryable errors.
    #   - google.api_core.Retry should keep retrying.
    #   - Check MutateRows is called multiple times.
    #   - By the time the deadline is reached, statuses should be
    #     [retryable, retryable]

    client = _Client()
    instance = _Instance(self.INSTANCE_NAME, client=client)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b'row_key', table=table)
    row_1.set_cell('cf', b'col', b'value1')
    row_2 = DirectRow(row_key=b'row_key_2', table=table)
    row_2.set_cell('cf', b'col', b'value2')

    response = MutateRowsResponse(
        entries=[
            MutateRowsResponse.Entry(
                index=0,
                status=Status(code=4),
            ),
            MutateRowsResponse.Entry(
                index=1,
                status=Status(code=4),
            ),
        ],
    )

    # Patch the stub used by the API method.
    client._data_stub = mock.MagicMock()
    client._data_stub.MutateRows.return_value = [response]

    retry = DEFAULT_RETRY.with_delay(
        initial=0.1, maximum=0.2, multiplier=2.0).with_deadline(0.5)
    worker = self._make_worker(client, table.name, [row_1, row_2])
    statuses = worker(retry=retry)

    result = [status.code for status in statuses]
    expected_result = [4, 4]

    self.assertTrue(client._data_stub.MutateRows.call_count > 1)
    self.assertEqual(result, expected_result)
def test_write_metrics(self):
    MetricsEnvironment.process_wide_container().reset()
    write_fn = bigtableio._BigTableWriteFn(
        self._PROJECT_ID, self._INSTANCE_ID, self._TABLE_ID)
    write_fn.table = self.table
    write_fn.start_bundle()
    number_of_rows = 2
    error = Status()
    error.message = 'Entity already exists.'
    error.code = ALREADY_EXISTS
    success = Status()
    success.message = 'Success'
    success.code = OK
    rows_response = [error, success] * number_of_rows
    with patch.object(Table, 'mutate_rows', return_value=rows_response):
        direct_rows = [
            self.generate_row(i) for i in range(number_of_rows * 2)
        ]
        for direct_row in direct_rows:
            write_fn.process(direct_row)
        write_fn.finish_bundle()

        self.verify_write_call_metric(
            self._PROJECT_ID,
            self._INSTANCE_ID,
            self._TABLE_ID,
            ServiceCallMetric.bigtable_error_code_to_grpc_status_string(
                ALREADY_EXISTS),
            2)
        self.verify_write_call_metric(
            self._PROJECT_ID,
            self._INSTANCE_ID,
            self._TABLE_ID,
            ServiceCallMetric.bigtable_error_code_to_grpc_status_string(OK),
            2)
def test_extract_status(self):
    self.assertIsNone(_extract_status(None))
    self.assertEqual(
        _extract_status(SpanStatus(canonical_code=StatusCanonicalCode.OK)),
        Status(details=None, code=0),
    )
    self.assertEqual(
        _extract_status(
            SpanStatus(
                canonical_code=StatusCanonicalCode.UNKNOWN,
                description="error_desc",
            )),
        Status(details=None, code=2, message="error_desc"),
    )
def test_write_metrics(self):
    MetricsEnvironment.process_wide_container().reset()
    write_fn = bigtableio._BigTableWriteFn(
        self._PROJECT_ID, self._INSTANCE_ID, self._TABLE_ID)
    write_fn.table = self.table
    write_fn.start_bundle()
    number_of_rows = 2
    error = Status()
    error.message = 'Entity already exists.'
    error.code = ALREADY_EXISTS
    success = Status()
    success.message = 'Success'
    success.code = OK
    rows_response = [error, success] * number_of_rows
    with patch.object(Table, 'mutate_rows', return_value=rows_response):
        direct_rows = [
            self.generate_row(i) for i in range(number_of_rows * 2)
        ]
        for direct_row in direct_rows:
            write_fn.process(direct_row)

        try:
            write_fn.finish_bundle()
        except:  # pylint: disable=bare-except
            # Currently we fail the bundle when there are any failures.
            # TODO(BEAM-13849): remove after bigtableio can selectively
            # retry.
            pass

        self.verify_write_call_metric(
            self._PROJECT_ID,
            self._INSTANCE_ID,
            self._TABLE_ID,
            ServiceCallMetric.bigtable_error_code_to_grpc_status_string(
                ALREADY_EXISTS),
            2)
        self.verify_write_call_metric(
            self._PROJECT_ID,
            self._INSTANCE_ID,
            self._TABLE_ID,
            ServiceCallMetric.bigtable_error_code_to_grpc_status_string(OK),
            2)
def test_do_mutate_retryable_rows_mismatch_num_responses(self):
    from google.cloud.bigtable._generated.bigtable_pb2 import (
        MutateRowsResponse)
    from google.cloud.bigtable.row import DirectRow
    from google.rpc.status_pb2 import Status
    from tests.unit._testing import _FakeStub

    client = _Client()
    instance = _Instance(self.INSTANCE_NAME, client=client)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b'row_key', table=table)
    row_1.set_cell('cf', b'col', b'value1')
    row_2 = DirectRow(row_key=b'row_key_2', table=table)
    row_2.set_cell('cf', b'col', b'value2')

    # Two rows are sent, but the response covers only one entry, which
    # should be rejected as a mismatch.
    response = MutateRowsResponse(
        entries=[
            MutateRowsResponse.Entry(
                index=0,
                status=Status(code=0),
            ),
        ],
    )

    # Patch the stub used by the API method.
    client._data_stub = _FakeStub([response])

    worker = self._make_worker(
        table._instance._client, table.name, [row_1, row_2])
    with self.assertRaises(RuntimeError):
        worker._do_mutate_retryable_rows()
async def test_other_error_details_present():
    any1 = Any()
    any1.Pack(RetryInfo())
    any2 = Any()
    any2.Pack(ErrorInfo(reason="RESET", domain="pubsublite.googleapis.com"))
    # Code 10 is ABORTED; the RESET signal must still be recognized when
    # other details precede the ErrorInfo.
    status_pb = Status(code=10, details=[any1, any2])
    assert is_reset_signal(Aborted("", response=make_call(status_pb)))
def test_mutate_rows(self):
    from google.rpc.status_pb2 import Status

    instance = mock.MagicMock()
    table = self._make_one(self.TABLE_ID, instance)

    response = [Status(code=0), Status(code=1)]
    mock_worker = mock.Mock(return_value=response)
    with mock.patch(
            'google.cloud.bigtable.table._RetryableMutateRowsWorker',
            new=mock.MagicMock(return_value=mock_worker)):
        statuses = table.mutate_rows([mock.MagicMock(), mock.MagicMock()])
    result = [status.code for status in statuses]
    expected_result = [0, 1]

    self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows(self):
    from google.cloud.bigtable._generated.bigtable_pb2 import (
        MutateRowsResponse)
    from google.cloud.bigtable.row import DirectRow
    from google.rpc.status_pb2 import Status
    from tests.unit._testing import _FakeStub

    # Setup:
    #   - Mutate 2 rows.
    # Action:
    #   - Initial attempt will mutate all 2 rows.
    # Expectation:
    #   - Expect [success, non-retryable]

    client = _Client()
    instance = _Instance(self.INSTANCE_NAME, client=client)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b'row_key', table=table)
    row_1.set_cell('cf', b'col', b'value1')
    row_2 = DirectRow(row_key=b'row_key_2', table=table)
    row_2.set_cell('cf', b'col', b'value2')

    response = MutateRowsResponse(
        entries=[
            MutateRowsResponse.Entry(
                index=0,
                status=Status(code=0),
            ),
            MutateRowsResponse.Entry(
                index=1,
                status=Status(code=1),
            ),
        ],
    )

    # Patch the stub used by the API method.
    client._data_stub = _FakeStub([response])

    worker = self._make_worker(
        table._instance._client, table.name, [row_1, row_2])
    statuses = worker._do_mutate_retryable_rows()

    result = [status.code for status in statuses]
    expected_result = [0, 1]

    self.assertEqual(result, expected_result)
def test_extract_status_code_error(self):
    self.assertEqual(
        _extract_status(
            SpanStatus(
                status_code=StatusCode.ERROR,
                description="error_desc",
            )),
        Status(code=code_pb2.UNKNOWN, message="error_desc"),
    )
def test_extract_status_code_future_added(self):
    self.assertEqual(
        _extract_status(
            SpanStatus(
                status_code=mock.Mock(),
                description="unknown_description",
            )),
        Status(code=code_pb2.UNKNOWN, message="unknown_description"),
    )
def _extract_status(status: trace_api.Status) -> Optional[Status]:
    """Convert a Status object to protobuf object."""
    if not status:
        return None
    status_dict = {"details": None, "code": status.canonical_code.value}

    if status.description is not None:
        status_dict["message"] = status.description

    return Status(**status_dict)
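# A minimal usage sketch (not part of the original suite) showing what
# _extract_status produces; SpanStatus and StatusCanonicalCode are the same
# OpenTelemetry types exercised by the surrounding tests.
def _example_extract_status():
    span_status = SpanStatus(
        canonical_code=StatusCanonicalCode.UNKNOWN,
        description="error_desc",
    )
    proto_status = _extract_status(span_status)
    assert proto_status.code == 2  # UNKNOWN in google.rpc.code_pb2
    assert proto_status.message == "error_desc"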
def _make_responses(self, codes):
    import six
    from google.cloud.bigtable_v2.proto.bigtable_pb2 import (
        MutateRowsResponse)
    from google.rpc.status_pb2 import Status

    entries = [
        MutateRowsResponse.Entry(index=i, status=Status(code=codes[i]))
        for i in six.moves.xrange(len(codes))
    ]
    return MutateRowsResponse(entries=entries)
def test_mutate_rows(self):
    from google.rpc.status_pb2 import Status

    channel = self._make_channel()
    client = self._make_client(project='project-id', channel=channel,
                               admin=True)
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_one(self.TABLE_ID, instance)

    response = [Status(code=0), Status(code=1)]
    mock_worker = mock.Mock(return_value=response)
    with mock.patch(
            'google.cloud.bigtable.table._RetryableMutateRowsWorker',
            new=mock.MagicMock(return_value=mock_worker)):
        statuses = table.mutate_rows([mock.MagicMock(), mock.MagicMock()])
    result = [status.code for status in statuses]
    expected_result = [0, 1]

    self.assertEqual(result, expected_result)
def test_extract_status_code_and_desc(self):
    self.assertEqual(
        _extract_status(
            SpanStatus(
                status_code=StatusCode.UNSET,
                description="error_desc",
            )),
        Status(details=None, code=StatusCode.UNSET.value,
               message="error_desc"),
    )
def from_headers(cls, headers):
    """
    Rehydrate a new instance from headers received as trailing metadata.
    """
    code = int(headers.get("grpc-status"))
    message = headers.get("grpc-message")
    status = headers.get(GRPC_DETAILS_METADATA_KEY)
    return cls(
        code=STATUS_CODE_INT_TO_ENUM_MAP[code],
        message=message,
        status=Status.FromString(status) if status else None,
    )
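# A hedged round-trip sketch for from_headers, assuming it is a classmethod
# on the GrpcError class used elsewhere in this module and that
# GRPC_DETAILS_METADATA_KEY names the binary status trailer (conventionally
# "grpc-status-details-bin"):
def _example_from_headers():
    status_pb = Status(code=3, message="invalid argument")
    headers = {
        "grpc-status": "3",
        "grpc-message": "invalid argument",
        GRPC_DETAILS_METADATA_KEY: status_pb.SerializeToString(),
    }
    error = GrpcError.from_headers(headers)
    assert error.status == status_pb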
def handler(exc_info, code=None, message=None):
    exc_type, exc, tb = exc_info

    code = code or StatusCode.PERMISSION_DENIED
    message = "Not allowed!"

    status = Status(
        code=STATUS_CODE_ENUM_TO_INT_MAP[code],
        message=message,
        details=[],  # don't include traceback
    )

    return GrpcError(code=code, message=message, status=status)
def test__update_state_error(self):
    from google.longrunning import operations_pb2
    from google.rpc.status_pb2 import Status

    operation = self._make_one(None, None)
    self.assertIsNone(operation.error)
    self.assertIsNone(operation.response)

    error_pb = Status(code=1)
    operation_pb = operations_pb2.Operation(error=error_pb)
    operation._update_state(operation_pb)

    self.assertEqual(operation.error, error_pb)
    self.assertIsNone(operation.response)
def test_run_statement_w_homogeneous_insert_statements(self):
    """Check that Connection executes homogeneous insert statements."""
    from google.cloud.spanner_dbapi.checksum import ResultsChecksum
    from google.cloud.spanner_dbapi.cursor import Statement
    from google.rpc.status_pb2 import Status
    from google.rpc.code_pb2 import OK

    sql = "INSERT INTO T (f1, f2) VALUES (%s, %s), (%s, %s)"
    params = ["a", "b", "c", "d"]
    param_types = {"f1": str, "f2": str}

    connection = self._make_connection()
    transaction = mock.MagicMock()
    connection.transaction_checkout = mock.Mock(return_value=transaction)
    transaction.batch_update = mock.Mock(return_value=(Status(code=OK), 1))
    statement = Statement(sql, params, param_types, ResultsChecksum(), True)

    connection.run_statement(statement, retried=True)

    self.assertEqual(len(connection._statements), 0)
def _grpc_error_to_status_proto(error: GrpcError) -> Status:
    """
    Given a GrpcError, construct a google.rpc.status_pb2.Status proto
    """
    message = error.message or ''
    details = []
    if error.code:
        error_info = ErrorInfo(code=error.code)
        error_info_packed = Any()
        error_info_packed.Pack(error_info)
        details.append(error_info_packed)
    if isinstance(error, BadRequestError) and error.field_errors:
        bad_request = _construct_bad_request_proto(error.field_errors)
        bad_request_packed = Any()
        bad_request_packed.Pack(bad_request)
        details.append(bad_request_packed)
    status_code_int = error.status_code.value[0]
    status_proto = Status(
        code=status_code_int, message=message, details=details)
    return status_proto
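# A hedged consumer sketch: unpack the ErrorInfo detail back out of the
# Status proto built above. Any.Unpack returns True and fills the message
# only when the packed type matches; ErrorInfo here is the same class packed
# by _grpc_error_to_status_proto.
def _example_unpack_error_info(error: GrpcError):
    status_proto = _grpc_error_to_status_proto(error)
    for packed in status_proto.details:
        detail = ErrorInfo()
        if packed.Unpack(detail):
            return detail
    return None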
def default_error_from_exception(exc_info, code=None, message=None):
    """
    Create a new GrpcError instance representing an underlying exception.

    If the `GRPC_DEBUG` key is set in the Nameko config, the `status` message
    will capture the underlying traceback in a
    `google.rpc.error_details.DebugInfo` message.
    """
    exc_type, exc, tb = exc_info

    code = code or StatusCode.UNKNOWN
    message = message or str(exc)

    status = Status(code=STATUS_CODE_ENUM_TO_INT_MAP[code], message=message)

    if config.get("GRPC_DEBUG"):
        debug_info = Any()
        debug_info.Pack(
            DebugInfo(
                stack_entries=traceback.format_exception(*exc_info),
                detail=str(exc),
            )
        )
        status.details.append(debug_info)

    return GrpcError(code=code, message=message, status=status)
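# A minimal usage sketch, assuming this handler is called with sys.exc_info()
# from inside an except block (as the Nameko error hooks the docstring
# references would do):
def _example_default_error():
    import sys
    try:
        raise ValueError("boom")
    except ValueError:
        grpc_error = default_error_from_exception(sys.exc_info())
    assert grpc_error.code == StatusCode.UNKNOWN
    assert grpc_error.message == "boom"
    return grpc_error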
def test_extract_status_code(self):
    self.assertEqual(
        _extract_status(SpanStatus(status_code=StatusCode.OK)),
        Status(details=None, code=StatusCode.OK.value),
    )
def test_export(self):
    resource_info = Resource(
        {
            "cloud.account.id": 123,
            "host.id": "host",
            "cloud.zone": "US",
            "cloud.provider": "gcp",
            "gcp.resource_type": "gce_instance",
        }
    )
    span_datas = [
        Span(
            name="span_name",
            context=SpanContext(
                trace_id=int(self.example_trace_id, 16),
                span_id=int(self.example_span_id, 16),
                is_remote=False,
            ),
            parent=None,
            kind=SpanKind.INTERNAL,
            resource=resource_info,
            attributes={"attr_key": "attr_value"},
        )
    ]

    cloud_trace_spans = {
        "name": "projects/{}/traces/{}/spans/{}".format(
            self.project_id, self.example_trace_id, self.example_span_id),
        "span_id": self.example_span_id,
        "parent_span_id": None,
        "display_name": TruncatableString(
            value="span_name", truncated_byte_count=0),
        "attributes": ProtoSpan.Attributes(
            attribute_map={
                "g.co/r/gce_instance/zone":
                    _format_attribute_value("US"),
                "g.co/r/gce_instance/instance_id":
                    _format_attribute_value("host"),
                "g.co/r/gce_instance/project_id":
                    _format_attribute_value("123"),
                "g.co/agent": self.agent_code,
                "attr_key": _format_attribute_value("attr_value"),
            }),
        "links": None,
        "status": Status(code=StatusCode.UNSET.value),
        "time_events": None,
        "start_time": None,
        "end_time": None,
        # pylint: disable=no-member
        "span_kind": ProtoSpan.SpanKind.INTERNAL,
    }

    client = mock.Mock()
    exporter = CloudTraceSpanExporter(self.project_id, client=client)

    exporter.export(span_datas)

    self.assertTrue(client.batch_write_spans.called)
    client.batch_write_spans.assert_called_with(
        "projects/{}".format(self.project_id), [cloud_trace_spans])
def make_reset_signal() -> GoogleAPICallError:
    any = Any()
    any.Pack(ErrorInfo(reason="RESET", domain="pubsublite.googleapis.com"))
    status_pb = Status(code=10, details=[any])
    return Aborted("", response=make_call(status_pb))
def _batch_update_helper(self, error_after=None, count=0):
    from google.rpc.status_pb2 import Status
    from google.protobuf.struct_pb2 import Struct
    from google.cloud.spanner_v1.proto.result_set_pb2 import ResultSet
    from google.cloud.spanner_v1.proto.result_set_pb2 import ResultSetStats
    from google.cloud.spanner_v1.proto.spanner_pb2 import (
        ExecuteBatchDmlResponse)
    from google.cloud.spanner_v1.proto.transaction_pb2 import (
        TransactionSelector)
    from google.cloud.spanner_v1._helpers import _make_value_pb

    insert_dml = "INSERT INTO table(pkey, desc) VALUES (%pkey, %desc)"
    insert_params = {"pkey": 12345, "desc": "DESCRIPTION"}
    insert_param_types = {"pkey": "INT64", "desc": "STRING"}
    update_dml = 'UPDATE table SET desc = desc + "-amended"'
    delete_dml = "DELETE FROM table WHERE desc IS NULL"

    dml_statements = [
        (insert_dml, insert_params, insert_param_types),
        update_dml,
        delete_dml,
    ]

    stats_pbs = [
        ResultSetStats(row_count_exact=1),
        ResultSetStats(row_count_exact=2),
        ResultSetStats(row_count_exact=3),
    ]
    if error_after is not None:
        stats_pbs = stats_pbs[:error_after]
        expected_status = Status(code=400)
    else:
        expected_status = Status(code=200)
    expected_row_counts = [stats.row_count_exact for stats in stats_pbs]

    response = ExecuteBatchDmlResponse(
        status=expected_status,
        result_sets=[ResultSet(stats=stats_pb) for stats_pb in stats_pbs],
    )
    database = _Database()
    api = database.spanner_api = self._make_spanner_api()
    api.execute_batch_dml.return_value = response
    session = _Session(database)
    transaction = self._make_one(session)
    transaction._transaction_id = self.TRANSACTION_ID
    transaction._execute_sql_count = count

    status, row_counts = transaction.batch_update(dml_statements)

    self.assertEqual(status, expected_status)
    self.assertEqual(row_counts, expected_row_counts)

    expected_transaction = TransactionSelector(id=self.TRANSACTION_ID)
    expected_insert_params = Struct(
        fields={
            key: _make_value_pb(value)
            for (key, value) in insert_params.items()
        }
    )
    expected_statements = [
        {
            "sql": insert_dml,
            "params": expected_insert_params,
            "param_types": insert_param_types,
        },
        {"sql": update_dml},
        {"sql": delete_dml},
    ]

    api.execute_batch_dml.assert_called_once_with(
        session=self.SESSION_NAME,
        transaction=expected_transaction,
        statements=expected_statements,
        seqno=count,
        metadata=[("google-cloud-resource-prefix", database.name)],
    )

    self.assertEqual(transaction._execute_sql_count, count + 1)
def _make_status(code=_STATUS_CODE, message=_STATUS_MESSAGE):
    from google.cloud.bigtable.error import Status

    status_pb = _make_status_pb(code=code, message=message)
    return Status(status_pb)
def _make_status_pb(code=_STATUS_CODE, message=_STATUS_MESSAGE):
    from google.rpc.status_pb2 import Status

    return Status(code=code, message=message)
def submit_task(self, task_name):
    try:
        queue_name = task_name.rsplit("/", 2)[0]
        if queue_name not in self._queue_tasks:
            raise ValueError("Not a valid queue")
    except IndexError:
        # Invalid task name, raise ValueError
        raise ValueError()

    # This is a special case that does not exist on the live server; it
    # exists so that local development servers can direct a task to run
    # on a particular port.
    qs = task_name.rsplit("?", 1)[-1]
    if qs:
        params = parse_qs(qs)
        port = int(params.get("port", [self._target_port])[0])
        task_name = task_name.rsplit("?", 1)[0]
    else:
        port = self._target_port

    # Locate the task in the queue
    for i, task in enumerate(self._queue_tasks[queue_name]):
        if task.name == task_name:
            index = i
            break
    else:
        logger.debug(
            "[TASKS] Tasks were: %s",
            [x.name for x in self._queue_tasks[queue_name]]
        )
        raise NotFound("Task not found: %s" % task_name)

    def now():
        return Timestamp(seconds=int(datetime.now().timestamp()))

    schedule_time = now()
    dispatch_time = None
    response_time = None

    task = self._queue_tasks[queue_name].pop(index)  # Remove the task

    try:
        dispatch_time = now()
        response = _make_task_request(queue_name, task, port)
    except error.HTTPError as e:
        response_status = e.code
        logging.error(
            "Error submitting task, moving to the back of the queue")
        logging.error("Reason was: %s" % e.reason)
        self._queue_tasks[queue_name].append(task)
    except (ConnectionRefusedError, error.URLError):
        response_status = 500
        logger.exception(
            "Error submitting task, moving to the back of the queue")
        self._queue_tasks[queue_name].append(task)
    else:
        response_status = response.status

    attempt = Attempt(
        schedule_time=schedule_time,
        dispatch_time=dispatch_time,
        response_time=response_time,
        response_status=Status(code=response_status),
    )

    kwargs = {
        "first_attempt": task.first_attempt or attempt,
        "last_attempt": attempt,
    }

    task.MergeFrom(Task(**kwargs))

    assert task
    return task