def test_subscription_iam_policy(self):
    from google.cloud.pubsub.iam import PUBSUB_SUBSCRIPTIONS_GET_IAM_POLICY
    self._maybe_emulator_skip()
    topic_name = 'test-sub-iam-policy-topic' + unique_resource_id('-')
    topic = Config.CLIENT.topic(topic_name)
    topic.create()
    # Retry / backoff up to 7 seconds (1 + 2 + 4)
    retry = RetryResult(lambda result: result, max_tries=4)
    retry(topic.exists)()
    self.to_delete.append(topic)
    SUB_NAME = 'test-sub-iam-policy-sub' + unique_resource_id('-')
    subscription = topic.subscription(SUB_NAME)
    subscription.create()
    # Retry / backoff up to 7 seconds (1 + 2 + 4)
    retry = RetryResult(lambda result: result, max_tries=4)
    retry(subscription.exists)()
    self.to_delete.insert(0, subscription)

    if subscription.check_iam_permissions(
            [PUBSUB_SUBSCRIPTIONS_GET_IAM_POLICY]):
        policy = subscription.get_iam_policy()
        viewers = set(policy.viewers)
        viewers.add(policy.user('*****@*****.**'))
        policy.viewers = viewers
        new_policy = subscription.set_iam_policy(policy)
        self.assertEqual(new_policy.viewers, policy.viewers)

def test_session_crud(self):
    retry_true = RetryResult(operator.truth)
    retry_false = RetryResult(operator.not_)
    session = self._db.session()
    self.assertFalse(session.exists())
    session.create()
    retry_true(session.exists)()
    session.delete()
    retry_false(session.exists)()

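# All of these snippets lean on ``RetryResult`` from the shared
# ``test_utils.retry`` helpers, whose implementation is not shown in this
# section.  The following is a minimal sketch of the behavior the snippets
# assume -- a decorator factory that re-invokes a callable with exponential
# backoff until a predicate on its return value passes -- with defaults
# inferred from comments such as "up to 7 seconds (1 + 2 + 4)" above; it is
# not the canonical implementation.
import time
from functools import wraps


class BackoffFailed(Exception):
    """Raised when the predicate never passed within ``max_tries``."""


class RetryResult(object):
    """Sketch: retry ``to_wrap`` until ``result_predicate(result)`` is
    truthy, then return that result.
    """

    def __init__(self, result_predicate, max_tries=4, delay=1, backoff=2):
        self.result_predicate = result_predicate
        self.max_tries = max_tries
        self.delay = delay
        self.backoff = backoff

    def __call__(self, to_wrap):
        @wraps(to_wrap)
        def wrapped(*args, **kwargs):
            delay = self.delay
            for tries in range(self.max_tries):
                result = to_wrap(*args, **kwargs)
                if self.result_predicate(result):
                    return result
                if tries + 1 < self.max_tries:
                    time.sleep(delay)
                    delay *= self.backoff
            raise BackoffFailed()

        return wrapped


# Under this sketch, ``RetryResult(lambda result: result, max_tries=4)``
# calls the wrapped function up to four times, sleeping 1, 2, then 4
# seconds between attempts -- the "7 seconds" the comments refer to.
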
def test_exists(self):
    retry_until_true = RetryResult(lambda result: result)
    retry_until_false = RetryResult(lambda result: not result)
    temp_table_id = "test-table_existence"
    temp_table = Config.INSTANCE.table(temp_table_id)
    self.assertFalse(temp_table.exists())
    temp_table.create()
    self.assertTrue(retry_until_true(temp_table.exists)())
    temp_table.delete()
    self.assertFalse(retry_until_false(temp_table.exists)())

def test_list_subscriptions(self):
    TOPIC_NAME = 'list-sub' + unique_resource_id('-')
    topic = Config.CLIENT.topic(TOPIC_NAME)
    topic.create()
    self.to_delete.append(topic)
    empty = _consume_subscriptions(topic)
    self.assertEqual(len(empty), 0)
    subscriptions_to_create = [
        'new' + unique_resource_id(),
        'newer' + unique_resource_id(),
        'newest' + unique_resource_id(),
    ]
    for subscription_name in subscriptions_to_create:
        subscription = topic.subscription(subscription_name)
        subscription.create()
        self.to_delete.append(subscription)

    # Retrieve the subscriptions.
    def _all_created(result):
        return len(result) == len(subscriptions_to_create)

    retry = RetryResult(_all_created)
    all_subscriptions = retry(_consume_subscriptions)(topic)
    created = [subscription for subscription in all_subscriptions
               if subscription.name in subscriptions_to_create]
    self.assertEqual(len(created), len(subscriptions_to_create))

def test_create_snapshot(self):
    TOPIC_NAME = 'create-snap-def' + unique_resource_id('-')
    topic = Config.CLIENT.topic(TOPIC_NAME)
    before_snapshots = _consume_snapshots(Config.CLIENT)
    self.assertFalse(topic.exists())
    topic.create()
    self.to_delete.append(topic)
    SUBSCRIPTION_NAME = 'subscribing-now' + unique_resource_id('-')
    subscription = topic.subscription(SUBSCRIPTION_NAME, ack_deadline=600)
    self.assertFalse(subscription.exists())
    subscription.create()
    self.to_delete.append(subscription)
    SNAPSHOT_NAME = 'new-snapshot' + unique_resource_id('-')
    snapshot = subscription.snapshot(SNAPSHOT_NAME)
    snapshot.create()
    self.to_delete.append(snapshot)

    # There is no GET method for snapshots, so check existence via the
    # list API.
    def retry_predicate(result):
        return len(result) > len(before_snapshots)

    retry = RetryResult(retry_predicate, max_tries=5)
    after_snapshots = retry(_consume_snapshots)(Config.CLIENT)
    self.assertEqual(len(before_snapshots) + 1, len(after_snapshots))

    def full_name(obj):
        return obj.full_name

    self.assertIn(snapshot.full_name, map(full_name, after_snapshots))
    self.assertNotIn(snapshot.full_name, map(full_name, before_snapshots))

    with self.assertRaises(Conflict):
        snapshot.create()

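# ``_consume_topics``, ``_consume_subscriptions``, and ``_consume_snapshots``
# are referenced throughout the Pub/Sub snippets but never defined here.
# Because the retry predicates call ``len()`` on their results, they
# presumably drain the client's list iterators into concrete lists; the
# exact list methods below are assumptions about this era of the client:
def _consume_topics(client):
    return list(client.list_topics())


def _consume_subscriptions(topic):
    return list(topic.list_subscriptions())


def _consume_snapshots(client):
    return list(client.list_snapshots())
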
def instance():
    cluster_id = BIGTABLE_INSTANCE
    client = bigtable.Client(project=PROJECT, admin=True)
    serve_nodes = 1
    storage_type = enums.StorageType.SSD
    production = enums.Instance.Type.PRODUCTION
    labels = {"prod-label": "prod-label"}
    instance = client.instance(BIGTABLE_INSTANCE, instance_type=production,
                               labels=labels)

    if not instance.exists():
        cluster = instance.cluster(
            cluster_id,
            location_id=BIGTABLE_ZONE,
            serve_nodes=serve_nodes,
            default_storage_type=storage_type,
        )
        instance.create(clusters=[cluster])

        # Eventual consistency check
        retry_found = RetryResult(bool)
        retry_found(instance.exists)()

    yield

    instance.delete()

def dev_instance():
    cluster_id = BIGTABLE_DEV_INSTANCE
    client = bigtable.Client(project=PROJECT, admin=True)
    storage_type = enums.StorageType.SSD
    development = enums.Instance.Type.DEVELOPMENT
    labels = {"dev-label": "dev-label"}
    instance = client.instance(BIGTABLE_DEV_INSTANCE,
                               instance_type=development, labels=labels)

    if not instance.exists():
        cluster = instance.cluster(cluster_id, location_id=BIGTABLE_ZONE,
                                   default_storage_type=storage_type)
        instance.create(clusters=[cluster])

        # Eventual consistency check
        retry_found = RetryResult(bool)
        retry_found(instance.exists)()

    yield

    instance.delete()

def _query_timeseries_with_retries():
    MAX_RETRIES = 10

    def _has_timeseries(result):
        return len(list(result)) > 0

    retry_result = RetryResult(_has_timeseries,
                               max_tries=MAX_RETRIES)(client.query)
    return RetryErrors(BadRequest, max_tries=MAX_RETRIES)(retry_result)

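# ``RetryErrors`` is the error-driven counterpart to ``RetryResult``:
# instead of inspecting the return value, it re-invokes the callable when
# it raises a matching exception.  Note how ``_query_timeseries_with_retries``
# composes the two, so empty results and transient errors are each retried
# independently; the ``retry_403`` helper in the BigQuery snippets below is
# presumably an instance bound to a Forbidden error.  A minimal sketch of
# the assumed behavior, not the canonical implementation:
import time
from functools import wraps


class RetryErrors(object):
    """Sketch: retry ``to_wrap`` with exponential backoff while it raises
    ``exception`` (optionally filtered by ``error_predicate``).
    """

    def __init__(self, exception, error_predicate=lambda exc: True,
                 max_tries=4, delay=1, backoff=2):
        self.exception = exception
        self.error_predicate = error_predicate
        self.max_tries = max_tries
        self.delay = delay
        self.backoff = backoff

    def __call__(self, to_wrap):
        @wraps(to_wrap)
        def wrapped(*args, **kwargs):
            delay = self.delay
            for tries in range(self.max_tries):
                try:
                    return to_wrap(*args, **kwargs)
                except self.exception as exc:
                    # Re-raise on the final attempt, or when the error is
                    # not one we were told to retry.
                    if (tries + 1 == self.max_tries
                            or not self.error_predicate(exc)):
                        raise
                time.sleep(delay)
                delay *= self.backoff

        return wrapped
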
def test_annotate_video(client):
    features_element = videointelligence_v1.enums.Feature.LABEL_DETECTION
    features = [features_element]
    response = client.annotate_video(input_uri=INPUT_URI, features=features)

    retry = RetryResult(result_predicate=bool, max_tries=7)
    retry(response.done)()

    result = response.result()

    annotations = result.annotation_results[0]
    assert len(annotations.segment_label_annotations) > 0

def test_report_exception(self):
    # Get a class name unique to this test case.
    class_name = 'RuntimeError' + unique_resource_id('_')

    # Simulate an error: the group won't exist until we report the
    # first exception.
    _simulate_exception(class_name, Config.CLIENT)

    is_one = functools.partial(operator.eq, 1)
    is_one.__name__ = 'is_one'  # partial() has no __name__.
    wrapped_get_count = RetryResult(is_one)(_get_error_count)
    error_count = wrapped_get_count(class_name, Config.CLIENT)
    self.assertEqual(error_count, 1)

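# The ``__name__`` assignment above matters because ``functools.partial``
# objects carry no ``__name__`` attribute, which trips up any helper (such
# as a retry logger) that formats the predicate's name:
import functools
import operator

is_one = functools.partial(operator.eq, 1)
print(hasattr(is_one, '__name__'))  # False: partial() has no __name__
is_one.__name__ = 'is_one'          # so give it one explicitly
print(is_one(1), is_one(2))         # True False
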
def _wait_until_complete(operation, max_attempts=10):
    """Wait until an operation has completed.

    :type operation: :class:`google.cloud.operation.Operation`
    :param operation: Operation that has not completed.

    :type max_attempts: int
    :param max_attempts: (Optional) The maximum number of times to check if
                         the operation has completed. Defaults to 10.

    :rtype: bool
    :returns: Boolean indicating if the operation is complete.
    """
    retry = RetryResult(_operation_complete, max_tries=max_attempts)
    return retry(operation.poll)()

def _list_entries(logger):
    """Retry listing entries in a logger.

    Retry until there are actual results and retry on any failures.

    :type logger: :class:`~google.cloud.logging.logger.Logger`
    :param logger: A Logger containing entries.

    :rtype: list
    :returns: List of all entries consumed.
    """
    inner = RetryResult(_has_entries, max_tries=9)(_consume_entries)
    outer = RetryErrors(
        (ServiceUnavailable, ResourceExhausted), max_tries=9)(inner)
    return outer(logger)

def _list_entries(logger):
    """Retry listing entries in a logger.

    Retry until there are actual results and retry on any failures.

    :type logger: :class:`~google.cloud.logging.logger.Logger`
    :param logger: A Logger containing entries.

    :rtype: list
    :returns: List of all entries consumed.
    """
    inner = RetryResult(_has_entries)(_consume_entries)
    outer = RetryErrors(GaxError, _retry_on_unavailable)(inner)
    return outer(logger)

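# The logging helpers ``_consume_entries``, ``_has_entries``, and
# ``_retry_on_unavailable`` are referenced above but not defined in this
# section.  Plausible reconstructions; the ``list_entries`` call and the
# ``exc.cause.code()`` access are assumptions about this era of the clients:
from grpc import StatusCode


def _consume_entries(logger):
    # Assumed: drain the logger's entry iterator into a concrete list.
    return list(logger.list_entries())


def _has_entries(result):
    return len(result) > 0


def _retry_on_unavailable(exc):
    # Assumed: GaxError wraps the original grpc.RpcError as ``cause``;
    # only retry when its status code is UNAVAILABLE.
    return exc.cause.code() == StatusCode.UNAVAILABLE
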
def test_topic_iam_policy(self):
    from google.cloud.pubsub.iam import PUBSUB_TOPICS_GET_IAM_POLICY
    self._maybe_emulator_skip()
    topic_name = 'test-topic-iam-policy-topic' + unique_resource_id('-')
    topic = Config.CLIENT.topic(topic_name)
    topic.create()
    # Retry / backoff up to 7 seconds (1 + 2 + 4)
    retry = RetryResult(lambda result: result, max_tries=4)
    retry(topic.exists)()
    self.to_delete.append(topic)

    if topic.check_iam_permissions([PUBSUB_TOPICS_GET_IAM_POLICY]):
        policy = topic.get_iam_policy()
        policy.viewers.add(policy.user('*****@*****.**'))
        new_policy = topic.set_iam_policy(policy)
        self.assertEqual(new_policy.viewers, policy.viewers)

def _wait_until_complete(operation, max_attempts=10):
    """Wait until an operation has completed.

    :type operation: :class:`google.cloud.operation.Operation`
    :param operation: Operation that has not completed.

    :type max_attempts: int
    :param max_attempts: (Optional) The maximum number of times to check if
                         the operation has completed. Defaults to 10.

    :rtype: bool
    :returns: Boolean indicating if the operation is complete.
    """
    # This bizarre delay is necessary because the v1 API seems to return
    # the v1beta1 type URL sometimes if you poll too soon.
    time.sleep(3)
    retry = RetryResult(_operation_complete, max_tries=max_attempts)
    return retry(operation.poll)()

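# Both ``_wait_until_complete`` variants above assume a tiny
# ``_operation_complete`` predicate.  Since ``Operation.poll()`` returns
# ``True`` once the operation has finished, the predicate presumably just
# passes the poll result through:
def _operation_complete(result):
    # The boolean returned by Operation.poll() is itself the signal.
    return result
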
def test_insert_nested_nested(self):
    # See #2951
    SF = bigquery.SchemaField
    schema = [
        SF('string_col', 'STRING', mode='NULLABLE'),
        SF('record_col', 'RECORD', mode='NULLABLE',
           fields=[
               SF('nested_string', 'STRING', mode='NULLABLE'),
               SF('nested_repeated', 'INTEGER', mode='REPEATED'),
               SF('nested_record', 'RECORD', mode='NULLABLE',
                  fields=[
                      SF('nested_nested_string', 'STRING',
                         mode='NULLABLE'),
                  ]),
           ]),
    ]
    record = {
        'nested_string': 'another string value',
        'nested_repeated': [0, 1, 2],
        'nested_record': {
            'nested_nested_string': 'some deep insight',
        },
    }
    to_insert = [('Some value', record)]
    table_name = 'test_table'
    dataset = Config.CLIENT.dataset(_make_dataset_name('issue_2951'))
    retry_403(dataset.create)()
    self.to_delete.append(dataset)

    table = dataset.table(table_name, schema=schema)
    table.create()
    self.to_delete.insert(0, table)

    table.insert_data(to_insert)

    retry = RetryResult(_has_rows, max_tries=8)
    rows = retry(self._fetch_single_page)(table)

    self.assertEqual(rows, to_insert)

def test_insert_data_then_dump_table(self):
    NOW_SECONDS = 1448911495.484366
    NOW = datetime.datetime.utcfromtimestamp(
        NOW_SECONDS).replace(tzinfo=UTC)
    ROWS = [
        ('Phred Phlyntstone', 32, NOW),
        ('Bharney Rhubble', 33, NOW + datetime.timedelta(seconds=10)),
        ('Wylma Phlyntstone', 29, NOW + datetime.timedelta(seconds=20)),
        ('Bhettye Rhubble', 27, None),
    ]
    ROW_IDS = range(len(ROWS))
    dataset = Config.CLIENT.dataset(
        _make_dataset_name('insert_data_then_dump'))
    self.assertFalse(dataset.exists())
    retry_403(dataset.create)()
    self.to_delete.append(dataset)
    TABLE_NAME = 'test_table'
    full_name = bigquery.SchemaField('full_name', 'STRING',
                                     mode='REQUIRED')
    age = bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED')
    now = bigquery.SchemaField('now', 'TIMESTAMP')
    table = dataset.table(TABLE_NAME, schema=[full_name, age, now])
    self.assertFalse(table.exists())
    table.create()
    self.to_delete.insert(0, table)
    self.assertTrue(table.exists())

    errors = table.insert_data(ROWS, ROW_IDS)
    self.assertEqual(len(errors), 0)

    rows = ()

    # Allow for "warm up" before rows visible.  See:
    # https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
    # 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
    retry = RetryResult(_has_rows, max_tries=8)
    rows = retry(self._fetch_single_page)(table)

    by_age = operator.itemgetter(1)
    self.assertEqual(sorted(rows, key=by_age), sorted(ROWS, key=by_age))

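# The BigQuery snippets share ``_has_rows`` and ``_fetch_single_page``,
# neither of which appears in this section.  A plausible reconstruction for
# this era of the client, in which ``table.fetch_data()`` returned a page
# iterator; the ``.pages`` access is an assumption, and the helper is shown
# as a plain function although the tests bind it as a method:
import six


def _has_rows(result):
    return len(result) > 0


def _fetch_single_page(table):
    # Pull exactly one page of streamed rows so the retry predicate sees
    # a plain list.
    iterator = table.fetch_data()
    page = six.next(iterator.pages)
    return list(page)
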
def test_detect_web_images_from_gcs(self):
    client = Config.CLIENT
    bucket_name = Config.TEST_BUCKET.name
    blob_name = 'landmark.jpg'
    blob = Config.TEST_BUCKET.blob(blob_name)
    self.to_delete_by_case.append(blob)  # Clean-up.
    with open(LANDMARK_FILE, 'rb') as file_obj:
        blob.upload_from_file(file_obj)

    source_uri = 'gs://%s/%s' % (bucket_name, blob_name)
    image = client.image(source_uri=source_uri)
    limit = 5
    images_good = functools.partial(self._check_web_images, limit=limit)
    images_good.__name__ = 'images_good'  # partial() has no __name__.
    retry = RetryResult(images_good)
    web_images = retry(image.detect_web)(limit=limit)

    self._assert_web_images(web_images, limit)

def test_list_topics(self):
    before = _consume_topics(Config.CLIENT)
    topics_to_create = [
        'new' + unique_resource_id(),
        'newer' + unique_resource_id(),
        'newest' + unique_resource_id(),
    ]
    for topic_name in topics_to_create:
        topic = Config.CLIENT.topic(topic_name)
        topic.create()
        self.to_delete.append(topic)

    # Retrieve the topics.
    def _all_created(result):
        return len(result) == len(before) + len(topics_to_create)

    retry = RetryResult(_all_created)
    after = retry(_consume_topics)(Config.CLIENT)
    created = [topic for topic in after
               if topic.name in topics_to_create and
               topic.project == Config.CLIENT.project]
    self.assertEqual(len(created), len(topics_to_create))

def test_create_table_insert_fetch_nested_schema(self):
    table_name = 'test_table'
    dataset = Config.CLIENT.dataset(
        _make_dataset_name('create_table_nested_schema'))
    self.assertFalse(dataset.exists())
    retry_403(dataset.create)()
    self.to_delete.append(dataset)

    schema = _load_json_schema()
    table = dataset.table(table_name, schema=schema)
    table.create()
    self.to_delete.insert(0, table)
    self.assertTrue(table.exists())
    self.assertEqual(table.name, table_name)

    to_insert = []
    # Data is in "JSON Lines" format, see http://jsonlines.org/
    json_filename = os.path.join(WHERE, 'data', 'characters.jsonl')
    with open(json_filename) as rows_file:
        for line in rows_file:
            mapping = json.loads(line)
            to_insert.append(
                tuple(mapping[field.name] for field in schema))

    errors = table.insert_data(to_insert)
    self.assertEqual(len(errors), 0)

    retry = RetryResult(_has_rows, max_tries=8)
    fetched = retry(self._fetch_single_page)(table)
    self.assertEqual(len(fetched), len(to_insert))

    for found, expected in zip(sorted(fetched), sorted(to_insert)):
        self.assertEqual(found[0], expected[0])            # Name
        self.assertEqual(found[1], int(expected[1]))       # Age
        self.assertEqual(found[2], expected[2])            # Weight
        self.assertEqual(found[3], expected[3])            # IsMagic

        self.assertEqual(len(found[4]), len(expected[4]))  # Spells
        for f_spell, e_spell in zip(found[4], expected[4]):
            self.assertEqual(f_spell['Name'], e_spell['Name'])
            parts = time.strptime(
                e_spell['LastUsed'], '%Y-%m-%d %H:%M:%S UTC')
            e_used = datetime.datetime(*parts[0:6], tzinfo=UTC)
            self.assertEqual(f_spell['LastUsed'], e_used)
            self.assertEqual(f_spell['DiscoveredBy'],
                             e_spell['DiscoveredBy'])
            self.assertEqual(f_spell['Properties'],
                             e_spell['Properties'])

            e_icon = base64.standard_b64decode(
                e_spell['Icon'].encode('ascii'))
            self.assertEqual(f_spell['Icon'], e_icon)

        parts = time.strptime(expected[5], '%H:%M:%S')
        e_teatime = datetime.time(*parts[3:6])
        self.assertEqual(found[5], e_teatime)              # TeaTime

        parts = time.strptime(expected[6], '%Y-%m-%d')
        e_nextvac = datetime.date(*parts[0:3])
        self.assertEqual(found[6], e_nextvac)              # NextVacation

        parts = time.strptime(expected[7], '%Y-%m-%dT%H:%M:%S')
        e_favtime = datetime.datetime(*parts[0:6])
        self.assertEqual(found[7], e_favtime)              # FavoriteTime

b"\x08 Y\x13\xe2\n\x02i\xadc\xe2\xd99x" ) _RETRYABLE_CODES = [ 409, # Conflict 429, # TooManyRequests 503, # ServiceUnavailable ] def _not_retryable(response): return response.status_code not in _RETRYABLE_CODES retry_transient_errors = RetryResult(_not_retryable) def get_encryption_headers(key=ENCRYPTION_KEY): """Builds customer-supplied encryption key headers See `Managing Data Encryption`_ for more details. Args: key (bytes): 32 byte key to build request key and hash. Returns: Dict[str, str]: The algorithm, key and key-SHA256 headers. .. _Managing Data Encryption: https://cloud.google.com/storage/docs/encryption