Code example #1
    def test_insert_data_then_dump_table(self):
        import datetime
        from google.cloud._helpers import UTC

        NOW_SECONDS = 1448911495.484366
        NOW = datetime.datetime.utcfromtimestamp(NOW_SECONDS).replace(
            tzinfo=UTC)
        ROWS = [
            ('Phred Phlyntstone', 32, NOW),
            ('Bharney Rhubble', 33, NOW + datetime.timedelta(seconds=10)),
            ('Wylma Phlyntstone', 29, NOW + datetime.timedelta(seconds=20)),
            ('Bhettye Rhubble', 27, None),
        ]
        ROW_IDS = range(len(ROWS))
        dataset = Config.CLIENT.dataset(
            _make_dataset_name('insert_data_then_dump'))
        self.assertFalse(dataset.exists())

        retry_403(dataset.create)()
        self.to_delete.append(dataset)

        TABLE_NAME = 'test_table'
        full_name = bigquery.SchemaField('full_name',
                                         'STRING',
                                         mode='REQUIRED')
        age = bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED')
        now = bigquery.SchemaField('now', 'TIMESTAMP')
        table = dataset.table(TABLE_NAME, schema=[full_name, age, now])
        self.assertFalse(table.exists())
        table.create()
        self.to_delete.insert(0, table)
        self.assertTrue(table.exists())

        errors = table.insert_data(ROWS, ROW_IDS)
        self.assertEqual(len(errors), 0)

        def _has_rows(result):
            return len(result[0]) > 0

        # Allow for 90 seconds of "warm up" before rows visible.  See:
        # https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
        # 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
        retry = RetryResult(_has_rows, max_tries=8)
        rows, _, _ = retry(table.fetch_data)()

        by_age = operator.itemgetter(1)
        self.assertEqual(sorted(rows, key=by_age), sorted(ROWS, key=by_age))
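
All of the snippets on this page lean on a RetryResult helper from the
project's system-test utilities, which is never shown here. As rough
orientation only, a minimal sketch of such a predicate-driven retry
decorator might look like the following; the constructor signature, the
one-second starting delay, and the error raised on exhaustion are
assumptions, not the actual API.

import time


class RetryResult(object):
    """Retry a callable until ``result_predicate(result)`` is true.

    Sleeps 1, 2, 4, ... seconds between attempts, so ``max_tries=8``
    waits at most 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds overall.
    """

    def __init__(self, result_predicate, max_tries=4, delay=1):
        self.result_predicate = result_predicate
        self.max_tries = max_tries
        self.delay = delay

    def __call__(self, to_wrap):
        def wrapped(*args, **kwargs):
            delay = self.delay
            for attempt in range(self.max_tries):
                result = to_wrap(*args, **kwargs)
                if self.result_predicate(result):
                    return result
                if attempt + 1 < self.max_tries:
                    time.sleep(delay)
                    delay *= 2  # exponential backoff
            # The real helper raises its own error type on exhaustion; a
            # plain RuntimeError keeps this sketch self-contained.
            raise RuntimeError('result never satisfied the predicate')
        return wrapped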
Code example #2
def _wait_until_complete(operation, max_attempts=5):
    """Wait until an operation has completed.

    :type operation: :class:`google.cloud.operation.Operation`
    :param operation: Operation that has not completed.

    :type max_attempts: int
    :param max_attempts: (Optional) The maximum number of times to check if
                         the operation has completed. Defaults to 5.

    :rtype: bool
    :returns: Boolean indicating if the operation is complete.
    """
    retry = RetryResult(_operation_complete, max_tries=max_attempts)
    return retry(operation.poll)()
Code example #3
def _list_entries(logger):
    """Retry-ing list entries in a logger.

    Retry until there are actual results and retry on any
    failures.

    :type logger: :class:`~google.cloud.logging.logger.Logger`
    :param logger: A Logger containing entries.

    :rtype: list
    :returns: List of all entries consumed.
    """
    inner = RetryResult(_has_entries)(_consume_entries)
    outer = RetryErrors(GaxError, _retry_on_unavailable)(inner)
    return outer(logger)
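
Code examples #3, #10, #11, and #13 compose RetryResult with a RetryErrors
wrapper, so the listing retries both while it comes back empty and while
the API raises a retriable error. A minimal sketch of what such a wrapper
might look like, again with an assumed signature rather than the project's
actual one:

import time


class RetryErrors(object):
    """Retry a callable while it raises ``exception`` and the optional
    ``error_predicate`` accepts the raised instance."""

    def __init__(self, exception, error_predicate=None,
                 max_tries=4, delay=1):
        self.exception = exception
        self.error_predicate = error_predicate or (lambda exc: True)
        self.max_tries = max_tries
        self.delay = delay

    def __call__(self, to_wrap):
        def wrapped(*args, **kwargs):
            delay = self.delay
            for attempt in range(self.max_tries):
                try:
                    return to_wrap(*args, **kwargs)
                except self.exception as caught:
                    # Give up on the last attempt, or when the error is
                    # not one the predicate considers retriable.
                    if (attempt + 1 == self.max_tries or
                            not self.error_predicate(caught)):
                        raise
                time.sleep(delay)
                delay *= 2  # exponential backoff
        return wrapped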
Code example #4
    def test_topic_iam_policy(self):
        from google.cloud.pubsub.iam import PUBSUB_TOPICS_GET_IAM_POLICY
        self._maybe_emulator_skip()
        topic_name = 'test-topic-iam-policy-topic' + unique_resource_id('-')
        topic = Config.CLIENT.topic(topic_name)
        topic.create()

        # Retry / backoff up to 7 seconds (1 + 2 + 4)
        retry = RetryResult(lambda result: result, max_tries=4)
        retry(topic.exists)()
        self.to_delete.append(topic)

        if topic.check_iam_permissions([PUBSUB_TOPICS_GET_IAM_POLICY]):
            policy = topic.get_iam_policy()
            policy.viewers.add(policy.user('*****@*****.**'))
            new_policy = topic.set_iam_policy(policy)
            self.assertEqual(new_policy.viewers, policy.viewers)
Code example #5
def _operation_wait(operation, max_attempts=5):
    """Wait until an operation has completed.

    :type operation: :class:`gcloud.bigtable.instance.Operation`
    :param operation: Operation that has not finished.

    :type max_attempts: int
    :param max_attempts: (Optional) The maximum number of times to check if
                         the operation has finished. Defaults to 5.

    :rtype: bool
    :returns: Boolean indicating if the operation finished.
    """
    def _operation_finished(result):
        return result

    retry = RetryResult(_operation_finished, max_tries=max_attempts)
    return retry(operation.finished)()
Code example #6
    def test_insert_nested_nested(self):
        # See #2951
        SF = bigquery.SchemaField
        schema = [
            SF('string_col', 'STRING', mode='NULLABLE'),
            SF('record_col',
               'RECORD',
               mode='NULLABLE',
               fields=[
                   SF('nested_string', 'STRING', mode='NULLABLE'),
                   SF('nested_repeated', 'INTEGER', mode='REPEATED'),
                   SF('nested_record',
                      'RECORD',
                      mode='NULLABLE',
                      fields=[
                          SF('nested_nested_string', 'STRING',
                             mode='NULLABLE'),
                      ]),
               ]),
        ]
        record = {
            'nested_string': 'another string value',
            'nested_repeated': [0, 1, 2],
            'nested_record': {
                'nested_nested_string': 'some deep insight'
            },
        }
        to_insert = [('Some value', record)]
        table_name = 'test_table'
        dataset = Config.CLIENT.dataset(_make_dataset_name('issue_2951'))

        retry_403(dataset.create)()
        self.to_delete.append(dataset)

        table = dataset.table(table_name, schema=schema)
        table.create()
        self.to_delete.insert(0, table)

        table.insert_data(to_insert)

        retry = RetryResult(_has_rows, max_tries=8)
        rows = retry(self._fetch_single_page)(table)

        self.assertEqual(rows, to_insert)
Code example #7
    def test_list_topics(self):
        before = _consume_topics(Config.CLIENT)
        topics_to_create = [
            'new' + unique_resource_id(),
            'newer' + unique_resource_id(),
            'newest' + unique_resource_id(),
        ]
        for topic_name in topics_to_create:
            topic = Config.CLIENT.topic(topic_name)
            topic.create()
            self.to_delete.append(topic)

        # Retrieve the topics.
        def _all_created(result):
            return len(result) == len(before) + len(topics_to_create)

        retry = RetryResult(_all_created)
        after = retry(_consume_topics)(Config.CLIENT)

        created = [topic for topic in after
                   if topic.name in topics_to_create and
                   topic.project == Config.CLIENT.project]
        self.assertEqual(len(created), len(topics_to_create))
Code example #8
File: storage.py  Project: ofek/google-cloud-python
    def test_second_level(self):
        expected_names = [
            'parent/child/file21.txt',
            'parent/child/file22.txt',
        ]

        def _all_in_list(pair):
            _, blobs = pair
            return [blob.name for blob in blobs] == expected_names

        def _all_blobs():
            iterator = self.bucket.list_blobs(delimiter='/',
                                              prefix='parent/child/')
            response = iterator.get_next_page_response()
            blobs = list(iterator.get_items_from_response(response))
            return iterator, blobs

        retry = RetryResult(_all_in_list)
        iterator, _ = retry(_all_blobs)()
        self.assertEqual(iterator.page_number, 1)
        self.assertTrue(iterator.next_page_token is None)
        self.assertEqual(iterator.prefixes,
                         set(['parent/child/grand/', 'parent/child/other/']))
Code example #9
    def test_fetch_delete_subscription_w_deleted_topic(self):
        from gcloud.iterator import MethodIterator
        TO_DELETE = 'delete-me' + unique_resource_id('-')
        ORPHANED = 'orphaned' + unique_resource_id('-')
        topic = Config.CLIENT.topic(TO_DELETE)
        topic.create()
        subscription = topic.subscription(ORPHANED)
        subscription.create()
        topic.delete()

        def _fetch():
            return list(MethodIterator(Config.CLIENT.list_subscriptions))

        def _found_orphan(result):
            names = [subscription.name for subscription in result]
            return ORPHANED in names

        retry_until_found_orphan = RetryResult(_found_orphan)
        all_subs = retry_until_found_orphan(_fetch)()

        created = [
            subscription for subscription in all_subs
            if subscription.name == ORPHANED
        ]
        self.assertEqual(len(created), 1)
        orphaned = created[0]

        def _no_topic(instance):
            return instance.topic is None

        # Wait for the topic to clear: up to 127 seconds (2 ** 7 - 1)
        retry_until_no_topic = RetryInstanceState(_no_topic, max_tries=8)
        retry_until_no_topic(orphaned.reload)()

        self.assertTrue(orphaned.topic is None)
        orphaned.delete()
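
Besides RetryResult, this snippet uses RetryInstanceState, which re-invokes
a bound method (here orphaned.reload) until a predicate over the method's
instance holds, rather than over its return value. A sketch under the same
assumptions as the earlier ones:

import time


class RetryInstanceState(object):
    """Retry a bound method until ``instance_predicate`` accepts the
    object the method is bound to."""

    def __init__(self, instance_predicate, max_tries=4, delay=1):
        self.instance_predicate = instance_predicate
        self.max_tries = max_tries
        self.delay = delay

    def __call__(self, bound_method):
        def wrapped(*args, **kwargs):
            delay = self.delay
            for attempt in range(self.max_tries):
                bound_method(*args, **kwargs)  # e.g. orphaned.reload()
                if self.instance_predicate(bound_method.__self__):
                    return True
                if attempt + 1 < self.max_tries:
                    time.sleep(delay)
                    delay *= 2  # exponential backoff
            return False
        return wrapped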
Code example #10
File: logging_.py  Project: yang-g/gcloud-python
    def _list_entries(self, logger):
        from grpc._channel import _Rendezvous

        inner = RetryResult(_has_entries)(logger.list_entries)
        outer = RetryErrors(_Rendezvous, _retry_on_unavailable)(inner)
        return outer()
Code example #11
    def _list_entries(self, logger):
        from google.gax.errors import GaxError

        inner = RetryResult(_has_entries)(logger.list_entries)
        outer = RetryErrors(GaxError, _retry_on_unavailable)(inner)
        return outer()
Code example #12
    def test_create_table_insert_fetch_nested_schema(self):
        table_name = 'test_table'
        dataset = Config.CLIENT.dataset(
            _make_dataset_name('create_table_nested_schema'))
        self.assertFalse(dataset.exists())

        retry_403(dataset.create)()
        self.to_delete.append(dataset)

        schema = _load_json_schema()
        table = dataset.table(table_name, schema=schema)
        table.create()
        self.to_delete.insert(0, table)
        self.assertTrue(table.exists())
        self.assertEqual(table.name, table_name)

        to_insert = []
        # Data is in "JSON Lines" format, see http://jsonlines.org/
        json_filename = os.path.join(WHERE, 'bigquery_test_data.jsonl')
        with open(json_filename) as rows_file:
            for line in rows_file:
                mapping = json.loads(line)
                to_insert.append(tuple(mapping[field.name]
                                       for field in schema))

        errors = table.insert_data(to_insert)
        self.assertEqual(len(errors), 0)

        retry = RetryResult(_has_rows, max_tries=8)
        fetched = retry(self._fetch_single_page)(table)
        self.assertEqual(len(fetched), len(to_insert))

        for found, expected in zip(sorted(fetched), sorted(to_insert)):
            self.assertEqual(found[0], expected[0])  # Name
            self.assertEqual(found[1], int(expected[1]))  # Age
            self.assertEqual(found[2], expected[2])  # Weight
            self.assertEqual(found[3], expected[3])  # IsMagic

            self.assertEqual(len(found[4]), len(expected[4]))  # Spells
            for f_spell, e_spell in zip(found[4], expected[4]):
                self.assertEqual(f_spell['Name'], e_spell['Name'])
                parts = time.strptime(e_spell['LastUsed'],
                                      '%Y-%m-%d %H:%M:%S UTC')
                e_used = datetime.datetime(*parts[0:6], tzinfo=UTC)
                self.assertEqual(f_spell['LastUsed'], e_used)
                self.assertEqual(f_spell['DiscoveredBy'],
                                 e_spell['DiscoveredBy'])
                self.assertEqual(f_spell['Properties'], e_spell['Properties'])

                e_icon = base64.standard_b64decode(
                    e_spell['Icon'].encode('ascii'))
                self.assertEqual(f_spell['Icon'], e_icon)

            parts = time.strptime(expected[5], '%H:%M:%S')
            e_teatime = datetime.time(*parts[3:6])
            self.assertEqual(found[5], e_teatime)  # TeaTime

            parts = time.strptime(expected[6], '%Y-%m-%d')
            e_nextvac = datetime.date(*parts[0:3])
            self.assertEqual(found[6], e_nextvac)  # NextVacation

            parts = time.strptime(expected[7], '%Y-%m-%dT%H:%M:%S')
            e_favtime = datetime.datetime(*parts[0:6])
            self.assertEqual(found[7], e_favtime)  # FavoriteTime
Code example #13
    def _list_entries(self, logger):
        from google.cloud.exceptions import GrpcRendezvous

        inner = RetryResult(_has_entries)(logger.list_entries)
        outer = RetryErrors(GrpcRendezvous, _retry_on_unavailable)(inner)
        return outer()