Example #1
def test_bigtable_delete_instance():
    # [START bigtable_delete_instance]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance_id_to_delete = "inst-my-" + unique_resource_id("-")
    # [END bigtable_delete_instance]

    cluster_id = "clus-my-" + unique_resource_id("-")

    instance = client.instance(
        instance_id_to_delete, instance_type=PRODUCTION, labels=LABELS
    )
    cluster = instance.cluster(
        cluster_id,
        location_id=ALT_LOCATION_ID,
        serve_nodes=SERVER_NODES,
        default_storage_type=STORAGE_TYPE,
    )
    operation = instance.create(clusters=[cluster])
    # We want to make sure the operation completes.
    operation.result(timeout=100)

    # [START bigtable_delete_instance]
    instance_to_delete = client.instance(instance_id_to_delete)
    instance_to_delete.delete()
    # [END bigtable_delete_instance]

    assert not instance_to_delete.exists()
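
Note: the Bigtable snippets in this collection rely on module-level test fixtures that the excerpts omit. A minimal sketch of plausible definitions, assuming ``unique_resource_id`` is the helper from the shared ``test_utils.system`` package; the constant values are illustrative stand-ins, not the repository's actual configuration:

# Hypothetical module-level setup assumed by the Bigtable examples.
from google.cloud.bigtable import enums
from test_utils.system import unique_resource_id  # assumed source

PRODUCTION = enums.Instance.Type.PRODUCTION
STORAGE_TYPE = enums.StorageType.SSD
ALT_LOCATION_ID = "us-central1-a"  # illustrative zone
SERVER_NODES = 3  # illustrative serve-node count
LABELS = {"prod-label": "prod-label"}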
Example #2
def test_bigtable_create_instance():
    # [START bigtable_create_prod_instance]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import enums

    my_instance_id = "inst-my-" + unique_resource_id("-")
    my_cluster_id = "clus-my-" + unique_resource_id("-")
    location_id = "us-central1-f"
    serve_nodes = 3
    storage_type = enums.StorageType.SSD
    production = enums.Instance.Type.PRODUCTION
    labels = {"prod-label": "prod-label"}

    client = Client(admin=True)
    instance = client.instance(my_instance_id, instance_type=production, labels=labels)
    cluster = instance.cluster(
        my_cluster_id,
        location_id=location_id,
        serve_nodes=serve_nodes,
        default_storage_type=storage_type,
    )
    operation = instance.create(clusters=[cluster])
    # We want to make sure the operation completes.
    operation.result(timeout=100)
    # [END bigtable_create_prod_instance]
    assert instance.exists()
    instance.delete()
Example #3
    def test_create_sink_pubsub_topic(self):
        from google.cloud import pubsub_v1

        SINK_NAME = 'test-create-sink-topic%s' % (_RESOURCE_ID,)
        TOPIC_NAME = 'logging-systest{}'.format(unique_resource_id('-'))

        # Create the destination topic, and set up the IAM policy to allow
        # Stackdriver Logging to write into it.
        publisher = pubsub_v1.PublisherClient()
        topic_path = publisher.topic_path(Config.CLIENT.project, TOPIC_NAME)
        self.to_delete.append(_DeleteWrapper(publisher, topic_path))
        publisher.create_topic(topic_path)

        policy = publisher.get_iam_policy(topic_path)
        policy.bindings.add(
            role='roles/owner',
            members=['group:[email protected]']
        )
        publisher.set_iam_policy(topic_path, policy)

        TOPIC_URI = 'pubsub.googleapis.com/%s' % (topic_path,)

        sink = Config.CLIENT.sink(SINK_NAME, DEFAULT_FILTER, TOPIC_URI)
        self.assertFalse(sink.exists())
        sink.create()
        self.to_delete.append(sink)
        self.assertTrue(sink.exists())
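
The ``_DeleteWrapper`` used above is not part of the excerpt. A minimal sketch of what it presumably does, adapting topic deletion to the suite's ``to_delete`` cleanup protocol:

class _DeleteWrapper(object):
    # Assumed helper: gives a Pub/Sub topic the ``delete()`` interface
    # that the test suite's cleanup loop expects.
    def __init__(self, publisher, topic_path):
        self.publisher = publisher
        self.topic_path = topic_path

    def delete(self):
        self.publisher.delete_topic(self.topic_path)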
Example #4
def test_watch_query(client, cleanup):
    db = client
    doc_ref = db.collection(u"users").document(u"alovelace" + unique_resource_id())
    query_ref = db.collection(u"users").where("first", "==", u"Ada")

    # Initial setting
    doc_ref.set({u"first": u"Jane", u"last": u"Doe", u"born": 1900})

    sleep(1)

    # Setup listener
    def on_snapshot(docs, changes, read_time):
        on_snapshot.called_count += 1

        # A snapshot should return the same thing as if a query ran now.
        query_ran = db.collection(u"users").where("first", "==", u"Ada").stream()
        assert len(docs) == len([i for i in query_ran])

    on_snapshot.called_count = 0

    query_ref.on_snapshot(on_snapshot)

    # Alter document
    doc_ref.set({u"first": u"Ada", u"last": u"Lovelace", u"born": 1815})

    for _ in range(10):
        if on_snapshot.called_count == 1:
            return
        sleep(1)

    if on_snapshot.called_count != 1:
        raise AssertionError(
            "Failed to get exactly one document change: count: "
            + str(on_snapshot.called_count)
        )
Example #5
    def test_create_instance_defaults(self):
        from google.cloud.bigtable import enums

        ALT_INSTANCE_ID = "ndef" + unique_resource_id("-")
        instance = Config.CLIENT.instance(ALT_INSTANCE_ID, labels=LABELS)
        ALT_CLUSTER_ID = ALT_INSTANCE_ID + "-cluster"
        cluster = instance.cluster(
            ALT_CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES
        )
        operation = instance.create(clusters=[cluster])
        # We want to make sure the operation completes.
        operation.result(timeout=10)

        # Make sure this instance gets deleted after the test case.
        self.instances_to_delete.append(instance)

        # Create a new instance object and make sure it is the same.
        instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID)
        instance_alt.reload()

        self.assertEqual(instance, instance_alt)
        self.assertEqual(instance.display_name, instance_alt.display_name)
        # Make sure that by default a PRODUCTION type instance is created
        self.assertIsNone(instance.type_)
        self.assertEqual(instance_alt.type_, enums.Instance.Type.PRODUCTION)
Example #6
    def test_create_sink_pubsub_topic(self):
        from google.cloud import pubsub_v1

        SINK_NAME = "test-create-sink-topic%s" % (_RESOURCE_ID,)
        TOPIC_NAME = "logging-systest{}".format(unique_resource_id("-"))

        # Create the destination topic, and set up the IAM policy to allow
        # Stackdriver Logging to write into it.
        publisher = pubsub_v1.PublisherClient()
        topic_path = publisher.topic_path(Config.CLIENT.project, TOPIC_NAME)
        self.to_delete.append(_DeleteWrapper(publisher, topic_path))
        publisher.create_topic(topic_path)

        policy = publisher.get_iam_policy(topic_path)
        policy.bindings.add(role="roles/owner", members=["group:[email protected]"])
        publisher.set_iam_policy(topic_path, policy)

        TOPIC_URI = "pubsub.googleapis.com/%s" % (topic_path,)

        retry = RetryErrors((Conflict, ServiceUnavailable), max_tries=10)
        sink = Config.CLIENT.sink(SINK_NAME, DEFAULT_FILTER, TOPIC_URI)
        self.assertFalse(sink.exists())

        retry(sink.create)()

        self.to_delete.append(sink)
        self.assertTrue(sink.exists())
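
``RetryErrors`` (and siblings like ``retry_429`` and ``retry_500`` used elsewhere in this collection) presumably comes from the shared ``test_utils.retry`` module. A simplified sketch of the retry-on-exception decorator, assuming the real version also supports configurable delay and logging:

import time


class RetryErrors(object):
    # Simplified sketch: retry the wrapped callable while it raises one
    # of ``exceptions``, backing off exponentially between attempts.
    def __init__(self, exceptions, max_tries=4, delay=1, backoff=2):
        self.exceptions = exceptions
        self.max_tries = max_tries
        self.delay = delay
        self.backoff = backoff

    def __call__(self, to_wrap):
        def wrapped(*args, **kwargs):
            delay = self.delay
            for attempt in range(self.max_tries):
                try:
                    return to_wrap(*args, **kwargs)
                except self.exceptions:
                    if attempt == self.max_tries - 1:
                        raise
                    time.sleep(delay)
                    delay *= self.backoff
        return wrapped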
Example #7
def test_collection_group_queries(client, cleanup):
    collection_group = "b" + unique_resource_id("-")

    doc_paths = [
        "abc/123/" + collection_group + "/cg-doc1",
        "abc/123/" + collection_group + "/cg-doc2",
        collection_group + "/cg-doc3",
        collection_group + "/cg-doc4",
        "def/456/" + collection_group + "/cg-doc5",
        collection_group + "/virtual-doc/nested-coll/not-cg-doc",
        "x" + collection_group + "/not-cg-doc",
        collection_group + "x/not-cg-doc",
        "abc/123/" + collection_group + "x/not-cg-doc",
        "abc/123/x" + collection_group + "/not-cg-doc",
        "abc/" + collection_group,
    ]

    batch = client.batch()
    for doc_path in doc_paths:
        doc_ref = client.document(doc_path)
        batch.set(doc_ref, {"x": 1})

    batch.commit()

    query = client.collection_group(collection_group)
    snapshots = list(query.stream())
    found = [snapshot.id for snapshot in snapshots]
    expected = ["cg-doc1", "cg-doc2", "cg-doc3", "cg-doc4", "cg-doc5"]
    assert found == expected
Example #8
def test_document_get(client, cleanup):
    now = datetime.datetime.utcnow().replace(tzinfo=UTC)
    document_id = 'for-get' + unique_resource_id('-')
    document = client.document('created', document_id)
    # Add to clean-up before API request (in case ``create()`` fails).
    cleanup(document)

    # First make sure it doesn't exist.
    assert not document.get().exists

    ref_doc = client.document('top', 'middle1', 'middle2', 'bottom')
    data = {
        'turtle': 'power',
        'cheese': 19.5,
        'fire': 199099299,
        'referee': ref_doc,
        'gio': firestore.GeoPoint(45.5, 90.0),
        'deep': [
            u'some',
            b'\xde\xad\xbe\xef',
        ],
        'map': {
            'ice': True,
            'water': None,
            'vapor': {
                'deeper': now,
            },
        },
    }
    write_result = document.create(data)
    snapshot = document.get()
    check_snapshot(snapshot, document, data, write_result)
    assert_timestamp_less(snapshot.create_time, snapshot.read_time)
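
``check_snapshot`` and ``assert_timestamp_less`` are module-level helpers not shown in the excerpt; the latter presumably converts both protobuf timestamps and asserts strict ordering, roughly:

from google.cloud._helpers import _pb_timestamp_to_datetime


def assert_timestamp_less(timestamp_pb1, timestamp_pb2):
    # Assumed helper: compare two protobuf timestamps as datetimes.
    assert _pb_timestamp_to_datetime(timestamp_pb1) < _pb_timestamp_to_datetime(timestamp_pb2)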
Example #9
def setUpModule():
    Config.CLIENT = storage.Client()
    bucket_name = 'new' + unique_resource_id()
    # In the **very** rare case the bucket name is reserved, this
    # fails with a ConnectionError.
    Config.TEST_BUCKET = Config.CLIENT.bucket(bucket_name)
    retry_429(Config.TEST_BUCKET.create)()
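
``retry_429`` here is presumably the ``RetryErrors`` pattern shown in Example #27 (and sketched after Example #6), e.g.:

from google.cloud import exceptions
from test_utils.retry import RetryErrors  # assumed source

retry_429 = RetryErrors(exceptions.TooManyRequests)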
Example #10
    def test_copy_existing_file_with_user_project(self):
        new_bucket_name = 'copy-w-requester-pays' + unique_resource_id('-')
        created = Config.CLIENT.create_bucket(
            new_bucket_name, requester_pays=True)
        self.case_buckets_to_delete.append(new_bucket_name)
        self.assertEqual(created.name, new_bucket_name)
        self.assertTrue(created.requester_pays)

        to_delete = []
        blob = storage.Blob('simple', bucket=created)
        blob.upload_from_string(b'DEADBEEF')
        to_delete.append(blob)
        try:
            with_user_project = Config.CLIENT.bucket(
                new_bucket_name, user_project=USER_PROJECT)

            new_blob = retry_bad_copy(with_user_project.copy_blob)(
                blob, with_user_project, 'simple-copy')
            to_delete.append(new_blob)

            base_contents = blob.download_as_string()
            copied_contents = new_blob.download_as_string()
            self.assertEqual(base_contents, copied_contents)
        finally:
            for blob in to_delete:
                retry_429(blob.delete)()
Example #11
    def test_create_bucket(self):
        new_bucket_name = 'a-new-bucket' + unique_resource_id('-')
        self.assertRaises(exceptions.NotFound,
                          Config.CLIENT.get_bucket, new_bucket_name)
        created = retry_429(Config.CLIENT.create_bucket)(new_bucket_name)
        self.case_buckets_to_delete.append(new_bucket_name)
        self.assertEqual(created.name, new_bucket_name)
Example #12
    def test_update_type(self):
        from google.cloud.bigtable.enums import Instance

        _DEVELOPMENT = Instance.Type.DEVELOPMENT
        _PRODUCTION = Instance.Type.PRODUCTION
        ALT_INSTANCE_ID = "ndif" + unique_resource_id("-")
        instance = Config.CLIENT.instance(
            ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS
        )
        operation = instance.create(location_id=LOCATION_ID, serve_nodes=None)
        # Make sure this instance gets deleted after the test case.
        self.instances_to_delete.append(instance)

        # We want to make sure the operation completes.
        operation.result(timeout=10)

        # Unset the display_name
        instance.display_name = None

        instance.type_ = _PRODUCTION
        operation = instance.update()

        # We want to make sure the operation completes.
        operation.result(timeout=10)

        # Create a new instance object and reload it.
        instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID)
        self.assertIsNone(instance_alt.type_)
        instance_alt.reload()
        self.assertEqual(instance_alt.type_, _PRODUCTION)
Example #13
def test_bigtable_create_additional_cluster():
    # [START bigtable_create_cluster]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import enums

    # Assuming that there is an existing instance with `INSTANCE_ID`
    # on the server already.
    # To create an instance, see
    # https://cloud.google.com/bigtable/docs/creating-instance

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)

    cluster_id = "clus-my-" + unique_resource_id("-")
    location_id = "us-central1-a"
    serve_nodes = 3
    storage_type = enums.StorageType.SSD

    cluster = instance.cluster(
        cluster_id,
        location_id=location_id,
        serve_nodes=serve_nodes,
        default_storage_type=storage_type,
    )
    operation = cluster.create()
    # We want to make sure the operation completes.
    operation.result(timeout=100)
    # [END bigtable_create_cluster]
    assert cluster.exists()

    cluster.delete()
Example #14
def test_document_integer_field(client, cleanup):
    document_id = 'for-set' + unique_resource_id('-')
    document = client.document('i-did-it', document_id)
    # Add to clean-up before API request (in case ``set()`` fails).
    cleanup(document)

    data1 = {
        '1a': {
            '2b': '3c',
            'ab': '5e'},
        '6f': {
            '7g': '8h',
            'cd': '0j'}
    }
    document.create(data1)

    data2 = {'1a.ab': '4d', '6f.7g': '9h'}
    option2 = client.write_option(exists=True)
    document.update(data2, option=option2)
    snapshot = document.get()
    expected = {
        '1a': {
            '2b': '3c',
            'ab': '4d'},
        '6f': {
            '7g': '9h',
            'cd': '0j'}
    }
    assert snapshot.to_dict() == expected
Example #15
    def test_rewrite_rotate_with_user_project(self):
        BLOB_NAME = 'rotating-keys'
        file_data = self.FILES['simple']
        new_bucket_name = 'rewrite-rotate-up' + unique_resource_id('-')
        created = Config.CLIENT.create_bucket(
            new_bucket_name, requester_pays=True)
        try:
            with_user_project = Config.CLIENT.bucket(
                new_bucket_name, user_project=USER_PROJECT)

            SOURCE_KEY = os.urandom(32)
            source = with_user_project.blob(
                BLOB_NAME, encryption_key=SOURCE_KEY)
            source.upload_from_filename(file_data['path'])
            source_data = source.download_as_string()

            DEST_KEY = os.urandom(32)
            dest = with_user_project.blob(BLOB_NAME, encryption_key=DEST_KEY)
            token, rewritten, total = dest.rewrite(source)

            self.assertEqual(token, None)
            self.assertEqual(rewritten, len(source_data))
            self.assertEqual(total, len(source_data))

            self.assertEqual(dest.download_as_string(), source_data)
        finally:
            retry_429(created.delete)(force=True)
Example #16
def test_document_set_merge(client, cleanup):
    document_id = 'for-set' + unique_resource_id('-')
    document = client.document('i-did-it', document_id)
    # Add to clean-up before API request (in case ``set()`` fails).
    cleanup(document)

    # 0. Make sure the document doesn't exist yet
    snapshot = document.get()
    assert not snapshot.exists

    # 1. Use ``create()`` to create the document.
    data1 = {'name': 'Sam',
             'address': {'city': 'SF',
                         'state': 'CA'}}
    write_result1 = document.create(data1)
    snapshot1 = document.get()
    assert snapshot1.to_dict() == data1
    # Make sure the update is what created the document.
    assert snapshot1.create_time == snapshot1.update_time
    assert snapshot1.update_time == write_result1.update_time

    # 2. Call ``set()`` to merge
    data2 = {'address': {'city': 'LA'}}
    write_result2 = document.set(data2, merge=True)
    snapshot2 = document.get()
    assert snapshot2.to_dict() == {'name': 'Sam',
                                   'address': {'city': 'LA',
                                               'state': 'CA'}}
    # Make sure the create time hasn't changed.
    assert snapshot2.create_time == snapshot1.create_time
    assert snapshot2.update_time == write_result2.update_time
Example #17
    def test_create_instance(self):
        from google.cloud.bigtable import enums

        _DEVELOPMENT = enums.Instance.Type.DEVELOPMENT
        _STATE = enums.Instance.State.READY

        ALT_INSTANCE_ID = "new" + unique_resource_id("-")
        instance = Config.CLIENT.instance(
            ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS
        )
        ALT_CLUSTER_ID = ALT_INSTANCE_ID + "-cluster"
        cluster = instance.cluster(ALT_CLUSTER_ID, location_id=LOCATION_ID)
        operation = instance.create(clusters=[cluster])
        # We want to make sure the operation completes.
        operation.result(timeout=10)

        # Make sure this instance gets deleted after the test case.
        self.instances_to_delete.append(instance)

        # Create a new instance object and make sure it is the same.
        instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID)
        instance_alt.reload()

        self.assertEqual(instance, instance_alt)
        self.assertEqual(instance.display_name, instance_alt.display_name)
        self.assertEqual(instance.type_, instance_alt.type_)
        self.assertEqual(instance_alt.labels, LABELS)
        self.assertEqual(_STATE, instance_alt.state)
Example #18
def test_watch_document(client, cleanup):
    db = client
    doc_ref = db.collection(u"users").document(u"alovelace" + unique_resource_id())

    # Initial setting
    doc_ref.set({u"first": u"Jane", u"last": u"Doe", u"born": 1900})

    sleep(1)

    # Setup listener
    def on_snapshot(docs, changes, read_time):
        on_snapshot.called_count += 1

    on_snapshot.called_count = 0

    doc_ref.on_snapshot(on_snapshot)

    # Alter document
    doc_ref.set({u"first": u"Ada", u"last": u"Lovelace", u"born": 1815})

    sleep(1)

    for _ in range(10):
        if on_snapshot.called_count == 1:
            return
        sleep(1)

    if on_snapshot.called_count != 1:
        raise AssertionError(
            "Failed to get exactly one document change: count: "
            + str(on_snapshot.called_count)
        )
Example #19
def test_watch_collection(client, cleanup):
    db = client
    doc_ref = db.collection(u"users").document(u"alovelace" + unique_resource_id())
    collection_ref = db.collection(u"users")

    # Initial setting
    doc_ref.set({u"first": u"Jane", u"last": u"Doe", u"born": 1900})

    # Setup listener
    def on_snapshot(docs, changes, read_time):
        on_snapshot.called_count += 1
        for doc in [doc for doc in docs if doc.id == doc_ref.id]:
            on_snapshot.born = doc.get("born")

    on_snapshot.called_count = 0
    on_snapshot.born = 0

    collection_ref.on_snapshot(on_snapshot)

    # delay here so initial on_snapshot occurs and isn't combined with set
    sleep(1)

    doc_ref.set({u"first": u"Ada", u"last": u"Lovelace", u"born": 1815})

    for _ in range(10):
        if on_snapshot.born == 1815:
            break
        sleep(1)

    if on_snapshot.born != 1815:
        raise AssertionError(
            "Expected the last document update to update born: " + str(on_snapshot.born)
        )
Example #20
def test_document_set_merge(client, cleanup):
    document_id = "for-set" + unique_resource_id("-")
    document = client.document("i-did-it", document_id)
    # Add to clean-up before API request (in case ``set()`` fails).
    cleanup(document)

    # 0. Make sure the document doesn't exist yet
    snapshot = document.get()
    assert not snapshot.exists

    # 1. Use ``create()`` to create the document.
    data1 = {"name": "Sam", "address": {"city": "SF", "state": "CA"}}
    write_result1 = document.create(data1)
    snapshot1 = document.get()
    assert snapshot1.to_dict() == data1
    # Make sure the update is what created the document.
    assert snapshot1.create_time == snapshot1.update_time
    assert snapshot1.update_time == write_result1.update_time

    # 2. Call ``set()`` to merge
    data2 = {"address": {"city": "LA"}}
    write_result2 = document.set(data2, merge=True)
    snapshot2 = document.get()
    assert snapshot2.to_dict() == {
        "name": "Sam",
        "address": {"city": "LA", "state": "CA"},
    }
    # Make sure the create time hasn't changed.
    assert snapshot2.create_time == snapshot1.create_time
    assert snapshot2.update_time == write_result2.update_time
Example #21
def test_query_unary(client, cleanup):
    collection_name = "unary" + unique_resource_id("-")
    collection = client.collection(collection_name)
    field_name = "foo"

    _, document0 = collection.add({field_name: None})
    # Add to clean-up.
    cleanup(document0)

    nan_val = float("nan")
    _, document1 = collection.add({field_name: nan_val})
    # Add to clean-up.
    cleanup(document1)

    # 0. Query for null.
    query0 = collection.where(field_name, "==", None)
    values0 = list(query0.stream())
    assert len(values0) == 1
    snapshot0 = values0[0]
    assert snapshot0.reference._path == document0._path
    assert snapshot0.to_dict() == {field_name: None}

    # 1. Query for a NAN.
    query1 = collection.where(field_name, "==", nan_val)
    values1 = list(query1.stream())
    assert len(values1) == 1
    snapshot1 = values1[0]
    assert snapshot1.reference._path == document1._path
    data1 = snapshot1.to_dict()
    assert len(data1) == 1
    assert math.isnan(data1[field_name])
Example #22
def test_document_delete(client, cleanup):
    document_id = "deleted" + unique_resource_id("-")
    document = client.document("here-to-be", document_id)
    # Add to clean-up before API request (in case ``create()`` fails).
    cleanup(document)
    document.create({"not": "much"})

    # 1. Call ``delete()`` with invalid (in the past) "last timestamp" option.
    snapshot1 = document.get()
    timestamp_pb = timestamp_pb2.Timestamp(
        seconds=snapshot1.update_time.seconds - 3600, nanos=snapshot1.update_time.nanos
    )
    option1 = client.write_option(last_update_time=timestamp_pb)
    with pytest.raises(FailedPrecondition):
        document.delete(option=option1)

    # 2. Call ``delete()`` with invalid (in future) "last timestamp" option.
    timestamp_pb = timestamp_pb2.Timestamp(
        seconds=snapshot1.update_time.seconds + 3600, nanos=snapshot1.update_time.nanos
    )
    option2 = client.write_option(last_update_time=timestamp_pb)
    with pytest.raises(FailedPrecondition):
        document.delete(option=option2)

    # 3. Actually ``delete()`` the document.
    delete_time3 = document.delete()

    # 4. ``delete()`` again, even though we know the document is gone.
    delete_time4 = document.delete()
    assert_timestamp_less(delete_time3, delete_time4)
Example #23
def test_document_set(client, cleanup):
    document_id = "for-set" + unique_resource_id("-")
    document = client.document("i-did-it", document_id)
    # Add to clean-up before API request (in case ``set()`` fails).
    cleanup(document)

    # 0. Make sure the document doesn't exist yet
    snapshot = document.get()
    assert snapshot.to_dict() is None

    # 1. Use ``create()`` to create the document.
    data1 = {"foo": 88}
    write_result1 = document.create(data1)
    snapshot1 = document.get()
    assert snapshot1.to_dict() == data1
    # Make sure the update is what created the document.
    assert snapshot1.create_time == snapshot1.update_time
    assert snapshot1.update_time == write_result1.update_time

    # 2. Call ``set()`` again to overwrite.
    data2 = {"bar": None}
    write_result2 = document.set(data2)
    snapshot2 = document.get()
    assert snapshot2.to_dict() == data2
    # Make sure the create time hasn't changed.
    assert snapshot2.create_time == snapshot1.create_time
    assert snapshot2.update_time == write_result2.update_time
Example #24
def test_bigtable_delete_cluster():
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    cluster_id = "clus-my-" + unique_resource_id("-")
    cluster = instance.cluster(
        cluster_id,
        location_id=ALT_LOCATION_ID,
        serve_nodes=SERVER_NODES,
        default_storage_type=STORAGE_TYPE,
    )
    operation = cluster.create()
    # We want to make sure the operation completes.
    operation.result(timeout=1000)

    # [START bigtable_delete_cluster]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    cluster_to_delete = instance.cluster(cluster_id)

    cluster_to_delete.delete()
    # [END bigtable_delete_cluster]
    assert not cluster_to_delete.exists()
Example #25
def test_update_document(client, cleanup):
    document_id = "for-update" + unique_resource_id("-")
    document = client.document("made", document_id)
    # Add to clean-up before API request (in case ``create()`` fails).
    cleanup(document)

    # 0. Try to update before the document exists.
    with pytest.raises(NotFound) as exc_info:
        document.update({"not": "there"})
    assert exc_info.value.message.startswith(MISSING_DOCUMENT)
    assert document_id in exc_info.value.message

    # 1. Try to update before the document exists (now with an option).
    with pytest.raises(NotFound) as exc_info:
        document.update({"still": "not-there"})
    assert exc_info.value.message.startswith(MISSING_DOCUMENT)
    assert document_id in exc_info.value.message

    # 2. Update and create the document (with an option).
    data = {"foo": {"bar": "baz"}, "scoop": {"barn": 981}, "other": True}
    write_result2 = document.create(data)

    # 3. Send an update without a field path (no option).
    field_updates3 = {"foo": {"quux": 800}}
    write_result3 = document.update(field_updates3)
    assert_timestamp_less(write_result2.update_time, write_result3.update_time)
    snapshot3 = document.get()
    expected3 = {
        "foo": field_updates3["foo"],
        "scoop": data["scoop"],
        "other": data["other"],
    }
    assert snapshot3.to_dict() == expected3

    # 4. Send an update **with** a field path and a delete and a valid
    #    "last timestamp" option.
    field_updates4 = {"scoop.silo": None, "other": firestore.DELETE_FIELD}
    option4 = client.write_option(last_update_time=snapshot3.update_time)
    write_result4 = document.update(field_updates4, option=option4)
    assert_timestamp_less(write_result3.update_time, write_result4.update_time)
    snapshot4 = document.get()
    expected4 = {
        "foo": field_updates3["foo"],
        "scoop": {"barn": data["scoop"]["barn"], "silo": field_updates4["scoop.silo"]},
    }
    assert snapshot4.to_dict() == expected4

    # 5. Call ``update()`` with invalid (in the past) "last timestamp" option.
    assert_timestamp_less(option4._last_update_time, snapshot4.update_time)
    with pytest.raises(FailedPrecondition) as exc_info:
        document.update({"bad": "time-past"}, option=option4)

    # 6. Call ``update()`` with invalid (in future) "last timestamp" option.
    timestamp_pb = timestamp_pb2.Timestamp(
        seconds=snapshot4.update_time.seconds + 3600, nanos=snapshot4.update_time.nanos
    )
    option6 = client.write_option(last_update_time=timestamp_pb)
    with pytest.raises(FailedPrecondition) as exc_info:
        document.update({"bad": "time-future"}, option=option6)
Example #26
    def test_list_buckets(self):
        buckets_to_create = [
            'new' + unique_resource_id(),
            'newer' + unique_resource_id(),
            'newest' + unique_resource_id(),
        ]
        for bucket_name in buckets_to_create:
            bucket = Config.CLIENT.bucket(bucket_name)
            retry_429(bucket.create)()
            self.case_buckets_to_delete.append(bucket_name)

        # Retrieve the buckets.
        all_buckets = Config.CLIENT.list_buckets()
        created_buckets = [bucket for bucket in all_buckets
                           if bucket.name in buckets_to_create]
        self.assertEqual(len(created_buckets), len(buckets_to_create))
Example #27
def setUpModule():
    VisionSystemTestBase.client = vision.ImageAnnotatorClient()
    storage_client = storage.Client()
    bucket_name = "new" + unique_resource_id()
    VisionSystemTestBase.test_bucket = storage_client.bucket(bucket_name)

    # 429 Too Many Requests in case API requests rate-limited.
    retry_429 = RetryErrors(exceptions.TooManyRequests)
    retry_429(VisionSystemTestBase.test_bucket.create)()
Example #28
def test_cannot_use_foreign_key(client, cleanup):
    document_id = "cannot" + unique_resource_id("-")
    document = client.document("foreign-key", document_id)
    # Add to clean-up before API request (in case ``create()`` fails).
    cleanup(document)

    other_client = firestore.Client(
        project="other-prahj", credentials=client._credentials, database="dee-bee"
    )
    assert other_client._database_string != client._database_string
    fake_doc = other_client.document("foo", "bar")
    with pytest.raises(InvalidArgument):
        document.create({"ref": fake_doc})
Example #29
    def test_create_product_set(self):
        product_set = vision.types.ProductSet(display_name="display name")
        product_set_id = "set" + unique_resource_id()
        product_set_path = self.ps_client.product_set_path(
            project=PROJECT_ID, location=self.location, product_set=product_set_id
        )
        self.product_sets_to_delete.append(product_set_path)
        response = self.ps_client.create_product_set(
            parent=self.location_path,
            product_set=product_set,
            product_set_id=product_set_id,
        )
        self.assertEqual(response.name, product_set_path)
Example #30
    def test_write_point(self):
        METRIC_TYPE = ('custom.googleapis.com/tmp/system_test_example' +
                       unique_resource_id())
        METRIC_KIND = monitoring.MetricKind.GAUGE
        VALUE_TYPE = monitoring.ValueType.DOUBLE
        DESCRIPTION = 'System test example -- DELETE ME!'
        VALUE = 3.14

        client = monitoring.Client()
        descriptor = client.metric_descriptor(
            METRIC_TYPE,
            metric_kind=METRIC_KIND,
            value_type=VALUE_TYPE,
            description=DESCRIPTION,
        )

        descriptor.create()

        metric = client.metric(METRIC_TYPE, {})
        resource = client.resource('global', {})

        retry_500(client.write_point)(metric, resource, VALUE)

        def _query_timeseries_with_retries():
            MAX_RETRIES = 6

            def _has_timeseries(result):
                return len(list(result)) > 0

            retry_result = RetryResult(
                _has_timeseries,
                max_tries=MAX_RETRIES,
                backoff=3)(client.query)
            return RetryErrors(
                BadRequest,
                max_tries=MAX_RETRIES,
                backoff=3)(retry_result)

        query = _query_timeseries_with_retries()(METRIC_TYPE, minutes=5)
        timeseries_list = list(query)
        self.assertEqual(len(timeseries_list), 1)
        timeseries = timeseries_list[0]
        self.assertEqual(timeseries.metric, metric)
        # project_id label only exists on output.
        del timeseries.resource.labels['project_id']
        self.assertEqual(timeseries.resource, resource)

        descriptor.delete()

        with self.assertRaises(NotFound):
            descriptor.delete()
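
``RetryResult`` retries a call until a predicate accepts its return value, complementing ``RetryErrors``; both presumably live in ``test_utils.retry``. A simplified sketch:

import time


class RetryResult(object):
    # Simplified sketch: call the wrapped callable until
    # ``result_predicate`` approves its result, sleeping between tries.
    def __init__(self, result_predicate, max_tries=4, delay=1, backoff=2):
        self.result_predicate = result_predicate
        self.max_tries = max_tries
        self.delay = delay
        self.backoff = backoff

    def __call__(self, to_wrap):
        def wrapped(*args, **kwargs):
            delay = self.delay
            for _ in range(self.max_tries):
                result = to_wrap(*args, **kwargs)
                if self.result_predicate(result):
                    return result
                time.sleep(delay)
                delay *= self.backoff
            raise RuntimeError('retry limit reached without a passing result')
        return wrapped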
Example #31
def test_document_integer_field(client, cleanup):
    document_id = 'for-set' + unique_resource_id('-')
    document = client.document('i-did-it', document_id)
    # Add to clean-up before API request (in case ``set()`` fails).
    cleanup(document)

    data1 = {'1a': {'2b': '3c', 'ab': '5e'}, '6f': {'7g': '8h', 'cd': '0j'}}
    option1 = client.write_option(exists=False)
    document.set(data1, option=option1)

    data2 = {'1a.ab': '4d', '6f.7g': '9h'}
    option2 = client.write_option(create_if_missing=True)
    document.update(data2, option=option2)
    snapshot = document.get()
    expected = {'1a': {'2b': '3c', 'ab': '4d'}, '6f': {'7g': '9h', 'cd': '0j'}}
    assert snapshot.to_dict() == expected
Example #32
    def test_get_product(self):
        # Create a Product.
        product = vision.types.Product(display_name="product display name",
                                       product_category="apparel")
        product_id = "product" + unique_resource_id()
        product_path = self.ps_client.product_path(project=PROJECT_ID,
                                                   location=self.location,
                                                   product=product_id)
        response = self.ps_client.create_product(parent=self.location_path,
                                                 product=product,
                                                 product_id=product_id)
        self.products_to_delete.append(response.name)
        self.assertEqual(response.name, product_path)
        # Get the Product.
        get_response = self.ps_client.get_product(name=product_path)
        self.assertEqual(get_response.name, product_path)
Example #33
def test_listing_project_topics(publisher, project, cleanup):
    topic_paths = [
        publisher.topic_path(project,
                             "topic-{}".format(i) + unique_resource_id("."))
        for i in range(1, 4)
    ]
    for topic in topic_paths:
        cleanup.append((publisher.delete_topic, topic))
        publisher.create_topic(topic)

    project_path = publisher.project_path(project)
    project_topics = publisher.list_topics(project_path)
    project_topics = set(t.name for t in project_topics)

    # there might be other topics in the project, thus do a "is subset" check
    assert set(topic_paths) <= project_topics
Example #34
def test_bigtable_list_app_profiles():
    app_profile = Config.INSTANCE.app_profile(
        app_profile_id="app-prof-" + unique_resource_id("-"),
        routing_policy_type=enums.RoutingPolicyType.ANY,
    )
    app_profile = app_profile.create(ignore_warnings=True)

    # [START bigtable_list_app_profiles]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)

    app_profiles_list = instance.list_app_profiles()
    # [END bigtable_list_app_profiles]
    assert len(app_profiles_list) > 0
Example #35
    def test_create_instance(self):
        ALT_INSTANCE_ID = 'new' + unique_resource_id('-')
        instance = Config.CLIENT.instance(ALT_INSTANCE_ID, LOCATION_ID)
        operation = instance.create()
        # Make sure this instance gets deleted after the test case.
        self.instances_to_delete.append(instance)

        # We want to make sure the operation completes.
        operation.result(timeout=10)

        # Create a new instance object and make sure it is the same.
        instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID, LOCATION_ID)
        instance_alt.reload()

        self.assertEqual(instance, instance_alt)
        self.assertEqual(instance.display_name, instance_alt.display_name)
Example #36
    def test_create_product_set(self):
        # Create a ProductSet.
        product_set = vision.types.ProductSet(display_name="display name")
        product_set_id = "set" + unique_resource_id()
        product_set_path = self.ps_client.product_set_path(
            project=PROJECT_ID,
            location=self.location,
            product_set=product_set_id)
        response = self.ps_client.create_product_set(
            parent=self.location_path,
            product_set=product_set,
            product_set_id=product_set_id,
        )
        self.product_sets_to_delete.append(response.name)
        # Verify the ProductSet was successfully created.
        self.assertEqual(response.name, product_set_path)
Example #37
    def test_write_point(self):
        METRIC_TYPE = ('custom.googleapis.com/tmp/system_test_example' +
                       unique_resource_id())
        METRIC_KIND = monitoring.MetricKind.GAUGE
        VALUE_TYPE = monitoring.ValueType.DOUBLE
        DESCRIPTION = 'System test example -- DELETE ME!'
        VALUE = 3.14

        client = monitoring.Client()
        descriptor = client.metric_descriptor(
            METRIC_TYPE,
            metric_kind=METRIC_KIND,
            value_type=VALUE_TYPE,
            description=DESCRIPTION,
        )

        descriptor.create()

        metric = client.metric(METRIC_TYPE, {})
        resource = client.resource('global', {})

        retry_500(client.write_point)(metric, resource, VALUE)

        def _query_timeseries_with_retries():
            MAX_RETRIES = 10

            def _has_timeseries(result):
                return len(list(result)) > 0

            retry_result = RetryResult(_has_timeseries,
                                       max_tries=MAX_RETRIES)(client.query)
            return RetryErrors(BadRequest, max_tries=MAX_RETRIES)(retry_result)

        query = _query_timeseries_with_retries()(METRIC_TYPE, minutes=5)
        timeseries_list = list(query)
        self.assertEqual(len(timeseries_list), 1)
        timeseries = timeseries_list[0]
        self.assertEqual(timeseries.metric, metric)
        # project_id label only exists on output.
        del timeseries.resource.labels['project_id']
        self.assertEqual(timeseries.resource, resource)

        descriptor.delete()

        with self.assertRaises(NotFound):
            descriptor.delete()
Example #38
    def test_topic_iam_policy(self):
        from google.cloud.pubsub.iam import PUBSUB_TOPICS_GET_IAM_POLICY
        self._maybe_emulator_skip()
        topic_name = 'test-topic-iam-policy-topic' + unique_resource_id('-')
        topic = Config.CLIENT.topic(topic_name)
        topic.create()

        # Retry / backoff up to 7 seconds (1 + 2 + 4)
        retry = RetryResult(lambda result: result, max_tries=4)
        retry(topic.exists)()
        self.to_delete.append(topic)

        if topic.check_iam_permissions([PUBSUB_TOPICS_GET_IAM_POLICY]):
            policy = topic.get_iam_policy()
            policy.viewers.add(policy.user('*****@*****.**'))
            new_policy = topic.set_iam_policy(policy)
            self.assertEqual(new_policy.viewers, policy.viewers)
Example #39
def test_collection_add(client, cleanup):
    collection1 = client.collection('collek')
    collection2 = client.collection('collek', 'shun', 'child')
    explicit_doc_id = 'hula' + unique_resource_id('-')

    # Auto-ID at top-level.
    data1 = {'foo': 'bar'}
    update_time1, document_ref1 = collection1.add(data1)
    cleanup(document_ref1)
    snapshot1 = document_ref1.get()
    assert snapshot1.to_dict() == data1
    assert snapshot1.create_time == update_time1
    assert snapshot1.update_time == update_time1
    assert RANDOM_ID_REGEX.match(document_ref1.id)

    # Explicit ID at top-level.
    data2 = {'baz': 999}
    update_time2, document_ref2 = collection1.add(data2,
                                                  document_id=explicit_doc_id)
    cleanup(document_ref2)
    snapshot2 = document_ref2.get()
    assert snapshot2.to_dict() == data2
    assert snapshot2.create_time == update_time2
    assert snapshot2.update_time == update_time2
    assert document_ref2.id == explicit_doc_id

    # Auto-ID for nested collection.
    data3 = {'quux': b'\x00\x01\x02\x03'}
    update_time3, document_ref3 = collection2.add(data3)
    cleanup(document_ref3)
    snapshot3 = document_ref3.get()
    assert snapshot3.to_dict() == data3
    assert snapshot3.create_time == update_time3
    assert snapshot3.update_time == update_time3
    assert RANDOM_ID_REGEX.match(document_ref3.id)

    # Explicit for nested collection.
    data4 = {'kazaam': None, 'bad': False}
    update_time4, document_ref4 = collection2.add(data4,
                                                  document_id=explicit_doc_id)
    cleanup(document_ref4)
    snapshot4 = document_ref4.get()
    assert snapshot4.to_dict() == data4
    assert snapshot4.create_time == update_time4
    assert snapshot4.update_time == update_time4
    assert document_ref4.id == explicit_doc_id
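
``RANDOM_ID_REGEX`` is a module-level pattern matching Firestore's auto-generated document IDs, presumably along the lines of:

import re

# Auto-generated Firestore document IDs are 20 alphanumeric characters.
RANDOM_ID_REGEX = re.compile(r'^[a-zA-Z0-9]{20}$')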
Example #40
    def test_create_and_delete_metric_descriptor(self):
        METRIC_TYPE = ('custom.googleapis.com/tmp/system_test_example' +
                       unique_resource_id())
        METRIC_KIND = monitoring.MetricKind.GAUGE
        VALUE_TYPE = monitoring.ValueType.DOUBLE
        DESCRIPTION = 'System test example -- DELETE ME!'

        client = monitoring.Client()
        descriptor = client.metric_descriptor(
            METRIC_TYPE,
            metric_kind=METRIC_KIND,
            value_type=VALUE_TYPE,
            description=DESCRIPTION,
        )

        retry_500(descriptor.create)()
        retry_404_500(descriptor.delete)()
Example #41
    def test_snapshot_seek_subscriber_permissions_sufficient(
        self, project, publisher, topic_path, subscriber, subscription_path, cleanup
    ):
        snapshot_name = "snap" + unique_resource_id("-")
        snapshot_path = "projects/{}/snapshots/{}".format(project, snapshot_name)

        # Make sure the topic and subscription get deleted.
        cleanup.append((publisher.delete_topic, (), {"topic": topic_path}))
        cleanup.append(
            (subscriber.delete_subscription, (), {"subscription": subscription_path})
        )
        cleanup.append((subscriber.delete_snapshot, (), {"snapshot": snapshot_path}))

        # Create a topic and subscribe to it.
        publisher.create_topic(name=topic_path)
        subscriber.create_subscription(
            name=subscription_path, topic=topic_path, retain_acked_messages=True
        )

        # A service account granting only the pubsub.subscriber role must be used.
        filename = os.path.join(
            os.environ["KOKORO_GFILE_DIR"], "pubsub-subscriber-service-account.json"
        )
        subscriber_only_client = type(subscriber).from_service_account_file(filename)

        # Publish two messages and create a snapshot in between.
        _publish_messages(publisher, topic_path, batch_sizes=[1])
        response = subscriber.pull(subscription=subscription_path, max_messages=10)
        assert len(response.received_messages) == 1

        subscriber.create_snapshot(name=snapshot_path, subscription=subscription_path)

        _publish_messages(publisher, topic_path, batch_sizes=[1])
        response = subscriber.pull(subscription=subscription_path, max_messages=10)
        assert len(response.received_messages) == 1

        # A subscriber-only client should be allowed to seek to a snapshot.
        seek_request = gapic_types.SeekRequest(
            subscription=subscription_path, snapshot=snapshot_path
        )
        subscriber_only_client.seek(seek_request)

        # We should receive one message again, since we sought back to a snapshot.
        response = subscriber.pull(subscription=subscription_path, max_messages=10)
        assert len(response.received_messages) == 1
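
``_publish_messages`` is defined elsewhere in the suite. A minimal sketch, assuming it publishes one batch of small payloads per entry in ``batch_sizes`` and blocks until delivery:

def _publish_messages(publisher, topic_path, batch_sizes):
    # Assumed helper: publish the requested batches and wait for every
    # publish future to resolve before returning.
    futures = []
    for batch_size in batch_sizes:
        for index in range(batch_size):
            data = u"message {}".format(index).encode("utf-8")
            futures.append(publisher.publish(topic_path, data))
    for future in futures:
        future.result(timeout=30)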
Example #42
def test_create_document(client, cleanup):
    now = datetime.datetime.utcnow().replace(tzinfo=UTC)
    document_id = 'shun' + unique_resource_id('-')
    document = client.document('collek', document_id)
    # Add to clean-up before API request (in case ``create()`` fails).
    cleanup(document)

    data = {
        'now': firestore.SERVER_TIMESTAMP,
        'eenta-ger': 11,
        'bites': b'\xe2\x98\x83 \xe2\x9b\xb5',
        'also': {
            'nestednow': firestore.SERVER_TIMESTAMP,
            'quarter': 0.25,
        },
    }
    write_result = document.create(data)
    updated = _pb_timestamp_to_datetime(write_result.update_time)
    delta = updated - now
    # Allow a bit of clock skew, but make sure timestamps are close.
    assert -300.0 < delta.total_seconds() < 300.0

    with pytest.raises(AlreadyExists):
        document.create(data)

    # Verify the server times.
    snapshot = document.get()
    stored_data = snapshot.to_dict()
    server_now = stored_data['now']

    delta = updated - server_now
    # NOTE: We could check the ``transform_results`` from the write result
    #       for the document transform, but this value gets dropped. Instead
    #       we make sure the timestamps are close.
    assert 0.0 <= delta.total_seconds() < 5.0
    expected_data = {
        'now': server_now,
        'eenta-ger': data['eenta-ger'],
        'bites': data['bites'],
        'also': {
            'nestednow': server_now,
            'quarter': data['also']['quarter'],
        },
    }
    assert stored_data == expected_data
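
``UTC`` and ``_pb_timestamp_to_datetime`` are presumably imported from ``google.cloud._helpers``, which provides both:

from google.cloud._helpers import UTC, _pb_timestamp_to_datetime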
Example #43
def test_create_document(client, cleanup):
    now = datetime.datetime.utcnow().replace(tzinfo=UTC)
    document_id = "shun" + unique_resource_id("-")
    document = client.document("collek", document_id)
    # Add to clean-up before API request (in case ``create()`` fails).
    cleanup(document)

    data = {
        "now": firestore.SERVER_TIMESTAMP,
        "eenta-ger": 11,
        "bites": b"\xe2\x98\x83 \xe2\x9b\xb5",
        "also": {
            "nestednow": firestore.SERVER_TIMESTAMP,
            "quarter": 0.25
        },
    }
    write_result = document.create(data)
    updated = _pb_timestamp_to_datetime(write_result.update_time)
    delta = updated - now
    # Allow a bit of clock skew, but make sure timestamps are close.
    assert -300.0 < delta.total_seconds() < 300.0

    with pytest.raises(AlreadyExists):
        document.create(data)

    # Verify the server times.
    snapshot = document.get()
    stored_data = snapshot.to_dict()
    server_now = stored_data["now"]

    delta = updated - server_now
    # NOTE: We could check the ``transform_results`` from the write result
    #       for the document transform, but this value gets dropped. Instead
    #       we make sure the timestamps are close.
    assert 0.0 <= delta.total_seconds() < 5.0
    expected_data = {
        "now": server_now,
        "eenta-ger": data["eenta-ger"],
        "bites": data["bites"],
        "also": {
            "nestednow": server_now,
            "quarter": data["also"]["quarter"]
        },
    }
    assert stored_data == expected_data
Example #44
def test_cannot_use_foreign_key(client, cleanup):
    document_id = 'cannot' + unique_resource_id('-')
    document = client.document('foreign-key', document_id)
    # Add to clean-up before API request (in case ``create()`` fails).
    cleanup(document)

    other_client = firestore.Client(project='other-prahj',
                                    credentials=client._credentials,
                                    database='dee-bee')
    assert other_client._database_string != client._database_string
    fake_doc = other_client.document('foo', 'bar')
    # NOTE: google-gax **does not** raise a GaxError for INVALID_ARGUMENT.
    with pytest.raises(ValueError) as exc_info:
        document.create({'ref': fake_doc})

    assert len(exc_info.value.args) == 1
    err_msg = exc_info.value.args[0]
    assert err_msg == 'RPC failed'
Example #45
    def test_notification_w_user_project(self):
        new_bucket_name = 'notification-minimal' + unique_resource_id('-')
        bucket = retry_429(Config.CLIENT.create_bucket)(new_bucket_name,
                                                        requester_pays=True)
        self.case_buckets_to_delete.append(new_bucket_name)
        with_user_project = Config.CLIENT.bucket(new_bucket_name,
                                                 user_project=USER_PROJECT)
        self.assertEqual(list(with_user_project.list_notifications()), [])
        notification = with_user_project.notification(self.TOPIC_NAME)
        retry_429(notification.create)()
        try:
            self.assertTrue(notification.exists())
            self.assertIsNotNone(notification.notification_id)
            notifications = list(with_user_project.list_notifications())
            self.assertEqual(len(notifications), 1)
            self.assertEqual(notifications[0].topic_name, self.TOPIC_NAME)
        finally:
            notification.delete()
Example #46
def test_create_document_w_subcollection(client, cleanup):
    document_id = "shun" + unique_resource_id("-")
    document = client.document("collek", document_id)
    # Add to clean-up before API request (in case ``create()`` fails).
    cleanup(document)

    data = {"now": firestore.SERVER_TIMESTAMP}
    document.create(data)

    child_ids = ["child1", "child2"]

    for child_id in child_ids:
        subcollection = document.collection(child_id)
        _, subdoc = subcollection.add({"foo": "bar"})
        cleanup(subdoc)

    children = document.collections()
    assert sorted(child.id for child in children) == sorted(child_ids)
Example #47
def test_collection_add(client, cleanup):
    collection1 = client.collection("collek")
    collection2 = client.collection("collek", "shun", "child")
    explicit_doc_id = "hula" + unique_resource_id("-")

    # Auto-ID at top-level.
    data1 = {"foo": "bar"}
    update_time1, document_ref1 = collection1.add(data1)
    cleanup(document_ref1)
    snapshot1 = document_ref1.get()
    assert snapshot1.to_dict() == data1
    assert snapshot1.update_time == update_time1
    assert RANDOM_ID_REGEX.match(document_ref1.id)

    # Explicit ID at top-level.
    data2 = {"baz": 999}
    update_time2, document_ref2 = collection1.add(data2,
                                                  document_id=explicit_doc_id)
    cleanup(document_ref2)
    snapshot2 = document_ref2.get()
    assert snapshot2.to_dict() == data2
    assert snapshot2.create_time == update_time2
    assert snapshot2.update_time == update_time2
    assert document_ref2.id == explicit_doc_id

    # Auto-ID for nested collection.
    data3 = {"quux": b"\x00\x01\x02\x03"}
    update_time3, document_ref3 = collection2.add(data3)
    cleanup(document_ref3)
    snapshot3 = document_ref3.get()
    assert snapshot3.to_dict() == data3
    assert snapshot3.update_time == update_time3
    assert RANDOM_ID_REGEX.match(document_ref3.id)

    # Explicit for nested collection.
    data4 = {"kazaam": None, "bad": False}
    update_time4, document_ref4 = collection2.add(data4,
                                                  document_id=explicit_doc_id)
    cleanup(document_ref4)
    snapshot4 = document_ref4.get()
    assert snapshot4.to_dict() == data4
    assert snapshot4.create_time == update_time4
    assert snapshot4.update_time == update_time4
    assert document_ref4.id == explicit_doc_id
Example #48
def test_query_with_order_dot_key(client, cleanup):
    db = client
    collection_id = "collek" + unique_resource_id("-")
    collection = db.collection(collection_id)
    for index in range(100, -1, -1):
        doc = collection.document("test_{:09d}".format(index))
        data = {"count": 10 * index, "wordcount": {"page1": index * 10 + 100}}
        doc.set(data)
        cleanup(doc.delete)
    query = collection.order_by("wordcount.page1").limit(3)
    data = [doc.to_dict()["wordcount"]["page1"] for doc in query.stream()]
    assert [100, 110, 120] == data
    for snapshot in collection.order_by("wordcount.page1").limit(3).stream():
        last_value = snapshot.get("wordcount.page1")
    cursor_with_nested_keys = {"wordcount": {"page1": last_value}}
    found = list(
        collection.order_by("wordcount.page1").start_after(
            cursor_with_nested_keys).limit(3).stream())
    found_data = [
        {u"count": 30, u"wordcount": {u"page1": 130}},
        {u"count": 40, u"wordcount": {u"page1": 140}},
        {u"count": 50, u"wordcount": {u"page1": 150}},
    ]
    assert found_data == [snap.to_dict() for snap in found]
    cursor_with_dotted_paths = {"wordcount.page1": last_value}
    cursor_with_key_data = list(
        collection.order_by("wordcount.page1").start_after(
            cursor_with_dotted_paths).limit(3).stream())
    assert found_data == [snap.to_dict() for snap in cursor_with_key_data]
Example #49
    def test_bucket_update_labels(self):
        bucket_name = 'update-labels' + unique_resource_id('-')
        bucket = retry_429(Config.CLIENT.create_bucket)(bucket_name)
        self.case_buckets_to_delete.append(bucket_name)
        self.assertTrue(bucket.exists())

        updated_labels = {'test-label': 'label-value'}
        bucket.labels = updated_labels
        bucket.update()
        self.assertEqual(bucket.labels, updated_labels)

        new_labels = {'another-label': 'another-value'}
        bucket.labels = new_labels
        bucket.patch()
        self.assertEqual(bucket.labels, new_labels)

        bucket.labels = {}
        bucket.update()
        self.assertEqual(bucket.labels, {})
Example #50
def test_batch(client, cleanup):
    collection_name = "batch" + unique_resource_id("-")

    document1 = client.document(collection_name, "abc")
    document2 = client.document(collection_name, "mno")
    document3 = client.document(collection_name, "xyz")
    # Add to clean-up before API request (in case ``create()`` fails).
    cleanup(document1)
    cleanup(document2)
    cleanup(document3)

    data2 = {"some": {"deep": "stuff", "and": "here"}, "water": 100.0}
    document2.create(data2)
    document3.create({"other": 19})

    batch = client.batch()
    data1 = {"all": True}
    batch.create(document1, data1)
    new_value = "there"
    batch.update(document2, {"some.and": new_value})
    batch.delete(document3)
    write_results = batch.commit()

    assert len(write_results) == 3

    write_result1 = write_results[0]
    write_result2 = write_results[1]
    write_result3 = write_results[2]
    assert not write_result3.HasField("update_time")

    snapshot1 = document1.get()
    assert snapshot1.to_dict() == data1
    assert snapshot1.create_time == write_result1.update_time
    assert snapshot1.update_time == write_result1.update_time

    snapshot2 = document2.get()
    assert snapshot2.to_dict() != data2
    data2["some"]["and"] = new_value
    assert snapshot2.to_dict() == data2
    assert_timestamp_less(snapshot2.create_time, write_result2.update_time)
    assert snapshot2.update_time == write_result2.update_time

    assert not document3.get().exists
Example #51
    def test_log_handler_async(self):
        LOG_MESSAGE = 'It was the worst of times'

        handler_name = 'gcp-async' + unique_resource_id('-')
        handler = CloudLoggingHandler(Config.CLIENT, name=handler_name)
        # only create the logger to delete, hidden otherwise
        logger = Config.CLIENT.logger(handler_name)
        self.to_delete.append(logger)

        cloud_logger = logging.getLogger(handler.name)
        cloud_logger.addHandler(handler)
        cloud_logger.warn(LOG_MESSAGE)
        handler.flush()
        entries = _list_entries(logger)
        expected_payload = {
            'message': LOG_MESSAGE,
            'python_logger': handler.name
        }
        self.assertEqual(len(entries), 1)
        self.assertEqual(entries[0].payload, expected_payload)
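
``_list_entries`` is a suite-level helper; since Stackdriver Logging ingestion is eventually consistent, it presumably polls until entries appear. A sketch built on the ``RetryResult`` pattern sketched after Example #30:

def _list_entries(logger):
    # Assumed helper: retry listing until the backend returns at least
    # one entry, then hand the list back to the caller.
    retry = RetryResult(lambda entries: len(entries) > 0, max_tries=8)
    return retry(lambda: list(logger.list_entries()))()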
Example #52
def test_bigtable_create_app_profile():
    # [START bigtable_create_app_profile]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import enums
    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)

    app_profile_id = "app-prof-" + unique_resource_id('-')
    description = 'routing policy-multy'
    routing_policy_type = enums.RoutingPolicyType.ANY

    app_profile = instance.app_profile(app_profile_id=app_profile_id,
                                       routing_policy_type=routing_policy_type,
                                       description=description,
                                       cluster_id=CLUSTER_ID)

    app_profile = app_profile.create(ignore_warnings=True)
    # [END bigtable_create_app_profile]
    assert app_profile.exists()

    app_profile.delete(ignore_warnings=True)
Example #53
    def test_bucket_get_blob_with_user_project(self):
        new_bucket_name = 'w-requester-pays' + unique_resource_id('-')
        data = b'DEADBEEF'
        created = Config.CLIENT.create_bucket(new_bucket_name,
                                              requester_pays=True)
        self.case_buckets_to_delete.append(new_bucket_name)
        self.assertEqual(created.name, new_bucket_name)
        self.assertTrue(created.requester_pays)

        with_user_project = Config.CLIENT.bucket(new_bucket_name,
                                                 user_project=USER_PROJECT)

        self.assertIsNone(with_user_project.get_blob('nonesuch'))
        to_add = created.blob('blob-name')
        to_add.upload_from_string(data)
        try:
            found = with_user_project.get_blob('blob-name')
            self.assertEqual(found.download_as_string(), data)
        finally:
            to_add.delete()
Example #54
def test_create_document_w_subcollection(client, cleanup):
    document_id = 'shun' + unique_resource_id('-')
    document = client.document('collek', document_id)
    # Add to clean-up before API request (in case ``create()`` fails).
    cleanup(document)

    data = {
        'now': firestore.SERVER_TIMESTAMP,
    }
    document.create(data)

    child_ids = ['child1', 'child2']

    for child_id in child_ids:
        subcollection = document.collection(child_id)
        _, subdoc = subcollection.add({'foo': 'bar'})
        cleanup(subdoc)

    children = document.collections()
    assert sorted(child.id for child in children) == sorted(child_ids)
Example #55
def test_document_set_w_int_field(client, cleanup):
    document_id = 'set-int-key' + unique_resource_id('-')
    document = client.document('i-did-it', document_id)
    # Add to clean-up before API request (in case ``set()`` fails).
    cleanup(document)

    # 0. Make sure the document doesn't exist yet
    snapshot = document.get()
    assert not snapshot.exists

    # 1. Use ``create()`` to create the document.
    before = {'testing': '1'}
    document.create(before)

    # 2. Replace using ``set()``.
    data = {'14': {'status': 'active'}}
    document.set(data)

    # 3. Verify replaced data.
    snapshot1 = document.get()
    assert snapshot1.to_dict() == data
Example #56
def test_bigtable_delete_cluster():
    # [START bigtable_delete_cluster]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    cluster_id = "clus-my-" + unique_resource_id('-')
    # [END bigtable_delete_cluster]

    cluster = instance.cluster(cluster_id, location_id=ALT_LOCATION_ID,
                               serve_nodes=SERVER_NODES,
                               default_storage_type=STORAGE_TYPE)
    operation = cluster.create()
    # We want to make sure the operation completes.
    operation.result(timeout=1000)

    # [START bigtable_delete_cluster]
    cluster_to_delete = instance.cluster(cluster_id)
    cluster_to_delete.delete()
    # [END bigtable_delete_cluster]
    assert not cluster_to_delete.exists()
Example #57
    def test_list_products(self):
        # Create a Product.
        product = vision.types.Product(display_name="product display name",
                                       product_category="apparel")
        product_id = "product" + unique_resource_id()
        product_path = self.ps_client.product_path(project=PROJECT_ID,
                                                   location=self.location,
                                                   product=product_id)
        response = self.ps_client.create_product(parent=self.location_path,
                                                 product=product,
                                                 product_id=product_id)
        self.products_to_delete.append(response.name)
        self.assertEqual(response.name, product_path)
        # Verify Products can be listed.
        products_iterator = self.ps_client.list_products(
            parent=self.location_path)
        products_exist = False
        for product in products_iterator:
            products_exist = True
            break
        self.assertTrue(products_exist)
Example #58
    def test_update_product(self):
        # Create a Product.
        product = vision.types.Product(display_name="product display name",
                                       product_category="apparel")
        product_id = "product" + unique_resource_id()
        product_path = self.ps_client.product_path(project=PROJECT_ID,
                                                   location=self.location,
                                                   product=product_id)
        response = self.ps_client.create_product(parent=self.location_path,
                                                 product=product,
                                                 product_id=product_id)
        self.products_to_delete.append(response.name)
        self.assertEqual(response.name, product_path)
        # Update the Product.
        new_display_name = "updated product name"
        updated_product_request = vision.types.Product(
            name=product_path, display_name=new_display_name)
        update_mask = vision.types.FieldMask(paths=["display_name"])
        updated_product = self.ps_client.update_product(
            product=updated_product_request, update_mask=update_mask)
        self.assertEqual(updated_product.display_name, new_display_name)
Example #59
def test_document_set_w_int_field(client, cleanup):
    document_id = "set-int-key" + unique_resource_id("-")
    document = client.document("i-did-it", document_id)
    # Add to clean-up before API request (in case ``set()`` fails).
    cleanup(document)

    # 0. Make sure the document doesn't exist yet
    snapshot = document.get()
    assert not snapshot.exists

    # 1. Use ``create()`` to create the document.
    before = {"testing": "1"}
    document.create(before)

    # 2. Replace using ``set()``.
    data = {"14": {"status": "active"}}
    document.set(data)

    # 3. Verify replaced data.
    snapshot1 = document.get()
    assert snapshot1.to_dict() == data
Example #60
def test_collection_group_queries_filters(client):
    collection_group = "b" + unique_resource_id("-")

    doc_paths = [
        "a/a/" + collection_group + "/cg-doc1",
        "a/b/a/b/" + collection_group + "/cg-doc2",
        "a/b/" + collection_group + "/cg-doc3",
        "a/b/c/d/" + collection_group + "/cg-doc4",
        "a/c/" + collection_group + "/cg-doc5",
        collection_group + "/cg-doc6",
        "a/b/nope/nope",
    ]

    batch = client.batch()

    for index, doc_path in enumerate(doc_paths):
        doc_ref = client.document(doc_path)
        batch.set(doc_ref, {"x": index})

    batch.commit()

    query = (client.collection_group(collection_group).where(
        firestore.field_path.FieldPath.document_id(), ">=",
        client.document("a/b")).where(
            firestore.field_path.FieldPath.document_id(), "<=",
            client.document("a/b0")))
    snapshots = list(query.stream())
    found = set(snapshot.id for snapshot in snapshots)
    assert found == set(["cg-doc2", "cg-doc3", "cg-doc4"])

    query = (client.collection_group(collection_group).where(
        firestore.field_path.FieldPath.document_id(), ">",
        client.document("a/b")).where(
            firestore.field_path.FieldPath.document_id(),
            "<",
            client.document("a/b/{}/cg-doc3".format(collection_group)),
        ))
    snapshots = list(query.stream())
    found = set(snapshot.id for snapshot in snapshots)
    assert found == set(["cg-doc2"])