Example #1
def test_bigtable_delete_cluster():
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    cluster_id = "clus-my-" + unique_resource_id("-")
    cluster = instance.cluster(
        cluster_id,
        location_id=ALT_LOCATION_ID,
        serve_nodes=SERVER_NODES,
        default_storage_type=STORAGE_TYPE,
    )
    operation = cluster.create()
    # We want to make sure the operation completes.
    operation.result(timeout=1000)

    # [START bigtable_delete_cluster]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    cluster_to_delete = instance.cluster(cluster_id)

    cluster_to_delete.delete()
    # [END bigtable_delete_cluster]
    assert not cluster_to_delete.exists()
Example #2
def test_bigtable_delete_instance():
    # [START bigtable_delete_instance]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance_id_to_delete = "inst-my-" + unique_resource_id("-")
    # [END bigtable_delete_instance]

    cluster_id = "clus-my-" + unique_resource_id("-")

    instance = client.instance(
        instance_id_to_delete, instance_type=PRODUCTION, labels=LABELS
    )
    cluster = instance.cluster(
        cluster_id,
        location_id=ALT_LOCATION_ID,
        serve_nodes=SERVER_NODES,
        default_storage_type=STORAGE_TYPE,
    )
    operation = instance.create(clusters=[cluster])
    # We want to make sure the operation completes.
    operation.result(timeout=100)

    # [START bigtable_delete_instance]
    instance_to_delete = client.instance(instance_id_to_delete)
    instance_to_delete.delete()
    # [END bigtable_delete_instance]

    assert not instance_to_delete.exists()
Example #3
def test_bigtable_set_iam_policy_then_get_iam_policy():
    # [START bigtable_set_iam_policy]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable.policy import Policy
    from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE

    # [END bigtable_set_iam_policy]

    service_account_email = Config.CLIENT._credentials.service_account_email

    # [START bigtable_set_iam_policy]
    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    instance.reload()
    new_policy = Policy()
    new_policy[BIGTABLE_ADMIN_ROLE] = [Policy.service_account(service_account_email)]

    policy_latest = instance.set_iam_policy(new_policy)
    # [END bigtable_set_iam_policy]

    assert len(policy_latest.bigtable_admins) > 0

    # [START bigtable_get_iam_policy]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    policy = instance.get_iam_policy()
    # [END bigtable_get_iam_policy]

    assert len(policy.bigtable_admins) > 0
Example #4
def test_bigtable_delete_instance():
    from google.cloud.bigtable import Client

    client = Client(admin=True)

    instance = client.instance("inst-my-123", instance_type=PRODUCTION, labels=LABELS)
    cluster = instance.cluster(
        "clus-my-123",
        location_id=ALT_LOCATION_ID,
        serve_nodes=1,
        default_storage_type=STORAGE_TYPE,
    )
    operation = instance.create(clusters=[cluster])

    # Make sure this instance gets deleted after the test case.
    INSTANCES_TO_DELETE.append(instance)

    # We want to make sure the operation completes.
    operation.result(timeout=100)

    # [START bigtable_delete_instance]
    from google.cloud.bigtable import Client

    client = Client(admin=True)

    instance_id = "inst-my-123"
    instance_to_delete = client.instance(instance_id)
    instance_to_delete.delete()
    # [END bigtable_delete_instance]

    assert not instance_to_delete.exists()
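A note on cleanup: INSTANCES_TO_DELETE only guarantees cleanup if something drains it once the tests finish. A minimal teardown sketch, assuming INSTANCES_TO_DELETE is a module-level list populated as above:

def teardown_module():
    # Delete any instances queued for cleanup, skipping those the
    # test body already removed.
    for instance in INSTANCES_TO_DELETE:
        if instance.exists():
            instance.delete()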
def test_bigtable_row_append_cell_value():
    row = Config.TABLE.row(ROW_KEY1)

    cell_val1 = b"1"
    row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val1)
    row.commit()

    # [START bigtable_row_append_cell_value]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)
    row = table.row(ROW_KEY1, append=True)

    cell_val2 = b"2"
    row.append_cell_value(COLUMN_FAMILY_ID, COL_NAME1, cell_val2)
    # [END bigtable_row_append_cell_value]
    row.commit()

    row_data = table.read_row(ROW_KEY1)
    actual_value = row_data.cell_value(COLUMN_FAMILY_ID, COL_NAME1)
    assert actual_value == cell_val1 + cell_val2

    # [START bigtable_row_commit]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)
    row = Config.TABLE.row(ROW_KEY2)
    cell_val = 1
    row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val)
    row.commit()
    # [END bigtable_row_commit]

    # [START bigtable_row_increment_cell_value]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)
    row = table.row(ROW_KEY2, append=True)

    int_val = 3
    row.increment_cell_value(COLUMN_FAMILY_ID, COL_NAME1, int_val)
    # [END bigtable_row_increment_cell_value]
    row.commit()

    row_data = table.read_row(ROW_KEY2)
    actual_value = row_data.cell_value(COLUMN_FAMILY_ID, COL_NAME1)

    import struct

    _PACK_I64 = struct.Struct(">q").pack
    assert actual_value == _PACK_I64(cell_val + int_val)
    table.truncate(timeout=200)
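Bigtable stores counters written by increment_cell_value() as 8-byte big-endian signed integers, which is why the assertion above packs with ">q". A minimal sketch of decoding the counter back into a Python int, reusing the names from this example (it would run before the truncate above):

import struct

# Read the cell back and unpack the 8-byte big-endian signed integer
# maintained by the server for increment_cell_value().
row_data = table.read_row(ROW_KEY2)
raw_value = row_data.cell_value(COLUMN_FAMILY_ID, COL_NAME1)
(counter,) = struct.Struct(">q").unpack(raw_value)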
def test_bigtable_create_update_delete_column_family():
    # [START bigtable_create_column_family]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import column_family

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    column_family_id = "column_family_id1"
    gc_rule = column_family.MaxVersionsGCRule(2)
    column_family_obj = table.column_family(column_family_id, gc_rule=gc_rule)
    column_family_obj.create()

    # [END bigtable_create_column_family]
    column_families = table.list_column_families()
    assert column_families[column_family_id].gc_rule == gc_rule

    # [START bigtable_update_column_family]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import column_family

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    # Already existing column family id
    column_family_id = "column_family_id1"
    # Define the GC rule to retain data with max age of 5 days
    max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5))
    column_family_obj = table.column_family(column_family_id, gc_rule=max_age_rule)
    column_family_obj.update()
    # [END bigtable_update_column_family]

    updated_families = table.list_column_families()
    assert updated_families[column_family_id].gc_rule == max_age_rule

    # [START bigtable_delete_column_family]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import column_family

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    column_family_id = "column_family_id1"
    column_family_obj = table.column_family(column_family_id)
    column_family_obj.delete()
    # [END bigtable_delete_column_family]
    column_families = table.list_column_families()
    assert column_family_id not in column_families
def test_bigtable_row_setcell_commit_rowkey():
    # [START bigtable_row_set_cell]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_key = b"row_key_1"
    cell_val = b"cell-val"
    row_obj = table.row(row_key)
    row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val)
    # [END bigtable_row_set_cell]
    row_obj.commit()

    # [START bigtable_row_commit]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_key = b"row_key_2"
    cell_val = b"cell-val"
    row_obj = table.row(row_key)
    row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val)
    row_obj.commit()
    # [END bigtable_row_commit]

    actual_rows_keys = []
    for row in table.read_rows():
        actual_rows_keys.append(row.row_key)

    assert actual_rows_keys == [b"row_key_1", b"row_key_2"]

    # [START bigtable_row_row_key]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_key_id = b"row_key_2"
    row_obj = table.row(row_key_id)
    row_key = row_obj.row_key
    # [END bigtable_row_row_key]
    assert row_key == row_key_id
    table.truncate(timeout=300)
def test_bigtable_create_family_gc_intersection():
    # [START bigtable_create_family_gc_intersection]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import column_family

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    max_versions_rule = column_family.MaxVersionsGCRule(2)
    max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5))

    intersection_rule = column_family.GCRuleIntersection(
        [max_versions_rule, max_age_rule]
    )

    column_family_obj = table.column_family("cf4", intersection_rule)
    column_family_obj.create()

    # [END bigtable_create_family_gc_intersection]

    rule = str(column_family_obj.to_pb())
    assert "intersection" in rule
    assert "max_num_versions: 2" in rule
    assert "max_age" in rule
    assert "seconds: 432000" in rule
    column_family_obj.delete()
def test_bigtable_row_delete_cells():
    table_row_del_cells = Config.INSTANCE.table(TABLE_ID)
    row_key1 = b"row_key_1"
    row_obj = table_row_del_cells.row(row_key1)

    row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
    row_obj.commit()
    row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME2, CELL_VAL2)
    row_obj.commit()

    actual_rows_keys = []
    for row in table_row_del_cells.read_rows():
        actual_rows_keys.append(row.row_key)
    assert actual_rows_keys == [row_key1]

    # [START bigtable_row_delete_cells]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_key = b"row_key_1"
    row_obj = table.row(row_key)

    row_obj.delete_cells(COLUMN_FAMILY_ID, [COL_NAME1, COL_NAME2])
    row_obj.commit()
    # [END bigtable_row_delete_cells]

    for row in table.read_rows():
        assert not row.row_key
Example #10
def test_bigtable_row_delete_cell():
    # [START bigtable_row_delete_cell]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_key1 = b"row_key_1"
    row_obj = table.row(row_key1)
    # [END bigtable_row_delete_cell]

    row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
    row_obj.commit()

    row_key2 = b"row_key_2"
    row_obj = table.row(row_key2)
    row_obj.set_cell(COLUMN_FAMILY_ID2, COL_NAME2, CELL_VAL2)
    row_obj.commit()

    actual_rows_keys = []
    for row in table.read_rows():
        actual_rows_keys.append(row.row_key)
    assert actual_rows_keys == [row_key1, row_key2]

    # [START bigtable_row_delete_cell]
    row_obj.delete_cell(COLUMN_FAMILY_ID2, COL_NAME2)
    row_obj.commit()
    # [END bigtable_row_delete_cell]

    actual_rows_keys = []
    for row in table.read_rows():
        actual_rows_keys.append(row.row_key)
    assert actual_rows_keys == [row_key1]
    table.truncate(timeout=300)
Example #11
def test_bigtable_create_instance():
    # [START bigtable_create_prod_instance]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import enums

    my_instance_id = "inst-my-" + unique_resource_id("-")
    my_cluster_id = "clus-my-" + unique_resource_id("-")
    location_id = "us-central1-f"
    serve_nodes = 3
    storage_type = enums.StorageType.SSD
    production = enums.Instance.Type.PRODUCTION
    labels = {"prod-label": "prod-label"}

    client = Client(admin=True)
    instance = client.instance(my_instance_id, instance_type=production, labels=labels)
    cluster = instance.cluster(
        my_cluster_id,
        location_id=location_id,
        serve_nodes=serve_nodes,
        default_storage_type=storage_type,
    )
    operation = instance.create(clusters=[cluster])
    # We want to make sure the operation completes.
    operation.result(timeout=100)
    # [END bigtable_create_prod_instance]
    assert instance.exists()
    instance.delete()
Example #12
def test_bigtable_create_additional_cluster():
    # [START bigtable_create_cluster]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import enums

    # This assumes an instance with `INSTANCE_ID` already exists on the
    # server. To create an instance, see
    # https://cloud.google.com/bigtable/docs/creating-instance

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)

    cluster_id = "clus-my-" + unique_resource_id("-")
    location_id = "us-central1-a"
    serve_nodes = 3
    storage_type = enums.StorageType.SSD

    cluster = instance.cluster(
        cluster_id,
        location_id=location_id,
        serve_nodes=serve_nodes,
        default_storage_type=storage_type,
    )
    operation = cluster.create()
    # We want to make sure the operation completes.
    operation.result(timeout=100)
    # [END bigtable_create_cluster]
    assert cluster.exists()

    cluster.delete()
def test_bigtable_table_row():
    # [START bigtable_table_row]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_keys = [b"row_key_1", b"row_key_2"]
    row1_obj = table.row(row_keys[0])
    row2_obj = table.row(row_keys[1])
    # [END bigtable_table_row]

    row1_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
    row1_obj.commit()
    row2_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
    row2_obj.commit()

    actual_rows_keys = []
    for row in table.read_rows():
        actual_rows_keys.append(row.row_key)

    assert actual_rows_keys == row_keys

    table.truncate(timeout=300)
def test_bigtable_row_delete():
    table_row_del = Config.INSTANCE.table(TABLE_ID)
    row_obj = table_row_del.row(b"row_key_1")
    row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val")
    row_obj.commit()
    actual_rows_keys = []
    for row in table_row_del.read_rows():
        actual_rows_keys.append(row.row_key)
    assert actual_rows_keys == [b"row_key_1"]

    # [START bigtable_row_delete]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_key = b"row_key_1"
    row_obj = table.row(row_key)

    row_obj.delete()
    row_obj.commit()
    # [END bigtable_row_delete]

    actual_rows_keys = []
    for row in table.read_rows():
        actual_rows_keys.append(row.row_key)
    assert len(actual_rows_keys) == 0
def test_bigtable_row_setcell_rowkey():
    # [START bigtable_row_set_cell]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)
    row = table.row(ROW_KEY1)

    cell_val = b"cell-val"
    row.set_cell(
        COLUMN_FAMILY_ID, COL_NAME1, cell_val, timestamp=datetime.datetime.utcnow()
    )
    # [END bigtable_row_set_cell]

    response = table.mutate_rows([row])
    # Validate that all rows were written successfully.
    for i, status in enumerate(response):
        assert status.code == 0

    # [START bigtable_row_row_key]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row = table.row(ROW_KEY1)
    row_key = row.row_key
    # [END bigtable_row_row_key]
    assert row_key == ROW_KEY1

    # [START bigtable_row_table]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row = table.row(ROW_KEY1)
    table1 = row.table
    # [END bigtable_row_table]

    assert table1 == table
    table.truncate(timeout=200)
Example #16
def test_bigtable_list_tables():
    # [START bigtable_list_tables]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    tables_list = instance.list_tables()
    # [END bigtable_list_tables]
    assert len(tables_list) > 0
Example #17
def test_bigtable_reload_instance():
    # [START bigtable_reload_instance]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    instance.reload()
    # [END bigtable_reload_instance]
    assert instance.type_ == PRODUCTION.value
Example #18
def test_bigtable_instance_state():
    # [START bigtable_instance_state]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    instance_state = instance.state
    # [END bigtable_instance_state]
    assert not instance_state
Example #19
def test_bigtable_list_clusters_on_instance():
    # [START bigtable_list_clusters_on_instance]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    (clusters_list, failed_locations_list) = instance.list_clusters()
    # [END bigtable_list_clusters_on_instance]
    assert len(clusters_list) > 0
Example #20
def test_bigtable_instance_exists():
    # [START bigtable_check_instance_exists]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    instance_exists = instance.exists()
    # [END bigtable_check_instance_exists]
    assert instance_exists
Example #21
def test_bigtable_cluster_exists():
    # [START bigtable_check_cluster_exists]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    cluster = instance.cluster(CLUSTER_ID)
    cluster_exists = cluster.exists()
    # [END bigtable_check_cluster_exists]
    assert cluster_exists
Example #22
def test_bigtable_reload_cluster():
    # [START bigtable_reload_cluster]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    cluster = instance.cluster(CLUSTER_ID)
    cluster.reload()
    # [END bigtable_reload_cluster]
    assert cluster.serve_nodes == SERVER_NODES
Example #23
def test_bigtable_update_instance():
    # [START bigtable_update_instance]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    display_name = "My new instance"
    instance.display_name = display_name
    instance.update()
    # [END bigtable_update_instance]
    assert instance.display_name == display_name
Example #24
def test_bigtable_update_cluster():
    # [START bigtable_update_cluster]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    cluster = instance.cluster(CLUSTER_ID)
    cluster.serve_nodes = 4
    cluster.update()
    # [END bigtable_update_cluster]
    assert cluster.serve_nodes == 4
Example #25
def test_bigtable_cluster_state():
    # [START bigtable_cluster_state]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    cluster = instance.cluster(CLUSTER_ID)
    cluster_state = cluster.state
    # [END bigtable_cluster_state]

    assert not cluster_state
Example #26
def test_bigtable_test_iam_permissions():
    # [START bigtable_test_iam_permissions]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    instance.reload()
    permissions = ["bigtable.clusters.create", "bigtable.tables.create"]
    permissions_allowed = instance.test_iam_permissions(permissions)
    # [END bigtable_test_iam_permissions]

    assert permissions_allowed == permissions
def test_bigtable_table_column_family():
    # [START bigtable_table_column_family]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)

    table = instance.table(TABLE_ID)
    column_family_obj = table.column_family(COLUMN_FAMILY_ID)
    # [END bigtable_table_column_family]

    assert column_family_obj.column_family_id == COLUMN_FAMILY_ID
def test_bigtable_get_cluster_states():
    # [START bigtable_get_cluster_states]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    get_cluster_states = table.get_cluster_states()
    # [END bigtable_get_cluster_states]

    assert CLUSTER_ID in get_cluster_states
def test_bigtable_list_column_families():
    # [START bigtable_list_column_families]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    column_family_list = table.list_column_families()
    # [END bigtable_list_column_families]

    assert len(column_family_list) > 0
Example #30
def test_bigtable_create_table():
    # [START bigtable_create_table]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import column_family

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table("table_my")
    # Define the GC policy to retain only the most recent 2 versions.
    max_versions_rule = column_family.MaxVersionsGCRule(2)
    table.create(column_families={"cf1": max_versions_rule})
    # [END bigtable_create_table]
    assert table.exists()
Example #31
def test_bigtable_delete_table():
    # [START bigtable_delete_table]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table("table_id_del")
    # [END bigtable_delete_table]

    table.create()
    assert table.exists()

    # [START bigtable_delete_table]
    table.delete()
    # [END bigtable_delete_table]
    assert not table.exists()
Example #32
def test_bigtable_list_app_profiles():
    app_profile = Config.INSTANCE.app_profile(
        app_profile_id="app-prof-" + unique_resource_id("-"),
        routing_policy_type=enums.RoutingPolicyType.ANY,
    )
    app_profile = app_profile.create(ignore_warnings=True)

    # [START bigtable_list_app_profiles]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)

    app_profiles_list = instance.list_app_profiles()
    # [END bigtable_list_app_profiles]
    assert len(app_profiles_list) > 0
Example #33
def test_bigtable_table_name():
    import re

    # [START bigtable_table_name]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)

    table = instance.table(TABLE_ID)
    table_name = table.name
    # [END bigtable_table_name]
    _table_name_re = re.compile(r"^projects/(?P<project>[^/]+)/"
                                r"instances/(?P<instance>[^/]+)/tables/"
                                r"(?P<table_id>[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$")
    assert _table_name_re.match(table_name)
Example #34
def test_bigtable_table_test_iam_permissions():
    table_policy = Config.INSTANCE.table("table_id_iam_policy")
    table_policy.create()
    assert table_policy.exists()

    # [START bigtable_table_test_iam_permissions]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table("table_id_iam_policy")

    permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"]
    permissions_allowed = table.test_iam_permissions(permissions)
    # [END bigtable_table_test_iam_permissions]
    assert permissions_allowed == permissions
Example #35
def test_bigtable_create_table():
    # [START bigtable_create_table]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import column_family

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table("table_my")
    # Define the GC policy to retain only the most recent 2 versions.
    max_versions_rule = column_family.MaxVersionsGCRule(2)
    table.create(column_families={"cf1": max_versions_rule})
    # [END bigtable_create_table]

    try:
        assert table.exists()
    finally:
        retry_429(table.delete)()
Example #36
def test_bigtable_instance_from_pb():
    # [START bigtable_instance_from_pb]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable_admin_v2.types import instance_pb2

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)

    name = instance.name
    instance_pb = instance_pb2.Instance(name=name,
                                        display_name=INSTANCE_ID,
                                        type=PRODUCTION,
                                        labels=LABELS)

    instance2 = instance.from_pb(instance_pb, client)
    # [END bigtable_instance_from_pb]
    assert instance2.name == instance.name
Example #37
def test_bigtable_sample_row_keys():
    # [START bigtable_sample_row_keys]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)

    table = instance.table("table_id1_samplerow")
    # [END bigtable_sample_row_keys]
    initial_split_keys = [b"split_key_1", b"split_key_10", b"split_key_20"]
    table.create(initial_split_keys=initial_split_keys)
    # [START bigtable_sample_row_keys]
    data = table.sample_row_keys()
    actual_keys, offset = zip(*[(rk.row_key, rk.offset_bytes) for rk in data])
    # [END bigtable_sample_row_keys]
    initial_split_keys.append(b"")
    assert list(actual_keys) == initial_split_keys
    table.delete()
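Each sample returned by sample_row_keys() also carries offset_bytes, the approximate byte offset at which that key falls in the table, so the last sample approximates the table's total size. A minimal sketch, reusing the table from the example above (before it is deleted):

samples = list(table.sample_row_keys())
if samples:
    # The final sample's offset approximates the table size in bytes.
    approx_size = samples[-1].offset_bytes
    print("Approximate table size: {} bytes".format(approx_size))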
Example #38
def test_bigtable_cluster_name():
    import re

    # [START bigtable_cluster_name]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    cluster = instance.cluster(CLUSTER_ID)
    cluster_name = cluster.name
    # [END bigtable_cluster_name]

    _cluster_name_re = re.compile(r"^projects/(?P<project>[^/]+)/"
                                  r"instances/(?P<instance>[^/]+)/"
                                  r"clusters/(?P<cluster_id>"
                                  r"[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$")

    assert _cluster_name_re.match(cluster_name)
Example #39
def test_bigtable_set_iam_policy():
    # [START bigtable_set_iam_policy]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable.policy import Policy
    from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    instance.reload()
    ins_policy = Policy()
    ins_policy[BIGTABLE_ADMIN_ROLE] = [
        Policy.user("*****@*****.**"),
        Policy.service_account("*****@*****.**")]

    policy_latest = instance.set_iam_policy(ins_policy)
    # [END bigtable_set_iam_policy]

    assert len(policy_latest.bigtable_admins) > 0
Example #40
def test_bigtable_mutations_batcher():
    # [START bigtable_mutations_batcher]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)
    batcher = table.mutations_batcher()
    # [END bigtable_mutations_batcher]

    # The code below will be reused when creating the batcher.py snippets,
    # so it is kept here for now.
    row_keys = [b'row_key_1', b'row_key_2', b'row_key_3', b'row_key_4',
                b'row_key_20', b'row_key_22', b'row_key_200']
    column_name = 'column_name'.encode()
    # Add a single row
    row_key = row_keys[0]
    row = table.row(row_key)
    row.set_cell(COLUMN_FAMILY_ID,
                 column_name,
                 'value-0',
                 timestamp=datetime.datetime.utcnow())
    batcher.mutate(row)
    # Add a collection of rows
    rows = []
    for i in range(1, len(row_keys)):
        row = table.row(row_keys[i])
        value = 'value_{}'.format(i).encode()
        row.set_cell(COLUMN_FAMILY_ID,
                     column_name,
                     value,
                     timestamp=datetime.datetime.utcnow())
        rows.append(row)
    batcher.mutate_rows(rows)
    # The batcher flushes the current batch automatically once it
    # reaches the maximum flush_count.

    # Manually send the current batch to Cloud Bigtable
    batcher.flush()
    rows_on_table = []
    for row in table.read_rows():
        rows_on_table.append(row.row_key)
    assert len(rows_on_table) == len(row_keys)
    table.truncate(timeout=200)
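The flush_count mentioned in the comments above can also be set when the batcher is constructed; a minimal sketch reusing the table from the example above (the value 100 is an arbitrary illustration):

# Flush automatically once 100 rows have been buffered.
batcher = table.mutations_batcher(flush_count=100)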
Example #41
def test_bigtable_viewers_policy():
    service_account_email = Config.CLIENT._credentials.service_account_email

    # [START bigtable_viewers_policy]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable.policy import Policy
    from google.cloud.bigtable.policy import BIGTABLE_VIEWER_ROLE

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    instance.reload()
    new_policy = Policy()
    new_policy[BIGTABLE_VIEWER_ROLE] = [Policy.service_account(service_account_email)]

    policy_latest = instance.set_iam_policy(new_policy)
    policy = policy_latest.bigtable_viewers
    # [END bigtable_viewers_policy]

    assert len(policy) > 0
Example #42
def test_bigtable_column_family_name():
    # [START bigtable_column_family_name]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    column_families = table.list_column_families()
    column_family_obj = column_families[COLUMN_FAMILY_ID]
    column_family_name = column_family_obj.name
    # [END bigtable_column_family_name]
    import re

    _cf_name_re = re.compile(r"^projects/(?P<project>[^/]+)/"
                             r"instances/(?P<instance>[^/]+)/tables/"
                             r"(?P<table>[^/]+)/columnFamilies/"
                             r"(?P<cf_id>[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$")
    assert _cf_name_re.match(column_family_name)
Example #43
def test_bigtable_create_family_gc_max_versions():
    # [START bigtable_create_family_gc_max_versions]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import column_family

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    # Define the GC policy to retain only the most recent 2 versions
    max_versions_rule = column_family.MaxVersionsGCRule(2)

    column_family_obj = table.column_family("cf2", max_versions_rule)
    column_family_obj.create()

    # [END bigtable_create_family_gc_max_versions]
    rule = str(column_family_obj.to_pb())
    assert "max_num_versions: 2" in rule
    column_family_obj.delete()
Example #44
def test_bigtable_row_clear_get_mutations_size():
    # [START bigtable_row_get_mutations_size]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_key_id = b"row_key_1"
    row_obj = table.row(row_key_id)

    mutation_size = row_obj.get_mutations_size()
    # [END bigtable_row_get_mutations_size]
    row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val")
    mutation_size = row_obj.get_mutations_size()
    assert mutation_size > 0

    row_obj.clear()
    mutation_size = row_obj.get_mutations_size()
    assert mutation_size == 0
Example #45
def test_bigtable_create_family_gc_max_age():
    # [START bigtable_create_family_gc_max_age]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import column_family

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    # Define the GC rule to retain data with max age of 5 days
    max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5))

    column_family_obj = table.column_family("cf1", max_age_rule)
    column_family_obj.create()

    # [END bigtable_create_family_gc_max_age]
    rule = str(column_family_obj.to_pb())
    assert "max_age" in rule
    assert "seconds: 432000" in rule
    column_family_obj.delete()
Example #46
def test_bigtable_create_app_profile():
    # [START bigtable_create_app_profile]
    from google.cloud.bigtable import Client
    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)

    app_profile_id = "app-prof-" + unique_resource_id('-')
    description = 'routing policy-multi'
    routing_policy_type = enums.RoutingPolicyType.ANY

    app_profile = instance.app_profile(app_profile_id=app_profile_id,
                                       routing_policy_type=routing_policy_type,
                                       description=description,
                                       cluster_id=CLUSTER_ID)

    app_profile = app_profile.create(ignore_warnings=True)
    # [END bigtable_create_app_profile]
    assert app_profile.exists()

    app_profile.delete(ignore_warnings=True)
Example #47
def test_bigtable_list_app_profiles():
    app_profile = Config.INSTANCE.app_profile(
        app_profile_id="app-prof-" + UNIQUE_SUFFIX,
        routing_policy_type=enums.RoutingPolicyType.ANY,
    )
    app_profile = app_profile.create(ignore_warnings=True)

    # [START bigtable_list_app_profiles]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)

    app_profiles_list = instance.list_app_profiles()
    # [END bigtable_list_app_profiles]

    try:
        assert len(app_profiles_list) > 0
    finally:
        retry_429_503(app_profile.delete)(ignore_warnings=True)
Example #48
def test_bigtable_delete_cluster():
    # [START bigtable_delete_cluster]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    cluster_id = "clus-my-" + unique_resource_id('-')
    # [END bigtable_delete_cluster]

    cluster = instance.cluster(cluster_id, location_id=ALT_LOCATION_ID,
                               serve_nodes=SERVER_NODES,
                               default_storage_type=STORAGE_TYPE)
    operation = cluster.create()
    # We want to make sure the operation completes.
    operation.result(timeout=1000)

    # [START bigtable_delete_cluster]
    cluster_to_delete = instance.cluster(cluster_id)
    cluster_to_delete.delete()
    # [END bigtable_delete_cluster]
    assert not cluster_to_delete.exists()
Example #49
def test_bigtable_cluster_from_pb():
    # [START bigtable_cluster_from_pb]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable_admin_v2.types import instance_pb2

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    cluster = instance.cluster(CLUSTER_ID)

    name = cluster.name
    cluster_state = cluster.state
    cluster_pb = instance_pb2.Cluster(
        name=name,
        location=LOCATION_ID,
        state=cluster_state,
        serve_nodes=SERVER_NODES,
        default_storage_type=STORAGE_TYPE,
    )

    cluster2 = cluster.from_pb(cluster_pb, instance)
    # [END bigtable_cluster_from_pb]
    assert cluster2.name == cluster.name
Example #50
def test_bigtable_create_table():
    # [START bigtable_create_table]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import column_family

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)

    # Create a table without column families.
    table1 = instance.table("table_id1")
    table1.create()

    # Create a table with column families.
    table2 = instance.table("table_id2")
    # Define the GC policy to retain only the most recent 2 versions.
    max_versions_rule = column_family.MaxVersionsGCRule(2)
    table2.create(column_families={"cf1": max_versions_rule})

    # [END bigtable_create_table]
    assert table1.exists()
    assert table2.exists()
    table1.delete()
    table2.delete()
Example #51
def test_bigtable_add_row_range_with_prefix():
    row_keys = [
        b"row_key_1",
        b"row_key_2",
        b"row_key_3",
        b"sample_row_key_1",
        b"sample_row_key_2",
    ]

    rows = []
    for row_key in row_keys:
        row = Config.TABLE.row(row_key)
        row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
        rows.append(row)
    Config.TABLE.mutate_rows(rows)

    # [START bigtable_add_row_range_with_prefix]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable.row_set import RowSet

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_set = RowSet()
    row_set.add_row_range_with_prefix("row")
    # [END bigtable_add_row_range_with_prefix]

    read_rows = table.read_rows(row_set=row_set)
    expected_row_keys = [
        b"row_key_1",
        b"row_key_2",
        b"row_key_3",
    ]
    found_row_keys = [row.row_key for row in read_rows]
    assert found_row_keys == expected_row_keys
    table.truncate(timeout=200)
Example #52
def test_bigtable_create_family_gc_nested():
    # [START bigtable_create_family_gc_nested]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import column_family

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    # Create a column family with nested GC policies.
    # Create a nested GC rule:
    # Drop cells that are beyond the 10 most recent versions
    # OR
    # Drop cells that are older than 5 days AND beyond the
    # 2 most recent versions
    rule1 = column_family.MaxVersionsGCRule(10)
    rule2 = column_family.GCRuleIntersection(
        [
            column_family.MaxAgeGCRule(datetime.timedelta(days=5)),
            column_family.MaxVersionsGCRule(2),
        ]
    )

    nested_rule = column_family.GCRuleUnion([rule1, rule2])

    column_family_obj = table.column_family("cf5", nested_rule)
    column_family_obj.create()

    # [END bigtable_create_family_gc_nested]

    rule = str(column_family_obj.to_pb())
    assert "intersection" in rule
    assert "max_num_versions: 2" in rule
    assert "max_age" in rule
    assert "seconds: 432000" in rule
    column_family_obj.delete()
Example #53
def test_bigtable_create_family_gc_union():
    # [START bigtable_create_family_gc_union]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import column_family

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    max_versions_rule = column_family.MaxVersionsGCRule(2)
    max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5))

    union_rule = column_family.GCRuleUnion([max_versions_rule, max_age_rule])

    column_family_obj = table.column_family("cf3", union_rule)
    column_family_obj.create()

    # [END bigtable_create_family_gc_union]
    rule = str(column_family_obj.to_pb())
    assert "union" in rule
    assert "max_age" in rule
    assert "seconds: 432000" in rule
    assert "max_num_versions: 2" in rule
    column_family_obj.delete()
Example #54
def test_bigtable_create_reload_delete_app_profile():
    import re

    # [START bigtable_create_app_profile]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import enums

    routing_policy_type = enums.RoutingPolicyType.ANY

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)

    description = "routing policy-multy"

    app_profile = instance.app_profile(
        app_profile_id=APP_PROFILE_ID,
        routing_policy_type=routing_policy_type,
        description=description,
        cluster_id=CLUSTER_ID,
    )

    app_profile = app_profile.create(ignore_warnings=True)
    # [END bigtable_create_app_profile]

    # [START bigtable_app_profile_name]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    app_profile = instance.app_profile(APP_PROFILE_ID)

    app_profile_name = app_profile.name
    # [END bigtable_app_profile_name]
    _profile_name_re = re.compile(r"^projects/(?P<project>[^/]+)/"
                                  r"instances/(?P<instance>[^/]+)/"
                                  r"appProfiles/(?P<appprofile_id>"
                                  r"[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$")
    assert _profile_name_re.match(app_profile_name)

    # [START bigtable_app_profile_exists]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    app_profile = instance.app_profile(APP_PROFILE_ID)

    app_profile_exists = app_profile.exists()
    # [END bigtable_app_profile_exists]
    assert app_profile_exists

    # [START bigtable_reload_app_profile]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    app_profile = instance.app_profile(APP_PROFILE_ID)

    app_profile.reload()
    # [END bigtable_reload_app_profile]
    assert app_profile.routing_policy_type == ROUTING_POLICY_TYPE

    # [START bigtable_update_app_profile]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    app_profile = instance.app_profile(APP_PROFILE_ID)
    app_profile.reload()

    description = "My new app profile"
    app_profile.description = description
    app_profile.update()
    # [END bigtable_update_app_profile]
    assert app_profile.description == description

    # [START bigtable_delete_app_profile]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    app_profile = instance.app_profile(APP_PROFILE_ID)
    app_profile.reload()

    app_profile.delete(ignore_warnings=True)
    # [END bigtable_delete_app_profile]
    assert not app_profile.exists()
Example #55
def test_bigtable_row_data_cells_cell_value_cell_values():

    value = b"value_in_col1"
    row = Config.TABLE.row(b"row_key_1")
    row.set_cell(
        COLUMN_FAMILY_ID, COL_NAME1, value, timestamp=datetime.datetime.utcnow()
    )
    row.commit()

    row.set_cell(
        COLUMN_FAMILY_ID, COL_NAME1, value, timestamp=datetime.datetime.utcnow()
    )
    row.commit()

    # [START bigtable_row_data_cells]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)
    row_key = "row_key_1"
    row_data = table.read_row(row_key)

    cells = row_data.cells
    # [END bigtable_row_data_cells]

    actual_cell_value = cells[COLUMN_FAMILY_ID][COL_NAME1][0].value
    assert actual_cell_value == value

    # [START bigtable_row_cell_value]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)
    row_key = "row_key_1"
    row_data = table.read_row(row_key)

    cell_value = row_data.cell_value(COLUMN_FAMILY_ID, COL_NAME1)
    # [END bigtable_row_cell_value]
    assert cell_value == value

    # [START bigtable_row_cell_values]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)
    row_key = "row_key_1"
    row_data = table.read_row(row_key)

    cell_values = row_data.cell_values(COLUMN_FAMILY_ID, COL_NAME1)
    # [END bigtable_row_cell_values]

    for actual_value, timestamp in cell_values:
        assert actual_value == value

    value2 = b"value_in_col2"
    row.set_cell(COLUMN_FAMILY_ID, COL_NAME2, value2)
    row.commit()

    # [START bigtable_row_find_cells]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)
    row_key = "row_key_1"
    row = table.read_row(row_key)

    cells = row.find_cells(COLUMN_FAMILY_ID, COL_NAME2)
    # [END bigtable_row_find_cells]

    assert cells[0].value == value2
    table.truncate(timeout=200)
Example #56
def test_bigtable_add_row_add_row_range_add_row_range_from_keys():
    row_keys = [
        b"row_key_1",
        b"row_key_2",
        b"row_key_3",
        b"row_key_4",
        b"row_key_5",
        b"row_key_6",
        b"row_key_7",
        b"row_key_8",
        b"row_key_9",
    ]

    rows = []
    for row_key in row_keys:
        row = Config.TABLE.row(row_key)
        row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
        rows.append(row)
    Config.TABLE.mutate_rows(rows)

    # [START bigtable_add_row_key]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable.row_set import RowSet

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_set = RowSet()
    row_set.add_row_key(b"row_key_5")
    # [END bigtable_add_row_key]

    read_rows = table.read_rows(row_set=row_set)
    expected_row_keys = [b"row_key_5"]
    found_row_keys = [row.row_key for row in read_rows]
    assert found_row_keys == expected_row_keys

    # [START bigtable_add_row_range]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable.row_set import RowSet
    from google.cloud.bigtable.row_set import RowRange

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_set = RowSet()
    row_set.add_row_range(RowRange(start_key=b"row_key_3", end_key=b"row_key_7"))
    # [END bigtable_add_row_range]

    read_rows = table.read_rows(row_set=row_set)
    expected_row_keys = [b"row_key_3", b"row_key_4", b"row_key_5", b"row_key_6"]
    found_row_keys = [row.row_key for row in read_rows]
    assert found_row_keys == expected_row_keys

    # [START bigtable_row_range_from_keys]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable.row_set import RowSet

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_set = RowSet()
    row_set.add_row_range_from_keys(start_key=b"row_key_3", end_key=b"row_key_7")
    # [END bigtable_row_range_from_keys]

    read_rows = table.read_rows(row_set=row_set)
    expected_row_keys = [b"row_key_3", b"row_key_4", b"row_key_5", b"row_key_6"]
    found_row_keys = [row.row_key for row in read_rows]
    assert found_row_keys == expected_row_keys

    table.truncate(timeout=200)
Example #57
def test_bigtable_write_read_drop_truncate():
    # [START bigtable_mutate_rows]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)
    row_keys = [
        b"row_key_1",
        b"row_key_2",
        b"row_key_3",
        b"row_key_4",
        b"row_key_20",
        b"row_key_22",
        b"row_key_200",
    ]
    col_name = b"col-name1"
    rows = []
    for i, row_key in enumerate(row_keys):
        value = "value_{}".format(i).encode()
        row = table.row(row_key)
        row.set_cell(
            COLUMN_FAMILY_ID, col_name, value, timestamp=datetime.datetime.utcnow()
        )
        rows.append(row)
    response = table.mutate_rows(rows)
    # Validate that all rows were written successfully.
    for i, status in enumerate(response):
        if status.code != 0:
            print("Row number {} failed to write".format(i))
    # [END bigtable_mutate_rows]
    assert len(response) == len(rows)
    # [START bigtable_read_row]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)
    row_key = "row_key_1"
    row = table.read_row(row_key)
    # [END bigtable_read_row]
    assert row.row_key.decode("utf-8") == row_key
    # [START bigtable_read_rows]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    # Read full table
    partial_rows = table.read_rows()
    read_rows = [row for row in partial_rows]
    # [END bigtable_read_rows]
    assert len(read_rows) == len(rows)
    # [START bigtable_drop_by_prefix]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)
    row_key_prefix = b"row_key_2"
    table.drop_by_prefix(row_key_prefix, timeout=200)
    # [END bigtable_drop_by_prefix]
    dropped_row_keys = [b"row_key_2", b"row_key_20", b"row_key_22", b"row_key_200"]
    for row in table.read_rows():
        assert row.row_key not in dropped_row_keys

    # [START bigtable_truncate_table]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)
    table.truncate(timeout=200)
    # [END bigtable_truncate_table]
    rows_data_after_truncate = []
    for row in table.read_rows():
        rows_data_after_truncate.append(row.row_key)
    assert rows_data_after_truncate == []
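read_rows() also accepts a filter_ argument for server-side filtering. A minimal sketch that keeps only the most recent cell per column, assuming the same INSTANCE_ID and TABLE_ID constants as above:

from google.cloud.bigtable import Client
from google.cloud.bigtable.row_filters import CellsColumnLimitFilter

client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
table = instance.table(TABLE_ID)

# Ask the server to return at most one (the latest) cell per column.
rows = table.read_rows(filter_=CellsColumnLimitFilter(1))
for row in rows:
    print(row.row_key)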
Example #58
def test_bigtable_batcher_mutate_flush_mutate_rows():
    # [START bigtable_batcher_mutate]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)
    # Create a batcher; the max_row_bytes=1024 argument is optional.
    batcher = table.mutations_batcher(max_row_bytes=1024)

    # Add a single row
    row_key = b"row_key_1"
    row = table.row(row_key)
    row.set_cell(
        COLUMN_FAMILY_ID, COL_NAME1, "value-0", timestamp=datetime.datetime.utcnow()
    )

    # mutate() flushes the current batch automatically if it
    # reaches max_row_bytes.
    batcher.mutate(row)
    batcher.flush()
    # [END bigtable_batcher_mutate]

    # [START bigtable_batcher_flush]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)
    # Create a batcher; the max_row_bytes=1024 argument is optional.
    batcher = table.mutations_batcher(max_row_bytes=1024)

    # Add a single row
    row_key = b"row_key"
    row = table.row(row_key)
    row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, "value-0")

    # mutate() flushes the current batch automatically if it
    # reaches max_row_bytes.
    batcher.mutate(row)
    batcher.flush()
    # [END bigtable_batcher_flush]

    rows_on_table = []
    for row in table.read_rows():
        rows_on_table.append(row.row_key)
    assert len(rows_on_table) == 2
    table.truncate(timeout=200)

    # [START bigtable_batcher_mutate_rows]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)
    batcher = table.mutations_batcher()

    row1 = table.row(b"row_key_1")
    row2 = table.row(b"row_key_2")
    row3 = table.row(b"row_key_3")
    row4 = table.row(b"row_key_4")

    row1.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val1")
    row2.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val2")
    row3.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val3")
    row4.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val4")

    batcher.mutate_rows([row1, row2, row3, row4])

    # The batcher flushes the current batch automatically once it
    # reaches the maximum flush_count.
    # Manually send the current batch to Cloud Bigtable:
    batcher.flush()
    # [END bigtable_batcher_mutate_rows]

    rows_on_table = []
    for row in table.read_rows():
        rows_on_table.append(row.row_key)
    assert len(rows_on_table) == 4
    table.truncate(timeout=200)
Example #59
def start_bundle(self):
    if self.table is None:
        client = Client(project=self.beam_options['project_id'])
        instance = client.instance(self.beam_options['instance_id'])
        self.table = instance.table(self.beam_options['table_id'])
    self.batcher = self.table.mutations_batcher()
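start_bundle() builds the client lazily so each worker creates its own connection and batcher. A natural counterpart is a finish_bundle() that flushes buffered mutations; the following is a hypothetical sketch, not part of the original connector:

def finish_bundle(self):
    # Flush any mutations still buffered before the bundle completes
    # (hypothetical companion to start_bundle above).
    if self.batcher is not None:
        self.batcher.flush()
        self.batcher = None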
Example #60
class BigtableIOWriteTest(unittest.TestCase):
    """ Bigtable Write Connector Test

  """
    DEFAULT_TABLE_PREFIX = "python-test"
    instance_id = DEFAULT_TABLE_PREFIX + "-" + str(uuid.uuid4())[:8]
    cluster_id = DEFAULT_TABLE_PREFIX + "-" + str(uuid.uuid4())[:8]
    table_id = DEFAULT_TABLE_PREFIX + "-" + str(uuid.uuid4())[:8]
    number = 500
    LOCATION_ID = "us-east1-b"

    def setUp(self):
        try:
            from google.cloud.bigtable import enums
            self.STORAGE_TYPE = enums.StorageType.HDD
            self.INSTANCE_TYPE = enums.Instance.Type.DEVELOPMENT
        except ImportError:
            self.STORAGE_TYPE = 2
            self.INSTANCE_TYPE = 2

        self.test_pipeline = TestPipeline(is_integration_test=True)
        self.runner_name = type(self.test_pipeline.runner).__name__
        self.project = self.test_pipeline.get_option('project')
        self.client = Client(project=self.project, admin=True)

        self._delete_old_instances()

        self.instance = self.client.instance(self.instance_id,
                                             instance_type=self.INSTANCE_TYPE,
                                             labels=LABELS)

        if not self.instance.exists():
            cluster = self.instance.cluster(
                self.cluster_id,
                self.LOCATION_ID,
                default_storage_type=self.STORAGE_TYPE)
            self.instance.create(clusters=[cluster])
        self.table = self.instance.table(self.table_id)

        if not self.table.exists():
            max_versions_rule = column_family.MaxVersionsGCRule(2)
            column_family_id = 'cf1'
            column_families = {column_family_id: max_versions_rule}
            self.table.create(column_families=column_families)

    def _delete_old_instances(self):
        instances = self.client.list_instances()
        EXISTING_INSTANCES[:] = instances

        def age_in_hours(micros):
            return (
                datetime.datetime.utcnow().replace(tzinfo=UTC) -
                (_datetime_from_microseconds(micros))).total_seconds() // 3600

        CLEAN_INSTANCE = [
            i for instance in EXISTING_INSTANCES for i in instance
            if (LABEL_KEY in i.labels.keys() and (
                age_in_hours(int(i.labels[LABEL_KEY])) >= 2))
        ]

        if CLEAN_INSTANCE:
            for instance in CLEAN_INSTANCE:
                instance.delete()

    def tearDown(self):
        if self.instance.exists():
            self.instance.delete()

    def test_bigtable_write(self):
        number = self.number
        pipeline_args = self.test_pipeline.options_list
        pipeline_options = PipelineOptions(pipeline_args)

        with beam.Pipeline(options=pipeline_options) as pipeline:
            config_data = {
                'project_id': self.project,
                'instance_id': self.instance,
                'table_id': self.table
            }
            _ = (pipeline
                 | 'Generate Direct Rows' >> GenerateTestRows(
                     number, **config_data))

        assert pipeline.result.state == PipelineState.DONE

        read_rows = self.table.read_rows()
        assert len([_ for _ in read_rows]) == number

        if not hasattr(pipeline.result, 'has_job') or pipeline.result.has_job:
            read_filter = MetricsFilter().with_name('Written Row')
            query_result = pipeline.result.metrics().query(read_filter)
            if query_result['counters']:
                read_counter = query_result['counters'][0]

                logging.info('Number of Rows: %d', read_counter.committed)
                assert read_counter.committed == number