Example #1
0
    def setUp(self):
        """Provision the Bigtable resources used by the integration test.

        Resolves storage/instance-type enums (with raw-value fallbacks when
        the bigtable client library is not importable), builds the test
        pipeline and admin client, purges stale instances, then creates the
        instance, cluster, and table if they do not already exist.
        """
        try:
            from google.cloud.bigtable import enums
            self.STORAGE_TYPE = enums.StorageType.HDD
            self.INSTANCE_TYPE = enums.Instance.Type.DEVELOPMENT
        except ImportError:
            # Raw fallback values mirroring the enum members above.
            self.STORAGE_TYPE = 2
            self.INSTANCE_TYPE = 2

        self.test_pipeline = TestPipeline(is_integration_test=True)
        self.runner_name = type(self.test_pipeline.runner).__name__
        self.project = self.test_pipeline.get_option('project')
        self.client = Client(project=self.project, admin=True)

        # Remove leftovers from earlier runs before creating fresh resources.
        self._delete_old_instances()

        self.instance = self.client.instance(self.instance_id,
                                             instance_type=self.INSTANCE_TYPE,
                                             labels=LABELS)

        if not self.instance.exists():
            cluster = self.instance.cluster(
                self.cluster_id,
                self.LOCATION_ID,
                default_storage_type=self.STORAGE_TYPE)
            operation = self.instance.create(clusters=[cluster])
            # NOTE(review): 10s is a tight deadline for instance creation
            # (other blocks in this file wait 100s) — confirm it suffices.
            operation.result(timeout=10)

        self.table = self.instance.table(self.table_id)

        if not self.table.exists():
            # Retain at most the two most recent versions of each cell.
            max_versions_rule = column_family.MaxVersionsGCRule(2)
            column_family_id = 'cf1'
            column_families = {column_family_id: max_versions_rule}
            self.table.create(column_families=column_families)
def test_bigtable_delete_instance():
    """Create a throwaway instance, then exercise the delete snippet."""
    from google.cloud.bigtable import Client

    admin_client = Client(admin=True)

    doomed = admin_client.instance(
        "inst-my-123", instance_type=PRODUCTION, labels=LABELS
    )
    doomed_cluster = doomed.cluster(
        "clus-my-123",
        location_id=ALT_LOCATION_ID,
        serve_nodes=1,
        default_storage_type=STORAGE_TYPE,
    )
    create_op = doomed.create(clusters=[doomed_cluster])

    # Register for teardown in case the assertions below fail.
    INSTANCES_TO_DELETE.append(doomed)

    # Block until the instance is fully created before deleting it.
    create_op.result(timeout=100)

    # [START bigtable_delete_instance]
    from google.cloud.bigtable import Client

    client = Client(admin=True)

    instance_id = "inst-my-123"
    instance_to_delete = client.instance(instance_id)
    instance_to_delete.delete()
    # [END bigtable_delete_instance]

    assert not instance_to_delete.exists()
Example #3
0
def test_bigtable_create_instance():
    """Run the prod-instance creation snippet, then remove the instance."""
    # [START bigtable_create_prod_instance]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import enums

    my_instance_id = "inst-my-" + unique_resource_id("-")
    my_cluster_id = "clus-my-" + unique_resource_id("-")
    location_id = "us-central1-f"
    serve_nodes = 3
    storage_type = enums.StorageType.SSD
    production = enums.Instance.Type.PRODUCTION
    labels = {"prod-label": "prod-label"}

    client = Client(admin=True)
    instance = client.instance(
        my_instance_id, instance_type=production, labels=labels
    )
    cluster = instance.cluster(
        my_cluster_id,
        location_id=location_id,
        serve_nodes=serve_nodes,
        default_storage_type=storage_type,
    )
    operation = instance.create(clusters=[cluster])

    # Make sure this instance gets deleted after the test case.
    INSTANCES_TO_DELETE.append(instance)

    # Block until the long-running create completes.
    operation.result(timeout=100)
    # [END bigtable_create_prod_instance]

    assert instance.exists()
    instance.delete()
def test_bigtable_delete_instance():
    """Create a uniquely-named instance, then delete it via the snippet."""
    # [START bigtable_delete_instance]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance_id_to_delete = "inst-my-" + unique_resource_id('-')
    # [END bigtable_delete_instance]
    cluster_id = "clus-my-" + unique_resource_id('-')

    doomed = client.instance(
        instance_id_to_delete, instance_type=PRODUCTION, labels=LABELS
    )
    doomed_cluster = doomed.cluster(
        cluster_id,
        location_id=ALT_LOCATION_ID,
        serve_nodes=SERVER_NODES,
        default_storage_type=STORAGE_TYPE,
    )
    # Block until the instance actually exists before deleting it.
    doomed.create(clusters=[doomed_cluster]).result(timeout=100)

    # [START bigtable_delete_instance]
    instance_to_delete = client.instance(instance_id_to_delete)
    instance_to_delete.delete()
    # [END bigtable_delete_instance]
    assert not instance_to_delete.exists()
Example #5
0
def test_bigtable_create_app_profile():
    """Create an app profile with ANY routing, then always remove it."""
    # [START bigtable_create_app_profile]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)

    routing_policy_type = enums.RoutingPolicyType.ANY
    description = "routing policy-multy"
    app_profile_id = "app-prof-" + UNIQUE_SUFFIX

    app_profile = instance.app_profile(
        app_profile_id=app_profile_id,
        routing_policy_type=routing_policy_type,
        description=description,
        cluster_id=CLUSTER_ID,
    )

    app_profile = app_profile.create(ignore_warnings=True)
    # [END bigtable_create_app_profile]

    try:
        assert app_profile.exists()
    finally:
        # Retry deletion on quota (429) errors so cleanup always happens.
        retry_429(app_profile.delete)(ignore_warnings=True)
Example #6
0
def test_bigtable_create_additional_cluster():
    """Attach an extra SSD cluster to the shared instance, then drop it."""
    # [START bigtable_create_cluster]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import enums

    # Assuming that there is an existing instance with `INSTANCE_ID`
    # on the server already.
    # to create an instance see
    # 'https://cloud.google.com/bigtable/docs/creating-instance'

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)

    cluster = instance.cluster(
        "clus-my-" + unique_resource_id("-"),
        location_id="us-central1-a",
        serve_nodes=3,
        default_storage_type=enums.StorageType.SSD,
    )
    operation = cluster.create()
    # Block until the new cluster is serving.
    operation.result(timeout=100)
    # [END bigtable_create_cluster]
    assert cluster.exists()

    cluster.delete()
def test_bigtable_row_delete():
    """Seed one row, then remove it with the Row.delete() snippet."""
    seed_table = Config.INSTANCE.table(TABLE_ID)
    seed_row = seed_table.row(b"row_key_1")
    seed_row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val")
    seed_row.commit()
    assert [r.row_key for r in seed_table.read_rows()] == [b"row_key_1"]

    # [START bigtable_row_delete]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_key = b"row_key_1"
    row_obj = table.row(row_key)

    row_obj.delete()
    row_obj.commit()
    # [END bigtable_row_delete]

    # The table must now be empty.
    assert [r.row_key for r in table.read_rows()] == []
def test_bigtable_delete_cluster():
    """Create a scratch cluster, then delete it via the snippet."""
    from google.cloud.bigtable import Client

    admin_client = Client(admin=True)
    target_instance = admin_client.instance(INSTANCE_ID)
    cluster_id = "clus-my-" + unique_resource_id("-")
    scratch_cluster = target_instance.cluster(
        cluster_id,
        location_id=ALT_LOCATION_ID,
        serve_nodes=SERVER_NODES,
        default_storage_type=STORAGE_TYPE,
    )
    # Block until the cluster is fully created.
    scratch_cluster.create().result(timeout=1000)

    # [START bigtable_delete_cluster]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    cluster_to_delete = instance.cluster(cluster_id)

    cluster_to_delete.delete()
    # [END bigtable_delete_cluster]
    assert not cluster_to_delete.exists()
Example #9
0
def test_bigtable_row_delete_cell():
    """Delete the single cell of row 2; the whole row disappears."""
    # [START bigtable_row_delete_cell]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_key1 = b"row_key_1"
    row_obj = table.row(row_key1)
    # [END bigtable_row_delete_cell]

    row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
    row_obj.commit()

    row_key2 = b"row_key_2"
    row_obj = table.row(row_key2)
    row_obj.set_cell(COLUMN_FAMILY_ID2, COL_NAME2, CELL_VAL2)
    row_obj.commit()

    assert [r.row_key for r in table.read_rows()] == [row_key1, row_key2]

    # [START bigtable_row_delete_cell]
    row_obj.delete_cell(COLUMN_FAMILY_ID2, COL_NAME2)
    row_obj.commit()
    # [END bigtable_row_delete_cell]

    # Row 2 held exactly one cell, so deleting it removes the row itself.
    assert [r.row_key for r in table.read_rows()] == [row_key1]
    table.truncate(timeout=300)
def test_bigtable_row_delete_cells():
    """Seed a row with two cells, delete both, and verify the row is gone."""
    seed_table = Config.INSTANCE.table(TABLE_ID)
    row_key1 = b"row_key_1"
    seed_row = seed_table.row(row_key1)

    seed_row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
    seed_row.commit()
    seed_row.set_cell(COLUMN_FAMILY_ID, COL_NAME2, CELL_VAL2)
    seed_row.commit()

    assert [r.row_key for r in seed_table.read_rows()] == [row_key1]

    # [START bigtable_row_delete_cells]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_key = b"row_key_1"
    row_obj = table.row(row_key)

    row_obj.delete_cells(COLUMN_FAMILY_ID, [COL_NAME1, COL_NAME2])
    row_obj.commit()
    # [END bigtable_row_delete_cells]

    for row in table.read_rows():
        assert not row.row_key
Example #11
0
def test_bigtable_set_iam_policy_then_get_iam_policy():
    """Round-trip an IAM policy: set admins, then read the policy back."""
    # [START bigtable_set_iam_policy]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable.policy import Policy
    from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE

    # [END bigtable_set_iam_policy]

    service_account_email = Config.CLIENT._credentials.service_account_email

    # [START bigtable_set_iam_policy]
    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    instance.reload()
    new_policy = Policy()
    new_policy[BIGTABLE_ADMIN_ROLE] = [
        Policy.service_account(service_account_email)
    ]

    policy_latest = instance.set_iam_policy(new_policy)
    # [END bigtable_set_iam_policy]

    assert len(policy_latest.bigtable_admins) > 0

    # [START bigtable_get_iam_policy]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    policy = instance.get_iam_policy()
    # [END bigtable_get_iam_policy]

    assert len(policy.bigtable_admins) > 0
Example #12
0
def test_bigtable_create_instance():
    """Create a PRODUCTION instance via the snippet, then delete it."""
    # [START bigtable_create_prod_instance]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import enums

    my_instance_id = "inst-my-" + unique_resource_id('-')
    my_cluster_id = "clus-my-" + unique_resource_id('-')
    location_id = 'us-central1-f'
    serve_nodes = 3
    storage_type = enums.StorageType.SSD
    production = enums.Instance.Type.PRODUCTION
    labels = {'prod-label': 'prod-label'}

    client = Client(admin=True)
    instance = client.instance(
        my_instance_id, instance_type=production, labels=labels
    )
    cluster = instance.cluster(
        my_cluster_id,
        location_id=location_id,
        serve_nodes=serve_nodes,
        default_storage_type=storage_type,
    )
    operation = instance.create(clusters=[cluster])
    # Block until creation finishes.
    operation.result(timeout=100)
    # [END bigtable_create_prod_instance]
    assert instance.exists()
    instance.delete()
def test_bigtable_table_row():
    """Materialize two direct rows, write them, and read the keys back."""
    # [START bigtable_table_row]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_keys = [b"row_key_1", b"row_key_2"]
    row1_obj = table.row(row_keys[0])
    row2_obj = table.row(row_keys[1])
    # [END bigtable_table_row]

    for row in (row1_obj, row2_obj):
        row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
        row.commit()

    assert [r.row_key for r in table.read_rows()] == row_keys

    table.truncate(timeout=300)
def test_bigtable_table_row():
    """Exercise Table.row() for two keys and confirm both commits land."""
    # [START bigtable_table_row]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_keys = [b"row_key_1", b"row_key_2"]
    row1_obj = table.row(row_keys[0])
    row2_obj = table.row(row_keys[1])
    # [END bigtable_table_row]

    row1_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
    row1_obj.commit()
    row2_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1)
    row2_obj.commit()

    written = [row.row_key for row in table.read_rows()]
    assert written == row_keys

    table.truncate(timeout=300)
Example #15
0
def test_bigtable_create_family_gc_intersection():
    """Create 'cf4' whose GC rule intersects a version and an age limit."""
    # [START bigtable_create_family_gc_intersection]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import column_family

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    intersection_rule = column_family.GCRuleIntersection([
        column_family.MaxVersionsGCRule(2),
        column_family.MaxAgeGCRule(datetime.timedelta(days=5)),
    ])

    column_family_obj = table.column_family("cf4", intersection_rule)
    column_family_obj.create()

    # [END bigtable_create_family_gc_intersection]

    serialized = str(column_family_obj.to_pb())
    assert "intersection" in serialized
    assert "max_num_versions: 2" in serialized
    assert "max_age" in serialized
    assert "seconds: 432000" in serialized  # 5 days
    column_family_obj.delete()
Example #16
0
def test_bigtable_create_family_gc_nested():
    """Create 'cf5' with a nested (union-of-intersection) GC rule.

    Fixes the in-snippet comment, which claimed an age limit of "a month"
    while the rule actually uses 5 days (432000 seconds, as the assertions
    below verify), and additionally asserts the serialized rule contains
    the `union` wrapper the nested rule produces.
    """
    # [START bigtable_create_family_gc_nested]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import column_family

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    # Create a column family with nested GC policies.
    # Create a nested GC rule:
    # Drop cells that are either older than the 10 recent versions
    # OR
    # Drop cells that are older than 5 days AND older than the
    # 2 recent versions
    rule1 = column_family.MaxVersionsGCRule(10)
    rule2 = column_family.GCRuleIntersection([
        column_family.MaxAgeGCRule(datetime.timedelta(days=5)),
        column_family.MaxVersionsGCRule(2),
    ])

    nested_rule = column_family.GCRuleUnion([rule1, rule2])

    column_family_obj = table.column_family("cf5", nested_rule)
    column_family_obj.create()

    # [END bigtable_create_family_gc_nested]

    rule = str(column_family_obj.to_pb())
    # The union wraps the intersection, so both must serialize.
    assert "union" in rule
    assert "intersection" in rule
    assert "max_num_versions: 2" in rule
    assert "max_age" in rule
    assert "seconds: 432000" in rule  # 5 days
    column_family_obj.delete()
Example #17
0
def test_bigtable_table_conditional_row():
    """Write through conditional rows (false branch) and verify the keys."""
    # [START bigtable_table_conditional_row]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable.row_filters import PassAllFilter

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_keys = [b"row_key_1", b"row_key_2"]
    filter_ = PassAllFilter(True)
    row1_obj = table.conditional_row(row_keys[0], filter_=filter_)
    row2_obj = table.conditional_row(row_keys[1], filter_=filter_)
    # [END bigtable_table_conditional_row]

    for conditional in (row1_obj, row2_obj):
        conditional.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1, state=False)
        conditional.commit()

    assert [row.row_key for row in table.read_rows()] == row_keys

    table.truncate(timeout=300)
Example #18
0
def test_bigtable_row_delete_cells():
    """Seed one row with two cells, then delete both via the snippet."""
    prep_table = Config.INSTANCE.table(TABLE_ID)
    row_key1 = b"row_key_1"
    prep_row = prep_table.row(row_key1)

    for col, val in ((COL_NAME1, CELL_VAL1), (COL_NAME2, CELL_VAL2)):
        prep_row.set_cell(COLUMN_FAMILY_ID, col, val)
        prep_row.commit()

    assert [r.row_key for r in prep_table.read_rows()] == [row_key1]

    # [START bigtable_row_delete_cells]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_key = b"row_key_1"
    row_obj = table.row(row_key)

    row_obj.delete_cells(COLUMN_FAMILY_ID, [COL_NAME1, COL_NAME2])
    row_obj.commit()
    # [END bigtable_row_delete_cells]

    for row in table.read_rows():
        assert not row.row_key
Example #19
0
def test_bigtable_row_delete():
    """Seed a row, then remove it entirely with Row.delete()."""
    staging_table = Config.INSTANCE.table(TABLE_ID)
    staging_row = staging_table.row(b"row_key_1")
    staging_row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val")
    staging_row.commit()
    assert [r.row_key for r in staging_table.read_rows()] == [b"row_key_1"]

    # [START bigtable_row_delete]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    row_key = b"row_key_1"
    row_obj = table.row(row_key)

    row_obj.delete()
    row_obj.commit()
    # [END bigtable_row_delete]

    # No rows may remain after the delete.
    assert not list(table.read_rows())
def test_bigtable_create_family_gc_intersection():
    """Create 'cf4' with an age+versions intersection GC rule."""
    # [START bigtable_create_family_gc_intersection]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import column_family

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    max_versions_rule = column_family.MaxVersionsGCRule(2)
    max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5))

    intersection_rule = column_family.GCRuleIntersection(
        [max_versions_rule, max_age_rule]
    )

    column_family_obj = table.column_family("cf4", intersection_rule)
    column_family_obj.create()

    # [END bigtable_create_family_gc_intersection]

    serialized = str(column_family_obj.to_pb())
    # 432000 seconds == 5 days.
    for fragment in ("intersection", "max_num_versions: 2",
                     "max_age", "seconds: 432000"):
        assert fragment in serialized
    column_family_obj.delete()
Example #21
0
def test_bigtable_create_instance():
    """Create a production instance and always clean it up afterwards."""
    # [START bigtable_create_prod_instance]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import enums

    my_instance_id = "inst-my-" + UNIQUE_SUFFIX
    my_cluster_id = "clus-my-" + UNIQUE_SUFFIX
    location_id = "us-central1-f"
    serve_nodes = 3
    storage_type = enums.StorageType.SSD
    production = enums.Instance.Type.PRODUCTION
    labels = {"prod-label": "prod-label"}

    client = Client(admin=True)
    instance = client.instance(
        my_instance_id, instance_type=production, labels=labels
    )
    cluster = instance.cluster(
        my_cluster_id,
        location_id=location_id,
        serve_nodes=serve_nodes,
        default_storage_type=storage_type,
    )
    operation = instance.create(clusters=[cluster])

    # Block until creation finishes.
    operation.result(timeout=100)

    # [END bigtable_create_prod_instance]

    try:
        assert instance.exists()
    finally:
        # Retry deletion on quota (429) errors so cleanup always happens.
        retry_429(instance.delete)()
Example #22
0
def test_bigtable_create_additional_cluster():
    """Add a second SSD cluster to the shared instance, then remove it."""
    # [START bigtable_create_cluster]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import enums

    # Assuming that there is an existing instance with `INSTANCE_ID`
    # on the server already.
    # to create an instance see
    # 'https://cloud.google.com/bigtable/docs/creating-instance'

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)

    cluster_id = "clus-my-" + unique_resource_id('-')
    location_id = 'us-central1-a'
    serve_nodes = 3
    storage_type = enums.StorageType.SSD

    cluster = instance.cluster(
        cluster_id,
        location_id=location_id,
        serve_nodes=serve_nodes,
        default_storage_type=storage_type,
    )
    operation = cluster.create()
    # Block until the new cluster is serving.
    operation.result(timeout=100)
    # [END bigtable_create_cluster]
    assert cluster.exists()

    cluster.delete()
Example #23
0
def test_bigtable_delete_instance():
    """Build an instance, then run the delete snippet against it."""
    # [START bigtable_delete_instance]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance_id_to_delete = "inst-my-" + unique_resource_id("-")
    # [END bigtable_delete_instance]

    cluster_id = "clus-my-" + unique_resource_id("-")

    scratch = client.instance(
        instance_id_to_delete, instance_type=PRODUCTION, labels=LABELS
    )
    scratch_cluster = scratch.cluster(
        cluster_id,
        location_id=ALT_LOCATION_ID,
        serve_nodes=SERVER_NODES,
        default_storage_type=STORAGE_TYPE,
    )
    # Block until creation completes before attempting deletion.
    scratch.create(clusters=[scratch_cluster]).result(timeout=100)

    # [START bigtable_delete_instance]
    instance_to_delete = client.instance(instance_id_to_delete)
    instance_to_delete.delete()
    # [END bigtable_delete_instance]

    assert not instance_to_delete.exists()
Example #24
0
def test_bigtable_create_instance():
    """Run the prod-instance creation snippet end to end, then delete."""
    # [START bigtable_create_prod_instance]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import enums

    my_instance_id = "inst-my-" + unique_resource_id("-")
    my_cluster_id = "clus-my-" + unique_resource_id("-")
    location_id = "us-central1-f"
    serve_nodes = 3
    storage_type = enums.StorageType.SSD
    production = enums.Instance.Type.PRODUCTION
    labels = {"prod-label": "prod-label"}

    client = Client(admin=True)
    instance = client.instance(
        my_instance_id,
        instance_type=production,
        labels=labels,
    )
    cluster = instance.cluster(
        my_cluster_id,
        location_id=location_id,
        serve_nodes=serve_nodes,
        default_storage_type=storage_type,
    )
    operation = instance.create(clusters=[cluster])
    # Wait for the long-running create to finish.
    operation.result(timeout=100)
    # [END bigtable_create_prod_instance]
    assert instance.exists()
    instance.delete()
Example #25
0
 def start_bundle(self):
     """Lazily bind the Bigtable table and create a mutations batcher."""
     if self.table is None:
         opts = self.beam_options
         bt_client = Client(project=opts['project_id'])
         bt_instance = bt_client.instance(opts['instance_id'])
         self.table = bt_instance.table(opts['table_id'])
     self.batcher = self.table.mutations_batcher(
         self.beam_options['flush_count'],
         self.beam_options['max_row_bytes'])
Example #26
0
def test_bigtable_list_instances():
    """Listing instances in the project must return at least one."""
    # [START bigtable_list_instances]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instances_list, failed_locations_list = client.list_instances()
    # [END bigtable_list_instances]
    assert len(instances_list) > 0
Example #27
0
def test_bigtable_list_clusters_in_project():
    """Listing clusters project-wide must return at least one."""
    # [START bigtable_list_clusters_in_project]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    clusters_list, failed_locations_list = client.list_clusters()
    # [END bigtable_list_clusters_in_project]
    assert len(clusters_list) > 0
Example #28
0
 def start_bundle(self):
     """Resolve the target table on first use and (re)create the batcher."""
     opts = self.beam_options
     if self.table is None:
         client = Client(project=opts['project_id'])
         instance = client.instance(opts['instance_id'])
         self.table = instance.table(opts['table_id'])
     self.batcher = self.table.mutations_batcher(
         opts['flush_count'], opts['max_row_bytes'])
Example #29
0
def test_bigtable_instance_state():
    """A freshly constructed instance handle exposes no state."""
    # [START bigtable_instance_state]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    instance_state = instance.state
    # [END bigtable_instance_state]
    # No reload() was issued, so the local handle's state is unset.
    assert not instance_state
Example #30
0
  def __init__(self, project_id, instance_id, table_id):
    """Record target identifiers and build an admin Client.

    Defaults to HDD storage and a DEVELOPMENT instance type.
    """
    from google.cloud.bigtable import enums

    self.project_id = project_id
    self.instance_id = instance_id
    self.table_id = table_id
    self.STORAGE_TYPE = enums.StorageType.HDD
    self.INSTANCE_TYPE = enums.Instance.Type.DEVELOPMENT
    self.client = Client(project=project_id, admin=True)
Example #31
0
def test_bigtable_list_tables():
    """The shared instance must expose at least one table."""
    # [START bigtable_list_tables]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    tables_list = instance.list_tables()
    # [END bigtable_list_tables]
    assert len(tables_list) > 0
Example #32
0
def test_bigtable_instance_name():
    # NOTE(review): this function appears truncated — `import re` is never
    # used, there is no closing [END bigtable_instance_name] marker, and no
    # assertion on `instance_name`; confirm against the upstream source.
    import re

    # [START bigtable_instance_name]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    instance_name = instance.name
def test_bigtable_list_tables():
    """The shared instance must expose at least one table.

    Fixes the assertion: ``len(...) is not 0`` compares object identity,
    not value (a SyntaxWarning on CPython 3.8+ and unreliable by contract);
    a numeric comparison is used instead.
    """
    # [START bigtable_list_tables]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    tables_list = instance.list_tables()
    # [END bigtable_list_tables]
    assert len(tables_list) > 0
def test_bigtable_instance_state():
    """Instance.state is unset on a locally constructed handle."""
    # [START bigtable_instance_state]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    instance_state = instance.state
    # [END bigtable_instance_state]
    # The handle was never reload()ed, so state stays falsy.
    assert not instance_state
Example #35
0
def test_bigtable_instance_exists():
    """Instance.exists() reports True for the shared test instance."""
    # [START bigtable_check_instance_exists]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    instance_exists = instance.exists()
    # [END bigtable_check_instance_exists]
    assert instance_exists
Example #36
0
def test_bigtable_reload_instance():
    """Instance.reload() populates server-side fields such as type_."""
    # [START bigtable_reload_instance]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    instance.reload()
    # [END bigtable_reload_instance]
    assert instance.type_ == PRODUCTION.value
Example #37
0
def test_bigtable_list_clusters_on_instance():
    """Listing clusters on the shared instance returns at least one."""
    # [START bigtable_list_clusters_on_instance]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    clusters_list, failed_locations_list = instance.list_clusters()
    # [END bigtable_list_clusters_on_instance]
    assert len(clusters_list) > 0
Example #38
0
def test_bigtable_reload_cluster():
    """Cluster.reload() pulls serve_nodes from the server."""
    # [START bigtable_reload_cluster]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    cluster = instance.cluster(CLUSTER_ID)
    cluster.reload()
    # [END bigtable_reload_cluster]
    assert cluster.serve_nodes == SERVER_NODES
Example #39
0
def test_bigtable_cluster_exists():
    """Cluster.exists() reports True for the shared test cluster."""
    # [START bigtable_check_cluster_exists]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    cluster = instance.cluster(CLUSTER_ID)
    cluster_exists = cluster.exists()
    # [END bigtable_check_cluster_exists]
    assert cluster_exists
Example #40
0
def test_bigtable_get_iam_policy():
    """Fetch the instance IAM policy; viewers must be non-empty.

    Fixes the assertion: ``len(...) is not 0`` compares object identity,
    not value (a SyntaxWarning on CPython 3.8+ and unreliable by contract);
    a numeric comparison is used instead.
    """
    # [START bigtable_get_iam_policy]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    policy_latest = instance.get_iam_policy()
    # [END bigtable_get_iam_policy]

    assert len(policy_latest.bigtable_viewers) > 0
def test_bigtable_table_exists():
    """Table.exists() reports True for the shared test table."""
    # [START bigtable_api_check_table_exists]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)
    table_exists = table.exists()
    # [END bigtable_api_check_table_exists]
    assert table_exists
Example #42
0
 def start_bundle(self):
     """Bind the table lazily, start call metrics, and set up the batcher."""
     opts = self.beam_options
     if self.table is None:
         client = Client(project=opts['project_id'])
         self.table = client.instance(opts['instance_id']).table(
             opts['table_id'])
     self.service_call_metric = self.start_service_call_metrics(
         opts['project_id'], opts['instance_id'], opts['table_id'])
     self.batcher = _MutationsBatcher(self.table)
     self.batcher.set_flush_callback(self.write_mutate_metrics)
Example #43
0
def test_bigtable_cluster_name():
    # NOTE(review): this function appears truncated — `import re` is never
    # used, there is no closing [END bigtable_cluster_name] marker, and no
    # assertion on `cluster_name`; confirm against the upstream source.
    import re

    # [START bigtable_cluster_name]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    cluster = instance.cluster(CLUSTER_ID)
    cluster_name = cluster.name
def test_bigtable_cluster_state():
    """A locally constructed cluster handle carries no state."""
    # [START bigtable_cluster_state]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    cluster = instance.cluster(CLUSTER_ID)
    cluster_state = cluster.state
    # [END bigtable_cluster_state]

    # The handle was never reload()ed, so state stays falsy.
    assert not cluster_state
Example #45
0
def test_bigtable_update_instance():
    """Change the display name and push it with update()."""
    # [START bigtable_update_instance]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    display_name = "My new instance"
    instance.display_name = display_name
    instance.update()
    # [END bigtable_update_instance]
    assert instance.display_name == display_name
Example #46
0
def test_bigtable_update_cluster():
    """Resize the cluster to 4 serve nodes via update()."""
    # [START bigtable_update_cluster]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    cluster = instance.cluster(CLUSTER_ID)
    cluster.serve_nodes = 4
    cluster.update()
    # [END bigtable_update_cluster]
    assert cluster.serve_nodes == 4
def test_bigtable_list_column_families():
    """The test table must have at least one column family."""
    # [START bigtable_list_column_families]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    column_family_list = table.list_column_families()
    # [END bigtable_list_column_families]

    assert len(column_family_list) > 0
def test_bigtable_table_column_family():
    """column_family() yields a handle carrying the requested id."""
    # [START bigtable_table_column_family]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)

    table = instance.table(TABLE_ID)
    column_family_obj = table.column_family(COLUMN_FAMILY_ID)
    # [END bigtable_table_column_family]

    assert column_family_obj.column_family_id == COLUMN_FAMILY_ID
Example #49
0
def test_bigtable_test_iam_permissions():
    """The caller must hold cluster- and table-creation permissions."""
    # [START bigtable_test_iam_permissions]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    instance.reload()
    permissions = ["bigtable.clusters.create", "bigtable.tables.create"]
    permissions_allowed = instance.test_iam_permissions(permissions)
    # [END bigtable_test_iam_permissions]

    assert permissions_allowed == permissions
def test_bigtable_get_cluster_states():
    """Per-cluster replication states must include our cluster id."""
    # [START bigtable_get_cluster_states]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)

    get_cluster_states = table.get_cluster_states()
    # [END bigtable_get_cluster_states]

    assert CLUSTER_ID in get_cluster_states
Example #51
0
def test_bigtable_create_table():
    """Create 'table_my' with a max-2-versions GC policy on 'cf1'."""
    # [START bigtable_create_table]
    from google.cloud.bigtable import Client
    from google.cloud.bigtable import column_family

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table("table_my")
    # Define the GC policy to retain only the most recent 2 versions.
    max_versions_rule = column_family.MaxVersionsGCRule(2)
    table.create(column_families={"cf1": max_versions_rule})
    # [END bigtable_create_table]
    assert table.exists()
def test_bigtable_update_instance():
    """Rename the instance and register it for post-test cleanup."""
    # [START bigtable_update_instance]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    display_name = "My new instance"
    instance.display_name = display_name
    instance.update()
    # [END bigtable_update_instance]
    assert instance.display_name == display_name

    # Make sure this instance gets deleted after the test case.
    INSTANCES_TO_DELETE.append(instance)
def test_bigtable_mutations_batcher():
    """Exercise the mutations batcher: single row, bulk rows, flush."""
    # [START bigtable_mutations_batcher]
    from google.cloud.bigtable import Client

    client = Client(admin=True)
    instance = client.instance(INSTANCE_ID)
    table = instance.table(TABLE_ID)
    batcher = table.mutations_batcher()
    # [END bigtable_mutations_batcher]

    # Below code will be used while creating batcher.py snippets.
    # So not removing this code as of now.
    row_keys = [
        b"row_key_1",
        b"row_key_2",
        b"row_key_3",
        b"row_key_4",
        b"row_key_20",
        b"row_key_22",
        b"row_key_200",
    ]
    column_name = "column_name".encode()

    # Queue a single row.
    first_row = table.row(row_keys[0])
    first_row.set_cell(
        COLUMN_FAMILY_ID, column_name, "value-0",
        timestamp=datetime.datetime.utcnow(),
    )
    batcher.mutate(first_row)

    # Queue the remaining rows in a single bulk call.
    pending = []
    for idx, key in enumerate(row_keys[1:], start=1):
        row = table.row(key)
        row.set_cell(
            COLUMN_FAMILY_ID, column_name,
            "value_{}".format(idx).encode(),
            timestamp=datetime.datetime.utcnow(),
        )
        pending.append(row)
    batcher.mutate_rows(pending)
    # The batcher flushes automatically once it reaches max flush_count.

    # Manually send the current batch to Cloud Bigtable.
    batcher.flush()
    stored_keys = [row.row_key for row in table.read_rows()]
    assert len(stored_keys) == len(row_keys)
    table.truncate(timeout=200)