def init_pool(app_config):
    """Create a shared happybase ConnectionPool for Bigtable.

    From the happybase documentation: creating a Connection object is a
    heavyweight operation; you should create a single Connection (here, a
    pool of them) and share it among threads in your application.

    Args:
        app_config: mapping expected to contain 'GOOGLE_PROJECT_ID' and
            'BIGTABLE_INSTANCE'; may optionally contain 'BIGTABLE_POOL_SIZE'
            (defaults to 10).

    Returns:
        A happybase.pool.ConnectionPool, or None when the required config
        keys are missing or the connection attempt fails.
    """
    credentials = GoogleCredentials.get_application_default()
    connection_pool = None
    # BUG FIX: the original condition was
    #   `'GOOGLE_PROJECT_ID' and 'BIGTABLE_INSTANCE' in app_config`
    # which only checked the second key — the string literal
    # 'GOOGLE_PROJECT_ID' is always truthy. Both keys must be present.
    if 'GOOGLE_PROJECT_ID' in app_config and 'BIGTABLE_INSTANCE' in app_config:
        try:
            client = bigtable.Client(project=app_config['GOOGLE_PROJECT_ID'],
                                     admin=True,
                                     credentials=credentials)
            instance = client.instance(app_config['BIGTABLE_INSTANCE'])
            # Pool size is configurable; 10 is the documented default here.
            size = app_config.get('BIGTABLE_POOL_SIZE', 10)
            connection_pool = happybase.pool.ConnectionPool(size,
                                                            instance=instance)
        except Exception as err:  # pylint: disable=W0703
            # Broad catch is deliberate: callers treat a None pool as
            # "no connection available" rather than crashing at startup.
            logging.exception("ERROR: Could not make connection")
            logging.exception(err)
    else:
        logging.warning('WARNING: no connection made')
    return connection_pool
def setUpClass(cls):
    """Create a test bigtable instance.

    Skips the whole test class when CloudBigtable.test_project_id is unset.
    Creates (or reuses) a Bigtable instance whose id is either a
    pre-configured cls.instance_id or a randomly suffixed name, and stores
    handles on the class for use by individual tests.
    """
    # Run the shared datastore-test setup first.
    data_store_test._DataStoreTest.setUpClass()
    cls.test_project_id = config_lib.CONFIG[
        "CloudBigtable.test_project_id"]
    if not cls.test_project_id:
        raise unittest.SkipTest(
            "No CloudBigtable.test_project_id set, skipping.")
    cls.db = cloud_bigtable_data_store.CloudBigTableDataStore()
    # Separate admin client so tests can inspect the instance directly.
    cls.btclient = bigtable.Client(project=cls.test_project_id, admin=True)
    cls.btclient.start()
    # Ideally we wouldn't stand up a new instance for each test run, but
    # deleting an instance just marks it for delete in 7 days and you can't
    # recreate with the same name. Users can set instance_id=[yourid] to test
    # with an existing instance.
    # NOTE(review): cls.instance_id is assumed to be declared on the class
    # (falsy by default) — confirm against the class definition.
    if not cls.instance_id:
        cls.instance_id = "".join((cls.TEST_BIGTABLE_INSTANCE_PREFIX,
                                   str(random.randrange(1, 1000))))
    cls.db.Initialize(project_id=cls.test_project_id,
                      instance_id=cls.instance_id)
    # Hold a reference to the instance in our class level btclient so tests can
    # access it.
    cls.btinstance = cls.btclient.instance(cls.instance_id)
def main(project_id, instance_id, config_dir, remove_tables):
    """Report (or delete) Bigtable tables that no config file references.

    Reads every *.json config under config_dir, collects the table names
    they declare, then compares against the tables that actually exist in
    the instance. Unreferenced tables are printed, and deleted when
    remove_tables is truthy.

    Args:
        project_id: GCP project id.
        instance_id: Bigtable instance id.
        config_dir: directory containing *.json table configs with a
            'bigtable_table_name' key.
        remove_tables: when truthy, delete unreferenced tables instead of
            just listing them.
    """
    print(config_dir)
    config_files = glob.glob(config_dir + "/*.json")
    client = bigtable.Client(project=project_id, admin=True)
    instance = client.instance(instance_id)
    connection = happybase.Connection(instance=instance)
    # Every table name declared by some config file is "in use".
    current_tables = []
    for config_file in config_files:
        config = read_json(config_file)
        table_id = config['bigtable_table_name']
        current_tables.append(table_id)
    all_tables = connection.tables()
    for table in all_tables:
        # BUG FIX: the original guard was `if True:`, which flagged/removed
        # EVERY table — current_tables was built but never consulted.
        # NOTE(review): happybase may return table names as bytes while the
        # configs yield str; confirm types match in this deployment.
        if table not in current_tables:
            if remove_tables:
                # BUG FIX: Python 2 `print` statements converted to calls,
                # consistent with print(config_dir) above.
                print('Removing: ' + table)
                connection.delete_table(table)
                # Throttle admin operations between deletes.
                time.sleep(2)
            else:
                print('Unused table: ' + table)
def StartClient(self, project_id=None, instance_id=None):
    """Open the long-lived Bigtable client and bind instance/table handles.

    Connection to bigtable is fairly expensive so we open one and re-use it.
    https://cloud.google.com/bigtable/docs/performance
    """
    self.btclient = bigtable.Client(project=project_id)
    self.btclient.start()
    self.instance = self.btclient.instance(instance_id)
    # Table name comes from configuration, not from the caller.
    tablename = config_lib.CONFIG["CloudBigtable.table_name"]
    self.table = self.instance.table(tablename)
def setUp(self):
    """Patch bigtable.Client and build a datastore whose table is a mock."""
    patcher = mock.patch.object(bigtable, "Client", autospec=True)
    self.btclient_patcher = patcher
    patcher.start()
    # Real datastore object, but its table handle is replaced by a mock so
    # no RPCs are issued.
    self.db = cloud_bigtable_data_store.CloudBigTableDataStore()
    self.db.table = mock.MagicMock()
    # The patcher is active, so this yields an autospec'd mock client.
    self.btclient = bigtable.Client(project="something", admin=True)
def __init__(self, table, project_id):
    """Initialize state and immediately open the Bigtable client.

    Args:
        table: name of the Bigtable table to operate on.
        project_id: GCP project id used for the admin client.
    """
    # Admin client: required because this object manages tables.
    self._client = bigtable.Client(project=project_id, admin=True)
    self._client_instance = None
    self._table_name = table
    self._table_instance = None
    self._column_family_id = _COLUMN_FAMILY_ID
    # Row-range bookkeeping starts empty.
    self._start_index = 0
    self._end_index = 0
    # Start client to enable receiving requests.
    self.StartClient()
def CreateInstanceAndTable(self, project_id=None, instance_id=None):
    """Ensure the Bigtable instance and table (with column families) exist.

    Creates the instance if GetInstance finds none (blocking until the
    long-running create operation finishes), then creates the table and its
    column families — each with an optional age- or version-based GC rule
    from self.COLUMN_FAMILIES — if GetTable finds none.

    Args:
        project_id: GCP project to create the client in.
        instance_id: Bigtable instance id to find or create.

    Returns:
        The bigtable instance object.
    """
    # The client must be created with admin=True because it will create a
    # table.
    with bigtable.Client(project=project_id, admin=True) as btclient:
        tablename = config_lib.CONFIG["CloudBigtable.table_name"]
        instance_name = config_lib.CONFIG["CloudBigtable.instance_name"]
        btinstance = self.GetInstance(btclient, instance_id)
        if not btinstance:
            logging.info("Creating cloud bigtable: %s.%s in %s", instance_id,
                         tablename, project_id)
            btinstance = btclient.instance(
                instance_id,
                display_name=instance_name,
                serve_nodes=config_lib.CONFIG["CloudBigtable.serve_nodes"],
                location=config_lib.CONFIG["CloudBigtable.instance_location"])
            operation = btinstance.create()
            # Instance creation is asynchronous; block until it completes.
            self.WaitOnOperation(operation)
        table = self.GetTable(btinstance, tablename)
        if not table:
            table = btinstance.table(tablename)
            table.create()
            # NOTE: .iteritems() means this code targets Python 2.
            for column, gc_rules in self.COLUMN_FAMILIES.iteritems():
                gc_rule = None
                if gc_rules:
                    # "age" wins unless "versions" is also set (last one
                    # assigned takes precedence — only one rule is applied).
                    age = gc_rules.get("age", None)
                    if age:
                        gc_rule = bigtable.column_family.MaxAgeGCRule(age)
                    version_max = gc_rules.get("versions", None)
                    if version_max:
                        gc_rule = bigtable.column_family.MaxVersionsGCRule(
                            version_max)
                cf = table.column_family(column, gc_rule=gc_rule)
                cf.create()
        return btinstance
def main(project, cluster_id, zone, table_name):
    """Happybase hello-world: create a table, write, scan, then delete it.

    Args:
        project: GCP project id.
        cluster_id: Bigtable cluster id.
        zone: zone the cluster lives in.
        table_name: name of the table to create and later delete.
    """
    # The client must be created with admin=True because it will create a
    # table.
    client = bigtable.Client(project=project, admin=True)
    with client:
        cluster = client.cluster(zone, cluster_id)
        cluster.reload()
        connection = happybase.Connection(cluster=cluster)

        print('Creating the {} table.'.format(table_name))
        column_family_name = 'cf1'
        # Default column-family options are sufficient for the demo.
        families = {column_family_name: dict()}
        connection.create_table(table_name, families)
        table = connection.table(table_name)

        print('Writing some greetings to the table.')
        column_name = '{fam}:greeting'.format(fam=column_family_name)
        greetings = (
            'Hello World!',
            'Hello Cloud Bigtable!',
            'Hello HappyBase!',
        )
        for greeting in greetings:
            # Use a random key to distribute writes more evenly across shards.
            # See: https://cloud.google.com/bigtable/docs/schema-design
            table.put(str(uuid.uuid4()), {column_name: greeting})

        print('Scanning for all greetings:')
        for key, row in table.scan():
            print('\t{}: {}'.format(key, row[column_name]))

        print('Deleting the {} table.'.format(table_name))
        connection.delete_table(table_name)
def main(project_id, instance_id, config_dir, remove_tables, pattern):
    """(Re)create Bigtable tables from JSON config files.

    For each config file matching pattern under config_dir, optionally
    deletes and then creates the table named by its 'bigtable_table_name'
    key, using the module-level COLUMN_FAMILIES definition.

    Args:
        project_id: GCP project id.
        instance_id: Bigtable instance id.
        config_dir: directory containing the JSON configs.
        remove_tables: when truthy, delete each table before recreating it.
        pattern: glob pattern for config files, e.g. '*.json'.
    """
    print(config_dir)
    config_files = glob.glob(config_dir + "/" + pattern)
    client = bigtable.Client(project=project_id, admin=True)
    instance = client.instance(instance_id)
    connection = happybase.Connection(instance=instance)
    for config_file in config_files:
        config = read_json(config_file)
        table_id = config['bigtable_table_name']
        if remove_tables:
            print('REMOVING ' + table_id + ' bigtable table')
            try:
                connection.delete_table(table_id)
            except Exception as err:  # best-effort: table may not exist
                # BUG FIX: was the Python 2 statement `print err`, a
                # SyntaxError under Python 3 — every other print in this
                # function is already a call.
                print(err)
                print('NO TABLE FOUND TO DELETE')
        print('CREATING ' + table_id + ' bigtable table')
        try:
            connection.create_table(table_id, COLUMN_FAMILIES)
        except Exception as err:  # best-effort: table may already exist
            print(err)
            print('TABLE EXISTS')
        # Throttle admin operations between tables.
        time.sleep(2)
    connection.close()
def main(project_id, cluster_id, zone, table_name):
    """Happybase hello-world sample: create, write, read, scan, delete.

    The [START]/[END] markers delimit snippets extracted into the product
    documentation — keep them intact when editing.

    Args:
        project_id: GCP project id.
        cluster_id: Bigtable cluster id.
        zone: zone the cluster lives in.
        table_name: name of the demo table to create and delete.
    """
    # [START connecting_to_bigtable]
    # The client must be created with admin=True because it will create a
    # table.
    client = bigtable.Client(project=project_id, admin=True)
    cluster = client.cluster(zone, cluster_id)
    connection = happybase.Connection(cluster=cluster)
    # [END connecting_to_bigtable]

    # try/finally guarantees the connection is closed even if a step fails.
    try:
        # [START creating_a_table]
        print('Creating the {} table.'.format(table_name))
        column_family_name = 'cf1'
        connection.create_table(
            table_name,
            {
                column_family_name: dict()  # Use default options.
            })
        # [END creating_a_table]

        # [START writing_rows]
        print('Writing some greetings to the table.')
        table = connection.table(table_name)
        column_name = '{fam}:greeting'.format(fam=column_family_name)
        greetings = [
            'Hello World!',
            'Hello Cloud Bigtable!',
            'Hello HappyBase!',
        ]
        for i, value in enumerate(greetings):
            # Note: This example uses sequential numeric IDs for simplicity,
            # but this can result in poor performance in a production
            # application. Since rows are stored in sorted order by key,
            # sequential keys can result in poor distribution of operations
            # across nodes.
            #
            # For more information about how to design a Bigtable schema for
            # the best performance, see the documentation:
            #
            # https://cloud.google.com/bigtable/docs/schema-design
            row_key = 'greeting{}'.format(i)
            table.put(row_key, {column_name: value})
        # [END writing_rows]

        # [START getting_a_row]
        print('Getting a single greeting by row key.')
        key = 'greeting0'
        row = table.row(key)
        print('\t{}: {}'.format(key, row[column_name]))
        # [END getting_a_row]

        # [START scanning_all_rows]
        print('Scanning for all greetings:')
        for key, row in table.scan():
            print('\t{}: {}'.format(key, row[column_name]))
        # [END scanning_all_rows]

        # [START deleting_a_table]
        print('Deleting the {} table.'.format(table_name))
        connection.delete_table(table_name)
        # [END deleting_a_table]
    finally:
        connection.close()
def main(project_id, instance_id, table_id):
    """Bigtable row-level hello-world: create, write, read, scan, delete.

    Args:
        project_id: GCP project id.
        instance_id: Bigtable instance id.
        table_id: name of the demo table to create and delete.
    """
    # [START connecting_to_bigtable]
    # The client must be created with admin=True because it will create a
    # table.
    # BUG FIX: in the original the client construction appeared commented
    # out (`# with bigtable.Client(...) as client:`) leaving `client`
    # undefined — restored as a plain assignment. NOTE(review): confirm
    # against upstream whether a `with` block was intended instead.
    client = bigtable.Client(project=project_id, admin=True)
    instance = client.instance(instance_id)
    # [END connecting_to_bigtable]

    # [START creating_a_table]
    print('Creating the {} table.'.format(table_id))
    table = instance.table(table_id)
    table.create()
    column_family_id = 'cf1'
    cf1 = table.column_family(column_family_id)
    cf1.create()
    # [END creating_a_table]

    # [START writing_rows]
    print('Writing some greetings to the table.')
    # column_id is bytes from here on.
    column_id = 'greeting'.encode('utf-8')
    greetings = [
        'Hello World!',
        'HEllo Cloud Bigtable!',
        'Hello Python!',
    ]
    for i, value in enumerate(greetings):
        # Note: This example uses sequential numeric IDs for simplicity,
        # but this can result in poor performance in a production
        # application. Since rows are stored in sorted order by key,
        # sequential keys can result in poor distribution of operations
        # across nodes.
        #
        # For more information about how to design a Bigtable schema for
        # the best performance, see the documentation:
        #
        # https://cloud.google.com/bigtable/docs/schema-design
        row_key = 'greeting{}'.format(i)
        row = table.row(row_key)
        # BUG FIX: column_id is already bytes; the original called
        # column_id.encode('utf-8') again, which raises AttributeError on
        # Python 3 (bytes has no .encode).
        row.set_cell(
            column_family_id,
            column_id,
            value.encode('utf-8'))
        row.commit()
    # [END writing_rows]

    # [START getting_a_row]
    print('Getting a single greeting by row key.')
    key = 'greeting0'
    row = table.read_row(key.encode('utf-8'))
    # Cells are keyed by the bytes column qualifier (same fix as above).
    value = row.cells[column_family_id][column_id][0].value
    print('\t{}: {}'.format(key, value.decode('utf-8')))
    # [END getting_a_row]

    # [START scanning_all_rows]
    print('Scanning for all greetings:')
    partial_rows = table.read_rows()
    partial_rows.consume_all()
    for row_key, row in partial_rows.rows.items():
        key = row_key.decode('utf-8')
        cell = row.cells[column_family_id][column_id][0]
        value = cell.value.decode('utf-8')
        print('\t{}: {}'.format(key, value))
    # [END scanning_all_rows]

    # [START deleting_a_table]
    print('Deleting the {} table.'.format(table_id))
    table.delete()
    # [END deleting_a_table]
def main(project_id, instance_id):
    """Connect to a Bigtable instance via happybase and list its tables."""
    # Admin client is needed for table-level (metadata) operations.
    client = bigtable.Client(project=project_id, admin=True)
    instance = client.instance(instance_id)
    connection = happybase.Connection(instance=instance)
    all_tables = connection.tables()