def testTableRequestCreateDropTable(self):
    """Create a table, wait for ACTIVE, then drop it and wait for DROPPED.

    Also verifies that in the cloud a CREATE TABLE request without
    TableLimits is rejected with IllegalArgumentException.
    """
    # create table failed without TableLimits set
    self.table_request.set_statement(self.create_tb_statement)
    if not is_onprem():
        # TableLimits are mandatory for cloud table creation.
        self.assertRaises(IllegalArgumentException,
                          self.handle.table_request, self.table_request)
    # create table succeed with TableLimits set
    self.table_request.set_table_limits(self.table_limits)
    result = self.handle.table_request(self.table_request)
    if is_onprem():
        # On-prem results carry no limits/schema, so only the state and
        # schema absence are checked here.
        self.check_table_result(result, State.CREATING, has_schema=False)
    else:
        # Fourth positional arg is has_schema=False: no schema while the
        # table is still CREATING.
        self.check_table_result(
            result, State.CREATING, self.table_limits, False)
    result.wait_for_completion(self.handle, wait_timeout, 1000)
    self.check_table_result(result, State.ACTIVE, self.table_limits)
    # drop table by resetting the statement on the same request object
    self.table_request.set_statement(self.drop_tb_statement)
    result = self.handle.table_request(self.table_request)
    self.check_table_result(result, State.DROPPING, check_limit=False,
                            check_schema=False)
    result.wait_for_completion(self.handle, wait_timeout, 1000)
    self.check_table_result(result, State.DROPPED, has_schema=False)
def setUpClass(cls):
    """Create the test table and generate table-usage records.

    Builds the table with large limits, then performs 100 put/get round
    trips so that the server accumulates usage records, sleeping between
    operations (and once at the end) except on CloudSim / on-prem where
    usage records are not generated.
    """
    cls.handle = None
    cls.set_up_class()
    create_statement = (
        'CREATE TABLE ' + table_name + '(fld_id INTEGER, fld_long LONG, \
fld_float FLOAT, fld_double DOUBLE, fld_bool BOOLEAN, fld_str STRING, \
fld_bin BINARY, fld_time TIMESTAMP(7), fld_num NUMBER, fld_json JSON, \
fld_arr ARRAY(STRING), fld_map MAP(STRING), \
fld_rec RECORD(fld_id LONG, fld_bool BOOLEAN, fld_str STRING), \
PRIMARY KEY(fld_id)) USING TTL 1 HOURS')
    limits = TableLimits(5000, 5000, 50)
    create_request = TableRequest().set_statement(
        create_statement).set_table_limits(limits)
    cls.table_request(create_request)
    # put and get some data, read_units = 100, write_units = 199
    # NOTE(review): "199" above looks like a typo for "100" -- confirm.
    row = get_row()
    key = {'fld_id': 1}
    put_request = PutRequest().set_value(row).set_table_name(table_name)
    get_request = GetRequest().set_key(key).set_table_name(table_name)
    for _ in range(100):
        cls.handle.put(put_request)
        cls.handle.get(get_request)
        # sleep to allow records to accumulate over time, but not if
        # using Cloudsim.
        if not_cloudsim() and not is_onprem():
            sleep(2)
    # need to sleep to allow usage records to accumulate but not if
    # using CloudSim, which doesn't generate usage records.
    if not_cloudsim() and not is_onprem():
        sleep(40)
def check_table_result(self, result, state, table_limits=None, has_schema=True, has_operation_id=True, check_limit=True): assert isinstance(self, TestCase) # check table name self.assertEqual(result.get_table_name(), table_name) # check state self.assertEqual(result.get_state(), state) # check table limits if check_limit: if is_onprem() or table_limits is None: self.assertIsNone(result.get_table_limits()) else: self.assertEqual(result.get_table_limits().get_read_units(), table_limits.get_read_units()) self.assertEqual(result.get_table_limits().get_write_units(), table_limits.get_write_units()) self.assertEqual(result.get_table_limits().get_storage_gb(), table_limits.get_storage_gb()) # check table schema # TODO: For on-prem proxy, TableResult.get_schema() always return None, # This is a known bug, when it is fixed, the test should be change. if not_cloudsim() and not is_onprem(): (self.assertIsNotNone(result.get_schema()) if has_schema else self.assertIsNone(result.get_schema())) # check operation id operation_id = result.get_operation_id() (self.assertIsNotNone(operation_id) if has_operation_id else self.assertIsNone(operation_id))
def testWriteMultipleAddIllegalRequestAndAbortIfUnsuccessful(self):
    """Exercise the illegal-argument paths of WriteMultipleRequest.add.

    Checks: non-request operand, non-bool abort flag, mixed table names,
    mixed shard keys, and (cloud only) exceeding the 50-sub-request
    batch limit.
    """
    self.assertRaises(IllegalArgumentException,
                      self.write_multiple_request.add, 'IllegalRequest',
                      True)
    self.assertRaises(IllegalArgumentException,
                      self.write_multiple_request.add, PutRequest(),
                      'IllegalAbortIfUnsuccessful')
    # add two operations with different table name
    self.write_multiple_request.add(self.requests[0], True)
    self.assertRaises(IllegalArgumentException,
                      self.write_multiple_request.add,
                      self.illegal_requests[0], False)
    self.write_multiple_request.clear()
    # add two operations with different major paths
    self.write_multiple_request.add(self.requests[0], True).add(
        self.illegal_requests[1], False)
    self.assertRaises(IllegalArgumentException, self.handle.write_multiple,
                      self.write_multiple_request)
    self.write_multiple_request.clear()
    # add operations when sub requests reached the max number
    if not is_onprem():
        # 51 puts: one more than the batch-operation limit of 50.
        for op in range(51):
            row = get_row()
            row['fld_id'] = op
            self.write_multiple_request.add(
                PutRequest().set_value(row).set_table_name(table_name),
                True)
        self.assertRaises(BatchOperationNumberLimitException,
                          self.handle.write_multiple,
                          self.write_multiple_request)
def testQueryPreparedStatementUpdateWithMaxReadKb(self):
    """Run a prepared UPDATE under a max_read_kb cap, then verify it.

    A 1 KB cap is too small to read the target row (cloud only); a 2 KB
    cap succeeds. The updated row is then re-selected and compared.
    """
    fld_sid, fld_id, fld_long = 0, 1, 2147483649
    # Bind the key variables on the prepared update statement.
    update_stmt = self.prepare_result_update.get_prepared_statement()
    update_stmt.set_variable('$fld_sid', fld_sid)
    update_stmt.set_variable('$fld_id', fld_id)
    # set a small max_read_kb to read a row to update
    self.query_request.set_prepared_statement(
        self.prepare_result_update).set_max_read_kb(1)
    if not is_onprem():
        self.assertRaises(IllegalArgumentException, self.handle.query,
                          self.query_request)
    # set a enough max_read_kb to read a row to update
    self.query_request.set_max_read_kb(2)
    result = self.handle.query(self.query_request)
    records = self.check_query_result(result, 1)
    self.assertEqual(records[0], {'NumRowsUpdated': 1})
    self.check_cost(result, 2, 4, 4, 4)
    # check the updated row via the prepared select
    select_stmt = self.prepare_result_select.get_prepared_statement()
    select_stmt.set_variable('$fld_long', fld_long)
    self.query_request.set_prepared_statement(select_stmt)
    result = self.handle.query(self.query_request)
    records = self.check_query_result(result, 1)
    self.assertEqual(records[0],
                     self._expected_row(fld_sid, fld_id, fld_long))
    self.check_cost(result, 1, 2, 0, 0)
def testTableUsageWithEndTime(self):
    """Fetch table usage with explicit end times in several formats."""
    # set a start time to avoid unexpected table usage information, and set
    # the end time
    current = int(round(time() * 1000))
    start_time = current - 120000
    # NOTE(review): end_time precedes start_time, making the window
    # empty -- presumably deliberate so that 0 records are expected
    # below (CloudSim still returns 1); confirm against the service.
    end_time = current - 180000
    self.table_usage_request.set_table_name(table_name).set_start_time(
        start_time).set_end_time(end_time)
    if is_onprem():
        # Table usage is a cloud-only feature.
        self.assertRaises(OperationNotSupportedException,
                          self.handle.get_table_usage,
                          self.table_usage_request)
        return
    result = self.handle.get_table_usage(self.table_usage_request)
    self._check_table_usage_result(result, 0 if not_cloudsim() else 1)
    # set current time as end time
    self.table_usage_request.set_end_time(current)
    result = self.handle.get_table_usage(self.table_usage_request)
    self._check_table_usage_result(result, 1, start_time, current)
    # set current time in ISO 8601 formatted string as end time
    end_str = datetime.now().isoformat()
    end_time = int(
        mktime(
            datetime.strptime(end_str,
                              '%Y-%m-%dT%H:%M:%S.%f').timetuple()) * 1000)
    self.table_usage_request.set_end_time(end_str)
    result = self.handle.get_table_usage(self.table_usage_request)
    self._check_table_usage_result(result, 1, start_time, end_time)
def testTableRequestCreateDropTable(self):
    """Create a table, wait for ACTIVE, then drop it and wait for DROPPED."""
    # create table failed without TableLimits set
    self.table_request.set_statement(self.create_tb_statement)
    if not is_onprem():
        # TableLimits are mandatory for cloud table creation.
        self.assertRaises(IllegalArgumentException,
                          self.handle.table_request, self.table_request)
    # create table succeed with TableLimits set
    self.table_request.set_table_limits(self.table_limits)
    result = self.handle.table_request(self.table_request)
    # Fourth positional arg is has_schema=False: no schema while CREATING.
    self.check_table_result(
        result, State.CREATING, self.table_limits, False)
    self._wait_for_completion(result)
    self.check_table_result(result, State.ACTIVE, self.table_limits)
    # drop table by resetting the statement on the same request object
    self.table_request.set_statement(self.drop_tb_statement)
    result = self.handle.table_request(self.table_request)
    # NOTE(review): this method mixes check_table_result and
    # _check_table_result -- confirm both helpers exist and agree.
    # TODO: A difference between old cloud proxy and new cloud proxy, during
    # DROPPING phase, the table limit is not none for old proxy but none for
    # new proxy.
    self._check_table_result(result, State.DROPPING, check_limit=False)
    self._wait_for_completion(result)
    # TODO: A difference between old cloud proxy and new cloud proxy, after
    # table DROPPED, the table limit is not none for old proxy but none for
    # new proxy.
    self._check_table_result(result, State.DROPPED, has_schema=False,
                             check_limit=False)
def testTableUsageWithStartEndTimeAndLimit(self):
    """Fetch table usage with start time, end time and a record limit.

    Exercised twice: with epoch-millisecond times and with ISO 8601
    strings (which the request accepts directly).
    """
    # start time, end time and limit
    current = int(round(time() * 1000))
    start_time = current - 180000
    self.table_usage_request.set_table_name(table_name).set_start_time(
        start_time).set_end_time(current).set_limit(2)
    if is_onprem():
        # Table usage is a cloud-only feature.
        self.assertRaises(OperationNotSupportedException,
                          self.handle.get_table_usage,
                          self.table_usage_request)
        return
    result = self.handle.get_table_usage(self.table_usage_request)
    # CloudSim is expected to return a single record regardless of limit.
    self._check_table_usage_result(result, 2 if not_cloudsim() else 1,
                                   start_time, current)
    # start time, end time in ISO 8601 formatted string and limit
    current = datetime.now()
    start_str = (current + timedelta(seconds=-180)).isoformat()
    end_str = (current + timedelta(seconds=-60)).isoformat()
    start_time = int(
        mktime(
            datetime.strptime(start_str,
                              '%Y-%m-%dT%H:%M:%S.%f').timetuple()) * 1000)
    end_time = int(
        mktime(
            datetime.strptime(end_str,
                              '%Y-%m-%dT%H:%M:%S.%f').timetuple()) * 1000)
    self.table_usage_request.set_start_time(start_str).set_end_time(
        end_str)
    result = self.handle.get_table_usage(self.table_usage_request)
    self._check_table_usage_result(result, 2 if not_cloudsim() else 1,
                                   start_time, end_time)
def check_cost(self, result, read_kb, read_units, write_kb, write_units,
               advance=False, multi_shards=False):
    """Assert the consumed capacity reported by a result.

    On-prem everything must be zero. For QueryResult, `advance` asserts
    strictly-greater reads and `multi_shards` allows a small tolerance;
    otherwise all four values are matched exactly.
    """
    assert isinstance(self, TestCase)
    if is_onprem():
        # On-prem reports no consumed capacity at all.
        for consumed in (result.get_read_kb(), result.get_read_units(),
                         result.get_write_kb(), result.get_write_units()):
            self.assertEqual(consumed, 0)
        return
    if isinstance(result, QueryResult) and advance:
        self.assertGreater(result.get_read_kb(), read_kb)
        self.assertGreater(result.get_read_units(), read_units)
    elif isinstance(result, QueryResult) and multi_shards:
        # Reads may vary slightly across shards: kb within +1,
        # units within +2.
        self.assertGreaterEqual(result.get_read_kb(), read_kb)
        self.assertLessEqual(result.get_read_kb(), read_kb + 1)
        self.assertGreaterEqual(result.get_read_units(), read_units)
        self.assertLessEqual(result.get_read_units(), read_units + 2)
    else:
        self.assertEqual(result.get_read_kb(), read_kb)
        self.assertEqual(result.get_read_units(), read_units)
    # Writes are always matched exactly off-prem.
    self.assertEqual(result.get_write_kb(), write_kb)
    self.assertEqual(result.get_write_units(), write_units)
def set_access_token_provider(config, tenant_id):
    """Attach an authorization provider matching the configured test server.

    Chooses the provider based on the server-type predicates: CloudSim,
    dev pod / minicloud, prod pod, or on-prem.

    :param config: the NoSQLHandleConfig to configure.
    :param tenant_id: the tenant used by tenant-aware providers.
    :raises IllegalArgumentException: if required credentials are missing
        or no server type is configured.
    """
    if is_cloudsim():
        # CloudSim performs no real authentication.
        authorization_provider = NoSecurityAccessTokenProvider(tenant_id)
    elif is_dev_pod() or is_minicloud():
        authorization_provider = KeystoreAccessTokenProvider().set_tenant(
            tenant_id)
    elif is_prod_pod():
        if credentials_file is None:
            raise IllegalArgumentException(
                'Must specify the credentials file path.')
        creds_provider = PropertiesCredentialsProvider().set_properties_file(
            credentials_file)
        authorization_provider = DefaultAccessTokenProvider(
            idcs_url=idcs_url(), entitlement_id=entitlement_id,
            creds_provider=creds_provider, timeout_ms=timeout)
    elif is_onprem():
        # A non-secure store needs no credentials; a secure store needs
        # both user name and password.
        if user_name is None and password is None:
            authorization_provider = StoreAccessTokenProvider()
        else:
            if user_name is None or password is None:
                raise IllegalArgumentException(
                    'Please set both the user_name and password.')
            authorization_provider = StoreAccessTokenProvider(
                user_name, password)
    else:
        raise IllegalArgumentException('Please set the test server.')
    config.set_authorization_provider(authorization_provider)
def check_table_result(self, result, state, table_limits=None, has_schema=True, has_operation_id=True, check_limit=True, check_schema=True, check_operation_id=True): assert isinstance(self, TestCase) # check compartment id if (version is None or is_onprem() or version is not None and is_cloudsim() and compare_version(version, '1.4.0') >= 0): self.assertIsNone(result.get_compartment_id()) else: self.assertEqual(result.get_compartment_id(), tenant_id) # check table name self.assertEqual(result.get_table_name(), table_name) # check state if isinstance(state, str): self.assertEqual(result.get_state(), state) else: self.assertTrue(result.get_state() in state) # check table limits if check_limit: if table_limits is None or is_onprem(): self.assertIsNone(result.get_table_limits()) else: self.assertEqual(result.get_table_limits().get_read_units(), table_limits.get_read_units()) self.assertEqual(result.get_table_limits().get_write_units(), table_limits.get_write_units()) self.assertEqual(result.get_table_limits().get_storage_gb(), table_limits.get_storage_gb()) # check table schema # TODO: For on-prem proxy, TableResult.get_schema() always return None, # This is a known bug, when it is fixed, the test should be change. if check_schema and not_cloudsim() and not is_onprem(): (self.assertIsNotNone(result.get_schema()) if has_schema else self.assertIsNone(result.get_schema())) # check operation id if check_operation_id: operation_id = result.get_operation_id() (self.assertIsNotNone(operation_id) if has_operation_id else self.assertIsNone(operation_id))
def testPutSetLargeSizeValue(self):
    """Put a row with an oversized string value.

    The cloud rejects it with RequestSizeLimitException; on-prem has no
    request-size limit, so the put succeeds and returns a version.
    """
    self.row['fld_str'] = self.get_random_str(2)
    self.put_request.set_value(self.row)
    if not is_onprem():
        self.assertRaises(RequestSizeLimitException, self.handle.put,
                          self.put_request)
    else:
        self.assertIsNotNone(
            self.handle.put(self.put_request).get_version())
def testListTablesWithNamespace(self):
    """List tables in a nonexistent namespace (on-prem only).

    Every handle must return an empty table list with a last-returned
    index of 0.
    """
    if not is_onprem():
        return
    # set a namespace that not exist
    for idx in range(self.num_handles):
        self.list_tables_requests[idx].set_namespace(namespace)
        listing = self.handles[idx].list_tables(
            self.list_tables_requests[idx])
        self.assertEqual(listing.get_tables(), [])
        self.assertEqual(listing.get_last_returned_index(), 0)
def testTableRequestSetIllegalTableName(self):
    """Reject a non-string table name locally and an unknown name remotely."""
    # A dict is not a valid table name: rejected client-side.
    self.assertRaises(IllegalArgumentException,
                      self.table_request.set_table_name,
                      {'name': table_name})
    if is_onprem():
        return
    # A well-formed but nonexistent table name fails at the service.
    self.table_request.set_table_name('IllegalTable').set_table_limits(
        self.table_limits)
    self.assertRaises(TableNotFoundException, self.handle.table_request,
                      self.table_request)
def setUp(self):
    """Build per-test handles and list-tables requests.

    Pod and on-prem runs get a single handle; otherwise two tenants are
    used so multi-tenant listing can be exercised.
    """
    self.num_handles = 1 if is_prod_pod() or is_onprem() else 2
    self.handles = list()
    self.list_tables_requests = list()
    for idx in range(self.num_handles):
        suffix = '' if idx == 0 else str(idx)
        self.handles.append(get_handle(tenant_id + suffix))
        self.list_tables_requests.append(
            ListTablesRequest().set_timeout(timeout))
    self.list_tables_request = ListTablesRequest().set_timeout(timeout)
def testQueryStatementSelectWithMaxReadKb(self):
    """Run a SELECT whose max_read_kb is smaller than the full result."""
    num_records, max_read_kb = 6, 4
    self.query_request.set_statement(query_statement).set_max_read_kb(
        max_read_kb)
    result = self.handle.query(self.query_request)
    # TODO: [#27744] KV doesn't honor max read kb for on-prem proxy because
    # it has no table limits.
    if is_onprem():
        records = self.check_query_result(result, num_records)
    else:
        records = self.check_query_result(result, max_read_kb + 1, True)
    for idx, record in enumerate(records):
        self.assertEqual(record, self._expected_row(1, idx))
    self.check_cost(result, max_read_kb + prepare_cost + 1,
                    max_read_kb * 2 + prepare_cost + 2, 0, 0)
def _check_list_tables_result(self, names, last_returned_index, eq=False):
    """Verify list_tables results against expectations for each handle.

    :param names: per-handle lists of expected table names.
    :param last_returned_index: per-handle expected last-returned index.
    :param eq: when True, require exact equality even on-prem; when
        False, on-prem results only need to be a strict superset
        (presumably because extra tables may exist -- confirm).
    """
    for handle in range(self.num_handles):
        result = self.handles[handle].list_tables(self.list_tables_request)
        if is_minicloud():
            # TODO: Minicloud doesn't handle start index and limit so far,
            # and the last index returned is always 0.
            self.assertEqual(result.get_tables(), table_names[handle])
            self.assertEqual(result.get_last_returned_index(), 0)
        elif is_onprem() and not eq:
            self.assertGreater(set(result.get_tables()),
                               set(names[handle]))
            self.assertGreater(result.get_last_returned_index(),
                               last_returned_index[handle])
        else:
            self.assertEqual(result.get_tables(), names[handle])
            self.assertEqual(result.get_last_returned_index(),
                             last_returned_index[handle])
def testTableRequestAlterTable(self):
    """Alter a table's schema and verify the UPDATING -> ACTIVE transition.

    Also verifies that (cloud only) an ALTER combined with TableLimits
    is rejected with IllegalArgumentException.
    """
    # create table before altering table
    request = TableRequest().set_statement(
        self.create_tb_statement).set_table_limits(self.table_limits)
    self._do_table_request(request)
    # alter table failed with TableLimits set
    if not is_onprem():
        request.set_statement(self.alter_fld_statement)
        self.assertRaises(IllegalArgumentException,
                          self.handle.table_request, request)
    # alter table succeed without TableLimits set
    self.table_request.set_statement(self.alter_fld_statement)
    result = self.handle.table_request(self.table_request)
    self.check_table_result(result, State.UPDATING, self.table_limits)
    self._wait_for_completion(result)
    self.check_table_result(result, State.ACTIVE, self.table_limits)
    # drop table after altering table
    request.set_statement(self.drop_tb_statement)
    self._do_table_request(request)
def testTableUsageNormal(self):
    """Fetch table usage with defaults and check the single latest record."""
    self.table_usage_request.set_table_name(table_name)
    if is_onprem():
        # Table usage is a cloud-only feature.
        self.assertRaises(OperationNotSupportedException,
                          self.handle.get_table_usage,
                          self.table_usage_request)
        return
    result = self.handle.get_table_usage(self.table_usage_request)
    now_ms = int(round(time() * 1000))
    # TODO: The start time of the table usage record get from the proxy
    # should be in 1 min, that is from current - 60000 to current, but
    # currently for minicloud it is in 2 mins from current - 120000 to
    # current. Seconds in period for the table usage record is also not
    # stable, sometimes it is 0 and sometimes it is 60. So we need to check
    # it separately.
    self._check_table_usage_result(result, 1, now_ms - 60000,
                                   check_separately=True)
def setUpClass(cls):
    """Create the query test table, secondary indexes and the shared query."""
    cls.set_up_class()
    index_name = 'idx_' + table_name
    create_statement = (
        'CREATE TABLE ' + table_name + '(fld_sid INTEGER, fld_id INTEGER, \
fld_long LONG, fld_float FLOAT, fld_double DOUBLE, fld_bool BOOLEAN, \
fld_str STRING, fld_bin BINARY, fld_time TIMESTAMP(6), fld_num NUMBER, \
fld_json JSON, fld_arr ARRAY(STRING), fld_map MAP(STRING), \
fld_rec RECORD(fld_id LONG, fld_bool BOOLEAN, fld_str STRING), \
PRIMARY KEY(SHARD(fld_sid), fld_id))')
    limits = TableLimits(100, 100, 1)
    create_request = TableRequest().set_statement(
        create_statement).set_table_limits(limits)
    cls.table_request(create_request)
    # NOTE(review): indexes are created only for servers OLDER than
    # CloudSim 1.4.0 / on-prem 20.2.0 (compare_version == -1) -- confirm
    # this version gate is intended and not inverted.
    if (version is not None and
            (is_cloudsim() and compare_version(version, '1.4.0') == -1 or
             is_onprem() and compare_version(version, '20.2.0') == -1)):
        create_idx_request = TableRequest()
        create_idx_statement = (
            'CREATE INDEX ' + index_name + '1 ON ' + table_name +
            '(fld_long)')
        create_idx_request.set_statement(create_idx_statement)
        cls.table_request(create_idx_request)
        create_idx_statement = (
            'CREATE INDEX ' + index_name + '2 ON ' + table_name +
            '(fld_str)')
        create_idx_request.set_statement(create_idx_statement)
        cls.table_request(create_idx_request)
        create_idx_statement = (
            'CREATE INDEX ' + index_name + '3 ON ' + table_name +
            '(fld_bool)')
        create_idx_request.set_statement(create_idx_statement)
        cls.table_request(create_idx_request)
        create_idx_statement = (
            'CREATE INDEX ' + index_name + '4 ON ' + table_name +
            '(fld_json.location as point)')
        create_idx_request.set_statement(create_idx_statement)
        cls.table_request(create_idx_request)
    # Shared by the query tests in this module.
    global query_statement
    query_statement = ('SELECT fld_sid, fld_id FROM ' + table_name +
                       ' WHERE fld_sid = 1')
def testWriteMultipleAddIllegalRequestAndAbortIfUnsuccessful(self):
    """Exercise the illegal-argument paths of WriteMultipleRequest.add.

    Checks: non-request operand, non-bool abort flag, mixed table names,
    mixed shard keys, and (cloud only) exceeding the request-size limit
    and the 50-sub-request batch limit.
    """
    self.assertRaises(IllegalArgumentException,
                      self.write_multiple_request.add, 'IllegalRequest',
                      True)
    self.assertRaises(IllegalArgumentException,
                      self.write_multiple_request.add, PutRequest(),
                      'IllegalAbortIfUnsuccessful')
    # add two operations with different table name
    self.write_multiple_request.add(self.requests[0], True)
    self.assertRaises(IllegalArgumentException,
                      self.write_multiple_request.add,
                      self.illegal_requests[0], False)
    self.write_multiple_request.clear()
    # add two operations with different major paths
    self.write_multiple_request.add(self.requests[0], True).add(
        self.illegal_requests[1], False)
    self.assertRaises(IllegalArgumentException, self.handle.write_multiple,
                      self.write_multiple_request)
    if not is_onprem():
        # add operations when the request size exceeded the limit
        self.write_multiple_request.clear()
        for op in range(64):
            row = get_row()
            row['fld_str'] = self.get_random_str(0.4)
            self.write_multiple_request.add(
                PutRequest().set_value(row).set_table_name(table_name),
                True)
        self.assertRaises(RequestSizeLimitException,
                          self.handle.write_multiple,
                          self.write_multiple_request)
        # add operations when sub requests reached the max number
        self.write_multiple_request.clear()
        for op in range(51):
            row = get_row()
            row['fld_id'] = op
            self.write_multiple_request.add(
                PutRequest().set_value(row).set_table_name(table_name),
                True)
        self.assertRaises(BatchOperationNumberLimitException,
                          self.handle.write_multiple,
                          self.write_multiple_request)
def generate_authorization_provider(tenant_id):
    """Build the authorization provider for the configured test server.

    Selects by server type: CloudSim, dev pod / minicloud, prod pod
    (further split by IAM principal type), or on-prem.

    :param tenant_id: the tenant used by tenant-aware providers.
    :returns: the constructed authorization provider.
    :raises IllegalArgumentException: if required configuration is
        missing or no server type is configured.
    """
    if is_cloudsim():
        # CloudSim performs no real authentication.
        authorization_provider = InsecureAuthorizationProvider(tenant_id)
    elif is_dev_pod() or is_minicloud():
        authorization_provider = TestSignatureProvider(tenant_id)
    elif is_prod_pod():
        if iam_principal() == 'user principal':
            if credentials_file is None:
                raise IllegalArgumentException(
                    'Must specify the credentials file path.')
            authorization_provider = SignatureProvider(
                config_file=credentials_file)
        elif iam_principal() == 'instance principal':
            # endpoint may be a region id string or a Region object.
            if isinstance(endpoint, str):
                region = Regions.from_region_id(endpoint)
            else:
                region = endpoint
            if region is None:
                authorization_provider = (
                    SignatureProvider.create_with_instance_principal())
            else:
                authorization_provider = (
                    SignatureProvider.create_with_instance_principal(
                        region=region))
        elif iam_principal() == 'resource principals':
            authorization_provider = (
                SignatureProvider.create_with_resource_principal())
        else:
            raise IllegalArgumentException('Must specify the principal.')
    elif is_onprem():
        # A non-secure store needs no credentials; a secure store needs
        # both user name and password.
        if user_name is None and password is None:
            authorization_provider = StoreAccessTokenProvider()
        else:
            if user_name is None or password is None:
                raise IllegalArgumentException(
                    'Please set both the user_name and password.')
            authorization_provider = StoreAccessTokenProvider(
                user_name, password)
    else:
        raise IllegalArgumentException('Please set the test server.')
    return authorization_provider
def testTableUsageWithStartTime(self):
    """Fetch table usage with a start time, in epoch-ms and ISO 8601 forms."""
    # set the start time
    start_time = int(round(time() * 1000)) - 120000
    self.table_usage_request.set_table_name(table_name).set_start_time(
        start_time)
    if is_onprem():
        # Table usage is a cloud-only feature.
        self.assertRaises(OperationNotSupportedException,
                          self.handle.get_table_usage,
                          self.table_usage_request)
        return
    result = self.handle.get_table_usage(self.table_usage_request)
    self._check_table_usage_result(result, 1, start_time)
    # set the start time in ISO 8601 formatted string
    start_str = (datetime.now() + timedelta(seconds=-120)).isoformat()
    start_time = int(mktime(datetime.strptime(
        start_str, '%Y-%m-%dT%H:%M:%S.%f').timetuple()) * 1000)
    self.table_usage_request.set_start_time(start_str)
    result = self.handle.get_table_usage(self.table_usage_request)
    self._check_table_usage_result(result, 1, start_time)
def setUpClass(cls):
    """Create per-tenant handles and a varying number of test tables.

    Handle 0 gets num_tables tables, handle 1 gets num_tables + 1, so
    the listings differ across tenants.
    """
    add_tier()
    cls.handles = list()
    global table_names
    table_names = list()
    num_tables = 3
    #
    # In pod env create 1 handle, otherwise create 2 handles for additional
    # testing
    #
    num_handles = 1 if is_prod_pod() or is_onprem() else 2
    for handle in range(num_handles):
        tenant = tenant_id + ('' if handle == 0 else str(handle))
        add_tenant(tenant)
        table_names.append(list())
        cls.handles.append(get_handle(tenant))
        for table in range(handle + num_tables):
            tb_name = table_name + str(table)
            table_names[handle].append(tb_name)
            #
            # Add a sleep for a pod to let things happen
            # NOTE(review): this sleeps 60s per table on a pod -- confirm
            # it is meant to run inside the per-table loop.
            #
            if is_pod():
                sleep(60)
            drop_request = TableRequest().set_statement(
                'DROP TABLE IF EXISTS ' + tb_name)
            cls.table_request(drop_request, cls.handles[handle])
            create_statement = ('CREATE TABLE ' + tb_name + '(fld_id INTEGER, \
fld_long LONG, fld_float FLOAT, fld_double DOUBLE, fld_bool BOOLEAN, \
fld_str STRING, fld_bin BINARY, fld_time TIMESTAMP(2), fld_num NUMBER, \
fld_json JSON, fld_arr ARRAY(STRING), fld_map MAP(STRING), \
fld_rec RECORD(fld_id LONG, fld_bool BOOLEAN, fld_str STRING), \
PRIMARY KEY(fld_id)) USING TTL 16 HOURS')
            limits = TableLimits(10, 10, 1)
            create_request = TableRequest().set_statement(
                create_statement).set_table_limits(limits)
            cls.table_request(create_request, cls.handles[handle])
def testTableRequestModifyTableLimits(self):
    """Lower the limits on an existing table and wait for ACTIVE.

    On-prem, modifying table limits is unsupported and must raise
    OperationNotSupportedException.
    """
    # create table before modifying the table limits
    request = TableRequest().set_statement(
        self.create_tb_statement).set_table_limits(self.table_limits)
    self._do_table_request(request)
    # modify the table limits
    table_limits = TableLimits(50, 50, 1)
    self.table_request.set_table_name(table_name).set_table_limits(
        table_limits)
    if is_onprem():
        self.assertRaises(OperationNotSupportedException,
                          self.handle.table_request, self.table_request)
        return
    result = self.handle.table_request(self.table_request)
    # Limits/schema are in flux while UPDATING, so skip those checks.
    self.check_table_result(result, State.UPDATING, check_limit=False,
                            check_schema=False)
    result.wait_for_completion(self.handle, wait_timeout, 1000)
    self.check_table_result(result, State.ACTIVE, table_limits)
    # drop table after modifying the table limits
    request.set_statement(self.drop_tb_statement)
    self._do_table_request(request)
def testTableRequestModifyTableLimits(self):
    """Raise the limits on an existing table and wait for ACTIVE.

    On-prem, modifying table limits is unsupported and must raise
    OperationNotSupportedException.
    """
    # create table before modifying the table limits
    request = TableRequest().set_statement(
        self.create_tb_statement).set_table_limits(self.table_limits)
    self._do_table_request(request)
    # modify the table limits
    table_limits = TableLimits(10000, 10000, 100)
    self.table_request.set_table_name(table_name).set_table_limits(
        table_limits)
    if is_onprem():
        self.assertRaises(OperationNotSupportedException,
                          self.handle.table_request, self.table_request)
        return
    result = self.handle.table_request(self.table_request)
    self.assertEqual(result.get_table_name(), table_name)
    self.assertEqual(result.get_state(), State.UPDATING)
    # Schema is only checked outside CloudSim.
    if not_cloudsim():
        self.assertIsNotNone(result.get_schema())
    self.assertIsNotNone(result.get_operation_id())
    self._wait_for_completion(result)
    self.check_table_result(result, State.ACTIVE, table_limits)
    # drop table after modifying the table limits
    request.set_statement(self.drop_tb_statement)
    self._do_table_request(request)
class TestSystemStatusRequest(unittest.TestCase, TestBase):
    """Tests for SystemStatusRequest.

    System requests are only supported on-prem, so the entire test body
    is defined conditionally inside the class.
    """
    if is_onprem():
        @classmethod
        def setUpClass(cls):
            cls.set_up_class()

        @classmethod
        def tearDownClass(cls):
            cls.tear_down_class()

        def setUp(self):
            self.set_up()
            self.handle_config = get_handle_config(tenant_id)
            self.create = 'CREATE NAMESPACE ' + namespace
            self.drop = 'DROP NAMESPACE ' + namespace + ' CASCADE'
            self.sys_request = SystemRequest().set_timeout(timeout)
            self.sys_status = SystemStatusRequest().set_timeout(timeout)

        def tearDown(self):
            self.tear_down()

        def testSystemStatusRequestSetIllegalOperationId(self):
            # Operation id must be a non-empty string.
            self.assertRaises(IllegalArgumentException,
                              self.sys_status.set_operation_id, {})
            self.assertRaises(IllegalArgumentException,
                              self.sys_status.set_operation_id, '')

        def testSystemStatusRequestSetIllegalStatement(self):
            # Statement must be a non-empty string.
            self.assertRaises(IllegalArgumentException,
                              self.sys_status.set_statement, {})
            self.assertRaises(IllegalArgumentException,
                              self.sys_status.set_statement, '')

        def testSystemStatusRequestSetIllegalTimeout(self):
            # Timeout must be a positive integer.
            self.assertRaises(IllegalArgumentException,
                              self.sys_status.set_timeout, 'IllegalTimeout')
            self.assertRaises(IllegalArgumentException,
                              self.sys_status.set_timeout, 0)
            self.assertRaises(IllegalArgumentException,
                              self.sys_status.set_timeout, -1)

        def testSystemStatusRequestSetIllegalDefaults(self):
            self.assertRaises(IllegalArgumentException,
                              self.sys_status.set_defaults,
                              'IllegalDefaults')

        def testSystemStatusRequestSetDefaults(self):
            self.sys_status.set_defaults(self.handle_config)
            self.assertEqual(self.sys_status.get_timeout(), timeout)

        def testSystemStatusRequestNoOperationId(self):
            # An operation id is required before the request can be sent.
            self.assertRaises(IllegalArgumentException,
                              self.handle.system_status, self.sys_status)

        def testSystemStatusRequestGets(self):
            operation_id = '100'
            self.sys_status.set_operation_id(operation_id).set_statement(
                self.create)
            self.assertEqual(self.sys_status.get_operation_id(),
                             operation_id)
            self.assertEqual(self.sys_status.get_statement(), self.create)
            self.assertEqual(self.sys_status.get_timeout(), timeout)

        def testSystemStatusRequestIllegalRequest(self):
            self.assertRaises(IllegalArgumentException,
                              self.handle.system_status, 'IllegalRequest')

        def testSystemStatusRequestNormal(self):
            # execute create namespace system request.
            self.sys_request.set_statement(self.create)
            result = self.handle.system_request(self.sys_request)
            # show the status of the create namespace system request.
            self.sys_status.set_operation_id(result.get_operation_id())
            result = self.handle.system_status(self.sys_status)
            self.check_system_result(result, SystemState.WORKING, True)
            result.wait_for_completion(self.handle, wait_timeout, 1000)
            self.check_system_result(result, SystemState.COMPLETE, True)
            # execute drop namespace system request.
            self.sys_request.set_statement(self.drop)
            result = self.handle.system_request(self.sys_request)
            # show the status of the drop namespace system request.
            self.sys_status.set_operation_id(
                result.get_operation_id()).set_statement(self.create)
            result = self.handle.system_status(self.sys_status)
            self.check_system_result(result, SystemState.WORKING, True,
                                     statement=self.create)
            result.wait_for_completion(self.handle, wait_timeout, 1000)
            self.check_system_result(result, SystemState.COMPLETE, True,
                                     statement=self.create)
class TestSystemRequest(unittest.TestCase, TestBase):
    """Tests for SystemRequest (namespace DDL, listings).

    System requests are only supported on-prem, so the entire test body
    is defined conditionally inside the class.
    """
    if is_onprem():
        @classmethod
        def setUpClass(cls):
            cls.set_up_class()

        @classmethod
        def tearDownClass(cls):
            cls.tear_down_class()

        def setUp(self):
            self.set_up()
            self.handle_config = get_handle_config(tenant_id)
            self.show = 'SHOW AS JSON NAMESPACES'
            self.create = 'CREATE NAMESPACE ' + namespace
            self.drop = 'DROP NAMESPACE ' + namespace + ' CASCADE'
            self.sys_request = SystemRequest()

        def tearDown(self):
            self.tear_down()

        def testSystemRequestSetIllegalStatement(self):
            # Statement must be a non-empty string; a syntactically
            # invalid statement is rejected by the server.
            self.assertRaises(IllegalArgumentException,
                              self.sys_request.set_statement, {})
            self.assertRaises(IllegalArgumentException,
                              self.sys_request.set_statement, '')
            self.sys_request.set_statement('IllegalStatement')
            self.assertRaises(IllegalArgumentException,
                              self.handle.system_request, self.sys_request)

        def testSystemRequestSetIllegalTimeout(self):
            # Timeout must be a positive integer.
            self.assertRaises(IllegalArgumentException,
                              self.sys_request.set_timeout, 'IllegalTimeout')
            self.assertRaises(IllegalArgumentException,
                              self.sys_request.set_timeout, 0)
            self.assertRaises(IllegalArgumentException,
                              self.sys_request.set_timeout, -1)

        def testSystemRequestSetIllegalDefaults(self):
            self.assertRaises(IllegalArgumentException,
                              self.sys_request.set_defaults,
                              'IllegalDefaults')

        def testSystemRequestSetDefaults(self):
            self.sys_request.set_defaults(self.handle_config)
            self.assertEqual(self.sys_request.get_timeout(),
                             table_request_timeout)

        def testSystemRequestNoStatement(self):
            self.assertRaises(IllegalArgumentException,
                              self.handle.system_request, self.sys_request)

        def testSystemRequestGets(self):
            self.sys_request.set_statement(self.show).set_timeout(timeout)
            self.assertEqual(self.sys_request.get_statement(), self.show)
            self.assertEqual(self.sys_request.get_timeout(), timeout)

        def testSystemRequestIllegalRequest(self):
            self.assertRaises(IllegalArgumentException,
                              self.handle.system_request, 'IllegalRequest')

        def testSystemRequestNormal(self):
            # create namespace.
            self.sys_request.set_statement(self.create)
            result = self.handle.system_request(self.sys_request)
            self.check_system_result(result, SystemState.WORKING, True,
                                     statement=self.create)
            result.wait_for_completion(self.handle, wait_timeout, 1000)
            self.check_system_result(result, SystemState.COMPLETE, True,
                                     statement=self.create)
            # show namespaces.
            self.sys_request.set_statement(self.show)
            result = self.handle.system_request(self.sys_request)
            self.check_system_result(result, SystemState.COMPLETE,
                                     has_result_string=True,
                                     statement=self.show)
            result.wait_for_completion(self.handle, wait_timeout, 1000)
            self.check_system_result(result, SystemState.COMPLETE,
                                     has_result_string=True,
                                     statement=self.show)
            # drop namespace
            self.sys_request.set_statement(self.drop)
            result = self.handle.system_request(self.sys_request)
            self.check_system_result(result, SystemState.WORKING, True,
                                     statement=self.drop)
            result.wait_for_completion(self.handle, wait_timeout, 1000)
            self.check_system_result(result, SystemState.COMPLETE, True,
                                     statement=self.drop)

        def testDoSystemRequest(self):
            # do_system_request combines submit and wait in one call.
            # create namespace.
            result = self.handle.do_system_request(
                self.create, wait_timeout, 1000)
            self.check_system_result(result, SystemState.COMPLETE, True,
                                     statement=self.create)
            # show namespaces.
            result = self.handle.do_system_request(
                self.show, wait_timeout, 1000)
            self.check_system_result(result, SystemState.COMPLETE,
                                     has_result_string=True,
                                     statement=self.show)
            # drop namespace.
            result = self.handle.do_system_request(
                self.drop, wait_timeout, 1000)
            self.check_system_result(result, SystemState.COMPLETE, True,
                                     statement=self.drop)

        def testListNamespaces(self):
            # show namespaces.
            results = self.handle.list_namespaces()
            self.assertGreaterEqual(len(results), 1)
            for result in results:
                self.assertTrue(self._is_str(result))

        def testListRoles(self):
            # show roles.
            results = self.handle.list_roles()
            self.assertGreaterEqual(len(results), 6)
            for result in results:
                self.assertTrue(self._is_str(result))

        def testListUsers(self):
            # show users: only available on a secure store.
            results = self.handle.list_users()
            if security():
                self.assertGreaterEqual(len(results), 1)
                for result in results:
                    self.assertTrue(isinstance(result, UserInfo))
                    self.assertTrue(self._is_str(result.get_id()))
                    self.assertTrue(self._is_str(result.get_name()))
            else:
                self.assertIsNone(results)

        def _is_str(self, data):
            # Python 2/3 compatible check for a non-empty string
            # (unicode exists only on Python 2).
            if ((version_info.major == 2 and
                 isinstance(data, (str, unicode)) or
                 version_info.major == 3 and isinstance(data, str)) and
                    len(data) != 0):
                return True
            return False
def testListTablesWithNamespace(self): if is_onprem(): # set a namespace that not exist self.list_tables_request.set_namespace(namespace) self._check_list_tables_result([[]], [0], True)
class TestRateLimiting(unittest.TestCase, TestBase):
    """
    Tests for driver-side rate limiting.

    These tests require a Cloud Simulator instance as rate limiting is not
    available or needed for on-premise.
    """
    if not is_onprem():
        @classmethod
        def setUpClass(cls):
            # Shared test table: 100 read units, 100 write units, 1 GB.
            cls.set_up_class()
            create_statement = (
                'CREATE TABLE ' + table_name +
                '(id integer, name string, primary key(id))')
            create_request = TableRequest().set_statement(
                create_statement).set_table_limits(TableLimits(100, 100, 1))
            cls.table_request(create_request)

        @classmethod
        def tearDownClass(cls):
            cls.tear_down_class()

        def setUp(self):
            # Fresh handle per test: default retry handler and EVENTUAL
            # consistency (eventual reads cost fewer read units).
            handle_config = get_handle_config(tenant_id).set_retry_handler(
                DefaultRetryHandler()).set_consistency(Consistency.EVENTUAL)
            self.handle = NoSQLHandle(handle_config)

        def tearDown(self):
            self.handle.close()

        def testRateLimitingBasicInternal(self):
            self._test_limiters(False, 500, 200, 200, 10, 100.0)

        def testRateLimitingBasicExternal(self):
            self._test_limiters(True, 500, 200, 200, 10, 100.0)

        def testRateLimitingBasicInternalPercent(self):
            self._test_limiters(False, 500, 200, 200, 10, 20.0)

        def testRateLimitingBasicExternalPercent(self):
            self._test_limiters(True, 500, 200, 200, 10, 20.0)

        def testRateLimitingExtendedInternalFull(self):
            # Skip unless extended tests are enabled
            if rate_limiter_extended():
                allunits = [1, 50, 300]
                for units in allunits:
                    # NOTE(review): 500 is never in allunits, so this body
                    # never executes -- compare the External variant below,
                    # which has no such guard.  Looks like leftover
                    # debugging; confirm whether the guard should go.
                    if units == 500:
                        self._test_limiters(
                            False, 500, units, units, 10, 100.0)

        def testRateLimitingExtendedInternalPercent(self):
            # Skip unless extended tests are enabled
            if rate_limiter_extended():
                allunits = [10, 100, 2000]
                for units in allunits:
                    self._test_limiters(False, 500, units, units, 10, 10.0)

        def testRateLimitingExtendedExternalFull(self):
            # Skip unless extended tests are enabled
            if rate_limiter_extended():
                allunits = [1, 50, 300]
                for units in allunits:
                    self._test_limiters(True, 500, units, units, 10, 100.0)

        def testRateLimitingExtendedExternalPercent(self):
            # Skip unless extended tests are enabled
            if rate_limiter_extended():
                allunits = [10, 100, 2000]
                for units in allunits:
                    self._test_limiters(True, 500, units, units, 10, 10.0)

        def _alter_table_limits(self, limits):
            # Apply new table limits and wait for the DDL to complete.
            table_request = TableRequest().set_table_name(
                table_name).set_table_limits(limits).set_timeout(15000)
            return self.handle.do_table_request(table_request, 15000, 1000)

        def _do_rate_limited_ops(self, num_seconds, read_limit, write_limit,
                                 max_rows, check_units, use_percent,
                                 use_external_limiters):
            """
            Runs puts and gets continuously for N seconds. Verify that the
            resultant RUs/WUs used match the given rate limits.
            """
            if read_limit == 0 and write_limit == 0:
                return
            put_request = PutRequest().set_table_name(table_name)
            get_request = GetRequest().set_table_name(table_name)
            key = dict()
            # TODO: random sizes 0-nKB.
            value = dict()
            value['name'] = 'jane'
            start_time = int(round(time() * 1000))
            end_time = start_time + num_seconds * 1000
            read_units_used = 0
            write_units_used = 0
            total_delayed_ms = 0
            throttle_exceptions = 0
            rlim = None
            wlim = None
            max_val = float(read_limit + write_limit)
            if not use_external_limiters:
                # Reset internal limiters so they don't have unused units.
                self.handle.get_client().reset_rate_limiters(table_name)
            else:
                rlim = SimpleRateLimiter(read_limit * use_percent / 100.0, 1)
                wlim = SimpleRateLimiter(write_limit * use_percent / 100.0, 1)
            while True:
                fld_id = int(random() * max_rows)
                if read_limit == 0:
                    do_put = True
                elif write_limit == 0:
                    do_put = False
                else:
                    # Mix puts and gets in proportion to the two limits.
                    v = int(random() * max_val)
                    do_put = v >= read_limit
                try:
                    if do_put:
                        value['id'] = fld_id
                        put_request.set_value(value).set_read_rate_limiter(
                            None).set_write_rate_limiter(wlim)
                        pres = self.handle.put(put_request)
                        write_units_used += pres.get_write_units()
                        total_delayed_ms += pres.get_rate_limit_delayed_ms()
                        rs = pres.get_retry_stats()
                        if rs is not None:
                            # BUG FIX: `Cls.__class__.__name__` on a class is
                            # 'type' (the metaclass name), so the old lookup
                            # always asked for exceptions named 'type'.  Use
                            # the exception class's own __name__.
                            throttle_exceptions += rs.get_num_exceptions(
                                WriteThrottlingException.__name__)
                    else:
                        key['id'] = fld_id
                        get_request.set_key(key).set_read_rate_limiter(
                            rlim).set_write_rate_limiter(None)
                        gres = self.handle.get(get_request)
                        read_units_used += gres.get_read_units()
                        total_delayed_ms += gres.get_rate_limit_delayed_ms()
                        rs = gres.get_retry_stats()
                        if rs is not None:
                            # Same fix as the write path above.
                            throttle_exceptions += rs.get_num_exceptions(
                                ReadThrottlingException.__name__)
                except ReadThrottlingException:
                    self.fail(
                        'Expected no read throttling exceptions, got one.')
                except WriteThrottlingException:
                    self.fail(
                        'Expected no write throttling exceptions, got one.')
                if int(round(time() * 1000)) >= end_time:
                    break
            # Recompute elapsed wall time; the loop may overrun end_time.
            num_seconds = (int(round(time() * 1000)) - start_time) / 1000
            rus = read_units_used / num_seconds
            wus = write_units_used / num_seconds
            if not check_units:
                return
            # Verify usage landed within +/-20% of the effective limits.
            use_percent /= 100.0
            if (rus < read_limit * use_percent * 0.8 or
                    rus > read_limit * use_percent * 1.2):
                self.fail('Gets: Expected around ' +
                          str(read_limit * use_percent) + ' RUs, got ' +
                          str(rus))
            if (wus < write_limit * use_percent * 0.8 or
                    wus > write_limit * use_percent * 1.2):
                self.fail('Puts: Expected around ' +
                          str(write_limit * use_percent) + ' WUs, got ' +
                          str(wus))

        def _do_rate_limited_queries(self, num_seconds, read_limit, max_kb,
                                     single_partition, use_percent,
                                     use_external_limiters):
            """
            Runs queries continuously for N seconds. Verify that the
            resultant RUs used match the given rate limit.
            """
            start_time = int(round(time() * 1000))
            end_time = start_time + num_seconds * 1000
            read_units_used = 0
            rlim = None
            wlim = None
            if not use_external_limiters:
                # Reset internal limiters so they don't have unused units.
                self.handle.get_client().reset_rate_limiters(table_name)
            else:
                rlim = SimpleRateLimiter(read_limit * use_percent / 100.0, 1)
                wlim = SimpleRateLimiter(read_limit * use_percent / 100.0, 1)
            prep_req = PrepareRequest()
            if single_partition:
                # Query based on single partition scanning.
                fld_id = int(random() * 500)
                prep_req.set_statement('SELECT * FROM ' + table_name +
                                       ' WHERE id = ' + str(fld_id))
            else:
                # Query based on all partitions scanning.
                prep_req.set_statement('SELECT * FROM ' + table_name +
                                       ' WHERE name = "jane"')
            prep_res = self.handle.prepare(prep_req)
            self.assertTrue(prep_res.get_prepared_statement() is not None,
                            'Prepare statement failed.')
            # Prepare itself consumes read units; count them too.
            read_units_used += prep_res.get_read_units()
            while True:
                # We need a 20 second timeout because in some cases this is
                # called on a table with 500 rows and 50RUs (uses 1000RUs =
                # 20 seconds).
                query_req = QueryRequest().set_prepared_statement(
                    prep_res).set_timeout(20000).set_read_rate_limiter(
                    rlim).set_write_rate_limiter(wlim)
                if max_kb > 0:
                    # Query with size limit.
                    query_req.set_max_read_kb(max_kb)
                try:
                    # Drain all batches of this query.
                    while True:
                        res = self.handle.query(query_req)
                        res.get_results()
                        read_units_used += res.get_read_units()
                        if query_req.is_done():
                            break
                except ReadThrottlingException:
                    self.fail('Expected no throttling exceptions, got one.')
                except RequestTimeoutException:
                    # This may happen for very small limit tests.
                    pass
                if int(round(time() * 1000)) >= end_time:
                    break
            num_seconds = (int(round(time() * 1000)) - start_time) / 1000
            use_percent /= 100.0
            rus = read_units_used / num_seconds
            expected_rus = read_limit * use_percent
            # For very small expected amounts, just verify within 1 RU.
            if (expected_rus < 4 and
                    expected_rus - 1 <= rus <= expected_rus + 1):
                return
            if rus < expected_rus * 0.6 or rus > expected_rus * 1.5:
                self.fail('Queries: Expected around ' + str(expected_rus) +
                          ' RUs, got ' + str(rus))

        def _run_limited_ops_on_table(self, read_limit, write_limit,
                                      max_seconds, max_rows, use_percent,
                                      use_external_limiters):
            """
            Runs get/puts then queries on a table. Verify RUs/WUs are within
            given limits.
            """
            self._alter_table_limits(TableLimits(read_limit, write_limit, 50))
            # We have to do the read/write ops separately since we're
            # running single-threaded, and the result is hard to tell if
            # it's correct (example: we'd get 37RUs and 15WUs).
            self._do_rate_limited_ops(max_seconds, 0, write_limit, max_rows,
                                      True, use_percent,
                                      use_external_limiters)
            self._do_rate_limited_ops(max_seconds, read_limit, 0, max_rows,
                                      True, use_percent,
                                      use_external_limiters)
            # Query based on single partition scanning.
            self._do_rate_limited_queries(max_seconds, read_limit, 20, True,
                                          use_percent, use_external_limiters)
            # Query based on all partitions scanning.
            self._do_rate_limited_queries(max_seconds, read_limit, 20, False,
                                          use_percent, use_external_limiters)

        def _test_limiters(self, use_external_limiters, max_rows, read_limit,
                           write_limit, test_seconds, use_percent):
            """Driver for one rate-limiting scenario: configure internal or
            external limiters, then run the limited op/query suite."""
            # Clear any previous rate limiters.
            client = self.handle.get_client()
            client.enable_rate_limiting(False, 100.0)
            # Configure our handle for rate limiting.
            if not use_external_limiters:
                client.enable_rate_limiting(True, use_percent)
            # Limit bursts in tests.
            client.set_ratelimiter_duration_seconds(1)
            # Then do the actual testing.
            self._run_limited_ops_on_table(read_limit, write_limit,
                                           test_seconds, max_rows,
                                           use_percent,
                                           use_external_limiters)