def testTableRequestModifyTableLimits(self):
    """Modify a table's limits end-to-end: create, update limits, verify
    the UPDATING -> ACTIVE transition and the new limits, then drop."""
    # create table before modifying the table limits
    request = TableRequest().set_statement(
        self.create_tb_statement).set_table_limits(self.table_limits)
    result = self.handle.table_request(request)
    result.wait_for_state(self.handle, table_name, State.ACTIVE,
                          wait_timeout, 1000)
    # modify the table limits
    table_limits = TableLimits(10000, 10000, 100)
    self.table_request.set_table_name(table_name).set_table_limits(
        table_limits)
    result = self.handle.table_request(self.table_request)
    self.assertEqual(result.get_table_name(), table_name)
    # limit changes are asynchronous: the immediate state is UPDATING
    self.assertEqual(result.get_state(), State.UPDATING)
    if not_cloudsim():
        # CloudSim does not return a schema
        self.assertIsNotNone(result.get_schema())
    wait_result = result.wait_for_state(self.handle, table_name,
                                        State.ACTIVE, wait_timeout, 1000)
    self.assertEqual(wait_result.get_table_name(), table_name)
    self.assertEqual(wait_result.get_state(), State.ACTIVE)
    # the new limits must be reflected once the table is ACTIVE again
    self.assertEqual(wait_result.get_table_limits().get_read_units(),
                     table_limits.get_read_units())
    self.assertEqual(wait_result.get_table_limits().get_write_units(),
                     table_limits.get_write_units())
    self.assertEqual(wait_result.get_table_limits().get_storage_gb(),
                     table_limits.get_storage_gb())
    if not_cloudsim():
        self.assertIsNotNone(wait_result.get_schema())
    # a completed operation reports no operation id
    self.assertIsNone(wait_result.get_operation_id())
    # drop table after modifying the table limits
    request.set_statement(self.drop_tb_statement)
    result = self.handle.table_request(request)
    result.wait_for_state(self.handle, table_name, State.DROPPED,
                          wait_timeout, 1000)
def testPutWithIdentityColumn(self):
    """Put into a table with a GENERATED ALWAYS identity column: extra
    fields are ignored, but supplying the identity value must fail."""
    id_table = table_prefix + 'Identity'
    create_request = TableRequest().set_statement(
        'CREATE TABLE ' + id_table + '(sid INTEGER, id LONG GENERATED \
ALWAYS AS IDENTITY, name STRING, PRIMARY KEY(SHARD(sid), id))')
    create_request.set_table_limits(TableLimits(50, 50, 1))
    self.table_request(create_request)
    # test put a row with an extra field not in the table, by default this
    # will succeed
    row = {'name': 'myname', 'extra': 'extra', 'sid': 1}
    key = {'sid': 1, 'id': 1}
    expected = OrderedDict()
    expected['sid'] = 1
    expected['id'] = 1
    expected['name'] = 'myname'
    self.put_request.set_table_name(id_table).set_value(row)
    result = self.handle.put(self.put_request)
    version = result.get_version()
    self._check_put_result(result, has_generated_value=True)
    self.check_cost(result, 0, 0, 1, 1)
    self.get_request.set_table_name(id_table).set_key(key)
    result = self.handle.get(self.get_request)
    self.check_get_result(result, expected, version)
    self.check_cost(result, 1, 2, 0, 0)
    # test put a row with identity field, this will fail because id is
    # 'generated always' and in that path it is not legal to provide a value
    # for id
    # NOTE: mutating 'row' also mutates the value held by put_request,
    # since set_value keeps a reference to the same dict
    row['id'] = 1
    self.assertRaises(IllegalArgumentException, self.handle.put,
                      self.put_request)
def testWriteMultipleWithIdentityColumn(self):
    """WriteMultiple of puts on an identity-column table; ids 1..N are
    generated server-side and each row is read back and verified."""
    num_operations = 10
    id_table = table_prefix + 'Identity'
    create_request = TableRequest().set_statement(
        'CREATE TABLE ' + id_table + '(sid INTEGER, id LONG GENERATED \
ALWAYS AS IDENTITY, name STRING, PRIMARY KEY(SHARD(sid), id))')
    create_request.set_table_limits(TableLimits(5000, 5000, 50))
    self.table_request(create_request)
    # add ten operations
    row = {'name': 'myname', 'sid': 1}
    for idx in range(num_operations):
        put_request = PutRequest().set_table_name(id_table).set_value(row)
        # vary the identity cache size per operation
        put_request.set_identity_cache_size(idx)
        self.write_multiple_request.add(put_request, False)
    # execute the write multiple request
    versions = list()
    result = self.handle.write_multiple(self.write_multiple_request)
    op_results = self._check_write_multiple_result(result, num_operations)
    for idx in range(result.size()):
        # generated identity values are expected to be 1..num_operations
        versions.append(
            self._check_operation_result(op_results[idx], True, True,
                                         idx + 1))
    self.check_cost(result, 0, 0, num_operations, num_operations)
    # check the records after write_multiple request succeed
    self.get_request.set_table_name(id_table)
    for idx in range(num_operations):
        self.get_request.set_key({'sid': 1, 'id': idx + 1})
        result = self.handle.get(self.get_request)
        expected = OrderedDict()
        expected['sid'] = 1
        expected['id'] = idx + 1
        expected['name'] = 'myname'
        self.check_get_result(result, expected, versions[idx])
        self.check_cost(result, 1, 2, 0, 0)
def setUpClass(cls):
    """Create the shared query table plus an index on fld_long, and set
    the module globals used by the query tests."""
    TestBase.set_up_class()
    index_name = 'idx_' + table_name
    create_statement = (
        'CREATE TABLE ' + table_name + '(fld_sid INTEGER, fld_id INTEGER, \
fld_long LONG, fld_float FLOAT, fld_double DOUBLE, fld_bool BOOLEAN, \
fld_str STRING, fld_bin BINARY, fld_time TIMESTAMP(6), fld_num NUMBER, \
fld_json JSON, fld_arr ARRAY(STRING), fld_map MAP(STRING), \
fld_rec RECORD(fld_id LONG, fld_bool BOOLEAN, fld_str STRING), \
PRIMARY KEY(SHARD(fld_sid), fld_id))')
    limits = TableLimits(5000, 5000, 50)
    create_request = TableRequest().set_statement(
        create_statement).set_table_limits(limits)
    cls._result = TestBase.table_request(create_request, State.ACTIVE)
    create_index_statement = ('CREATE INDEX ' + index_name + ' ON ' +
                              table_name + '(fld_long)')
    create_index_request = TableRequest().set_statement(
        create_index_statement)
    cls._result = TestBase.table_request(create_index_request,
                                         State.ACTIVE)
    # read-unit cost of preparing a query, referenced by the tests
    global prepare_cost
    prepare_cost = 2
    global query_statement
    query_statement = ('SELECT fld_sid, fld_id FROM ' + table_name +
                       ' WHERE fld_sid = 1')
def testTableRequestAlterTableTTL(self):
    """Alter a table's TTL and verify the UPDATING -> ACTIVE transition."""
    # Create the table the ALTER statement will target.
    create_drop = TableRequest()
    create_drop.set_statement(self.create_tb_statement)
    create_drop.set_table_limits(self.table_limits)
    self._do_table_request(create_drop)
    # Issue the TTL alteration; the table passes through UPDATING first,
    # with its limits unchanged throughout.
    self.table_request.set_statement(self.alter_ttl_statement)
    alter_result = self.handle.table_request(self.table_request)
    self.check_table_result(alter_result, State.UPDATING, self.table_limits)
    self._wait_for_completion(alter_result)
    self.check_table_result(alter_result, State.ACTIVE, self.table_limits)
    # Clean up: reuse the same request object to drop the table.
    create_drop.set_statement(self.drop_tb_statement)
    self._do_table_request(create_drop)
def setUpClass(cls):
    """Create per-handle tenants, handles and TTL tables for the
    multi-handle tests; handle N manages N + num_tables tables."""
    add_tier()
    cls._handles = list()
    cls._drop_requests = list()
    global table_names
    table_names = list()
    num_tables = 3
    #
    # In pod env create 1 handle, otherwise create 2 handles for additional
    # testing
    #
    cls._num_handles = 1 if is_pod() else 2
    for handle in range(cls._num_handles):
        add_tenant(tenant_id + str(handle))
        table_names.append(list())
        cls._drop_requests.append(list())
        cls._handles.append(get_handle(tenant_id + str(handle)))
        for table in range(handle + num_tables):
            table_names[handle].append(table_name + str(table))
        for table in range(handle + num_tables):
            #
            # Add a sleep for a pod to let things happen
            #
            if is_pod():
                sleep(60)
            # drop any leftover table from a previous run, then recreate
            drop_statement = ('DROP TABLE IF EXISTS ' +
                              table_names[handle][table])
            drop_request = TableRequest().set_statement(drop_statement)
            cls._drop_requests[handle].append(drop_request)
            cls._result = cls._handles[handle].table_request(drop_request)
            cls._result.wait_for_state(cls._handles[handle],
                                       table_names[handle][table],
                                       State.DROPPED, wait_timeout, 1000)
            create_statement = (
                'CREATE TABLE ' + table_names[handle][table] + '(\
fld_id INTEGER, fld_long LONG, fld_float FLOAT, fld_double DOUBLE, \
fld_bool BOOLEAN, fld_str STRING, fld_bin BINARY, fld_time TIMESTAMP(2), \
fld_num NUMBER, fld_json JSON, fld_arr ARRAY(STRING), fld_map MAP(STRING), \
fld_rec RECORD(fld_id LONG, fld_bool BOOLEAN, fld_str STRING), \
PRIMARY KEY(fld_id)) USING TTL 16 HOURS')
            limits = TableLimits(5000, 5000, 50)
            create_request = TableRequest().set_statement(
                create_statement).set_table_limits(limits)
            cls._result = cls._handles[handle].table_request(
                create_request)
            cls._result.wait_for_state(cls._handles[handle],
                                       table_names[handle][table],
                                       State.ACTIVE, wait_timeout, 1000)
def testTableRequestCreateDropIndex(self):
    """Create then drop an index, checking UPDATING/ACTIVE transitions."""
    # create table before creating index
    request = TableRequest().set_statement(
        self.create_tb_statement).set_table_limits(self.table_limits)
    self._do_table_request(request)
    # create index by resetting the statement
    self.table_request.set_statement(self.create_idx_statement)
    result = self.handle.table_request(self.table_request)
    # limits/schema are not checked while the index DDL is in flight
    self.check_table_result(result, State.UPDATING, check_limit=False,
                            check_schema=False)
    result.wait_for_completion(self.handle, wait_timeout, 1000)
    self.check_table_result(result, State.ACTIVE, self.table_limits)
    # drop index by resetting the statement
    self.table_request.set_statement(self.drop_idx_statement)
    result = self.handle.table_request(self.table_request)
    self.check_table_result(result, State.UPDATING, check_limit=False,
                            check_schema=False)
    result.wait_for_completion(self.handle, wait_timeout, 1000)
    self.check_table_result(result, State.ACTIVE, self.table_limits)
    # drop table after dropping index
    self.table_request.set_statement(self.drop_tb_statement)
    self._do_table_request(self.table_request)
def setUpClass(cls):
    """Create the shared two-column (id, name) test table once."""
    cls.set_up_class()
    statement = ('CREATE TABLE ' + table_name + '(id integer, \
name string, primary key(id))')
    request = TableRequest()
    request.set_statement(statement)
    request.set_table_limits(TableLimits(100, 100, 1))
    cls.table_request(request)
def setUp(self):
    """Prepare credentials, a default endpoint and a sample request."""
    self._generate_credentials_file()
    self.token_provider = None
    self.base = 'http://localhost:' + str(8000)
    self.handle_config = NoSQLHandleConfig(self.base)
    # Not matter which request.
    self.request = TableRequest()
def setUpClass(cls):
    """Create the usage-test table and generate steady read/write traffic
    so that table-usage records accumulate."""
    cls.handle = None
    cls.set_up_class()
    create_statement = (
        'CREATE TABLE ' + table_name + '(fld_id INTEGER, fld_long LONG, \
fld_float FLOAT, fld_double DOUBLE, fld_bool BOOLEAN, fld_str STRING, \
fld_bin BINARY, fld_time TIMESTAMP(7), fld_num NUMBER, fld_json JSON, \
fld_arr ARRAY(STRING), fld_map MAP(STRING), \
fld_rec RECORD(fld_id LONG, fld_bool BOOLEAN, fld_str STRING), \
PRIMARY KEY(fld_id)) USING TTL 1 HOURS')
    limits = TableLimits(5000, 5000, 50)
    create_request = TableRequest().set_statement(
        create_statement).set_table_limits(limits)
    cls.table_request(create_request)
    # put and get some data, read_units = 100, write_units = 199
    # NOTE(review): the loop below issues 100 puts and 100 gets; the
    # "199" above looks stale -- confirm against the usage assertions.
    row = get_row()
    key = {'fld_id': 1}
    put_request = PutRequest().set_value(row).set_table_name(table_name)
    get_request = GetRequest().set_key(key).set_table_name(table_name)
    count = 0
    while count < 100:
        cls.handle.put(put_request)
        cls.handle.get(get_request)
        count += 1
        # sleep to allow records to accumulate over time, but not if
        # using Cloudsim.
        if not_cloudsim() and not is_onprem():
            sleep(2)
    # need to sleep to allow usage records to accumulate but not if
    # using CloudSim, which doesn't generate usage records.
    if not_cloudsim() and not is_onprem():
        sleep(40)
def setUpClass(cls):
    """Create the usage-test table, insert traffic with a fully-typed row,
    and pause so usage records can accumulate."""
    TestBase.set_up_class()
    create_statement = (
        'CREATE TABLE ' + table_name + '(fld_id INTEGER, fld_long LONG, \
fld_float FLOAT, fld_double DOUBLE, fld_bool BOOLEAN, fld_str STRING, \
fld_bin BINARY, fld_time TIMESTAMP(7), fld_num NUMBER, fld_json JSON, \
fld_arr ARRAY(STRING), fld_map MAP(STRING), \
fld_rec RECORD(fld_id LONG, fld_bool BOOLEAN, fld_str STRING), \
PRIMARY KEY(fld_id)) USING TTL 1 HOURS')
    limits = TableLimits(5000, 5000, 50)
    create_request = TableRequest().set_statement(
        create_statement).set_table_limits(limits)
    cls._result = TestBase.table_request(create_request, State.ACTIVE)
    # put and get some data, read_units = 100, write_units = 199
    # NOTE(review): 100 put/get pairs are issued below; confirm the "199".
    row = {
        'fld_id': 1,
        'fld_long': 2147483648,
        'fld_float': 3.1414999961853027,
        'fld_double': 3.1415,
        'fld_bool': True,
        'fld_str': '{"name": u1, "phone": null}',
        'fld_bin': bytearray(pack('>i', 4)),
        'fld_time': datetime.now(),
        'fld_num': Decimal(5),
        'fld_json': {'a': '1', 'b': None, 'c': '3'},
        'fld_arr': ['a', 'b', 'c'],
        'fld_map': {'a': '1', 'b': '2', 'c': '3'},
        'fld_rec': {'fld_id': 1, 'fld_bool': False, 'fld_str': None}}
    key = {'fld_id': 1}
    put_request = PutRequest().set_value(row).set_table_name(table_name)
    get_request = GetRequest().set_key(key).set_table_name(table_name)
    count = 0
    while count < 100:
        cls._handle.put(put_request)
        cls._handle.get(get_request)
        count += 1
        # sleep to allow records to accumulate over time, but not if
        # using Cloudsim.
        if not_cloudsim():
            sleep(2)
    # need to sleep to allow usage records to accumulate but not if
    # using CloudSim, which doesn't generate usage records.
    if not_cloudsim():
        sleep(40)
def tearDown(self):
    """Drop the test table (ignoring absence), then run common teardown."""
    try:
        # NOTE(review): the argument order here (state before the table
        # name, which is passed last) differs from sibling tearDowns that
        # pass the table name second -- confirm which wait_for_state
        # signature this suite's TableResult actually exposes.
        TableResult.wait_for_state(self.handle, State.ACTIVE, wait_timeout,
                                   1000, table_name)
        drop_request = TableRequest().set_statement(self.drop_tb_statement)
        self._do_table_request(drop_request)
    except TableNotFoundException:
        pass
    finally:
        self.tear_down()
def tearDown(self):
    """Drop the test table if it exists, then run the common teardown."""
    try:
        # Let any in-flight DDL on the table settle before dropping.
        get_table = GetTableRequest().set_table_name(table_name)
        table_info = self.handle.get_table(get_table)
        table_info.wait_for_completion(self.handle, wait_timeout, 1000)
        self._do_table_request(
            TableRequest().set_statement(self.drop_tb_statement))
    except TableNotFoundException:
        # Nothing to drop -- the table never existed or is already gone.
        pass
    finally:
        self.tear_down()
def setUp(self):
    """Build the DDL statements and reusable request objects shared by
    the table-request tests."""
    self.set_up()
    self.handle_config = get_handle_config(tenant_id)
    self.create_tb_statement = (
        'CREATE TABLE ' + table_name + '(fld_id INTEGER, fld_long LONG, \
fld_float FLOAT, fld_double DOUBLE, fld_bool BOOLEAN, fld_str STRING, \
fld_bin BINARY, fld_time TIMESTAMP(4), fld_num NUMBER, fld_json JSON, \
fld_arr ARRAY(STRING), fld_map MAP(STRING), \
fld_rec RECORD(fld_id LONG, fld_bool BOOLEAN, fld_str STRING), \
PRIMARY KEY(fld_id)) USING TTL 30 DAYS')
    self.create_idx_statement = ('CREATE INDEX ' + index_name + ' ON ' +
                                 table_name + '(fld_str, fld_double)')
    self.alter_fld_statement = ('ALTER TABLE ' + table_name +
                                '(DROP fld_num)')
    self.alter_ttl_statement = ('ALTER TABLE ' + table_name +
                                ' USING TTL 16 HOURS')
    self.drop_idx_statement = ('DROP INDEX ' + index_name + ' ON ' +
                               table_name)
    self.drop_tb_statement = ('DROP TABLE IF EXISTS ' + table_name)
    # blank request and default limits reused by individual tests
    self.table_request = TableRequest()
    self.table_limits = TableLimits(100, 100, 1)
def tearDown(self):
    """Ensure the test table is dropped before the base-class teardown."""
    try:
        # Wait for the table to settle into ACTIVE before issuing the drop.
        TableResult.wait_for_state(self.handle, table_name, State.ACTIVE,
                                   wait_timeout, 1000)
        drop = TableRequest()
        drop.set_statement(self.drop_tb_statement)
        dropped = self.handle.table_request(drop)
        dropped.wait_for_state(self.handle, table_name, State.DROPPED,
                               wait_timeout, 1000)
    except TableNotFoundException:
        # Already gone -- nothing to clean up.
        pass
    finally:
        TestBase.tear_down(self)
def setUpClass(cls):
    """Create the shared multi-type table with a composite shard key."""
    cls.set_up_class()
    ddl = ('CREATE TABLE ' + table_name + '(fld_sid INTEGER, fld_id INTEGER, \
fld_long LONG, fld_float FLOAT, fld_double DOUBLE, fld_bool BOOLEAN, \
fld_str STRING, fld_bin BINARY, fld_time TIMESTAMP(8), fld_num NUMBER, \
fld_json JSON, fld_arr ARRAY(STRING), fld_map MAP(STRING), \
fld_rec RECORD(fld_id LONG, fld_bool BOOLEAN, fld_str STRING), \
PRIMARY KEY(SHARD(fld_sid), fld_id))')
    request = TableRequest()
    request.set_statement(ddl)
    request.set_table_limits(TableLimits(50, 50, 1))
    cls.table_request(request)
def testGetTableWithOperationId(self):
    """GetTable using the operation id of an in-flight DROP TABLE."""
    drop_request = TableRequest().set_statement('DROP TABLE IF EXISTS ' +
                                                table_name)
    table_result = self.handle.table_request(drop_request)
    self.get_table_request.set_table_name(table_name).set_operation_id(
        table_result.get_operation_id())
    result = self.handle.get_table(self.get_table_request)
    # the drop may have completed already, so either state is acceptable;
    # minicloud/pod still report limits while dropping, others do not
    if is_minicloud() or is_pod():
        self.check_table_result(result, [State.DROPPING, State.DROPPED],
                                table_limits)
    else:
        self.check_table_result(result, [State.DROPPING, State.DROPPED])
    table_result.wait_for_completion(self.handle, wait_timeout, 1000)
def setUpClass(cls):
    """Create the shared table (1-hour TTL) and wait until it is ACTIVE."""
    TestBase.set_up_class()
    ddl = ('CREATE TABLE ' + table_name + '(fld_id INTEGER, fld_long LONG, \
fld_float FLOAT, fld_double DOUBLE, fld_bool BOOLEAN, fld_str STRING, \
fld_bin BINARY, fld_time TIMESTAMP(3), fld_num NUMBER, fld_json JSON, \
fld_arr ARRAY(STRING), fld_map MAP(STRING), \
fld_rec RECORD(fld_id LONG, fld_bool BOOLEAN, fld_str STRING), \
PRIMARY KEY(fld_id)) USING TTL 1 HOURS')
    request = TableRequest()
    request.set_statement(ddl)
    request.set_table_limits(TableLimits(5000, 5000, 50))
    cls._result = TestBase.table_request(request, State.ACTIVE)
def setUpClass(cls):
    """Create two tables and their indexes for the list/get-index tests;
    table N gets N + num_indexes indexes."""
    cls.set_up_class()
    global table_names, index_names, num_indexes, index_fields
    table_names = list()
    num_tables = 2
    index_names = list()
    num_indexes = 1
    index_fields = list()
    for index in range(2):
        index_fields.append(list())
    # index 0 covers fld_double, index 1 covers fld_str
    index_fields[0].append('fld_double')
    index_fields[1].append('fld_str')
    for table in range(num_tables):
        tb_name = table_name + str(table)
        table_names.append(tb_name)
        create_statement = ('CREATE TABLE ' + tb_name + '(fld_id INTEGER, \
fld_long LONG, fld_float FLOAT, fld_double DOUBLE, fld_bool BOOLEAN, \
fld_str STRING, fld_bin BINARY, fld_time TIMESTAMP(0), fld_num NUMBER, \
fld_json JSON, fld_arr ARRAY(STRING), fld_map MAP(STRING), \
fld_rec RECORD(fld_id LONG, fld_bool BOOLEAN, fld_str STRING), \
PRIMARY KEY(fld_id)) USING TTL 2 DAYS')
        limits = TableLimits(50, 50, 1)
        create_request = TableRequest().set_statement(
            create_statement).set_table_limits(limits)
        cls.table_request(create_request)
        index_names.append(list())
        for index in range(table + num_indexes):
            idx_name = index_name + str(index)
            index_names[table].append(idx_name)
            create_index_statement = (
                'CREATE INDEX ' + idx_name + ' ON ' + tb_name + '(' +
                ','.join(index_fields[index]) + ')')
            create_index_request = TableRequest().set_statement(
                create_index_statement)
            cls.table_request(create_index_request)
        # minicloud lists indexes in reverse creation order
        if is_minicloud():
            index_names[table].reverse()
def testTableRequestAlterTableTTL(self):
    """Alter a table's TTL; the limits must be unchanged through the
    UPDATING -> ACTIVE transition."""
    # create table before altering table
    request = TableRequest().set_statement(
        self.create_tb_statement).set_table_limits(self.table_limits)
    result = self.handle.table_request(request)
    result.wait_for_state(self.handle, table_name, State.ACTIVE,
                          wait_timeout, 1000)
    # alter table ttl
    self.table_request.set_statement(self.alter_ttl_statement)
    result = self.handle.table_request(self.table_request)
    self.assertEqual(result.get_table_name(), table_name)
    self.assertEqual(result.get_state(), State.UPDATING)
    # a TTL change must not touch the table limits
    self.assertEqual(result.get_table_limits().get_read_units(),
                     self.table_limits.get_read_units())
    self.assertEqual(result.get_table_limits().get_write_units(),
                     self.table_limits.get_write_units())
    self.assertEqual(result.get_table_limits().get_storage_gb(),
                     self.table_limits.get_storage_gb())
    if not_cloudsim():
        # CloudSim does not return a schema
        self.assertIsNotNone(result.get_schema())
    wait_result = result.wait_for_state(self.handle, table_name,
                                        State.ACTIVE, wait_timeout, 1000)
    self.assertEqual(wait_result.get_table_name(), table_name)
    self.assertEqual(wait_result.get_state(), State.ACTIVE)
    self.assertEqual(wait_result.get_table_limits().get_read_units(),
                     self.table_limits.get_read_units())
    self.assertEqual(wait_result.get_table_limits().get_write_units(),
                     self.table_limits.get_write_units())
    self.assertEqual(wait_result.get_table_limits().get_storage_gb(),
                     self.table_limits.get_storage_gb())
    if not_cloudsim():
        self.assertIsNotNone(wait_result.get_schema())
    # completed operations report no operation id
    self.assertIsNone(wait_result.get_operation_id())
    # drop table after altering table
    request.set_statement(self.drop_tb_statement)
    result = self.handle.table_request(request)
    result.wait_for_state(self.handle, table_name, State.DROPPED,
                          wait_timeout, 1000)
def setUpClass(cls):
    """Create tenants, handles and per-handle tables (handle N gets
    N + num_tables tables) for the multi-handle tests."""
    add_tier()
    cls.handles = list()
    global table_names
    table_names = list()
    num_tables = 3
    #
    # In pod env create 1 handle, otherwise create 2 handles for additional
    # testing
    #
    num_handles = 1 if is_prod_pod() or is_onprem() else 2
    for handle in range(num_handles):
        # the first tenant keeps the bare tenant_id, later ones are suffixed
        tenant = tenant_id + ('' if handle == 0 else str(handle))
        add_tenant(tenant)
        table_names.append(list())
        cls.handles.append(get_handle(tenant))
        for table in range(handle + num_tables):
            tb_name = table_name + str(table)
            table_names[handle].append(tb_name)
            #
            # Add a sleep for a pod to let things happen
            #
            if is_pod():
                sleep(60)
            # drop any leftover table from a previous run, then recreate
            drop_request = TableRequest().set_statement(
                'DROP TABLE IF EXISTS ' + tb_name)
            cls.table_request(drop_request, cls.handles[handle])
            create_statement = ('CREATE TABLE ' + tb_name + '(fld_id INTEGER, \
fld_long LONG, fld_float FLOAT, fld_double DOUBLE, fld_bool BOOLEAN, \
fld_str STRING, fld_bin BINARY, fld_time TIMESTAMP(2), fld_num NUMBER, \
fld_json JSON, fld_arr ARRAY(STRING), fld_map MAP(STRING), \
fld_rec RECORD(fld_id LONG, fld_bool BOOLEAN, fld_str STRING), \
PRIMARY KEY(fld_id)) USING TTL 16 HOURS')
            limits = TableLimits(10, 10, 1)
            create_request = TableRequest().set_statement(
                create_statement).set_table_limits(limits)
            cls.table_request(create_request, cls.handles[handle])
def setUpClass(cls):
    """Create the shared table with a 2-day table-level TTL."""
    cls.set_up_class()
    # table_ttl is published as a module global for the TTL assertions.
    global table_ttl
    table_ttl = TimeToLive.of_days(2)
    ddl = ('CREATE TABLE ' + table_name + '(fld_id INTEGER, fld_long LONG, \
fld_float FLOAT, fld_double DOUBLE, fld_bool BOOLEAN, fld_str STRING, \
fld_bin BINARY, fld_time TIMESTAMP(6), fld_num NUMBER, fld_json JSON, \
fld_arr ARRAY(STRING), fld_map MAP(STRING), \
fld_rec RECORD(fld_id LONG, fld_bool BOOLEAN, fld_str STRING), \
PRIMARY KEY(fld_id)) USING TTL ' + str(table_ttl))
    request = TableRequest()
    request.set_statement(ddl)
    request.set_table_limits(TableLimits(100, 100, 1))
    cls.table_request(request)
def tearDownClass(cls):
    """Drop all prefix-matching tables per handle, close the handles,
    delete the tenants and finally the tier."""
    for handle in range(len(cls.handles)):
        try:
            ltr = ListTablesRequest()
            result = cls.handles[handle].list_tables(ltr)
            for table in result.get_tables():
                # only touch tables this test suite created
                if table.startswith(table_prefix):
                    drop_request = TableRequest().set_statement(
                        'DROP TABLE IF EXISTS ' + table)
                    cls.table_request(drop_request, cls.handles[handle])
        finally:
            cls.handles[handle].close()
            # NOTE(review): some setUpClass variants name the first tenant
            # without a numeric suffix -- confirm this suffix scheme
            # matches the one used at setup.
            delete_tenant(tenant_id + str(handle))
    delete_tier()
def testTableRequestModifyTableLimits(self):
    """Modify table limits; on-prem expects OperationNotSupportedException
    since limits are a cloud-only concept."""
    # create table before modifying the table limits
    request = TableRequest().set_statement(
        self.create_tb_statement).set_table_limits(self.table_limits)
    self._do_table_request(request)
    # modify the table limits
    table_limits = TableLimits(50, 50, 1)
    self.table_request.set_table_name(table_name).set_table_limits(
        table_limits)
    # on-prem does not support table limits: expect a failure and stop here
    if is_onprem():
        self.assertRaises(OperationNotSupportedException,
                          self.handle.table_request, self.table_request)
        return
    result = self.handle.table_request(self.table_request)
    self.check_table_result(result, State.UPDATING, check_limit=False,
                            check_schema=False)
    result.wait_for_completion(self.handle, wait_timeout, 1000)
    self.check_table_result(result, State.ACTIVE, table_limits)
    # drop table after modifying the table limits
    request.set_statement(self.drop_tb_statement)
    self._do_table_request(request)
def testTableRequestModifyTableLimits(self):
    """Modify table limits and verify the operation id and final state;
    on-prem expects OperationNotSupportedException."""
    # create table before modifying the table limits
    request = TableRequest().set_statement(
        self.create_tb_statement).set_table_limits(self.table_limits)
    self._do_table_request(request)
    # modify the table limits
    table_limits = TableLimits(10000, 10000, 100)
    self.table_request.set_table_name(table_name).set_table_limits(
        table_limits)
    # on-prem does not support table limits: expect a failure and stop here
    if is_onprem():
        self.assertRaises(OperationNotSupportedException,
                          self.handle.table_request, self.table_request)
        return
    result = self.handle.table_request(self.table_request)
    self.assertEqual(result.get_table_name(), table_name)
    self.assertEqual(result.get_state(), State.UPDATING)
    if not_cloudsim():
        # CloudSim does not return a schema
        self.assertIsNotNone(result.get_schema())
    # an in-flight operation must carry an operation id
    self.assertIsNotNone(result.get_operation_id())
    self._wait_for_completion(result)
    self.check_table_result(result, State.ACTIVE, table_limits)
    # drop table after modifying the table limits
    request.set_statement(self.drop_tb_statement)
    self._do_table_request(request)
def testAccessTokenProviderOldPath(self):
    """Exercise DefaultAccessTokenProvider against a stub IDCS server
    using the old (PSM) endpoint path: account-scoped and service-scoped
    requests must yield different bearer tokens."""
    account_at = 'account-at'
    service_at = 'service-at'

    class TokenHandler(SimpleHTTPRequestHandler):
        # Stub IDCS: serves PSM info on GET, access tokens on POST.

        def do_GET(self):
            rawpath = self.path.split('?')[0]
            if rawpath == APP_ENDPOINT:
                self.send_response(codes.ok)
                self.send_header('Content-Type', 'application/json')
                self.send_header('Content-Length', str(len(PSM_INFO)))
                self.end_headers()
                self.wfile.write(PSM_INFO.encode())

        def do_POST(self):
            rawpath = self.path.split('?')[0]
            if rawpath == TOKEN_ENDPOINT:
                content = self.rfile.read(
                    int(self.headers['Content-Length']))
                content = unquote(content.decode())
                # service-scope token requests mention 'andc' in the body
                if 'andc' in content:
                    res = str.format(TOKEN_RESULT, service_at)
                else:
                    res = str.format(TOKEN_RESULT, account_at)
                self.send_response(codes.ok)
                self.send_header('Content-Type', 'application/json')
                self.send_header('Content-Length', str(len(res)))
                self.end_headers()
                self.wfile.write(res.encode())

    httpd, port = self.__find_port_start_server(TokenHandler)
    self.base = 'http://localhost:' + str(port)
    self.token_provider = DefaultAccessTokenProvider(
        idcs_url=self.base, entitlement_id='123456789',
        creds_provider=self.creds_provider)
    # get authorization string for ListTablesRequest
    result = self.token_provider.get_authorization_string(
        ListTablesRequest())
    self.assertIsNotNone(result)
    self.assertEqual(result, 'Bearer ' + account_at)
    # get authorization string for TableRequest
    result = self.token_provider.get_authorization_string(TableRequest())
    self.assertIsNotNone(result)
    self.assertEqual(result, 'Bearer ' + service_at)
    self.__stop_server(httpd)
def setUpClass(cls):
    """Create a 16-hour-TTL table, insert one fully-typed row, and record
    the expected expiration time as module globals for the TTL tests."""
    TestBase.set_up_class()
    table_ttl = TimeToLive.of_hours(16)
    create_statement = (
        'CREATE TABLE ' + table_name + '(fld_sid INTEGER, fld_id INTEGER, \
fld_long LONG, fld_float FLOAT, fld_double DOUBLE, fld_bool BOOLEAN, \
fld_str STRING, fld_bin BINARY, fld_time TIMESTAMP(7), fld_num NUMBER, \
fld_json JSON, fld_arr ARRAY(STRING), fld_map MAP(STRING), \
fld_rec RECORD(fld_id LONG, fld_bool BOOLEAN, fld_str STRING), \
PRIMARY KEY(SHARD(fld_sid), fld_id)) USING TTL ' + str(table_ttl))
    create_request = TableRequest().set_statement(
        create_statement).set_table_limits(TableLimits(5000, 5000, 50))
    cls._result = TestBase.table_request(create_request, State.ACTIVE)
    global row, tb_expect_expiration, hour_in_milliseconds
    row = {
        'fld_sid': 1,
        'fld_id': 1,
        'fld_long': 2147483648,
        'fld_float': 3.1414999961853027,
        'fld_double': 3.1415,
        'fld_bool': True,
        'fld_str': '{"name": u1, "phone": null}',
        'fld_bin': bytearray(pack('>i', 4)),
        'fld_time': datetime.now(),
        'fld_num': Decimal(5),
        'fld_json': {'a': '1', 'b': None, 'c': '3'},
        'fld_arr': ['a', 'b', 'c'],
        'fld_map': {'a': '1', 'b': '2', 'c': '3'},
        'fld_rec': {'fld_id': 1, 'fld_bool': False, 'fld_str': None}}
    put_request = PutRequest().set_value(row).set_table_name(table_name)
    cls._handle.put(put_request)
    # expiration computed relative to the put time, in milliseconds
    tb_expect_expiration = table_ttl.to_expiration_time(
        int(round(time() * 1000)))
    hour_in_milliseconds = 60 * 60 * 1000
def testRealCloudGetAuthorizationStringAndToken(self):
    """Fetch authorization strings and raw tokens from a real IDCS."""
    generate_properties_file(idcs_url(), credentials_file)
    self.token_provider = DefaultAccessTokenProvider(
        idcs_props_file=properties_file)
    # Account-scoped authorization (ListTablesRequest path).
    self.assertIsNotNone(
        self.token_provider.get_authorization_string(ListTablesRequest()))
    # Service-scoped authorization (TableRequest path).
    self.assertIsNotNone(
        self.token_provider.get_authorization_string(TableRequest()))
    # Raw account and service access tokens.
    self.assertIsNotNone(self.token_provider.get_account_access_token())
    self.assertIsNotNone(self.token_provider.get_service_access_token())
def testAccessTokenProviderGetAuthorizationString(self):
    """Authorization strings from the shared stub token server: account
    token for ListTablesRequest, service token for TableRequest."""
    # configure the shared TokenHandler's responses via module globals
    global GET_INFO, POST_INFO
    GET_INFO = CLIENT_INFO
    POST_INFO = None
    httpd, port = self._find_port_start_server(TokenHandler)
    self.base = 'http://localhost:' + str(port)
    self.token_provider = DefaultAccessTokenProvider(
        idcs_url=self.base, creds_provider=self.creds_provider)
    # get authorization string for ListTablesRequest
    result = self.token_provider.get_authorization_string(
        ListTablesRequest())
    self.assertIsNotNone(result)
    self.assertEqual(result, 'Bearer ' + ACCOUNT_AT)
    # get authorization string for TableRequest
    result = self.token_provider.get_authorization_string(TableRequest())
    self.assertIsNotNone(result)
    self.assertEqual(result, 'Bearer ' + SERVICE_AT)
    self._stop_server(httpd)
def testGetTableWithOperationId(self):
    """GetTable using a DROP TABLE operation id; tolerates old/new cloud
    proxy differences in how limits are reported while dropping."""
    drop_request = TableRequest().set_statement('DROP TABLE IF EXISTS ' +
                                                table_name)
    table_result = self.handle.table_request(drop_request)
    self.get_table_request.set_table_name(table_name).set_operation_id(
        table_result.get_operation_id())
    result = self.handle.get_table(self.get_table_request)
    # TODO: A difference between old cloud proxy and new cloud proxy, during
    # DROPPING phase, the table limit is not none for old proxy but none for
    # new proxy.
    self.check_table_result(result, State.DROPPING, check_limit=False)
    if is_minicloud():
        # minicloud path: wait on the original result, accepting either
        # terminal state
        table_result.wait_for_state(self.handle,
                                    [State.ACTIVE, State.DROPPED],
                                    wait_timeout, 1000,
                                    result=table_result)
    else:
        table_result.wait_for_completion(self.handle, wait_timeout, 1000)