def __tx_start(conn, concurrency, isolation, timeout, label):
    """Open a server-side transaction and return its id in the result."""
    fields = [
        ('concurrency', Byte),
        ('isolation', Byte),
        ('timeout', Long),
        ('label', String),
    ]
    params = {
        'concurrency': concurrency,
        'isolation': isolation,
        'timeout': timeout,
        'label': label,
    }
    return query_perform(
        Query(OP_TX_START, fields),
        conn,
        query_params=params,
        response_config=[('tx_id', Int)],
    )
def cache_get_names(connection: 'Connection', query_id=None) -> 'APIResult':
    """
    Gets existing cache names.

    :param connection: connection to GridGain server,
    :param query_id: (optional) a value generated by client and returned
     as-is in response.query_id. When the parameter is omitted, a random
     value is generated,
    :return: API result data object. Contains zero status and a list of
     cache names, non-zero status and an error description otherwise.
    """
    query_struct = Query(OP_CACHE_GET_NAMES, query_id=query_id)
    result = query_struct.perform(
        connection,
        response_config=[('cache_names', StringArray)],
    )
    # On success, flatten the single-key response into a plain list.
    if not result.status:
        result.value = result.value['cache_names']
    return result
def __cache_put_all(connection, cache_info, pairs):
    """Store a whole dict of key/value pairs in one request."""
    query_struct = Query(
        OP_CACHE_PUT_ALL,
        [('cache_info', CacheInfo), ('data', Map)],
    )
    params = {'cache_info': cache_info, 'data': pairs}
    return query_perform(query_struct, connection, query_params=params)
def __cache_put(connection, cache_info, key, value, key_hint, value_hint):
    """Put a single key/value pair, honoring optional type hints."""
    fields = [
        ('cache_info', CacheInfo),
        # Fall back to dynamic typing when no explicit hint is given.
        ('key', key_hint or AnyDataObject),
        ('value', value_hint or AnyDataObject),
    ]
    return query_perform(
        Query(OP_CACHE_PUT, fields),
        connection,
        query_params={'cache_info': cache_info, 'key': key, 'value': value},
    )
def __scan_cursor_get_page(conn, cursor):
    """Fetch the next page of an open scan-query cursor."""
    query_struct = Query(OP_QUERY_SCAN_CURSOR_GET_PAGE, [('cursor', Long)])
    return query_perform(
        query_struct,
        conn,
        query_params={'cursor': cursor},
        response_config=[('data', Map), ('more', Bool)],
        post_process_fun=__query_result_post_process,
    )
def __sql_fields(conn, cache_info, query_str, page_size, query_args, schema,
                 statement_type, distributed_joins, local, replicated_only,
                 enforce_join_order, collocated, lazy, include_field_names,
                 max_rows, timeout):
    """Run an SQL fields query; the response is cursor-backed."""
    # Wire order of the request fields is fixed by the protocol.
    request_fields = [
        ('cache_info', CacheInfo),
        ('schema', String),
        ('page_size', Int),
        ('max_rows', Int),
        ('query_str', String),
        ('query_args', AnyDataArray()),
        ('statement_type', StatementType),
        ('distributed_joins', Bool),
        ('local', Bool),
        ('replicated_only', Bool),
        ('enforce_join_order', Bool),
        ('collocated', Bool),
        ('lazy', Bool),
        ('timeout', Long),
        ('include_field_names', Bool),
    ]
    params = {
        'cache_info': cache_info,
        'schema': schema,
        'page_size': page_size,
        'max_rows': max_rows,
        'query_str': query_str,
        # Treat an omitted argument list as "no arguments".
        'query_args': [] if query_args is None else query_args,
        'statement_type': statement_type,
        'distributed_joins': distributed_joins,
        'local': local,
        'replicated_only': replicated_only,
        'enforce_join_order': enforce_join_order,
        'collocated': collocated,
        'lazy': lazy,
        'timeout': timeout,
        'include_field_names': include_field_names,
    }
    query_struct = Query(
        OP_QUERY_SQL_FIELDS, request_fields, response_type=SQLResponse,
    )
    return query_perform(
        query_struct,
        conn,
        query_params=params,
        include_field_names=include_field_names,
        has_cursor=True,
    )
def __cache_get_all(connection, cache_info, keys):
    """Retrieve values for several keys at once as a key/value map."""
    query_struct = Query(OP_CACHE_GET_ALL, [
        ('cache_info', CacheInfo),
        ('keys', AnyDataArray()),
    ])
    params = {'cache_info': cache_info, 'keys': keys}
    return query_perform(
        query_struct,
        connection,
        query_params=params,
        response_config=[('data', Map)],
        post_process_fun=__post_process_value_by_key('data'),
    )
def __cache_contains_keys(connection, cache_info, keys):
    """Check whether the cache holds entries for every given key."""
    query_struct = Query(OP_CACHE_CONTAINS_KEYS, [
        ('cache_info', CacheInfo),
        ('keys', AnyDataArray()),
    ])
    params = {'cache_info': cache_info, 'keys': keys}
    return query_perform(
        query_struct,
        connection,
        query_params=params,
        response_config=[('value', Bool)],
        post_process_fun=__post_process_value_by_key('value'),
    )
def __cache_contains_key(connection, cache_info, key, key_hint):
    """Check whether the cache holds an entry for one key."""
    fields = [
        ('cache_info', CacheInfo),
        ('key', key_hint or AnyDataObject),
    ]
    return query_perform(
        Query(OP_CACHE_CONTAINS_KEY, fields),
        connection,
        query_params={'cache_info': cache_info, 'key': key},
        response_config=[('value', Bool)],
        post_process_fun=__post_process_value_by_key('value'),
    )
def __cache_get_and_remove(connection, cache_info, key, key_hint):
    """Remove an entry and return the value it had (null if absent)."""
    fields = [
        ('cache_info', CacheInfo),
        ('key', key_hint or AnyDataObject),
    ]
    return query_perform(
        Query(OP_CACHE_GET_AND_REMOVE, fields),
        connection,
        query_params={'cache_info': cache_info, 'key': key},
        response_config=[('value', AnyDataObject)],
        post_process_fun=__post_process_value_by_key('value'),
    )
def __sql_fields_cursor_get_page(conn, cursor, field_count):
    """Fetch the next page of an SQL fields cursor.

    Each row comes back as a flat struct with one dynamically-typed
    column per field, so the row layout is built from ``field_count``.
    """
    row_layout = [(f'field_{i}', AnyDataObject) for i in range(field_count)]
    query_struct = Query(
        OP_QUERY_SQL_FIELDS_CURSOR_GET_PAGE, [('cursor', Long)],
    )
    return query_perform(
        query_struct,
        conn,
        query_params={'cursor': cursor},
        response_config=[
            ('data', StructArray(row_layout)),
            ('more', Bool),
        ],
        post_process_fun=__post_process_sql_fields_cursor,
    )
def __cluster_set_state(connection, state):
    """Change the cluster state after validating the value and protocol support.

    :raise ClusterError: if ``state`` is not a known ClusterState value,
    :raise NotSupportedByClusterError: if the server protocol predates
     the cluster API.
    """
    if not ClusterState.has_value(state):
        raise ClusterError(f'Unknown cluster state [state={state}]')
    if not connection.protocol_context.is_cluster_api_supported():
        raise NotSupportedByClusterError('Cluster API is not supported by the cluster')
    return query_perform(
        Query(OP_CLUSTER_CHANGE_STATE, [('state', Byte)]),
        connection,
        query_params={'state': state},
    )
def __cache_get_and_put_if_absent(connection, cache_info, key, value, key_hint, value_hint):
    """Put only when the key is absent; return the previously stored value."""
    fields = [
        ('cache_info', CacheInfo),
        ('key', key_hint or AnyDataObject),
        ('value', value_hint or AnyDataObject),
    ]
    params = {'cache_info': cache_info, 'key': key, 'value': value}
    return query_perform(
        Query(OP_CACHE_GET_AND_PUT_IF_ABSENT, fields),
        connection,
        query_params=params,
        response_config=[('value', AnyDataObject)],
        post_process_fun=__post_process_value_by_key('value'),
    )
def __cache_replace(connection, cache_info, key, value, key_hint, value_hint):
    """Replace the value only if the key already exists; result is a success flag."""
    fields = [
        ('cache_info', CacheInfo),
        ('key', key_hint or AnyDataObject),
        ('value', value_hint or AnyDataObject),
    ]
    params = {'cache_info': cache_info, 'key': key, 'value': value}
    return query_perform(
        Query(OP_CACHE_REPLACE, fields),
        connection,
        query_params=params,
        response_config=[('success', Bool)],
        post_process_fun=__post_process_value_by_key('success'),
    )
def __cache_remove_if_equals(connection, cache_info, key, sample, key_hint, sample_hint):
    """Remove the entry only when its stored value equals ``sample``."""
    fields = [
        ('cache_info', CacheInfo),
        ('key', key_hint or AnyDataObject),
        ('sample', sample_hint or AnyDataObject),
    ]
    params = {'cache_info': cache_info, 'key': key, 'sample': sample}
    return query_perform(
        Query(OP_CACHE_REMOVE_IF_EQUALS, fields),
        connection,
        query_params=params,
        response_config=[('success', Bool)],
        post_process_fun=__post_process_value_by_key('success'),
    )
def __cache_get_size(connection, cache_info, peek_modes):
    """Count cache entries, optionally restricted by peek mode(s)."""
    # Normalize to a sequence: None -> all modes, scalar -> one-element list.
    if peek_modes is None:
        modes = []
    elif isinstance(peek_modes, (list, tuple)):
        modes = peek_modes
    else:
        modes = [peek_modes]
    query_struct = Query(OP_CACHE_GET_SIZE, [
        ('cache_info', CacheInfo),
        ('peek_modes', ByteArray),
    ])
    return query_perform(
        query_struct,
        connection,
        query_params={'cache_info': cache_info, 'peek_modes': modes},
        response_config=[('count', Long)],
        post_process_fun=__post_process_value_by_key('count'),
    )
def __cache_get_node_partitions(conn, caches):
    """Request the partition mapping for one cache id or an iterable of ids."""
    cache_list = caches if is_iterable(caches) else [caches]
    query_struct = Query(OP_CACHE_PARTITIONS, [('cache_ids', cache_ids)])
    return query_perform(
        query_struct,
        conn,
        query_params={
            'cache_ids': [{'cache_id': c} for c in cache_list],
        },
        response_config=[
            ('version_major', Long),
            ('version_minor', Int),
            ('partition_mapping', partition_mapping),
        ],
        post_process_fun=__post_process_partitions,
    )
def __cache_local_peek(conn, cache_info, key, key_hint, peek_modes):
    """Peek at a locally cached value without touching stores or remote nodes."""
    # Normalize to a sequence: None -> all modes, scalar -> one-element list.
    if peek_modes is None:
        modes = []
    elif isinstance(peek_modes, (list, tuple)):
        modes = peek_modes
    else:
        modes = [peek_modes]
    query_struct = Query(OP_CACHE_LOCAL_PEEK, [
        ('cache_info', CacheInfo),
        ('key', key_hint or AnyDataObject),
        ('peek_modes', ByteArray),
    ])
    return query_perform(
        query_struct,
        conn,
        query_params={
            'cache_info': cache_info,
            'key': key,
            'peek_modes': modes,
        },
        response_config=[('value', AnyDataObject)],
        post_process_fun=__post_process_value_by_key('value'),
    )
def __scan(conn, cache_info, page_size, partitions, local):
    """Start a scan query; the result carries a cursor, first page and a more-flag."""
    query_struct = Query(OP_QUERY_SCAN, [
        ('cache_info', CacheInfo),
        ('filter', Null),       # remote filters are not supported here
        ('page_size', Int),
        ('partitions', Int),
        ('local', Bool),
    ])
    params = {
        'cache_info': cache_info,
        'filter': None,
        'page_size': page_size,
        'partitions': partitions,
        'local': 1 if local else 0,
    }
    return query_perform(
        query_struct,
        conn,
        query_params=params,
        response_config=[
            ('cursor', Long),
            ('data', Map),
            ('more', Bool),
        ],
        post_process_fun=__query_result_post_process,
    )
def get_binary_type(
    connection: 'Connection', binary_type: Union[str, int], query_id=None,
) -> APIResult:
    """
    Gets the binary type information by type ID.

    :param connection: connection to GridGain server,
    :param binary_type: binary type name or ID,
    :param query_id: (optional) a value generated by client and returned as-is
     in response.query_id. When the parameter is omitted, a random value
     is generated,
    :return: API result data object.
    """
    query_struct = Query(
        OP_GET_BINARY_TYPE,
        [
            ('type_id', Int),
        ],
        query_id=query_id,
    )
    # Serialize and send the request manually instead of using
    # query_struct.perform(): the response layout is dynamic (its tail
    # depends on flags inside the response itself), so it must be parsed
    # piecewise below.
    _, send_buffer = query_struct.from_python({
        'type_id': entity_id(binary_type),
    })
    connection.send(send_buffer)
    # First read only the fixed head: a single 'type_exists' flag.
    response_head_struct = get_response_class(connection)([
        ('type_exists', Bool),
    ])
    response_head_type, recv_buffer = response_head_struct.parse(connection)
    response_head = response_head_type.from_buffer_copy(recv_buffer)
    response_parts = []
    if response_head.type_exists:
        # Body is present only when the type exists on the server.
        resp_body_type, resp_body_buffer = body_struct.parse(connection)
        response_parts.append(('body', resp_body_type))
        resp_body = resp_body_type.from_buffer_copy(resp_body_buffer)
        recv_buffer += resp_body_buffer
        if resp_body.is_enum:
            # Enum literals follow the body only for enum types.
            resp_enum, resp_enum_buffer = enum_struct.parse(connection)
            response_parts.append(('enums', resp_enum))
            recv_buffer += resp_enum_buffer
        # Schema section always follows when the type exists.
        resp_schema_type, resp_schema_buffer = schema_struct.parse(connection)
        response_parts.append(('schema', resp_schema_type))
        recv_buffer += resp_schema_buffer
    # Assemble a ctypes response class from the parts actually received,
    # then re-parse the accumulated buffer in one go.
    response_class = type(
        'GetBinaryTypeResponse',
        (response_head_type,),
        {
            '_pack_': 1,
            '_fields_': response_parts,
        }
    )
    response = response_class.from_buffer_copy(recv_buffer)
    result = APIResult(response)
    if result.status != 0:
        return result
    # Flatten the optional sections into a single result dict.
    result.value = {
        'type_exists': response.type_exists
    }
    if hasattr(response, 'body'):
        result.value.update(body_struct.to_python(response.body))
    if hasattr(response, 'enums'):
        result.value['enums'] = enum_struct.to_python(response.enums)
    if hasattr(response, 'schema'):
        # Reshape schema info to {schema_id: [field_id, ...]}.
        result.value['schema'] = {
            x['schema_id']: [
                z['schema_field_id'] for z in x['schema_fields']
            ]
            for x in schema_struct.to_python(response.schema)
        }
    return result
def put_binary_type(
    connection: 'Connection', type_name: str, affinity_key_field: str=None,
    is_enum=False, schema: dict=None, query_id=None,
) -> APIResult:
    """
    Registers binary type information in cluster.

    :param connection: connection to GridGain server,
    :param type_name: name of the data type being registered,
    :param affinity_key_field: (optional) name of the affinity key field,
    :param is_enum: (optional) register enum if True, binary object otherwise.
     Defaults to False,
    :param schema: (optional) when register enum, pass a dict of enumerated
     parameter names as keys and an integers as values. When register binary
     type, pass a dict of field names: field types. Binary type with no fields
     is OK,
    :param query_id: (optional) a value generated by client and returned as-is
     in response.query_id. When the parameter is omitted, a random value
     is generated,
    :return: API result data object.
    """
    # prepare data
    if schema is None:
        schema = {}
    type_id = entity_id(type_name)
    data = {
        'type_name': type_name,
        'type_id': type_id,
        'affinity_key_field': affinity_key_field,
        'binary_fields': [],
        'is_enum': is_enum,
        'schema': [],
    }
    schema_id = None
    if is_enum:
        # For enums the schema maps literal name -> ordinal; no schema ID.
        data['enums'] = []
        for literal, ordinal in schema.items():
            data['enums'].append({
                'literal': literal,
                'type_id': ordinal,
            })
    else:
        # assemble schema and calculate schema ID in one go
        # (FNV-1 over the 4 little-endian bytes of each field ID;
        # an empty schema gets the special ID 0)
        schema_id = FNV1_OFFSET_BASIS if schema else 0
        for field_name, data_type in schema.items():
            # TODO: check for allowed data types
            field_id = entity_id(field_name)
            data['binary_fields'].append({
                'field_name': field_name,
                'type_id': int.from_bytes(
                    data_type.type_code,
                    byteorder=PROTOCOL_BYTE_ORDER
                ),
                'field_id': field_id,
            })
            # Mix each byte of the field ID into the FNV-1 accumulator,
            # clamping to 32-bit after every multiply.
            schema_id ^= (field_id & 0xff)
            schema_id = int_overflow(schema_id * FNV1_PRIME)
            schema_id ^= ((field_id >> 8) & 0xff)
            schema_id = int_overflow(schema_id * FNV1_PRIME)
            schema_id ^= ((field_id >> 16) & 0xff)
            schema_id = int_overflow(schema_id * FNV1_PRIME)
            schema_id ^= ((field_id >> 24) & 0xff)
            schema_id = int_overflow(schema_id * FNV1_PRIME)
        # A single schema entry lists every field's ID under the computed ID.
        data['schema'].append({
            'schema_id': schema_id,
            'schema_fields': [
                {'schema_field_id': entity_id(x)} for x in schema
            ],
        })
    # do query
    # The request layout differs only by the presence of the 'enums' section.
    if is_enum:
        query_struct = Query(
            OP_PUT_BINARY_TYPE,
            [
                ('type_id', Int),
                ('type_name', String),
                ('affinity_key_field', String),
                ('binary_fields', binary_fields_struct),
                ('is_enum', Bool),
                ('enums', enum_struct),
                ('schema', schema_struct),
            ],
            query_id=query_id,
        )
    else:
        query_struct = Query(
            OP_PUT_BINARY_TYPE,
            [
                ('type_id', Int),
                ('type_name', String),
                ('affinity_key_field', String),
                ('binary_fields', binary_fields_struct),
                ('is_enum', Bool),
                ('schema', schema_struct),
            ],
            query_id=query_id,
        )
    result = query_struct.perform(connection, query_params=data)
    if result.status == 0:
        # Hand back the computed IDs so the caller can cache them.
        result.value = {
            'type_id': type_id,
            'schema_id': schema_id,
        }
    return result
def cache_replace_if_equals(
    connection: 'Connection', cache: Union[str, int], key: Any, sample: Any,
    value: Any, key_hint: 'GridGainDataType' = None,
    sample_hint: 'GridGainDataType' = None,
    value_hint: 'GridGainDataType' = None,
    binary: bool = False, query_id: Optional[int] = None,
) -> 'APIResult':
    """
    Puts a value with a given key to cache only if the key already exists
    and value equals provided sample.

    :param connection: connection to GridGain server,
    :param cache: name or ID of the cache,
    :param key: key for the cache entry,
    :param sample: a sample to compare the stored value with,
    :param value: new value for the given key,
    :param key_hint: (optional) GridGain data type, for which the given key
     should be converted,
    :param sample_hint: (optional) GridGain data type, for which the given
     sample should be converted
    :param value_hint: (optional) GridGain data type, for which the given value
     should be converted,
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned as-is
     in response.query_id. When the parameter is omitted, a random value
     is generated,
    :return: API result data object. Contains zero status and a boolean
     success code, or non-zero status and an error description if something
     has gone wrong.
    """
    query_struct = Query(
        OP_CACHE_REPLACE_IF_EQUALS,
        [
            ('hash_code', Int),
            ('flag', Byte),
            # Unhinted values are serialized as dynamically-typed objects.
            ('key', key_hint or AnyDataObject),
            ('sample', sample_hint or AnyDataObject),
            ('value', value_hint or AnyDataObject),
        ],
        query_id=query_id,
    )
    result = query_struct.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': 1 if binary else 0,
            'key': key,
            'sample': sample,
            'value': value,
        },
        response_config=[
            ('success', Bool),
        ],
    )
    # On success, flatten the single-key response to a bare boolean.
    if result.status == 0:
        result.value = result.value['success']
    return result
def cache_local_peek(
    conn: 'Connection', cache: Union[str, int], key: Any,
    key_hint: 'GridGainDataType' = None, peek_modes: int = 0,
    binary: bool = False, query_id: Optional[int] = None,
) -> 'APIResult':
    """
    Peeks at in-memory cached value using default optional peek mode.

    This method will not load value from any persistent store or from
    a remote node.

    :param conn: connection: connection to GridGain server,
    :param cache: name or ID of the cache,
    :param key: entry key,
    :param key_hint: (optional) GridGain data type, for which the given key
     should be converted,
    :param peek_modes: (optional) limit count to near cache partition
     (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache
     (PeekModes.BACKUP). Defaults to all cache partitions (PeekModes.ALL),
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned
     as-is in response.query_id. When the parameter is omitted, a random
     value is generated,
    :return: API result data object. Contains zero status and a peeked value
     (null if not found).
    """
    # Normalize scalar input: 0 means "all partitions" and maps to no modes.
    if not isinstance(peek_modes, (list, tuple)):
        peek_modes = [] if peek_modes == 0 else [peek_modes]
    query_struct = Query(
        OP_CACHE_LOCAL_PEEK,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
            ('peek_modes', PeekModes),
        ],
        query_id=query_id,
    )
    result = query_struct.perform(
        conn,
        query_params={
            'hash_code': cache_id(cache),
            'flag': 1 if binary else 0,
            'key': key,
            'peek_modes': peek_modes,
        },
        response_config=[('value', AnyDataObject)],
    )
    if result.status == 0:
        result.value = result.value['value']
    return result
def __cache_create_with_name(op_code, conn, name):
    """Create (or get-or-create, depending on op_code) a cache by name."""
    query_struct = Query(op_code, [('cache_name', String)])
    params = {'cache_name': name}
    return query_perform(query_struct, conn, query_params=params)
def cache_get_node_partitions(
    conn: 'Connection', caches: Union[int, Iterable[int]],
    query_id: int = None,
) -> APIResult:
    """
    Gets partition mapping for a GridGain cache or a number of caches. See
    “IEP-23: Best Effort Affinity for thin clients”.

    :param conn: connection to GridGain server,
    :param caches: cache ID(s) the mapping is provided for,
    :param query_id: (optional) a value generated by client and returned
     as-is in response.query_id. When the parameter is omitted, a random
     value is generated,
    :return: API result data object.
    """
    cache_list = caches if is_iterable(caches) else [caches]
    query_struct = Query(
        OP_CACHE_PARTITIONS,
        [('cache_ids', cache_ids)],
        query_id=query_id,
    )
    result = query_struct.perform(
        conn,
        query_params={
            'cache_ids': [{'cache_id': c} for c in cache_list],
        },
        response_config=[
            ('version_major', Long),
            ('version_minor', Int),
            ('partition_mapping', partition_mapping),
        ],
    )
    if result.status == 0:
        # Reshape the raw response into one entry per cache group.
        mappings = []
        for pmap in result.value['partition_mapping']:
            first_cache = pmap['cache_mapping'][0]
            entry = {
                'cache_id': first_cache['cache_id'],
                'is_applicable': pmap['is_applicable'],
            }
            if pmap['is_applicable']:
                # key_type_id -> affinity_key_field_id
                entry['cache_config'] = {
                    cfg['key_type_id']: cfg['affinity_key_field_id']
                    for cfg in first_cache['cache_config']
                }
                # node_uuid -> [partition_id, ...]
                entry['node_mapping'] = {
                    node['node_uuid']: [
                        part['partition_id']
                        for part in node['node_partitions']
                    ]
                    for node in pmap['node_mapping']
                }
            mappings.append(entry)
        result.value = {
            'version': (
                result.value['version_major'],
                result.value['version_minor'],
            ),
            'partition_mapping': mappings,
        }
    return result
def __cache_destroy(connection, cache):
    """Destroy a cache given its name or numeric id."""
    query_struct = Query(OP_CACHE_DESTROY, [('cache_id', Int)])
    params = {'cache_id': cache_id(cache)}
    return query_perform(query_struct, connection, query_params=params)
def __cache_get_names(connection):
    """List the names of the caches that exist on the server."""
    return query_perform(
        Query(OP_CACHE_GET_NAMES),
        connection,
        response_config=[('cache_names', StringArray)],
        post_process_fun=__post_process_cache_names,
    )
def sql(
    conn: 'Connection', cache: Union[str, int], table_name: str,
    query_str: str, page_size: int, query_args=None,
    distributed_joins: bool = False, replicated_only: bool = False,
    local: bool = False, timeout: int = 0, binary: bool = False,
    query_id: int = None
) -> APIResult:
    """
    Executes an SQL query over data stored in the cluster. The query returns
    the whole record (key and value).

    :param conn: connection to GridGain server,
    :param cache: name or ID of the cache,
    :param table_name: name of a type or SQL table,
    :param query_str: SQL query string,
    :param page_size: cursor page size,
    :param query_args: (optional) query arguments,
    :param distributed_joins: (optional) distributed joins. Defaults to False,
    :param replicated_only: (optional) whether query contains only replicated
     tables or not. Defaults to False,
    :param local: (optional) pass True if this query should be executed
     on local node only. Defaults to False,
    :param timeout: (optional) non-negative timeout value in ms. Zero disables
     timeout (default),
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned
     as-is in response.query_id. When the parameter is omitted, a random
     value is generated,
    :return: API result data object. Contains zero status and a value of type
     dict with results on success, non-zero status and an error description
     otherwise. Value dict is of following format:

     * `cursor`: int, cursor ID,
     * `data`: dict, result rows as key-value pairs,
     * `more`: bool, True if more data is available for subsequent
       ‘sql_get_page’ calls.
    """
    params = {
        'hash_code': cache_id(cache),
        'flag': 1 if binary else 0,
        'table_name': table_name,
        'query_str': query_str,
        # Treat an omitted argument list as "no arguments".
        'query_args': [] if query_args is None else query_args,
        'distributed_joins': 1 if distributed_joins else 0,
        'local': 1 if local else 0,
        'replicated_only': 1 if replicated_only else 0,
        'page_size': page_size,
        'timeout': timeout,
    }
    query_struct = Query(
        OP_QUERY_SQL,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('table_name', String),
            ('query_str', String),
            ('query_args', AnyDataArray()),
            ('distributed_joins', Bool),
            ('local', Bool),
            ('replicated_only', Bool),
            ('page_size', Int),
            ('timeout', Long),
        ],
        query_id=query_id,
    )
    result = query_struct.perform(
        conn,
        query_params=params,
        response_config=[
            ('cursor', Long),
            ('data', Map),
            ('more', Bool),
        ],
    )
    if result.status == 0:
        result.value = dict(result.value)
    return result
def sql_fields(
    conn: 'Connection', cache: Union[str, int],
    query_str: str, page_size: int, query_args=None, schema: str = None,
    statement_type: int = StatementType.ANY, distributed_joins: bool = False,
    local: bool = False, replicated_only: bool = False,
    enforce_join_order: bool = False, collocated: bool = False,
    lazy: bool = False, include_field_names: bool = False,
    max_rows: int = -1, timeout: int = 0, binary: bool = False,
    query_id: int = None
) -> APIResult:
    """
    Performs SQL fields query.

    :param conn: connection to GridGain server,
    :param cache: name or ID of the cache,
    :param query_str: SQL query string,
    :param page_size: cursor page size,
    :param query_args: (optional) query arguments. List of values or
     (value, type hint) tuples,
    :param schema: (optional) schema for the query. Defaults to `PUBLIC`,
    :param statement_type: (optional) statement type. Can be:

     * StatementType.ANY − any type (default),
     * StatementType.SELECT − select,
     * StatementType.UPDATE − update.

    :param distributed_joins: (optional) distributed joins. Defaults to False,
    :param local: (optional) pass True if this query should be executed
     on local node only. Defaults to False,
    :param replicated_only: (optional) whether query contains only
     replicated tables or not. Defaults to False,
    :param enforce_join_order: (optional) enforce join order. Defaults
     to False,
    :param collocated: (optional) whether your data is co-located or not.
     Defaults to False,
    :param lazy: (optional) lazy query execution. Defaults to False,
    :param include_field_names: (optional) include field names in result.
     Defaults to False,
    :param max_rows: (optional) query-wide maximum of rows. Defaults to -1
     (all rows),
    :param timeout: (optional) non-negative timeout value in ms. Zero disables
     timeout (default),
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned as-is
     in response.query_id. When the parameter is omitted, a random value
     is generated,
    :return: API result data object. Contains zero status and a value of type
     dict with results on success, non-zero status and an error description
     otherwise. Value dict is of following format:

     * `cursor`: int, cursor ID,
     * `data`: list, result values,
     * `more`: bool, True if more data is available for subsequent
       ‘sql_fields_cursor_get_page’ calls.
    """
    if query_args is None:
        query_args = []
    query_struct = Query(
        OP_QUERY_SQL_FIELDS,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('schema', String),
            ('page_size', Int),
            ('max_rows', Int),
            ('query_str', String),
            ('query_args', AnyDataArray()),
            ('statement_type', StatementType),
            ('distributed_joins', Bool),
            ('local', Bool),
            ('replicated_only', Bool),
            ('enforce_join_order', Bool),
            ('collocated', Bool),
            ('lazy', Bool),
            ('timeout', Long),
            ('include_field_names', Bool),
        ],
        query_id=query_id,
    )
    # Response handling is delegated to the cursor machinery (sql=True,
    # has_cursor=True), so no response_config is given here.
    return query_struct.perform(
        conn,
        query_params={
            'hash_code': cache_id(cache),
            'flag': 1 if binary else 0,
            'schema': schema,
            'page_size': page_size,
            'max_rows': max_rows,
            'query_str': query_str,
            'query_args': query_args,
            'statement_type': statement_type,
            'distributed_joins': distributed_joins,
            'local': local,
            'replicated_only': replicated_only,
            'enforce_join_order': enforce_join_order,
            'collocated': collocated,
            'lazy': lazy,
            'timeout': timeout,
            'include_field_names': include_field_names,
        },
        sql=True,
        include_field_names=include_field_names,
        has_cursor=True,
    )
def scan(
    conn: 'Connection', cache: Union[str, int], page_size: int,
    partitions: int = -1, local: bool = False, binary: bool = False,
    query_id: int = None,
) -> APIResult:
    """
    Performs scan query.

    :param conn: connection to GridGain server,
    :param cache: name or ID of the cache,
    :param page_size: cursor page size,
    :param partitions: (optional) number of partitions to query (negative
     to query entire cache),
    :param local: (optional) pass True if this query should be executed
     on local node only. Defaults to False,
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned
     as-is in response.query_id. When the parameter is omitted, a random
     value is generated,
    :return: API result data object. Contains zero status and a value of type
     dict with results on success, non-zero status and an error description
     otherwise. Value dict is of following format:

     * `cursor`: int, cursor ID,
     * `data`: dict, result rows as key-value pairs,
     * `more`: bool, True if more data is available for subsequent
       ‘scan_cursor_get_page’ calls.
    """
    params = {
        'hash_code': cache_id(cache),
        'flag': 1 if binary else 0,
        'filter': None,     # remote filters are not supported here
        'page_size': page_size,
        'partitions': partitions,
        'local': 1 if local else 0,
    }
    query_struct = Query(
        OP_QUERY_SCAN,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('filter', Null),
            ('page_size', Int),
            ('partitions', Int),
            ('local', Bool),
        ],
        query_id=query_id,
    )
    result = query_struct.perform(
        conn,
        query_params=params,
        response_config=[
            ('cursor', Long),
            ('data', Map),
            ('more', Bool),
        ],
    )
    if result.status == 0:
        result.value = dict(result.value)
    return result