Ejemplo n.º 1
0
def client(connection_param):
    """Fixture: yield a partition-aware Client connected via *connection_param*.

    The connection is always closed on teardown, even if connect fails after
    partially succeeding.
    """
    ignite = Client(partition_aware=True, timeout=CLIENT_SOCKET_TIMEOUT)
    try:
        ignite.connect(connection_param)
        yield ignite
    finally:
        ignite.close()
Ejemplo n.º 2
0
def client():
    """Fixture: yield a partition-aware Client connected to 127.0.0.1:10801."""
    ignite = Client(partition_aware=True)
    try:
        ignite.connect('127.0.0.1', 10801)
        yield ignite
    finally:
        # Guaranteed close, even on connect failure mid-handshake.
        ignite.close()
Ejemplo n.º 3
0
def client():
    """Fixture: yield a default-configured Client connected to 127.0.0.1:10801."""
    ignite = Client()
    try:
        ignite.connect('127.0.0.1', 10801)
        yield ignite
    finally:
        ignite.close()
Ejemplo n.º 4
0
def client():
    """Fixture: yield a Client wired with a QueryRouteListener.

    On teardown the connection is closed and the shared ``events`` accumulator
    is reset so later tests start from a clean slate.
    """
    ignite = Client(event_listeners=[QueryRouteListener()])
    try:
        ignite.connect('127.0.0.1', 10801)
        yield ignite
    finally:
        ignite.close()
        events.clear()
Ejemplo n.º 5
0
def client():
    """Fixture: partition-aware Client with a QueryRouteListener attached.

    Teardown clears the recorded ``requests`` before closing the connection,
    matching the original ordering.
    """
    ignite = Client(partition_aware=True,
                    event_listeners=[QueryRouteListener()])
    try:
        ignite.connect(client_connection_string)
        yield ignite
    finally:
        requests.clear()
        ignite.close()
Ejemplo n.º 6
0
 def sync_client_connect():
     """Attempt a synchronous connect with a 3s handshake timeout.

     Returns a ``(elapsed_seconds, listener_events, error_or_None)`` tuple so the
     caller can assert on both timing and the recorded handshake events.
     """
     listener = HandshakeTimeoutListener()
     conn = Client(handshake_timeout=3.0, event_listeners=[listener])
     started = time.monotonic()
     try:
         conn.connect(DEFAULT_HOST, DEFAULT_PORT)
     except Exception as exc:
         return time.monotonic() - started, listener.events, exc
     return time.monotonic() - started, listener.events, None
def client(request, connection_param):
    """Fixture: yield a Client, skipping the test when transactions are unsupported.

    Partition awareness is toggled by the fixture param
    ``'with-partition-awareness'``.
    """
    aware = request.param == 'with-partition-awareness'
    ignite = Client(partition_aware=aware)
    try:
        ignite.connect(connection_param)
        if not ignite.protocol_context.is_transactions_supported():
            pytest.skip(f'skipped {request.node.name}, transaction api is not supported.')
        else:
            yield ignite
    finally:
        ignite.close()
Ejemplo n.º 8
0
 def sync_client_connect():
     """Connect, check every node is alive, then issue one real request.

     Returns ``(elapsed_seconds, listener_events, error_or_None)``; any failure
     along the way is captured and returned rather than raised.
     """
     listener = HandshakeTimeoutListener()
     conn = Client(timeout=5.0, handshake_timeout=3.0, event_listeners=[listener])
     started = time.monotonic()
     try:
         conn.connect(DEFAULT_HOST, DEFAULT_PORT)
         assert all(n.alive for n in conn._nodes)
         conn.get_cache_names()
     except Exception as exc:
         return time.monotonic() - started, listener.events, exc
     return time.monotonic() - started, listener.events, None
Ejemplo n.º 9
0
    def inner():
        """Open a client with the enclosing ``kwargs`` and round-trip one entry."""
        c = Client(**kwargs)
        with c.connect("127.0.0.1", 10801):
            with get_or_create_cache(c, 'test-cache') as cache:
                cache.put(1, 1)
                assert cache.get(1) == 1
def test_connection_context(connection_param, partition_aware):
    """Verify open/closed state via both the context-manager and explicit forms."""
    aware = partition_aware == 'with_partition_aware'
    conn = Client(partition_aware=aware)

    # Context-manager form: closed automatically on exit.
    with conn.connect(connection_param):
        __check_open(conn, aware)
    __check_closed(conn)

    # Explicit connect/close form.
    try:
        conn.connect(connection_param)
        __check_open(conn, aware)
    finally:
        conn.close()
        __check_closed(conn)
Ejemplo n.º 11
0
def skip_if_no_affinity(request, server1):
    """Skip the current test when the protocol lacks partition awareness."""
    probe = Client(partition_aware=True)
    with probe.connect('127.0.0.1', 10801):
        if not probe.partition_awareness_supported_by_protocol:
            pytest.skip(
                f'skipped {request.node.name}, partition awareness is not supported.'
            )
Ejemplo n.º 12
0
def expiry_policy_supported(request, server1):
    """Return True when the cluster supports the ExpiryPolicy API.

    When the API is unavailable and the test carries the
    ``skip_if_no_expiry_policy`` marker, the test is skipped instead.
    """
    client = Client()
    with client.connect('127.0.0.1', 10801):
        result = client.protocol_context.is_expiry_policy_supported()
        if not result and request.node.get_closest_marker('skip_if_no_expiry_policy'):
            # Fixed typo in the skip message: "APIis" -> "API is".
            pytest.skip(f'skipped {request.node.name}, ExpiryPolicy API is not supported.')

        return result
Ejemplo n.º 13
0
def test_connection_error_with_incorrect_config(invalid_ssl_params, caplog):
    """An invalid SSL config must fail the handshake with ReconnectError."""
    listener = AccumulatingConnectionListener()
    with pytest.raises(ReconnectError):
        bad_client = Client(event_listeners=[listener], **invalid_ssl_params)
        with bad_client.connect([("127.0.0.1", 10801)]):
            pass

    # Failure must be visible both in the log and through the listener.
    __assert_handshake_failed_log(caplog)
    __assert_handshake_failed_listener(listener)
Ejemplo n.º 14
0
def test_get_large_value(buf_len):
    """A value larger than the patched recv buffer must round-trip intact."""
    patched = patched_recv_into_factory(buf_len)
    with mock.patch.object(socket.socket, 'recv_into', new=patched):
        c = Client()
        with c.connect("127.0.0.1", 10801):
            with get_or_create_cache(c, 'test') as cache:
                # Just over 64 KiB of hex text forces multi-chunk reads.
                payload = secrets.token_hex((1 << 16) + 1)
                cache.put(1, payload)
                assert payload == cache.get(1)
Ejemplo n.º 15
0
def test_auth_failed(username, password, with_ssl, ssl_params, caplog):
    """Bad credentials must raise AuthenticationError and report the failure."""
    ssl_params['use_ssl'] = with_ssl
    listener = AccumulatingConnectionListener()
    with pytest.raises(AuthenticationError):
        failing = Client(username=username,
                         password=password,
                         event_listeners=[listener],
                         **ssl_params)
        with failing.connect("127.0.0.1", 10801):
            pass

    # The auth failure must show up in the log and in listener events.
    __assert_auth_failed_log(caplog)
    __assert_auth_failed_listener(listener)
Ejemplo n.º 16
0
def client(
    node,
    timeout,
    affinity_aware,
    use_ssl,
    ssl_keyfile,
    ssl_certfile,
    ssl_ca_certfile,
    ssl_cert_reqs,
    ssl_ciphers,
    ssl_version,
    username,
    password,
):
    """Fixture: a fully-configured, connected Client.

    On teardown every cache on a random node is destroyed before the
    connection is closed, leaving the cluster clean for the next test.
    """
    conn = Client(
        timeout=timeout,
        affinity_aware=affinity_aware,
        use_ssl=use_ssl,
        ssl_keyfile=ssl_keyfile,
        ssl_certfile=ssl_certfile,
        ssl_ca_certfile=ssl_ca_certfile,
        ssl_cert_reqs=ssl_cert_reqs,
        ssl_ciphers=ssl_ciphers,
        ssl_version=ssl_version,
        username=username,
        password=password,
    )
    # Parse 'host:port' strings into (host, int(port)) endpoint tuples.
    endpoints = [(host, int(port))
                 for host, port in (entry.split(':') for entry in node)]
    conn.connect(endpoints)
    yield conn
    # Teardown: drop all caches, then close.
    random_conn = conn.random_node
    for cache_name in cache_get_names(random_conn).value:
        cache_destroy(random_conn, cache_name)
    conn.close()
Ejemplo n.º 17
0
def test_auth_success(with_ssl, ssl_params, caplog):
    """Valid credentials must connect cleanly and emit the expected log/events."""
    ssl_params['use_ssl'] = with_ssl
    listener = AccumulatingConnectionListener()
    authed = Client(username=DEFAULT_IGNITE_USERNAME,
                    password=DEFAULT_IGNITE_PASSWORD,
                    event_listeners=[listener],
                    **ssl_params)
    # Capture DEBUG-level records so connect logging can be asserted on.
    with caplog.at_level(logger='pygridgain', level=logging.DEBUG):
        with authed.connect("127.0.0.1", 10801):
            assert all(node.alive for node in authed._nodes)
            conn = authed._nodes[0]

        __assert_successful_connect_log(conn, caplog)
        __assert_successful_connect_events(conn, listener)
Ejemplo n.º 18
0
def sync_example():
    """Transactions demo: explicit commit, rollback via exception, rollback on timeout.

    Requires a server with a TRANSACTIONAL cache; the cache is destroyed at the end.
    """
    client = Client()
    with client.connect('127.0.0.1', 10800):
        cache = client.get_or_create_cache({
            PROP_NAME: 'tx_cache',
            PROP_CACHE_ATOMICITY_MODE: CacheAtomicityMode.TRANSACTIONAL
        })

        # starting transaction
        key = 1
        with client.tx_start(
                isolation=TransactionIsolation.REPEATABLE_READ,
                concurrency=TransactionConcurrency.PESSIMISTIC
        ) as tx:
            cache.put(key, 'success')
            tx.commit()

        # key=1 value=success
        print(f"key={key} value={cache.get(key)}")

        # rollback transaction: leaving the `with` block via an exception
        # (without committing) rolls the put back.
        try:
            with client.tx_start(
                    isolation=TransactionIsolation.REPEATABLE_READ,
                    concurrency=TransactionConcurrency.PESSIMISTIC
            ):
                cache.put(key, 'fail')
                raise RuntimeError('test')
        except RuntimeError:
            pass

        # key=1 value=success
        print(f"key={key} value={cache.get(key)}")

        # rollback transaction on timeout: sleeping past the 1000 ms timeout
        # makes the commit fail with CacheError.
        try:
            with client.tx_start(timeout=1000, label='long-tx') as tx:
                cache.put(key, 'fail')
                time.sleep(2.0)
                tx.commit()
        except CacheError as e:
            # Cache transaction timed out: GridNearTxLocal[...timeout=1000, ... label=long-tx]
            print(e)

        # key=1 value=success
        print(f"key={key} value={cache.get(key)}")

        # destroy cache
        cache.destroy()
Ejemplo n.º 19
0
def test_client_with_failed_server(request, with_partition_awareness):
    """After the only server dies, requests must raise a connection error."""
    srv = start_ignite(idx=4)
    try:
        conn = Client(partition_aware=with_partition_awareness)
        with conn.connect([("127.0.0.1", 10804)]):
            cache = conn.get_or_create_cache(request.node.name)
            cache.put(1, 1)
            kill_process_tree(srv.pid)

            # A partition-aware client may also surface ReconnectError.
            if with_partition_awareness:
                expected = (ReconnectError, ConnectionResetError)
            else:
                expected = ConnectionResetError

            with pytest.raises(expected):
                cache.get(1)
    finally:
        kill_process_tree(srv.pid)
Ejemplo n.º 20
0
def test_cluster_set_state(with_persistence):
    """Walk the cluster through ACTIVE / ACTIVE_READ_ONLY / INACTIVE and check cache access.

    With persistence enabled a cluster starts INACTIVE; otherwise it is ACTIVE
    from the beginning.
    """
    key = 42
    val = 42
    start_state = ClusterState.INACTIVE if with_persistence else ClusterState.ACTIVE

    client = Client()
    with client.connect([("127.0.0.1", 10801), ("127.0.0.1", 10802)]):
        cluster = client.get_cluster()
        assert cluster.get_state() == start_state

        cluster.set_state(ClusterState.ACTIVE)
        assert cluster.get_state() == ClusterState.ACTIVE

        # Invalid / out-of-range state codes must be rejected.
        check_cluster_state_error(cluster, 3)
        check_cluster_state_error(cluster, 42)
        check_cluster_state_error(cluster, 1234567890987654321)
        check_cluster_state_error(cluster, -1)

        cache = client.get_or_create_cache("test_cache")
        cache.put(key, val)
        assert cache.get(key) == val

        # Read-only state: reads succeed, writes fail.
        cluster.set_state(ClusterState.ACTIVE_READ_ONLY)
        assert cluster.get_state() == ClusterState.ACTIVE_READ_ONLY

        assert cache.get(key) == val
        with pytest.raises(CacheError):
            cache.put(key, val + 1)

        # Inactive state: both reads and writes fail.
        cluster.set_state(ClusterState.INACTIVE)
        assert cluster.get_state() == ClusterState.INACTIVE

        with pytest.raises(CacheError):
            cache.get(key)

        with pytest.raises(CacheError):
            cache.put(key, val + 1)

        # Reactivating restores full read/write access.
        cluster.set_state(ClusterState.ACTIVE)
        assert cluster.get_state() == ClusterState.ACTIVE

        cache.put(key, val + 2)
        assert cache.get(key) == val + 2
Ejemplo n.º 21
0
def sync_actions():
    """ExpiryPolicy demo: TTL via cache config and via ``with_expire_policy``.

    Prints entry values before and after the TTL elapses; both caches are
    destroyed at the end. Bails out early when the cluster lacks the API.
    """
    print("Running sync ExpiryPolicy example.")

    client = Client()
    with client.connect('127.0.0.1', 10800):
        print("Create cache with expiry policy.")
        try:
            # 1-second create-TTL configured directly on the cache.
            ttl_cache = client.create_cache({
                PROP_NAME:
                'test',
                PROP_EXPIRY_POLICY:
                ExpiryPolicy(create=timedelta(seconds=1.0))
            })
        except NotSupportedByClusterError:
            print(
                "'ExpiryPolicy' API is not supported by cluster. Finishing...")
            return

        try:
            ttl_cache.put(1, 1)
            time.sleep(0.5)
            print(f"key = {1}, value = {ttl_cache.get(1)}")
            # key = 1, value = 1
            time.sleep(1.2)
            print(f"key = {1}, value = {ttl_cache.get(1)}")
            # key = 1, value = None
        finally:
            ttl_cache.destroy()

        print("Create simple Cache and set TTL through `with_expire_policy`")
        simple_cache = client.create_cache('test')
        try:
            # Same TTL behaviour, but applied through a per-access wrapper.
            ttl_cache = simple_cache.with_expire_policy(access=timedelta(
                seconds=1.0))
            ttl_cache.put(1, 1)
            time.sleep(0.5)
            print(f"key = {1}, value = {ttl_cache.get(1)}")
            # key = 1, value = 1
            time.sleep(1.7)
            print(f"key = {1}, value = {ttl_cache.get(1)}")
            # key = 1, value = None
        finally:
            simple_cache.destroy()
Ejemplo n.º 22
0
def test_client_with_recovered_server(request, with_partition_awareness):
    """The client must recover after its only server is killed and restarted."""
    srv = start_ignite(idx=4)
    try:
        conn = Client(partition_aware=with_partition_awareness, timeout=CLIENT_SOCKET_TIMEOUT)
        with conn.connect([("127.0.0.1", 10804)]):
            cache = conn.get_or_create_cache(request.node.name)
            cache.put(1, 1)

            # Kill and restart server
            kill_process_tree(srv.pid)
            srv = start_ignite(idx=4)

            # The first request after a restart is allowed to fail...
            try:
                cache.put(1, 2)
            except connection_errors:
                pass

            # ...but the retry must succeed.
            cache.put(1, 2)
            assert cache.get(1) == 2
    finally:
        kill_process_tree(srv.pid)
def test_events(request, server2):
    """Killing one node of a two-node cluster must yield a ConnectionLostEvent."""
    client = Client(event_listeners=[RecordingConnectionEventListener()])
    with client.connect([('127.0.0.1', 10800 + idx) for idx in range(1, 3)]):
        protocol_context = client.protocol_context
        nodes = {conn.port: conn for conn in client._nodes}
        cache = client.get_or_create_cache({
            PROP_NAME:
            request.node.name,
            PROP_CACHE_MODE:
            CacheMode.REPLICATED,
        })

        kill_process_tree(server2.pid)

        # Keep issuing requests (up to 100) until the lost connection is noticed;
        # individual failures along the way are expected and ignored.
        for _ in range(0, 100):
            try:
                cache.put(1, 1)
            except:  # noqa 13
                pass

            # NOTE(review): `events` appears to be a module-level accumulator
            # filled by the listener — confirm against the listener definition.
            if any(isinstance(e, ConnectionLostEvent) for e in events):
                break

    __assert_events(nodes, protocol_context)
Ejemplo n.º 24
0
# limitations under the License.
#
from pygridgain import Client
from pygridgain.datatypes.cache_config import CacheMode
from pygridgain.datatypes.prop_codes import *
from pygridgain.exceptions import SocketError


# Candidate server endpoints.
nodes = [
    ('127.0.0.1', 10800),
    ('127.0.0.1', 10801),
    ('127.0.0.1', 10802),
]

# Client with a 4.0-second operation timeout; explicit connect (no context manager).
client = Client(timeout=4.0)
client.connect(nodes)
print('Connected')

# Partitioned cache with two backup copies per entry.
my_cache = client.get_or_create_cache({
    PROP_NAME: 'my_cache',
    PROP_CACHE_MODE: CacheMode.PARTITIONED,
    PROP_BACKUPS_NUMBER: 2,
})
my_cache.put('test_key', 0)
test_value = 0
# abstract main loop
while True:
    try:
        # do the work
        test_value = my_cache.get('test_key') or 0
Ejemplo n.º 25
0
#     'sum': DecimalObject,
#     'recipient': String,
#     'cashier_id': LongObject,
# }


class ExpenseVoucher(
        metaclass=GenericObjectMeta,
        schema=old_schema,
):
    """Binary object class built by GenericObjectMeta from ``old_schema``."""
    pass


client = Client()

with client.connect('127.0.0.1', 10800):
    accounting = client.get_or_create_cache('accounting')

    # Store each sample record as an ExpenseVoucher binary object.
    for item, value in old_data.items():
        print(item)
        accounting.put(item, ExpenseVoucher(**value))

    # The registered type comes back keyed by schema id; the sample output
    # below shows both the server-side and the local class variants.
    data_classes = client.query_binary_type('ExpenseVoucher')
    print(data_classes)
    # {
    #     {547629991: <class 'pygridgain.binary.ExpenseVoucher'>, -231598180: <class '__main__.ExpenseVoucher'>}
    # }

s_id, data_class = data_classes.popitem()
schema = data_class.schema
Ejemplo n.º 26
0
def check_is_transactions_supported():
    """Exit the example early when the cluster lacks the Transactions API."""
    probe = Client()
    with probe.connect('127.0.0.1', 10800):
        if not probe.protocol_context.is_transactions_supported():
            print("'Transactions' API is not supported by cluster. Finishing...")
            exit(0)
Ejemplo n.º 27
0
def cluster_api_supported(request, server1):
    """Skip the current test when the server does not expose the Cluster API."""
    probe = Client()
    with probe.connect('127.0.0.1', 10801):
        if not probe.protocol_context.is_cluster_api_supported():
            pytest.skip(
                f'skipped {request.node.name}, Cluster API is not supported.')
Ejemplo n.º 28
0
#     https://www.gridgain.com/products/software/community-edition/gridgain-community-edition-license
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict

from pygridgain import Client, GenericObjectMeta
from pygridgain.datatypes import DoubleObject, IntObject, String
from pygridgain.datatypes.prop_codes import *

# Explicit connect (no context manager) to a local node.
client = Client()
client.connect('127.0.0.1', 10800)

student_cache = client.create_cache({
    PROP_NAME:
    'SQL_PUBLIC_STUDENT',
    PROP_SQL_SCHEMA:
    'PUBLIC',
    PROP_QUERY_ENTITIES: [
        {
            'table_name':
            'Student'.upper(),
            'key_field_name':
            'SID',
            'key_type_name':
            'java.lang.Integer',
            'field_name_aliases': [],
Ejemplo n.º 29
0
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pygridgain import Client
from pygridgain.datatypes.cache_config import CacheMode
from pygridgain.datatypes.prop_codes import PROP_NAME, PROP_CACHE_MODE, PROP_BACKUPS_NUMBER
from pygridgain.exceptions import SocketError

# Candidate server endpoints.
nodes = [
    ('127.0.0.1', 10800),
    ('127.0.0.1', 10801),
    ('127.0.0.1', 10802),
]

# Context-manager form: the connection is closed when the block exits.
client = Client(timeout=4.0)
with client.connect(nodes):
    print('Connected')

    # Partitioned cache with two backup copies per entry.
    my_cache = client.get_or_create_cache({
        PROP_NAME: 'my_cache',
        PROP_CACHE_MODE: CacheMode.PARTITIONED,
        PROP_BACKUPS_NUMBER: 2,
    })
    my_cache.put('test_key', 0)
    test_value = 0

    # abstract main loop
    while True:
        try:
            # do the work
            test_value = my_cache.get('test_key') or 0
Ejemplo n.º 30
0
def test_client_with_multiple_bad_servers(with_partition_awareness):
    """Connecting when every listed server is down must raise ReconnectError."""
    with pytest.raises(ReconnectError, match="Can not connect."):
        bad_client = Client(partition_aware=with_partition_awareness)
        with bad_client.connect([("127.0.0.1", 10900), ("127.0.0.1", 10901)]):
            pass