def test_dump_dbms_remote(docker_compose):
    """End-to-end dump/reload over REST: index into the DBMS Flow, dump it,
    rolling-update the Query Flow with the dump path, then verify searches match.

    Fix: the log message read 'dump path size size' (duplicated word).
    """
    nr_docs = 100
    nr_search = 1
    docs = list(_get_documents(nr=nr_docs, index_start=0, emb_size=EMB_SIZE))
    dbms_flow_id, query_flow_id, workspace_id = _create_flows()

    # Before anything is indexed, the Query Flow must return no matches.
    r = _send_rest_request(
        REST_PORT_QUERY,
        'search',
        'post',
        [doc.dict() for doc in docs[:nr_search]],
    )
    # TODO some times it was None
    assert (
        r['data']['docs'][0].get('matches') is None
        or r['data']['docs'][0].get('matches') == []
    )

    # Index all docs into the DBMS Flow.
    _send_rest_request(REST_PORT_DBMS, 'index', 'post', [doc.dict() for doc in docs])

    # Trigger a dump on the DBMS indexer, sharded for the Query Flow's replicas.
    _send_rest_request(
        REST_PORT_DBMS,
        'post',
        'post',
        data=[],
        exec_endpoint='/dump',
        params={'shards': SHARDS, 'dump_path': DUMP_PATH_DOCKER},
        target_peapod='indexer_dbms',
    )

    # The dump is written inside the flow's container; check it is non-empty.
    container_id = client.flows.get(dbms_flow_id)['metadata']['container_id']
    dir_size = _path_size_remote(DUMP_PATH_DOCKER, container_id=container_id)
    assert dir_size > 0
    logger.info(f'dump path size: {dir_size}')

    # jinad is used for ctrl requests
    assert (
        DaemonID(
            client.flows.rolling_update(
                id=query_flow_id,
                pod_name='indexer_query',
                uses_with={'dump_path': DUMP_PATH_DOCKER},
            )
        )
        == DaemonID(query_flow_id)
    )

    # data request goes to client
    r = _send_rest_request(
        REST_PORT_QUERY,
        'search',
        'post',
        [doc.dict() for doc in docs[:nr_search]],
        params={'top_k': 100},
    )
    for doc in r['data']['docs']:
        assert len(doc.get('matches')) == nr_docs

    assert client.flows.delete(dbms_flow_id)
    assert client.flows.delete(query_flow_id)
    assert client.workspaces.delete(workspace_id)
def test_flow_depends_localpath(monkeypatch):
    """FlowDepends.localpath resolves when the workspace path is a real file,
    and raises HTTPException when it is not."""
    monkeypatch.setattr(dependencies, 'change_cwd', nullcontext)
    monkeypatch.setattr(dependencies, 'get_workspace_path', lambda *args: filename)
    depends = FlowDepends(DaemonID('jworkspace'), filename, Environment(envs=['a=b']))
    assert str(depends.localpath()) == filename

    with pytest.raises(HTTPException) as e:
        # Point the workspace path at something that does not exist.
        monkeypatch.setattr(dependencies, "get_workspace_path", lambda *args: 'abc')
        depends = FlowDepends(DaemonID('jworkspace'), filename, Environment(envs=['a=b']))
        depends.localpath()
def test_flow_depends_localpath(monkeypatch):
    """FlowDepends.localpath resolves an existing workspace file and raises
    HTTPException for a missing one."""
    monkeypatch.setattr(dependencies, "get_workspace_path", lambda *args: filename)
    depends = FlowDepends(DaemonID('jworkspace'), filename)
    assert str(depends.localpath()) == filename

    with pytest.raises(HTTPException) as e:
        # Point the workspace path at a non-existent location.
        monkeypatch.setattr(dependencies, "get_workspace_path", lambda *args: 'abc')
        depends = FlowDepends(DaemonID('jworkspace'), filename)
        depends.localpath()
def test_flow_depends_localpath(monkeypatch, tmpdir):
    """FlowDepends.localpath resolves when the workspace path is a real file,
    and raises HTTPException when it is not."""
    filename = os.path.join(cur_dir, 'flow1.yml')
    monkeypatch.setattr(dependencies, 'change_cwd', nullcontext)
    monkeypatch.setattr(dependencies, 'get_workspace_path', lambda *args: filename)
    monkeypatch.setattr(FlowDepends, 'newfile', os.path.join(tmpdir, 'abc.yml'))
    depends = FlowDepends(DaemonID('jworkspace'), filename, Environment(envs=['a=b']))
    assert str(depends.localpath()) == filename

    with pytest.raises(HTTPException) as e:
        # Point the workspace path at a non-existent location.
        monkeypatch.setattr(dependencies, "get_workspace_path", lambda *args: 'abc')
        depends = FlowDepends(DaemonID('jworkspace'), filename, Environment(envs=['a=b']))
        depends.localpath()
def test_remote_flow():
    """Create a remote workspace and Flow via JinaD, probe the Flow's status
    endpoint, then tear both down."""
    client = JinaDClient(host=__default_host__, port=8000)
    workspace_id = client.workspaces.create(
        paths=[os.path.join(cur_dir, 'empty_flow.yml')]
    )
    assert DaemonID(workspace_id).type == 'workspace'

    flow_id = client.flows.create(workspace_id=workspace_id, filename='empty_flow.yml')
    assert DaemonID(flow_id).type == 'flow'
    assert client.flows.get(flow_id)
    assert flow_id in client.flows.list()

    # The Flow exposes its HTTP gateway on 23456 (see empty_flow.yml — TODO confirm).
    assert_request('get', url=f'http://localhost:23456/status/', expect_rcode=200)

    assert client.flows.delete(flow_id)
    assert client.workspaces.delete(workspace_id)
def test_flow_depends_ports(monkeypatch):
    """FlowDepends exposes the Flow's port both as port_expose and as a
    docker-style tcp port mapping."""
    expected_port = 28956
    monkeypatch.setattr(dependencies, 'change_cwd', nullcontext)
    monkeypatch.setattr(dependencies, 'get_workspace_path', lambda *args: filename)
    depends = FlowDepends(DaemonID('jworkspace'), filename, Environment(envs=['a=b']))
    assert depends.port_expose == expected_port
    assert depends.ports == {f'{expected_port}/tcp': expected_port}
def test_flow_depends_load_and_dump(monkeypatch, tmpdir):
    """Load a Flow from the file FlowDepends dumped and verify gateway/pod
    ports and docker flags survive the round-trip.

    Fix: dropped the `port_in is not None` assertion, which was made redundant
    by the preceding `port_in == 45678` check.
    """
    filename = os.path.join(cur_dir, 'flow2.yml')
    monkeypatch.setattr(dependencies, 'get_workspace_path', lambda *args: tmpdir)
    monkeypatch.setattr(
        FlowDepends, 'localpath', lambda *args: os.path.join(tmpdir, filename)
    )
    # newfile is the dumped file's full path; newname is its basename.
    monkeypatch.setattr(FlowDepends, 'newfile', os.path.join(tmpdir, 'abc.yml'))
    monkeypatch.setattr(FlowDepends, 'newname', 'abc.yml')
    copy(os.path.join(cur_dir, filename), tmpdir)

    fd = FlowDepends(
        workspace_id=DaemonID('jworkspace'),
        filename=filename,
        envs=Environment(envs=['a=b']),
    )
    with change_cwd(tmpdir):
        f: Flow = Flow.load_config(fd.params.uses).build()
        assert f.port_expose == 12345
        assert f.protocol == GatewayProtocolType.HTTP
        assert f['gateway'].args.runs_in_docker
        assert f['local_replicas'].args.runs_in_docker
        assert f['local_replicas'].args.port_in == 45678
        # every port the Flow binds must be registered in fd.ports
        assert all(
            port in fd.ports.ports
            for port in [
                f.port_expose,
                f['gateway'].args.port_in,
                f['gateway'].args.port_out,
                f['gateway'].args.port_ctrl,
                f['local_replicas'].args.port_in,
                f['local_replicas'].args.port_out,
                f['local_replicas'].args.port_ctrl,
                f['local_compound'].head_args.port_in,
                f['local_compound'].tail_args.port_out,
            ]
        )
def test_dump_dbms_remote(executor_images, docker_compose):
    """End-to-end dump/reload via the Jina Client: index into the DBMS Flow,
    dump it, rolling-update the Query Flow with the dump path, then search.

    Fix: replaced the no-op copies `[doc for doc in docs[:nr_search]]` with
    the slice itself — the comprehension only duplicated an existing list.
    """
    nr_docs = 100
    nr_search = 1
    docs = list(_get_documents(nr=nr_docs, index_start=0, emb_size=EMB_SIZE))
    dbms_flow_id, query_flow_id, workspace_id = _create_flows()

    # check that there are no matches in Query Flow
    r = Client(host=HOST, port=REST_PORT_QUERY, protocol='http').search(
        inputs=docs[:nr_search], return_results=True
    )
    assert r[0].data.docs[0].matches is None or len(r[0].data.docs[0].matches) == 0

    # index on DBMS flow
    Client(host=HOST, port=REST_PORT_DBMS, protocol='http').index(
        inputs=docs, return_results=True
    )

    # dump data for DBMS flow
    Client(host=HOST, port=REST_PORT_DBMS, protocol='http').post(
        on='/dump',
        parameters={'shards': SHARDS, 'dump_path': DUMP_PATH},
        target_executor='indexer_dbms',
    )

    # rolling_update on Query Flow
    assert (
        DaemonID(
            client.flows.rolling_update(
                id=query_flow_id,
                deployment_name='indexer_query',
                uses_with={'dump_path': DUMP_PATH},
            )
        )
        == DaemonID(query_flow_id)
    )

    # validate that there are matches now
    r = Client(host=HOST, port=REST_PORT_QUERY, protocol='http').search(
        inputs=docs[:nr_search],
        return_results=True,
        parameters={'top_k': 10},
    )
    for doc in r[0].data.docs:
        assert len(doc.matches) == 10

    assert client.flows.delete(dbms_flow_id)
    assert client.flows.delete(query_flow_id)
    assert client.workspaces.delete(workspace_id)
def test_workspace_clear():
    """workspaces.clear() removes an ACTIVE workspace; repeat to prove the
    create/clear cycle is idempotent."""
    client = JinaDClient(host=__default_host__, port=8000)
    for _ in range(2):
        workspace_id = client.workspaces.create(
            paths=[os.path.join(cur_dir, 'empty_flow.yml')]
        )
        assert DaemonID(workspace_id).type == 'workspace'
        item = WorkspaceItem(**client.workspaces.get(id=workspace_id))
        assert item.state == RemoteWorkspaceState.ACTIVE
        assert workspace_id in client.workspaces.list()
        assert client.workspaces.clear()
async def test_async_jinad_client(async_jinad_client, pod_args):
    """Full async lifecycle: create a workspace and a pod, verify readiness,
    then delete both and verify the pod is gone."""
    workspace_id = await async_jinad_client.workspaces.create(paths=[cur_dir])
    assert DaemonID(workspace_id)

    success, pod_id = await async_jinad_client.pods.create(
        workspace_id=workspace_id,
        payload=replace_enum_to_str(vars(pod_args)),
    )
    assert success
    assert pod_id
    assert is_pod_ready(pod_args)

    assert await async_jinad_client.pods.delete(pod_id)
    assert not is_pod_ready(pod_args)
    assert await async_jinad_client.workspaces.delete(workspace_id)
async def test_pod_rolling_update_async(monkeypatch):
    """AsyncPodClient.rolling_update returns the JSON body on HTTP 201 and
    None when the underlying aiohttp request raises."""
    uses_with = {'1': 2}
    pod_id = DaemonID('jpod')
    client = AsyncPodClient(uri=MOCK_URI, logger=logger)

    # success path: 201 with a JSON body
    monkeypatch.setattr(
        aiohttp, 'request', lambda **kwargs: MockAiohttpResponse({1: 2}, 201)
    )
    assert await client.rolling_update(pod_id, uses_with=uses_with) == {1: 2}

    # failure path: the request raises, client returns None
    monkeypatch.setattr(aiohttp, 'request', lambda **kwargs: MockAiohttpException())
    assert await client.rolling_update(pod_id, uses_with=uses_with) is None
def test_pod_scale(monkeypatch):
    """PodClient.scale returns the JSON body on HTTP 201 and None when the
    underlying aiohttp request raises."""
    payload = {'1': 2}  # unused by scale(); kept to mirror sibling tests
    pod_id = DaemonID('jpod')
    client = PodClient(uri=MOCK_URI, logger=logger)

    # success path: 201 with a JSON body
    monkeypatch.setattr(
        aiohttp, 'request', lambda **kwargs: MockAiohttpResponse({1: 2}, 201)
    )
    assert client.scale(pod_id, replicas=2) == {1: 2}

    # failure path: the request raises, client returns None
    monkeypatch.setattr(aiohttp, 'request', lambda **kwargs: MockAiohttpException())
    assert client.scale(pod_id, replicas=2) is None
def test_deployment_rolling_update(monkeypatch):
    """DeploymentClient.rolling_update returns the JSON body on HTTP 201 and
    None when the underlying aiohttp request raises."""
    uses_with = {'1': 2}
    deployment_id = DaemonID('jdeployment')
    client = DeploymentClient(uri=MOCK_URI, logger=logger)

    # success path: 201 with a JSON body
    monkeypatch.setattr(
        aiohttp, 'request', lambda **kwargs: MockAiohttpResponse({1: 2}, 201)
    )
    assert client.rolling_update(deployment_id, uses_with=uses_with) == {1: 2}

    # failure path: the request raises, client returns None
    monkeypatch.setattr(aiohttp, 'request', lambda **kwargs: MockAiohttpException())
    assert client.rolling_update(deployment_id, uses_with=uses_with) is None
async def test_deployment_scale_async(monkeypatch):
    """AsyncDeploymentClient.scale returns the JSON body on HTTP 201 and None
    when the underlying aiohttp request raises."""
    payload = {'1': 2}  # unused by scale(); kept to mirror sibling tests
    deployment_id = DaemonID('jdeployment')
    client = AsyncDeploymentClient(uri=MOCK_URI, logger=logger)

    # success path: 201 with a JSON body
    monkeypatch.setattr(
        aiohttp, 'request', lambda **kwargs: MockAiohttpResponse({1: 2}, 201)
    )
    assert await client.scale(deployment_id, replicas=2) == {1: 2}

    # failure path: the request raises, client returns None
    monkeypatch.setattr(aiohttp, 'request', lambda **kwargs: MockAiohttpException())
    assert await client.scale(deployment_id, replicas=2) is None
async def test_custom_project():
    """Deploy a custom project workspace (flow_app_ws), index generated docs,
    search by tag, and verify the match carries the expected tag pair."""
    HOST = __default_host__
    client = AsyncJinaDClient(host=HOST, port=8000)
    workspace_id = await client.workspaces.create(
        paths=[os.path.join(cur_dir, 'flow_app_ws')]
    )
    assert DaemonID(workspace_id).type == 'workspace'
    # Sleep to allow the workspace container to start
    await asyncio.sleep(20)

    async def gen_docs():
        # Yield docs tagged with consecutive letter pairs: (a,b), (c,d), ...
        import string

        letters = iter(string.ascii_lowercase)
        while True:
            try:
                yield Document(tags={'first': next(letters), 'second': next(letters)})
            except StopIteration:
                return

    async for resp in Client(
        asyncio=True, host=HOST, port=42860, show_progress=True
    ).post(on='/index', inputs=gen_docs):
        pass

    async for resp in Client(
        asyncio=True, host=HOST, port=42860, show_progress=True
    ).post(
        on='/search',
        inputs=Document(tags={'key': 'first', 'value': 's'}),
        return_results=True,
    ):
        # searching first=='s' must hit the (s, t) pair generated above
        tags = resp.data.docs[0].matches[0].tags
        assert tags['first'] == 's'
        assert tags['second'] == 't'

    print(f'Deleting workspace {workspace_id}')
    assert await client.workspaces.delete(workspace_id)
def test_dump(monkeypatch, tmpdir):
    """Load flow3.yml through FlowDepends and verify port/protocol settings
    survive the load-and-dump round-trip.

    Fix: the `newfile`/`newname` monkeypatch values were swapped relative to
    the sibling tests (test_flow_depends_load_and_dump,
    test_flow_depends_load_and_dump_given_context), where `newfile` is the
    dumped file's full path and `newname` is its basename.
    """
    filename = os.path.join(cur_dir, 'flow3.yml')
    monkeypatch.setattr(dependencies, 'get_workspace_path', lambda *args: tmpdir)
    monkeypatch.setattr(
        FlowDepends, 'localpath', lambda *args: os.path.join(tmpdir, filename)
    )
    # newfile is the dumped file's full path; newname is its basename.
    monkeypatch.setattr(FlowDepends, 'newfile', os.path.join(tmpdir, 'abc.yml'))
    monkeypatch.setattr(FlowDepends, 'newname', 'abc.yml')
    copy(os.path.join(cur_dir, filename), tmpdir)

    fd = FlowDepends(
        workspace_id=DaemonID('jworkspace'),
        filename=filename,
        envs=Environment(envs=['a=b']),
    )
    with change_cwd(tmpdir):
        f: Flow = Flow.load_config(fd.params.uses).build()
        assert f.port_expose == 12345
        assert f.protocol == GatewayProtocolType.HTTP
        assert f['local_replicas'].args.port_in == 45678
def test_flow_depends_load_and_dump_given_context(monkeypatch, tmpdir):
    """Environment variables passed to FlowDepends are substituted into the
    dumped Flow config; only context_var1 is mapped through (key2/key3 stay
    unsubstituted — presumably flow_with_env.yml only references var1; verify
    against the YAML)."""
    filename = os.path.join(cur_dir, 'flow_with_env.yml')
    monkeypatch.setattr(dependencies, 'get_workspace_path', lambda *args: tmpdir)
    monkeypatch.setattr(
        FlowDepends, 'localpath', lambda *args: os.path.join(tmpdir, filename)
    )
    monkeypatch.setattr(FlowDepends, 'newfile', os.path.join(tmpdir, 'abc.yml'))
    monkeypatch.setattr(FlowDepends, 'newname', 'abc.yml')
    copy(os.path.join(cur_dir, filename), tmpdir)

    fd = FlowDepends(
        workspace_id=DaemonID('jworkspace'),
        filename=filename,
        envs=Environment(
            envs=['context_var1=val1', 'context_var2=val2', 'context_var3=val3']
        ),
    )
    fd.load_and_dump()

    dumped = Flow.load_config(source=os.path.join(tmpdir, 'abc.yml'))
    envs = dumped.args.env
    assert envs['key1'] == 'val1'
    assert envs['key2'] != 'val2'
    assert envs['key3'] != 'val3'
def test_flow_depends_ports():
    """FlowDepends exposes the Flow's port both as port_expose and as a
    docker-style tcp port mapping."""
    expected_port = 28956
    depends = FlowDepends(DaemonID('jworkspace'), filename)
    assert depends.port_expose == expected_port
    assert depends.ports == {f'{expected_port}/tcp': expected_port}
lambda **kwargs: MockAiohttpResponse({1: 2}, 200)) assert await client.status() == {1: 2} monkeypatch.setattr(aiohttp, 'request', lambda **kwargs: MockAiohttpResponse({1: 2}, 400)) assert await client.status() is None monkeypatch.setattr(aiohttp, 'request', lambda **kwargs: MockAiohttpException()) assert await client.status() is None @pytest.mark.parametrize( 'identity', [ DaemonID('jworkspace'), DaemonID('jpod'), DaemonID('jdeployment'), DaemonID('jflow'), ], ) @pytest.mark.parametrize('client_cls', all_sync_clients) def test_get(monkeypatch, identity, client_cls): client = client_cls(uri=MOCK_URI, logger=logger) monkeypatch.setattr( aiohttp, 'request', lambda **kwargs: MockAiohttpResponse( { 'detail': [{ 'msg': 'abcd'
lambda **kwargs: MockAiohttpResponse({1: 2}, 200)) assert await client.status() == {1: 2} monkeypatch.setattr(aiohttp, 'request', lambda **kwargs: MockAiohttpResponse({1: 2}, 400)) assert await client.status() is None monkeypatch.setattr(aiohttp, 'request', lambda **kwargs: MockAiohttpException()) assert await client.status() is None @pytest.mark.parametrize( 'identity', [ DaemonID('jworkspace'), DaemonID('jpea'), DaemonID('jpod'), DaemonID('jflow') ], ) @pytest.mark.parametrize('client_cls', all_sync_clients) def test_get(monkeypatch, identity, client_cls): client = client_cls(uri=MOCK_URI, logger=logger) monkeypatch.setattr( aiohttp, 'request', lambda **kwargs: MockAiohttpResponse( { 'detail': [{ 'msg': 'abcd'