def test_change_gateway(protocol, changeto_protocol):
    """Mutating the gateway protocol of a *running* Flow must raise RuntimeError."""
    flow = (
        Flow(protocol=protocol)
        .add()
        .add()
        .add(needs='executor1')
        .needs_all()
    )
    with flow:
        # a round-trip through the live Flow should echo all ten docs back
        result = flow.post('/', random_docs(10))
        assert len(result) == 10
        # switching protocols mid-flight is expected to be rejected
        with pytest.raises(RuntimeError):
            flow.protocol = changeto_protocol
def test_change_gateway(protocol, changeto_protocol, mocker):
    """A Flow keeps serving requests after its gateway protocol is swapped."""
    flow = Flow(protocol=protocol).add().add().add(needs='pod1').needs_all()
    with flow:
        # baseline request on the original protocol
        before_cb = mocker.Mock()
        flow.post('/', random_docs(10), on_done=before_cb)
        before_cb.assert_called()

        # swap the protocol in place and verify traffic still goes through
        flow.protocol = changeto_protocol
        after_cb = mocker.Mock()
        flow.post('/', random_docs(10), on_done=after_cb)
        after_cb.assert_called()
def test_scale_with_concurrent_client(
    remote_flow_with_runtime: Flow, deployment_params, protocol
):
    """Scaling a deployment while clients are indexing must not drop requests."""

    def _client_worker(port, proto, tag, out_queue):
        # Index a fixed batch of docs tagged with this worker's id and push the
        # echoed texts into the queue — raw proto objects are not safe to send
        # through a multiprocessing queue, so only the text payload travels.
        responses = Client(protocol=proto, port=port).index(
            [Document(text=tag) for _ in range(NUM_DOCS_SENT_BY_CLIENTS)],
            request_size=5,
            return_results=True,
        )
        for resp in responses:
            for doc in resp.docs:
                out_queue.put(doc.text)

    _, scale_to, _ = deployment_params
    doc_queue = multiprocessing.Queue()
    remote_flow_with_runtime.protocol = protocol

    with remote_flow_with_runtime as flow:
        port = flow.port_expose

        # fan out concurrent clients, each in its own process
        workers = []
        for peer_id in range(NUM_CONCURRENT_CLIENTS):
            proc = multiprocessing.Process(
                target=partial(_client_worker, port, protocol, str(peer_id), doc_queue)
            )
            proc.start()
            workers.append(proc)

        # scale the deployment while the clients are still sending traffic
        flow.scale(deployment_name='executor', replicas=scale_to)
        for proc in workers:
            proc.join()

        # a follow-up client must still get one response per request
        follow_up = Client(protocol=protocol, port=port).index(
            [Document() for _ in range(5)], request_size=1, return_results=True
        )

        received = []
        while not doc_queue.empty():
            received.append(doc_queue.get())

        # every doc sent by every concurrent client made it through the scale
        assert len(received) == NUM_CONCURRENT_CLIENTS * NUM_DOCS_SENT_BY_CLIENTS
        assert len(follow_up) == 5
        for resp in follow_up:
            assert len(resp.docs) == 1
def query_restful():
    """Serve the query Flow over an HTTP REST interface until interrupted."""
    f = Flow(cors=True).load_config('flows/flow-query.yml')
    # expose the loaded flow as an HTTP REST API
    f.rest_api = True
    f.protocol = 'http'
    with f:
        f.block()  # keep serving until the process is stopped