def test_flowstore_scale(partial_flow_store, mocker):
    flow_model = FlowModel()
    flow_model.uses = f'{cur_dir}/flow.yml'
    args = ArgNamespace.kwargs2namespace(flow_model.dict(), set_flow_parser())
    partial_flow_store.add(args)
    scale_mock = mocker.Mock()
    partial_flow_store.object.scale = scale_mock
    partial_flow_store.scale(pod_name='executor1', replicas=2)
    scale_mock.assert_called()
def test_flowstore_rolling_update(partial_flow_store, mocker):
    flow_model = FlowModel()
    flow_model.uses = f'{cur_dir}/flow.yml'
    args = ArgNamespace.kwargs2namespace(flow_model.dict(), set_flow_parser())
    partial_flow_store.add(args)
    rolling_update_mock = mocker.Mock()
    partial_flow_store.object.rolling_update = rolling_update_mock
    partial_flow_store.rolling_update(pod_name='executor1', uses_with={})
    rolling_update_mock.assert_called()
def test_namespace_to_dict():
    _namespaces = {
        'head': Namespace(
            host='1.2.3.4',
            name='encoder',
            parallel=2,
            pea_id=-1,
            polling=PollingType.ANY,
            port_ctrl=39011,
            port_expose=8000,
            uses_after='_pass',
            uses_before=None,
        ),
        'tail': Namespace(
            host='1.2.3.4',
            name='encoder',
            parallel=2,
            pea_id=-1,
            polling=PollingType.ANY,
            port_ctrl=46937,
            port_expose=8000,
            uses_after='_pass',
            uses_before=None,
        ),
        'peas': [
            Namespace(
                host='1.2.3.4',
                name='encoder',
                parallel=2,
                pea_id=-1,
                polling=PollingType.ANY,
                port_ctrl=44747,
                port_expose=8000,
                uses='helloworld.encoder.yml',
                uses_after='_pass',
                uses_before=None,
            ),
            Namespace(
                host='1.2.3.4',
                name='encoder',
                parallel=2,
                pea_id=-1,
                polling=PollingType.ANY,
                port_ctrl=48957,
                port_expose=8000,
                uses='helloworld.encoder.yml',
                uses_after='_pass',
                uses_before=None,
            ),
        ],
    }
    _dict_args = ArgNamespace.flatten_to_dict(args=_namespaces)
    assert 'head' in _dict_args
    assert 'tail' in _dict_args
    assert 'peas' in _dict_args
    assert len(_dict_args['peas']) == 2
    assert _namespaces['head'].polling.value == _dict_args['head']['polling']
    assert _namespaces['peas'][0].uses == _dict_args['peas'][0]['uses']
async def test_concurrent_requests():
    args = ArgNamespace.kwargs2namespace({}, set_gateway_parser())
    mock_zmqlet = ZmqletMock()
    servicer = PrefetchCaller(args, mock_zmqlet)
    request = _generate_request()
    response = servicer.send(iter([request]))
    async for r in response:
        assert r.proto == request
    await servicer.close()
async def test_flowstore_scale(partial_flow_store, mocker):
    flow_model = FlowModel()
    flow_model.uses = f'{cur_dir}/flow.yml'
    args = ArgNamespace.kwargs2namespace(flow_model.dict(), set_flow_parser())
    partial_flow_store.add(args)
    future = asyncio.Future()
    future.set_result(PartialStoreItem())
    mocker.patch('daemon.stores.partial.PartialFlowStore.scale', return_value=future)
    resp = await partial_flow_store.scale(deployment_name='executor1', replicas=2)
    assert resp
def dump(self, data: 'Flow') -> Dict:
    """
    :param data: versioned flow object
    :return: the dictionary given a versioned flow object
    """
    r = {}
    if data._version:
        r['version'] = data._version

    # to maintain order - version -> with -> executors
    r['with'] = {}
    if data._kwargs:
        r['with'].update(data._kwargs)
    if data._common_kwargs:
        r['with'].update(data._common_kwargs)

    if data._deployment_nodes:
        r['executors'] = []
        last_name = 'gateway'
        for k, v in data._deployment_nodes.items():
            kwargs = {}
            # only add "needs" when the value is not the last deployment name
            if list(v.needs) != [last_name]:
                kwargs = {'needs': list(v.needs)}

            # get non-default kwargs
            parser = set_deployment_parser()
            if v.role == DeploymentRoleType.GATEWAY:
                parser = set_gateway_parser()
            non_default_kw = ArgNamespace.get_non_defaults_args(v.args, parser)
            kwargs.update(non_default_kw)

            for t in _get_taboo(parser):
                if t in kwargs:
                    kwargs.pop(t)

            if k == 'gateway':
                if 'JINA_FULL_CLI' in os.environ:
                    r['with'].update(kwargs)
                else:
                    continue
            else:
                last_name = kwargs['name']
            r['executors'].append(kwargs)

    return r
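# A minimal usage sketch of the dumper above via the public Flow API, assuming
# `Flow.save_config` routes through versioned parsers like this one (the file
# name and executor name are placeholders):
from jina import Flow

f = Flow().add(name='executor1')
f.save_config('flow.yml')  # serialized in the order: version -> with -> executors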
def __init__(self, args: Optional[argparse.Namespace] = None, **kwargs):
    if args and isinstance(args, argparse.Namespace):
        self.args = args
    else:
        self.args = ArgNamespace.kwargs2namespace(kwargs, set_hub_parser())
    # use the resolved `self.args` here: `args` itself may be None when kwargs are given
    self.logger = JinaLogger(self.__class__.__name__, **vars(self.args))
    with ImportExtensions(required=True):
        import rich
        import cryptography
        import filelock

        assert rich  #: prevent pycharm from auto-removing the import above
        assert cryptography
        assert filelock
async def _ping(host: str, port: int):
    """
    Ping to check if we can connect to the gateway via gRPC at `host:port`

    Note: make sure the Flow is running
    """
    kwargs = {'port_expose': port, 'host': host}
    _, args, _ = ArgNamespace.get_parsed_args(kwargs, set_client_cli_parser())
    client = Client(args)
    try:
        # TODO: this introduces a side-effect and needs to be refactored. (2020.01.10)
        client.index(input_fn=['abc'])
        return {'status_code': status.HTTP_200_OK, 'detail': 'connected'}
    except Exception:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f'Cannot connect to gRPC server on {host}:{port}',
        )
def test_flowstore_update(partial_flow_store, mocker):
    flow_model = FlowModel()
    flow_model.uses = f'{cur_dir}/flow.yml'
    port_expose = helper.random_port()
    args = ArgNamespace.kwargs2namespace(flow_model.dict(), set_flow_parser())
    partial_flow_store.add(args, port_expose)
    update_mock = mocker.Mock()
    partial_flow_store.object.rolling_update = update_mock
    partial_flow_store.update(
        kind=UpdateOperation.ROLLING_UPDATE,
        dump_path='',
        pod_name='pod1',
        shards=1,
    )
    update_mock.assert_called()
async def test_rolling_update_remote_pod(async_jinad_client, pod_args):
    payload = replace_enum_to_str(ArgNamespace.flatten_to_dict(pod_args))
    workspace_id = await async_jinad_client.workspaces.create(
        paths=[os.path.join(cur_dir, cur_dir)]
    )
    success, pod_id = await async_jinad_client.deployments.create(
        workspace_id=workspace_id, payload=payload
    )
    assert success
    await async_jinad_client.deployments.rolling_update(
        id=pod_id, uses_with={'foo': 'bar-new', 'dump_path': 'test'}
    )
    # TODO: HOW TO CHECK PEA ARGS IN JINAD? ROLLING UPDATE WON'T CHANGE POD ARGS
    # TODO: PEA_STORE IS EMPTY
    _ = await async_jinad_client.deployments.get(pod_id)
    assert await async_jinad_client.deployments.delete(pod_id)
    assert await async_jinad_client.workspaces.delete(workspace_id)
async def test_remote_jinad_pod_async(pod_args, async_jinad_client):
    payload = replace_enum_to_str(ArgNamespace.flatten_to_dict(pod_args))
    workspace_id = await async_jinad_client.workspaces.create(
        paths=[os.path.join(cur_dir, cur_dir)]
    )
    assert await async_jinad_client.deployments.alive()
    # create pod
    success, pod_id = await async_jinad_client.deployments.create(
        workspace_id=workspace_id, payload=payload
    )
    assert success
    # get pod status
    resp = await async_jinad_client.deployments.get(pod_id)
    remote_pod_args = resp['arguments']['object']['arguments']
    # delete pod
    assert await async_jinad_client.deployments.delete(pod_id)
    resp = await async_jinad_client.deployments.get(pod_id)
    assert resp == pod_id + ' not found in store'
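# A sketch of how the JSON payload used by the two tests above is derived from a
# parsed namespace; the pod name is a placeholder and the `set_pod_parser`
# import path is assumed:
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser

ns = set_pod_parser().parse_args(['--name', 'my_pod'])
payload = ArgNamespace.flatten_to_dict(ns)  # JSON-serializable dict of the args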
def from_hub(
    cls: Type[T],
    uri: str,
    context: Optional[Dict[str, Any]] = None,
    uses_with: Optional[Dict] = None,
    uses_metas: Optional[Dict] = None,
    uses_requests: Optional[Dict] = None,
    **kwargs,
) -> T:
    """Construct an Executor from Hub.

    :param uri: a hub Executor scheme starting with `jinahub://`
    :param context: context replacement variables in a dict, the value of the dict is the replacement
    :param uses_with: dictionary of parameters to overwrite from the default config's `with` field
    :param uses_metas: dictionary of parameters to overwrite from the default config's `metas` field
    :param uses_requests: dictionary of parameters to overwrite from the default config's `requests` field
    :param kwargs: other kwargs accepted by the CLI ``jina hub pull``
    :return: the Hub Executor object
    """
    from jina.hubble.helper import is_valid_huburi

    _source = None
    if is_valid_huburi(uri):
        from jina.hubble.hubio import HubIO
        from jina.parsers.hubble import set_hub_pull_parser

        _args = ArgNamespace.kwargs2namespace(
            {'no_usage': True, **kwargs},
            set_hub_pull_parser(),
            positional_args=(uri,),
        )
        _source = HubIO(args=_args).pull()

    if not _source or _source.startswith('docker://'):
        raise ValueError(
            f'Cannot construct a native Executor from {uri}. It looks like you want to use it as a '
            f'Docker container; you may want to add it to a Flow via `.add(uses={uri})` instead.'
        )
    return cls.load_config(
        _source,
        context=context,
        uses_with=uses_with,
        uses_metas=uses_metas,
        uses_requests=uses_requests,
    )
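# A hedged usage sketch for `from_hub`; the Executor name below is hypothetical,
# and any Executor published on Jina Hub under the `jinahub://` scheme would be
# used the same way:
from jina import Executor

executor = Executor.from_hub(
    'jinahub://MyEncoder',          # hypothetical Hub Executor name
    uses_with={'param': 'value'},   # overrides the config's `with` field
)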
def __init__(
    self,
    args: Optional['argparse.Namespace'] = None,
    **kwargs,
):
    if args and isinstance(args, argparse.Namespace):
        self.args = args
    else:
        self.args = ArgNamespace.kwargs2namespace(
            kwargs, set_client_cli_parser(), warn_unknown=True
        )
    self.logger = JinaLogger(self.__class__.__name__, **vars(self.args))

    if not self.args.proxy and os.name != 'nt':
        # (Han 2020 12.12): the gRPC channel runs over HTTP2 and does not work behind a proxy.
        # As many enterprise users are behind a proxy, a quick way to bypass it is to
        # temporarily unset the proxy. Please do NOT panic: this will NOT affect
        # the user's OS-level envs.
        os.unsetenv('http_proxy')
        os.unsetenv('https_proxy')
    self._inputs = None
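# A minimal sketch of constructing a Client from kwargs instead of a pre-parsed
# namespace; the kwargs go through `ArgNamespace.kwargs2namespace` exactly as
# above (host/port values are placeholders, and the accepted parameter names
# depend on the Jina version):
from jina import Client

c = Client(host='localhost', port_expose=45678)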
async def _create(pod: 'PodModel'):
    try:
        args = ArgNamespace.kwargs2namespace(pod.dict(), set_pod_parser())
        return store.add(args)
    except Exception as ex:
        raise Runtime400Exception from ex
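# The same kwargs-to-namespace conversion used by the endpoint above, sketched
# standalone (the pod name is a placeholder):
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser

args = ArgNamespace.kwargs2namespace({'name': 'my_pod'}, set_pod_parser())
assert args.name == 'my_pod'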
def _docker_run(
    client: 'DockerClient',
    args: 'argparse.Namespace',
    container_name: str,
    envs: Dict,
    net_mode: Optional[str],
    logger: 'JinaLogger',
):
    # important to notice that `client` is not assigned as an instance member, to avoid a
    # potentially heavy copy into the new process memory space
    import warnings

    import docker

    docker_version = client.version().get('Version')
    if not docker_version:
        raise DockerVersionError('docker version can not be resolved')

    docker_version = tuple(docker_version.split('.'))
    # docker daemon versions below 20.0x do not support "host.docker.internal:host-gateway"
    if docker_version < ('20',):
        raise DockerVersionError(
            f'docker version {".".join(docker_version)} is below 20.0.0 and does not '
            f'support "host.docker.internal:host-gateway": https://github.com/docker/cli/issues/2664'
        )

    if args.uses.startswith('docker://'):
        uses_img = args.uses.replace('docker://', '')
        logger.debug(f'will use Docker image: {uses_img}')
    else:
        warnings.warn(
            f'you are using the legacy image format {args.uses}, which may create some ambiguity. '
            f'please use the new format: "--uses docker://{args.uses}"'
        )
        uses_img = args.uses

    # the image arg should be ignored, otherwise it keeps using ContainerPod inside the container.
    # basically all args in the Pod-docker arg group should be ignored;
    # this prevents setting up a ContainerPod twice
    from pathlib import Path

    from jina.helper import ArgNamespace
    from jina.parsers import set_pod_parser

    args.native = True
    non_defaults = ArgNamespace.get_non_defaults_args(
        args,
        set_pod_parser(),
        taboo={
            'uses',
            'entrypoint',
            'volumes',
            'pull_latest',
            'docker_kwargs',
            'gpus',
        },
    )

    img_not_found = False
    try:
        client.images.get(uses_img)
    except docker.errors.ImageNotFound:
        logger.error(f'can not find local image: {uses_img}')
        img_not_found = True

    if img_not_found:
        raise BadImageNameError(
            f'image: {uses_img} can not be found locally or remotely.'
        )

    _volumes = {}
    if not args.disable_auto_volume and not args.volumes:
        (
            generated_volumes,
            workspace_in_container,
        ) = generate_default_volume_and_workspace(workspace_id=args.workspace_id)
        args.volumes = generated_volumes
        args.workspace = (
            workspace_in_container if not args.workspace else args.workspace
        )

    if args.volumes:
        for p in args.volumes:
            paths = p.split(':')
            local_path = paths[0]
            Path(os.path.abspath(local_path)).mkdir(parents=True, exist_ok=True)
            if len(paths) == 2:
                container_path = paths[1]
            else:
                container_path = '/' + os.path.basename(p)
            _volumes[os.path.abspath(local_path)] = {
                'bind': container_path,
                'mode': 'rw',
            }

    device_requests = []
    if args.gpus:
        device_requests = get_gpu_device_requests(args.gpus)
        del args.gpus

    _args = ArgNamespace.kwargs2list(non_defaults)
    ports = {f'{args.port}/tcp': args.port} if not net_mode else None

    docker_kwargs = args.docker_kwargs or {}
    container = client.containers.run(
        uses_img,
        _args,
        detach=True,
        auto_remove=True,
        ports=ports,
        name=container_name,
        volumes=_volumes,
        network_mode=net_mode,
        entrypoint=args.entrypoint,
        extra_hosts={__docker_host__: 'host-gateway'},
        device_requests=device_requests,
        environment=envs,
        **docker_kwargs,
    )
    return container
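# A small sketch of the namespace -> CLI round-trip used above: non-default args
# are flattened back into a CLI-style list that the container entrypoint can parse
# (values are placeholders; the exact rendering may vary across Jina versions):
from jina.helper import ArgNamespace

cli_args = ArgNamespace.kwargs2list({'name': 'encoder', 'shards': 2})
# e.g. ['--name', 'encoder', '--shards', '2']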