def test_flow_depends_load_and_dump(monkeypatch, tmpdir):
    # `filename` is workspace-relative; joining it onto `cur_dir`/`tmpdir`
    # below only works if it is a bare name, not an absolute path
    filename = 'flow2.yml'
    monkeypatch.setattr(dependencies, 'get_workspace_path', lambda *args: tmpdir)
    monkeypatch.setattr(
        FlowDepends, 'localpath', lambda *args: os.path.join(tmpdir, filename)
    )
    monkeypatch.setattr(FlowDepends, 'newfile', os.path.join(tmpdir, 'abc.yml'))
    monkeypatch.setattr(FlowDepends, 'newname', 'abc.yml')
    copy(os.path.join(cur_dir, filename), tmpdir)
    fd = FlowDepends(
        workspace_id=DaemonID('jworkspace'),
        filename=filename,
        envs=Environment(envs=['a=b']),
    )
    with change_cwd(tmpdir):
        f: Flow = Flow.load_config(fd.params.uses).build()
    assert f.port_expose == 12345
    assert f.protocol == GatewayProtocolType.HTTP
    assert f['gateway'].args.runs_in_docker
    assert f['local_replicas'].args.runs_in_docker
    assert f['local_replicas'].args.port_in == 45678
    assert all(
        port in fd.ports.ports
        for port in [
            f.port_expose,
            f['gateway'].args.port_in,
            f['gateway'].args.port_out,
            f['gateway'].args.port_ctrl,
            f['local_replicas'].args.port_in,
            f['local_replicas'].args.port_out,
            f['local_replicas'].args.port_ctrl,
            f['local_compound'].head_args.port_in,
            f['local_compound'].tail_args.port_out,
        ]
    )
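# For reference, a hypothetical `flow2.yml` consistent with the assertions
# above. The real fixture is not part of this excerpt; the values are inferred
# from the asserts, and `replicas`/`shards` are assumptions (a `shards` > 1 on
# `local_compound` would explain the separate head/tail pods being checked):
FLOW2_YML_SKETCH = '''
jtype: Flow
with:
  protocol: http
  port_expose: 12345
executors:
  - name: local_replicas
    port_in: 45678
    replicas: 2
  - name: local_compound
    shards: 2
'''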
def test_dump(monkeypatch, tmpdir):
    filename = 'flow3.yml'
    monkeypatch.setattr(dependencies, 'get_workspace_path', lambda *args: tmpdir)
    monkeypatch.setattr(
        FlowDepends, 'localpath', lambda *args: os.path.join(tmpdir, filename)
    )
    # `newfile` is the full dump path, `newname` the workspace-relative name
    # (matching how `load_and_dump` uses them; they were swapped here)
    monkeypatch.setattr(FlowDepends, 'newfile', os.path.join(tmpdir, 'abc.yml'))
    monkeypatch.setattr(FlowDepends, 'newname', 'abc.yml')
    copy(os.path.join(cur_dir, filename), tmpdir)
    fd = FlowDepends(
        workspace_id=DaemonID('jworkspace'),
        filename=filename,
        envs=Environment(envs=['a=b']),
    )
    with change_cwd(tmpdir):
        f: Flow = Flow.load_config(fd.params.uses).build()
    assert f.port_expose == 12345
    assert f.protocol == GatewayProtocolType.HTTP
    assert f['local_replicas'].args.port_in == 45678
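# `change_cwd` is used throughout but not defined in this excerpt. A minimal
# sketch of such a helper, assuming the usual contextmanager pattern of
# switching the working directory and always restoring it on exit:
import os
from contextlib import contextmanager


@contextmanager
def change_cwd(path):
    """Temporarily `cd` into `path`; restore the previous cwd on exit."""
    curdir = os.getcwd()
    os.chdir(str(path))
    try:
        yield
    finally:
        os.chdir(curdir)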
def __enter__(self):
    self._stack.__enter__()
    if not self.paths:
        return self
    # stage everything in a temporary directory: files are added as-is,
    # directories are zipped first
    tmpdir = self._stack.enter_context(TemporaryDirectory())
    self._stack.enter_context(change_cwd(tmpdir))
    for path in map(Path, self.paths):
        try:
            filename = path.name
            if path.is_file():
                self.add(path)
            elif path.is_dir():
                # `make_archive` writes '<filename>.zip' into the cwd (tmpdir)
                make_archive(base_name=filename, format='zip', root_dir=path)
                self.add(Path(tmpdir) / f'{filename}.zip')
        except TypeError:
            self._logger.error(f'invalid path passed: {path}')
    self._logger.info(
        f'{len(self)} file(s) ready to be uploaded: {", ".join(self.filenames)}'
        if len(self) > 0
        else 'No file to be uploaded'
    )
    return self
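# The zip-and-stage branch above, isolated into a standalone sketch
# (`stage_and_upload` and the `upload` callback are hypothetical names;
# `change_cwd` is the helper sketched earlier; `paths` must be absolute,
# since the cwd changes to the temporary directory):
from pathlib import Path
from shutil import make_archive
from tempfile import TemporaryDirectory


def stage_and_upload(paths, upload):
    """Pass files through as-is; zip directories into a tempdir that only
    lives until `upload(...)` returns."""
    with TemporaryDirectory() as tmpdir, change_cwd(tmpdir):
        staged = []
        for path in map(Path, paths):
            if path.is_file():
                staged.append(path)
            elif path.is_dir():
                # archive lands in the cwd (tmpdir) as '<name>.zip'
                make_archive(base_name=path.name, format='zip', root_dir=path)
                staged.append(Path(tmpdir) / f'{path.name}.zip')
        upload(staged)  # must complete before tmpdir is cleaned up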
def load_and_dump(self) -> None:
    """
    Every Flow created inside JinaD lives inside a container. It is important
    to know the list of ports to be published with localhost before actually
    starting the container.

    1. `load` the Flow yaml here.
        - yaml is stored in the `workspace` directory, so we'll `cd` there.
        - yaml might include env vars, so we'll set them (passed via query params).
    2. `build` the Flow so that the `gateway` gets added.
        - get the list of ports to be published (port_expose, port_in, port_out, port_ctrl).
        - ports need to be published for the gateway & for executors that are not
          `ContainerRuntime` or `JinadRuntime` based.
        - Deployment-level args for ports are enough, as we don't need to publish Pod ports.
    3. `save` the Flow config.
        - saves port configs of all `executors` into the new yaml.
        - set the `JINA_FULL_CLI` env var, so that `gateway` args are also added.
        - save the config into a new file.
    4. Pass this new file as `filename` to `partial-daemon` to start the Flow.
    """
    with ExitStack() as stack:
        # set env vars
        stack.enter_context(change_env('JINA_FULL_CLI', 'true'))

        # change directory to `workspace`
        stack.enter_context(change_cwd(get_workspace_path(self.workspace_id)))

        # load and build
        f: Flow = Flow.load_config(
            str(self.localpath()), substitute=True, context=self.envs
        ).build()

        # get & set the ports mapping, set `runs_in_docker`
        port_mapping = []
        port_mapping.append(
            PortMapping(
                deployment_name='gateway',
                pod_name='gateway',
                ports=Ports(port_expose=f.port_expose),
            )
        )
        for deployment_name, deployment in f._deployment_nodes.items():
            runtime_cls = update_runtime_cls(deployment.args, copy=True).runtime_cls
            if runtime_cls in ['WorkerRuntime'] + list(
                GATEWAY_RUNTIME_DICT.values()
            ):
                current_ports = Ports()
                for port_name in Ports.__fields__:
                    setattr(
                        current_ports,
                        port_name,
                        getattr(deployment.args, port_name, None),
                    )
                port_mapping.append(
                    PortMapping(
                        deployment_name=deployment_name,
                        pod_name='',
                        ports=current_ports,
                    )
                )
            elif (
                runtime_cls in ['ContainerRuntime']
                and hasattr(deployment.args, 'replicas')
                and deployment.args.replicas > 1
            ):
                for pod_args in [deployment.pod_args['head']]:
                    self._update_port_mapping(
                        pod_args, deployment_name, port_mapping
                    )
        self.ports = port_mapping

        # save to a new file & set it for partial-daemon
        f.save_config(filename=self.newfile)
        self.params.uses = self.newname
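# Why the bookkeeping above matters: before the container starts, every
# collected port has to be published on localhost. A hedged sketch of that
# downstream translation (`to_docker_ports` is a hypothetical name; it targets
# the `ports=` dict accepted by docker-py's `client.containers.run`):
def to_docker_ports(port_mapping):
    """Publish each non-empty port in the `PortMapping` list on the same
    host port, e.g. {'45678/tcp': 45678}."""
    published = {}
    for mapping in port_mapping:
        for port_name in Ports.__fields__:
            port = getattr(mapping.ports, port_name, None)
            if port is not None:
                published[f'{port}/tcp'] = port
    return published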