def start(self, container_config: dict):
    """Starts Orchest.

    Raises:
        ValueError: If the `container_config` does not contain a
            configuration for every image that is supposed to run on
            start.

    """
    # Check whether the minimal set of images is present for Orchest
    # to be started.
    pulled_images = self.resource_manager.get_images()
    req_images: Set[str] = reduce(lambda x, y: x.union(y), _on_start_images, set())
    missing_images = req_images - set(pulled_images)

    if missing_images or not self.resource_manager.is_network_installed():
        utils.echo(
            "Before starting Orchest, make sure Orchest is installed. Run:"
        )
        utils.echo("\torchest install")
        return

    # Check whether the container config contains the set of
    # required images.
    present_imgs = set(config["Image"] for config in container_config.values())
    if req_images - present_imgs:
        # Some required image is missing a configuration.
        raise ValueError(
            "The container_config does not contain a configuration for"
            " every required image: " + ", ".join(req_images)
        )

    # Orchest is already running.
    ids, running_containers = self.resource_manager.get_containers(state="running")
    if not (req_images - set(running_containers)):
        # TODO: Ideally this would print the port on which Orchest
        #       is running. (Was started before and so we do not
        #       simply know.)
        utils.echo("Orchest is already running...")
        return

    # Orchest is partially running and thus in an inconsistent
    # state. Possibly the start command was issued whilst Orchest
    # is still shutting down.
    if running_containers:
        utils.echo(
            "Orchest seems to be partially running. Before attempting to start"
            " Orchest, shut the application down first:",
        )
        utils.echo("\torchest stop")
        return

    # Remove old lingering containers.
    ids, exited_containers = self.resource_manager.get_containers(state="exited")
    self.docker_client.remove_containers(ids)

    utils.fix_userdir_permissions()
    logger.info("Fixing permissions on the 'userdir/'.")

    utils.echo("Starting Orchest...")
    logger.info("Starting containers:\n" + "\n".join(req_images))

    # Start the containers in the correct order, keeping in mind
    # dependencies between containers.
    for i, to_start_imgs in enumerate(_on_start_images):
        filter_ = {"Image": to_start_imgs}
        config = spec.filter_container_config(container_config, filter=filter_)
        stdouts = self.docker_client.run_containers(
            config, use_name=True, detach=True
        )

        # TODO: Abstract version of when the next set of images can
        #       be started. In case the `_on_start_images` has more
        #       stages.
        if i == 0:
            utils.wait_for_zero_exitcode(
                self.docker_client,
                stdouts["orchest-database"]["id"],
                "pg_isready --username postgres",
            )
            utils.wait_for_zero_exitcode(
                self.docker_client,
                stdouts["rabbitmq-server"]["id"],
                (
                    'su rabbitmq -c "/opt/rabbitmq/sbin/rabbitmq-diagnostics '
                    '-q check_port_connectivity"'
                ),
            )

    # Get the port on which Orchest is running.
    nginx_proxy = container_config.get("nginx-proxy")
    if nginx_proxy is not None:
        for port, port_binding in nginx_proxy["HostConfig"]["PortBindings"].items():
            exposed_port = port_binding[0]["HostPort"]
            utils.echo(f"Orchest is running at: http://localhost:{exposed_port}")
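
# The staged startup above relies on `utils.wait_for_zero_exitcode` to block
# until a service inside a container reports ready. Below is a minimal sketch
# of such a helper, assuming a docker-py style client for illustration; the
# actual helper in `utils` may differ in signature and client type.
import time


def wait_for_zero_exitcode(docker_client, container_id, cmd, interval=1.0):
    """Polls `cmd` inside the container until it exits with code 0.

    Sketch only: `docker_client` is assumed to be a `docker.DockerClient`.
    """
    container = docker_client.containers.get(container_id)
    exit_code = None
    while exit_code != 0:
        exit_code, _ = container.exec_run(cmd)
        if exit_code != 0:
            time.sleep(interval)
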
def start():
    # Make sure the installation is complete before starting Orchest.
    if not utils.is_install_complete(language="none"):
        typer.echo("Installation required. To install Orchest run:")
        typer.echo("\torchest install")
        return

    # Dynamically mount the certs directory based on whether it exists
    # in the nginx-proxy directory on the host.
    if proxy_certs_exist_on_host():
        CONTAINER_MAPPING["orchest/nginx-proxy:latest"]["mounts"].append(
            {
                "source": os.path.join(
                    config.ENVS["HOST_REPO_DIR"], "services", "nginx-proxy", "certs"
                ),
                "target": "/etc/ssl/certs",
            }
        )
    else:
        # In case no certs are found, do not expose 443 on the host.
        del CONTAINER_MAPPING["orchest/nginx-proxy:latest"]["ports"]["443/tcp"]

    if config.RUN_MODE == "dev":
        logging.info(
            "Starting Orchest in DEV mode. This mounts host directories "
            "to monitor for source code changes."
        )
        utils.dev_mount_inject(CONTAINER_MAPPING)
    else:
        typer.echo("[Start]: ...")

    # Clean up lingering, old containers from previous starts.
    utils.clean_containers()

    # Make sure userdir/ permissions are correct.
    utils.fix_userdir_permissions()

    # TODO: is the repo tag always the first tag in the Docker
    #       Engine API?
    # Determine the containers that are already running as we do not
    # want to run these again.
    running_containers = docker_client.containers.list()
    running_container_images = [
        running_container.image.tags[0]
        for running_container in running_containers
        if len(running_container.image.tags) > 0
    ]

    images_to_start = [
        image_name
        for image_name in config.ON_START_IMAGES
        if image_name not in running_container_images
    ]

    # Run every container that is not already running. Additionally,
    # use pre-defined container specifications if the container has
    # any.
    for container_image in images_to_start:
        container_spec = CONTAINER_MAPPING.get(container_image, {})
        run_config = utils.convert_to_run_config(container_image, container_spec)

        logging.info("Starting image %s", container_image)
        container = docker_client.containers.run(**run_config)

        # Wait for the database to accept connections before launching
        # the other containers; this will likely take a try or two.
        # TODO: should we have a generic abstraction when it comes to
        #       dependencies among the services? I don't think it's
        #       needed.
        if container_image.startswith("postgres"):
            exit_code, _ = container.exec_run("pg_isready --username postgres")
            while exit_code != 0:
                time.sleep(1)
                exit_code, _ = container.exec_run("pg_isready --username postgres")

    utils.log_server_url()
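
# `proxy_certs_exist_on_host()` above decides whether to mount the certs
# directory and expose port 443. A minimal sketch of what such a check could
# look like is given below; the directory layout and the criterion (a
# non-empty "certs" directory under the host repo) are assumptions for
# illustration, not the actual implementation.
import os


def proxy_certs_exist_on_host() -> bool:
    """Returns True if the nginx-proxy certs directory on the host is non-empty.

    Sketch only: assumes certs live in
    `$HOST_REPO_DIR/services/nginx-proxy/certs` (same `config` module as above).
    """
    certs_dir = os.path.join(
        config.ENVS["HOST_REPO_DIR"], "services", "nginx-proxy", "certs"
    )
    return os.path.isdir(certs_dir) and len(os.listdir(certs_dir)) > 0
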