def upgrade_from_lilac(config: Config) -> None:
    """
    Handle the features that were extracted from core into plugins in Maple.

    For each of the "forum" and "mfe" plugins: print an alert when the plugin
    is not installed, or auto-enable it (and persist the enabled-plugin list)
    when it is installed but not yet loaded.
    """
    # (plugin name, alert when not installed, info when auto-enabled)
    notices = (
        (
            "forum",
            "The Open edX forum feature was moved to a separate plugin in Maple. To keep using this feature, "
            "you must install and enable the tutor-forum plugin: https://github.com/overhangio/tutor-forum",
            "The Open edX forum feature was moved to a separate plugin in Maple. To keep using this feature, "
            "we will now enable the 'forum' plugin. If you do not want to use this feature, you should disable the "
            "plugin with: `tutor plugins disable forum`.",
        ),
        (
            "mfe",
            "In Maple the legacy courseware is no longer supported. You need to install and enable the 'mfe' plugin "
            "to make use of the new learning microfrontend: https://github.com/overhangio/tutor-mfe",
            "In Maple the legacy courseware is no longer supported. To start using the new learning microfrontend, "
            "we will now enable the 'mfe' plugin. If you do not want to use this feature, you should disable the "
            "plugin with: `tutor plugins disable mfe`.",
        ),
    )
    for name, alert, info in notices:
        if not plugins.is_installed(name):
            fmt.echo_alert(alert)
        elif not plugins.is_loaded(name):
            fmt.echo_info(info)
            plugins.load(name)
            tutor_config.save_enabled_plugins(config)
def upgrade_from_juniper(context: click.Context, config: Config) -> None:
    """
    Upgrade a local platform from Juniper: run mysql_upgrade to migrate the
    MySQL data directory from v5.6 to v5.7.

    Skips the MySQL upgrade entirely when the user manages their own MySQL
    instance (RUN_MYSQL=false).
    """
    click.echo(fmt.title("Upgrading from Juniper"))
    tutor_env.save(context.obj.root, config)

    click.echo(fmt.title("Stopping any existing platform"))
    context.invoke(compose.stop)

    if not config["RUN_MYSQL"]:
        fmt.echo_info(
            "You are not running MySQL (RUN_MYSQL=false). It is your "
            "responsibility to upgrade your MySQL instance to v5.7. There is "
            "nothing left to do to upgrade from Juniper."
        )
        return

    click.echo(fmt.title("Upgrading MySQL from v5.6 to v5.7"))
    context.invoke(compose.start, detach=True, services=["mysql"])
    context.invoke(
        compose.execute,
        args=[
            "mysql",
            "bash",
            "-e",
            "-c",
            # Fixed: the root password interpolation was broken (masked
            # placeholder left a dangling `]}'` that is an f-string error).
            f"mysql_upgrade -u {config['MYSQL_ROOT_USERNAME']} --password='{config['MYSQL_ROOT_PASSWORD']}'",
        ],
    )
    context.invoke(compose.stop)
def upgrade_from_koa(context: click.Context, config: Config) -> None:
    """
    Upgrade a local platform from Koa: bump MongoDB from v3.6 to v4.0.

    When MongoDB is managed externally (RUN_MONGODB=false) we only remind the
    user of their responsibility and do nothing.
    """
    click.echo(fmt.title("Upgrading from Koa"))
    if config["RUN_MONGODB"]:
        upgrade_mongodb(context, config, "4.0.25", "4.0")
        return
    fmt.echo_info(
        "You are not running MongoDB (RUN_MONGODB=false). It is your "
        "responsibility to upgrade your MongoDb instance to v4.0. There is "
        "nothing left to do to upgrade from Koa to Lilac."
    )
def wait_for_pod_ready(config: Config, service: str) -> None:
    """
    Block until a pod of the given service reports the ContainersReady
    condition, with a 10-minute timeout enforced by kubectl.
    """
    fmt.echo_info(f"Waiting for a {service} pod to be ready...")
    selector = resource_selector(config, f"app.kubernetes.io/name={service}")
    utils.kubectl(
        "wait",
        *selector,
        "--for=condition=ContainersReady",
        "--timeout=600s",
        "pod",
    )
def _add_mounts(
    docker_compose: t.Dict[str, t.Any], bind_mounts: t.List[MountParam.MountType]
) -> t.Dict[str, t.Any]:
    """
    Append one host->container volume entry per bind-mount to the matching
    service in the docker-compose dict, creating the service stanza when
    missing. Returns the (mutated) docker-compose dict.
    """
    services = docker_compose.setdefault("services", {})
    for service, host_path, container_path in bind_mounts:
        fmt.echo_info(f"Bind-mount: {host_path} -> {container_path} in {service}")
        service_conf = services.setdefault(service, {"volumes": []})
        service_conf["volumes"].append(f"{host_path}:{container_path}")
    return docker_compose
def enable(context: Context, plugin_names: t.List[str]) -> None:
    """
    Load each named plugin, then persist the enabled-plugin list and the
    configuration file, reminding the user to regenerate the environment.
    """
    config = tutor_config.load_minimal(context.root)
    for name in plugin_names:
        plugins.load(name)
        fmt.echo_info(f"Plugin {name} enabled")
    tutor_config.save_enabled_plugins(config)
    tutor_config.save_config_file(context.root, config)
    fmt.echo_info(
        "You should now re-generate your environment with `tutor config save`."
    )
def upgrade_from_maple(context: Context, config: Config) -> None:
    """
    Run the Maple -> Nutmeg data migrations on Kubernetes: bring up mysql,
    lms and cms in turn and run the required manage.py commands.
    """

    def deploy_and_wait(name: str) -> None:
        # Apply the manifests of a single service and block until a pod is ready.
        k8s.kubectl_apply(
            context.root,
            "--selector",
            f"app.kubernetes.io/name={name}",
        )
        k8s.wait_for_pod_ready(config, name)

    def manage(service: str, command: str) -> None:
        # Run one manage.py command inside the service container.
        k8s.kubectl_exec(config, service, ["sh", "-e", "-c", command])

    fmt.echo_info("Upgrading from Maple")
    # The environment needs to be updated because the backpopulate/backfill
    # commands are from Nutmeg.
    tutor_env.save(context.root, config)
    # Start mysql
    deploy_and_wait("mysql")
    # lms upgrade: backpopulate_user_tours
    deploy_and_wait("lms")
    manage("lms", "./manage.py lms migrate user_tours")
    manage("lms", "./manage.py lms backpopulate_user_tours")
    # cms upgrade: backfill_course_tabs, then simulate_publish
    deploy_and_wait("cms")
    manage("cms", "./manage.py cms migrate contentstore")
    manage("cms", "./manage.py cms migrate split_modulestore_django")
    manage("cms", "./manage.py cms backfill_course_tabs")
    manage("cms", "./manage.py cms migrate course_overviews")
    manage("cms", "./manage.py cms simulate_publish")
def _remove_plugin_config_overrides_on_unload(
    plugin: str, _root: str, config: Config
) -> None:
    """
    On plugin unload, drop from the current config every entry that the
    plugin's CONFIG_OVERRIDES filter had injected.
    """
    overrides: t.Iterator[t.Tuple[str, ConfigValue]] = (
        hooks.Filters.CONFIG_OVERRIDES.iterate(
            context=hooks.Contexts.APP(plugin).name
        )
    )
    for key, _ in overrides:
        # Render the removed value for display purposes only.
        removed = env.render_unknown(config, config.pop(key, None))
        fmt.echo_info(f"    config - removing entry: {key}={removed}")
def save(root: str, config: Config) -> None:
    """
    Save the full environment, including version information.
    """
    root_env = pathjoin(root)
    template_targets: t.Iterator[t.Tuple[str, str]] = (
        hooks.Filters.ENV_TEMPLATE_TARGETS.iterate()
    )
    for src, dst in template_targets:
        save_all_from(src, os.path.join(root_env, dst), config)
    upgrade_obsolete(root)
    fmt.echo_info(f"Environment generated in {base_dir(root)}")
def upgrade(context: click.Context, from_release: t.Optional[str]) -> None:
    """
    Perform a partial release upgrade, then regenerate the environment so the
    stored version information is updated.
    """
    fmt.echo_alert(
        "This command only performs a partial upgrade of your Open edX platform. "
        "To perform a full upgrade, you should run `tutor local quickstart`."
    )
    # Fall back to the release recorded in the environment when none was given.
    release = (
        from_release
        if from_release is not None
        else tutor_env.get_env_release(context.obj.root)
    )
    if release is None:
        fmt.echo_info("Your environment is already up-to-date")
    else:
        upgrade_from(context, release)
    # We update the environment to update the version
    context.invoke(config_save_command)
def bindmount_command(context: BaseComposeContext, service: str, path: str) -> None:
    """
    This command is made obsolete by the --mount arguments.
    """
    fmt.echo_alert(
        "The 'bindmount' command is deprecated and will be removed in a later release. Use 'copyfrom' instead."
    )
    config = tutor_config.load(context.root)
    runner = context.job_runner(config)
    host_path = bindmounts.create(runner, service, path)
    fmt.echo_info(
        f"Bind-mount volume created at {host_path}. You can now use it in all `local` and `dev` "
        f"commands with the `--volume={path}` option."
    )
def quickstart(context: click.Context, non_interactive: bool) -> None:
    """
    Run the full Kubernetes platform bootstrap: optional release upgrade,
    interactive configuration, environment generation, platform start and
    database initialisation.
    """
    # Detect whether the environment on disk was generated by an older release.
    run_upgrade_from_release = tutor_env.should_upgrade_from_release(
        context.obj.root)
    if run_upgrade_from_release is not None:
        click.echo(fmt.title("Upgrading from an older release"))
        context.invoke(
            upgrade,
            from_release=tutor_env.get_env_release(context.obj.root),
        )
    click.echo(fmt.title("Interactive platform configuration"))
    config = tutor_config.load_minimal(context.obj.root)
    if not non_interactive:
        interactive_config.ask_questions(config, run_for_prod=True)
    # Persist answers, then reload the full config (defaults + plugins) and
    # regenerate the environment from it.
    tutor_config.save_config_file(context.obj.root, config)
    config = tutor_config.load_full(context.obj.root)
    tutor_env.save(context.obj.root, config)
    if run_upgrade_from_release and not non_interactive:
        # Give users running custom images a chance to rebuild/push them
        # before the platform is started.
        question = f"""Your platform is being upgraded from {run_upgrade_from_release.capitalize()}.

If you run custom Docker images, you must rebuild and push them to your private repository now by running the following commands in a different shell:

    tutor images build all # add your custom images here
    tutor images push all

Press enter when you are ready to continue"""
        click.confirm(
            fmt.question(question), default=True, abort=True, prompt_suffix=" "
        )
    click.echo(fmt.title("Starting the platform"))
    context.invoke(start)
    click.echo(fmt.title("Database creation and migrations"))
    context.invoke(init, limit=None)
    config = tutor_config.load(context.obj.root)
    fmt.echo_info(
        """Your Open edX platform is ready and can be accessed at the following urls:

    {http}://{lms_host}
    {http}://{cms_host}
""".format(
            http="https" if config["ENABLE_HTTPS"] else "http",
            lms_host=config["LMS_HOST"],
            cms_host=config["CMS_HOST"],
        ))
def upgrade_from_koa(config: Config) -> None:
    """
    Print Koa -> Lilac upgrade instructions for Kubernetes deployments.

    The MongoDB upgrade cannot be automated on Kubernetes, so we only print
    the commands the user should run, and bail out early when MongoDB is not
    managed by tutor (RUN_MONGODB=false).
    """
    if not config["RUN_MONGODB"]:
        fmt.echo_info(
            "You are not running MongoDB (RUN_MONGODB=false). It is your "
            "responsibility to upgrade your MongoDb instance to v4.0. There is "
            "nothing left to do to upgrade to Lilac from Koa."
        )
        return
    instructions = """Automatic release upgrade is unsupported in Kubernetes. To upgrade from Koa to Lilac, you should upgrade your MongoDb cluster from v3.6 to v4.0. You should run something similar to:

    tutor k8s stop
    tutor config save --set DOCKER_IMAGE_MONGODB=mongo:4.0.25
    tutor k8s start
    tutor k8s exec mongodb mongo --eval 'db.adminCommand({ setFeatureCompatibilityVersion: "4.0" })'
    tutor config save --unset DOCKER_IMAGE_MONGODB
"""
    fmt.echo_info(instructions)
def upgrade_from_juniper(config: Config) -> None:
    """
    Print Juniper upgrade instructions for Kubernetes deployments.

    The MySQL upgrade cannot be automated on Kubernetes, so we only print the
    commands the user should run, and bail out early when MySQL is not managed
    by tutor (RUN_MYSQL=false).
    """
    if not config["RUN_MYSQL"]:
        fmt.echo_info(
            "You are not running MySQL (RUN_MYSQL=false). It is your "
            "responsibility to upgrade your MySQL instance to v5.7. There is "
            "nothing left to do to upgrade from Juniper."
        )
        return
    # Fixed: the printed command contained a masked password placeholder
    # ('******') with unbalanced shell quoting, making the instructions
    # unusable. Restore the `tutor config printvalue` substitution.
    message = """Automatic release upgrade is unsupported in Kubernetes. To upgrade from Juniper, you should upgrade your MySQL database from v5.6 to v5.7. You should run something similar to:

    tutor k8s start
    tutor k8s exec mysql bash -e -c "mysql_upgrade \
        -u $(tutor config printvalue MYSQL_ROOT_USERNAME) \
        --password='$(tutor config printvalue MYSQL_ROOT_PASSWORD)'"
"""
    fmt.echo_info(message)
def _delete_plugin_templates(plugin: str, root: str, _config: Config) -> None:
    """
    Delete plugin env files on unload.
    """
    targets: t.Iterator[t.Tuple[str, str]] = hooks.Filters.ENV_TEMPLATE_TARGETS.iterate(
        context=hooks.Contexts.APP(plugin).name
    )
    for src, dst in targets:
        # Template paths use "/" separators; convert for the local filesystem.
        path = pathjoin(root, dst.replace("/", os.sep), src.replace("/", os.sep))
        if not os.path.exists(path):
            continue
        fmt.echo_info(f"    env - removing folder: {path}")
        try:
            shutil.rmtree(path)
        except PermissionError as e:
            raise exceptions.TutorError(
                f"Could not delete file {e.filename} from plugin {plugin} in folder {path}"
            )
def quickstart(context: click.Context, non_interactive: bool, pullimages: bool) -> None:
    """
    Run the full development platform bootstrap: interactive configuration,
    environment generation, image pull/build, platform start and database
    initialisation.
    """
    # Best-effort check: warn (but do not abort) when Docker Desktop on macOS
    # is configured with too little RAM.
    try:
        utils.check_macos_docker_memory()
    except exceptions.TutorError as e:
        fmt.echo_alert(
            f"""Could not verify sufficient RAM allocation in Docker:

    {e}

Tutor may not work if Docker is configured with < 4 GB RAM. Please follow instructions from:

    https://docs.tutor.overhang.io/install.html"""
        )
    click.echo(fmt.title("Interactive platform configuration"))
    config = tutor_config.load_minimal(context.obj.root)
    if not non_interactive:
        interactive_config.ask_questions(config, run_for_prod=False)
    # Persist answers, then reload the full config (defaults + plugins) and
    # regenerate the environment from it.
    tutor_config.save_config_file(context.obj.root, config)
    config = tutor_config.load_full(context.obj.root)
    tutor_env.save(context.obj.root, config)
    click.echo(fmt.title("Stopping any existing platform"))
    context.invoke(compose.stop)
    if pullimages:
        click.echo(fmt.title("Docker image updates"))
        context.invoke(compose.dc_command, command="pull")
    click.echo(fmt.title("Building Docker image for LMS and CMS development"))
    context.invoke(compose.dc_command, command="build", args=["lms"])
    click.echo(fmt.title("Starting the platform in detached mode"))
    context.invoke(compose.start, detach=True)
    click.echo(fmt.title("Database creation and migrations"))
    context.invoke(compose.init)
    fmt.echo_info(
        """The Open edX platform is now running in detached mode

Your Open edX platform is ready and can be accessed at the following urls:

    {http}://{lms_host}:8000
    {http}://{cms_host}:8001
""".format(
            http="https" if config["ENABLE_HTTPS"] else "http",
            lms_host=config["LMS_HOST"],
            cms_host=config["CMS_HOST"],
        )
    )
def convert_json2yml(root: str) -> None:
    """
    Older versions of tutor used to have json config files.
    """
    json_path = os.path.join(root, "config.json")
    if not os.path.exists(json_path):
        return
    # Refuse to convert when a YAML config already exists: we would not know
    # which of the two files is authoritative.
    if os.path.exists(config_path(root)):
        raise exceptions.TutorError(
            f"Both config.json and {CONFIG_FILENAME} exist in {root}: only one of these files must exist to continue"
        )
    save_config_file(root, get_yaml_file(json_path))
    os.remove(json_path)
    fmt.echo_info(
        f"File config.json detected in {root} and converted to {CONFIG_FILENAME}"
    )
def upgrade_from_ironwood(context: click.Context, config: Config) -> None:
    """
    Upgrade a local platform from Ironwood: step MongoDB through v3.4 and
    v3.6, stopping the platform between each step.
    """
    click.echo(fmt.title("Upgrading from Ironwood"))
    tutor_env.save(context.obj.root, config)

    click.echo(fmt.title("Stopping any existing platform"))
    context.invoke(compose.stop)

    if not config["RUN_MONGODB"]:
        fmt.echo_info(
            "You are not running MongoDB (RUN_MONGODB=false). It is your "
            "responsibility to upgrade your MongoDb instance to v3.6. There is "
            "nothing left to do to upgrade from Ironwood to Juniper."
        )
        return

    # MongoDB only supports upgrading one minor version at a time.
    for version in ("3.4", "3.6"):
        upgrade_mongodb(context, config, version, version)
        context.invoke(compose.stop)
def runserver(
    context: click.Context,
    mounts: t.Tuple[t.List[compose.MountParam.MountType]],
    options: t.List[str],
    service: str,
) -> None:
    """
    Deprecated development server command; forwards to `compose.run` with
    service ports published.
    """
    depr_warning = "'runserver' is deprecated and will be removed in a future release. Use 'start' instead."
    # Mention the --mount replacement only when the caller passed volume options.
    if any(option.startswith(("-v", "--volume")) for option in options):
        depr_warning += " Bind-mounts can be specified using '-m/--mount'."
    fmt.echo_alert(depr_warning)
    config = tutor_config.load(context.obj.root)
    ports = {"lms": 8000, "cms": 8001}
    if service in ports:
        host = config["LMS_HOST"] if service == "lms" else config["CMS_HOST"]
        fmt.echo_info(
            f"The {service} service will be available at http://{host}:{ports[service]}"
        )
    context.invoke(
        compose.run, mounts=mounts, args=["--service-ports", *options, service]
    )
def start(context: K8sContext, names: List[str]) -> None:
    """
    Apply the Kubernetes manifests for the requested resources ("all" or
    individual service names), creating the namespace and volumes first when
    needed.
    """
    config = tutor_config.load(context.root)
    # Create namespace, if necessary
    # Note that this step should not be run for some users, in particular those
    # who do not have permission to edit the namespace.
    try:
        utils.kubectl("get", "namespaces", k8s_namespace(config))
        fmt.echo_info("Namespace already exists: skipping creation.")
    except exceptions.TutorError:
        fmt.echo_info("Namespace does not exist: now creating it...")
        kubectl_apply(
            context.root,
            "--wait",
            "--selector",
            "app.kubernetes.io/component=namespace",
        )
    for name in names or ["all"]:
        if name == "all":
            # Create volumes
            kubectl_apply(
                context.root,
                "--wait",
                "--selector",
                "app.kubernetes.io/component=volume",
            )
            # Create everything else except jobs
            kubectl_apply(
                context.root,
                "--selector",
                "app.kubernetes.io/component notin (job,volume,namespace)",
            )
        else:
            kubectl_apply(
                context.root,
                "--selector",
                f"app.kubernetes.io/name={name}",
            )
def install(location: str) -> None:
    """
    Install a plugin from a URL or a local file path into PLUGINS_ROOT.

    Raises TutorError when the location is neither a URL nor an existing file.
    """
    basename = os.path.basename(location)
    # Default to a python plugin when the extension is unrecognized.
    if not basename.endswith((".yml", ".py")):
        basename += ".py"
    plugin_path = os.path.join(PLUGINS_ROOT, basename)

    if location.startswith("http"):
        # Download file
        content = urllib.request.urlopen(location).read().decode()
    elif os.path.isfile(location):
        # Read file
        with open(location, encoding="utf-8") as f:
            content = f.read()
    else:
        raise exceptions.TutorError(f"No plugin found at {location}")

    # Save file
    os.makedirs(PLUGINS_ROOT, exist_ok=True)
    with open(plugin_path, "w", newline="\n", encoding="utf-8") as f:
        f.write(content)
    fmt.echo_info(f"Plugin installed at {plugin_path}")
def upgrade_mongodb(
    context: click.Context,
    config: Config,
    to_docker_version: str,
    to_compatibility_version: str,
) -> None:
    """
    Run a single MongoDB upgrade step: boot the target image, then set the
    feature compatibility version, then stop the platform.
    """
    click.echo(fmt.title(f"Upgrading MongoDb to v{to_docker_version}"))
    # Note that the DOCKER_IMAGE_MONGODB value is never saved, because we only
    # save the environment, not the configuration.
    config["DOCKER_IMAGE_MONGODB"] = f"mongo:{to_docker_version}"
    tutor_env.save(context.obj.root, config)
    context.invoke(compose.start, detach=True, services=["mongodb"])
    fmt.echo_info("Waiting for mongodb to boot...")
    sleep(10)
    admin_command = (
        f'db.adminCommand({{ setFeatureCompatibilityVersion: "{to_compatibility_version}" }})'
    )
    context.invoke(
        compose.execute,
        args=["mongodb", "mongo", "--eval", admin_command],
    )
    context.invoke(compose.stop)
def initialise(runner: BaseJobRunner, limit_to: t.Optional[str] = None) -> None:
    """
    Run all registered pre-init tasks, then all init tasks, optionally limited
    to the tasks registered by a single app/plugin.
    """
    fmt.echo_info("Initialising all services...")
    filter_context = hooks.Contexts.APP(limit_to).name if limit_to else None
    # Pre-init tasks run first, then the regular init tasks.
    for label, commands_filter in (
        ("pre-init", hooks.Filters.COMMANDS_PRE_INIT),
        ("init", hooks.Filters.COMMANDS_INIT),
    ):
        tasks: t.Iterator[t.Tuple[str, t.Iterable[str]]] = commands_filter.iterate(
            context=filter_context
        )
        for service, path in tasks:
            fmt.echo_info(f"Running {label} task: {'/'.join(path)}")
            runner.run_job_from_template(service, *path)
    fmt.echo_info("All services initialised.")
def disable(context: Context, plugin_names: t.List[str]) -> None:
    """
    Unload the named plugins (or every plugin when "all" is given) and, when
    anything was actually disabled, save the updated configuration.
    """
    config = tutor_config.load_minimal(context.root)
    disable_all = "all" in plugin_names
    disabled: t.List[str] = []
    for plugin in tutor_config.get_enabled_plugins(config):
        if not disable_all and plugin not in plugin_names:
            continue
        fmt.echo_info(f"Disabling plugin {plugin}...")
        # Let unload hooks clean up plugin templates and config overrides.
        hooks.Actions.PLUGIN_UNLOADED.do(plugin, context.root, config)
        disabled.append(plugin)
        fmt.echo_info(f"Plugin {plugin} disabled")
    if disabled:
        tutor_config.save_config_file(context.root, config)
        fmt.echo_info(
            "You should now re-generate your environment with `tutor config save`."
        )
def importdemocourse(context: BaseComposeContext) -> None:
    """
    Import the Open edX demo course with the project's job runner.
    """
    config = tutor_config.load(context.root)
    fmt.echo_info("Importing demo course")
    jobs.import_demo_course(context.job_runner(config))
def quickstart(
    context: click.Context,
    mounts: t.Tuple[t.List[compose.MountParam.MountType]],
    non_interactive: bool,
    pullimages: bool,
) -> None:
    """
    Run the full local platform bootstrap: optional release upgrade (with a
    backup prompt), interactive configuration, environment generation, image
    pull, platform start and database initialisation.
    """
    # Best-effort check: warn (but do not abort) when Docker Desktop on macOS
    # is configured with too little RAM.
    try:
        utils.check_macos_docker_memory()
    except exceptions.TutorError as e:
        fmt.echo_alert(
            f"""Could not verify sufficient RAM allocation in Docker:

    {e}

Tutor may not work if Docker is configured with < 4 GB RAM. Please follow instructions from:

    https://docs.tutor.overhang.io/install.html"""
        )
    # Detect whether the environment on disk was generated by an older release.
    run_upgrade_from_release = tutor_env.should_upgrade_from_release(
        context.obj.root)
    if run_upgrade_from_release is not None:
        click.echo(fmt.title("Upgrading from an older release"))
        if not non_interactive:
            to_release = tutor_env.get_package_release()
            question = f"""You are about to upgrade your Open edX platform from {run_upgrade_from_release.capitalize()} to {to_release.capitalize()}

It is strongly recommended to make a backup before upgrading. To do so, run:

    tutor local stop
    sudo rsync -avr "$(tutor config printroot)"/ /tmp/tutor-backup/

In case of problem, to restore your backup you will then have to run:

    sudo rsync -avr /tmp/tutor-backup/ "$(tutor config printroot)"/

Are you sure you want to continue?"""
            click.confirm(
                fmt.question(question), default=True, abort=True, prompt_suffix=" "
            )
        context.invoke(
            upgrade,
            from_release=run_upgrade_from_release,
        )
    click.echo(fmt.title("Interactive platform configuration"))
    config = tutor_config.load_minimal(context.obj.root)
    if not non_interactive:
        interactive_config.ask_questions(config)
    # Persist answers, then reload the full config (defaults + plugins) and
    # regenerate the environment from it.
    tutor_config.save_config_file(context.obj.root, config)
    config = tutor_config.load_full(context.obj.root)
    tutor_env.save(context.obj.root, config)
    if run_upgrade_from_release and not non_interactive:
        # Give users running custom images a chance to rebuild them before
        # the platform is started.
        question = f"""Your platform is being upgraded from {run_upgrade_from_release.capitalize()}.

If you run custom Docker images, you must rebuild them now by running the following command in a different shell:

    tutor images build all # list your custom images here

See the documentation for more information:

    https://docs.tutor.overhang.io/install.html#upgrading-to-a-new-open-edx-release

Press enter when you are ready to continue"""
        click.confirm(
            fmt.question(question), default=True, abort=True, prompt_suffix=" "
        )
    click.echo(fmt.title("Stopping any existing platform"))
    context.invoke(compose.stop)
    if pullimages:
        click.echo(fmt.title("Docker image updates"))
        context.invoke(compose.dc_command, command="pull")
    click.echo(fmt.title("Starting the platform in detached mode"))
    context.invoke(compose.start, mounts=mounts, detach=True)
    click.echo(fmt.title("Database creation and migrations"))
    context.invoke(compose.init, mounts=mounts)
    config = tutor_config.load(context.obj.root)
    fmt.echo_info(
        """The Open edX platform is now running in detached mode

Your Open edX platform is ready and can be accessed at the following urls:

    {http}://{lms_host}
    {http}://{cms_host}
""".format(
            http="https" if config["ENABLE_HTTPS"] else "http",
            lms_host=config["LMS_HOST"],
            cms_host=config["CMS_HOST"],
        ))
def run_job(self, service: str, command: str) -> int:
    """
    Run a shell command as a one-off Kubernetes job for the given service.

    Patches the service's job manifest (unique name, entrypoint/args, retry
    and TTL settings), applies it with kubectl, then polls the batch API until
    the job succeeds or fails. Returns 0 on success; raises TutorError when
    the job fails.
    """
    job_name = f"{service}-job"
    job = self.load_job(job_name)
    # Create a unique job name to make it deduplicate jobs and make it easier to
    # find later. Logs of older jobs will remain available for some time.
    job_name += "-" + datetime.now().strftime("%Y%m%d%H%M%S")
    # Wait until all other jobs are completed
    while True:
        active_jobs = self.active_job_names()
        if not active_jobs:
            break
        fmt.echo_info(
            f"Waiting for active jobs to terminate: {' '.join(active_jobs)}"
        )
        sleep(5)
    # Configure job
    job["metadata"]["name"] = job_name
    job["metadata"].setdefault("labels", {})
    job["metadata"]["labels"]["app.kubernetes.io/name"] = job_name
    # Define k8s entrypoint/args
    shell_command = ["sh", "-e", "-c"]
    if job["spec"]["template"]["spec"]["containers"][0].get("command") == []:
        # In some cases, we need to bypass the container entrypoint.
        # Unfortunately, AFAIK, there is no way to do so in K8s manifests. So we mark
        # some jobs with "command: []". For these jobs, the entrypoint becomes "sh -e -c".
        # We do not do this for every job, because some (most) entrypoints are actually useful.
        job["spec"]["template"]["spec"]["containers"][0]["command"] = shell_command
        container_args = [command]
    else:
        container_args = shell_command + [command]
    job["spec"]["template"]["spec"]["containers"][0]["args"] = container_args
    # Fail fast and let Kubernetes garbage-collect the finished job after 1h.
    job["spec"]["backoffLimit"] = 1
    job["spec"]["ttlSecondsAfterFinished"] = 3600
    # Save patched job to "jobs.yml" file
    with open(
        tutor_env.pathjoin(self.root, "k8s", "jobs.yml"), "w", encoding="utf-8"
    ) as job_file:
        serialize.dump(job, job_file)
    # We cannot use the k8s API to create the job: configMap and volume names need
    # to be found with the right suffixes.
    kubectl_apply(
        self.root,
        "--selector",
        f"app.kubernetes.io/name={job_name}",
    )
    message = (
        "Job {job_name} is running. To view the logs from this job, run:\n\n"
        """    kubectl logs --namespace={namespace} --follow $(kubectl get --namespace={namespace} pods """
        """--selector=job-name={job_name} -o=jsonpath="{{.items[0].metadata.name}}")\n\n"""
        "Waiting for job completion..."
    ).format(job_name=job_name, namespace=k8s_namespace(self.config))
    fmt.echo_info(message)
    # Wait for completion
    field_selector = f"metadata.name={job_name}"
    while True:
        namespaced_jobs = K8sClients.instance().batch_api.list_namespaced_job(
            k8s_namespace(self.config), field_selector=field_selector
        )
        if not namespaced_jobs.items:
            # The job may not be visible through the API yet: keep polling.
            continue
        job = namespaced_jobs.items[0]
        if not job.status.active:
            if job.status.succeeded:
                fmt.echo_info(f"Job {job_name} successful.")
                break
            if job.status.failed:
                raise exceptions.TutorError(
                    f"Job {job_name} failed. View the job logs to debug this issue."
                )
        sleep(5)
    return 0
def save_config_file(root: str, config: Config) -> None:
    """
    Serialize the configuration to the project's config file, creating the
    parent directory when needed.
    """
    path = config_path(root)
    utils.ensure_file_directory_exists(path)
    with open(path, "w", encoding="utf-8") as config_file:
        serialize.dump(config, config_file)
    fmt.echo_info(f"Configuration saved to {path}")
def upgrade_from_lilac(config: Config) -> None:
    """
    Upgrade a Kubernetes platform from Lilac: run the shared Lilac upgrade
    steps, then delete all deployments and services so they can be recreated.
    """
    common_upgrade.upgrade_from_lilac(config)
    fmt.echo_info(
        "All Kubernetes services and deployments need to be deleted during "
        "upgrade from Lilac to Maple"
    )
    k8s.delete_resources(config, resources=["deployments", "services"])