def __questions_kobo_postgres(self):
    """
    Ask for the PostgreSQL credentials KoBoToolbox connects with
    (server, KoboFORM/KoboCAT database names, port, user, password)
    and store each answer back into the configuration dict.
    """
    config = self.__dict

    config['kobo_db_server'] = CLI.colored_input(
        'KoBoToolbox PostgreSQL server?', CLI.COLOR_QUESTION,
        config['kobo_db_server'])

    # Database names are validated against `^\w+$`; case is preserved.
    for db_key, prompt in (
        ('kobo_db_name',
         "KoBoToolbox's KoboFORM PostgreSQL database name?"),
        ('kobo_cat_db_name',
         "KoBoToolbox's KoboCAT PostgreSQL database name?"),
    ):
        CLI.colored_print(prompt, CLI.COLOR_QUESTION)
        config[db_key] = CLI.get_response(r'~^\w+$', config[db_key],
                                          to_lower=False)

    # Free-form answers; the current value is offered as the default.
    for key, prompt in (
        ('kobo_db_port', 'KoBoToolbox PostgreSQL Port?'),
        ('kobo_db_user', 'KoBoToolbox PostgreSQL User?'),
        ('kobo_db_password', 'KoBoToolbox PostgreSQL Password?'),
    ):
        config[key] = CLI.colored_input(prompt, CLI.COLOR_QUESTION,
                                        config[key])
def __questions_postgres_backups(self):
    """
    Asks all questions about PostgreSQL backups: whether backups are
    enabled, the cron schedule to run them on and, when AWS is in use,
    the AWS-specific backup settings.
    """
    self.__dict['use_backup'] = CLI.yes_no_question(
        'Do you want to activate backups?',
        default=self.__dict['use_backup'])

    if self.__dict['use_backup']:
        # WAL-E streaming backups are not offered by this flow.
        self.__dict['use_wal_e'] = False
        # 5-field cron expression: each field is a number, a comma list,
        # a range, or `*`, each with an optional `/step`.
        schedule_regex_pattern = (
            r'^((((\d+(,\d+)*)|(\d+-\d+)|(\*(\/\d+)?)))'
            r'(\s+(((\d+(,\d+)*)|(\d+\-\d+)|(\*(\/\d+)?)))){4})$')
        message = ('Schedules use linux cron syntax with UTC datetimes.\n'
                   'For example, schedule at 12:00 AM E.S.T every Sunday '
                   'would be:\n'
                   '0 5 * * 0\n'
                   '\n'
                   'Please visit https://crontab.guru/ to generate a '
                   'cron schedule.')
        # Bug fix: `message` was built but never shown to the user;
        # display the cron-syntax help before asking for the schedule.
        CLI.colored_print(message, CLI.COLOR_INFO)
        CLI.colored_print('PostgreSQL backup cron expression?',
                          CLI.COLOR_QUESTION)
        self.__dict['postgres_backup_schedule'] = CLI.get_response(
            '~{}'.format(schedule_regex_pattern),
            self.__dict['postgres_backup_schedule'])

        if self.aws:
            self.__questions_aws_backup_settings()
def update(cls):
    """
    Update kobo-docker and kobo-install themselves, then optionally
    rebuild the configuration, re-render templates and restart
    containers.
    """
    config_object = Config()
    config = config_object.get_config()

    # Pull the latest kobo-docker revision first.
    Setup.update_kobodocker(config)
    CLI.colored_print("KoBoToolbox has been updated", CLI.COLOR_SUCCESS)

    # Then update kobo-install itself from its own git remote.
    CLI.run_command(['git', 'pull', 'origin', Config.KOBO_INSTALL_BRANCH])
    CLI.colored_print("KoBoInstall has been updated", CLI.COLOR_SUCCESS)

    for banner_line in (
        "╔═════════════════════════════════════════════════════╗",
        "║ After an update, it's strongly recommended to run ║",
        "║ `./run.py --setup` to regenerate environment files. ║",
        "╚═════════════════════════════════════════════════════╝",
    ):
        CLI.colored_print(banner_line, CLI.COLOR_WARNING)

    CLI.colored_print("Do you want to proceed?", CLI.COLOR_SUCCESS)
    CLI.colored_print("\t1) Yes")
    CLI.colored_print("\t2) No")
    if CLI.get_response([Config.TRUE, Config.FALSE],
                        Config.TRUE) != Config.TRUE:
        return

    # Rebuild config interactively, regenerate files and update hosts.
    current_config = config_object.build()
    Template.render(config_object)
    config_object.init_letsencrypt()
    Setup.update_hosts(current_config)

    CLI.colored_print("Do you want to (re)start containers?",
                      CLI.COLOR_SUCCESS)
    CLI.colored_print("\t1) Yes")
    CLI.colored_print("\t2) No")
    if CLI.get_response([Config.TRUE, Config.FALSE],
                        Config.TRUE) == Config.TRUE:
        Command.start()
def post_update(cls, cron):
    """
    Finish an update: regenerate YML/environment files from the new
    templates and optionally rebuild the config and restart containers.

    Args:
        cron (bool): when True (unattended mode), merge the stored
            config over the template defaults, force-render the
            templates and exit without prompting.
    """
    config_object = Config()

    # When `cron` is True, we want to bypass questions and just recreate
    # YML and environment files from new templates.
    if cron is True:
        current_config = config_object.get_config_template()
        current_config.update(config_object.get_config())
        config_object.set_config(current_config)
        Template.render(config_object, force=True)
        sys.exit(0)

    for banner_line in (
        "╔═════════════════════════════════════════════════════╗",
        "║ After an update, it's strongly recommended to run ║",
        "║ `./run.py --setup` to regenerate environment files. ║",
        "╚═════════════════════════════════════════════════════╝",
    ):
        CLI.colored_print(banner_line, CLI.COLOR_WARNING)

    CLI.colored_print("Do you want to proceed?", CLI.COLOR_SUCCESS)
    CLI.colored_print("\t1) Yes")
    CLI.colored_print("\t2) No")
    if CLI.get_response([Config.TRUE, Config.FALSE],
                        Config.TRUE) != Config.TRUE:
        return

    # Rebuild config interactively, regenerate files and update hosts.
    current_config = config_object.build()
    Template.render(config_object)
    config_object.init_letsencrypt()
    Setup.update_hosts(current_config)

    CLI.colored_print("Do you want to (re)start containers?",
                      CLI.COLOR_SUCCESS)
    CLI.colored_print("\t1) Yes")
    CLI.colored_print("\t2) No")
    if CLI.get_response([Config.TRUE, Config.FALSE],
                        Config.TRUE) == Config.TRUE:
        Command.start()
def __questions_postgres(self):
    """
    Ask for the support PostgreSQL credentials (database name, password,
    port) and store the answers in the configuration dict.
    """
    CLI.colored_print('Support PostgreSQL database name?',
                      CLI.COLOR_QUESTION)
    # Bug fix: the response used to be assigned to a throwaway local
    # (`support_db_name`) and never persisted; store it like every
    # other setting in this questionnaire.
    self.__dict['support_db_name'] = CLI.get_response(
        r'~^\w+$',
        self.__dict['support_db_name'],
        to_lower=False)

    CLI.colored_print("PostgreSQL user's password?", CLI.COLOR_QUESTION)
    # Minimum length only; any characters are accepted.
    self.__dict['support_db_password'] = CLI.get_response(
        r'~^.{8,}$',
        self.__dict['support_db_password'],
        to_lower=False,
        error_msg='Too short. 8 characters minimum.')

    self.__dict['support_db_port'] = CLI.colored_input(
        'Support PostgreSQL db Port?', CLI.COLOR_QUESTION,
        self.__dict['support_db_port'])
def __questions_aws_backup_settings(self):
    """
    Ask for the AWS backup settings: bucket name, retention counts,
    PostgreSQL backup minimum size, multipart upload chunk size and
    whether the bucket's lifecycle deletion rule should be used.
    Skips everything but the bucket name when no bucket is given.
    """
    self.__dict['aws_backup_bucket_name'] = CLI.colored_input(
        'AWS Backups bucket name', CLI.COLOR_QUESTION,
        self.__dict['aws_backup_bucket_name'])

    # No bucket, nothing else to configure.
    if self.__dict['aws_backup_bucket_name'] == '':
        return

    # Retention counts must be non-negative integers.
    for key, prompt in (
        ('aws_backup_yearly_retention',
         'How many yearly backups to keep?'),
        ('aws_backup_monthly_retention',
         'How many monthly backups to keep?'),
        ('aws_backup_weekly_retention',
         'How many weekly backups to keep?'),
        ('aws_backup_daily_retention',
         'How many daily backups to keep?'),
    ):
        CLI.colored_print(prompt, CLI.COLOR_QUESTION)
        self.__dict[key] = CLI.get_response(r'~^\d+$', self.__dict[key])

    # Bug fix: the question line for this prompt was commented out,
    # leaving the user with only the informational text and a bare
    # input prompt. (Dead commented-out multi-server condition and the
    # unused `backup_from_primary` local were removed.)
    CLI.colored_print('PostgreSQL backup minimum size (in MB)?',
                      CLI.COLOR_QUESTION)
    CLI.colored_print(
        'Files below this size will be ignored when '
        'rotating backups.',
        CLI.COLOR_INFO)
    self.__dict['aws_postgres_backup_minimum_size'] = CLI.get_response(
        r'~^\d+$',
        self.__dict['aws_postgres_backup_minimum_size'])

    CLI.colored_print('Chunk size of multipart uploads (in MB)?',
                      CLI.COLOR_QUESTION)
    self.__dict['aws_backup_upload_chunk_size'] = CLI.get_response(
        r'~^\d+$',
        self.__dict['aws_backup_upload_chunk_size'])

    response = CLI.yes_no_question(
        'Use AWS LifeCycle deletion rule?',
        default=self.__dict['aws_backup_bucket_deletion_rule_enabled'])
    self.__dict['aws_backup_bucket_deletion_rule_enabled'] = response
def __questions_dashboards(self):
    """
    Ask the Dashboards questions: exposed port, KoBoToolbox access
    token, and the cron schedule used to poll GitHub.
    """
    config = self.__dict

    # Plain-text answers; current values are offered as defaults.
    for key, prompt in (
        ('dashboards_port', 'Dashboards Port?'),
        ('dashboards_kobo_token', 'KoBoToolbox Access Token'),
    ):
        config[key] = CLI.colored_input(prompt, CLI.COLOR_QUESTION,
                                        config[key])

    # 5-field cron expression: number, comma list, range or `*`,
    # each with an optional `/step`.
    schedule_regex_pattern = (
        r'^((((\d+(,\d+)*)|(\d+-\d+)|(\*(\/\d+)?)))'
        r'(\s+(((\d+(,\d+)*)|(\d+\-\d+)|(\*(\/\d+)?)))){4})$')
    CLI.colored_print('Dashboards Github Poll cron expression?',
                      CLI.COLOR_QUESTION)
    config['dashboards_cron_schedule'] = CLI.get_response(
        '~{}'.format(schedule_regex_pattern),
        config['dashboards_cron_schedule'])
def info(cls, timeout=600):
    """
    Poll the KoBoToolbox front end until its health endpoint answers,
    then display the access URL and superuser credentials.

    Args:
        timeout (int): seconds to wait before asking the user whether
            to keep waiting. `0` disables the retry prompts.

    Returns:
        bool: True when `/service_health/` returned HTTP 200.
    """
    config_object = Config()
    config = config_object.get_config()

    # Public URL; the port suffix is omitted when it is the default.
    main_url = "{}://{}.{}{}".format(
        "https" if config.get("https") == Config.TRUE else "http",
        config.get("kpi_subdomain"),
        config.get("public_domain_name"),
        ":{}".format(config.get("exposed_nginx_docker_port"))
        if config.get("exposed_nginx_docker_port") and
        str(config.get("exposed_nginx_docker_port")) !=
        Config.DEFAULT_NGINX_PORT
        else ""
    )
    stop = False
    start = int(time.time())
    success = False
    hostname = "{}.{}".format(config.get("kpi_subdomain"),
                              config.get("public_domain_name"))
    nginx_port = int(Config.DEFAULT_NGINX_HTTPS_PORT) \
        if config.get("https") == Config.TRUE \
        else int(config.get("exposed_nginx_docker_port",
                            Config.DEFAULT_NGINX_PORT))
    https = config.get("https") == Config.TRUE
    already_retried = False

    while not stop:
        if Network.status_check(hostname, "/service_health/",
                                nginx_port, https) == \
                Network.STATUS_OK_200:
            stop = True
            success = True
        elif int(time.time()) - start >= timeout:
            if timeout > 0:
                # Bug fix: corrected the user-facing typo
                # "This is can be normal".
                CLI.colored_print(
                    "\n`KoBoToolbox` has not started yet. "
                    "This can be normal with low CPU/RAM computers.\n",
                    CLI.COLOR_INFO)
                CLI.colored_print(
                    "Wait for another {} seconds?".format(timeout),
                    CLI.COLOR_SUCCESS)
                CLI.colored_print("\t1) Yes")
                CLI.colored_print("\t2) No")
                response = CLI.get_response(
                    [Config.TRUE, Config.FALSE], Config.TRUE)
                if response == Config.TRUE:
                    start = int(time.time())
                    continue
                else:
                    # Offer a one-time front-end restart before giving up.
                    if already_retried is False:
                        already_retried = True
                        CLI.colored_print(
                            ("\nSometimes frontend containers "
                             "can not communicate with backend "
                             "containers.\n"
                             "Restarting the frontend containers "
                             "usually fixes it.\n"),
                            CLI.COLOR_INFO)
                        # Bug fix: dropped a no-op `.format(timeout)`;
                        # the string has no placeholder.
                        CLI.colored_print("Do you want to try?",
                                          CLI.COLOR_SUCCESS)
                        CLI.colored_print("\t1) Yes")
                        CLI.colored_print("\t2) No")
                        response = CLI.get_response(
                            [Config.TRUE, Config.FALSE], Config.TRUE)
                        if response == Config.TRUE:
                            start = int(time.time())
                            cls.restart_frontend()
                            continue
            stop = True
        else:
            # Progress indicator while waiting between polls.
            sys.stdout.write(".")
            sys.stdout.flush()
            time.sleep(10)

    # Terminate the line of progress dots.
    print("")

    if success:
        username = config.get("super_user_username")
        password = config.get("super_user_password")
        # Width of the framed banner, driven by the longest line.
        username_chars_count = len(username) + 6
        password_chars_count = len(password) + 10
        url_chars_count = len(main_url) + 6
        max_chars_count = max(username_chars_count,
                              password_chars_count,
                              url_chars_count)

        CLI.colored_print("╔═{}═╗".format("═" * max_chars_count),
                          CLI.COLOR_WARNING)
        CLI.colored_print("║ Ready {} ║".format(
            " " * (max_chars_count - len("Ready "))),
            CLI.COLOR_WARNING)
        CLI.colored_print("║ URL: {}/{} ║".format(
            main_url, " " * (max_chars_count - url_chars_count)),
            CLI.COLOR_WARNING)
        CLI.colored_print("║ User: {}{} ║".format(
            username,
            " " * (max_chars_count - username_chars_count)),
            CLI.COLOR_WARNING)
        CLI.colored_print("║ Password: {}{} ║".format(
            password,
            " " * (max_chars_count - password_chars_count)),
            CLI.COLOR_WARNING)
        CLI.colored_print("╚═{}═╝".format("═" * max_chars_count),
                          CLI.COLOR_WARNING)
    else:
        CLI.colored_print("KoBoToolbox could not start! "
                          "Please try `python3 run.py --logs` "
                          "to see the logs.", CLI.COLOR_ERROR)

    return success
def render(cls, config_object, force=False):
    """
    Render all template trees into their destinations:

      1. environment-file templates -> the env-files directory,
      2. kobo-docker templates -> the kobo-docker checkout,
      3. letsencrypt (nginx-certbot) templates -> the certbot repo,
         only when Let's Encrypt is enabled.

    When existing environment files carry a different unique id, the
    user is warned before anything is overwritten, unless `force` is
    True.
    """
    config = config_object.get_config()
    template_variables = cls.__get_template_variables(config_object)
    environment_directory = config_object.get_env_files_path()

    unique_id = cls.__read_unique_id(environment_directory)
    id_mismatch = unique_id is not None and \
        str(config.get("unique_id", "")) != str(unique_id)
    if force is not True and id_mismatch:
        for banner_line in (
            "╔═════════════════════════════════════════════════════════════════════╗",
            "║ WARNING! ║",
            "║ Existing environment files are detected. Files will be overwritten. ║",
            "╚═════════════════════════════════════════════════════════════════════╝",
        ):
            CLI.colored_print(banner_line, CLI.COLOR_WARNING)
        CLI.colored_print("Do you want to continue?", CLI.COLOR_SUCCESS)
        CLI.colored_print("\t1) Yes")
        CLI.colored_print("\t2) No")
        if CLI.get_response([Config.TRUE, Config.FALSE],
                            Config.FALSE) == Config.FALSE:
            sys.exit()

    cls.__write_unique_id(environment_directory,
                          config.get("unique_id"))

    base_dir = os.path.dirname(
        os.path.dirname(os.path.realpath(__file__)))
    templates_path_parent = os.path.join(base_dir, "templates")

    # Environment files: mirrored tree under the env-files directory.
    env_templates_path = os.path.join(templates_path_parent,
                                      Config.ENV_FILES_DIR, "")
    for root, _, filenames in os.walk(env_templates_path):
        destination = cls.__create_directory(environment_directory,
                                             root, env_templates_path)
        cls.__write_templates(template_variables, root, destination,
                              filenames)

    # kobo-docker: everything lands flat in the kobo-docker checkout.
    docker_templates_path = os.path.join(templates_path_parent,
                                         "kobo-docker")
    for root, _, filenames in os.walk(docker_templates_path):
        cls.__write_templates(template_variables, root,
                              config.get("kobodocker_path"), filenames)

    # nginx-certbot (Let's Encrypt) files, when HTTPS via certbot.
    if config_object.use_letsencrypt:
        certbot_templates_path = os.path.join(
            templates_path_parent, Config.LETSENCRYPT_DOCKER_DIR, "")
        for root, _, filenames in os.walk(certbot_templates_path):
            destination = cls.__create_directory(
                config_object.get_letsencrypt_repo_path(), root,
                certbot_templates_path)
            cls.__write_templates(template_variables, root,
                                  destination, filenames)
def update_hosts(cls, config):
    # Add (or refresh) the KoBoToolbox entries in `/etc/hosts` for a
    # local installation, so the KPI/KoBoCAT/Enketo subdomains resolve
    # to the configured local interface IP. The rewritten file is staged
    # in /tmp and moved into place with sudo at the end.
    if config.get("local_installation") == Config.TRUE:
        # Markers delimiting the block this tool owns inside /etc/hosts.
        start_sentence = "### (BEGIN) KoBoToolbox local routes"
        end_sentence = "### (END) KoBoToolbox local routes"

        with open("/etc/hosts", "r") as f:
            tmp_host = f.read()

        start_position = tmp_host.find(start_sentence)
        end_position = tmp_host.find(end_sentence)
        # Strip any previously-written block before appending a fresh
        # one. NOTE(review): the `+ 1` assumes a newline immediately
        # follows the end marker — confirm that is always the case.
        if start_position > -1:
            tmp_host = tmp_host[0:start_position] + tmp_host[
                end_position + len(end_sentence) + 1:]

        # One hosts line mapping all three subdomains to the local IP.
        routes = "{ip_address} " \
                 "{kpi_subdomain}.{public_domain_name} " \
                 "{kc_subdomain}.{public_domain_name} " \
                 "{ee_subdomain}.{public_domain_name}".format(
                     ip_address=config.get("local_interface_ip"),
                     public_domain_name=config.get("public_domain_name"),
                     kpi_subdomain=config.get("kpi_subdomain"),
                     kc_subdomain=config.get("kc_subdomain"),
                     ee_subdomain=config.get("ee_subdomain")
                 )

        # Re-append the managed block at the end of the file.
        tmp_host = ("{bof}"
                    "\n{start_sentence}"
                    "\n{routes}"
                    "\n{end_sentence}").format(
            bof=tmp_host.strip(),
            start_sentence=start_sentence,
            routes=routes,
            end_sentence=end_sentence)

        # Stage the new file; it is moved into place with sudo below.
        with open("/tmp/etchosts", "w") as f:
            f.write(tmp_host)

        if config.get("review_host") != Config.FALSE:
            CLI.colored_print(
                "╔═══════════════════════════════════════════════════════════════════╗",
                CLI.COLOR_WARNING)
            CLI.colored_print(
                "║ Administrative privileges are required to update your /etc/hosts. ║",
                CLI.COLOR_WARNING)
            CLI.colored_print(
                "╚═══════════════════════════════════════════════════════════════════╝",
                CLI.COLOR_WARNING)
            CLI.colored_print(
                "Do you want to review your /etc/hosts file before overwriting it?",
                CLI.COLOR_SUCCESS)
            CLI.colored_print("\t1) Yes")
            CLI.colored_print("\t2) No")
            config["review_host"] = CLI.get_response(
                [Config.TRUE, Config.FALSE],
                config.get("review_host", Config.FALSE))
            if config["review_host"] == Config.TRUE:
                print(tmp_host)
                CLI.colored_input("Press any keys when ready")
            # Save 'review_host' so the answer persists across runs.
            config_ = Config()
            config_.write_config()

        # Replace /etc/hosts, keeping the previous file as a backup.
        return_value = os.system(
            "sudo mv /etc/hosts /etc/hosts.old && sudo mv /tmp/etchosts /etc/hosts"
        )
        if return_value != 0:
            sys.exit()
def migrate_single_to_two_databases():
    """
    Check the contents of the databases. If KPI's is empty or doesn't
    exist while KoBoCAT's has user data, then we are migrating from a
    single-database setup and the clone script must be run.
    """
    config_object = Config()
    config = config_object.get_config()
    backend_role = config.get("backend_server_role", "primary")

    def _kpi_db_alias_kludge(command):
        """
        Sorry, this is not very nice. See
        https://github.com/kobotoolbox/kobo-docker/issues/264.
        """
        # Point KPI at its own database URL for the duration of the
        # command.
        set_env = 'DATABASE_URL="${KPI_DATABASE_URL}"'
        return ["bash", "-c",
                "{} {}".format(set_env, command)]

    # Base docker-compose invocation for one-off commands inside a
    # throwaway KPI container.
    kpi_run_command = ["docker-compose",
                       "-f", "docker-compose.frontend.yml",
                       "-f", "docker-compose.frontend.override.yml",
                       "-p", config_object.get_prefix("frontend"),
                       "run", "--rm", "kpi"]

    # Make sure Postgres is running
    # We add this message to users because when AWS backups are
    # activated, it takes a long time to install the virtualenv in
    # PostgreSQL container, so the `wait_for_database` below sits there
    # a while. It makes us think KoBoInstall is frozen.
    CLI.colored_print(
        "Waiting for PostgreSQL database to be up & running...",
        CLI.COLOR_INFO)
    frontend_command = kpi_run_command + _kpi_db_alias_kludge(" ".join([
        "python", "manage.py",
        "wait_for_database", "--retries", "45"]))
    CLI.run_command(frontend_command, config.get("kobodocker_path"))
    CLI.colored_print("The PostgreSQL database is running!",
                      CLI.COLOR_SUCCESS)

    # Ask KPI whether each database is empty; the answer is the last
    # line of output, e.g. "True\tFalse".
    frontend_command = kpi_run_command + _kpi_db_alias_kludge(" ".join([
        "python", "manage.py",
        "is_database_empty", "kpi", "kobocat"]))
    output = CLI.run_command(frontend_command,
                             config.get("kobodocker_path"))
    # TODO: read only stdout and don't consider stderr unless the exit
    # code is non-zero. Currently, `output` combines both stdout and
    # stderr
    kpi_kc_db_empty = output.strip().split("\n")[-1]

    if kpi_kc_db_empty == "True\tFalse":
        # KPI empty but KC is not: run the two-database upgrade script
        CLI.colored_print(
            "Upgrading from single-database setup to separate databases "
            "for KPI and KoBoCAT",
            CLI.COLOR_INFO)
        _message_lines = [
            '╔══════════════════════════════════════════════════════════════╗',
            '║ Upgrading to separate databases is required to run the ║',
            '║ latest release of KoBoToolbox, but it may be a slow process ║',
            '║ if you have a lot of data. Expect at least one minute of ║',
            '║ downtime for every 1,500 KPI assets. Assets are surveys and ║',
            '║ library items: questions, blocks, and templates. ║',
            '║ Survey *submissions* are not involved. ║',
            '║ ║',
            '║ To postpone this process, downgrade to the last ║',
            '║ single-database release by stopping this script and ║',
            '║ executing the following commands: ║',
            '║ ║',
            '║ python3 run.py --stop ║',
            '║ git fetch ║',
            '║ git checkout shared-database-obsolete ║',
            '║ python3 run.py --update ║',
            '║ python3 run.py --setup ║',
            '║ ║',
            '╚══════════════════════════════════════════════════════════════╝',
            'For help, visit https://community.kobotoolbox.org/t/upgrading-to-separate-databases-for-kpi-and-kobocat/7202.',
        ]
        CLI.colored_print('\n'.join(_message_lines), CLI.COLOR_WARNING)
        CLI.colored_print("Do you want to proceed?", CLI.COLOR_SUCCESS)
        CLI.colored_print("\t1) Yes")
        CLI.colored_print("\t2) No")
        response = CLI.get_response([Config.TRUE, Config.FALSE],
                                    Config.FALSE)
        if response != Config.TRUE:
            sys.exit(0)

        # Run the clone script inside the running postgres container.
        backend_command = [
            "docker-compose",
            "-f",
            "docker-compose.backend.{}.yml".format(backend_role),
            "-f",
            "docker-compose.backend.{}.override.yml".format(backend_role),
            "-p", config_object.get_prefix("backend"),
            "exec", "postgres", "bash",
            "/kobo-docker-scripts/primary/clone_data_from_kc_to_kpi.sh",
            "--noinput"
        ]
        try:
            subprocess.check_call(backend_command,
                                  cwd=config.get("kobodocker_path"))
        except subprocess.CalledProcessError:
            CLI.colored_print("An error has occurred", CLI.COLOR_ERROR)
            sys.exit(1)
    elif kpi_kc_db_empty not in [
        "True\tTrue",
        "False\tTrue",
        "False\tFalse",
    ]:
        # The output was invalid
        CLI.colored_print("An error has occurred", CLI.COLOR_ERROR)
        sys.stderr.write(kpi_kc_db_empty)
        sys.exit(1)
def render(cls, config_object):
    """
    Build the template substitution variables from the configuration
    and render every `*.tpl` under `templates/` into the
    `kobo-deployments` environment directory (the `.tpl` suffix is
    stripped from the output filename).

    Warns the user before overwriting environment files that carry a
    different unique id than the current configuration.
    """
    config = config_object.get_config()

    # Exposed port differs between a local install and a proxied one.
    if config_object.local_install:
        nginx_port = config.get("exposed_nginx_docker_port", "80")
    else:
        nginx_port = config.get("nginx_proxy_port", "80")

    template_variables = {
        "PROTOCOL": "https" if config.get("https") == Config.TRUE
        else "http",
        # `""` enables a template section, `"#"` comments it out.
        "USE_HTTPS": "" if config.get("https") == Config.TRUE else "#",
        "USE_AWS": "" if config.get("use_aws") == Config.TRUE else "#",
        "AWS_ACCESS_KEY_ID": config.get("aws_access_key", ""),
        "AWS_SECRET_ACCESS_KEY": config.get("aws_secret_key", ""),
        "AWS_BUCKET_NAME": config.get("aws_bucket_name", ""),
        "GOOGLE_UA": config.get("google_ua", ""),
        "GOOGLE_API_KEY": config.get("google_api_key", ""),
        "INTERCOM_APP_ID": config.get("intercom", ""),
        "INTERNAL_DOMAIN_NAME": config.get("internal_domain_name", ""),
        "PRIVATE_DOMAIN_NAME": config.get("private_domain_name", ""),
        "PUBLIC_DOMAIN_NAME": config.get("public_domain_name", ""),
        "KOBOFORM_SUBDOMAIN": config.get("kpi_subdomain", ""),
        "KOBOCAT_SUBDOMAIN": config.get("kc_subdomain", ""),
        "ENKETO_SUBDOMAIN": config.get("ee_subdomain", ""),
        "KOBO_SUPERUSER_USERNAME": config.get("super_user_username", ""),
        "KOBO_SUPERUSER_PASSWORD": config.get("super_user_password", ""),
        # NOTE(review): `hexlify` returns bytes; the default would be
        # rendered as `b'...'` in the output file — confirm intended.
        "ENKETO_API_TOKEN": config.get(
            "enketo_api_token", binascii.hexlify(os.urandom(60))),
        "DJANGO_SECRET_KEY": config.get(
            "django_secret_key", binascii.hexlify(os.urandom(24))),
        "KOBOCAT_RAVEN_DSN": config.get("kobocat_raven", ""),
        "KPI_RAVEN_DSN": config.get("kpi_raven", ""),
        "KPI_RAVEN_JS_DSN": config.get("kpi_raven_js", ""),
        "POSTGRES_DB": config.get("postgres_db", ""),
        "POSTGRES_USER": config.get("postgres_user", ""),
        "POSTGRES_PASSWORD": config.get("postgres_password", ""),
        "DEBUG": config.get("debug", False) == Config.TRUE,
        "SMTP_HOST": config.get("smtp_host", ""),
        "SMTP_PORT": config.get("smtp_port", ""),
        "SMTP_USER": config.get("smtp_user", ""),
        "SMTP_PASSWORD": config.get("smtp_password", ""),
        "SMTP_USE_TLS": config.get("smtp_use_tls",
                                   Config.TRUE) == Config.TRUE,
        "DEFAULT_FROM_EMAIL": config.get("default_from_email", ""),
        "MASTER_BACKEND_IP": config.get("master_backend_ip"),
        "LOCAL_INTERFACE_IP": config.get("local_interface_ip"),
        "USE_PUBLIC_DNS": "" if config.get("local_installation") ==
        Config.TRUE else "#",
        "USE_PRIVATE_DNS": "#" if config.get("use_private_dns") ==
        Config.TRUE else "",
        "USE_DNS": "" if config.get("local_installation") ==
        Config.TRUE or
        config.get("use_private_dns") == Config.FALSE else "#",
        "WORKERS_MAX": config.get("workers_max", ""),
        "WORKERS_START": config.get("workers_start", ""),
        "KC_PATH": config.get("kc_path", ""),
        "KPI_PATH": config.get("kpi_path", ""),
        # Dev mode is enabled per-app by providing a source path.
        "USE_KPI_DEV_MODE": "#" if config.get("kpi_path", "") == ""
        else "",
        "USE_KC_DEV_MODE": "#" if config.get("kc_path", "") == ""
        else "",
        "KC_DEV_BUILD_ID": config.get("kc_dev_build_id", ""),
        "KPI_DEV_BUILD_ID": config.get("kpi_dev_build_id", ""),
        "NGINX_PUBLIC_PORT": config.get("exposed_nginx_docker_port",
                                        "80"),
        "NGINX_EXPOSED_PORT": nginx_port,
        "MAX_REQUESTS": config.get("max_requests", "512"),
        # Soft limit is configured in MB; templates expect bytes.
        "SOFT_LIMIT": int(config.get("soft_limit", "128")) * 1024 * 1024,
        "POSTGRES_REPLICATION_PASSWORD": config.get(
            "postgres_replication_password"),
        "WSGI_SERVER": "runserver_plus" if config.get("dev_mode") ==
        Config.TRUE else "uWSGI",
        "USE_X_FORWARDED_HOST": "" if config.get("dev_mode") ==
        Config.TRUE else "#",
        "OVERRIDE_POSTGRES_SETTINGS": "" if config.get(
            "postgres_settings") == Config.TRUE else "#",
        "POSTGRES_APP_PROFILE": config.get("postgres_profile", ""),
        "POSTGRES_RAM": config.get("postgres_ram", ""),
        "POSTGRES_SETTINGS": config.get("postgres_settings_content", ""),
        "POSTGRES_BACKUP_FROM_SLAVE": "" if config.get(
            "backup_from_master") == Config.FALSE else "#",
        "POSTGRES_PORT": config.get("postgresql_port", "5432"),
        "MONGO_PORT": config.get("mongo_port", "27017"),
        # Bug fix: the Redis default port is 6379; "6739" was a
        # digit-swap typo.
        "REDIS_MAIN_PORT": config.get("redis_main_port", "6379"),
        "REDIS_CACHE_PORT": config.get("redis_cache_port", "6380"),
        "USE_BACKUP": "" if config.get("use_backup") == Config.TRUE
        else "#",
        "USE_AWS_BACKUP": "" if config_object.aws and
        config.get("use_backup") == Config.TRUE and
        config.get("aws_backup_bucket_name") != "" else "#",
        "USE_MEDIA_BACKUP": "" if not config_object.aws and
        config.get("use_backup") == Config.TRUE else "#",
        "KOBOCAT_MEDIA_BACKUP_SCHEDULE": config.get(
            "kobocat_media_backup_schedule"),
        "MONGO_BACKUP_SCHEDULE": config.get("mongo_backup_schedule"),
        "POSTGRES_BACKUP_SCHEDULE": config.get(
            "postgres_backup_schedule"),
        "REDIS_BACKUP_SCHEDULE": config.get("redis_backup_schedule"),
        "AWS_BACKUP_BUCKET_NAME": config.get("aws_backup_bucket_name"),
        "AWS_BACKUP_YEARLY_RETENTION": config.get(
            "aws_backup_yearly_retention"),
        "AWS_BACKUP_MONTHLY_RETENTION": config.get(
            "aws_backup_monthly_retention"),
        "AWS_BACKUP_WEEKLY_RETENTION": config.get(
            "aws_backup_weekly_retention"),
        "AWS_BACKUP_DAILY_RETENTION": config.get(
            "aws_backup_daily_retention"),
        "AWS_MONGO_BACKUP_MINIMUM_SIZE": config.get(
            "aws_mongo_backup_minimum_size"),
        "AWS_POSTGRES_BACKUP_MINIMUM_SIZE": config.get(
            "aws_postgres_backup_minimum_size"),
        "AWS_REDIS_BACKUP_MINIMUM_SIZE": config.get(
            "aws_redis_backup_minimum_size"),
        "AWS_BACKUP_UPLOAD_CHUNK_SIZE": config.get(
            "aws_backup_upload_chunk_size"),
        "AWS_BACKUP_BUCKET_DELETION_RULE_ENABLED": "False" if
        config.get("aws_backup_bucket_deletion_rule_enabled") ==
        Config.FALSE else "True",
    }

    # Environment files live next to the kobo-docker checkout.
    environment_directory = os.path.realpath(os.path.normpath(
        os.path.join(config["kobodocker_path"], "..",
                     "kobo-deployments")))

    # Warn before clobbering files written by a different installation.
    unique_id = cls.__read_unique_id(environment_directory)
    if unique_id is not None and \
            str(config.get("unique_id", "")) != str(unique_id):
        CLI.colored_print(
            "╔═════════════════════════════════════════════════════════════════════╗",
            CLI.COLOR_WARNING)
        CLI.colored_print(
            "║ WARNING! ║",
            CLI.COLOR_WARNING)
        CLI.colored_print(
            "║ Existing environment files are detected. Files will be overwritten. ║",
            CLI.COLOR_WARNING)
        CLI.colored_print(
            "╚═════════════════════════════════════════════════════════════════════╝",
            CLI.COLOR_WARNING)
        CLI.colored_print("Do you want to continue?", CLI.COLOR_SUCCESS)
        CLI.colored_print("\t1) Yes")
        CLI.colored_print("\t2) No")
        if CLI.get_response([Config.TRUE, Config.FALSE],
                            Config.FALSE) == Config.FALSE:
            sys.exit()

    cls.__write_unique_id(environment_directory,
                          config.get("unique_id"))

    # Render each `*.tpl` with `string.Template`; drop the extension.
    base_dir = os.path.dirname(
        os.path.dirname(os.path.realpath(__file__)))
    templates_path = os.path.join(base_dir, "templates")
    for root, dirnames, filenames in os.walk(templates_path):
        destination_directory = cls.__create_directory(
            environment_directory, root, config, base_dir)
        for filename in fnmatch.filter(filenames, '*.tpl'):
            with open(os.path.join(root, filename), "r") as template:
                t = PyTemplate(template.read())
                with open(os.path.join(destination_directory,
                                       filename[:-4]), "w") as f:
                    f.write(t.substitute(template_variables))