def post_update(cls, cron):
    """
    Re-render configuration and environment files after a code update.

    When `cron` is True, all interactive questions are bypassed: the
    YML/environment files are recreated from the new templates and the
    process exits immediately.
    """
    config = Config()

    if cron is True:
        # Non-interactive (cron) mode: overlay the saved answers on top of
        # the template defaults, then force-render and exit.
        merged = config.get_template()
        merged.update(config.get_dict())
        config.set_config(merged)
        Template.render(config, force=True)
        sys.exit(0)

    message = ('After an update, it is strongly recommended to run\n'
               '`python3 run.py --setup` to regenerate environment files.')
    CLI.framed_print(message, color=CLI.COLOR_INFO)

    if CLI.yes_no_question('Do you want to proceed?') is True:
        rendered_dict = config.build()
        Template.render(config)
        config.init_letsencrypt()
        Setup.update_hosts(rendered_dict)
        restart = CLI.yes_no_question('Do you want to (re)start containers?')
        if restart is True:
            Command.start()
def __create_directory(self):
    """
    Create repository directory if it doesn't exist.

    Prompts the user for an installation path, keeps asking until the path
    is confirmed and either already exists or can be created, then stores
    it in the configuration and validates the installation.
    """
    CLI.colored_print('Where do you want to install?', CLI.COLOR_QUESTION)
    while True:
        support_api_path = CLI.colored_input(
            '', CLI.COLOR_QUESTION, self.__dict['support_api_path'])
        if support_api_path.startswith('.'):
            # Relative path: resolve it against the parent directory of
            # this script's location.
            base_dir = os.path.dirname(
                os.path.dirname(os.path.realpath(__file__)))
            support_api_path = os.path.normpath(
                os.path.join(base_dir, support_api_path))
        question = 'Please confirm path [{}]'.format(support_api_path)
        response = CLI.yes_no_question(question)
        if response is True:
            if os.path.isdir(support_api_path):
                break
            else:
                try:
                    os.makedirs(support_api_path)
                    break
                except OSError:
                    # Creation failed (permissions, invalid path, ...):
                    # report the problem and re-ask for a path.
                    CLI.colored_print(
                        'Could not create directory {}!'.format(
                            support_api_path), CLI.COLOR_ERROR)
                    CLI.colored_print(
                        'Please make sure you have permissions '
                        'and path is correct', CLI.COLOR_ERROR)
    self.__dict['support_api_path'] = support_api_path
    self.write_unique_id()
    self.__validate_installation()
def __questions_postgres_backups(self):
    """
    Asks all questions about backups.

    When backups are enabled, disables WAL-E, prompts for a PostgreSQL
    backup cron schedule (validated against a 5-field cron regex) and,
    when AWS is in use, asks the AWS-specific backup questions.
    """
    self.__dict['use_backup'] = CLI.yes_no_question(
        'Do you want to activate backups?',
        default=self.__dict['use_backup'])

    if self.__dict['use_backup']:
        # Plain cron-based dumps are used; WAL-E streaming is turned off.
        self.__dict['use_wal_e'] = False
        # Validates a 5-field cron expression
        # (minute hour day-of-month month day-of-week).
        schedule_regex_pattern = (
            r'^((((\d+(,\d+)*)|(\d+-\d+)|(\*(\/\d+)?)))'
            r'(\s+(((\d+(,\d+)*)|(\d+\-\d+)|(\*(\/\d+)?)))){4})$')
        message = ('Schedules use linux cron syntax with UTC datetimes.\n'
                   'For example, schedule at 12:00 AM E.S.T every Sunday '
                   'would be:\n'
                   '0 5 * * 0\n'
                   '\n'
                   'Please visit https://crontab.guru/ to generate a '
                   'cron schedule.')
        # Fix: `message` was built but never shown to the user; display
        # the cron-syntax help before prompting for the schedule, matching
        # how framed messages are shown elsewhere in this file.
        CLI.framed_print(message, color=CLI.COLOR_INFO)
        CLI.colored_print('PostgreSQL backup cron expression?',
                          CLI.COLOR_QUESTION)
        self.__dict['postgres_backup_schedule'] = CLI.get_response(
            '~{}'.format(schedule_regex_pattern),
            self.__dict['postgres_backup_schedule'])
        if self.aws:
            self.__questions_aws_backup_settings()
def render(cls, config, force=False):
    """
    Write configuration files based on `config`

    Renders three template trees: the environment files, the kobo-docker
    files, and (when Let's Encrypt is enabled) the nginx-certbot files.

    Args:
        config (helpers.config.Config)
        force (bool): when True, skip the overwrite confirmation even if
            the existing environment files belong to another installation.
    """
    dict_ = config.get_dict()
    template_variables = cls.__get_template_variables(config)
    environment_directory = config.get_env_files_path()
    unique_id = cls.__read_unique_id(environment_directory)
    # Warn (and allow aborting) only when existing environment files were
    # generated by a *different* installation, i.e. the unique id stored on
    # disk does not match the one in the current configuration.
    if (not force and unique_id
            and str(dict_.get('unique_id', '')) != str(unique_id)):
        message = (
            'WARNING!\n\n'
            'Existing environment files are detected. Files will be '
            'overwritten.')
        CLI.framed_print(message)
        response = CLI.yes_no_question('Do you want to continue?',
                                       default=False)
        if not response:
            sys.exit(0)

    cls.__write_unique_id(environment_directory, dict_['unique_id'])

    base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    templates_path_parent = os.path.join(base_dir, 'templates')

    # Environment
    templates_path = os.path.join(templates_path_parent,
                                  Config.ENV_FILES_DIR, '')
    for root, dirnames, filenames in os.walk(templates_path):
        destination_directory = cls.__create_directory(
            environment_directory, root, templates_path)
        cls.__write_templates(template_variables, root,
                              destination_directory, filenames)

    # kobo-docker
    templates_path = os.path.join(templates_path_parent, 'kobo-docker')
    for root, dirnames, filenames in os.walk(templates_path):
        destination_directory = dict_['kobodocker_path']
        cls.__write_templates(template_variables, root,
                              destination_directory, filenames)

    # nginx-certbot (Let's Encrypt)
    if config.use_letsencrypt:
        templates_path = os.path.join(templates_path_parent,
                                      Config.LETSENCRYPT_DOCKER_DIR, '')
        for root, dirnames, filenames in os.walk(templates_path):
            destination_directory = cls.__create_directory(
                config.get_letsencrypt_repo_path(), root, templates_path)
            cls.__write_templates(template_variables, root,
                                  destination_directory, filenames)
def __questions_aws(self):
    """
    Asks whether AWS S3 storage should be used, then delegates to the
    AWS configuration and credential-validation questions.
    """
    wants_aws = CLI.yes_no_question(
        'Do you want to use AWS S3 storage?',
        default=self.__dict['use_aws'])
    self.__dict['use_aws'] = wants_aws
    self.__questions_aws_configuration()
    self.__questions_aws_validate_credentials()
def __questions_aws_backup_settings(self):
    """
    Asks for the AWS backup bucket name and, when one is provided, for the
    retention counts, rotation minimum size, upload chunk size and
    lifecycle-deletion settings.
    """
    self.__dict['aws_backup_bucket_name'] = CLI.colored_input(
        'AWS Backups bucket name', CLI.COLOR_QUESTION,
        self.__dict['aws_backup_bucket_name'])
    if self.__dict['aws_backup_bucket_name'] != '':
        # NOTE(review): only referenced by the commented-out condition
        # below; kept so that logic can be restored as-is.
        backup_from_primary = self.__dict['backup_from_primary']
        CLI.colored_print('How many yearly backups to keep?',
                          CLI.COLOR_QUESTION)
        self.__dict['aws_backup_yearly_retention'] = CLI.get_response(
            r'~^\d+$', self.__dict['aws_backup_yearly_retention'])
        CLI.colored_print('How many monthly backups to keep?',
                          CLI.COLOR_QUESTION)
        self.__dict['aws_backup_monthly_retention'] = CLI.get_response(
            r'~^\d+$', self.__dict['aws_backup_monthly_retention'])
        CLI.colored_print('How many weekly backups to keep?',
                          CLI.COLOR_QUESTION)
        self.__dict['aws_backup_weekly_retention'] = CLI.get_response(
            r'~^\d+$', self.__dict['aws_backup_weekly_retention'])
        CLI.colored_print('How many daily backups to keep?',
                          CLI.COLOR_QUESTION)
        self.__dict['aws_backup_daily_retention'] = CLI.get_response(
            r'~^\d+$', self.__dict['aws_backup_daily_retention'])
        # NOTE(review): the server-role condition and the "minimum size"
        # question prompt are commented out, but the corresponding
        # `get_response` below still runs unconditionally — confirm this
        # is intentional.
        # if (not self.multi_servers or
        #         (self.primary_backend and backup_from_primary) or
        #         (self.secondary_backend and not backup_from_primary)):
        #     CLI.colored_print('PostgresSQL backup minimum size (in MB)?',
        #                       CLI.COLOR_QUESTION)
        CLI.colored_print(
            'Files below this size will be ignored when '
            'rotating backups.',
            CLI.COLOR_INFO)
        self.__dict['aws_postgres_backup_minimum_size'] = CLI.get_response(
            r'~^\d+$', self.__dict['aws_postgres_backup_minimum_size'])
        CLI.colored_print('Chunk size of multipart uploads (in MB)?',
                          CLI.COLOR_QUESTION)
        self.__dict['aws_backup_upload_chunk_size'] = CLI.get_response(
            r'~^\d+$', self.__dict['aws_backup_upload_chunk_size'])
        response = CLI.yes_no_question(
            'Use AWS LifeCycle deletion rule?',
            default=self.__dict['aws_backup_bucket_deletion_rule_enabled'])
        self.__dict['aws_backup_bucket_deletion_rule_enabled'] = response
def __validate_installation(self):
    """
    Validates if installation is not run over existing data.

    The check is made only the first time the setup is run.

    If a PostgreSQL data directory already exists but no
    `docker-compose.backend.template.yml` is found, the user is warned that
    they are installing over foreign data and may abort; continuing writes
    a `kobo_first_run` marker (via sudo) so the postgres container's
    entrypoint behaves as on a fresh install.

    :return: bool
    """
    if self.first_time:
        postgres_dir_path = os.path.join(self.__dict['support_api_path'],
                                         '.vols', 'db')
        postgres_data_exists = os.path.exists(
            postgres_dir_path) and os.path.isdir(postgres_dir_path)
        if postgres_data_exists:
            # Not a reliable way to detect whether folder contains
            # kobo-install files. We assume that if
            # `docker-compose.backend.template.yml` is there, Docker
            # images are the good ones.
            # TODO Find a better way
            docker_composer_file_path = os.path.join(
                self.__dict['support_api_path'],
                'docker-compose.backend.template.yml')
            if not os.path.exists(docker_composer_file_path):
                message = (
                    'WARNING!\n\n'
                    'You are installing over existing data.\n'
                    '\n'
                    'It is recommended to backup your data and import it '
                    'to a fresh installed (by Support API install) database.\n'
                    '\n'
                    'support-api-install uses these images:\n'
                    '    - PostgreSQL: mdillon/postgis:9.5\n'
                    '\n'
                    'Be sure to upgrade to these versions before going '
                    'further!')
                CLI.framed_print(message)
                response = CLI.yes_no_question(
                    'Are you sure you want to continue?',
                    default=False)
                if response is False:
                    sys.exit(0)
                else:
                    CLI.colored_print(
                        'Privileges escalation is needed to prepare DB',
                        CLI.COLOR_WARNING)
                    # Write `kobo_first_run` file to run postgres
                    # container's entrypoint flawlessly.
                    os.system(
                        'echo $(date) | sudo tee -a {} > /dev/null'.format(
                            os.path.join(self.__dict['support_api_path'],
                                         '.vols', 'db', 'kobo_first_run')))
def __questions_aws_validate_credentials(self):
    """
    Prompting user whether they would like to validate their entered AWS
    credentials or continue without validation.

    Retries validation up to `MAXIMUM_AWS_CREDENTIAL_ATTEMPTS` times,
    re-asking the configuration questions between failed attempts, and
    exits the process when all attempts are exhausted.
    """
    # Any previous validation result is discarded when setup is rerun.
    self.__dict['aws_credentials_valid'] = False
    attempts = 0

    if self.__dict['use_aws']:
        self.__dict['aws_validate_credentials'] = CLI.yes_no_question(
            'Would you like to validate your AWS credentials?',
            default=self.__dict['aws_validate_credentials'],
        )

    # Nothing to do unless AWS is enabled and validation was requested.
    if not (self.__dict['use_aws']
            and self.__dict['aws_validate_credentials']):
        return

    while (not self.__dict['aws_credentials_valid']
           and attempts <= self.MAXIMUM_AWS_CREDENTIAL_ATTEMPTS):
        attempts += 1
        self.validate_aws_credentials()
        remaining = self.MAXIMUM_AWS_CREDENTIAL_ATTEMPTS - attempts
        if self.__dict['aws_credentials_valid']:
            CLI.colored_print('AWS credentials successfully validated',
                              CLI.COLOR_SUCCESS)
        elif remaining > 0:
            CLI.colored_print(
                'Invalid credentials, please try again.',
                CLI.COLOR_WARNING,
            )
            CLI.colored_print(
                'Attempts remaining for AWS validation: {}'.format(
                    remaining),
                CLI.COLOR_INFO,
            )
            # Let the user correct the credentials before retrying.
            self.__questions_aws_configuration()
        else:
            CLI.colored_print('Please restart configuration',
                              CLI.COLOR_ERROR)
            sys.exit(1)
def update_hosts(cls, dict_):
    """
    Adds/refreshes KoBoToolbox routes in `/etc/hosts` for local
    installations.

    The routes are written between BEGIN/END marker lines; any previous
    marked block is removed first so repeated runs do not duplicate
    entries. The new file is staged in /tmp and moved into place with
    sudo (the old file is kept as `/etc/hosts.old`).

    Args:
        dict_ (dict): Dictionary provided by `Config.get_dict()`
    """
    if dict_['local_installation']:
        start_sentence = '### (BEGIN) KoBoToolbox local routes'
        end_sentence = '### (END) KoBoToolbox local routes'

        with open('/etc/hosts', 'r') as f:
            tmp_host = f.read()

        # Strip any previously-written KoBoToolbox block (including its
        # trailing newline) before appending the fresh one.
        start_position = tmp_host.find(start_sentence)
        end_position = tmp_host.find(end_sentence)
        if start_position > -1:
            tmp_host = tmp_host[0: start_position] \
                + tmp_host[end_position + len(end_sentence) + 1:]

        routes = '{ip_address} ' \
                 '{kpi_subdomain}.{public_domain_name} ' \
                 '{kc_subdomain}.{public_domain_name} ' \
                 '{ee_subdomain}.{public_domain_name}'.format(
                    ip_address=dict_['local_interface_ip'],
                    public_domain_name=dict_['public_domain_name'],
                    kpi_subdomain=dict_['kpi_subdomain'],
                    kc_subdomain=dict_['kc_subdomain'],
                    ee_subdomain=dict_['ee_subdomain']
                 )

        tmp_host = ('{bof}'
                    '\n{start_sentence}'
                    '\n{routes}'
                    '\n{end_sentence}').format(
            bof=tmp_host.strip(),
            start_sentence=start_sentence,
            routes=routes,
            end_sentence=end_sentence)

        # Stage the candidate file in /tmp; it is moved over /etc/hosts
        # with sudo below.
        with open('/tmp/etchosts', 'w') as f:
            f.write(tmp_host)

        message = ('Privileges escalation is required to update '
                   'your `/etc/hosts`.')
        CLI.framed_print(message, color=CLI.COLOR_INFO)
        dict_['review_host'] = CLI.yes_no_question(
            'Do you want to review your /etc/hosts file '
            'before overwriting it?',
            default=dict_['review_host'])
        if dict_['review_host']:
            print(tmp_host)
            CLI.colored_input('Press any keys when ready')

        # Save 'review_host'
        config = Config()
        config.write_config()

        # Keep a backup of the previous file, then move the new one in.
        cmd = 'sudo mv /etc/hosts /etc/hosts.old ' \
              '&& sudo mv /tmp/etchosts /etc/hosts'
        return_value = os.system(cmd)
        if return_value != 0:
            sys.exit(1)
def migrate_single_to_two_databases(config):
    """
    Check the contents of the databases. If KPI's is empty or doesn't exist
    while KoBoCAT's has user data, then we are migrating from a
    single-database setup

    Args
        config (helpers.config.Config)
    """
    dict_ = config.get_dict()
    backend_role = dict_['backend_server_role']

    def _kpi_db_alias_kludge(command):
        """
        Sorry, this is not very nice. See
        https://github.com/kobotoolbox/kobo-docker/issues/264.
        """
        set_env = 'DATABASE_URL="${KPI_DATABASE_URL}"'
        return ['bash', '-c',
                '{} {}'.format(set_env, command)]

    kpi_run_command = [
        'docker-compose',
        '-f', 'docker-compose.frontend.yml',
        '-f', 'docker-compose.frontend.override.yml',
        '-p', config.get_prefix('frontend'),
        'run', '--rm', 'kpi'
    ]

    # Make sure Postgres is running
    # We add this message to users because when AWS backups are activated,
    # it takes a long time to install the virtualenv in PostgreSQL
    # container, so the `wait_for_database` below sits there a while.
    # It makes us think kobo-install is frozen.
    CLI.colored_print(
        'Waiting for PostgreSQL database to be up & running...',
        CLI.COLOR_INFO)
    frontend_command = kpi_run_command + _kpi_db_alias_kludge(' '.join([
        'python', 'manage.py',
        'wait_for_database', '--retries', '45'
    ]))
    CLI.run_command(frontend_command, dict_['kobodocker_path'])
    CLI.colored_print('The PostgreSQL database is running!',
                      CLI.COLOR_SUCCESS)

    frontend_command = kpi_run_command + _kpi_db_alias_kludge(' '.join([
        'python', 'manage.py',
        'is_database_empty', 'kpi', 'kobocat'
    ]))
    output = CLI.run_command(frontend_command, dict_['kobodocker_path'])
    # TODO: read only stdout and don't consider stderr unless the exit code
    # is non-zero. Currently, `output` combines both stdout and stderr
    # The management command prints two tab-separated booleans on its last
    # line: "<kpi empty>\t<kobocat empty>".
    kpi_kc_db_empty = output.strip().split('\n')[-1]

    if kpi_kc_db_empty == 'True\tFalse':
        # KPI empty but KC is not: run the two-database upgrade script
        CLI.colored_print(
            'Upgrading from single-database setup to separate databases '
            'for KPI and KoBoCAT',
            CLI.COLOR_INFO)
        message = (
            'Upgrading to separate databases is required to run the latest '
            'release of KoBoToolbox, but it may be a slow process if you '
            'have a lot of data. Expect at least one minute of downtime '
            'for every 1,500 KPI assets. Assets are surveys and library '
            'items: questions, blocks, and templates.\n'
            '\n'
            'To postpone this process, downgrade to the last '
            'single-database release by stopping this script and executing '
            'the following commands:\n'
            '\n'
            '       python3 run.py --stop\n'
            '       git fetch\n'
            '       git checkout shared-database-obsolete\n'
            '       python3 run.py --update\n'
            '       python3 run.py --setup\n')
        CLI.framed_print(message)
        message = (
            'For help, visit https://community.kobotoolbox.org/t/upgrading-'
            'to-separate-databases-for-kpi-and-kobocat/7202.')
        CLI.colored_print(message, CLI.COLOR_WARNING)
        response = CLI.yes_no_question('Do you want to proceed?',
                                       default=False)
        if response is False:
            sys.exit(0)

        backend_command = [
            'docker-compose',
            '-f',
            'docker-compose.backend.{}.yml'.format(backend_role),
            '-f',
            'docker-compose.backend.{}.override.yml'.format(backend_role),
            '-p', config.get_prefix('backend'),
            'exec', 'postgres', 'bash',
            '/kobo-docker-scripts/primary/clone_data_from_kc_to_kpi.sh',
            '--noinput'
        ]
        try:
            subprocess.check_call(
                backend_command, cwd=dict_['kobodocker_path'])
        except subprocess.CalledProcessError:
            CLI.colored_print('An error has occurred', CLI.COLOR_ERROR)
            sys.exit(1)

    elif kpi_kc_db_empty not in [
        'True\tTrue',
        'False\tTrue',
        'False\tFalse',
    ]:
        # The output was invalid
        CLI.colored_print('An error has occurred', CLI.COLOR_ERROR)
        sys.stderr.write(kpi_kc_db_empty)
        sys.exit(1)
def info(cls, timeout=600):
    """
    Polls the front end's health endpoint until it answers HTTP 200 (or the
    user gives up) and prints connection information on success.

    Args:
        timeout (int): seconds to wait before asking the user whether to
            keep waiting; when <= 0 the first elapsed check stops silently.

    Returns:
        bool: True when the service responded successfully.
    """
    config = Config()
    dict_ = config.get_dict()
    nginx_port = dict_['exposed_nginx_docker_port']

    # Public URL displayed to the user; the port is omitted when it is
    # the default NGINX port.
    main_url = '{}://{}.{}{}'.format(
        'https' if dict_['https'] else 'http',
        dict_['kpi_subdomain'],
        dict_['public_domain_name'],
        ':{}'.format(nginx_port) if (
            nginx_port and
            str(nginx_port) != Config.DEFAULT_NGINX_PORT
        ) else ''
    )

    stop = False
    start = int(time.time())
    success = False
    hostname = '{}.{}'.format(dict_['kpi_subdomain'],
                              dict_['public_domain_name'])
    https = dict_['https']
    # Health checks target the HTTPS port when TLS is enabled, otherwise
    # the exposed NGINX port.
    nginx_port = int(Config.DEFAULT_NGINX_HTTPS_PORT) \
        if https else int(dict_['exposed_nginx_docker_port'])
    already_retried = False
    while not stop:
        if Network.status_check(hostname,
                                '/service_health/',
                                nginx_port,
                                https) == Network.STATUS_OK_200:
            stop = True
            success = True
        elif int(time.time()) - start >= timeout:
            if timeout > 0:
                # Fix: message previously read "This is can be normal".
                CLI.colored_print(
                    '\n`KoBoToolbox` has not started yet. '
                    'This can be normal with low CPU/RAM computers.\n',
                    CLI.COLOR_INFO)
                question = 'Wait for another {} seconds?'.format(timeout)
                response = CLI.yes_no_question(question)
                if response:
                    # Reset the clock and keep polling.
                    start = int(time.time())
                    continue
                else:
                    if not already_retried:
                        # Offer a one-time front-end restart, which often
                        # fixes front-end/back-end communication issues.
                        already_retried = True
                        CLI.colored_print(
                            '\nSometimes front-end containers cannot '
                            'communicate with back-end containers.\n'
                            'Restarting the front-end containers usually '
                            'fixes it.\n', CLI.COLOR_INFO)
                        question = 'Would you like to try?'
                        response = CLI.yes_no_question(question)
                        if response:
                            start = int(time.time())
                            cls.restart_frontend()
                            continue
            stop = True
        else:
            # Progress indicator while waiting between polls.
            sys.stdout.write('.')
            sys.stdout.flush()
            time.sleep(10)

    # Create a new line
    print('')

    if success:
        username = dict_['super_user_username']
        password = dict_['super_user_password']
        message = ('Ready\n'
                   'URL: {url}\n'
                   'User: {username}\n'
                   'Password: {password}').format(url=main_url,
                                                  username=username,
                                                  password=password)
        CLI.framed_print(message, color=CLI.COLOR_SUCCESS)
    else:
        message = ('KoBoToolbox could not start!\n'
                   'Please try `python3 run.py --logs` to see the logs.')
        CLI.framed_print(message, color=CLI.COLOR_ERROR)

    return success