def wait_for_all_processes_to_stop(environment, ansible_context):
    """Block until no CommCare processes remain running, or the user overrides.

    Polls for running processes; while any remain, offers to abort the
    downtime (restarting services), keep waiting, continue regardless,
    or kill the remaining processes.

    :param environment: target environment object
    :param ansible_context: ansible context used to run remote checks/commands
    """
    while True:
        still_running = check_for_running_cchq_processes(environment, ansible_context)
        if not still_running:
            break
        options = ['abort', 'wait', 'continue', 'kill']
        # Bug fix: the original prompt string had stray '"' characters at the
        # end of each menu line, which were printed verbatim to the user.
        response = ask_option(inspect.cleandoc(
            """Some processes are still running. Do you want to:
            - abort downtime
            - wait for processes to stop
            - continue with downtime regardless
            - kill running processes
            """),
            options, options + ['a', 'w', 'c', 'k']
        )
        if response in ('a', 'abort'):
            if ask('This will start all CommCare processes again. Do you want to proceed?'):
                downtime = get_downtime_record(environment)
                supervisor_services(environment, ansible_context, 'start')
                cancel_downtime_record(environment, downtime)
                return
        elif response in ('w', 'wait'):
            time.sleep(30)  # give processes time to wind down before re-checking
        elif response in ('c', 'continue'):
            if ask('Are you sure you want to continue with downtime even though there '
                   'are still some processes running?'):
                return
        elif response in ('k', 'kill'):
            kill = ask('Are you sure you want to kill all remaining processes?', strict=True)
            if kill:
                kill_remaining_processes(environment, ansible_context)
def run_action_with_check_mode(run_check, run_apply, skip_check, quiet=False, always_skip_check=False):
    """Run an ansible-style action, optionally preceded by a dry-run check.

    Depending on flags, either asks to apply directly or runs ``run_check``
    first and reports its result before asking to apply. Returns the exit
    code of the apply (0 if the user declines), or the check's exit code
    when the check errored before ansible could start (exit code 1).
    """
    if always_skip_check:
        user_wants_to_apply = ask(
            'This command will apply without running the check first. Continue?',
            quiet=quiet)
    elif skip_check:
        user_wants_to_apply = ask('Do you want to apply without running the check first?',
                                  quiet=quiet)
    else:
        check_code = run_check()
        if check_code == 1:
            # this means there was an error before ansible was able to start running
            return check_code
        if check_code == 0:
            puts(colored.green(u"✓ Check completed with status code {}".format(check_code)))
            prompt = 'Do you want to apply these changes?'
        else:
            puts(colored.red(u"✗ Check failed with status code {}".format(check_code)))
            prompt = 'Do you want to try to apply these changes anyway?'
        user_wants_to_apply = ask(prompt, quiet=quiet)

    if not user_wants_to_apply:
        return 0
    apply_code = run_apply()
    if apply_code == 0:
        puts(colored.green(u"✓ Apply completed with status code {}".format(apply_code)))
    else:
        puts(colored.red(u"✗ Apply failed with status code {}".format(apply_code)))
    return apply_code
def run_action_with_check_mode(run_check, run_apply, skip_check, quiet=False, always_skip_check=False):
    """Run an action, optionally preceded by a check-mode dry run.

    Either asks to apply directly (when checks are skipped) or runs
    ``run_check`` first, reports its outcome, and asks before applying.
    Returns the apply exit code (0 if declined), or the check's exit code
    when the check errored before ansible started (code 1).
    """
    if always_skip_check:
        user_wants_to_apply = ask(
            'This command will apply without running the check first. Continue?',
            quiet=quiet)
    elif skip_check:
        user_wants_to_apply = ask('Do you want to apply without running the check first?',
                                  quiet=quiet)
    else:
        check_code = run_check()
        if check_code == 1:
            # error before ansible was able to start running; bail out
            return check_code
        if check_code == 0:
            puts(color_success(u"✓ Check completed with status code {}".format(check_code)))
            prompt = 'Do you want to apply these changes?'
        else:
            puts(color_error(u"✗ Check failed with status code {}".format(check_code)))
            prompt = 'Do you want to try to apply these changes anyway?'
        user_wants_to_apply = ask(prompt, quiet=quiet)

    if not user_wants_to_apply:
        return 0
    apply_code = run_apply()
    if apply_code == 0:
        puts(color_success(u"✓ Apply completed with status code {}".format(apply_code)))
    else:
        puts(color_error(u"✗ Apply failed with status code {}".format(apply_code)))
    return apply_code
def wait_for_all_processes_to_stop(environment, ansible_context):
    """Block until no CommCare processes remain running, or the user overrides.

    Polls for running processes; while any remain, offers to abort the
    downtime (restarting services), keep waiting, continue regardless,
    or kill the remaining processes.
    """
    while True:
        still_running = check_for_running_cchq_processes(environment, ansible_context)
        if not still_running:
            break
        options = ['abort', 'wait', 'continue', 'kill']
        # Bug fix: stray '"' characters inside the multi-line prompt string
        # were printed verbatim in the menu; they have been removed.
        response = ask_option(inspect.cleandoc(
            """Some processes are still running. Do you want to:
            - abort downtime
            - wait for processes to stop
            - continue with downtime regardless
            - kill running processes
            """),
            options, options + ['a', 'w', 'c', 'k']
        )
        if response in ('a', 'abort'):
            if ask('This will start all CommCare processes again. Do you want to proceed?'):
                downtime = get_downtime_record(environment)
                supervisor_services(environment, ansible_context, 'start')
                cancel_downtime_record(environment, downtime)
                return
        elif response in ('w', 'wait'):
            time.sleep(30)  # pause before re-checking
        elif response in ('c', 'continue'):
            if ask('Are you sure you want to continue with downtime even though there '
                   'are still some processes running?'):
                return
        elif response in ('k', 'kill'):
            kill = ask('Are you sure you want to kill all remaining processes?', strict=True)
            if kill:
                kill_remaining_processes(environment, ansible_context)
def migrate(migration, ansible_context, skip_check, no_stop):
    """Execute a couch shard migration plan after interactive confirmation.

    Shows the planned allocation, warns about data-loss risk when running
    with ``no_stop``, then delegates to ``run_action_with_check_mode`` to
    perform a check-mode pass followed by the real migration.
    Returns 0 when aborted, otherwise the migration's exit code.
    """
    print_allocation(migration)
    if not ask("Continue with this plan?"):
        puts("Abort")
        return 0

    if no_stop:
        puts(
            color_notice(
                "Running migrate with --no-stop will result in data loss"))
        puts(
            color_notice("unless each shard of each db has a pivot location."))
        confirmed = ask(
            "Have you manually confirmed that for each shard of each db "
            "at least one of its new locations is the same as an old location, "
            "and do you want to continue without stopping couchdb first?")
        if not confirmed:
            puts("Abort")
            return 0

    return run_action_with_check_mode(
        lambda: _run_migration(migration, ansible_context, check_mode=True, no_stop=no_stop),
        lambda: _run_migration(migration, ansible_context, check_mode=False, no_stop=no_stop),
        skip_check,
    )
def run(self, args, unknown_args):
    """Port-forward a remote service to a special local loopback address via SSH.

    Walks the user through one-time local setup (a loopback alias and an
    /etc/hosts entry naming it), then opens an SSH tunnel from
    ``<loopback>:<local_port>`` to the service's remote host:port.

    Returns the ssh command's exit code, -1 if the user declines setup,
    or 0 when the tunnel is closed with Ctrl-C (the expected exit path).
    """
    environment = get_environment(args.env_name)
    nice_name = environment.terraform_config.account_alias
    remote_host_key, remote_port, url_path = self._SERVICES[args.service]
    # _SERVICES may map a service to a callable that resolves the host key
    # per environment rather than a fixed key
    if isfunction(remote_host_key):
        remote_host_key = remote_host_key(environment)
    loopback_address = f'127.0.{environment.terraform_config.vpc_begin_range}'
    remote_host = lookup_server_address(args.env_name, remote_host_key)
    local_port = self.get_random_available_port()
    # loop until the loopback alias exists locally, or the user gives up
    while not self.is_loopback_address_set_up(loopback_address):
        puts(
            color_error(
                'To make this work you will need to run set up a special loopback address on your local machine:'
            ))
        puts(
            color_notice(
                f' - Mac: Run `sudo ifconfig lo0 alias {loopback_address}`.'
            ))
        puts(
            color_notice(
                f' - Linux: Run `sudo ip addr add {loopback_address}/8 dev lo`.'
            ))
        if not ask(
            "Follow the instructions above or type n to exit. Ready to continue?"
        ):
            return -1
        puts()
    # likewise for the /etc/hosts alias that gives the loopback a nice hostname
    while not self.is_etc_hosts_alias_set_up(loopback_address, nice_name):
        puts(
            color_error(
                'Okay, now the last step is to set up a special alias in your /etc/hosts:'
            ))
        puts(
            color_notice(
                f' - Edit /etc/hosts (e.g. `sudo vim /etc/hosts`) and add the line `{loopback_address} {nice_name}` to it.'
            ))
        if not ask(
            "Follow the instructions above or type n to exit. Ready to continue?"
        ):
            return -1
        puts()
    puts(
        color_notice(
            f'You should now be able to reach {args.env_name} {args.service} at {color_link(f"http://{nice_name}:{local_port}{url_path}")}.'
        ))
    puts(f'Interrupt with ^C to stop port-forwarding and exit.')
    puts()
    try:
        # ssh -N (no remote command) -L (forward loopback:local_port to remote host:port)
        return commcare_cloud(
            args.env_name, 'ssh', 'control', '-NL',
            f'{loopback_address}:{local_port}:{remote_host}:{remote_port}')
    except KeyboardInterrupt:
        puts()
        puts('Connection closed.')
        # ^C this is how we expect the user to terminate this command, so no need to print a stacktrace
        return 0
def run(self, args, unknown_args):
    """Run a rolling restart of the Elasticsearch cluster after two confirmations.

    Exits early (code 0) unless the user confirms both that pillows are
    stopped and that the rolling restart should proceed, then delegates to
    the ``es_rolling_restart.yml`` playbook.
    """
    args.playbook = 'es_rolling_restart.yml'
    pillows_stopped = ask('Have you stopped all the elastic pillows?',
                          strict=True, quiet=args.quiet)
    if not pillows_stopped:
        exit(0)
    puts(colored.yellow(
        "This will cause downtime on the order of seconds to minutes,\n"
        "except in a few cases where an index is replicated across multiple nodes."))
    proceed = ask('Do a rolling restart of the ES cluster?',
                  strict=True, quiet=args.quiet)
    if not proceed:
        exit(0)
    AnsiblePlaybook(self.parser).run(args, unknown_args)
def end_downtime(environment, ansible_context):
    """Start all CommCare services and cancel any active downtime record.

    If no downtime record exists, warns and asks whether to continue anyway;
    otherwise asks before starting services. The record is only cancelled
    when one was found.
    """
    downtime = get_downtime_record(environment)
    if downtime:
        end_downtime = ask("Do you want to start all CommCare services?")
    else:
        puts(colored.yellow('Downtime record not found.'))
        end_downtime = ask("Do you want to continue?")
    if end_downtime:
        supervisor_services(environment, ansible_context, 'start')
        if downtime:
            cancel_downtime_record(environment, downtime)
def _determine_environments_dir(quiet):
    """Interactively decide which environments directory to use.

    In quiet mode, returns the COMMCARE_CLOUD_ENVIRONMENTS env var if set,
    else the Dimagi default. Otherwise offers, in order: the Dimagi default
    directory (for Dimagi staff), the directory named by the environment
    variable, or a freshly scaffolded ~/.commcare-cloud/environments.
    Returns the chosen directory path.
    """
    environments_dir = None
    environ_value = os.environ.get('COMMCARE_CLOUD_ENVIRONMENTS')
    if quiet:
        return environ_value or DIMAGI_ENVIRONMENTS_DIR

    def have_same_realpath(dir1, dir2):
        # compare resolved paths so symlinked copies count as the same dir
        return os.path.realpath(dir1) == os.path.realpath(dir2)

    if not environments_dir:
        if os.path.exists(DIMAGI_ENVIRONMENTS_DIR):
            if ask("Do you work or contract for Dimagi?"):
                print(
                    "OK, we'll give you Dimagi's default environments (production, staging, etc.)."
                )
                environments_dir = DIMAGI_ENVIRONMENTS_DIR
    if not environments_dir:
        # only offer the env-var location if it differs from the Dimagi default
        if environ_value and not have_same_realpath(
                environ_value, DIMAGI_ENVIRONMENTS_DIR):
            print(
                "I see you have COMMCARE_CLOUD_ENVIRONMENTS set to {} in your environment"
                .format(environ_value))
            if ask("Would you like to use environments at that location?"):
                environments_dir = environ_value
    if not environments_dir:
        # fall back to scaffolding an empty environments dir for the user
        default_environments_dir = "~/.commcare-cloud/environments"
        environments_dir = os.path.expanduser(default_environments_dir)
        print(
            "To use commcare-cloud, you have to have an environments directory. "
            "This is where you will store information about your cluster setup, "
            "such as the IP addresses of the hosts in your cluster, "
            "how different services are distributed across the machines, "
            "and all settings specific to your CommCare instance.")
        if ask("Would you like me to create an empty one for you at "
               "{}?".format(default_environments_dir)):
            for dir_name in ['_authorized_keys', '_users']:
                dir_path = os.path.expanduser(
                    os.path.join(default_environments_dir, dir_name))
                if not os.path.exists(dir_path):
                    os.makedirs(dir_path)
            print(
                "Okay, I've got the env started for you, "
                "but you're going to have to fill out the rest before you can do much. "
                "For more information, see https://dimagi.github.io/commcare-cloud/commcare-cloud/env/ "
                "and refer to the examples at "
                "https://github.com/dimagi/commcare-cloud/tree/master/environments."
            )
    return environments_dir
def end_downtime(environment, ansible_context):
    """Bring CommCare services back up and cancel the downtime record, if any.

    Warns when no downtime record is found and asks whether to proceed
    regardless; otherwise asks before starting services. Only cancels the
    record when one exists.
    """
    downtime = get_downtime_record(environment)
    if downtime:
        end_downtime = ask("Do you want to start all CommCare services?")
    else:
        puts(color_notice('Downtime record not found.'))
        end_downtime = ask("Do you want to continue?")
    if end_downtime:
        supervisor_services(environment, ansible_context, 'start')
        if downtime:
            cancel_downtime_record(environment, downtime)
def start_downtime(environment, ansible_context, args):
    """Stop all CommCare services, creating a downtime record if none is active.

    If a downtime is already active, displays it and asks whether to
    proceed; otherwise requires a strict confirmation before stopping.
    """
    downtime = get_downtime_record(environment)
    if downtime:
        puts(colored.yellow('Downtime already active'))
        with indent():
            print_downtime(downtime)
        go_down = ask("Do you want to continue?")
    else:
        go_down = ask("Are you sure you want to stop all CommCare services?", strict=True)
    if not go_down:
        return
    if not downtime:
        create_downtime_record(environment, args.message, args.duration)
    supervisor_services(environment, ansible_context, 'stop')
    wait_for_all_processes_to_stop(environment, ansible_context)
def start_downtime(environment, ansible_context, args):
    """Stop all CommCare services, recording a downtime unless one already exists.

    An active downtime is printed and confirmed with a normal prompt; a new
    downtime requires strict confirmation before services are stopped.
    """
    downtime = get_downtime_record(environment)
    if downtime:
        puts(color_notice('Downtime already active'))
        with indent():
            print_downtime(downtime)
        go_down = ask("Do you want to continue?")
    else:
        go_down = ask("Are you sure you want to stop all CommCare services?", strict=True)
    if not go_down:
        return
    if not downtime:
        create_downtime_record(environment, args.message, args.duration)
    supervisor_services(environment, ansible_context, 'stop')
    wait_for_all_processes_to_stop(environment, ansible_context)
def run(self, args, unknown_args):
    """Apply terraform migrations that are not yet recorded in the remote state.

    Fetches the remote migration checkpoint, verifies it is consistent with
    the migrations present in code, previews the state moves each unapplied
    migration implies, and applies them after user confirmation.
    """
    environment = get_environment(args.env_name)
    remote_migration_state_manager = RemoteMigrationStateManager(environment.terraform_config)
    remote_migration_state = remote_migration_state_manager.fetch()
    migrations = get_migrations()
    # split by the remote checkpoint number: everything before is applied
    applied_migrations = migrations[:remote_migration_state.number]
    unapplied_migrations = migrations[remote_migration_state.number:]
    # make sure remote checkpoint is consistent with migrations in code
    if applied_migrations:
        assert (applied_migrations[-1].number, applied_migrations[-1].slug) == \
            (remote_migration_state.number, remote_migration_state.slug), \
            (remote_migration_state, applied_migrations[-1])
    else:
        # nothing applied yet: the checkpoint must be the zero state
        assert (0, None) == (remote_migration_state.number, remote_migration_state.slug), \
            remote_migration_state
    if not unapplied_migrations:
        print("No migrations to apply")
        return
    state = terraform_list_state(args.env_name, unknown_args)
    print("Applying the following changes:{}".format(
        ''.join('\n - {:0>4} {}'.format(migration.number, migration.slug)
                for migration in unapplied_migrations)
    ))
    print("which will result in the following moves being made:")
    migration_plans = make_migration_plans(environment, state, unapplied_migrations, log=print)
    if ask("Do you want to apply this migration?"):
        apply_migration_plans(
            environment, migration_plans,
            remote_migration_state_manager=remote_migration_state_manager,
            log=print)
def run(self, args, unknown_args):
    """Deploy CommCare and/or Formplayer depending on the requested component.

    Resolves the component to deploy (defaulting per environment config),
    confirms revisions with the user, and runs the relevant deploy(s).
    Warns when formplayer-only flags are irrelevant or formplayer is skipped.
    """
    check_branch(args)
    environment = get_environment(args.env_name)
    deploy_revs = self._confirm_deploy_revs(environment, args, quiet=args.quiet)
    deploy_component = args.component
    # Fixed idiom: compare against None with `is`, not `==`
    if deploy_component is None:
        deploy_component = 'both' if environment.meta_config.always_deploy_formplayer else 'commcare'
    if deploy_component in ['commcare', 'both']:
        print(color_summary("You are about to deploy the following code:"))
        with indent():
            for name, rev in deploy_revs:
                print(color_summary("{}: {}".format(name, rev)))
        if ask('Continue with deploy?', quiet=args.quiet):
            if deploy_component != 'both':
                _warn_no_formplayer()
            self.deploy_commcare(environment, deploy_revs, args, unknown_args)
    if deploy_component in ['formplayer', 'both']:
        if deploy_component != 'both':
            # these flags only affect the commcare deploy; tell the user they are ignored
            if args.commcare_rev:
                print(color_warning('--commcare-rev does not apply to a formplayer deploy and will be ignored'))
            if args.fab_settings:
                print(color_warning('--set does not apply to a formplayer deploy and will be ignored'))
        self._announce_formplayer_deploy_start(environment)
        self.deploy_formplayer(environment, args, unknown_args)
def _confirm_deploy_revs(environment, args, quiet=False): default_branch = environment.fab_settings_config.default_branch branches = [ ('commcare', 'commcare_rev', default_branch), ] for repo in environment.meta_config.git_repositories: branches.append((repo.name, '{}_rev'.format(repo.name), repo.version)) diffs = [] actuals = [] for repo_name, arg_name, default in branches: actual = getattr(args, arg_name, None) actuals.append((repo_name, actual or default)) if actual and actual != default: diffs.append("'{}' repo: {} != {}".format(repo_name, default, actual)) if diffs: message = ( "Whoa there bud! You're deploying non-default. " "\n{}\n" "ARE YOU DOING SOMETHING EXCEPTIONAL THAT WARRANTS THIS?" ).format('/n'.join(diffs)) if not ask(message, quiet=quiet): exit(-1) return actuals
def run(self, args, unknown_args):
    """Deploy either commcare or formplayer, per ``args.component``.

    For a commcare deploy, shows the branch, confirms with the user, prints
    a reminder about periodic formplayer deploys, and deploys. For a
    formplayer deploy, announces it and deploys directly.
    """
    check_branch(args)
    environment = get_environment(args.env_name)
    commcare_branch = self._confirm_commcare_branch(environment, args.commcare_branch,
                                                    quiet=args.quiet)
    if args.component == 'commcare':
        print(color_summary("You are about to deploy commcare"))
        print(color_summary("branch: {}".format(commcare_branch)))
        if ask('Deploy commcare?', quiet=args.quiet):
            # remind the user that formplayer still needs periodic deploys
            for notice in (
                "Formplayer will not be deployed right now,",
                "but we recommend deploying formplayer about once a month as well.",
                "It causes about 1 minute of service interruption to Web Apps and App Preview,",
                "but keeps these services up to date.",
                "You can do so by running `commcare-cloud <env> deploy formplayer`",
            ):
                print(color_notice(notice))
            self.deploy_commcare(environment, commcare_branch, args, unknown_args)
    elif args.component == 'formplayer':
        self._announce_formplayer_deploy_start(environment)
        self.deploy_formplayer(environment, args, unknown_args)
def commit(migration, ansible_context):
    """Commit a shard migration's allocation to the Couch database config.

    First verifies that shard files on disk match the plan; aborts (exit 1)
    if not. After user confirmation, commits the new allocation, re-diffs
    against the database, and prints the resulting shard table.

    :returns: 0 on success or user abort, 1 on verification/commit mismatch
    """
    print_allocation(migration)
    alloc_docs_by_db = {plan.db_name: plan for plan in migration.shard_plan}
    puts(color_summary("Checking shards on disk vs plan. Please wait."))
    if not assert_files(migration, alloc_docs_by_db, ansible_context):
        puts(color_error("Some shard files are not where we expect. Have you run 'migrate'?"))
        puts(color_error("Aborting"))
        return 1
    else:
        puts(color_success("All shards appear to be where we expect according to the plan."))
    if ask("Are you sure you want to update the Couch Database config?"):
        commit_migration(migration)
        # re-check: the DB should now reflect the plan exactly
        diff_with_db = diff_plan(migration)
        if diff_with_db:
            puts(color_error('DB allocation differs from expected:\n'))
            puts("{}\n\n".format(diff_with_db))
            puts("Check the DB state and logs and maybe try running 'commit' again?")
            return 1
        puts(color_highlight("New shard allocation:\n"))
        print_shard_table([
            get_shard_allocation(migration.target_couch_config, db_name)
            for db_name in sorted(get_db_list(migration.target_couch_config.get_control_node()))
        ])
    return 0
def run_for_celery(self, service_group, action, args, unknown_args):
    """Run a supervisorctl action against celery workers, host by host.

    ``status`` without ``--only`` runs a single supervisorctl status across
    the inventory group; other actions preview the per-host commands, ask
    for confirmation, then run them, stopping at the first failure.

    :returns: exit code of the last (or first failing) shell command, else 0
    """
    exit_code = 0
    service = "celery"
    if action == "status" and not args.only:
        args.shell_command = "supervisorctl %s" % action
        args.inventory_group = self.get_inventory_group_for_service(service, args.service_group)
        exit_code = RunShellCommand(self.parser).run(args, unknown_args)
    else:
        workers_by_host = self.get_celery_workers_to_work_on(args)
        puts(colored.blue("This is going to run the following"))
        for host, workers in workers_by_host.items():
            puts(colored.green('Host: [' + host + ']'))
            puts(colored.green("supervisorctl %s %s" % (action, ' '.join(workers))))
        if not ask('Good to go?', strict=True, quiet=args.quiet):
            return 0  # exit code
        for host, workers in workers_by_host.items():
            args.inventory_group = self.get_inventory_group_for_service(service, args.service_group)
            # if not applicable for all hosts then limit to a host
            if host != "*":
                unknown_args.append('--limit=%s' % host)
            args.shell_command = "supervisorctl %s %s" % (action, ' '.join(workers))
            for service in self.services(service_group, args):
                exit_code = RunShellCommand(self.parser).run(args, unknown_args)
                # Bug fix: was `exit_code is not 0`, an identity check on an
                # int literal; use value inequality instead.
                if exit_code != 0:
                    return exit_code
    return exit_code
def _write_load_config_sh(self, environments_dir, quiet):
    """Write ~/.commcare-cloud/load_config.sh and show how to source it.

    Determines the environments directory if not given, ensures
    ~/.commcare-cloud exists, writes (or, with consent, overwrites) the
    load_config.sh that exports COMMCARE_CLOUD_ENVIRONMENTS, extends PATH,
    and sources bash completion, then prints setup instructions.
    """
    puts(colored.blue("Let's get you set up to run commcare-cloud."))
    if not environments_dir:
        environments_dir = self._determine_environments_dir(quiet=quiet)
    commcare_cloud_dir = os.path.expanduser("~/.commcare-cloud")
    if not os.path.exists(commcare_cloud_dir):
        os.makedirs(commcare_cloud_dir)
    load_config_file = os.path.expanduser("~/.commcare-cloud/load_config.sh")
    # never clobber an existing config without asking first
    if not os.path.exists(load_config_file) or \
            ask("Overwrite your ~/.commcare-cloud/load_config.sh?", quiet=quiet):
        with open(load_config_file, 'w') as f:
            f.write(textwrap.dedent("""
                # auto-generated with `manage-commcare-cloud configure`:
                export COMMCARE_CLOUD_ENVIRONMENTS={COMMCARE_CLOUD_ENVIRONMENTS}
                export PATH=$PATH:{virtualenv_path}
                source {PACKAGE_BASE}/.bash_completion
            """.format(
                COMMCARE_CLOUD_ENVIRONMENTS=shlex_quote(environments_dir),
                virtualenv_path=get_virtualenv_bin_path(),
                PACKAGE_BASE=PACKAGE_BASE,
            )).strip())
    puts(colored.blue("Add the following to your ~/.bash_profile:"))
    puts(colored.cyan("source ~/.commcare-cloud/load_config.sh"))
    puts(colored.blue(
        "and then open a new shell. "
        "You should be able to run `commcare-cloud` without entering your virtualenv."))
def execute_action(self, action, host_pattern=None, process_pattern=None):
    """Run an elasticsearch lifecycle action, coordinating pillows around it.

    ``status`` is delegated to the classic service handler. For stop/start/
    restart, the user is warned that pillows are managed too and asked to
    confirm (returning 0 if declined) before the rolling-restart playbook
    and pillow actions are run in the appropriate order.
    """
    if action == 'status':
        return ElasticsearchClassic(self.environment, self.ansible_context).execute_action(
            action, host_pattern, process_pattern)
    confirmed = ask(
        "This function does more than stop and start the elasticsearch service. "
        "For that, use elasticsearch-classic."
        "\nStop will: stop pillows, stop es, and kill -9 if any processes still exist "
        "after a period of time. "
        "\nStart will start pillows and start elasticsearch "
        "\nRestart is a stop followed by a start.\n Continue?",
        strict=False)
    if not confirmed:
        return 0  # exit code
    if action == 'stop':
        self._act_on_pillows(action='stop')
        self._run_rolling_restart_yml(tags='action_stop', limit=host_pattern)
    elif action == 'start':
        self._run_rolling_restart_yml(tags='action_start', limit=host_pattern)
        self._act_on_pillows(action='start')
    elif action == 'restart':
        self._act_on_pillows(action='stop')
        self._run_rolling_restart_yml(tags='action_stop,action_start', limit=host_pattern)
        self._act_on_pillows(action='start')
def commit(migration, ansible_context):
    """Verify shard files against the plan, then commit the allocation to Couch.

    Aborts with exit code 1 when files on disk do not match the plan or the
    post-commit database diff is non-empty; otherwise prints the new shard
    table and returns 0 (also 0 when the user declines to commit).
    """
    print_allocation(migration)
    alloc_docs_by_db = {plan.db_name: plan for plan in migration.shard_plan}
    puts(colored.yellow("Checking shards on disk vs plan. Please wait."))
    files_match_plan = assert_files(migration, alloc_docs_by_db, ansible_context)
    if not files_match_plan:
        puts(colored.red("Some shard files are not where we expect. Have you run 'migrate'?"))
        puts(colored.red("Aborting"))
        return 1
    puts(colored.yellow("All shards appear to be where we expect according to the plan."))
    if ask("Are you sure you want to update the Couch Database config?"):
        commit_migration(migration)
        diff_with_db = diff_plan(migration)
        if diff_with_db:
            puts(colored.red('DB allocation differs from expected:\n'))
            puts("{}\n\n".format(diff_with_db))
            puts("Check the DB state and logs and maybe try running 'commit' again?")
            return 1
        puts(colored.yellow("New shard allocation:\n"))
        control_node = migration.target_couch_config.get_control_node()
        print_shard_table([
            get_shard_allocation(migration.target_couch_config, db_name)
            for db_name in sorted(get_db_list(control_node))
        ])
    return 0
def clean(migration, ansible_context, skip_check, limit):
    """Prune shard files left behind by a committed migration.

    Warns (and asks to continue) when the plan differs from the database,
    verifies that all couch files are accounted for, then runs the prune
    playbook on the affected nodes, if any.
    """
    diff_with_db = diff_plan(migration)
    if diff_with_db:
        puts(colored.red("Current plan differs with database:\n"))
        puts("{}\n\n".format(diff_with_db))
        puts(
            "This could mean that the plan hasn't been committed yet\n"
            "or that the plan was re-generated.\n"
            "Performing the 'clean' operation is still safe but may\n"
            "not have the outcome you are expecting.\n"
        )
        if not ask("Do you wish to continue?"):
            puts(colored.red('Abort.'))
            return 0
    alloc_docs_by_db = get_db_allocations(migration.target_couch_config)
    puts(colored.yellow("Checking shards on disk vs DB. Please wait."))
    all_files_accounted_for = assert_files(migration, alloc_docs_by_db, ansible_context)
    if not all_files_accounted_for:
        puts(colored.red("Not all couch files are accounted for. Aborting."))
        return 1
    nodes = generate_shard_prune_playbook(migration)
    if not nodes:
        return
    return run_ansible_playbook(
        migration.target_environment,
        migration.prune_playbook_path,
        ansible_context,
        skip_check=skip_check,
        limit=limit,
    )
def clean(migration, ansible_context, skip_check, limit):
    """Remove stale shard files after a committed couch migration.

    If the plan differs from the database, warns and asks before
    continuing. Verifies every couch file is accounted for, then runs the
    prune playbook when any nodes need cleanup.
    """
    diff_with_db = diff_plan(migration)
    if diff_with_db:
        puts(color_warning("Current plan differs with database:\n"))
        puts("{}\n\n".format(diff_with_db))
        puts(
            color_notice(
                "This could mean that the plan hasn't been committed yet\n"
                "or that the plan was re-generated.\n"
                "Performing the 'clean' operation is still safe but may\n"
                "not have the outcome you are expecting.\n"))
        if not ask("Do you wish to continue?"):
            puts(color_error('Abort.'))
            return 0
    alloc_docs_by_db = get_db_allocations(migration.target_couch_config)
    puts(color_summary("Checking shards on disk vs DB. Please wait."))
    all_accounted_for = assert_files(migration, alloc_docs_by_db, ansible_context)
    if not all_accounted_for:
        puts(color_error("Not all couch files are accounted for. Aborting."))
        return 1
    nodes = generate_shard_prune_playbook(migration)
    if not nodes:
        return
    return run_ansible_playbook(
        migration.target_environment,
        migration.prune_playbook_path,
        ansible_context,
        skip_check=skip_check,
        limit=limit,
    )
def run(self, args, unknown_args):
    """Deploy localsettings and optionally run Django checks to validate them.

    Runs the deploy_localsettings playbook; on success, offers to run
    Django ``check`` / ``check_services`` management commands (targeted at
    a single server when ``--limit`` was given).

    :returns: the playbook's exit code. (Bug fix: previously the function
        fell off the end and returned None after running the checks.)
    """
    unknown_args += ('-e', '{"_should_update_formplayer_in_place": true}')
    rc = commcare_cloud(args.env_name, 'ansible-playbook', 'deploy_localsettings.yml',
                        tags='localsettings', branch=args.branch, *unknown_args)
    if rc != 0 or not ask(
            "Would you like to run Django checks to validate the settings?"):
        return rc
    environment = get_environment(args.env_name)
    server_args = []
    try:
        limit_arg = unknown_args.index('--limit')
    except ValueError:
        pass  # no --limit given; run checks without a specific server
    else:
        servers = environment.inventory_manager.get_hosts(unknown_args[limit_arg + 1])
        server_args.extend(['--server', servers[0]])
    commcare_cloud(args.env_name, 'django-manage',
                   *(['check', '--deploy'] + server_args))
    commcare_cloud(args.env_name, 'django-manage',
                   *(['check', '--deploy', '-t', 'database'] + server_args))
    commcare_cloud(args.env_name, 'django-manage',
                   *(['check_services'] + server_args))
    return rc
def _write_load_config_sh(self, environments_dir, quiet):
    """Write ~/.commcare-cloud/load_config.sh and explain how to source it.

    Resolves the environments directory when not supplied, ensures
    ~/.commcare-cloud exists, writes (or overwrites with consent) the
    load_config.sh exporting COMMCARE_CLOUD_ENVIRONMENTS, extending PATH,
    and sourcing bash completion, then prints follow-up instructions.
    """
    puts(color_summary("Let's get you set up to run commcare-cloud."))
    if not environments_dir:
        environments_dir = self._determine_environments_dir(quiet=quiet)
    commcare_cloud_dir = os.path.expanduser("~/.commcare-cloud")
    if not os.path.exists(commcare_cloud_dir):
        os.makedirs(commcare_cloud_dir)
    load_config_file = os.path.expanduser(
        "~/.commcare-cloud/load_config.sh")
    # only overwrite an existing file with explicit consent
    if not os.path.exists(load_config_file) or \
            ask("Overwrite your ~/.commcare-cloud/load_config.sh?", quiet=quiet):
        with open(load_config_file, 'w') as f:
            f.write(
                textwrap.dedent("""
                    # auto-generated with `manage-commcare-cloud configure`:
                    export COMMCARE_CLOUD_ENVIRONMENTS={COMMCARE_CLOUD_ENVIRONMENTS}
                    export PATH=$PATH:{virtualenv_path}
                    source {PACKAGE_BASE}/.bash_completion
                """.format(
                    COMMCARE_CLOUD_ENVIRONMENTS=shlex_quote(
                        environments_dir),
                    virtualenv_path=get_virtualenv_bin_path(),
                    PACKAGE_BASE=PACKAGE_BASE,
                )).strip())
    puts(color_notice("Add the following to your ~/.bash_profile:"))
    puts(color_code("source ~/.commcare-cloud/load_config.sh"))
    puts(
        color_notice(
            "and then open a new shell. "
            "You should be able to run `commcare-cloud` without entering your virtualenv."
        ))
def _confirm_translated(environment, quiet=False): if datetime.now().isoweekday( ) != 3 or environment.meta_config.deploy_env != 'production': return True return ask( "It's the weekly Wednesday deploy, did you update the translations " "from transifex? Try running this handy script from the root of your " "commcare-hq directory:\n./scripts/update-translations.sh\n", quiet=quiet)
def run(self, args, unknown_args):
    """Deploy supervisor/services config, optionally updating celery via fab.

    Runs the deploy_stack playbook limited to supervisor and services tags,
    then offers to run the fab-based celery supervisor config update.

    :returns: the playbook's exit code. (Bug fix: previously returned None
        when the user chose to run the fab command.)
    """
    args.playbook = 'deploy_stack.yml'
    unknown_args += ('--tags=supervisor,services', )
    rc = AnsiblePlaybook(self.parser).run(args, unknown_args)
    if ask("Some celery configs are still updated through fab. "
           "Do you want to run that as well (recommended)?"):
        exec_fab_command(args.env_name, 'update_current_supervisor_config')
    return rc
def _determine_environments_dir(quiet):
    """Interactively choose the environments directory to use.

    In quiet mode, returns COMMCARE_CLOUD_ENVIRONMENTS if set, else the
    Dimagi default. Otherwise offers, in order: the Dimagi default
    (for Dimagi staff), the env-var location, or a newly scaffolded
    ~/.commcare-cloud/environments. Returns the chosen path.
    """
    environments_dir = None
    environ_value = os.environ.get('COMMCARE_CLOUD_ENVIRONMENTS')
    if quiet:
        return environ_value or DIMAGI_ENVIRONMENTS_DIR

    def have_same_realpath(dir1, dir2):
        # resolve symlinks so aliased paths compare equal
        return os.path.realpath(dir1) == os.path.realpath(dir2)

    if not environments_dir:
        if os.path.exists(DIMAGI_ENVIRONMENTS_DIR):
            if ask("Do you work or contract for Dimagi?"):
                print("OK, we'll give you Dimagi's default environments (production, staging, etc.).")
                environments_dir = DIMAGI_ENVIRONMENTS_DIR
    if not environments_dir:
        # only offer the env-var location when it differs from the default
        if environ_value and not have_same_realpath(environ_value, DIMAGI_ENVIRONMENTS_DIR):
            print("I see you have COMMCARE_CLOUD_ENVIRONMENTS set to {} in your environment".format(environ_value))
            if ask("Would you like to use environments at that location?"):
                environments_dir = environ_value
    if not environments_dir:
        # fall back to scaffolding an empty environments directory
        default_environments_dir = "~/.commcare-cloud/environments"
        environments_dir = os.path.expanduser(default_environments_dir)
        print("To use commcare-cloud, you have to have an environments directory. "
              "This is where you will store information about your cluster setup, "
              "such as the IP addresses of the hosts in your cluster, "
              "how different services are distributed across the machines, "
              "and all settings specific to your CommCare instance.")
        if ask("Would you like me to create an empty one for you at "
               "{}?".format(default_environments_dir)):
            for dir_name in ['_authorized_keys', '_users']:
                dir_path = os.path.expanduser(os.path.join(default_environments_dir, dir_name))
                if not os.path.exists(dir_path):
                    os.makedirs(dir_path)
            print("Okay, I've got the env started for you, "
                  "but you're going to have to fill out the rest before you can do much. "
                  "For more information, see https://dimagi.github.io/commcare-cloud/commcare-cloud/env/ "
                  "and refer to the examples at "
                  "https://github.com/dimagi/commcare-cloud/tree/master/environments.")
    return environments_dir
def plan(migration):
    """Generate (or reuse) a shard plan and print the resulting shard table.

    If a plan file already exists, asks whether to overwrite it; otherwise
    generates a fresh plan. Always returns 0.
    """
    overwrite = True
    if os.path.exists(migration.shard_plan_path):
        overwrite = ask("Plan already exists. Do you want to overwrite it?")
    shard_allocations = generate_shard_plan(migration) if overwrite else migration.shard_plan
    print_shard_table(list(shard_allocations))
    return 0
def plan(migration):
    """Create a shard plan (asking before overwriting an existing one) and show it.

    Reuses the existing plan when the user declines to overwrite.
    Returns 0 unconditionally.
    """
    regenerate = True
    if os.path.exists(migration.shard_plan_path):
        regenerate = ask("Plan already exists. Do you want to overwrite it?")
    if regenerate:
        shard_allocations = generate_shard_plan(migration)
    else:
        shard_allocations = migration.shard_plan
    print_shard_table([allocation_doc for allocation_doc in shard_allocations])
    return 0
def run(self, args, unknown_args):
    """Deploy services config, then optionally refresh supervisor via fab.

    Runs the deploy_stack playbook limited to the services tag, then offers
    to run supervisorctl reread/update through fab.

    :returns: the playbook's exit code. (Bug fix: previously returned None
        when the user accepted the supervisor update.)
    """
    args.playbook = 'deploy_stack.yml'
    unknown_args += ('--tags=services', )
    rc = AnsiblePlaybook(self.parser).run(args, unknown_args)
    if ask("Would you like to update supervisor to use the new configurations?"):
        exec_fab_command(args.env_name, 'supervisorctl:"reread"')
        exec_fab_command(args.env_name, 'supervisorctl:"update"')
    return rc
def _confirm_environment_time(environment, quiet=False):
    """Confirm a deploy happening outside the configured maintenance window.

    Returns True immediately when inside the window; otherwise shows the
    current local time in the window's timezone and asks the user to
    confirm proceeding.
    """
    if within_maintenance_window(environment):
        return True
    window = environment.fab_settings_config.acceptable_maintenance_window
    local_now = datetime.now(pytz.timezone(window['timezone']))
    message = (
        "Whoa there bud! You're deploying '{}' outside the configured maintenance window. "
        "The current local time is {}.\n"
        "ARE YOU DOING SOMETHING EXCEPTIONAL THAT WARRANTS THIS?"
    ).format(environment.name, local_now.strftime("%-I:%M%p on %h. %d %Z"))
    return ask(message, quiet=quiet)
def confirm_deploy(environment, deploy_revs, diffs, args):
    """Run the full chain of pre-deploy confirmations.

    Confirms non-default revisions, translations (Wednesday production
    deploys), the maintenance window, shows the code diff, warns when the
    deployed commit already matches, and finally asks to proceed.

    :returns: True only if every confirmation passes
    """
    if diffs:
        # Bug fix: was '/n'.join(diffs), which rendered a literal "/n"
        # between entries instead of a newline.
        message = (
            "Whoa there bud! You're deploying non-default. "
            "\n{}\n"
            "ARE YOU DOING SOMETHING EXCEPTIONAL THAT WARRANTS THIS?").format(
            '\n'.join(diffs))
        if not ask(message, quiet=args.quiet):
            return False
    if not (_confirm_translated(environment, quiet=args.quiet)
            and _confirm_environment_time(environment, quiet=args.quiet)):
        return False
    diff = _get_diff(environment, deploy_revs)
    diff.print_deployer_diff()
    if diff.deployed_commit_matches_latest_commit and not args.quiet:
        _print_same_code_warning(deploy_revs['commcare'])
    return ask('Are you sure you want to preindex and deploy to '
               '{env}?'.format(env=environment.name), quiet=args.quiet)
def commit(migration):
    """Commit the shard migration to the Couch database config, with confirmation.

    After the user confirms, commits the migration and prints the shard
    allocation as now reported by the target cluster. Returns 0.
    """
    if ask("Are you sure you want to update the Couch Database config?"):
        commit_migration(migration)
        # TODO: verify that shard config in DB matches what we expect
        puts(colored.yellow("New shard allocation:\n"))
        control_node = migration.target_couch_config.get_control_node()
        allocations = [
            get_shard_allocation(migration.target_couch_config, db_name)
            for db_name in sorted(get_db_list(control_node))
        ]
        print_shard_table(allocations)
    return 0
def migrate(migration, ansible_context, skip_check, no_stop):
    """Run a couch shard migration after interactive confirmation.

    Displays the planned allocation, warns about data loss when running
    with ``no_stop`` (requiring an extra confirmation), then performs the
    migration via ``run_action_with_check_mode`` (check pass, then apply).
    Returns 0 on abort, otherwise the migration exit code.
    """
    print_allocation(migration)
    if not ask("Continue with this plan?"):
        puts("Abort")
        return 0

    if no_stop:
        puts(colored.yellow("Running migrate with --no-stop will result in data loss"))
        puts(colored.yellow("unless each shard of each db has a pivot location."))
        confirmed = ask("Have you manually confirmed that for each shard of each db "
                        "at least one of its new locations is the same as an old location, "
                        "and do you want to continue without stopping couchdb first?")
        if not confirmed:
            puts("Abort")
            return 0

    return run_action_with_check_mode(
        lambda: _run_migration(migration, ansible_context, check_mode=True, no_stop=no_stop),
        lambda: _run_migration(migration, ansible_context, check_mode=False, no_stop=no_stop),
        skip_check,
    )
def _confirm_commcare_branch(environment, commcare_branch, quiet): default_branch = environment.fab_settings_config.default_branch if not commcare_branch: return default_branch if commcare_branch != default_branch: branch_message = ( "Whoa there bud! You're using branch {commcare_branch}. " "ARE YOU DOING SOMETHING EXCEPTIONAL THAT WARRANTS THIS?" ).format(commcare_branch=commcare_branch) if not ask(branch_message, quiet=quiet): exit(-1) return commcare_branch
def run(self, args, unknown_args):
    """Run the services deploy playbook, then optionally refresh supervisor.

    Runs ``deploy_stack.yml`` limited to the ``services`` tag, and offers
    to have supervisor reread/update its configs on the app hosts.
    Returns the playbook's exit code.
    """
    args.playbook = 'deploy_stack.yml'
    unknown_args += ('--tags=services',)
    rc = AnsiblePlaybook(self.parser).run(args, unknown_args)
    if ask("Would you like to update supervisor to use the new configurations?"):
        carryover_args = []
        if args.limit:
            carryover_args.extend(['--limit', args.limit])
        commcare_cloud(
            args.env_name, 'run-shell-command',
            'webworkers:celery:pillowtop:formplayer',
            'supervisorctl reread; supervisorctl update',
            '-b',
            *carryover_args
        )
    # Bug fix: always return the playbook exit code. Previously rc was
    # returned only when the user declined the supervisor update, so a
    # failed playbook run looked successful (None) after answering yes.
    return rc
def _confirm_commcare_rev(environment, commcare_rev, quiet=False): default_branch = environment.fab_settings_config.default_branch if not commcare_rev: return default_branch if commcare_rev != default_branch: message = ( "Whoa there bud! You're deploying from {commcare_rev}. " "ARE YOU DOING SOMETHING EXCEPTIONAL THAT WARRANTS THIS?" ).format(commcare_rev=commcare_rev) if not ask(message, quiet=quiet): exit(-1) return commcare_rev
def run(self, args, unknown_args):
    """Run the services deploy playbook, then optionally refresh supervisor.

    Runs ``deploy_stack.yml`` limited to the ``services`` tag, and offers
    to have supervisor reread/update its configs on the app hosts.
    Returns the playbook's exit code.
    """
    args.playbook = 'deploy_stack.yml'
    unknown_args += ('--tags=services', )
    rc = AnsiblePlaybook(self.parser).run(args, unknown_args)
    if ask("Would you like to update supervisor to use the new configurations?"):
        carryover_args = []
        if args.limit:
            carryover_args.extend(['--limit', args.limit])
        commcare_cloud(args.env_name, 'run-shell-command',
                       'webworkers:celery:pillowtop:formplayer',
                       'supervisorctl reread; supervisorctl update',
                       '-b', *carryover_args)
    # Bug fix: always return the playbook exit code. Previously rc was
    # returned only when the user declined the supervisor update, so a
    # failed playbook run looked successful (None) after answering yes.
    return rc
def run(self, args, unknown_args):
    """Run an ad-hoc shell command on remote hosts via ansible's shell module.

    Warns (and asks for confirmation) when the command starts with 'sudo',
    since privilege escalation should go through --become instead.
    Returns the ansible module run's exit code, or 0 if aborted.
    """
    if args.shell_command.strip().startswith('sudo '):
        puts(color_notice(
            "To run as another user use `--become` (for root) or `--become-user <user>`.\n"
            "Using 'sudo' directly in the command is non-standard practice."))
        if not ask("Do you know what you're doing and want to run this anyway?",
                   quiet=args.quiet):
            return 0  # exit code

    args.module = 'shell'
    # Optionally suppress ansible's "use a module instead" warnings.
    prefix = 'warn=false ' if args.silence_warnings else ''
    args.module_args = prefix + args.shell_command
    args.skip_check = True
    args.quiet = True
    del args.shell_command
    return RunAnsibleModule(self.parser).run(args, unknown_args)
def run(self, args, unknown_args):
    """Run an ad-hoc shell command on remote hosts via ansible's shell module.

    Warns (and asks for confirmation) when the command starts with 'sudo',
    since privilege escalation should go through --become instead.
    Returns the ansible module run's exit code, or 0 if aborted.
    """
    if args.shell_command.strip().startswith('sudo '):
        puts(colored.yellow(
            "To run as another user use `--become` (for root) or `--become-user <user>`.\n"
            "Using 'sudo' directly in the command is non-standard practice."))
        if not ask("Do you know what you're doing and want to run this anyway?",
                   quiet=args.quiet):
            return 0  # exit code

    args.module = 'shell'
    # Optionally suppress ansible's "use a module instead" warnings.
    prefix = 'warn=false ' if args.silence_warnings else ''
    args.module_args = prefix + args.shell_command
    args.skip_check = True
    args.quiet = True
    del args.shell_command
    return RunAnsibleModule(self.parser).run(args, unknown_args)
def deploy_formplayer(environment, args):
    """Deploy Formplayer: show the diff, confirm, run ansible, restart the service.

    Records deploy start/success/failure against the environment.
    Returns 0 on success, 1 if the user aborts, or the failing step's
    exit code.
    """
    print(color_notice("\nPreparing to deploy Formplayer to: "), end="")
    print(f"{environment.name}\n")

    tag_commits = environment.fab_settings_config.tag_deploy_commits
    repo = github_repo('dimagi/formplayer', require_write_permissions=tag_commits)

    diff = get_deploy_diff(environment, repo)
    diff.print_deployer_diff()

    context = DeployContext(
        service_name="Formplayer",
        revision=args.commcare_rev,
        diff=diff,
        start_time=datetime.utcnow())

    if not ask('Continue with deploy?', quiet=args.quiet):
        return 1

    record_deploy_start(environment, context)

    rc = run_ansible_playbook_command(environment, args)
    if rc != 0:
        record_deploy_failed(environment, context)
        return rc

    # NOTE: "formsplayer" (with the extra 's') is the existing supervisor
    # program name — do not "correct" the spelling.
    supervisor_command = (
        'supervisorctl reread; '
        'supervisorctl update {project}-{deploy_env}-formsplayer-spring; '
        'supervisorctl restart {project}-{deploy_env}-formsplayer-spring'
    ).format(
        project='commcare-hq',
        deploy_env=environment.meta_config.deploy_env,
    )
    rc = commcare_cloud(
        args.env_name, 'run-shell-command', 'formplayer',
        supervisor_command,
        '-b',
    )
    if rc != 0:
        record_deploy_failed(environment, context)
        return rc

    record_deploy_success(environment, context)
    return 0
def run(self, args, unknown_args):
    """Run an ad-hoc shell command on remote hosts via ansible's shell module.

    Warns (and asks for confirmation) when the command starts with 'sudo';
    exits the process with status 0 if the user declines.
    """
    if args.shell_command.strip().startswith('sudo '):
        puts(colored.yellow(
            "To run as another user use `--become` (for root) or `--become-user <user>`.\n"
            "Using 'sudo' directly in the command is non-standard practice."))
        if not ask("Do you know what you're doing and want to run this anyway?",
                   quiet=args.quiet):
            exit(0)

    args.module = 'shell'
    args.module_args = args.shell_command
    args.skip_check = True
    args.quiet = True
    del args.shell_command
    RunAnsibleModule(self.parser).run(args, unknown_args)
def execute_action(self, action, host_pattern=None, process_pattern=None):
    """Stop/start/restart elasticsearch together with its pillows.

    'status' is delegated to the classic elasticsearch service untouched;
    every other action first asks the user to confirm, since pillows are
    stopped/started around the elasticsearch restart itself.
    """
    if action == 'status':
        classic = ElasticsearchClassic(self.environment, self.ansible_context)
        return classic.execute_action(action, host_pattern, process_pattern)

    proceed = ask(
        "This function does more than stop and start the elasticsearch service. "
        "For that, use elasticsearch-classic."
        "\nStop will: stop pillows, stop es, and kill -9 if any processes still exist "
        "after a period of time. "
        "\nStart will start pillows and start elasticsearch "
        "\nRestart is a stop followed by a start.\n Continue?",
        strict=False)
    if not proceed:
        return 0  # exit code

    if action == 'stop':
        self._act_on_pillows(action='stop')
        self._run_rolling_restart_yml(tags='action_stop', limit=host_pattern)
    elif action == 'start':
        self._run_rolling_restart_yml(tags='action_start', limit=host_pattern)
        self._act_on_pillows(action='start')
    elif action == 'restart':
        self._act_on_pillows(action='stop')
        self._run_rolling_restart_yml(tags='action_stop,action_start', limit=host_pattern)
        self._act_on_pillows(action='start')
def run(self, args, unknown_args):
    """Sync locally-defined Datadog monitor definitions with the remote account.

    Diffs local vs remote monitors (restricted to ``keys_to_update``),
    reports monitors that exist on only one side, and — after confirmation —
    pushes local changes to Datadog and/or dumps untracked remote monitors
    to local files as a starting point.
    """
    config = get_config(args.config)
    # Only these keys are compared/pushed; defaults to UPDATE_KEYS.
    keys_to_update = args.update_key or UPDATE_KEYS
    initialize_datadog(config)
    remote_monitor_api = RemoteMonitorAPI()
    local_monitor_api = LocalMonitorAPI(config)

    local_monitors = local_monitor_api.get_all()
    remote_monitors = remote_monitor_api.get_all()

    # Partition monitor ids into remote-only, local-only, and shared.
    # NOTE(review): `id` shadows the builtin throughout this function.
    only_remote = {
        id: remote_monitors[id]
        for id in set(remote_monitors) - set(local_monitors)
    }
    only_local = {
        id: local_monitors[id]
        for id in set(local_monitors) - set(remote_monitors)
    }
    shared_local_remote_monitors = {
        id: (local_monitors[id], remote_monitors[id])
        for id in set(local_monitors) & set(remote_monitors)
    }

    monitors_with_diffs = {}
    any_diffs = False
    # Monitors defined locally but absing from Datadog are reported only;
    # they are not created remotely by this command.
    if only_local:
        for id, monitor in only_local.items():
            puts(colored.magenta(
                "\nMonitor missing from datadog: {} ({})\n".format(monitor['name'], id)
            ))
    # Diff each shared monitor as YAML (local = expected, remote = actual),
    # limited to the selected keys.
    for id, (expected, actual) in shared_local_remote_monitors.items():
        diff = list(_unidiff_output(
            dump_monitor_yaml(get_data_to_update(actual, keys_to_update)),
            dump_monitor_yaml(get_data_to_update(expected, keys_to_update))))
        any_diffs |= bool(diff)
        if diff:
            puts(colored.magenta("\nDiff for '{}'".format(expected['name'])))
            puts(colored.cyan(local_monitor_api.get_filename_for_monitor(expected['id'])))
            with indent():
                print_diff(diff)
            monitors_with_diffs[id] = expected

    if any_diffs:
        # Push only after explicit confirmation.
        if ask("Do you want to push these changes to Datadog?"):
            for id, expected in monitors_with_diffs.items():
                print("Updating '{}'".format(expected['name']))
                remote_monitor_api.update(id, get_data_to_update(expected, keys_to_update))

    if only_remote:
        # Remote-only monitors are never modified; offer to dump them
        # locally so they can be tracked going forward.
        puts(colored.magenta(
            "FYI you also have some untracked monitors. "
            "No change will be applied for these:"
        ))
        for id, missing_monitor in sorted(only_remote.items()):
            puts(colored.magenta(" - Untracked monitor {} '{}' (no change will be applied)".format(id, missing_monitor['name'])))
        if ask("And BTW do you want to dump all untracked monitors as a starting point?"):
            for id, missing_monitor in sorted(only_remote.items()):
                local_monitor_api.create(id, missing_monitor)