def migrate(migration, ansible_context, skip_check, no_stop):
    """Execute the couch migration plan, optionally without stopping couchdb first."""
    print_allocation(migration)
    if not ask("Continue with this plan?"):
        puts("Abort")
        return 0

    if no_stop:
        # Skipping the couchdb stop is only safe when every shard has a pivot
        # location; make the operator confirm that explicitly.
        puts(color_notice("Running migrate with --no-stop will result in data loss"))
        puts(color_notice("unless each shard of each db has a pivot location."))
        confirmed = ask(
            "Have you manually confirmed that for each shard of each db "
            "at least one of its new locations is the same as an old location, "
            "and do you want to continue without stopping couchdb first?")
        if not confirmed:
            puts("Abort")
            return 0

    def _do_migration(check_mode):
        return _run_migration(migration, ansible_context,
                              check_mode=check_mode, no_stop=no_stop)

    return run_action_with_check_mode(
        lambda: _do_migration(True),
        lambda: _do_migration(False),
        skip_check,
    )
def _write_load_config_sh(self, environments_dir, quiet):
    """Write ~/.commcare-cloud/load_config.sh so a new shell can run commcare-cloud.

    Prompts for the environments directory if not given, asks before
    overwriting an existing file, and tells the user to source the file
    from their ~/.bash_profile.
    """
    puts(color_summary("Let's get you set up to run commcare-cloud."))
    if not environments_dir:
        environments_dir = self._determine_environments_dir(quiet=quiet)
    commcare_cloud_dir = os.path.expanduser("~/.commcare-cloud")
    if not os.path.exists(commcare_cloud_dir):
        os.makedirs(commcare_cloud_dir)
    load_config_file = os.path.expanduser(
        "~/.commcare-cloud/load_config.sh")
    # Only overwrite an existing load_config.sh with the user's consent.
    if not os.path.exists(load_config_file) or \
            ask("Overwrite your ~/.commcare-cloud/load_config.sh?", quiet=quiet):
        with open(load_config_file, 'w') as f:
            f.write(
                textwrap.dedent("""
                    # auto-generated with `manage-commcare-cloud configure`:
                    export COMMCARE_CLOUD_ENVIRONMENTS={COMMCARE_CLOUD_ENVIRONMENTS}
                    export PATH=$PATH:{virtualenv_path}
                    source {PACKAGE_BASE}/.bash_completion
                """.format(
                    COMMCARE_CLOUD_ENVIRONMENTS=shlex_quote(
                        environments_dir),
                    virtualenv_path=get_virtualenv_bin_path(),
                    PACKAGE_BASE=PACKAGE_BASE,
                )).strip())
    puts(color_notice("Add the following to your ~/.bash_profile:"))
    puts(color_code("source ~/.commcare-cloud/load_config.sh"))
    puts(
        color_notice(
            "and then open a new shell. "
            "You should be able to run `commcare-cloud` without entering your virtualenv."
        ))
def run(self, args, unknown_args):
    """Entry point for the deploy command: deploy commcare or formplayer."""
    check_branch(args)
    environment = get_environment(args.env_name)
    commcare_branch = self._confirm_commcare_branch(
        environment, args.commcare_branch, quiet=args.quiet)
    if args.component == 'commcare':
        print(color_summary("You are about to deploy commcare"))
        print(color_summary("branch: {}".format(commcare_branch)))
        if ask('Deploy commcare?', quiet=args.quiet):
            # Remind the operator that formplayer is deployed separately.
            notices = (
                "Formplayer will not be deployed right now,",
                "but we recommend deploying formplayer about once a month as well.",
                "It causes about 1 minute of service interruption to Web Apps and App Preview,",
                "but keeps these services up to date.",
                "You can do so by running `commcare-cloud <env> deploy formplayer`",
            )
            for notice in notices:
                print(color_notice(notice))
            self.deploy_commcare(environment, commcare_branch, args, unknown_args)
    elif args.component == 'formplayer':
        self._announce_formplayer_deploy_start(environment)
        self.deploy_formplayer(environment, args, unknown_args)
def print_help_message_about_the_commcare_cloud_default_username_env_var(
        username):
    """Tell the user how to persist their username in an environment variable."""
    for line in (
        "Did you know? You can put",
        "  export COMMCARE_CLOUD_DEFAULT_USERNAME={}".format(username),
        "in your profile to never have to type that in again! 🌈",
    ):
        puts(color_notice(line))
def wrap(cls, data):
    """Wrap config data, warning about and removing deprecated properties."""
    for name in ('py3_include_venv', 'py3_run_deploy'):
        if name not in data:
            continue
        print("{} {} {}".format(
            color_notice("The property"),
            color_code(name),
            color_notice("is deprecated and has no effect.")
        ))
        print(color_notice("Feel free to remove it from your fab-settings.yml."))
        del data[name]
    return super(FabSettingsConfig, cls).wrap(data)
def run(self, args, unknown_args):
    """Install the ansible-galaxy roles and collections commcare-cloud needs.

    Returns 0 on success, or the failing ansible-galaxy command's exit code.
    """
    env = os.environ.copy()
    put_virtualenv_bin_on_the_path()
    if not os.path.exists(ANSIBLE_ROLES_PATH):
        os.makedirs(ANSIBLE_ROLES_PATH)
    if not os.path.exists(ANSIBLE_COLLECTIONS_PATHS):
        os.makedirs(ANSIBLE_COLLECTIONS_PATHS)
    env['ANSIBLE_ROLES_PATH'] = ANSIBLE_ROLES_PATH
    env['ANSIBLE_COLLECTIONS_PATHS'] = ANSIBLE_COLLECTIONS_PATHS
    requirements_yml = os.path.join(ANSIBLE_DIR, 'requirements.yml')
    cmd_roles_parts = [
        'ansible-galaxy', 'install', '-f', '-r', requirements_yml
    ]
    cmd_collection_parts = [
        'ansible-galaxy', 'collection', 'install', '-f', '-r', requirements_yml
    ]
    for cmd_parts in (cmd_roles_parts, cmd_collection_parts):
        cmd = ' '.join(shlex_quote(arg) for arg in cmd_parts)
        print_command(cmd)
        try:
            # check_call (not check_output) so ansible-galaxy's progress output
            # is streamed to the user instead of being captured and discarded.
            subprocess.check_call(cmd, shell=True, env=env)
        except subprocess.CalledProcessError as err:
            print("process exited with error: %s" % err.returncode)
            return err.returncode
    puts(
        color_notice(
            "To finish first-time installation, run `manage-commcare-cloud configure`"
        ))
    return 0
def aws_sign_in(aws_profile,
                duration_minutes=DEFAULT_SIGN_IN_DURATION_MINUTES,
                force_new=False):
    """
    Create a temporary MFA-backed session for the given AWS profile.

    :param aws_profile: name of an existing aws profile to create a temp session for
    :param duration_minutes: session expiration if a new session is created
    :param force_new: when True, create new credentials even if valid ones exist
    :return: name of the temp session profile
             (always the passed in profile followed by ':session')
    """
    session_profile = '{}:session'.format(aws_profile)
    # Reuse still-valid credentials unless the caller forces a refresh.
    if not force_new and _has_valid_session_credentials(session_profile):
        return session_profile

    default_username = get_default_username()
    if default_username.is_guess:
        prompt = "Enter username associated with credentials [{}]: ".format(
            default_username)
        username = input(prompt) or default_username
        print_help_message_about_the_commcare_cloud_default_username_env_var(username)
    else:
        username = default_username

    mfa_token = input("Enter your MFA token: ")
    generate_session_profile(aws_profile, username, mfa_token, duration_minutes)

    puts(color_success(u"✓ Sign in accepted"))
    puts("You will be able to use AWS from the command line for the next {} minutes."
         .format(duration_minutes))
    puts(color_notice(
        "To use this session outside of commcare-cloud, "
        "prefix your command with AWS_PROFILE={}:session".format(aws_profile)))
    return session_profile
def wrap(cls, data):
    """Wrap config data: drop deprecated properties and validate staticfiles settings.

    Raises ValueError when use_shared_dir_for_staticfiles is set without
    shared_dir_for_staticfiles. (Previously a bare `assert`, which is
    silently skipped when python runs with -O.)
    """
    for deprecated_property in ('py3_include_venv', 'py3_run_deploy'):
        if deprecated_property in data:
            print("{} {} {}".format(
                color_notice("The property"),
                color_code(deprecated_property),
                color_notice("is deprecated and has no effect.")
            ))
            print(color_notice("Feel free to remove it from your fab-settings.yml."))
            del data[deprecated_property]
    obj = super(FabSettingsConfig, cls).wrap(data)
    if obj.use_shared_dir_for_staticfiles and not obj.shared_dir_for_staticfiles:
        raise ValueError(
            "Cannot have use_shared_dir_for_staticfiles without shared_dir_for_staticfiles")
    return obj
def run(self, args, unknown_args):
    """Install the ansible-galaxy roles and collections commcare-cloud needs.

    Returns 0 on success, or the exit code of the first failing
    ansible-galaxy command. (Previously only the last command's exit code
    was returned, so a failure installing roles was silently ignored when
    the collections install succeeded.)
    """
    env = os.environ.copy()
    put_virtualenv_bin_on_the_path()
    if not os.path.exists(ANSIBLE_ROLES_PATH):
        os.makedirs(ANSIBLE_ROLES_PATH)
    if not os.path.exists(ANSIBLE_COLLECTIONS_PATHS):
        os.makedirs(ANSIBLE_COLLECTIONS_PATHS)
    env['ANSIBLE_ROLES_PATH'] = ANSIBLE_ROLES_PATH
    env['ANSIBLE_COLLECTIONS_PATHS'] = ANSIBLE_COLLECTIONS_PATHS
    requirements_yml = os.path.join(ANSIBLE_DIR, 'requirements.yml')
    cmd_roles_parts = [
        'ansible-galaxy', 'install', '-f', '-r', requirements_yml
    ]
    cmd_collection_parts = [
        'ansible-galaxy', 'collection', 'install', '-f', '-r', requirements_yml
    ]
    for cmd_parts in (cmd_roles_parts, cmd_collection_parts):
        cmd = ' '.join(shlex_quote(arg) for arg in cmd_parts)
        print_command(cmd)
        p = subprocess.Popen(cmd, stdin=subprocess.PIPE, shell=True, env=env)
        p.communicate()
        if p.returncode != 0:
            # Stop on the first failure instead of reporting only the last one.
            return p.returncode
    puts(
        color_notice(
            "To finish first-time installation, run `manage-commcare-cloud configure`"
        ))
    return 0
def run(self, args, unknown_args):
    """Dispatch the couchdb migration action selected on the command line."""
    # --no-stop is only meaningful for the 'migrate' action.
    assert not (args.action != 'migrate' and args.no_stop), \
        "You can only use --no-stop with migrate"
    environment = get_environment(args.env_name)
    environment.create_generated_yml()
    migration = CouchMigration(environment, args.migration_plan)
    check_connection(migration.target_couch_config.get_control_node())
    if migration.separate_source_and_target:
        check_connection(migration.source_couch_config.get_control_node())
    ansible_context = AnsibleContext(args)
    if args.limit and args.action != 'clean':
        puts(color_notice('Ignoring --limit (it only applies to "clean" action).'))

    action = args.action
    if action == 'describe':
        return describe(migration)
    elif action == 'plan':
        return plan(migration)
    elif action == 'migrate':
        return migrate(migration, ansible_context, args.skip_check, args.no_stop)
    elif action == 'commit':
        return commit(migration, ansible_context)
    elif action == 'clean':
        return clean(migration, ansible_context, args.skip_check, args.limit)
def clean(migration, ansible_context, skip_check, limit):
    """Prune couch shard files that are no longer allocated after a migration.

    Returns 0 on success / nothing to do, 1 if shard files are unaccounted
    for, otherwise the playbook's exit code.
    """
    diff_with_db = diff_plan(migration)
    if diff_with_db:
        puts(color_warning("Current plan differs with database:\n"))
        puts("{}\n\n".format(diff_with_db))
        puts(
            color_notice(
                "This could mean that the plan hasn't been committed yet\n"
                "or that the plan was re-generated.\n"
                "Performing the 'clean' operation is still safe but may\n"
                "not have the outcome you are expecting.\n"))
        if not ask("Do you wish to continue?"):
            puts(color_error('Abort.'))
            return 0
    alloc_docs_by_db = get_db_allocations(migration.target_couch_config)
    puts(color_summary("Checking shards on disk vs DB. Please wait."))
    if not assert_files(migration, alloc_docs_by_db, ansible_context):
        puts(color_error("Not all couch files are accounted for. Aborting."))
        return 1
    nodes = generate_shard_prune_playbook(migration)
    if not nodes:
        # Nothing to prune: report success explicitly instead of falling
        # through and implicitly returning None.
        return 0
    return run_ansible_playbook(migration.target_environment,
                                migration.prune_playbook_path,
                                ansible_context,
                                skip_check=skip_check,
                                limit=limit)
def _aws_sign_in_with_sso(environment):
    """
    Create a temp AWS session via SSO for the given environment.

    Writes an SSO profile ("<aws_profile>:session") if one doesn't exist yet,
    refreshes the cached SSO credentials when they are no longer valid, and
    syncs them to v1-style credentials.

    (The previous docstring was copied from aws_sign_in and described
    parameters this function does not take.)

    :param environment: environment whose terraform/aws SSO config to use
    :return: The name of the temp session profile
             (always the environment's aws_profile followed by ':session')
    """
    aws_session_profile = '{}:session'.format(
        environment.terraform_config.aws_profile)
    # todo: add `... or if _date_modified(AWS_CONFIG_PATH) > _date_modified(AWS_CREDENTIALS_PATH)`
    if not _has_profile_for_sso(aws_session_profile):
        puts(
            color_notice(
                "Configuring SSO. To further customize, run `aws configure sso --profile {}`"
                .format(aws_session_profile)))
        _write_profile_for_sso(
            aws_session_profile,
            sso_start_url=environment.aws_config.sso_config.sso_start_url,
            sso_account_id=environment.aws_config.sso_config.sso_account_id,
            sso_region=environment.aws_config.sso_config.sso_region,
            region=environment.aws_config.sso_config.region,
        )
    if not _has_valid_session_credentials_for_sso():
        _refresh_sso_credentials(aws_session_profile)
    if not _has_valid_v1_session_credentials(aws_session_profile):
        _sync_sso_to_v1_credentials(aws_session_profile)
    return aws_session_profile
def deploy_commcare(environment, args, unknown_args):
    """Deploy CommCare HQ via fab, recording deploy start/success/failure."""
    deploy_revs, diffs = get_deploy_revs_and_diffs(environment, args)
    if not confirm_deploy(environment, deploy_revs, diffs, args):
        print(color_notice("Aborted by user"))
        return 1

    fab_func_args = get_deploy_commcare_fab_func_args(args)
    fab_settings = [args.fab_settings] if args.fab_settings else []
    for name, rev in deploy_revs.items():
        # commcare uses the bare 'code_branch' var; other repos are prefixed.
        if name == 'commcare':
            var = 'code_branch'
        else:
            var = '{}_code_branch'.format(name)
        fab_settings.append('{}={}'.format(var, rev))

    context = DeployContext(
        service_name="CommCare HQ",
        revision=args.commcare_rev,
        diff=_get_diff(environment, deploy_revs),
        start_time=datetime.utcnow())

    record_deploy_start(environment, context)
    rc = commcare_cloud(
        environment.name, 'fab',
        'deploy_commcare{}'.format(fab_func_args),
        '--set', ','.join(fab_settings),
        branch=args.branch, *unknown_args)
    if rc != 0:
        record_deploy_failed(environment, context)
        return rc
    if not args.skip_record:
        record_successful_deploy(environment, context)
    return 0
def _warn_no_formplayer(): print(color_notice(dedent(""" Formplayer will not be deployed right now, but we recommend deploying formplayer about once a month as well. It causes about 1 minute of service interruption to Web Apps and App Preview, but keeps these services up to date. You can do so by running `commcare-cloud <env> deploy formplayer` """)))
def get_github_credentials(repo_name, repo_is_private, require_write_permissions):
    """Return a GitHub token, prompting the user when none is configured.

    Warns when the token was found in the legacy config location.
    Side effects: sets the GITHUB_TOKEN module global and the GITHUB_TOKEN
    environment variable so subprocesses can reuse the token.

    Fix: removed no-op `f` prefixes from strings that contain no
    placeholders (they had no effect and suggested missing interpolation).
    """
    global GITHUB_TOKEN
    token, found_in_legacy_location = get_github_credentials_no_prompt()
    if found_in_legacy_location:
        print(color_notice("[Deprecation Warning] Config file has moved."))
        print(
            color_notice(
                f"New location is {PROJECT_ROOT}/config.py or else use the "
                f"'GITHUB_TOKEN' environment variable."))
        print(
            color_notice(
                "\nYou can move the config to the new location as follows:"))
        print(
            color_notice(
                f" $ mv {PROJECT_ROOT}/fab/config.py {PROJECT_ROOT}/config.py\n"
            ))
    if token is None:
        print(color_warning("Github credentials not found!"))
        private = "private " if repo_is_private else ""
        print(f"Github token is required for {private}repository {repo_name}.")
        if require_write_permissions:
            print(
                "The token must have write permissions to the repository to create release tags."
            )
        print(
            "\nYou can add a config file to automate this step:\n"
            f" $ cp {PROJECT_ROOT}/config.example.py {PROJECT_ROOT}/config.py\n"
            f"Then edit {PROJECT_ROOT}/config.py")
        print(
            color_notice(
                "To generate a GitHub access token, follow these instructions: https://github.com/blog/1509-personal-api-tokens\n"
                "For permissions choose repo > public_repo"))
        token = getpass('Github Token: ')
        os.environ["GITHUB_TOKEN"] = token  # set in env for access by subprocesses
        GITHUB_TOKEN = token
    return token or None
def end_downtime(environment, ansible_context):
    """End a downtime: start CommCare services and cancel the downtime record.

    If no downtime record exists, still offers to start services.
    Fix: the local flag previously shadowed the function's own name
    (`end_downtime`); renamed for clarity.
    """
    downtime = get_downtime_record(environment)
    if not downtime:
        puts(color_notice('Downtime record not found.'))
        should_start = ask("Do you want to continue?")
    else:
        should_start = ask("Do you want to start all CommCare services?")
    if should_start:
        supervisor_services(environment, ansible_context, 'start')
        if downtime:
            cancel_downtime_record(environment, downtime)
def start_downtime(environment, ansible_context, args):
    """Stop all CommCare services, creating a downtime record if none is active."""
    downtime = get_downtime_record(environment)
    if downtime:
        # A downtime is already active; show it and confirm before proceeding.
        puts(color_notice('Downtime already active'))
        with indent():
            print_downtime(downtime)
        proceed = ask("Do you want to continue?")
    else:
        proceed = ask("Are you sure you want to stop all CommCare services?",
                      strict=True)
    if not proceed:
        return
    if not downtime:
        create_downtime_record(environment, args.message, args.duration)
    supervisor_services(environment, ansible_context, 'stop')
    wait_for_all_processes_to_stop(environment, ansible_context)
def run(self, args, unknown_args):
    """Run an ad-hoc command on remote hosts via the ansible 'shell' module."""
    if args.shell_command.strip().startswith('sudo '):
        # Discourage raw sudo; ansible's --become/--become-user is the
        # supported way to escalate.
        puts(color_notice(
            "To run as another user use `--become` (for root) or `--become-user <user>`.\n"
            "Using 'sudo' directly in the command is non-standard practice."))
        if not ask("Do you know what you're doing and want to run this anyway?",
                   quiet=args.quiet):
            return 0  # exit code
    args.module = 'shell'
    prefix = 'warn=false ' if args.silence_warnings else ''
    args.module_args = prefix + args.shell_command
    args.skip_check = True
    args.quiet = True
    del args.shell_command
    return RunAnsibleModule(self.parser).run(args, unknown_args)
def deploy_formplayer(environment, args):
    """Deploy Formplayer and restart its supervisor process, recording the result."""
    print(color_notice("\nPreparing to deploy Formplayer to: "), end="")
    print(f"{environment.name}\n")

    tag_commits = environment.fab_settings_config.tag_deploy_commits
    repo = github_repo('dimagi/formplayer',
                       require_write_permissions=tag_commits)
    diff = get_deploy_diff(environment, repo)
    diff.print_deployer_diff()

    context = DeployContext(
        service_name="Formplayer",
        revision=args.commcare_rev,
        diff=diff,
        start_time=datetime.utcnow())

    if not ask('Continue with deploy?', quiet=args.quiet):
        return 1

    record_deploy_start(environment, context)

    def _fail(code):
        # Record the failure and propagate the exit code.
        record_deploy_failed(environment, context)
        return code

    rc = run_ansible_playbook_command(environment, args)
    if rc != 0:
        return _fail(rc)

    # Tell supervisor to pick up any config changes and bounce formplayer.
    supervisor_cmd = (
        'supervisorctl reread; '
        'supervisorctl update {project}-{deploy_env}-formsplayer-spring; '
        'supervisorctl restart {project}-{deploy_env}-formsplayer-spring'
    ).format(
        project='commcare-hq',
        deploy_env=environment.meta_config.deploy_env,
    )
    rc = commcare_cloud(
        args.env_name,
        'run-shell-command',
        'formplayer',
        supervisor_cmd,
        '-b',
    )
    if rc != 0:
        return _fail(rc)

    record_deploy_success(environment, context)
    return 0
def _check_username(env_name, username, message):
    """Validate or prompt for an SSH username against the environment's allowed users.

    Loops until the entered username is "ansible" or one of the environment's
    present dev_users, then returns it. When the initial username was a
    guessed default, also prints `message` formatted with the chosen username.
    """
    # NOTE(review): `username` appears to be a string-like object carrying an
    # `is_guess` attribute (a guessed default) — confirm against callers.
    default_username = username
    allowed_users = ["ansible"] + get_environment(env_name).users_config.dev_users.present
    while True:
        if not username or default_username.is_guess:
            username = input(f"Enter your SSH username ({default_username}): ")
            if not username:
                # Empty input accepts the guessed default.
                username = default_username
        if username in allowed_users:
            break
        env_users = '\n - '.join([''] + allowed_users)
        puts(color_error(
            f"Unauthorized user {username}.\n\n"
            f"Please pass in one of the allowed ssh users:{env_users}"
        ))
        # Clear so the next loop iteration re-prompts.
        username = ""
    if default_username.is_guess:
        # Tell the user how to avoid this prompt next time.
        print(color_notice(message.format(username=username)))
    return username
def run(self, args, unknown_args):
    """Stub for the removed 'update-user-key' command; points users at 'update-users'."""
    notice = ("The 'update-user-key' command has been removed. "
              "Please use 'update-users' instead.")
    puts(color_notice(notice))
    return 0  # exit code
def print_help_message_about_the_commcare_cloud_default_username_env_var(username):
    """Show how to persist the username via COMMCARE_CLOUD_DEFAULT_USERNAME."""
    help_text = COMMCARE_CLOUD_DEFAULT_USERNAME_ENV_VAR_MESSAGE.format(username)
    puts(color_notice(help_text))
def run(self, args, unknown_args):
    """Sync locally-defined Datadog monitors with the remote Datadog account.

    Shows diffs between local and remote monitor definitions (restricted to
    `keys_to_update`), pushes confirmed changes, and offers to dump any
    monitors that exist only remotely as local starting points.
    """
    config = get_config(args.config)
    # Only these keys are compared and pushed.
    keys_to_update = args.update_key or UPDATE_KEYS
    initialize_datadog(config)
    remote_monitor_api = RemoteMonitorAPI(filtered_ids=args.monitors)
    local_monitor_api = LocalMonitorAPI(config, filtered_ids=args.monitors)
    local_monitors = local_monitor_api.get_filtered()
    remote_monitors = remote_monitor_api.get_filtered()
    # Partition monitor ids: remote-only, local-only, and present in both.
    only_remote = {
        id: remote_monitors[id]
        for id in set(remote_monitors) - set(local_monitors)
    }
    only_local = {
        id: local_monitors[id]
        for id in set(local_monitors) - set(remote_monitors)
    }
    shared_local_remote_monitors = {
        id: (local_monitors[id], remote_monitors[id])
        for id in set(local_monitors) & set(remote_monitors)
    }
    monitors_with_diffs = {}
    any_diffs = False
    if only_local:
        # Local definitions with no remote counterpart: warn, nothing to diff.
        for id, monitor in only_local.items():
            puts(
                color_warning(
                    "\nMonitor missing from datadog: {} ({})\n".format(
                        monitor['name'], id)))
    for id, (expected, actual) in shared_local_remote_monitors.items():
        # Diff local (expected) vs remote (actual), limited to keys_to_update.
        diff = list(
            _unidiff_output(
                dump_monitor_yaml(
                    get_data_to_update(actual, keys_to_update)),
                dump_monitor_yaml(
                    get_data_to_update(expected, keys_to_update))))
        any_diffs |= bool(diff)
        if diff:
            puts(color_notice("\nDiff for '{}'".format(expected['name'])))
            puts(local_monitor_api.get_filename_for_monitor(
                expected['id']))
            with indent():
                print_diff(diff)
            monitors_with_diffs[id] = expected
    if any_diffs:
        # Only push after explicit confirmation.
        if ask("Do you want to push these changes to Datadog?"):
            for id, expected in monitors_with_diffs.items():
                print("Updating '{}'".format(expected['name']))
                remote_monitor_api.update(
                    id, get_data_to_update(expected, keys_to_update))
    if only_remote:
        # Remote-only monitors are never modified; optionally dump them locally.
        puts(
            color_warning("FYI you also have some untracked monitors. "
                          "No change will be applied for these:"))
        for id, missing_monitor in sorted(only_remote.items()):
            puts(
                " - Untracked monitor {} '{}' (no change will be applied)"
                .format(id, missing_monitor['name']))
        if ask("And BTW do you want to dump all untracked monitors as a starting point?"
               ):
            for id, missing_monitor in sorted(only_remote.items()):
                local_monitor_api.create(id, missing_monitor)