Example 1
def test_parse_plan():
    target_env = get_environment('target_env')
    expected = Plan(
        source_env=get_environment('source_env'),
        configs={
            '10.0.0.1': [
                SourceFiles(
                    '192.168.33.15',
                    '/opt/data/',
                    '/opt/data/',
                    files=['test/']
                )
            ],
            '10.0.0.2': [
                SourceFiles(
                    '192.168.33.16',
                    '/opt/data/test/',
                    '/opt/data/',
                    rsync_args=['--checksum'],
                    files=['test/file1'],
                    exclude=['logs/*']
                )
            ]
        }
    )

    plan_path = os.path.join(TEST_DATA_DIR, 'test_plan.yml')
    plan = read_plan(plan_path, target_env)
    assert_equal(plan, expected)

    plan = read_plan(plan_path, target_env, 'target_host2')
    assert_equal(list(plan.configs), ['10.0.0.2'])
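
A plausible shape for the parsed test_plan.yml, inferred from the expected Plan above and from read_plan in Example 3 below. The host names are hypothetical: read_plan passes each one through translate_host, which is what yields the IP addresses asserted above. The names of SourceFiles' first three positional fields never appear in this listing, so they are omitted here.

# hypothetical result of yaml.safe_load on test_plan.yml
example_plan_dict = {
    'source_env': 'source_env',
    'copy_files': [
        {'target_host1': [
            {'source_host': 'source_host1', 'files': ['test/']},
        ]},
        {'target_host2': [
            {'source_host': 'source_host2',
             'rsync_args': ['--checksum'],
             'files': ['test/file1'],
             'exclude': ['logs/*']},
        ]},
    ],
}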
Example 2
    def run(self, args, unknown_args):
        environment = get_environment(args.env_name)
        environment.create_generated_yml()

        plan = read_plan(args.plan_path, environment, args.limit)
        working_directory = _get_working_dir(args.plan_path, environment)
        ansible_context = AnsibleContext(args)

        # called for its side effect: fetch (and cache) the vault password up
        # front so later ansible runs don't prompt mid-operation
        environment.get_ansible_vault_password()
        if plan.source_env != environment and args.action in ('prepare', 'cleanup'):
            plan.source_env.get_ansible_vault_password()

        if args.action == 'prepare':
            for target_host, source_configs in plan.configs.items():
                self.log("Creating scripts to copy files.")
                prepare_file_copy_scripts(target_host, source_configs, working_directory)
                self.log("Moving scripts to target hosts.")
                copy_scripts_to_target_host(target_host, working_directory, environment, ansible_context)
            self.log("Establishing auth between target and source.")
            setup_auth(plan, environment, ansible_context, working_directory)

        if args.action == 'copy':
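            # run_action_with_check_mode runs the check-mode pass first
            # (unless skipped) and prompts before applying for real, matching
            # the inline check/apply flow shown in Example 31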
            def run_check():
                return execute_file_copy_scripts(environment, list(plan.configs), check_mode=True)

            def run_apply():
                return execute_file_copy_scripts(environment, list(plan.configs), check_mode=False)

            return run_action_with_check_mode(run_check, run_apply, args.skip_check)

        if args.action == 'cleanup':
            teardown_auth(plan, environment, ansible_context, working_directory)
            shutil.rmtree(working_directory)
Example 3
def read_plan(plan_path, target_env, limit=None):
    with open(plan_path, 'r') as f:
        plan_dict = yaml.safe_load(f)

    if 'source_env' in plan_dict:
        source_env = get_environment(plan_dict['source_env'])
    else:
        source_env = target_env

    def _get_source_files(config_dict):
        config_dict['source_host'] = source_env.translate_host(config_dict['source_host'], plan_path)
        return SourceFiles(**config_dict)

    configs = {
        target_env.translate_host(target_host, plan_path): [
            _get_source_files(config_dict) for config_dict in config_dicts
        ]
        for target in plan_dict['copy_files']
        for target_host, config_dicts in target.items()
    }
    if limit:
        subset = [host.name for host in target_env.inventory_manager.get_hosts(limit)]
        configs = {
            host: config
            for host, config in configs.items()
            if host in subset
        }
        if not configs:
            raise CommandError("Limit pattern did not match any hosts: {}".format(limit))

    return Plan(
        source_env=source_env,
        configs=configs
    )
Example 4
    def run(self, args, ssh_args):
        environment = get_environment(args.env_name)
        public_vars = environment.public_vars
        if args.server == '-':
            args.server = 'django_manage:0'
        # the default 'cchq' is redundant with ansible/group_vars/all.yml
        cchq_user = public_vars.get('cchq_user', 'cchq')
        # Name tabs like "droberts (2018-04-13)"
        window_name_expression = '"`whoami` (`date +%Y-%m-%d`)"'
        if args.remote_command:
            # attach to an existing tmux session and open a new window in it,
            # falling back to starting a fresh session if none exists
            ssh_args = [
                '-t',
                r'sudo -iu {cchq_user} tmux attach \; new-window -n {window_name} {remote_command} '
                r'|| sudo -iu {cchq_user} tmux new -n {window_name} {remote_command}'
                .format(
                    cchq_user=cchq_user,
                    remote_command=shlex_quote('{} ; bash'.format(args.remote_command)),
                    window_name=window_name_expression,
                )
            ] + ssh_args
        else:
            ssh_args = [
                '-t',
                'sudo -iu {cchq_user} tmux attach || sudo -iu {cchq_user} tmux new -n {window_name}'
                .format(cchq_user=cchq_user, window_name=window_name_expression)
            ]
        Ssh(self.parser).run(args, ssh_args)
Example 5
    def run(self, args, unknown_args):
        environment = get_environment(args.env_name)
        if not os.path.exists(environment.paths.inventory_ini_j2):
            print("Env {} not using templated inventory (inventory.ini.j2). Skipping"
                  .format(args.env_name))
            return 0

        if not args.cached:
            resources = get_aws_resources(environment)
            with open(environment.paths.aws_resources_yml, 'w') as f:
                f.write(yaml.safe_dump(resources, default_flow_style=False))
        else:
            with open(environment.paths.aws_resources_yml, 'r') as f:
                resources = yaml.safe_load(f.read())

        with open(environment.paths.inventory_ini_j2) as f:
            inventory_ini_j2 = f.read()

        with open(environment.paths.inventory_ini, 'w') as f:
            # by rendering inside the `with` block we ensure that if the
            # render fails, inventory.ini is left empty, reflecting that we
            # were unable to create it
            out_string = AwsFillInventoryHelper(environment, inventory_ini_j2,
                                                resources).render()
            f.write(out_string)
Example 6
def run(args):
    describe_instances = raw_describe_instances(args.env)
    environment = get_environment(args.env)
    new_hosts = get_hosts_from_describe_instances(describe_instances)
    inventory = get_inventory_from_file(environment)
    update_inventory_public_ips(inventory, new_hosts)
    save_inventory(environment, inventory)
Example 7
    def run(self, args, unknown_args):
        assert args.action == 'migrate' or not args.no_stop, \
            "You can only use --no-stop with migrate"
        environment = get_environment(args.env_name)
        environment.create_generated_yml()

        migration = CouchMigration(environment, args.migration_plan)
        check_connection(migration.target_couch_config.get_control_node())
        if migration.separate_source_and_target:
            check_connection(migration.source_couch_config.get_control_node())

        ansible_context = AnsibleContext(args)
        if args.limit and args.action != 'clean':
            puts(colored.yellow('Ignoring --limit (it only applies to "clean" action).'))

        if args.action == 'describe':
            return describe(migration)

        if args.action == 'plan':
            return plan(migration)

        if args.action == 'migrate':
            return migrate(migration, ansible_context, args.skip_check, args.no_stop)

        if args.action == 'commit':
            return commit(migration, ansible_context)

        if args.action == 'clean':
            return clean(migration, ansible_context, args.skip_check, args.limit)
Example 8
    def run(self, args, manage_args):
        environment = get_environment(args.env_name)
        public_vars = environment.public_vars
        # the default 'cchq' is redundant with ansible/group_vars/all.yml
        cchq_user = public_vars.get('cchq_user', 'cchq')
        deploy_env = environment.meta_config.deploy_env
        # the paths here are redundant with ansible/group_vars/all.yml
        if args.release:
            code_dir = '/home/{cchq_user}/www/{deploy_env}/releases/{release}'.format(
                cchq_user=cchq_user, deploy_env=deploy_env, release=args.release)
        else:
            code_dir = '/home/{cchq_user}/www/{deploy_env}/current'.format(
                cchq_user=cchq_user, deploy_env=deploy_env)
        remote_command = (
            'bash -c "cd {code_dir}; python_env/bin/python manage.py {args}"'
            .format(
                code_dir=code_dir,
                args=' '.join(shlex_quote(arg) for arg in manage_args),
            )
        )
        args.server = args.server or 'django_manage:0'
        if args.tmux:
            args.remote_command = remote_command
            Tmux(self.parser).run(args, [])
        else:
            ssh_args = ['sudo -u {cchq_user} {remote_command}'.format(
                cchq_user=cchq_user,
                remote_command=remote_command,
            )]
            if manage_args and manage_args[0] in ["shell", "dbshell"]:
                # force ssh to allocate a pseudo-terminal
                ssh_args = ['-t'] + ssh_args
            Ssh(self.parser).run(args, ssh_args)
Example 9
 def get_expected_dbs(args):
     environment = get_environment(args.env_name)
     dbs_expected_on_host = collections.defaultdict(list)
     dbs = environment.postgresql_config.to_generated_variables()['postgresql_dbs']['all']
     for db in dbs:
         dbs_expected_on_host[db['host']].append(db['name'])
     return dbs_expected_on_host
Example 10
    def run(self, args, unknown_args):
        environment = get_environment(args.env_name)
        remote_migration_state_manager = RemoteMigrationStateManager(environment.terraform_config)
        remote_migration_state = remote_migration_state_manager.fetch()
        migrations = get_migrations()

        applied_migrations = migrations[:remote_migration_state.number]
        unapplied_migrations = migrations[remote_migration_state.number:]

        # make sure remote checkpoint is consistent with migrations in code
        if applied_migrations:
            assert (applied_migrations[-1].number, applied_migrations[-1].slug) == \
                   (remote_migration_state.number, remote_migration_state.slug), \
                (remote_migration_state, applied_migrations[-1])
        else:
            assert (0, None) == (remote_migration_state.number, remote_migration_state.slug), \
                remote_migration_state

        if not unapplied_migrations:
            print("No migrations to apply")
            return
        state = terraform_list_state(args.env_name, unknown_args)
        print("Applying the following changes:{}".format(
            ''.join('\n  - {:0>4} {}'.format(migration.number, migration.slug)
                    for migration in unapplied_migrations)
        ))
        print("which will result in the following moves being made:")
        migration_plans = make_migration_plans(environment, state, unapplied_migrations, log=print)
        if ask("Do you want to apply this migration?"):
            apply_migration_plans(
                environment, migration_plans,
                remote_migration_state_manager=remote_migration_state_manager, log=print)
Example 11
def get_monolith_address(environment, exit=sys.exit):
    env = get_environment(environment)
    hosts = env.inventory_manager.get_hosts()
    if len(hosts) != 1:
        exit("There are {} servers in the environment. Please include the 'server'"
             "argument to select one.".format(len(hosts)))
    else:
        return get_server_address(environment, 'all', exit=exit)
Example 12
    def run(self, args, ssh_args):
        if args.server == 'control' and '-A' not in ssh_args:
            # Always include ssh agent forwarding on control machine
            ssh_args = ['-A'] + ssh_args
        ukhf = "UserKnownHostsFile="
        if not any(a.startswith((ukhf, "-o" + ukhf)) for a in ssh_args):
            environment = get_environment(args.env_name)
            ssh_args = ["-o", ukhf + environment.paths.known_hosts] + ssh_args
        return super(Ssh, self).run(args, ssh_args)
Example 13
    def run(self, args, unknown_args, always_skip_check=False, respect_ansible_skip=True):
        environment = get_environment(args.env_name)
        environment.create_generated_yml()
        ansible_context = AnsibleContext(args)
        check_branch(args)
        return run_ansible_playbook(
            environment, args.playbook, ansible_context, args.skip_check, args.quiet,
            always_skip_check, args.limit, args.use_factory_auth, unknown_args,
            respect_ansible_skip=respect_ansible_skip,
        )
Example 14
    def run(self, args, unknown_args):
        environment = get_environment(args.env_name)
        args.playbook = 'deploy_stack.yml'
        args.use_factory_auth = True
        public_vars = environment.public_vars
        unknown_args += ('--tags=bootstrap-users',) + get_user_arg(public_vars, unknown_args, use_factory_auth=True)

        if not public_vars.get('commcare_cloud_pem'):
            unknown_args += ('--ask-pass',)
        return AnsiblePlaybook(self.parser).run(args, unknown_args, always_skip_check=True)
Example 15
    def run(self, args, unknown_args):
        environment = get_environment(args.env_name)
        environment.create_generated_yml()
        ansible_context = AnsibleContext(args)

        if args.action == 'start':
            start_downtime(environment, ansible_context, args)

        if args.action == 'end':
            end_downtime(environment, ansible_context)
Example 16
    def run(self, args, unknown_args):
        args.playbook = 'add-ssh-keys.yml'
        args.quiet = True
        environment = get_environment(args.env_name)
        rc = AnsiblePlaybook(self.parser).run(
            args, unknown_args, always_skip_check=True, respect_ansible_skip=False)
        with open(environment.paths.known_hosts, 'r') as f:
            known_hosts = sorted(set(f.readlines()))
        with open(environment.paths.known_hosts, 'w') as f:
            f.writelines(known_hosts)
        return rc
Example 17
def test_pickle_environment(env):
    environment = get_environment(env)
    properties = [property_name for property_name in dir(Environment) if
                  isinstance(getattr(Environment, property_name), property)]
    # Call each property so it will get pickled
    for prop in properties:
        getattr(environment, prop)

    pickled_env = pickle.dumps(environment)
    loaded_env = pickle.loads(pickled_env)
    pickle.dumps(loaded_env)
Example 18
def test_hostnames(env):
    environment = get_environment(env)
    missing_hostnames = set()
    for group, hosts in environment.sshable_hostnames_by_group.items():
        for host in hosts:
            try:
                ip_address(host)
            except ValueError:
                pass
            else:
                hostname = environment.get_hostname(host)
                if hostname == host:
                    missing_hostnames.add(host)
    assert len(missing_hostnames) == 0, "Environment hosts missing hostnames {}".format(list(missing_hostnames))
Example 19
    def run(self, args, unknown_args):
        environment = get_environment(args.env_name)
        run_dir = environment.paths.get_env_file_path('.generated-terraform')
        modules_dir = os.path.join(TERRAFORM_DIR, 'modules')
        modules_dest = os.path.join(run_dir, 'modules')
        if not os.path.isdir(run_dir):
            os.mkdir(run_dir)
        if not (os.path.exists(modules_dest) and os.readlink(modules_dest) == modules_dir):
            os.symlink(modules_dir, modules_dest)

        if args.username != get_default_username():
            print_help_message_about_the_commcare_cloud_default_username_env_var(args.username)

        key_name = args.username

        try:
            generate_terraform_entrypoint(environment, key_name, run_dir,
                                          apply_immediately=args.apply_immediately)
        except UnauthorizedUser as e:
            allowed_users = environment.users_config.dev_users.present
            puts(colored.red(
                "Unauthorized user {}.\n\n"
                "Use COMMCARE_CLOUD_DEFAULT_USERNAME or --username to pass in one of the allowed ssh users:{}"
                .format(e.username, '\n  - '.join([''] + allowed_users))))
            return -1

        if not args.skip_secrets and unknown_args and unknown_args[0] in ('plan', 'apply'):
            rds_password = (
                environment.get_vault_variables()['secrets']['POSTGRES_USERS']['root']['password']
                if environment.terraform_config.rds_instances
                else ''
            )

            with open(os.path.join(run_dir, 'secrets.auto.tfvars'), 'w') as f:
                print('rds_password = {}'.format(json.dumps(rds_password)), file=f)

        env_vars = {'AWS_PROFILE': aws_sign_in(environment.terraform_config.aws_profile)}
        all_env_vars = os.environ.copy()
        all_env_vars.update(env_vars)
        cmd_parts = ['terraform'] + unknown_args
        cmd = ' '.join(shlex_quote(arg) for arg in cmd_parts)
        print_command('cd {}; {} {}; cd -'.format(
            run_dir,
            ' '.join('{}={}'.format(key, value) for key, value in env_vars.items()),
            cmd,
        ))
        return subprocess.call(cmd, shell=True, env=all_env_vars, cwd=run_dir)
Example 20
def provision_machines(spec, env_name=None, create_machines=True):
    if env_name is None:
        env_name = u'hq-{}'.format(
            ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(7))
        )
    environment = get_environment(env_name)
    inventory = bootstrap_inventory(spec, env_name)
    if create_machines:
        instance_ids = ask_aws_for_instances(env_name, spec.aws_config, len(inventory.all_hosts))
    else:
        instance_ids = None

    # poll until AWS reports IP addresses for the new instances
    while True:
        instance_ip_addresses = poll_for_aws_state(env_name, instance_ids)
        if instance_ip_addresses:
            break

    hosts_by_name = {}

    for host, (public_ip, private_ip) in zip(inventory.all_hosts, instance_ip_addresses.values()):
        host.public_ip = public_ip
        host.private_ip = private_ip
        host.vars['hostname'] = host.name
        hosts_by_name[host.name] = host

    for i, host_name in enumerate(inventory.all_groups['kafka'].host_names):
        hosts_by_name[host_name].vars['kafka_broker_id'] = i
        hosts_by_name[host_name].vars['swap_size'] = '2G'

    for host_name in inventory.all_groups['elasticsearch'].host_names:
        hosts_by_name[host_name].vars['elasticsearch_node_name'] = host_name

    if spec.aws_config.data_volume:
        inventory.all_groups['lvm'] = Group(name='lvm')
        for group in inventory.all_groups:
            for host_name in inventory.all_groups[group].host_names:
                hosts_by_name[host_name].vars.update({
                    'datavol_device': '/dev/mapper/consolidated-data',
                    'devices': "'{}'".format(json.dumps([spec.aws_config.data_volume['DeviceName']])),
                    'partitions': "'{}'".format(json.dumps(['{}1'.format(spec.aws_config.data_volume['DeviceName'])])),
                })
                if host_name not in inventory.all_groups['lvm'].host_names:
                    inventory.all_groups['lvm'].host_names.append(host_name)

    save_inventory(environment, inventory)
    copy_default_vars(environment, spec.aws_config)
    save_app_processes_yml(environment, inventory)
    save_fab_settings_yml(environment)
    save_meta_yml(environment, env_name, spec.settings.users)
Example 21
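# `env` below is Fabric's global env object (an assumption based on the fab
# usage elsewhere in this listing); load_env layers commcare-cloud settings onto it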
def load_env():
    env.ccc_environment = get_environment(env.env_name)
    vars_not_to_overwrite = {key: value for key, value in env.items()
                             if key not in ('sudo_user', 'keepalive')}

    vars = env.ccc_environment.app_processes_config.to_json()
    vars.update(env.ccc_environment.fab_settings_config.to_json())
    # Variables that were already in `env`
    # take precedence over variables set in app-processes.yml
    # except a short blacklist that we expect app-processes.yml vars to overwrite
    overlap = set(vars_not_to_overwrite) & set(vars)
    for key in overlap:
        print('NOTE: ignoring app-processes.yml var {}={!r}. Using value {!r} instead.'.format(key, vars[key], vars_not_to_overwrite[key]))
    vars.update(vars_not_to_overwrite)
    env.update(vars)
    env.deploy_env = env.ccc_environment.meta_config.deploy_env
Example 22
    def run(self, args, unknown_args):
        envs = sorted(get_available_envs(exclude_symlinks=True))
        var_keys = {}
        for env in envs:
            print('[{}] '.format(env), end='')
            environment = get_environment(env)
            var_keys[env] = set(get_flat_list_of_keys(environment.get_vault_variables()))

        for env in envs:
            print('\t{}'.format(env), end='')
        print()
        for key in sorted(set(chain.from_iterable(var_keys.values()))):
            print('.'.join(part if part is not None else '*' for part in key), end='')
            for env in envs:
                print('\t{}'.format('x' if key in var_keys[env] else ''), end='')
            print()
Example 23
    def run(self, args, unknown_args):
        environment = get_environment(args.env_name)

        services = [
            SERVICES_BY_NAME[name]
            for name in args.services
        ]

        ansible_context = AnsibleContext(args)
        non_zero_exits = []
        for service_cls in services:
            service = service_cls(environment, ansible_context)
            exit_code = service.run(args.action, args.limit, args.process_pattern)
            if exit_code != 0:
                non_zero_exits.append(exit_code)
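        # surface the first failing service's exit code as this command's result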
        return non_zero_exits[0] if non_zero_exits else 0
Example 24
def ask_aws_for_instances(env_name, aws_config, count):
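    # Python 2 variant: note raw_input() and unicode() below; a Python 3
    # version of this function appears near the end of this listing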
    cache_file = '{env}-aws-new-instances.json'.format(env=env_name)
    if os.path.exists(cache_file):
        cache_file_response = raw_input("\n{} already exists. Enter: "
                                        "\n(d) to delete the file AND environment directory containing it, and"
                                        " terminate the existing aws instances or "
                                        "\n(anything) to continue using this file and these instances."
                                        "\n Enter selection: ".format(cache_file))
        if cache_file_response == 'd':
            # Remove old cache file and terminate existing instances for this env
            print("Terminating existing instances for {}".format(env_name))
            subprocess.call(['commcare-cloud-bootstrap', 'terminate',  env_name])
            print("Deleting file: {}".format(cache_file))
            os.remove(cache_file)
            env_dir = get_environment(env_name).paths.get_env_file_path('')
            if os.path.isdir(env_dir):
                print("Deleting environment dir: {}".format(env_name))
                shutil.rmtree(env_dir)

    if not os.path.exists(cache_file):
        # Provision new instances for this env
        print("Provisioning new instances.")
        cmd_parts = [
            'aws', 'ec2', 'run-instances',
            '--image-id', aws_config.ami,
            '--count', unicode(int(count)),
            '--instance-type', aws_config.type,
            '--key-name', aws_config.key_name,
            '--security-group-ids', aws_config.security_group_id,
            '--subnet-id', aws_config.subnet,
            '--tag-specifications', 'ResourceType=instance,Tags=[{Key=env,Value=' + env_name + '}]',
        ]
        block_device_mappings = []
        if aws_config.boot_volume:
            block_device_mappings.append(aws_config.boot_volume)
        if aws_config.data_volume:
            block_device_mappings.append(aws_config.data_volume)
        cmd_parts.extend(['--block-device-mappings', json.dumps(block_device_mappings)])
        aws_response = subprocess.check_output(cmd_parts)
        with open(cache_file, 'w') as f:
            f.write(aws_response)
    else:
        # Use the existing instances
        with open(cache_file, 'r') as f:
            aws_response = f.read()
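    # the run-instances response JSON carries an "Instances" list whose
    # entries each include an "InstanceId", which is all we extract below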
    aws_response = json.loads(aws_response)
    return {instance['InstanceId'] for instance in aws_response["Instances"]}
Example 25
    def run(self, args, unknown_args):
        check_branch(args)
        fab_args = []
        if args.fab_command:
            fab_args.append(args.fab_command)
        fab_args.extend(unknown_args)
        if args.l:
            fab_args.append('-l')
        else:
            env = get_environment(args.env_name)
            fab_args.extend(['--disable-known-hosts',
                             '--system-known-hosts', env.paths.known_hosts])
            # create the known_hosts file if it doesn't exist
            # (`env` is only defined on this branch)
            known_hosts_file = env.paths.known_hosts
            if not os.path.isfile(known_hosts_file):
                open(known_hosts_file, 'a').close()
        return exec_fab_command(args.env_name, *fab_args)
Example 26
    def run(self, args, unknown_args):
        args.module = 'datadog_event'
        environment = get_environment(args.env_name)
        vault = environment.get_vault_variables()['secrets']
        tags = "environment:{}".format(args.env_name)
        args.module_args = "api_key={api_key} app_key={app_key} " \
            "tags='{tags}' text='{text}' title='{title}' aggregation_key={agg}".format(
                api_key=vault['DATADOG_API_KEY'],
                app_key=vault['DATADOG_APP_KEY'],
                tags=tags,
                text=args.event_text,
                title=args.event_title,
                agg='commcare-cloud'
            )
        return run_ansible_module(
            environment, AnsibleContext(args),
            '127.0.0.1', args.module, args.module_args,
            False, False, False,
        )
Example 27
    def get_present_dbs(args):
        dbs_present_in_host = collections.defaultdict(list)
        args.server = 'postgresql'
        ansible_username = '******'
        command = "python /usr/local/sbin/db-tools.py --list-all"

        environment = get_environment(args.env_name)
        ansible_password = environment.get_ansible_user_password()
        host_addresses = get_instance_group(args.env_name, args.server)
        user_as = 'postgres'

        privileged_command = PrivilegedCommand(ansible_username, ansible_password, command, user_as)

        present_db_op = privileged_command.run_command(host_addresses)

        # one entry per host: the newline-separated output of the PostgreSQL
        # query, split into individual database names
        for host_address, output in present_db_op.items():
            dbs_present_in_host[host_address] = output.split("\r\n")

        return dbs_present_in_host
Example 28
    def run(self, args, unknown_args):
        environment = get_environment(args.env_name)
        environment.create_generated_yml()
        ansible_context = AnsibleContext(args)

        def _run_ansible(args, *unknown_args):
            return run_ansible_module(
                environment, ansible_context,
                args.inventory_group, args.module, args.module_args,
                args.become, args.become_user, args.use_factory_auth,
                *unknown_args
            )

        def run_check():
            with environment.suppress_vault_loaded_event():
                return _run_ansible(args, '--check', *unknown_args)

        def run_apply():
            return _run_ansible(args, *unknown_args)

        return run_action_with_check_mode(run_check, run_apply, args.skip_check, args.quiet)
Example 29
def get_django_webworker_name(environment_name):
    environment = get_environment(environment_name)
    deploy_env = environment.meta_config.deploy_env
    project = environment.fab_settings_config.project
    return "{project}-{environment}-django".format(
        project=project, environment=deploy_env)
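# e.g. get_django_webworker_name('production') -> 'commcare-hq-production-django'
# (hypothetical project and deploy_env values)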
Example 30
    def run(self, args, unknown_args):
        if 'destroy' in unknown_args:
            puts(
                color_error(
                    "Refusing to run a terraform command containing the argument 'destroy'."
                ))
            puts(color_error("It's simply not worth the risk."))
            exit(-1)

        environment = get_environment(args.env_name)
        run_dir = environment.paths.get_env_file_path('.generated-terraform')
        modules_dir = os.path.join(TERRAFORM_DIR, 'modules')
        modules_dest = os.path.join(run_dir, 'modules')
        if not os.path.isdir(run_dir):
            os.mkdir(run_dir)
        if not (os.path.exists(modules_dest)
                and os.readlink(modules_dest) == modules_dir):
            os.symlink(modules_dir, modules_dest)

        if args.username != get_default_username():
            print_help_message_about_the_commcare_cloud_default_username_env_var(
                args.username)

        key_name = args.username

        try:
            generate_terraform_entrypoint(
                environment,
                key_name,
                run_dir,
                apply_immediately=args.apply_immediately)
        except UnauthorizedUser as e:
            allowed_users = environment.users_config.dev_users.present
            puts(
                color_error(
                    "Unauthorized user {}.\n\n"
                    "Use COMMCARE_CLOUD_DEFAULT_USERNAME or --username to pass in one of the allowed ssh users:{}"
                    .format(e.username, '\n  - '.join([''] + allowed_users))))
            return -1

        if not args.skip_secrets and unknown_args and unknown_args[0] in (
                'plan', 'apply'):
            rds_password = (
                environment.get_secret('POSTGRES_USERS.root.password')
                if environment.terraform_config.rds_instances else '')

            with open(os.path.join(run_dir, 'secrets.auto.tfvars'),
                      'w',
                      encoding='utf-8') as f:
                print('rds_password = {}'.format(json.dumps(rds_password)),
                      file=f)

        env_vars = {'AWS_PROFILE': aws_sign_in(environment)}
        all_env_vars = os.environ.copy()
        all_env_vars.update(env_vars)
        cmd_parts = ['terraform'] + unknown_args
        cmd = ' '.join(shlex_quote(arg) for arg in cmd_parts)
        print_command('cd {}; {} {}; cd -'.format(
            run_dir,
            ' '.join('{}={}'.format(key, value)
                     for key, value in env_vars.items()),
            cmd,
        ))
        return subprocess.call(cmd, shell=True, env=all_env_vars, cwd=run_dir)
Example 31
    def run(self, args, unknown_args):
        environment = get_environment(args.environment)
        ansible_context = AnsibleContext(args)
        public_vars = environment.public_vars

        def _run_ansible(args, *unknown_args):
            cmd_parts = (
                'ANSIBLE_CONFIG={}'.format(
                    os.path.join(ANSIBLE_DIR, 'ansible.cfg')),
                'ansible',
                args.inventory_group,
                '-m',
                args.module,
                '-i',
                environment.paths.inventory_ini,
                '-u',
                args.remote_user,
                '-a',
                args.module_args,
                '--diff',
            ) + tuple(unknown_args)

            become = args.become or bool(args.become_user)
            become_user = args.become_user
            include_vars = False
            if become:
                cmd_parts += ('--become', )
                if become_user not in ('cchq', ):
                    # ansible user can do things as cchq without a password,
                    # but needs the ansible user password in order to do things as other users.
                    # In that case, we need to pull in the vault variable containing this password
                    include_vars = True
                if become_user:
                    cmd_parts += ('--become-user', args.become_user)

            if include_vars:
                cmd_parts += (
                    '-e',
                    '@{}'.format(environment.paths.vault_yml),
                    '-e',
                    '@{}'.format(environment.paths.public_yml),
                )

            ask_vault_pass = include_vars and public_vars.get(
                'commcare_cloud_use_vault', True)
            if ask_vault_pass:
                cmd_parts += ('--vault-password-file=/bin/cat', )

            cmd_parts += get_common_ssh_args(public_vars)
            cmd = ' '.join(shlex_quote(arg) for arg in cmd_parts)
            print_command(cmd)
            p = subprocess.Popen(cmd,
                                 stdin=subprocess.PIPE,
                                 shell=True,
                                 env=ansible_context.env_vars)
            if ask_vault_pass:
                p.communicate(input='{}\n'.format(
                    ansible_context.get_ansible_vault_password()))
            else:
                p.communicate()
            return p.returncode

        def run_check():
            return _run_ansible(args, '--check', *unknown_args)

        def run_apply():
            return _run_ansible(args, *unknown_args)

        exit_code = 0

        if args.skip_check:
            user_wants_to_apply = ask(
                'Do you want to apply without running the check first?',
                quiet=args.quiet)
        else:
            exit_code = run_check()
            if exit_code == 1:
                # this means there was an error before ansible was able to start running
                exit(exit_code)
                return  # for IDE
            elif exit_code == 0:
                puts(
                    colored.green(
                        u"✓ Check completed with status code {}".format(
                            exit_code)))
                user_wants_to_apply = ask(
                    'Do you want to apply these changes?', quiet=args.quiet)
            else:
                puts(
                    colored.red(u"✗ Check failed with status code {}".format(
                        exit_code)))
                user_wants_to_apply = ask(
                    'Do you want to try to apply these changes anyway?',
                    quiet=args.quiet)

        if user_wants_to_apply:
            exit_code = run_apply()
            if exit_code == 0:
                puts(
                    colored.green(
                        u"✓ Apply completed with status code {}".format(
                            exit_code)))
            else:
                puts(
                    colored.red(u"✗ Apply failed with status code {}".format(
                        exit_code)))

        exit(exit_code)
Example 32
def get_instance_group(environment, group):
    env = get_environment(environment)
    return env.sshable_hostnames_by_group[group]
Example 33
    def source_environment(self):
        if self.separate_source_and_target:
            return get_environment(self.plan.src_env)
        return self.target_environment
Example 34
    def run(self, args, unknown_args):
        limit = args.limit
        environment = get_environment(args.env_name)
        if limit:
            environment.inventory_manager.subset(limit)

        with open(environment.paths.known_hosts, 'r', encoding='utf-8') as known_hosts:
            original_keys_by_host = _get_host_key_map(
                [line.strip() for line in known_hosts.readlines()]
            )

        procs = {}
        for hostname in environment.inventory_hostname_map:
            port = '22'
            if ':' in hostname:
                hostname, port = hostname.split(':')
            cmd = 'ssh-keyscan -T 10 -p {port} {hostname},$(dig +short {hostname})'.format(
                hostname=hostname,
                port=port
            )
            procs[hostname] = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                               universal_newlines=True)

        lines = []
        error_hosts = set()
        for hostname, proc in procs.items():
            sys.stdout.write('[{}]: '.format(hostname))
            proc.wait()
            error, host_lines = _get_lines(proc)
            if error:
                sys.stdout.write(error)
                error_hosts.add(hostname)
            else:
                sys.stdout.write(str(color_success('fetched key\n')))
                lines.extend(host_lines)

        updated_keys_by_host = _get_host_key_map(lines)

        all_keys = set(original_keys_by_host) | set(updated_keys_by_host)
        lines = []
        for host_key_type in sorted(all_keys):
            host, key_type = host_key_type
            original = original_keys_by_host.pop(host_key_type, None)
            updated = updated_keys_by_host.get(host_key_type, None)
            if updated and original:
                if updated != original:
                    print(color_changed('Updating key: {} {}'.format(*host_key_type)))
            elif updated:
                print(color_added('Adding key: {} {}'.format(*host_key_type)))
            elif original:
                if limit or host in error_hosts:
                    # if we're limiting or there was an error keep original key
                    updated = original
                else:
                    print(color_removed('Removing key: {} {}'.format(*host_key_type)))

            if updated:
                lines.append('{} {} {}'.format(host, key_type, updated))

        with open(environment.paths.known_hosts, 'w', encoding='utf-8') as known_hosts:
            known_hosts.write('\n'.join(sorted(lines)))

        try:
            environment.check_known_hosts()
        except EnvironmentException as e:
            print(color_error(str(e)))
            return 1
        return 0
Example 35
    def run(self, args, manage_args):
        environment = get_environment(args.env_name)
        by_process = _get_pillow_resources_by_name(environment)
        _print_table(by_process)
Example 36
def test_app_processes_yml(env):
    environment = get_environment(env)
    environment.app_processes_config.check()
    environment.translated_app_processes_config.check()
Example 37
def test_fab_settings_yml(env):
    environment = get_environment(env)
    environment.fab_settings_config  # check if the schema wraps it
Example 38
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import pickle
from ipaddress import ip_address
from parameterized import parameterized

from commcare_cloud.environment.main import Environment, get_environment
from commcare_cloud.environment.paths import get_available_envs

commcare_envs = [(env, ) for env in (get_environment(env_name)
                                     for env_name in get_available_envs())
                 if not env.meta_config.bare_non_cchq_environment]


@parameterized(commcare_envs)
def test_all(environment):
    environment.check()


# useful for python 2 -> 3 migration
@parameterized(commcare_envs)
def test_authorized_key(environment):
    environment.get_authorized_key('gherceg')


@parameterized(commcare_envs)
def test_hostnames(environment):
    missing_hostnames = set()
    for group, hosts in environment.sshable_hostnames_by_group.items():
        for host in hosts:
            try:
                ip_address(host)
            except ValueError:
                pass
            else:
                hostname = environment.get_hostname(host)
                if hostname == host:
                    missing_hostnames.add(host)
    assert len(missing_hostnames) == 0, \
        "Environment hosts missing hostnames {}".format(list(missing_hostnames))
Example 39
def get_formplayer_spring_instance_name(environment_name):
    environment = get_environment(environment_name)
    deploy_env = environment.meta_config.deploy_env
    project = environment.fab_settings_config.project
    return "{project}-{environment}-formsplayer-spring".format(
        project=project, environment=deploy_env)
Example 40
def _get_migration(plan_name):
    plan_path = os.path.join(PLANS_DIR, plan_name, 'plan.yml')
    migration = CouchMigration(get_environment('env1'), plan_path)
    return migration
Example 41
def test_all(env):
    environment = get_environment(env)
    environment.check()
Example 42
def get_instance_group(environment, group):
    env = get_environment(environment)
    return env.inventory_hosts_by_group[group]
Example 43
def initialize_datadog(config):
    env = get_environment(config.env_with_datadog_auth)
    initialize(api_key=env.get_vault_var('secrets.DATADOG_API_KEY'),
               app_key=env.get_vault_var('secrets.DATADOG_APP_KEY'))
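# `initialize` is datadog.initialize from the datadog Python package; it stores
# the credentials module-wide for subsequent datadog API calls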
Example 44
    def run(self, args, manage_args):
        environment = get_environment(args.env_name)
        public_vars = environment.public_vars
        # the default 'cchq' is redundant with ansible/group_vars/all.yml
        cchq_user = public_vars.get('cchq_user', 'cchq')
        deploy_env = environment.meta_config.deploy_env
        # the paths here are redundant with ansible/group_vars/all.yml
        if args.release:
            code_dir = '/home/{cchq_user}/www/{deploy_env}/releases/{release}'.format(
                cchq_user=cchq_user,
                deploy_env=deploy_env,
                release=args.release)
        else:
            code_dir = '/home/{cchq_user}/www/{deploy_env}/current'.format(
                cchq_user=cchq_user, deploy_env=deploy_env)

        def _get_ssh_args(remote_command):
            return [
                'sudo -iu {cchq_user} bash -c {remote_command}'.format(
                    cchq_user=cchq_user,
                    remote_command=shlex_quote(remote_command),
                )
            ]

        if args.tee_file:
            rc = Ssh(self.parser).run(
                args,
                _get_ssh_args('cd {code_dir}; [[ -f {tee_file} ]]'.format(
                    code_dir=code_dir, tee_file=shlex_quote(args.tee_file))))
            if rc in (0, 1):
                file_already_exists = (rc == 0)
            else:
                return rc

            if file_already_exists:
                puts(
                    color_error(
                        "Refusing to --tee to a file that already exists ({})".
                        format(args.tee_file)))
                return 1

            tee_file_cmd = ' | tee {}'.format(shlex_quote(args.tee_file))
        else:
            tee_file_cmd = ''

        python_env = 'python_env-3.6'
        remote_command = (
            'cd {code_dir}; {python_env}/bin/python manage.py {args}{tee_file_cmd}'
            .format(
                python_env=python_env,
                code_dir=code_dir,
                args=' '.join(shlex_quote(arg) for arg in manage_args),
                tee_file_cmd=tee_file_cmd,
            ))
        if args.tmux:
            args.remote_command = remote_command
            return Tmux(self.parser).run(args, [])
        else:
            ssh_args = _get_ssh_args(remote_command)
            if manage_args and manage_args[0] in ["shell", "dbshell"]:
                # force ssh to allocate a pseudo-terminal
                ssh_args = ['-t'] + ssh_args
            return Ssh(self.parser).run(args, ssh_args)
Example 45
    def run(self, args, unknown_args):
        environment = get_environment(args.env_name)
        resources = get_aws_resources(environment)
        for name, address in sorted(resources.items()):
            print('{}\t{}'.format(address, name))
        return 0
Example 46
def get_instance_group(environment, group):
    env = get_environment(environment)
    return env.sshable_hostnames_by_group[group]
Example 47
    def run(self, args, unknown_args):
        environment = get_environment(args.env_name)
        duration_minutes = args.duration_minutes
        aws_profile = environment.terraform_config.aws_profile
        aws_sign_in(aws_profile, duration_minutes, force_new=True)
Example 48
    def run(self, args, unknown_args):
        environment = get_environment(args.env_name)
        duration_minutes = args.duration_minutes
        aws_sign_in(environment, duration_minutes, force_new=True)
Example 49
def ask_aws_for_instances(env_name, aws_config, count):
    cache_file = '{env}-aws-new-instances.json'.format(env=env_name)
    if os.path.exists(cache_file):
        cache_file_response = input(
            "\n{} already exists. Enter: "
            "\n(d) to delete the file AND environment directory containing it, and"
            " terminate the existing aws instances or "
            "\n(anything) to continue using this file and these instances."
            "\n Enter selection: ".format(cache_file))
        if cache_file_response == 'd':
            # Remove old cache file and terminate existing instances for this env
            print("Terminating existing instances for {}".format(env_name))
            subprocess.call(
                ['commcare-cloud-bootstrap', 'terminate', env_name])
            print("Deleting file: {}".format(cache_file))
            os.remove(cache_file)
            env_dir = get_environment(env_name).paths.get_env_file_path('')
            if os.path.isdir(env_dir):
                print("Deleting environment dir: {}".format(env_name))
                shutil.rmtree(env_dir)

    if not os.path.exists(cache_file):
        # Provision new instances for this env
        print("Provisioning new instances.")
        cmd_parts = [
            'aws',
            'ec2',
            'run-instances',
            '--image-id',
            aws_config.ami,
            '--count',
            six.text_type(int(count)),
            '--instance-type',
            aws_config.type,
            '--key-name',
            aws_config.key_name,
            '--security-group-ids',
            aws_config.security_group_id,
            '--subnet-id',
            aws_config.subnet,
            '--tag-specifications',
            'ResourceType=instance,Tags=[{Key=env,Value=' + env_name + '}]',
        ]
        block_device_mappings = []
        if aws_config.boot_volume:
            block_device_mappings.append(aws_config.boot_volume)
        if aws_config.data_volume:
            block_device_mappings.append(aws_config.data_volume)
        cmd_parts.extend(
            ['--block-device-mappings',
             json.dumps(block_device_mappings)])
        aws_response = subprocess.check_output(cmd_parts)
        with open(cache_file, 'wb') as f:
            # PY2: check_output returns a byte string
            # PY3: would need to specify universal_newlines=True in check_output to pass in str and receive str
            # easiest to continue using bytes on both python versions
            f.write(aws_response)
    else:
        # Use the existing instances
        with open(cache_file, 'r', encoding='utf-8') as f:
            aws_response = f.read()
    aws_response = json.loads(aws_response)
    return {instance['InstanceId'] for instance in aws_response["Instances"]}