def is_bootstrap_done(hosts):
    """
    Loops through a list of IPs and checks for the prescence of
    /tmp/bootstrap_done to ensure that the launch config has finished
    executing. The call will also try to handle the case where we lose
    ssh during the wait, which can happen if another action triggers
    a reboot.

    Args:
        hosts(list): A list of IPs to check
    """
    ret = []
    for host in hosts:
        env.host_string = '{0}@{1}'.format(env.user, host)
        target_file = '{}/bootstrap_done'.format(env.bootstrap_tmp_path)
        try:
            file_exists = files.exists(target_file)
            ret.append(file_exists)
            if file_exists:
                logging.info("Salt bootstrap file {} on host {}...".format(
                    target_file, host))
            else:
                logging.info(
                    "Salt bootstrap file {} not found on host {}...".format(
                        target_file, host))
        except NetworkError:
            logging.warning(
                "Could not connect to host {}, attempting to recover connection..."
                .format(host))
            # Catch the network error, wait for SSH to come back, and count
            # this host as not done so the whole check will be retried.
            stack_name = get_stack_name()
            ec2 = get_connection(EC2)
            ec2.wait_for_ssh(stack_name)
            ret.append(False)
    return all(ret)


def get_instance_ips():
    """
    Get a list of the public IPs of the current instances in the stack.
    """
    ec2 = get_connection(EC2)
    stack_name = get_stack_name()
    return ec2.get_stack_instance_public_ips(stack_name)


def delete_tar_task():
    """
    Remove the encrypted salt tree from the S3 bucket.

    This needs to be called before invoking cfn_delete, otherwise the S3 bucket
    will fail to be deleted. It only removes the single file that the
    ``upload_salt`` task creates, so if any other files have been placed in the
    bucket the deletion will still fail.
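
    Example (assuming this and ``cfn_delete`` are exposed as Fabric tasks)::

        fab delete_tar_task cfn_delete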
    """
    _validate_fabric_env()
    stack_name = get_stack_name()
    delete_tar(stack_name=stack_name)
    def test_get_stack_name(self, get_first_public_elb_function,
                            get_zone_id_function, get_legacy_name_function,
                            get_zone_name_function, get_connection_function):
        '''
        Test that get_stack_name() returns the correct stack name.

        Args:
            get_first_public_elb_function: mock of get_first_public_elb()
            get_zone_id_function: mock of get_zone_id()
            get_legacy_name_function: mock of get_legacy_name() [application-environment]
            get_zone_name_function: mock of get_zone_name()
            get_connection_function: mock of get_connection(klass)
        '''
        get_connection_function.side_effect = self.connection_side_effect

        stack_name = fab_tasks.get_stack_name(False)
        self.assertTrue(stack_name)
        self.assertEqual(stack_name, "unittest-dev-12345678")


def wait_for_minions(timeout=600, interval=20):
    """
    This task ensures that the initial bootstrap has finished on all
    stack instances.

    Args:
        timeout(int): maximum time to wait for the bootstrap to finish
        interval(int): time to wait between checks
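
    Example (assuming this is exposed as a Fabric task)::

        fab wait_for_minions:timeout=900,interval=30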
    """
    _validate_fabric_env()
    stack_name = get_stack_name()
    ec2 = get_connection(EC2)
    logging.info("Waiting for SSH on all instances...")
    ec2.wait_for_ssh(stack_name)
    fab_hosts = get_instance_ips()
    logging.info("Waiting for bootstrap script to finish on all instances...")
    utils.timeout(timeout, interval)(is_bootstrap_done)(fab_hosts)


def upload_salt():
    """
    Get the encrypted data key from one of the stack hosts, create a tar
    file with the salt states, pillar, formulas etc., encrypt the tar using
    KMS and GPG (AES), and upload it to S3.
    """
    _validate_fabric_env()
    stack_name = get_stack_name()

    work_dir = os.path.dirname(env.real_fabfile)

    project_config = config.ProjectConfig(env.config, env.environment,
                                          env.stack_passwords)
    cfg = project_config.config

    salt_cfg = cfg.get('salt', {})

    local_salt_dir = os.path.join(
        work_dir,
        salt_cfg.get('local_salt_dir', 'salt'),
    )
    local_pillar_dir = os.path.join(
        work_dir,
        salt_cfg.get('local_pillar_dir', 'pillar'),
        env.environment,
    )
    local_vendor_dir = os.path.join(
        work_dir,
        salt_cfg.get('local_vendor_dir', 'vendor'),
    )

    # TODO: This path actually appears in the minion conf, which isn't
    # templated, so die if this path is different (it's better to error
    # explicitly than to just not work).
    remote_state_dir = salt_cfg.get('remote_state_dir', '/srv/salt')
    remote_pillar_dir = salt_cfg.get('remote_pillar_dir', '/srv/pillar')

    vendor_root = os.path.join(local_vendor_dir, '_root')
    bs_path = pkgutil.get_loader('bootstrap_salt').filename
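    # Map each local source directory to the absolute path it should occupy on
    # the minion; the staging loop below recreates this layout under a temp
    # dir so the resulting tarball can be extracted relative to /.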
    dirs = {
        local_salt_dir:
        remote_state_dir,
        local_pillar_dir:
        remote_pillar_dir,
        vendor_root:
        '/srv/salt-formulas/',
        '{0}/contrib/srv/salt/_grains'.format(bs_path):
        os.path.join(remote_state_dir, "_grains", ""),
        '{0}/contrib/etc/'.format(bs_path):
        '/etc/',
        '{0}/contrib/usr/'.format(bs_path):
        '/usr/',
    }

    tmp_folder = tempfile.mkdtemp()
    for local_dir, dest_dir in dirs.items():
        # Since dest_dir will likely start with "/" (which would make join
        # ignore the tmp_folder we specify) make it start with "./" instead so
        # it is contained
        stage_path = os.path.join(tmp_folder, "." + dest_dir)

        utils.copytree(local_dir, stage_path, symlinks=False)

    cfg_path = os.path.join(tmp_folder, "./{0}".format(remote_pillar_dir))
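    # Dump the merged project config into the staged pillar tree as
    # cloudformation.sls so it ends up available to the minions as pillar data.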
    with open(os.path.join(cfg_path, 'cloudformation.sls'), 'w') as cfg_file:
        yaml.dump(cfg, cfg_file)

    local("chmod -R 755 {0}".format(tmp_folder))
    local("chmod -R 700 {0}{1}".format(tmp_folder, quote(remote_state_dir)))
    local("chmod -R 700 {0}{1}".format(tmp_folder, quote(remote_pillar_dir)))

    shutil.make_archive("srv", format="tar", root_dir=tmp_folder)
    shutil.rmtree(tmp_folder)

    env.host_string = '{0}@{1}'.format(env.user, get_instance_ips()[0])
    # Here we get the encrypted data key for this specific stack; we then use
    # KMS to get the plaintext key and use that key to encrypt the salt content.
    # We get the key over SSH because it is unique to each stack.

    key = StringIO.StringIO()

    get(remote_path='/etc/salt.key.enc', local_path=key, use_sudo=True)
    key.seek(0)
    encrypt_file('./srv.tar', key_file=key)
    key.close()

    os.unlink("srv.tar")
    local("aws s3 --profile {0} cp ./srv.tar.gpg s3://{1}-salt/".format(
        quote(env.aws), quote(stack_name)))
    os.unlink("srv.tar.gpg")