Example #1
def override_profile_config(self, name, data):
    """
    Update an existing profile in cloud.profiles.d/ec2.conf in place
    """
    conf_path = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "cloud.profiles.d",
                             "ec2.conf")
    with salt.utils.files.fopen(conf_path, "r") as fp:
        conf = yaml.safe_load(fp)
    conf[name].update(data)
    with salt.utils.files.fopen(conf_path, "w") as fp:
        salt.utils.yaml.safe_dump(conf, fp)
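The method above is a plain YAML read-modify-write cycle. A minimal standalone sketch of the same pattern, using stock PyYAML instead of the salt.utils wrappers; the path and profile name below are hypothetical:

import yaml

def override_config(conf_path, name, data):
    # Load the whole file, merge the overrides into one section,
    # and write everything back out.
    with open(conf_path) as fp:
        conf = yaml.safe_load(fp) or {}
    conf.setdefault(name, {}).update(data)
    with open(conf_path, "w") as fp:
        yaml.safe_dump(conf, fp, default_flow_style=False)

# e.g. override_config("cloud.profiles.d/ec2.conf", "ec2-test", {"size": "t2.small"})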
Example #2
def add_profile_config(self, name, data, conf, new_profile):
    """
    Copy an existing profile and add a new profile to the same file
    """
    conf_path = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "cloud.profiles.d", conf)
    with salt.utils.files.fopen(conf_path, "r") as fp:
        # load into a new name so it does not shadow the `conf` filename argument
        profiles = yaml.safe_load(fp)
    profiles[new_profile] = profiles[name].copy()
    profiles[new_profile].update(data)
    with salt.utils.files.fopen(conf_path, "w") as fp:
        salt.utils.yaml.safe_dump(profiles, fp)
Example #3
    def assertDestroyInstance(self, instance_name=None, timeout=None):
        if timeout is None:
            timeout = TIMEOUT
        if not instance_name:
            instance_name = self.instance_name
        log.debug('Deleting instance "%s"', instance_name)
        delete_str = self.run_cloud(
            "-d {} --assume-yes --out=yaml".format(instance_name),
            timeout=timeout)
        if delete_str:
            delete = yaml.safe_load("\n".join(delete_str))
            self.assertIn(self.profile_str, delete)
            self.assertIn(self.PROVIDER, delete[self.profile_str])
            self.assertIn(instance_name,
                          delete[self.profile_str][self.PROVIDER])

            delete_status = delete[self.profile_str][
                self.PROVIDER][instance_name]
            if isinstance(delete_status, str):
                self.assertEqual(delete_status, "True")
                return
            elif isinstance(delete_status, dict):
                current_state = delete_status.get("currentState")
                if current_state:
                    if current_state.get("ACTION"):
                        self.assertIn(".delete", current_state.get("ACTION"))
                        return
                    else:
                        self.assertEqual(current_state.get("name"),
                                         "shutting-down")
                        return
        # The delete output did not confirm success; ask salt-cloud after a delay
        query = self.query_instances()
        # some instances take a while to report their destruction
        for tries in range(6):
            if not self._instance_exists(query=query):
                # instance no longer reported; stop polling
                break
            sleep(30)
            log.debug(
                'Instance "%s" still found in query after %s tries: %s',
                instance_name,
                tries,
                query,
            )
            query = self.query_instances()
        # The last query should have been successful
        self.assertNotIn(instance_name, self.query_instances())
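For reference, the nested structure this assertion walks is the parsed `--out=yaml` delete output; a sketch of the assumed shape, with hypothetical profile, provider, and instance names:

# What yaml.safe_load() is assumed to yield for the delete output:
delete = {
    "ec2-config": {                      # self.profile_str
        "ec2": {                         # self.PROVIDER
            "cloud-test-instance": {     # instance_name
                "currentState": {"name": "shutting-down"},
            },
        },
    },
}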
Example #4
    def read_confs(cloud_dir, section):
        '''
        read through cloud config files
        '''
        for file_name in os.listdir(cloud_dir):
            with open(os.path.join(cloud_dir, file_name)) as file_:
                try:
                    data = yaml.safe_load(file_.read())
                except yaml.YAMLError:
                    # files that fail to parse as YAML are skipped
                    continue

                # safe_load returns None for empty files; require a mapping
                if isinstance(data, dict) and section in data:
                    return {
                        'driver': data[section].get('driver'),
                        'provider': data[section].get('provider'),
                        'ssh_username': data[section].get('ssh_username'),
                        'password': data[section].get('password'),
                        'ssh_key_file': data[section].get('ssh_key_file'),
                        'private_key': data[section].get('private_key')
                    }

        return {}
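A hedged usage sketch: pointed at a cloud config directory, read_confs returns the credential fields from the first file that defines the requested section. The directory and section names here are hypothetical, and the auth selection mirrors the logic in Example #6:

creds = read_confs('/etc/salt/cloud.providers.d', 'my-ec2-provider')
if creds.get('ssh_key_file'):
    auth = {'priv': creds['ssh_key_file']}
elif creds.get('password'):
    auth = {'passwd': creds['password']}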
Example #5
def list_(
    show_all=False, show_disabled=True, where=None, return_yaml=True, offline=False
):
    """
    List the jobs currently scheduled on the minion

    CLI Example:

    .. code-block:: bash

        salt '*' schedule.list

        # Show all jobs including hidden internal jobs
        salt '*' schedule.list show_all=True

        # Hide disabled jobs from list of jobs
        salt '*' schedule.list show_disabled=False

    """

    schedule = {}
    if offline:
        schedule_config = _get_schedule_config_file()
        if os.path.exists(schedule_config):
            with salt.utils.files.fopen(schedule_config) as fp_:
                schedule_yaml = fp_.read()
                if schedule_yaml:
                    # safe_load returns None for whitespace-only files
                    schedule_contents = yaml.safe_load(schedule_yaml) or {}
                    schedule = schedule_contents.get("schedule", {})
    else:
        try:
            with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
                res = __salt__["event.fire"](
                    {"func": "list", "where": where}, "manage_schedule"
                )
                if res:
                    event_ret = event_bus.get_event(
                        tag="/salt/minion/minion_schedule_list_complete", wait=30
                    )
                    if event_ret and event_ret["complete"]:
                        schedule = event_ret["schedule"]
        except KeyError:
            # Effectively a no-op, since we can't really return without an event system
            ret = {}
            ret["comment"] = "Event module not available. Schedule list failed."
            ret["result"] = True
            log.debug("Event module not available. Schedule list failed.")
            return ret

    _hidden = ["enabled", "skip_function", "skip_during_range"]
    for job in list(schedule.keys()):  # iterate over a copy since we will mutate it
        if job in _hidden:
            continue

        # Default jobs added by salt begin with __
        # by default hide them unless show_all is True.
        if job.startswith("__") and not show_all:
            del schedule[job]
            continue

        # if enabled is not included in the job,
        # assume job is enabled.
        if "enabled" not in schedule[job]:
            schedule[job]["enabled"] = True

        for item in pycopy.copy(schedule[job]):
            if item not in SCHEDULE_CONF:
                del schedule[job][item]
                continue
            if schedule[job][item] is None:
                del schedule[job][item]
                continue
            if schedule[job][item] == "true":
                schedule[job][item] = True
            if schedule[job][item] == "false":
                schedule[job][item] = False

        # if the job is disabled and show_disabled is False, skip job
        if not show_disabled and not schedule[job]["enabled"]:
            del schedule[job]
            continue

        if "_seconds" in schedule[job]:
            # remove _seconds from the listing
            del schedule[job]["_seconds"]

    if return_yaml:
        tmp = {"schedule": schedule}
        return salt.utils.yaml.safe_dump(tmp, default_flow_style=False)
    else:
        return schedule
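A usage sketch, assuming the usual execution-module context: with return_yaml left at its default the caller gets back YAML text, while return_yaml=False yields the plain dict.

# Offline: read the schedule straight from the minion's config file
print(list_(offline=True))

# Live: query over the event bus and work with the dict form
jobs = list_(return_yaml=False)
for name, job in jobs.items():
    print(name, job.get("enabled"))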
Example #6
def create_node(name=None,
                profile=None,
                user='******',
                roster='/etc/salt/roster',
                sudo=True,
                use_map=False,
                map_file=None):
    '''
    Create a cloud instance using salt-cloud and add it to the cluster roster

    .. code-block:: bash

        salt master-minion salt_cluster.create_node jmoney-master linode-centos-7 root /tmp/roster
    '''
    if not use_map:
        creds = _get_driver_creds(profile)

        if not creds:
            raise CommandExecutionError(
                'Could not find profile or provider data for {0}'.format(
                    profile))

        if 'driver' in creds:
            driver = creds['driver']
        else:
            raise CommandExecutionError(
                'Could not find cloud driver info for {0}'.format(profile))

        if 'ssh_username' in creds:
            user = creds['ssh_username']

        if 'password' in creds and 'private_key' not in creds:
            auth = {'passwd': creds['password']}
        elif 'ssh_key_file' in creds:
            auth = {'priv': creds['ssh_key_file']}
        elif 'private_key' in creds:
            auth = {'priv': creds['private_key']}
        else:
            raise CommandExecutionError(
                'Could not find login auth info for {0}'.format(profile))

    ret = ''

    if use_map:
        if not map_file:
            raise CommandExecutionError(
                'map_file is not specified alongside use_map')
        args = ['-m', map_file]
    else:
        args = ['--no-deploy', '--profile', profile, name]

    res = __salt__['cmd.run_all'](_cmd(*args))

    # assume that the cloud response is a json object or list and strip any
    # non-json messages
    stdout = res['stdout'].splitlines()
    index = 0
    for index, line in enumerate(stdout):
        line = line.strip()
        if line.startswith('[') or line.startswith('{'):
            break
    ret += '\n'.join(stdout[:index])  # return message to user
    log.debug('return value: %s', stdout)
    res['stdout'] = '\n'.join(stdout[index:])

    try:
        info = json.loads(res['stdout'])
    except (TypeError, ValueError) as error:
        raise CommandExecutionError(
            'Could not read json from salt-cloud: {0}: {1}'.format(
                error, res['stderr']))

    if use_map:
        with open(map_file, 'r') as conf:
            try:
                map_data = yaml.safe_load(conf.read()) or {}
            except yaml.YAMLError as exc:
                raise CommandExecutionError(
                    'Could not parse map file {0}: {1}'.format(map_file, exc))

        msg = []
        for profile in map_data:
            for name in map_data[profile]:
                # each map entry is a single-key dict: {vm_name: settings}
                [(name, args)] = name.items()
                creds = _get_driver_creds(profile)

                if not creds:
                    raise CommandExecutionError(
                        'Could not find profile or provider data for {0}'.
                        format(profile))

                if 'driver' in creds:
                    driver = creds['driver']
                else:
                    raise CommandExecutionError(
                        'Could not find cloud driver info for {0}'.format(
                            profile))

                if 'ssh_username' in creds:
                    user = creds['ssh_username']

                if 'password' in creds and 'private_key' not in creds:
                    auth = {'passwd': creds['password']}
                elif 'ssh_key_file' in creds:
                    auth = {'priv': creds['ssh_key_file']}
                elif 'private_key' in creds:
                    auth = {'priv': creds['private_key']}
                else:
                    raise CommandExecutionError(
                        'Could not find login auth info for {0}'.format(
                            profile))

                ip_addr = _get_ip_addr(driver, info, name)
                if ip_addr:
                    # Don't add Windows hosts to the ssh roster
                    if 'win' in name:
                        msg.append(
                            'Did not add {0} to roster file because it is a '
                            'Windows VM'.format(name))
                    else:
                        add_roster = _add_to_roster(roster, name, ip_addr,
                                                    user, auth, sudo)
                        log.debug('add_to_roster call is: %s', add_roster)
                        msg.append('Created node {0} from profile {1}'.format(
                            name, profile))
        if msg:
            return '\n'.join(msg)
    else:
        ip_addr = _get_ip_addr(driver, info, name)
        if ip_addr:
            add_roster = _add_to_roster(roster, name, ip_addr, user, auth,
                                        sudo)
            log.debug('add_to_roster call is: %s', add_roster)
            msg = 'Created node {0} from profile {1}'.format(name, profile)
            return msg

    error = 'Failed to create node {0} from profile {1}: {2}'.format(
        name, profile, res['stderr'])
    log.error(error)
    return (False, error)
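The map-file branch assumes each profile key maps to a list of single-key dicts, one per VM, which is what the `[(name, args)] = name.items()` unpacking relies on. A sketch of the parsed shape, with hypothetical names:

# What yaml.safe_load() is assumed to return for the map file,
# i.e. {profile: [{vm_name: settings}, ...]}:
map_data = {
    "linode-centos-7": [
        {"jmoney-master": {"minion": {"master": "10.0.0.1"}}},
        {"jmoney-minion-1": {}},
    ],
}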