Esempio n. 1
0
def tables(schema):
    '''
    Displays all the tables that are
    present in the given schema

    CLI Example::

        salt '*' drizzle.tables schema_name
    '''

    # Initializing the required variables
    ret_val = {}
    drizzle_db = _connect()
    cursor = drizzle_db.cursor()

    # Fetching tables
    try:
        cursor.execute('SHOW TABLES IN {0}'.format(schema))
    except MySQLdb.OperationalError:
        # Close the cursor and connection even on failure so they are
        # not leaked when the schema does not exist.
        cursor.close()
        drizzle_db.close()
        return 'Unknown Schema'

    # Build a 1-indexed mapping of table names.  A single range() provides
    # the 1-based counter directly; the original zip(range(...), range(...))
    # construct only produced that counter and shadowed the iter() builtin.
    for count in range(1, cursor.rowcount + 1):
        table = cursor.fetchone()
        ret_val[count] = table[0]

    cursor.close()
    drizzle_db.close()
    return ret_val
Esempio n. 2
0
def plugins():
    '''
    Fetches the plugins added to the database server

    CLI Example::

        salt '*' drizzle.plugins
    '''

    # Initializing the required variables
    ret_val = {}
    drizzle_db = _connect()
    cursor = drizzle_db.cursor()

    # Fetching the plugins
    query = 'SELECT PLUGIN_NAME FROM DATA_DICTIONARY.PLUGINS WHERE IS_ACTIVE LIKE "YES"'
    cursor.execute(query)
    # Build a 1-indexed mapping of plugin names; a single range() replaces
    # the original zip(range(...), range(...)) counter (which also shadowed
    # the iter() builtin) and the unused 'count = 1' initializer.
    for count in range(1, cursor.rowcount + 1):
        plugin = cursor.fetchone()
        ret_val[count] = plugin[0]

    cursor.close()
    drizzle_db.close()
    return ret_val
Esempio n. 3
0
    def test_reload(self):
        # the module must not be present before any update happened
        self.assertNotIn(self.module_key, self.loader)

        def _assert_counts():
            # after a clear, the loader must reflect the current counters
            self.loader.clear()
            self.assertEqual(self.loader[self.module_key](),
                             (self.count, self.lib_count))

        # update both the module and the lib
        for _ in range(1, 3):
            self.update_module()
            self.update_lib()
            _assert_counts()

        # update just the module
        for _ in range(1, 3):
            self.update_module()
            _assert_counts()

        # update just the lib
        for _ in range(1, 3):
            self.update_lib()
            _assert_counts()

        self.rm_module()
        # even after removal the module stays loaded until a clear happens
        self.assertEqual(self.loader[self.module_key](),
                         (self.count, self.lib_count))
        self.loader.clear()
        self.assertNotIn(self.module_key, self.loader)
Esempio n. 4
0
def _xfs_inventory_output(out):
    '''
    Transform xfsrestore inventory data output to a Python dict source and evaluate it.

    :param out: raw multi-line text produced by the xfsrestore inventory command.
    :return: nested dict of the inventory with a 'restore_status' key holding
             the final output line, or just {'restore_status': ...} when no
             inventory exists yet.
    '''
    data = []
    # Drop blank lines
    out = [line for line in out.split("\n") if line.strip()]

    # No inventory yet
    if len(out) == 1 and 'restore status' in out[0].lower():
        return {'restore_status': out[0]}

    # 'ident' tracks the current tab-nesting depth of the inventory tree;
    # 'data' accumulates lines of Python dict-literal source text.
    ident = 0
    data.append("{")
    for line in out[:-1]:
        if len([elm for elm in line.strip().split(":") if elm]) == 1:
            # Section header line: its nesting depth is the number of tabs
            n_ident = len(re.sub("[^\t]", "", line))
            if ident > n_ident:
                # Close every dict opened deeper than the new depth
                for step in range(ident):
                    data.append("},")
            ident = n_ident
            data.append(_xr_to_keyset(line))
            data.append("{")
        else:
            # key: value pair within the current section
            data.append(_xr_to_keyset(line))
    # Close all dicts still open at the end of the listing
    for step in range(ident + 1):
        data.append("},")
    data.append("},")

    # We are evaling into a python dict, a json load
    # would be safer
    # NOTE(review): eval() of constructed source is risky if the inventory can
    # ever contain untrusted text; ast.literal_eval would be a safer drop-in.
    data = eval('\n'.join(data))[0]  # pylint: disable=W0123
    data['restore_status'] = out[-1]

    return data
Esempio n. 5
0
def hex2ip(hex_ip, invert=False):
    '''
    Convert a hex string to an ip, if a failure occurs the original hex is
    returned. If 'invert=True' assume that ip from /proc/net/<proto>
    '''
    # 32 hex characters -> IPv6 address
    if len(hex_ip) == 32:
        groups = []
        for offset in range(0, 32, 8):
            word = hex_ip[offset:offset + 8]
            octets = [word[pos:pos + 2] for pos in range(0, 8, 2)]
            if invert:
                groups.append("{0[3]}{0[2]}:{0[1]}{0[0]}".format(octets))
            else:
                groups.append("{0[0]}{0[1]}:{0[2]}{0[3]}".format(octets))
        try:
            return ipaddress.IPv6Address(":".join(groups)).compressed
        except ipaddress.AddressValueError as ex:
            log.error('hex2ip - ipv6 address error: {0}'.format(ex))
            return hex_ip

    # anything else is treated as an IPv4 address in hex
    try:
        addr = int(hex_ip, 16)
    except ValueError:
        # not parseable as hex: hand back the original string
        return hex_ip
    octets = [addr >> 24 & 255, addr >> 16 & 255, addr >> 8 & 255, addr & 255]
    if invert:
        octets.reverse()
    return '{0}.{1}.{2}.{3}'.format(*octets)
Esempio n. 6
0
    def sunos_netdev():
        '''
        sunos specific implementation of netdev

        Returns a dict keyed by interface name with the per-device counters
        parsed from ``netstat -i`` output (IPv4 and IPv6).
        '''
        ret = {}
        # NOTE: we cannot use hwaddr_interfaces here, so we grab both ip4 and ip6.
        # dict.keys() views cannot be concatenated with '+' on Python 3, so
        # materialize both key sets as lists first.
        for dev in list(__grains__['ip4_interfaces']) + list(__grains__['ip6_interfaces']):
            # fetch device info
            netstat_ipv4 = __salt__['cmd.run']('netstat -i -I {dev} -n -f inet'.format(dev=dev)).splitlines()
            netstat_ipv6 = __salt__['cmd.run']('netstat -i -I {dev} -n -f inet6'.format(dev=dev)).splitlines()

            # prepare data: split the header row and the single data row
            netstat_ipv4[0] = netstat_ipv4[0].split()
            netstat_ipv4[1] = netstat_ipv4[1].split()
            netstat_ipv6[0] = netstat_ipv6[0].split()
            netstat_ipv6[1] = netstat_ipv6[1].split()

            # add data
            ret[dev] = {}
            # NOTE(review): the -1 skips netstat's last column — presumably
            # deliberate; confirm against actual `netstat -i` output.
            for i in range(len(netstat_ipv4[0])-1):
                if netstat_ipv4[0][i] == 'Name':
                    continue
                if netstat_ipv4[0][i] in ['Address', 'Net/Dest']:
                    ret[dev]['IPv4 {field}'.format(field=netstat_ipv4[0][i])] = netstat_ipv4[1][i]
                else:
                    ret[dev][netstat_ipv4[0][i]] = _number(netstat_ipv4[1][i])
            for i in range(len(netstat_ipv6[0])-1):
                if netstat_ipv6[0][i] == 'Name':
                    continue
                if netstat_ipv6[0][i] in ['Address', 'Net/Dest']:
                    ret[dev]['IPv6 {field}'.format(field=netstat_ipv6[0][i])] = netstat_ipv6[1][i]
                else:
                    ret[dev][netstat_ipv6[0][i]] = _number(netstat_ipv6[1][i])

        return ret
Esempio n. 7
0
 def test_event_many_backlog(self):
     '''Test a large number of events, send all then recv all'''
     with eventpublisher_process():
         me = event.MasterEvent(SOCK_DIR, listen=True)
         # Must not exceed zmq HWM
         for i in range(500):
             me.fire_event({'data': '{0}'.format(i)}, 'testevents')
         # events must come back in the same order they were fired
         for i in range(500):
             evt = me.get_event(tag='testevents')
             self.assertGotEvent(evt, {'data': '{0}'.format(i)}, 'Event {0}'.format(i))
Esempio n. 8
0
def _gather_update_categories(updateCollection):
    categories = []
    for i in range(updateCollection.Count):
        update = updateCollection.Item(i)
        for j in range(update.Categories.Count):
            name = update.Categories.Item(j).Name
            if name not in categories:
                log.debug('found category: {0}'.format(name))
                categories.append(name)
    return categories
Esempio n. 9
0
 def linux_netstats():
     '''
     linux specific netstats implementation

     Parses /proc/net/netstat (a Linux procfs file) into a dict of
     {section: {counter: value}}; returns {} when the file is absent.
     '''
     procf = '/proc/net/netstat'
     if not os.path.isfile(procf):
         return {}
     stats = salt.utils.fopen(procf, 'r').read().splitlines()
     ret = {}
     headers = ['']
     # /proc/net/netstat comes in pairs of lines: a header line of counter
     # names followed by a value line, both starting with the same
     # "Prefix:" token.
     for line in stats:
         if not line:
             continue
         comps = line.split()
         if comps[0] == headers[0]:
             # value line matching the previously remembered header line
             index = len(headers) - 1
             row = {}
             for field in range(index):
                 if field < 1:
                     # skip the "Prefix:" token itself
                     continue
                 else:
                     row[headers[field]] = _number(comps[field])
             rowname = headers[0].replace(':', '')
             ret[rowname] = row
         else:
             # header line: remember the column names for the next line
             headers = comps
     return ret
Esempio n. 10
0
    def GetInstallationResults(self):
        '''
        Return a dict mapping 'update <n>' to '<result code>: <title>' for
        every update in the install collection; {} when the collection is
        empty.
        '''
        # if the blugger is empty, the results are nil.
        log.debug('blugger has {0} updates in it'.format(self.install_collection.Count))
        if self.install_collection.Count == 0:
            return {}

        updates = []
        log.debug('repairing update list')
        for i in range(self.install_collection.Count):
            # the result code comes from install_results, but the title comes
            # from the parallel update collection install_collection.
            updates.append('{0}: {1}'.format(
                self.install_results.GetUpdateResult(i).ResultCode,
                self.install_collection.Item(i).Title))

        log.debug('Update results enumerated, now making a library to pass back')
        results = {}

        # translates the list of update results into a library that salt expects.
        for i, update in enumerate(updates):
            results['update {0}'.format(i)] = update

        # fixed typo: original log message said "complied"
        log.debug('Update information compiled. returning')
        return results
Esempio n. 11
0
File: cron.py Progetto: bryson/salt
def rm_special(user, special, cmd):
    '''
    Remove a special cron job for a specified user.

    CLI Example:

    .. code-block:: bash

        salt '*' cron.rm_job root @hourly /usr/bin/foo
    '''
    lst = list_tab(user)
    ret = 'absent'
    removed = False
    # Iterate indices in reverse so that pop() does not shift entries we
    # have not yet examined: the original forward in-place pop could skip
    # matching entries or raise IndexError when more than one matched.
    for ind in range(len(lst['special']) - 1, -1, -1):
        if lst['special'][ind]['cmd'] == cmd and \
                lst['special'][ind]['spec'] == special:
            lst['special'].pop(ind)
            removed = True
    if removed:
        ret = 'removed'
        comdat = _write_cron_lines(user, _render_tab(lst))
        if comdat['retcode']:
            # Failed to commit
            return comdat['stderr']
    return ret
Esempio n. 12
0
def update_function_config(FunctionName, Role=None, Handler=None,
            Description=None, Timeout=None, MemorySize=None,
            region=None, key=None, keyid=None, profile=None, VpcConfig=None,
            WaitForRole=False, RoleRetries=5):
    '''
    Update the named lambda function to the configuration.

    Returns {updated: true} if the function was updated and returns
    {updated: False} if the function was not updated.

    Optional settings (Role, Handler, Description, Timeout, MemorySize,
    VpcConfig) are only sent to AWS when given a truthy value.  When
    WaitForRole is True, the update is retried up to RoleRetries times with
    exponential backoff to let a newly-created IAM role propagate.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_lamba.update_function_config my_function my_role my_file.my_function "my lambda function"

    '''

    args = dict(FunctionName=FunctionName)
    options = {'Handler': Handler,
               'Description': Description,
               'Timeout': Timeout,
               'MemorySize': MemorySize,
               'VpcConfig': VpcConfig}

    # Only forward options the caller actually supplied (truthy values)
    for val, var in six.iteritems(options):
        if var:
            args[val] = var
    if Role:
        # Resolve the role name to its full ARN for the API call
        role_arn = _get_role_arn(Role, region, key, keyid, profile)
        args['Role'] = role_arn
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        if WaitForRole:
            retrycount = RoleRetries
        else:
            retrycount = 1
        # Count down so that 'retry > 1' means attempts remain; on the last
        # attempt the ClientError is re-raised instead of retried.
        for retry in range(retrycount, 0, -1):
            try:
                r = conn.update_function_configuration(**args)
            except ClientError as e:
                if retry > 1 and e.response.get('Error', {}).get('Code') == 'InvalidParameterValueException':
                    log.info('Function not updated but IAM role may not have propagated, will retry')
                    # exponential backoff
                    time.sleep((2 ** (RoleRetries - retry)) + (random.randint(0, 1000) / 1000))
                    continue
                else:
                    raise
            else:
                break
        if r:
            # Trim the API response down to the fields salt reports back
            keys = ('FunctionName', 'Runtime', 'Role', 'Handler', 'CodeSha256',
                'CodeSize', 'Description', 'Timeout', 'MemorySize', 'FunctionArn',
                'LastModified', 'VpcConfig')
            return {'updated': True, 'function': dict([(k, r.get(k)) for k in keys])}
        else:
            log.warning('Function was not updated')
            return {'updated': False}
    except ClientError as e:
        return {'updated': False, 'error': salt.utils.boto3.get_error(e)}
Esempio n. 13
0
def _retry_get_url(url, num_retries=10, timeout=5):
    '''
    Retry grabbing a URL.
    Based heavily on boto.utils.retry_url

    Returns the response body ('' on HTTP error or after exhausting all
    retries); retries transient failures with exponential backoff.
    '''
    for i in range(0, num_retries):
        try:
            result = requests.get(url, timeout=timeout, proxies={'http': ''})
            if hasattr(result, 'text'):
                return result.text
            elif hasattr(result, 'content'):
                return result.content
            else:
                return ''
        except requests.exceptions.HTTPError:
            return ''
        except Exception as exc:  # pylint: disable=broad-except
            # Log inside the handler: on Python 3 the 'exc' name is deleted
            # when the except block exits, so the original code's reference
            # to it afterwards raised NameError instead of logging.
            log.warning(
                'Caught exception reading from URL. Retry no. {0}'.format(i)
            )
            log.warning(pprint.pformat(exc))

        # Only reachable after a non-HTTP exception: back off and retry
        time.sleep(2 ** i)
    log.error(
        'Failed to read from URL for {0} times. Giving up.'.format(num_retries)
    )
    return ''
Esempio n. 14
0
def check_inheritance(path, objectType):
    '''
    check a specified path to verify if inheritance is enabled
    returns 'Inheritance' of True/False

    hkey: HKEY_LOCAL_MACHINE, HKEY_CURRENT_USER, etc
    path:  path of the registry key to check
    '''

    ret = {'result': False,
           'Inheritance': False,
           'comment': []}
    dc = daclConstants()
    obj_type_bit = dc.getObjectTypeBit(objectType)
    norm_path = dc.processPath(path, obj_type_bit)

    try:
        descriptor = win32security.GetNamedSecurityInfo(
            norm_path, obj_type_bit, win32security.DACL_SECURITY_INFORMATION)
        dacls = descriptor.GetSecurityDescriptorDacl()
    except Exception as e:
        ret['comment'].append((
            'Error obtaining the Security Descriptor or DACL of the path:  {0}'
            ).format(e))
        return ret

    # inheritance is enabled if any ACE carries the INHERITED_ACE flag
    inherited_flag = win32security.INHERITED_ACE
    for ace_idx in range(dacls.GetAceCount()):
        ace = dacls.GetAce(ace_idx)
        if (ace[0][1] & inherited_flag) == inherited_flag:
            ret['Inheritance'] = True
    ret['result'] = True
    return ret
Esempio n. 15
0
def exactly_n(l, n=1):
    '''
    Tests that exactly N items in an iterable are "truthy" (neither None,
    False, nor 0).
    '''
    # Consume the iterator lazily: each any() advances past one truthy item.
    it = iter(l)
    found_n = all(any(it) for _ in range(n))
    # If n truthy items were found, the remainder must contain none.
    return found_n and not any(it)
Esempio n. 16
0
File: dracr.py Progetto: mahak/salt
def nameservers(ns,
                host=None,
                admin_username=None,
                admin_password=None,
                module=None):
    '''
    Configure the nameservers on the DRAC

    CLI Example:

    .. code-block:: bash

        salt dell dracr.nameservers [NAMESERVERS]
        salt dell dracr.nameservers ns1.example.com ns2.example.com
            admin_username=root admin_password=calvin module=server-1
            host=192.168.1.1
    '''
    # racadm exposes only two DNS server slots
    if len(ns) > 2:
        log.warning('racadm only supports two nameservers')
        return False

    # write each server into its 1-based cfgDNSServerN slot
    for slot, server in enumerate(ns, start=1):
        command = ('config -g cfgLanNetworking -o '
                   'cfgDNSServer{0} {1}'.format(slot, server))
        if not __execute_cmd(command,
                             host=host,
                             admin_username=admin_username,
                             admin_password=admin_password,
                             module=module):
            return False

    return True
Esempio n. 17
0
    def test_any_future(self):
        '''
        Test that the Any Future does what we think it does
        '''
        # create a few futures
        futures = []
        for x in range(0, 3):
            future = tornado.concurrent.Future()
            future.add_done_callback(self.stop)
            futures.append(future)

        # create an any future, make sure it isn't immediately done
        any_ = saltnado.Any(futures)
        self.assertIs(any_.done(), False)

        # finish one, lets see who finishes
        futures[0].set_result('foo')
        self.wait()

        # Any resolves as soon as the first member future resolves; the
        # remaining futures stay pending.
        self.assertIs(any_.done(), True)
        self.assertIs(futures[0].done(), True)
        self.assertIs(futures[1].done(), False)
        self.assertIs(futures[2].done(), False)

        # make sure it returned the one that finished
        self.assertEqual(any_.result(), futures[0])

        futures = futures[1:]
        # re-wait on some other futures
        any_ = saltnado.Any(futures)
        futures[0].set_result('foo')
        self.wait()
        self.assertIs(any_.done(), True)
        self.assertIs(futures[0].done(), True)
        self.assertIs(futures[1].done(), False)
Esempio n. 18
0
File: znc.py Progetto: DaveQB/salt
def _makepass(password, hasher='sha256'):
    '''
    Create a znc compatible hashed password
    '''
    # Setup the hasher
    if hasher == 'sha256':
        h = hashlib.sha256(password)
    elif hasher == 'md5':
        h = hashlib.md5(password)
    else:
        return NotImplemented

    c = "abcdefghijklmnopqrstuvwxyz" \
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
        "0123456789!?.,:;/*-+_()"
    r = {
        'Method': h.name,
        'Salt': ''.join(random.SystemRandom().choice(c) for x in range(20)),
    }

    # Salt the password hash
    h.update(r['Salt'])
    r['Hash'] = h.hexdigest()

    return r
Esempio n. 19
0
def _pretty_hex(hex_str):
    '''
    Nicely formats hex strings
    '''
    if len(hex_str) % 2 != 0:
        hex_str = '0' + hex_str
    return ':'.join([hex_str[i:i+2] for i in range(0, len(hex_str), 2)]).upper()
Esempio n. 20
0
File: cloud.py Progetto: DaveQB/salt
 def test_mutually_exclusive_list_options(self):
     '''
     Every pair drawn from the --list-* options must be rejected by
     salt-cloud as mutually exclusive.
     '''
     test_options = ['--list-locations', '--list-images', '--list-sizes']
     while True:
         # pair the first remaining option with each of the later ones
         for idx in range(1, len(test_options)):
             output = self.run_cloud(
                 '{0} ec2 {1} ec2'.format(
                     test_options[0], test_options[idx]
                 ), catch_stderr=True
             )
             try:
                 self.assertIn(
                     'salt-cloud: error: The options {0}/{1} are mutually '
                     'exclusive. Please only choose one of them'.format(
                         test_options[0], test_options[idx]
                     ),
                     output[1]
                 )
             except AssertionError:
                 # dump the captured output to aid debugging, then re-raise
                 print(output)
                 raise
         # Remove the first option from the list
         test_options.pop(0)
         if len(test_options) <= 1:
             # Only one left? Stop iterating
             break
Esempio n. 21
0
def find_room(name, api_key=None):
    '''
    Find a room by name and return it.

    :param name:    The room name.
    :param api_key: The Slack admin api key.
    :return:        The room object, or False if not found or if the room
                    listing failed.

    CLI Example:

    .. code-block:: bash

        salt '*' slack.find_room name="random"

        salt '*' slack.find_room name="random" api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15
    '''
    if not api_key:
        api_key = _get_api_key()

    # search results don't include the name of the
    # channel with a hash, if the passed channel name
    # has a hash we remove it.
    if name.startswith('#'):
        name = name[1:]
    ret = list_rooms(api_key)
    if ret['res']:
        # iterate the rooms directly instead of indexing by range(len(...))
        for room in ret['message'] or ():
            if room['name'] == name:
                return room
    return False
Esempio n. 22
0
def find_user(name, api_key=None):
    '''
    Find a user by name and return it.

    :param name:        The user name.
    :param api_key:     The Slack admin api key.
    :return:            The user object, or False if not found or if the
                        user listing failed.

    CLI Example:

    .. code-block:: bash

        salt '*' slack.find_user name="ThomasHatch"

        salt '*' slack.find_user name="ThomasHatch" api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15
    '''
    if not api_key:
        api_key = _get_api_key()

    ret = list_users(api_key)
    if ret['res']:
        # iterate the users directly instead of indexing by range(len(...))
        for user in ret['message'] or ():
            if user['name'] == name:
                return user
    return False
Esempio n. 23
0
def _dmi_isclean(key, val):
    '''
    Clean out well-known bogus values
    '''
    if val is None or not len(val) or re.match('none', val, flags=re.IGNORECASE):
        # log.debug('DMI {0} value {1} seems invalid or empty'.format(key, val))
        return False
    elif 'uuid' in key:
        # Try each version (1-5) of RFC4122 to check if it's actually a UUID
        for uuidver in range(1, 5):
            try:
                uuid.UUID(val, version=uuidver)
                return True
            except ValueError:
                continue
        log.trace('DMI {0} value {1} is an invalid UUID'.format(key, val.replace('\n', ' ')))
        return False
    elif re.search('serial|part|version', key):
        # 'To be filled by O.E.M.
        # 'Not specified' etc.
        # 0000000, 1234667 etc.
        # begone!
        return not re.match(r'^[0]+$', val) \
                and not re.match(r'[0]?1234567[8]?[9]?[0]?', val) \
                and not re.search(r'sernum|part[_-]?number|specified|filled', val, flags=re.IGNORECASE)
    elif re.search('asset|manufacturer', key):
        # AssetTag0. Manufacturer04. Begone.
        return not re.search(r'manufacturer|to be filled|available|asset|^no(ne|t)', val, flags=re.IGNORECASE)
    else:
        # map unspecified, undefined, unknown & whatever to None
        return not re.search(r'to be filled', val, flags=re.IGNORECASE) \
            and not re.search(r'un(known|specified)|no(t|ne)? (asset|provided|defined|available|present|specified)',
                              val, flags=re.IGNORECASE)
Esempio n. 24
0
    def test_update_secret(self):
        '''
        Update an existing kubernetes secret via k8s.update_secret and verify
        the stored data through kubectl.
        '''
        name = self.name
        filename = "/tmp/{0}.json".format(name)
        with open(filename, 'w') as f:
            json.dump(self.request, f)

        create = Popen(["kubectl", "--namespace=default", "create", "-f", filename], stdout=PIPE)
        # we need to give kubernetes time to save data in etcd
        time.sleep(0.1)
        expected_data = {}
        names = []
        # Write three updated secret files and record the base64 payloads
        # kubectl is expected to report back.
        # NOTE(review): base64.b64encode() on a str only works on Python 2;
        # Python 3 requires bytes — confirm the intended interpreter.
        for i in range(3):
            names.append("/tmp/{0}-{1}-updated".format(name, i))
            with open("/tmp/{0}-{1}-updated".format(name, i), 'w') as f:
                expected_data["{0}-{1}-updated".format(name, i)] = base64.b64encode("{0}{1}-updated".format(name, i))
                f.write("{0}{1}-updated".format(name, i))

        res = k8s.update_secret("default", name, names, apiserver_url="http://127.0.0.1:8080")
        # if creation failed, kubernetes returns a non-json error message
        proc = Popen(["kubectl", "--namespace=default", "get", "secrets", name, "-o", "json"], stdout=PIPE)
        kubectl_out = json.loads(proc.communicate()[0])
        b = kubectl_out.get("data", {})
        self.assertTrue(isinstance(kubectl_out, dict))
        self.assertEqual(expected_data, b)
def generate_selectors(labels=None, *fields, **kwargs):
    '''
    Create an element list based in another objects labels that will create
    a value of True in the corresponding element if in either selectors
    or kwargs, otherwise False.

    Args:
        labels: iterable of label names to test.

    Example:
        >>> labels = ['one', 'two', 'three', 'four']
        >>> generate_selectors(labels, 'two', 'three')
        [False, True, True, False]

    (The original docstring example passed a list positionally, which
    lands as a single element of *fields and would return all False.)
    '''
    if not labels:
        return []

    # 'all' (as a positional field or kwarg key) enables every selector
    if 'all' in fields or 'all' in kwargs:
        return [True] * len(labels)

    return [label in fields or label in kwargs for label in labels]
Esempio n. 26
0
def netstats():
    '''
    Return the network stats for this minion

    CLI Example:

    .. code-block:: bash

        salt '*' status.netstats
    '''
    procf = '/proc/net/netstat'
    if not os.path.isfile(procf):
        return {}
    stats = salt.utils.fopen(procf, 'r').read().splitlines()
    ret = {}
    headers = ['']
    # /proc/net/netstat comes in pairs of lines: a header line of counter
    # names followed by a value line, both starting with the same
    # "Prefix:" token.
    for line in stats:
        if not line:
            continue
        comps = line.split()
        if comps[0] == headers[0]:
            # value line matching the previously remembered header line
            index = len(headers) - 1
            row = {}
            for field in range(index):
                if field < 1:
                    # skip the "Prefix:" token itself
                    continue
                else:
                    row[headers[field]] = _number(comps[field])
            rowname = headers[0].replace(':', '')
            ret[rowname] = row
        else:
            # header line: remember the column names for the next line
            headers = comps
    return ret
Esempio n. 27
0
def stop(name):
    '''
    Stop the specified service

    CLI Example:

    .. code-block:: bash

        salt '*' service.stop <service name>
    '''
    # net stop issues a stop command and waits briefly (~30s), but will give
    # up if the service takes too long to stop with a misleading
    # "service could not be stopped" message and RC 0.
    res = __salt__['cmd.run'](['net', 'stop', name], python_shell=False)
    if 'service was stopped' in res:
        return True

    # The stop was requested but the service may still be shutting down;
    # poll the real status until it stops or we run out of attempts.
    for _ in range(SERVICE_STOP_POLL_MAX_ATTEMPTS):
        if not status(name):
            return True
        log.debug('Waiting for %s to stop', name)
        time.sleep(SERVICE_STOP_DELAY_SECONDS)

    log.warning('Giving up on waiting for service `%s` to stop', name)
    return False
Esempio n. 28
0
def waitfor_job(conn=None, LinodeID=None, JobID=None, timeout=300, quiet=True):
    '''
    Poll a Linode job until it reports HOST_SUCCESS, checking every 5
    seconds up to ``timeout`` seconds.

    Returns True on success, False on API error or timeout.
    '''
    if not conn:
        conn = get_conn()

    interval = 5
    iterations = int(timeout / interval)

    for i in range(0, iterations):
        try:
            result = conn.linode_job_list(LinodeID=LinodeID, JobID=JobID)
        except linode.ApiError as exc:
            # fixed: the original passed LinodeID/JobID in swapped order
            # relative to the "job ... on host ..." wording
            log.info('Waiting for job {0} on host {1} returned {2}'.
                         format(JobID, LinodeID, exc))
            return False

        if result[0]['HOST_SUCCESS'] == 1:
            return True

        time.sleep(interval)
        if not quiet:
            log.info('Still waiting on Job {0} for {1}'.format(JobID,
                                                               LinodeID))
        else:
            log.debug('Still waiting on Job {0} for {1}'.format(JobID,
                                                                LinodeID))
    return False
Esempio n. 29
0
    def test_start_stop(self):
        # start and stop the same VM twice to prove the cycle is repeatable
        for i in range(2):
            machine = vb_start_vm(BOOTABLE_BASE_BOX_NAME, 20000)
            self.assertEqual(machine_get_machinestate_str(machine), "Running")

            machine = vb_stop_vm(BOOTABLE_BASE_BOX_NAME)
            self.assertEqual(machine_get_machinestate_str(machine), "PoweredOff")
Esempio n. 30
0
def waitfor_status(conn=None, LinodeID=None, status=None,
                   timeout=300, quiet=True):
    '''
    Wait for a certain status
    '''
    if not conn:
        conn = get_conn()

    if status is None:
        status = 'Brand New'

    interval = 5
    checks = int(timeout / interval)

    # poll the node every `interval` seconds until it reaches the status
    for _ in range(checks):
        node = get_node(LinodeID)
        if node['state'] == status:
            return True

        time.sleep(interval)
        if quiet:
            log.debug('Status for {0} is {1}'.format(LinodeID, node))
        else:
            log.info('Status for {0} is {1}'.format(LinodeID, node['state']))

    return False
Esempio n. 31
0
File: reg.py Progetto: Sygnia/salt-1
def list_values(hive, key=None, use_32bit_registry=False, include_default=True):
    '''
    Enumerates the values in a registry key or hive.

    :param str hive: The name of the hive. Can be one of the following

        - HKEY_LOCAL_MACHINE or HKLM
        - HKEY_CURRENT_USER or HKCU
        - HKEY_USER or HKU
        - HKEY_CLASSES_ROOT or HKCR
        - HKEY_CURRENT_CONFIG or HKCC

    :param str key: The key (looks like a path) to the value name. If a key is
        not passed, the values under the hive will be returned.

    :param bool use_32bit_registry: Accesses the 32bit portion of the registry
        on 64 bit installations. On 32bit machines this is ignored.

    :param bool include_default: Toggle whether to include the '(Default)' value.
        NOTE(review): this flag is not consulted anywhere in the body —
        '(Default)' is always included; confirm the intended behavior.

    :return: A list of values under the hive or key.
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' reg.list_values HKLM 'SYSTEM\\CurrentControlSet\\Services\\Tcpip'
    '''
    local_hive = _to_unicode(hive)
    local_key = _to_unicode(key)

    registry = Registry()
    hkey = registry.hkeys[local_hive]
    access_mask = registry.registry_32[use_32bit_registry]
    handle = None
    values = list()

    try:
        handle = win32api.RegOpenKeyEx(hkey, local_key, 0, access_mask)

        # RegQueryInfoKey()[1] is the number of values under this key
        for i in range(win32api.RegQueryInfoKey(handle)[1]):
            vname, vdata, vtype = win32api.RegEnumValue(handle, i)

            # an empty value name denotes the key's default value
            if not vname:
                vname = "(Default)"

            value = {'hive':   local_hive,
                     'key':    local_key,
                     'vname':  _to_mbcs(vname),
                     'vtype':  registry.vtype_reverse[vtype],
                     'success': True}
            # Only convert text types to unicode
            if vtype == win32con.REG_MULTI_SZ:
                value['vdata'] = [_to_mbcs(i) for i in vdata]
            elif vtype in [win32con.REG_SZ, win32con.REG_EXPAND_SZ]:
                value['vdata'] = _to_mbcs(vdata)
            else:
                value['vdata'] = vdata
            values.append(value)
    except pywintypes.error as exc:  # pylint: disable=E0602
        # on failure return (False, message) rather than raising
        log.debug(r'Cannot find key: %s\%s', hive, key, exc_info=True)
        return False, r'Cannot find key: {0}\{1}'.format(hive, key)
    finally:
        # always release the registry handle, even on error
        if handle:
            handle.Close()
    return values
Esempio n. 32
0
def port_bindings(val, **kwargs):
    '''
    On the CLI, these are passed as multiple instances of a given CLI option.
    In Salt, we accept these as a comma-delimited list but the API expects a
    Python dictionary mapping ports to their bindings. The format the API
    expects is complicated depending on whether or not the external port maps
    to a different internal port, or if the port binding is for UDP instead of
    TCP (the default). For reference, see the "Port bindings" section in the
    docker-py documentation at the following URL:
    http://docker-py.readthedocs.io/en/stable/api.html

    Each binding has one to three colon-delimited parts:

    - ``container_port`` (or range) published to an ephemeral host port
    - ``host_port:container_port`` (two ranges must be the same length)
    - ``host_ip:host_port:container_port`` (``host_port`` may be empty)

    Append ``/udp`` to the container port to bind UDP instead of TCP.
    '''
    # When False, host IPs in 3-part bindings are not validated (presumably
    # to allow hostnames or deferred validation -- confirm against callers)
    validate_ip_addrs = kwargs.get('validate_ip_addrs', True)
    if not isinstance(val, dict):
        # Normalize the input to a list of binding strings
        if not isinstance(val, list):
            try:
                val = helpers.split(val)
            except AttributeError:
                # Input was not a string (e.g. an int); stringify, then split
                val = helpers.split(six.text_type(val))

        for idx in range(len(val)):
            if not isinstance(val[idx], six.string_types):
                val[idx] = six.text_type(val[idx])

        def _format_port(port_num, proto):
            # UDP container ports are keyed as '<port>/udp'; TCP ports are
            # passed through unchanged (and may remain ints)
            return six.text_type(port_num) + '/udp' if proto.lower(
            ) == 'udp' else port_num

        bindings = {}
        for binding in val:
            bind_parts = helpers.split(binding, ':')
            num_bind_parts = len(bind_parts)
            if num_bind_parts == 1:
                # Single port or port range being passed through (no
                # special mapping)
                container_port = six.text_type(bind_parts[0])
                if container_port == '':
                    raise SaltInvocationError(
                        'Empty port binding definition found')
                container_port, _, proto = container_port.partition('/')
                try:
                    start, end = helpers.get_port_range(container_port)
                except ValueError as exc:
                    # Using __str__() to avoid deprecation warning for using
                    # the message attribute of the ValueError.
                    raise SaltInvocationError(exc.__str__())
                # A None host binding means "publish to an ephemeral port"
                bind_vals = [(_format_port(port_num, proto), None)
                             for port_num in range(start, end + 1)]
            elif num_bind_parts == 2:
                # host_port:container_port -- either side may be a range, in
                # which case both ranges must contain the same number of ports
                if bind_parts[0] == '':
                    raise SaltInvocationError(
                        'Empty host port in port binding definition '
                        '\'{0}\''.format(binding))
                if bind_parts[1] == '':
                    raise SaltInvocationError(
                        'Empty container port in port binding definition '
                        '\'{0}\''.format(binding))
                container_port, _, proto = bind_parts[1].partition('/')
                try:
                    cport_start, cport_end = \
                        helpers.get_port_range(container_port)
                    hport_start, hport_end = \
                        helpers.get_port_range(bind_parts[0])
                except ValueError as exc:
                    # Using __str__() to avoid deprecation warning for
                    # using the message attribute of the ValueError.
                    raise SaltInvocationError(exc.__str__())
                if (hport_end - hport_start) != (cport_end - cport_start):
                    # Port range is mismatched
                    raise SaltInvocationError(
                        'Host port range ({0}) does not have the same '
                        'number of ports as the container port range '
                        '({1})'.format(bind_parts[0], container_port))
                cport_list = list(range(cport_start, cport_end + 1))
                hport_list = list(range(hport_start, hport_end + 1))
                # Pair each container port with its host port, index by index
                bind_vals = [(_format_port(cport_list[x],
                                           proto), hport_list[x])
                             for x in range(len(cport_list))]
            elif num_bind_parts == 3:
                # host_ip:host_port:container_port -- an empty host_port
                # binds the container port(s) to the IP on ephemeral ports
                host_ip, host_port = bind_parts[0:2]
                if validate_ip_addrs:
                    helpers.validate_ip(host_ip)
                container_port, _, proto = bind_parts[2].partition('/')
                try:
                    cport_start, cport_end = \
                        helpers.get_port_range(container_port)
                except ValueError as exc:
                    # Using __str__() to avoid deprecation warning for
                    # using the message attribute of the ValueError.
                    raise SaltInvocationError(exc.__str__())
                cport_list = list(range(cport_start, cport_end + 1))
                if host_port == '':
                    # No host port given: one ephemeral binding per cport
                    hport_list = [None] * len(cport_list)
                else:
                    try:
                        hport_start, hport_end = \
                            helpers.get_port_range(host_port)
                    except ValueError as exc:
                        # Using __str__() to avoid deprecation warning for
                        # using the message attribute of the ValueError.
                        raise SaltInvocationError(exc.__str__())
                    hport_list = list(range(hport_start, hport_end + 1))

                    if (hport_end - hport_start) != (cport_end - cport_start):
                        # Port range is mismatched
                        raise SaltInvocationError(
                            'Host port range ({0}) does not have the same '
                            'number of ports as the container port range '
                            '({1})'.format(host_port, container_port))

                # A bare (host_ip,) tuple means "this IP, ephemeral port"
                bind_vals = [(_format_port(val, proto),
                              (host_ip, ) if hport_list[idx] is None else
                              (host_ip, hport_list[idx]))
                             for idx, val in enumerate(cport_list)]
            else:
                raise SaltInvocationError(
                    '\'{0}\' is an invalid port binding definition (at most '
                    '3 components are allowed, found {1})'.format(
                        binding, num_bind_parts))

            # Merge this binding's (container_port, host_binding) pairs into
            # the accumulated dict; a container port seen more than once
            # collects its host bindings into a list
            for cport, bind_def in bind_vals:
                if cport not in bindings:
                    bindings[cport] = bind_def
                else:
                    if isinstance(bindings[cport], list):
                        # Append to existing list of bindings for this
                        # container port.
                        bindings[cport].append(bind_def)
                    else:
                        bindings[cport] = [bindings[cport], bind_def]
                    for idx in range(len(bindings[cport])):
                        if bindings[cport][idx] is None:
                            # Now that we are adding multiple
                            # bindings
                            try:
                                # Convert 1234/udp to 1234
                                bindings[cport][idx] = int(cport.split('/')[0])
                            except AttributeError:
                                # Port was tcp, the AttributeError
                                # signifies that the split failed
                                # because the port number was
                                # already defined as an integer.
                                # Just use the cport.
                                bindings[cport][idx] = cport
        val = bindings
    return val
Esempio n. 33
0
    def process_results(self, rows):
        '''
            This function takes a list of database results and iterates over,
            merging them into a dict form.

            Each row in ``rows`` is a flat sequence of fields: the first
            ``self.depth`` fields become nested dict keys under
            ``self.focus`` and the remaining fields become values. Levels
            listed in ``self.with_lists`` are converted to lists in a final
            pass. (Attributes such as ``self.as_list``, ``self.ignore_null``,
            ``self.num_fields`` and ``self.field_names`` are assumed to be
            configured elsewhere on this object -- confirm in the
            initializer, which is not visible here.)
        '''
        # listify maps id(dict) -> keys whose values must become lists;
        # listify_dicts maps the same id back to the dict object itself
        listify = OrderedDict()
        listify_dicts = OrderedDict()
        for ret in rows:
            # crd is the Current Return Data level, to make this non-recursive.
            crd = self.focus
            # Walk and create dicts above the final layer
            for i in range(0, self.depth - 1):
                # At the end we'll use listify to find values to make a list of
                if i + 1 in self.with_lists:
                    if id(crd) not in listify:
                        listify[id(crd)] = []
                        listify_dicts[id(crd)] = crd
                    if ret[i] not in listify[id(crd)]:
                        listify[id(crd)].append(ret[i])
                if ret[i] not in crd:
                    # Key missing
                    crd[ret[i]] = {}
                    crd = crd[ret[i]]
                else:
                    # Check type of collision
                    ty = type(crd[ret[i]])
                    if ty is list:
                        # Already made list
                        temp = {}
                        crd[ret[i]].append(temp)
                        crd = temp
                    elif ty is not dict:
                        # Not a list, not a dict
                        if self.as_list:
                            # Make list
                            temp = {}
                            crd[ret[i]] = [crd[ret[i]], temp]
                            crd = temp
                        else:
                            # Overwrite
                            crd[ret[i]] = {}
                            crd = crd[ret[i]]
                    else:
                        # dict, descend.
                        crd = crd[ret[i]]

            # If this test is true, the penultimate field is the key
            if self.depth == self.num_fields - 1:
                nk = self.num_fields - 2  # Aka, self.depth-1
                # Should we and will we have a list at the end?
                if ((self.as_list and (ret[nk] in crd))
                        or (nk + 1 in self.with_lists)):
                    if ret[nk] in crd:
                        if not isinstance(crd[ret[nk]], list):
                            crd[ret[nk]] = [crd[ret[nk]]]
                        # if it's already a list, do nothing
                    else:
                        crd[ret[nk]] = []
                    crd[ret[nk]].append(ret[self.num_fields - 1])
                else:
                    # Store the value unless it is null and nulls are ignored
                    if not self.ignore_null or ret[self.num_fields -
                                                   1] is not None:
                        crd[ret[nk]] = ret[self.num_fields - 1]
            else:
                # Otherwise, the field name is the key but we have a spare.
                # The spare results because of {c: d} vs {c: {"d": d, "e": e }}
                # So, make that last dict
                if ret[self.depth - 1] not in crd:
                    crd[ret[self.depth - 1]] = {}
                # This bit doesn't escape listify
                if self.depth in self.with_lists:
                    if id(crd) not in listify:
                        listify[id(crd)] = []
                        listify_dicts[id(crd)] = crd
                    if ret[self.depth - 1] not in listify[id(crd)]:
                        listify[id(crd)].append(ret[self.depth - 1])
                crd = crd[ret[self.depth - 1]]
                # Now for the remaining keys, we put them into the dict
                for i in range(self.depth, self.num_fields):
                    nk = self.field_names[i]
                    # Listify
                    if i + 1 in self.with_lists:
                        if id(crd) not in listify:
                            listify[id(crd)] = []
                            listify_dicts[id(crd)] = crd
                        if nk not in listify[id(crd)]:
                            listify[id(crd)].append(nk)
                    # Collision detection
                    if self.as_list and (nk in crd):
                        # Same as before...
                        if isinstance(crd[nk], list):
                            crd[nk].append(ret[i])
                        else:
                            crd[nk] = [crd[nk], ret[i]]
                    else:
                        if not self.ignore_null or ret[i] is not None:
                            crd[nk] = ret[i]
        # Get key list and work backwards.  This is inner-out processing
        ks = list(listify_dicts.keys())
        ks.reverse()
        for i in ks:
            d = listify_dicts[i]
            for k in listify[i]:
                if isinstance(d[k], dict):
                    # Flatten the dict's values into a list
                    d[k] = list(d[k].values())
                elif isinstance(d[k], list):
                    # Already a list: wrap it in an outer list
                    d[k] = [d[k]]
Esempio n. 34
0
 def clear(self):
     """Reset the recorder: empty every per-level bucket and drain the
     message list in place (the list object itself is kept)."""
     for level in self._by_level:
         self._by_level[level] = []
     while self._msgs:
         self._msgs.pop()
Esempio n. 35
0
def managed(name,
            dns_proto=None,
            dns_servers=None,
            ip_proto=None,
            ip_addrs=None,
            gateway=None,
            enabled=True,
            **kwargs):
    '''
    Ensure that the named interface is configured properly.

    Args:

        name (str):
            The name of the interface to manage

        dns_proto (str): None
            Set to ``static`` and use the ``dns_servers`` parameter to provide a
            list of DNS nameservers. set to ``dhcp`` to use DHCP to get the DNS
            servers.

        dns_servers (list): None
            A list of static DNS servers. To clear the list of DNS servers pass
            an empty list (``[]``). ``None`` will make no changes.

        ip_proto (str): None
            Set to ``static`` and use the ``ip_addrs`` and (optionally)
            ``gateway`` parameters to provide a list of static IP addresses and
            the default gateway. Set to ``dhcp`` to use DHCP.

        ip_addrs (list): None
            A list of static IP addresses with netmask flag, ie: 192.168.0.11/24

        gateway (str): None
            The gateway to set for the interface

        enabled (bool): True
            Set to ``False`` to ensure that this interface is disabled.

    Returns:
        dict: A dictionary of old and new settings

    Example:

    .. code-block:: yaml

        Ethernet1:
          network.managed:
            - dns_proto: static
            - dns_servers:
              - 8.8.8.8
              - 8.8.8.4
            - ip_proto: static
            - ip_addrs:
              - 192.168.0.100/24

    Clear DNS entries example:

    .. code-block:: yaml

        Ethernet1:
          network.managed:
            - dns_proto: static
            - dns_servers: []
            - ip_proto: dhcp
    '''
    ret = {
        'name': name,
        'changes': {},
        'result': True,
        'comment': 'Interface \'{0}\' is up to date'.format(name)
    }

    # Normalize so 'Static'/'DHCP'/None all compare cleanly below
    dns_proto = six.text_type(dns_proto).lower()
    ip_proto = six.text_type(ip_proto).lower()

    # Validate both protocol arguments; the common error path below sets
    # ret['result'] once for either failure (previously it was also set
    # redundantly inside the dns_proto branch only, which was inconsistent)
    errors = []
    if dns_proto not in __VALID_PROTO:
        errors.append('dns_proto must be one of the following: {0}'.format(
            ', '.join(__VALID_PROTO)))

    if ip_proto not in __VALID_PROTO:
        errors.append('ip_proto must be one of the following: {0}'.format(
            ', '.join(__VALID_PROTO)))

    if errors:
        ret['result'] = False
        ret['comment'] = '\n'.join(errors)
        return ret

    try:
        currently_enabled = __salt__['ip.is_enabled'](name)
    except CommandExecutionError:
        # Unable to query the interface state; treat it as disabled so the
        # enable path below gets a chance to bring it up
        currently_enabled = False

    if not enabled:
        if currently_enabled:
            if __opts__['test']:
                ret['result'] = None
                ret['comment'] = (
                    'Interface \'{0}\' will be disabled'.format(name))
            else:
                ret['result'] = __salt__['ip.disable'](name)
                if not ret['result']:
                    ret['comment'] = (
                        'Failed to disable interface \'{0}\''.format(name))
        else:
            ret['comment'] += ' (already disabled)'
        return ret
    else:
        if not currently_enabled:
            if __opts__['test']:
                ret['result'] = None
                ret['comment'] = (
                    'Interface \'{0}\' will be enabled'.format(name))
            else:
                if not __salt__['ip.enable'](name):
                    ret['result'] = False
                    ret['comment'] = ('Failed to enable interface \'{0}\' to '
                                      'make changes'.format(name))
                    return ret

        errors = _validate(dns_proto, dns_servers, ip_proto, ip_addrs, gateway)
        if errors:
            ret['result'] = False
            ret['comment'] = ('The following SLS configuration errors were '
                              'detected:\n- {0}'.format('\n- '.join(errors)))
            return ret

        old = __salt__['ip.get_interface'](name)
        if not old:
            ret['result'] = False
            ret['comment'] = ('Unable to get current configuration for '
                              'interface \'{0}\''.format(name))
            return ret

        changes = _changes(old, dns_proto, dns_servers, ip_proto, ip_addrs,
                           gateway)

        # If dns_servers is the default `None` make no changes
        # To clear the list, pass an empty dict
        # (six.text_type used for consistency with the normalization above)
        if six.text_type(dns_servers).lower() == 'none':
            changes.pop('dns_servers', None)

        if not changes:
            return ret

        if __opts__['test']:
            comments = []
            if 'dns_proto' in changes:
                comments.append('DNS protocol will be changed to: {0}'.format(
                    changes['dns_proto']))
            if dns_proto == 'static' and 'dns_servers' in changes:
                if len(changes['dns_servers']) == 0:
                    comments.append('The list of DNS servers will be cleared')
                else:
                    comments.append(
                        'DNS servers will be set to the following: {0}'.format(
                            ', '.join(changes['dns_servers'])))
            if 'ip_proto' in changes:
                comments.append('IP protocol will be changed to: {0}'.format(
                    changes['ip_proto']))
            if ip_proto == 'static':
                if 'ip_addrs' in changes:
                    comments.append(
                        'IP addresses will be set to the following: {0}'.
                        format(', '.join(changes['ip_addrs'])))
                if 'gateway' in changes:
                    if changes['gateway'] is None:
                        comments.append('Default gateway will be removed')
                    else:
                        comments.append(
                            'Default gateway will be set to {0}'.format(
                                changes['gateway']))

            ret['result'] = None
            ret['comment'] = ('The following changes will be made to '
                              'interface \'{0}\':\n- {1}'.format(
                                  name, '\n- '.join(comments)))
            return ret

        if changes.get('dns_proto') == 'dhcp':
            __salt__['ip.set_dhcp_dns'](name)

        elif 'dns_servers' in changes:
            if len(changes['dns_servers']) == 0:
                # To clear the list of DNS servers you have to pass []. Later
                # changes gets passed like *args and a single empty list is
                # converted to an empty tuple. So, you have to add [] here
                changes['dns_servers'] = [[]]

            __salt__['ip.set_static_dns'](name, *changes['dns_servers'])

        if changes.get('ip_proto') == 'dhcp':
            __salt__['ip.set_dhcp_ip'](name)
        elif changes.get('ip_addrs') or changes.get('gateway') or changes.get(
                'ip_proto') == 'static':
            if changes.get('gateway') and not changes.get('ip_addrs'):
                changes['ip_addrs'] = ip_addrs
            if changes.get(
                    'ip_proto') == 'static' and not changes.get('ip_addrs'):
                changes['ip_addrs'] = ip_addrs
            # The first address replaces the existing config (append=False)
            # and carries the gateway; additional addresses are appended
            for idx in range(len(changes['ip_addrs'])):
                if idx == 0:
                    __salt__['ip.set_static_ip'](name,
                                                 changes['ip_addrs'][idx],
                                                 gateway=gateway,
                                                 append=False)
                else:
                    __salt__['ip.set_static_ip'](name,
                                                 changes['ip_addrs'][idx],
                                                 gateway=None,
                                                 append=True)

        new = __salt__['ip.get_interface'](name)
        ret['changes'] = salt.utils.data.compare_dicts(old, new)
        # Re-check against the desired state to verify the changes applied
        if _changes(new, dns_proto, dns_servers, ip_proto, ip_addrs, gateway):
            ret['result'] = False
            ret['comment'] = ('Failed to set desired configuration settings '
                              'for interface \'{0}\''.format(name))
        else:
            ret['comment'] = ('Successfully updated configuration for '
                              'interface \'{0}\''.format(name))
        return ret
Esempio n. 36
0
def _first_avail_uid():
    uids = set(x.pw_uid for x in pwd.getpwall())
    for idx in range(501, 2**24):
        if idx not in uids:
            return idx
Esempio n. 37
0
 def test_very_big_message(self):
     """Send a large (~0.5 MB) dict through the channel and verify it
     round-trips intact as the first received payload."""
     big_payload = ''.join(six.text_type(number) for number in range(10 ** 5))
     message = {'long_str': big_payload, 'stop': True}
     self.channel.send(message)
     self.wait()
     self.assertEqual(message, self.payloads[0])
Esempio n. 38
0
    def setUp(self):
        """Reset module state and stub out boto3 sessions for each test."""
        boto_s3_bucket.__context__ = {}
        context.clear()
        # Connections keep getting cached from prior tests and the correct
        # context object to clear cannot be located, so randomize the cache
        # key to guarantee a cache miss.
        charset = string.ascii_lowercase + string.digits
        conn_parameters['key'] = ''.join(
            random.choice(charset) for _ in range(50))

        self.patcher = patch('boto3.session.Session')
        self.addCleanup(self.patcher.stop)
        session_mock = self.patcher.start()

        # Every client created from the mocked session returns self.conn
        self.conn = MagicMock()
        session_mock.return_value.client.return_value = self.conn
Esempio n. 39
0
def _run(name, **kwargs):
    '''
    .. deprecated:: 2017.7.0
       Function name stays the same, behaviour will change.

    Run a single module function

    ``name``
        The module function to execute

    ``returner``
        Specify the returner to send the return of the module execution to

    ``kwargs``
        Pass any arguments needed to execute the function

    Returns a state-style dict with ``name``, ``changes``, ``comment`` and
    ``result`` keys. Arguments that collide with this state's own parameters
    (``name``, ``fun``, ``names``, ``state``, ``saltenv``) must be passed
    with an ``m_`` prefix (e.g. ``m_name``).
    '''
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': None}
    if name not in __salt__:
        ret['comment'] = 'Module function {0} is not available'.format(name)
        ret['result'] = False
        return ret

    if __opts__['test']:
        ret['comment'] = 'Module function {0} is set to execute'.format(name)
        return ret

    # Inspect the target function's signature so positional args can be
    # assembled from kwargs and defaults
    aspec = salt.utils.args.get_function_argspec(__salt__[name])
    args = []
    defaults = {}

    arglen = 0
    deflen = 0
    if isinstance(aspec.args, list):
        arglen = len(aspec.args)
    if isinstance(aspec.defaults, tuple):
        deflen = len(aspec.defaults)
    # Match up the defaults with the respective args (defaults align with
    # the trailing portion of the argument list)
    for ind in range(arglen - 1, -1, -1):
        minus = arglen - ind
        if deflen - minus > -1:
            defaults[aspec.args[ind]] = aspec.defaults[-minus]
    # overwrite passed default kwargs, honoring the m_ prefix convention
    for arg in defaults:
        if arg == 'name':
            if 'm_name' in kwargs:
                defaults[arg] = kwargs.pop('m_name')
        elif arg == 'fun':
            if 'm_fun' in kwargs:
                defaults[arg] = kwargs.pop('m_fun')
        elif arg == 'state':
            if 'm_state' in kwargs:
                defaults[arg] = kwargs.pop('m_state')
        elif arg == 'saltenv':
            if 'm_saltenv' in kwargs:
                defaults[arg] = kwargs.pop('m_saltenv')
        if arg in kwargs:
            defaults[arg] = kwargs.pop(arg)
    # Build the positional argument list, tracking any that were not supplied
    missing = set()
    for arg in aspec.args:
        if arg == 'name':
            rarg = 'm_name'
        elif arg == 'fun':
            rarg = 'm_fun'
        elif arg == 'names':
            rarg = 'm_names'
        elif arg == 'state':
            rarg = 'm_state'
        elif arg == 'saltenv':
            rarg = 'm_saltenv'
        else:
            rarg = arg
        if rarg not in kwargs and arg not in defaults:
            missing.add(rarg)
            continue
        if arg in defaults:
            args.append(defaults[arg])
        else:
            args.append(kwargs.pop(rarg))
    if missing:
        comment = 'The following arguments are missing:'
        for arg in missing:
            comment += ' {0}'.format(arg)
        ret['comment'] = comment
        ret['result'] = False
        return ret

    # A *args-style parameter must be supplied as a list (again honoring
    # the m_ prefix convention for colliding names)
    if aspec.varargs:
        if aspec.varargs == 'name':
            rarg = 'm_name'
        elif aspec.varargs == 'fun':
            rarg = 'm_fun'
        elif aspec.varargs == 'names':
            rarg = 'm_names'
        elif aspec.varargs == 'state':
            rarg = 'm_state'
        elif aspec.varargs == 'saltenv':
            rarg = 'm_saltenv'
        else:
            rarg = aspec.varargs

        if rarg in kwargs:
            varargs = kwargs.pop(rarg)

            if not isinstance(varargs, list):
                msg = "'{0}' must be a list."
                ret['comment'] = msg.format(aspec.varargs)
                ret['result'] = False
                return ret

            args.extend(varargs)

    # A **kwargs-style parameter must be supplied as a dict
    nkwargs = {}
    if aspec.keywords and aspec.keywords in kwargs:
        nkwargs = kwargs.pop(aspec.keywords)

        if not isinstance(nkwargs, dict):
            msg = "'{0}' must be a dict."
            ret['comment'] = msg.format(aspec.keywords)
            ret['result'] = False
            return ret

    try:
        if aspec.keywords:
            mret = __salt__[name](*args, **nkwargs)
        else:
            mret = __salt__[name](*args)
    except Exception as e:
        ret['comment'] = 'Module function {0} threw an exception. Exception: {1}'.format(
            name, e)
        ret['result'] = False
        return ret
    else:
        # Only record the return data when there is something meaningful.
        # BUGFIX: this previously read `mret is not None or mret is not {}`,
        # which is always True because `is not {}` compares identity against
        # a brand-new dict, so None/empty returns were recorded as changes.
        if mret is not None and mret != {}:
            ret['changes']['ret'] = mret

    if 'returner' in kwargs:
        ret_ret = {
            'id': __opts__['id'],
            'ret': mret,
            'fun': name,
            'jid': salt.utils.jid.gen_jid(__opts__)
        }
        returners = salt.loader.returners(__opts__, __salt__)
        if kwargs['returner'] in returners:
            returners[kwargs['returner']](ret_ret)
    ret['comment'] = 'Module function {0} executed'.format(name)
    ret['result'] = _get_result(mret, ret['changes'])

    return ret
Esempio n. 40
0
File: data.py Progetto: sjtbham/salt
def subdict_match(data,
                  expr,
                  delimiter=DEFAULT_TARGET_DELIM,
                  regex_match=False,
                  exact_match=False):
    '''
    Check for a match in a dictionary using a delimiter character to denote
    levels of subdicts, and also allowing the delimiter character to be
    matched. Thus, 'foo:bar:baz' will match data['foo'] == 'bar:baz' and
    data['foo']['bar'] == 'baz'. The former would take priority over the
    latter.

    By default values are compared case-insensitively with ``fnmatch``
    globbing; pass ``regex_match=True`` to treat the pattern as a regular
    expression, or ``exact_match=True`` for a plain case-insensitive string
    comparison.
    '''
    def _match(target, pattern, regex_match=False, exact_match=False):
        # Compare a single scalar value against the pattern using the
        # selected matching mode; both sides are lowercased first
        if regex_match:
            try:
                return re.match(pattern.lower(), six.text_type(target).lower())
            except Exception:
                # Caller supplied an invalid regex; log and treat as no match
                log.error('Invalid regex \'%s\' in match', pattern)
                return False
        elif exact_match:
            return six.text_type(target).lower() == pattern.lower()
        else:
            return fnmatch.fnmatch(
                six.text_type(target).lower(), pattern.lower())

    def _dict_match(target, pattern, regex_match=False, exact_match=False):
        # A leading '*:' means "any key at this level"
        wildcard = pattern.startswith('*:')
        if wildcard:
            pattern = pattern[2:]

        if pattern == '*':
            # We are just checking that the key exists
            return True
        elif pattern in target:
            # We might want to search for a key
            return True
        elif subdict_match(target,
                           pattern,
                           regex_match=regex_match,
                           exact_match=exact_match):
            return True
        if wildcard:
            # Try every key at this level: match the key itself, then
            # recurse into dict values or scan list values
            for key in target:
                if _match(key,
                          pattern,
                          regex_match=regex_match,
                          exact_match=exact_match):
                    return True
                if isinstance(target[key], dict):
                    if _dict_match(target[key],
                                   pattern,
                                   regex_match=regex_match,
                                   exact_match=exact_match):
                        return True
                elif isinstance(target[key], list):
                    for item in target[key]:
                        if _match(item,
                                  pattern,
                                  regex_match=regex_match,
                                  exact_match=exact_match):
                            return True
        return False

    # Split the expression at each delimiter position in turn: the left
    # side is used as the traversal key, the right side as the match
    # string. Shorter keys are tried first, giving the "former takes
    # priority" behavior described in the docstring.
    for idx in range(1, expr.count(delimiter) + 1):
        splits = expr.split(delimiter)
        key = delimiter.join(splits[:idx])
        matchstr = delimiter.join(splits[idx:])
        log.debug("Attempting to match '%s' in '%s' using delimiter '%s'",
                  matchstr, key, delimiter)
        match = traverse_dict_and_list(data, key, {}, delimiter=delimiter)
        if match == {}:
            # traverse_dict_and_list returned the {} default: key not found
            continue
        if isinstance(match, dict):
            if _dict_match(match,
                           matchstr,
                           regex_match=regex_match,
                           exact_match=exact_match):
                return True
            continue
        if isinstance(match, (list, tuple)):
            # We are matching a single component to a single list member
            for member in match:
                if isinstance(member, dict):
                    if _dict_match(member,
                                   matchstr,
                                   regex_match=regex_match,
                                   exact_match=exact_match):
                        return True
                if _match(member,
                          matchstr,
                          regex_match=regex_match,
                          exact_match=exact_match):
                    return True
            continue
        if _match(match,
                  matchstr,
                  regex_match=regex_match,
                  exact_match=exact_match):
            return True
    return False
Esempio n. 41
0
File: drac.py Progetto: zxstar/salt
def create_user(username, password, permissions, users=None):
    """
    Create user accounts

    CLI Example:

    .. code-block:: bash

        salt dell drac.create_user [USERNAME] [PASSWORD] [PRIVILEGES]
        salt dell drac.create_user diana secret login,test_alerts,clear_logs

    DRAC Privileges
      * login                   : Login to iDRAC
      * drac                    : Configure iDRAC
      * user_management         : Configure Users
      * clear_logs              : Clear Logs
      * server_control_commands : Execute Server Control Commands
      * console_redirection     : Access Console Redirection
      * virtual_media           : Access Virtual Media
      * test_alerts             : Test Alerts
      * debug_commands          : Execute Debug Commands
    """
    _uids = set()

    if users is None:
        users = list_users()

    if username in users:
        # Lazy %-formatting: message is only rendered if the record is emitted
        log.warning("'%s' already exists", username)
        return False

    # Collect the user-slot indices already in use
    for idx in six.iterkeys(users):
        _uids.add(users[idx]["index"])

    # DRAC exposes user slots 2-11 for regular accounts; pick the lowest
    # free slot (descending sort + pop() yields the smallest remaining uid)
    uid = sorted(set(range(2, 12)) - _uids, reverse=True).pop()

    # Create user account first
    if not __execute_cmd("config -g cfgUserAdmin -o \
                 cfgUserAdminUserName -i {0} {1}".format(uid, username)):
        delete_user(username, uid)
        return False

    # Configure users permissions
    if not set_permissions(username, permissions, uid):
        log.warning("unable to set user permissions")
        delete_user(username, uid)
        return False

    # Configure users password
    if not change_password(username, password, uid):
        log.warning("unable to set user password")
        delete_user(username, uid)
        return False

    # Enable users admin
    if not __execute_cmd("config -g cfgUserAdmin -o \
                          cfgUserAdminEnable -i {0} 1".format(uid)):
        delete_user(username, uid)
        return False

    return True
Esempio n. 42
0
def create(vm_):
    '''
    Create a single VM from a data dict

    vm_
        Profile/VM definition dict. Must contain ``name``; keys consumed
        here include ``profile``, ``use_dns``, ``ip_address``, ``clone``,
        ``technology``, ``host``, and ``ide*``/``sata*``/``scsi*``/``net*``
        device settings.

    CLI Example:

    .. code-block:: bash

        salt-cloud -p proxmox-ubuntu vmhostname
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__,
                __active_provider_name__ or 'proxmox',
                vm_['profile'],
                vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    ret = {}

    # Announce the start of the creation so event reactors can hook in.
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])

    log.info('Creating Cloud VM %s', vm_['name'])

    # Optionally resolve the VM's IP via DNS when no explicit ip_address
    # was supplied in the profile.
    if 'use_dns' in vm_ and 'ip_address' not in vm_:
        use_dns = vm_['use_dns']
        if use_dns:
            from socket import gethostbyname, gaierror
            try:
                ip_address = gethostbyname(six.text_type(vm_['name']))
            except gaierror:
                # Resolution failure is non-fatal; fall through to the
                # public/private IP selection below.
                log.debug('Resolving of %s failed', vm_['name'])
            else:
                vm_['ip_address'] = six.text_type(ip_address)

    try:
        newid = _get_next_vmid()
        data = create_node(vm_, newid)
    except Exception as exc:
        log.error(
            'Error creating %s on PROXMOX\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: \n%s',
            vm_['name'],
            exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG)
        return False

    ret['creation_data'] = data
    name = vm_['name']  # hostname which we know
    # Cloned VMs keep the vmid we pre-allocated; otherwise the API told us.
    if 'clone' in vm_ and vm_['clone'] is True:
        vmid = newid
    else:
        vmid = data['vmid']  # vmid which we have received
    host = data['node']  # host which we have received
    nodeType = data['technology']  # VM tech (Qemu / OpenVZ)

    # Determine which IP to use in order of preference:
    if 'ip_address' in vm_:
        ip_address = six.text_type(vm_['ip_address'])
    elif 'public_ips' in data:
        ip_address = six.text_type(data['public_ips'][0])  # first IP
    elif 'private_ips' in data:
        ip_address = six.text_type(data['private_ips'][0])  # first IP
    else:
        raise SaltCloudExecutionFailure(
            "Could not determine an IP address to use")

    log.debug('Using IP address %s', ip_address)

    # wait until the vm has been created so we can start it
    if not wait_for_created(data['upid'], timeout=300):
        return {
            'Error': 'Unable to create {0}, command timed out'.format(name)
        }

    if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu':
        # If we cloned a machine, see if we need to reconfigure any of the options such as net0,
        # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's
        # brought up

        # TODO: Support other settings here too as these are not the only ones that can be modified
        # after a clone operation
        log.info('Configuring cloned VM')

        # Modify the settings for the VM one at a time so we can see any problems with the values
        # as quickly as possible
        # ide0-ide2: IDE device slots
        for setting_number in range(3):
            setting = 'ide{0}'.format(setting_number)
            if setting in vm_:
                postParams = {}
                postParams[setting] = vm_[setting]
                query('post',
                      'nodes/{0}/qemu/{1}/config'.format(vm_['host'],
                                                         vmid), postParams)

        # sata0-sata4: SATA device slots
        for setting_number in range(5):
            setting = 'sata{0}'.format(setting_number)
            if setting in vm_:
                postParams = {}
                postParams[setting] = vm_[setting]
                query('post',
                      'nodes/{0}/qemu/{1}/config'.format(vm_['host'],
                                                         vmid), postParams)

        # scsi0-scsi12: SCSI device slots
        for setting_number in range(13):
            setting = 'scsi{0}'.format(setting_number)
            if setting in vm_:
                postParams = {}
                postParams[setting] = vm_[setting]
                query('post',
                      'nodes/{0}/qemu/{1}/config'.format(vm_['host'],
                                                         vmid), postParams)

        # net strings are a list of comma seperated settings. We need to merge the settings so that
        # the setting in the profile only changes the settings it touches and the other settings
        # are left alone. An example of why this is necessary is because the MAC address is set
        # in here and generally you don't want to alter or have to know the MAC address of the new
        # instance, but you may want to set the VLAN bridge for example
        for setting_number in range(20):
            setting = 'net{0}'.format(setting_number)
            if setting in vm_:
                data = query(
                    'get',
                    'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid))

                # Generate a dictionary of settings from the existing string
                new_setting = {}
                if setting in data:
                    new_setting.update(_stringlist_to_dictionary(
                        data[setting]))

                # Merge the new settings (as a dictionary) into the existing dictionary to get the
                # new merged settings
                new_setting.update(_stringlist_to_dictionary(vm_[setting]))

                # Convert the dictionary back into a string list
                postParams = {setting: _dictionary_to_stringlist(new_setting)}
                query('post',
                      'nodes/{0}/qemu/{1}/config'.format(vm_['host'],
                                                         vmid), postParams)

    # VM has been created. Starting..
    if not start(name, vmid, call='action'):
        log.error('Node %s (%s) failed to start!', name, vmid)
        raise SaltCloudExecutionFailure

    # Wait until the VM has fully started
    log.debug('Waiting for state "running" for vm %s on %s', vmid, host)
    if not wait_for_state(vmid, 'running'):
        return {'Error': 'Unable to start {0}, command timed out'.format(name)}

    ssh_username = config.get_cloud_config_value('ssh_username',
                                                 vm_,
                                                 __opts__,
                                                 default='root')
    ssh_password = config.get_cloud_config_value(
        'password',
        vm_,
        __opts__,
    )

    ret['ip_address'] = ip_address
    ret['username'] = ssh_username
    ret['password'] = ssh_password

    vm_['ssh_host'] = ip_address
    vm_['password'] = ssh_password
    # NOTE(review): this rebinds ``ret``, discarding the creation_data /
    # ip_address / username / password keys assigned above -- presumably the
    # bootstrap return is meant to become the result, but confirm whether
    # ``ret.update(...)`` was intended instead.
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    # Report success!
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug('\'%s\' VM creation details:\n%s', vm_['name'],
              pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
    )

    return ret
Esempio n. 43
0
def _netstat_bsd():
    '''
    Return netstat information for BSD flavors
    '''
    results = []
    kernel = __grains__['kernel']
    if kernel == 'NetBSD':
        # NetBSD: query each address family separately
        for family in ('inet', 'inet6'):
            output = __salt__['cmd.run'](
                'netstat -f {0} -an | tail -n+3'.format(family),
                python_shell=True)
            for line in output.splitlines():
                fields = line.split()
                conn = {
                    'proto': fields[0],
                    'recv-q': fields[1],
                    'send-q': fields[2],
                    'local-address': fields[3],
                    'remote-address': fields[4]
                }
                # Only TCP sockets report a connection state
                if conn['proto'].startswith('tcp'):
                    conn['state'] = fields[5]
                results.append(conn)
    else:
        # Other BSDs: query by protocol; only TCP lines carry a state column
        for proto_name, has_state in (('tcp', True), ('udp', False)):
            output = __salt__['cmd.run'](
                'netstat -p {0} -an | tail -n+3'.format(proto_name),
                python_shell=True)
            for line in output.splitlines():
                fields = line.split()
                conn = {
                    'proto': fields[0],
                    'recv-q': fields[1],
                    'send-q': fields[2],
                    'local-address': fields[3],
                    'remote-address': fields[4]
                }
                if has_state:
                    conn['state'] = fields[5]
                results.append(conn)

    # Add in user and program info
    ppid = _ppid()
    if kernel == 'OpenBSD':
        netinfo = _netinfo_openbsd()
    elif kernel in ('FreeBSD', 'NetBSD'):
        netinfo = _netinfo_freebsd_netbsd()
    for conn in results:
        try:
            # Pointer to the info for this connection, for easier
            # reference below
            ptr = netinfo[conn['local-address']][conn['remote-address']][conn['proto']]
        except KeyError:
            continue
        # Subset of the pid-to-ppid map relevant to this connection
        conn_ppid = dict(
            (pid, parent)
            for pid, parent in six.iteritems(ppid)
            if pid in ptr
        )
        try:
            # The master pid is the one whose parent is outside the subset
            master_pid = next(
                iter(pid for pid, parent in six.iteritems(conn_ppid)
                     if parent not in ptr))
        except StopIteration:
            continue
        conn['user'] = ptr[master_pid]['user']
        conn['program'] = '/'.join((master_pid, ptr[master_pid]['cmd']))
    return results
Esempio n. 44
0
def traceroute(host):
    '''
    Performs a traceroute to a 3rd party host

    host
        Hostname or IP to trace; it is sanitized before being shelled out.

    Returns a list of dicts, one per hop, whose keys depend on the
    installed traceroute flavor (Linux modern, Darwin/FreeBSD, or legacy).

    CLI Example:

    .. code-block:: bash

        salt '*' network.traceroute archlinux.org
    '''
    ret = []
    if not salt.utils.which('traceroute'):
        log.info('This minion does not have traceroute installed')
        return ret

    cmd = 'traceroute {0}'.format(salt.utils.network.sanitize_host(host))

    out = __salt__['cmd.run'](cmd)

    # Parse version of traceroute
    cmd2 = 'traceroute --version'
    out2 = __salt__['cmd.run'](cmd2)
    try:
        # Linux traceroute version looks like:
        #   Modern traceroute for Linux, version 2.0.19, Dec 10 2012
        # Darwin and FreeBSD traceroute version looks like: Version 1.4a12+[FreeBSD|Darwin]

        traceroute_version_raw = re.findall(
            r'.*[Vv]ersion (\d+)\.([\w\+]+)\.*(\w*)', out2)[0]
        # Lazy %-args so the message is only built when debug is enabled
        log.debug('traceroute_version_raw: %s', traceroute_version_raw)
        traceroute_version = []
        for t in traceroute_version_raw:
            try:
                traceroute_version.append(int(t))
            except ValueError:
                traceroute_version.append(t)

        # Pad to three components so the version comparisons below are safe
        if len(traceroute_version) < 3:
            traceroute_version.append(0)

        log.debug('traceroute_version: %s', traceroute_version)

    except IndexError:
        traceroute_version = [0, 0, 0]

    for line in out.splitlines():
        if ' ' not in line:
            continue
        if line.startswith('traceroute'):
            continue

        if 'Darwin' in str(traceroute_version[1]) or 'FreeBSD' in str(
                traceroute_version[1]):
            try:
                traceline = re.findall(r'\s*(\d*)\s+(.*)\s+\((.*)\)\s+(.*)$',
                                       line)[0]
            except IndexError:
                # Hop with no responses: "N  * * *"
                traceline = re.findall(r'\s*(\d*)\s+(\*\s+\*\s+\*)', line)[0]

            log.debug('traceline: %s', traceline)
            delays = re.findall(r'(\d+\.\d+)\s*ms', str(traceline))

            try:
                if traceline[1] == '* * *':
                    result = {'count': traceline[0], 'hostname': '*'}
                else:
                    result = {
                        'count': traceline[0],
                        'hostname': traceline[1],
                        'ip': traceline[2],
                    }
                    for idx, delay in enumerate(delays):
                        result['ms{0}'.format(idx + 1)] = delay
            except IndexError:
                result = {}

        elif (traceroute_version[0] >= 2 and traceroute_version[2] >= 14
              or traceroute_version[0] >= 2 and traceroute_version[1] > 0):
            comps = line.split('  ')
            if comps[1] == '* * *':
                result = {'count': int(comps[0]), 'hostname': '*'}
            else:
                result = {
                    'count': int(comps[0]),
                    'hostname': comps[1].split()[0],
                    'ip': comps[1].split()[1].strip('()'),
                    'ms1': float(comps[2].split()[0]),
                    'ms2': float(comps[3].split()[0]),
                    'ms3': float(comps[4].split()[0])
                }
        else:
            # Legacy traceroute output: fixed column positions
            comps = line.split()
            result = {
                'count': comps[0],
                'hostname': comps[1],
                'ip': comps[2],
                'ms1': comps[4],
                'ms2': comps[6],
                'ms3': comps[8],
                'ping1': comps[3],
                'ping2': comps[5],
                'ping3': comps[7]
            }

        ret.append(result)

    return ret
Esempio n. 45
0
def _run(name, **kwargs):
    """
    .. deprecated:: 2017.7.0
       Function name stays the same, behaviour will change.

    Run a single module function

    ``name``
        The module function to execute

    ``returner``
        Specify the returner to send the return of the module execution to

    ``kwargs``
        Pass any arguments needed to execute the function
    """
    ret = {"name": name, "changes": {}, "comment": "", "result": None}
    if name not in __salt__:
        ret["comment"] = "Module function {0} is not available".format(name)
        ret["result"] = False
        return ret

    if __opts__["test"]:
        ret["comment"] = "Module function {0} is set to execute".format(name)
        return ret

    aspec = salt.utils.args.get_function_argspec(__salt__[name])
    args = []
    defaults = {}

    arglen = 0
    deflen = 0
    if isinstance(aspec.args, list):
        arglen = len(aspec.args)
    if isinstance(aspec.defaults, tuple):
        deflen = len(aspec.defaults)
    # Match up the defaults with the respective args
    for ind in range(arglen - 1, -1, -1):
        minus = arglen - ind
        if deflen - minus > -1:
            defaults[aspec.args[ind]] = aspec.defaults[-minus]
    # overwrite passed default kwargs
    for arg in defaults:
        if arg == "name":
            if "m_name" in kwargs:
                defaults[arg] = kwargs.pop("m_name")
        elif arg == "fun":
            if "m_fun" in kwargs:
                defaults[arg] = kwargs.pop("m_fun")
        elif arg == "state":
            if "m_state" in kwargs:
                defaults[arg] = kwargs.pop("m_state")
        elif arg == "saltenv":
            if "m_saltenv" in kwargs:
                defaults[arg] = kwargs.pop("m_saltenv")
        if arg in kwargs:
            defaults[arg] = kwargs.pop(arg)
    missing = set()
    for arg in aspec.args:
        if arg == "name":
            rarg = "m_name"
        elif arg == "fun":
            rarg = "m_fun"
        elif arg == "names":
            rarg = "m_names"
        elif arg == "state":
            rarg = "m_state"
        elif arg == "saltenv":
            rarg = "m_saltenv"
        else:
            rarg = arg
        if rarg not in kwargs and arg not in defaults:
            missing.add(rarg)
            continue
        if arg in defaults:
            args.append(defaults[arg])
        else:
            args.append(kwargs.pop(rarg))
    if missing:
        comment = "The following arguments are missing:"
        for arg in missing:
            comment += " {0}".format(arg)
        ret["comment"] = comment
        ret["result"] = False
        return ret

    if aspec.varargs:
        if aspec.varargs == "name":
            rarg = "m_name"
        elif aspec.varargs == "fun":
            rarg = "m_fun"
        elif aspec.varargs == "names":
            rarg = "m_names"
        elif aspec.varargs == "state":
            rarg = "m_state"
        elif aspec.varargs == "saltenv":
            rarg = "m_saltenv"
        else:
            rarg = aspec.varargs

        if rarg in kwargs:
            varargs = kwargs.pop(rarg)

            if not isinstance(varargs, list):
                msg = "'{0}' must be a list."
                ret["comment"] = msg.format(aspec.varargs)
                ret["result"] = False
                return ret

            args.extend(varargs)

    nkwargs = {}
    if aspec.keywords and aspec.keywords in kwargs:
        nkwargs = kwargs.pop(aspec.keywords)
        if not isinstance(nkwargs, dict):
            msg = "'{0}' must be a dict."
            ret["comment"] = msg.format(aspec.keywords)
            ret["result"] = False
            return ret

    try:
        if aspec.keywords:
            mret = __salt__[name](*args, **nkwargs)
        else:
            mret = __salt__[name](*args)
    except Exception as e:  # pylint: disable=broad-except
        ret["comment"] = "Module function {0} threw an exception. Exception: {1}".format(
            name, e)
        ret["result"] = False
        return ret
    else:
        if mret is not None or mret is not {}:
            ret["changes"]["ret"] = mret

    if "returner" in kwargs:
        ret_ret = {
            "id": __opts__["id"],
            "ret": mret,
            "fun": name,
            "jid": salt.utils.jid.gen_jid(__opts__),
        }
        returners = salt.loader.returners(__opts__, __salt__)
        if kwargs["returner"] in returners:
            returners[kwargs["returner"]](ret_ret)
    ret["comment"] = "Module function {0} executed".format(name)
    ret["result"] = _get_result(mret, ret["changes"])

    return ret
Esempio n. 46
0
 def __random_string(self, size=6):
     """Return a random string of *size* uppercase letters and digits."""
     alphabet = string.ascii_uppercase + string.digits
     return ''.join(random.choice(alphabet) for _ in range(size))
Esempio n. 47
0
def present(name,
            skip_translate=None,
            ignore_collisions=False,
            validate_ip_addrs=True,
            containers=None,
            reconnect=True,
            **kwargs):
    '''
    .. versionchanged:: 2018.3.0
        Support added for network configuration options other than ``driver``
        and ``driver_opts``, as well as IPAM configuration.

    Ensure that a network is present

    .. note::
        This state supports all arguments for network and IPAM pool
        configuration which are available for the release of docker-py
        installed on the minion. For that reason, the arguments described below
        in the :ref:`NETWORK CONFIGURATION
        <salt-states-docker-network-present-netconf>` and :ref:`IP ADDRESS
        MANAGEMENT (IPAM) <salt-states-docker-network-present-ipam>` sections
        may not accurately reflect what is available on the minion. The
        :py:func:`docker.get_client_args
        <salt.modules.dockermod.get_client_args>` function can be used to check
        the available arguments for the installed version of docker-py (they
        are found in the ``network_config`` and ``ipam_config`` sections of the
        return data), but Salt will not prevent a user from attempting to use
        an argument which is unsupported in the release of Docker which is
        installed. In those cases, network creation be attempted but will fail.

    name
        Network name

    skip_translate
        This function translates Salt SLS input into the format which
        docker-py expects. However, in the event that Salt's translation logic
        fails (due to potential changes in the Docker Remote API, or to bugs in
        the translation code), this argument can be used to exert granular
        control over which arguments are translated and which are not.

        Pass this argument as a comma-separated list (or Python list) of
        arguments, and translation for each passed argument name will be
        skipped. Alternatively, pass ``True`` and *all* translation will be
        skipped.

        Skipping tranlsation allows for arguments to be formatted directly in
        the format which docker-py expects. This allows for API changes and
        other issues to be more easily worked around. See the following links
        for more information:

        - `docker-py Low-level API`_
        - `Docker Engine API`_

        .. versionadded:: 2018.3.0

    .. _`docker-py Low-level API`: http://docker-py.readthedocs.io/en/stable/api.html#docker.api.container.ContainerApiMixin.create_container
    .. _`Docker Engine API`: https://docs.docker.com/engine/api/v1.33/#operation/ContainerCreate

    ignore_collisions : False
        Since many of docker-py's arguments differ in name from their CLI
        counterparts (with which most Docker users are more familiar), Salt
        detects usage of these and aliases them to the docker-py version of
        that argument. However, if both the alias and the docker-py version of
        the same argument (e.g. ``options`` and ``driver_opts``) are used, an error
        will be raised. Set this argument to ``True`` to suppress these errors
        and keep the docker-py version of the argument.

        .. versionadded:: 2018.3.0

    validate_ip_addrs : True
        For parameters which accept IP addresses/subnets as input, validation
        will be performed. To disable, set this to ``False``.

        .. versionadded:: 2018.3.0

    containers
        A list of containers which should be connected to this network.

        .. note::
            As of the 2018.3.0 release, this is not the recommended way of
            managing a container's membership in a network, for a couple
            reasons:

            1. It does not support setting static IPs, aliases, or links in the
               container's IP configuration.
            2. If a :py:func:`docker_container.running
               <salt.states.docker_container.running>` state replaces a
               container, it will not be reconnected to the network until the
               ``docker_network.present`` state is run again. Since containers
               often have ``require`` requisites to ensure that the network
               is present, this means that the ``docker_network.present`` state
               ends up being run *before* the :py:func:`docker_container.running
               <salt.states.docker_container.running>`, leaving the container
               unattached at the end of the Salt run.

            For these reasons, it is recommended to use
            :ref:`docker_container.running's network management support
            <salt-states-docker-container-network-management>`.

    reconnect : True
        If ``containers`` is not used, and the network is replaced, then Salt
        will keep track of the containers which were connected to the network
        and reconnect them to the network after it is replaced. Salt will first
        attempt to reconnect using the same IP the container had before the
        network was replaced. If that fails (for instance, if the network was
        replaced because the subnet was modified), then the container will be
        reconnected without an explicit IP address, and its IP will be assigned
        by Docker.

        Set this option to ``False`` to keep Salt from trying to reconnect
        containers. This can be useful in some cases when :ref:`managing static
        IPs in docker_container.running
        <salt-states-docker-container-network-management>`. For instance, if a
        network's subnet is modified, it is likely that the static IP will need
        to be updated in the ``docker_container.running`` state as well. When
        the network is replaced, the initial reconnect attempt would fail, and
        the container would be reconnected with an automatically-assigned IP
        address. Then, when the ``docker_container.running`` state executes, it
        would disconnect the network *again* and reconnect using the new static
        IP. Disabling the reconnect behavior in these cases would prevent the
        unnecessary extra reconnection.

        .. versionadded:: 2018.3.0

    .. _salt-states-docker-network-present-netconf:

    **NETWORK CONFIGURATION ARGUMENTS**

    driver
        Network driver

        .. code-block:: yaml

            mynet:
              docker_network.present:
                - driver: macvlan

    driver_opts (or *driver_opt*, or *options*)
        Options for the network driver. Either a dictionary of option names and
        values or a Python list of strings in the format ``varname=value``. The
        below three examples are equivalent:

        .. code-block:: yaml

            mynet:
              docker_network.present:
                - driver: macvlan
                - driver_opts: macvlan_mode=bridge,parent=eth0

        .. code-block:: yaml

            mynet:
              docker_network.present:
                - driver: macvlan
                - driver_opts:
                  - macvlan_mode=bridge
                  - parent=eth0

        .. code-block:: yaml

            mynet:
              docker_network.present:
                - driver: macvlan
                - driver_opts:
                  - macvlan_mode: bridge
                  - parent: eth0

        The options can also simply be passed as a dictionary, though this can
        be error-prone due to some :ref:`idiosyncrasies <yaml-idiosyncrasies>`
        with how PyYAML loads nested data structures:

        .. code-block:: yaml

            mynet:
              docker_network.present:
                - driver: macvlan
                - driver_opts:
                    macvlan_mode: bridge
                    parent: eth0

    check_duplicate : True
        If ``True``, checks for networks with duplicate names. Since networks
        are primarily keyed based on a random ID and not on the name, and
        network name is strictly a user-friendly alias to the network which is
        uniquely identified using ID, there is no guaranteed way to check for
        duplicates. This option providess a best effort, checking for any
        networks which have the same name, but it is not guaranteed to catch
        all name collisions.

        .. code-block:: yaml

            mynet:
              docker_network.present:
                - check_duplicate: False

    internal : False
        If ``True``, restricts external access to the network

        .. code-block:: yaml

            mynet:
              docker_network.present:
                - internal: True

    labels
        Add metadata to the network. Labels can be set both with and without
        values, and labels with values can be passed either as ``key=value`` or
        ``key: value`` pairs. For example, while the below would be very
        confusing to read, it is technically valid, and demonstrates the
        different ways in which labels can be passed:

        .. code-block:: yaml

            mynet:
              docker_network.present:
                - labels:
                  - foo
                  - bar=baz
                  - hello: world

        The labels can also simply be passed as a YAML dictionary, though this
        can be error-prone due to some :ref:`idiosyncrasies
        <yaml-idiosyncrasies>` with how PyYAML loads nested data structures:

        .. code-block:: yaml

            foo:
              docker_network.present:
                - labels:
                    foo: ''
                    bar: baz
                    hello: world

        .. versionchanged:: 2018.3.0
            Methods for specifying labels can now be mixed. Earlier releases
            required either labels with or without values.

    enable_ipv6 (or *ipv6*) : False
        Enable IPv6 on the network

        .. code-block:: yaml

            mynet:
              docker_network.present:
                - enable_ipv6: True

        .. note::
            While it should go without saying, this argument must be set to
            ``True`` to :ref:`configure an IPv6 subnet
            <salt-states-docker-network-present-ipam>`. Also, if this option is
            turned on without an IPv6 subnet explicitly configured, you will
            get an error unless you have set up a fixed IPv6 subnet. Consult
            the `Docker IPv6 docs`_ for information on how to do this.

            .. _`Docker IPv6 docs`: https://docs.docker.com/v17.09/engine/userguide/networking/default_network/ipv6/

    attachable : False
        If ``True``, and the network is in the global scope, non-service
        containers on worker nodes will be able to connect to the network.

        .. code-block:: yaml

            mynet:
              docker_network.present:
                - attachable: True

        .. note::
            This option cannot be reliably managed on CentOS 7. This is because
            while support for this option was added in API version 1.24, its
            value was not added to the inspect results until API version 1.26.
            The version of Docker which is available for CentOS 7 runs API
            version 1.24, meaning that while Salt can pass this argument to the
            API, it has no way of knowing the value of this config option in an
            existing Docker network.

    scope
        Specify the network's scope (``local``, ``global`` or ``swarm``)

        .. code-block:: yaml

            mynet:
              docker_network.present:
                - scope: local

    ingress : False
        If ``True``, create an ingress network which provides the routing-mesh in
        swarm mode

        .. code-block:: yaml

            mynet:
              docker_network.present:
                - ingress: True

    .. _salt-states-docker-network-present-ipam:

    **IP ADDRESS MANAGEMENT (IPAM)**

    This state supports networks with either IPv4, or both IPv4 and IPv6. If
    configuring IPv4, then you can pass the :ref:`IPAM pool arguments
    <salt-states-docker-network-present-ipam-pool-arguments>` below as
    individual arguments. However, if configuring IPv4 and IPv6, the arguments
    must be passed as a list of dictionaries, in the ``ipam_pools`` argument
    (click :ref:`here <salt-states-docker-network-present-ipam-examples>` for
    some examples). `These docs`_ also have more information on these
    arguments.

    .. _`These docs`: http://docker-py.readthedocs.io/en/stable/api.html#docker.types.IPAMPool

    *IPAM ARGUMENTS*

    ipam_driver
        IPAM driver to use, if different from the default one

        .. code-block:: yaml

            mynet:
              docker_network.present:
                - ipam_driver: foo

    ipam_opts
        Options for the IPAM driver. Either a dictionary of option names and
        values or a Python list of strings in the format ``varname=value``. The
        below three examples are equivalent:

        .. code-block:: yaml

            mynet:
              docker_network.present:
                - ipam_driver: foo
                - ipam_opts: foo=bar,baz=qux

        .. code-block:: yaml

            mynet:
              docker_network.present:
                - ipam_driver: foo
                - ipam_opts:
                  - foo=bar
                  - baz=qux

        .. code-block:: yaml

            mynet:
              docker_network.present:
                - ipam_driver: foo
                - ipam_opts:
                  - foo: bar
                  - baz: qux

        The options can also simply be passed as a dictionary, though this can
        be error-prone due to some :ref:`idiosyncrasies <yaml-idiosyncrasies>`
        with how PyYAML loads nested data structures:

        .. code-block:: yaml

            mynet:
              docker_network.present:
                - ipam_driver: macvlan
                - ipam_opts:
                    foo: bar
                    baz: qux

    .. _salt-states-docker-network-present-ipam-pool-arguments:

    *IPAM POOL ARGUMENTS*

    subnet
        Subnet in CIDR format that represents a network segment

    iprange (or *ip_range*)
        Allocate container IP from a sub-range within the subnet

        Subnet in CIDR format that represents a network segment

    gateway
        IPv4 or IPv6 gateway for the master subnet

    aux_addresses (or *aux_address*)
        A dictionary of mapping container names to IP addresses which should be
        allocated for them should they connect to the network. Either a
        dictionary of option names and values or a Python list of strings in
        the format ``host=ipaddr``.

    .. _salt-states-docker-network-present-ipam-examples:

    *IPAM CONFIGURATION EXAMPLES*

    Below is an example of an IPv4-only network (keep in mind that ``subnet``
    is the only required argument).

    .. code-block:: yaml

        mynet:
          docker_network.present:
            - subnet: 10.0.20.0/24
            - iprange: 10.0.20.128/25
            - gateway: 10.0.20.254
            - aux_addresses:
              - foo.bar.tld: 10.0.20.50
              - hello.world.tld: 10.0.20.51

    .. note::
        The ``aux_addresses`` can be passed differently, in the same way that
        ``driver_opts`` and ``ipam_opts`` can.

    This same network could also be configured this way:

    .. code-block:: yaml

        mynet:
          docker_network.present:
            - ipam_pools:
              - subnet: 10.0.20.0/24
                iprange: 10.0.20.128/25
                gateway: 10.0.20.254
                aux_addresses:
                  foo.bar.tld: 10.0.20.50
                  hello.world.tld: 10.0.20.51

    Here is an example of a mixed IPv4/IPv6 subnet.

    .. code-block:: yaml

        mynet:
          docker_network.present:
            - ipam_pools:
              - subnet: 10.0.20.0/24
                gateway: 10.0.20.1
              - subnet: fe3f:2180:26:1::/123
                gateway: fe3f:2180:26:1::1
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}

    try:
        network = __salt__['docker.inspect_network'](name)
    except CommandExecutionError as exc:
        msg = exc.__str__()
        if '404' in msg:
            # Network not present
            network = None
        else:
            ret['comment'] = msg
            return ret

    # map container's IDs to names
    to_connect = {}
    missing_containers = []
    stopped_containers = []
    for cname in __utils__['args.split_input'](containers or []):
        try:
            cinfo = __salt__['docker.inspect_container'](cname)
        except CommandExecutionError:
            missing_containers.append(cname)
        else:
            try:
                cid = cinfo['Id']
            except KeyError:
                missing_containers.append(cname)
            else:
                if not cinfo.get('State', {}).get('Running', False):
                    stopped_containers.append(cname)
                else:
                    to_connect[cid] = {'Name': cname}

    if missing_containers:
        ret.setdefault('warnings', []).append(
            'The following containers do not exist: {0}.'.format(
                ', '.join(missing_containers)))

    if stopped_containers:
        ret.setdefault('warnings', []).append(
            'The following containers are not running: {0}.'.format(
                ', '.join(stopped_containers)))

    # We might disconnect containers in the process of recreating the network,
    # we'll need to keep track of these containers so we can reconnect them later.
    disconnected_containers = {}

    try:
        kwargs = __utils__['docker.translate_input'](
            salt.utils.docker.translate.network,
            skip_translate=skip_translate,
            ignore_collisions=ignore_collisions,
            validate_ip_addrs=validate_ip_addrs,
            **__utils__['args.clean_kwargs'](**kwargs))
    except Exception as exc:
        ret['comment'] = exc.__str__()
        return ret

    # Separate out the IPAM config options and build the IPAM config dict
    ipam_kwargs = {}
    ipam_kwarg_names = ['ipam', 'ipam_driver', 'ipam_opts', 'ipam_pools']
    ipam_kwarg_names.extend(
        __salt__['docker.get_client_args']('ipam_config')['ipam_config'])
    for key in ipam_kwarg_names:
        try:
            ipam_kwargs[key] = kwargs.pop(key)
        except KeyError:
            pass
    if 'ipam' in ipam_kwargs:
        if len(ipam_kwargs) > 1:
            ret['comment'] = (
                'Cannot mix the \'ipam\' argument with any of the IPAM config '
                'arguments. See documentation for details.')
            return ret
        ipam_config = ipam_kwargs['ipam']
    else:
        ipam_pools = ipam_kwargs.pop('ipam_pools', ())
        try:
            ipam_config = __utils__['docker.create_ipam_config'](*ipam_pools,
                                                                 **ipam_kwargs)
        except Exception as exc:
            ret['comment'] = exc.__str__()
            return ret

    # We'll turn this off if we decide below that creating the network is not
    # necessary.
    create_network = True

    if network is not None:
        log.debug('Docker network \'%s\' already exists', name)

        # Set the comment now to say that it already exists, if we need to
        # recreate the network with new config we'll update the comment later.
        ret['comment'] = ('Network \'{0}\' already exists, and is configured '
                          'as specified'.format(name))
        log.trace('Details of docker network \'%s\': %s', name, network)

        temp_net_name = ''.join(
            random.choice(string.ascii_lowercase) for _ in range(20))

        try:
            # When using enable_ipv6, you *must* provide a subnet. But we don't
            # care about the subnet when we make our temp network, we only care
            # about the non-IPAM values in the network. And we also do not want
            # to try some hacky workaround where we choose a small IPv6 subnet
            # to pass when creating the temp network, that may end up
            # overlapping with a large IPv6 subnet already in use by Docker.
            # So, for purposes of comparison we will create the temp network
            # with enable_ipv6=False and then munge the inspect results before
            # performing the comparison. Note that technically it is not
            # required that one specify both v4 and v6 subnets when creating a
            # network, but not specifying IPv4 makes it impossible for us to
            # reliably compare the SLS input to the existing network, as we
            # wouldn't know if the IPv4 subnet in the existing network was
            # explicitly configured or was automatically assigned by Docker.
            enable_ipv6 = kwargs.pop('enable_ipv6', None)
            __salt__['docker.create_network'](
                temp_net_name,
                skip_translate=True,  # No need to translate (already did)
                enable_ipv6=False,
                **kwargs)
        except CommandExecutionError as exc:
            ret['comment'] = (
                'Failed to create temp network for comparison: {0}'.format(
                    exc.__str__()))
            return ret
        else:
            # Replace the value so we can use it later
            if enable_ipv6 is not None:
                kwargs['enable_ipv6'] = enable_ipv6

        try:
            try:
                temp_net_info = __salt__['docker.inspect_network'](
                    temp_net_name)
            except CommandExecutionError as exc:
                ret['comment'] = 'Failed to inspect temp network: {0}'.format(
                    exc.__str__())
                return ret
            else:
                temp_net_info['EnableIPv6'] = bool(enable_ipv6)

            # Replace the IPAM configuration in the temp network with the IPAM
            # config dict we created earlier, for comparison purposes. This is
            # necessary because we cannot create two networks that have
            # overlapping subnets (the Docker Engine will throw an error).
            temp_net_info['IPAM'] = ipam_config

            existing_pool_count = len(network['IPAM']['Config'])
            desired_pool_count = len(temp_net_info['IPAM']['Config'])

            is_default_pool = lambda x: True \
                if sorted(x) == ['Gateway', 'Subnet'] \
                else False

            if desired_pool_count == 0 \
                    and existing_pool_count == 1 \
                    and is_default_pool(network['IPAM']['Config'][0]):
                # If we're not explicitly configuring an IPAM pool, then we
                # don't care what the subnet is. Docker networks created with
                # no explicit IPAM configuration are assigned a single IPAM
                # pool containing just a subnet and gateway. If the above if
                # statement resolves as True, then we know that both A) we
                # aren't explicitly configuring IPAM, and B) the existing
                # network appears to be one that was created without an
                # explicit IPAM configuration (since it has the default pool
                # config values). Of course, it could be possible that the
                # existing network was created with a single custom IPAM pool,
                # with just a subnet and gateway. But even if this was the
                # case, the fact that we aren't explicitly enforcing IPAM
                # configuration means we don't really care what the existing
                # IPAM configuration is. At any rate, to avoid IPAM differences
                # when comparing the existing network to the temp network, we
                # need to clear the existing network's IPAM configuration.
                network['IPAM']['Config'] = []

            changes = __salt__['docker.compare_networks'](
                network, temp_net_info, ignore='Name,Id,Created,Containers')

            if not changes:
                # No changes to the network, so we'll be keeping the existing
                # network and at most just connecting containers to it.
                create_network = False

            else:
                ret['changes'][name] = changes
                if __opts__['test']:
                    ret['result'] = None
                    ret['comment'] = 'Network would be recreated with new config'
                    return ret

                if network['Containers']:
                    # We've removed the network, so there are now no containers
                    # attached to it. However, once we recreate the network
                    # with the new configuration we may need to reconnect the
                    # containers that were previously connected. Even if we're
                    # not reconnecting, we still need to track the containers
                    # so that we can report on which were disconnected.
                    disconnected_containers = copy.deepcopy(
                        network['Containers'])
                    if not containers and reconnect:
                        # Grab the links and aliases from each connected
                        # container so that we have them when we attempt to
                        # reconnect later
                        for cid in disconnected_containers:
                            try:
                                cinfo = __salt__['docker.inspect_container'](
                                    cid)
                                netinfo = cinfo['NetworkSettings']['Networks'][
                                    name]
                                # Links and Aliases will be None if not
                                # explicitly set, hence using "or" instead of
                                # placing the empty list inside the dict.get
                                net_links = netinfo.get('Links') or []
                                net_aliases = netinfo.get('Aliases') or []
                                if net_links:
                                    disconnected_containers[cid][
                                        'Links'] = net_links
                                if net_aliases:
                                    disconnected_containers[cid][
                                        'Aliases'] = net_aliases
                            except (CommandExecutionError, KeyError,
                                    ValueError):
                                continue

                remove_result = _remove_network(network)
                if not remove_result['result']:
                    return remove_result

                # Replace the Containers key with an empty dict so that when we
            # check for connected containers below, we correctly see that
                # there are none connected.
                network['Containers'] = {}
        finally:
            try:
                __salt__['docker.remove_network'](temp_net_name)
            except CommandExecutionError as exc:
                ret.setdefault('warnings', []).append(
                    'Failed to remove temp network \'{0}\': {1}.'.format(
                        temp_net_name, exc.__str__()))

    if create_network:
        log.debug('Network \'%s\' will be created', name)
        if __opts__['test']:
            # NOTE: if the container already existed and needed to be
            # recreated, and we were in test mode, we would have already exited
            # above with a comment about the network needing to be recreated.
            # So, even though the below block to create the network would be
            # executed to create the network both when it's being recreated and
            # when it's being created for the first time, the below comment is
            # still accurate.
            ret['result'] = None
            ret['comment'] = 'Network will be created'
            return ret

        kwargs['ipam'] = ipam_config
        try:
            __salt__['docker.create_network'](
                name,
                skip_translate=True,  # No need to translate (already did)
                **kwargs)
        except Exception as exc:
            ret['comment'] = 'Failed to create network \'{0}\': {1}'.format(
                name, exc.__str__())
            return ret
        else:
            action = 'recreated' if network is not None else 'created'
            ret['changes'][action] = True
            ret['comment'] = 'Network \'{0}\' {1}'.format(
                name, 'created'
                if network is None else 'was replaced with updated config')
            # Make sure the "Containers" key exists for logic below
            network = {'Containers': {}}

    # If no containers were specified in the state but we have disconnected
    # some in the process of recreating the network, we should reconnect those
    # containers.
    if containers is None and reconnect and disconnected_containers:
        to_connect = disconnected_containers

    # Don't try to connect any containers which are already connected. If we
    # created/re-created the network, then network['Containers'] will be empty
    # and no containers will be deleted from the to_connect dict (the result
    # being that we will reconnect all containers in the to_connect dict).
    # list() is used here because we will potentially be modifying the
    # dictionary during iteration.
    for cid in list(to_connect):
        if cid in network['Containers']:
            del to_connect[cid]

    errors = []
    if to_connect:
        for cid, connect_info in six.iteritems(to_connect):
            connect_kwargs = {}
            if cid in disconnected_containers:
                for key_name, arg_name in (('IPv4Address', 'ipv4_address'),
                                           ('IPV6Address', 'ipv6_address'),
                                           ('Links', 'links'), ('Aliases',
                                                                'aliases')):
                    try:
                        connect_kwargs[arg_name] = connect_info[key_name]
                    except (KeyError, AttributeError):
                        continue
                    else:
                        if key_name.endswith('Address'):
                            connect_kwargs[arg_name] = \
                                connect_kwargs[arg_name].rsplit('/', 1)[0]
            try:
                __salt__['docker.connect_container_to_network'](
                    cid, name, **connect_kwargs)
            except CommandExecutionError as exc:
                if not connect_kwargs:
                    errors.append(exc.__str__())
                else:
                    # We failed to reconnect with the container's old IP
                    # configuration. Reconnect using automatic IP config.
                    try:
                        __salt__['docker.connect_container_to_network'](cid,
                                                                        name)
                    except CommandExecutionError as exc:
                        errors.append(exc.__str__())
                    else:
                        ret['changes'].setdefault(
                            'reconnected' if cid in disconnected_containers
                            else 'connected', []).append(connect_info['Name'])
            else:
                ret['changes'].setdefault(
                    'reconnected' if cid in disconnected_containers else
                    'connected', []).append(connect_info['Name'])

    if errors:
        if ret['comment']:
            ret['comment'] += '. '
        ret['comment'] += '. '.join(errors) + '.'
    else:
        ret['result'] = True

    # Figure out if we removed any containers as a result of replacing the
    # network and did not reconnect them. We only would not have reconnected if
    # a list of containers was passed in the "containers" argument, and there
    # were containers connected to the network prior to its replacement which
    # were not part of that list.
    for cid, c_info in six.iteritems(disconnected_containers):
        if cid not in to_connect:
            ret['changes'].setdefault('disconnected',
                                      []).append(c_info['Name'])

    return ret
Esempio n. 48
0
    def run(self):
        '''
        Execute the batch run.

        This is a generator: results are yielded as each minion returns,
        either as the raw event payload (when ``raw`` is set in opts) or as
        a ``{minion_id: data}`` mapping. Minions are targeted ``bnum`` at a
        time; a new batch is dispatched as earlier minions finish or time
        out.
        '''
        # Positional args for LocalClient.cmd_iter_no_block(); slot 0 is the
        # target list and is filled in per batch inside the main loop below.
        args = [[],
                self.opts['fun'],
                self.opts['arg'],
                self.opts['timeout'],
                'list',
                ]
        bnum = self.get_bnum()
        # No targets to run
        if not self.minions:
            return
        to_run = copy.deepcopy(self.minions)
        active = []
        ret = {}
        iters = []
        # Wait the specified time before deciding a job is actually done
        bwait = self.opts.get('batch_wait', 0)
        wait = []

        if self.options:
            show_jid = self.options.show_jid
            show_verbose = self.options.verbose
        else:
            show_jid = False
            show_verbose = False

        # The minion tracker keeps track of responses and iterators:
        # - it removes finished iterators from iters[]
        # - if a previously detected minion does not respond, it is
        #   added with an empty answer to ret{} once the timeout is reached
        # - unresponsive minions are removed from active[] to make
        #   sure that the main while loop finishes even with unresponsive
        #   minions
        minion_tracker = {}

        # We already know some minions didn't respond to the ping, so inform
        # the user we won't be attempting to run a job on them
        for down_minion in self.down_minions:
            print_cli('Minion {0} did not respond. No job will be sent.'.format(down_minion))

        # Iterate while we still have things to execute
        while len(ret) < len(self.minions):
            next_ = []
            if bwait and wait:
                self.__update_wait(wait)
            if len(to_run) <= bnum - len(wait) and not active:
                # Last bit of them, add them all to next iterator
                while to_run:
                    next_.append(to_run.pop())
            else:
                for _ in range(bnum - len(active) - len(wait)):
                    if to_run:
                        minion_id = to_run.pop()
                        if isinstance(minion_id, dict):
                            # Entries may be {id: metadata} dicts; we only
                            # need the ID. list() is required because
                            # dict.keys() is not indexable on Python 3.
                            next_.append(list(minion_id)[0])
                        else:
                            next_.append(minion_id)

            active += next_
            args[0] = next_

            if next_:
                if not self.quiet:
                    print_cli('\nExecuting run on {0}\n'.format(next_))
                # Create a new iterator for this batch of minions
                new_iter = self.local.cmd_iter_no_block(
                                *args,
                                raw=self.opts.get('raw', False),
                                ret=self.opts.get('return', ''),
                                show_jid=show_jid,
                                verbose=show_verbose,
                                **self.eauth)
                # Add it to our iterators and to the minion_tracker
                iters.append(new_iter)
                minion_tracker[new_iter] = {}
                # Every iterator added is 'active' and has its set of minions
                minion_tracker[new_iter]['minions'] = next_
                minion_tracker[new_iter]['active'] = True

            else:
                time.sleep(0.02)
            parts = {}

            # See if we found more minions
            for ping_ret in self.ping_gen:
                if ping_ret is None:
                    break
                # next(iter(...)) grabs the single minion ID from the ping
                # return; replaces the Python 2-only dict.iterkeys().
                m = next(iter(ping_ret))
                if m not in self.minions:
                    self.minions.append(m)
                    to_run.append(m)

            for queue in iters:
                try:
                    # Gather returns until we get to the bottom
                    ncnt = 0
                    while True:
                        part = next(queue)
                        if part is None:
                            time.sleep(0.01)
                            ncnt += 1
                            if ncnt > 5:
                                break
                            continue
                        if self.opts.get('raw'):
                            parts.update({part['data']['id']: part})
                            if part['data']['id'] in minion_tracker[queue]['minions']:
                                minion_tracker[queue]['minions'].remove(part['data']['id'])
                            else:
                                print_cli('minion {0} was already deleted from tracker, probably a duplicate key'.format(part['id']))
                        else:
                            parts.update(part)
                            # 'part_id' avoids shadowing the builtin id()
                            for part_id in part.keys():
                                if part_id in minion_tracker[queue]['minions']:
                                    minion_tracker[queue]['minions'].remove(part_id)
                                else:
                                    print_cli('minion {0} was already deleted from tracker, probably a duplicate key'.format(part_id))
                except StopIteration:
                    # If an iterator is done:
                    # - set it to inactive
                    # - add minions that have not responded to parts{}

                    # Check if the tracker contains the iterator
                    if queue in minion_tracker:
                        minion_tracker[queue]['active'] = False

                        # Add all minions that belong to this iterator and
                        # that have not responded to parts{} with an empty
                        # response
                        for minion in minion_tracker[queue]['minions']:
                            if minion not in parts:
                                parts[minion] = {}
                                parts[minion]['ret'] = {}

            for minion, data in six.iteritems(parts):
                if minion in active:
                    active.remove(minion)
                    if bwait:
                        wait.append(datetime.now() + timedelta(seconds=bwait))
                if self.opts.get('raw'):
                    ret[minion] = data
                    yield data
                else:
                    ret[minion] = data
                    yield {minion: data}
                if not self.quiet:
                    # Re-key the data under the minion ID for display
                    # purposes; display_output renders it to the console.
                    ret[minion] = data['ret']
                    data[minion] = data.pop('ret')
                    if 'out' in data:
                        out = data.pop('out')
                    else:
                        out = None
                    salt.output.display_output(
                            data,
                            out,
                            self.opts)

            # Remove inactive iterators from the iters list
            for queue in minion_tracker:
                # Only remove inactive queues
                if not minion_tracker[queue]['active'] and queue in iters:
                    iters.remove(queue)
                    # Also remove the iterator's minions from the active list
                    for minion in minion_tracker[queue]['minions']:
                        if minion in active:
                            active.remove(minion)
                            if bwait:
                                wait.append(datetime.now() + timedelta(seconds=bwait))
Esempio n. 49
0
def _connect(contact_points=None,
             port=None,
             cql_user=None,
             cql_pass=None,
             protocol_version=4):
    '''
    Connect to a Cassandra cluster.

    :param contact_points: The Cassandra cluster addresses, can either be a string or a list of IPs.
    :type  contact_points: str or list of str
    :param cql_user:       The Cassandra user if authentication is turned on.
    :type  cql_user:       str
    :param cql_pass:       The Cassandra user password if authentication is turned on.
    :type  cql_pass:       str
    :param port:           The Cassandra cluster port, defaults to None.
    :type  port:           int
    :param protocol_version:  Cassandra protocol version to use.
    :type  protocol_version:  int
    :return:               The session and cluster objects.
    :rtype:                cluster object, session object
    '''
    # Lazy load the Cassandra cluster and session for this module by creating a
    # cluster and session when cql_query is called the first time. Get the
    # Cassandra cluster and session from this module's __context__ after it is
    # loaded the first time cql_query is called.
    #
    # TODO: Call cluster.shutdown() when the module is unloaded on
    # master/minion shutdown. Currently, Master.shutdown() and Minion.shutdown()
    # do nothing to allow loaded modules to gracefully handle resources stored
    # in __context__ (i.e. connection pools). This means that the connection
    # pool is orphaned and Salt relies on Cassandra to reclaim connections.
    # Perhaps if Master/Minion daemons could be enhanced to call an "__unload__"
    # function, or something similar for each loaded module, connection pools
    # and the like can be gracefully reclaimed/shutdown.
    if (__context__ and 'cassandra_cql_returner_cluster' in __context__
            and 'cassandra_cql_returner_session' in __context__):
        # Reuse the cached connection from an earlier call
        return __context__['cassandra_cql_returner_cluster'], __context__[
            'cassandra_cql_returner_session']
    else:

        contact_points = _load_properties(property_name=contact_points,
                                          config_option='cluster')
        contact_points = contact_points if isinstance(
            contact_points, list) else contact_points.split(',')
        port = _load_properties(property_name=port,
                                config_option='port',
                                set_default=True,
                                default=9042)
        cql_user = _load_properties(property_name=cql_user,
                                    config_option='username',
                                    set_default=True,
                                    default="cassandra")
        cql_pass = _load_properties(property_name=cql_pass,
                                    config_option='password',
                                    set_default=True,
                                    default="cassandra")
        protocol_version = _load_properties(property_name=protocol_version,
                                            config_option='protocol_version',
                                            set_default=True,
                                            default=4)

        try:
            auth_provider = PlainTextAuthProvider(username=cql_user,
                                                  password=cql_pass)
            ssl_opts = _get_ssl_opts()
            if ssl_opts:
                cluster = Cluster(contact_points,
                                  port=port,
                                  auth_provider=auth_provider,
                                  ssl_options=ssl_opts,
                                  protocol_version=protocol_version,
                                  compression=True)
            else:
                cluster = Cluster(contact_points,
                                  port=port,
                                  auth_provider=auth_provider,
                                  protocol_version=protocol_version,
                                  compression=True)
            # Retry the initial connection up to three times before giving up
            for recontimes in range(1, 4):
                try:
                    session = cluster.connect()
                    break
                except OperationTimedOut:
                    # Lazy %-style args: the message is only formatted if the
                    # record is actually emitted
                    log.warning(
                        'Cassandra cluster.connect timed out, try %s',
                        recontimes)
                    if recontimes >= 3:
                        raise

            # TODO: Call cluster.shutdown() when the module is unloaded on shutdown.
            __context__['cassandra_cql_returner_cluster'] = cluster
            __context__['cassandra_cql_returner_session'] = session
            __context__['cassandra_cql_prepared'] = {}

            log.debug(
                'Successfully connected to Cassandra cluster at %s',
                contact_points)
            return cluster, session
        except TypeError:
            # NOTE(review): swallowing TypeError makes this function fall
            # through and implicitly return None instead of the
            # (cluster, session) tuple; callers must tolerate that. Kept
            # as-is to preserve existing behavior.
            pass
        except (ConnectionException, ConnectionShutdown, NoHostAvailable):
            log.error('Could not connect to Cassandra cluster at %s',
                      contact_points)
            raise CommandExecutionError(
                'ERROR: Could not connect to Cassandra cluster.')
Esempio n. 50
0
File: data.py Progetto: veym4os/salt
def subdict_match(data,
                  expr,
                  delimiter=DEFAULT_TARGET_DELIM,
                  regex_match=False,
                  exact_match=False):
    '''
    Check for a match in a dictionary using a delimiter character to denote
    levels of subdicts, and also allowing the delimiter character to be
    matched. Thus, 'foo:bar:baz' will match data['foo'] == 'bar:baz' and
    data['foo']['bar'] == 'baz'. The latter would take priority over the
    former, as more deeply-nested matches are tried first.

    data
        The dictionary to search.

    expr
        The delimited match expression, e.g. ``'foo:bar:baz'``.

    delimiter
        Character separating levels in ``expr`` (default ``:``).

    regex_match
        Treat the final pattern component as a regular expression.

    exact_match
        Require exact equality instead of shell-glob matching.

    Returns ``True`` on a match, ``False`` otherwise.
    '''
    def _match(target, pattern, regex_match=False, exact_match=False):
        # Compare a single scalar against a pattern, case-insensitively.
        #
        # The reason for using six.text_type first and _then_ using
        # to_unicode as a fallback is because we want to eventually have
        # unicode types for comparison below. If either value is numeric then
        # six.text_type will turn it into a unicode string. However, if the
        # value is a PY2 str type with non-ascii chars, then the result will be
        # a UnicodeDecodeError. In those cases, we simply use to_unicode to
        # decode it to unicode. The reason we can't simply use to_unicode to
        # begin with is that (by design) to_unicode will raise a TypeError if a
        # non-string/bytestring/bytearray value is passed.
        try:
            target = six.text_type(target).lower()
        except UnicodeDecodeError:
            target = salt.utils.stringutils.to_unicode(target).lower()
        try:
            pattern = six.text_type(pattern).lower()
        except UnicodeDecodeError:
            pattern = salt.utils.stringutils.to_unicode(pattern).lower()

        if regex_match:
            try:
                return re.match(pattern, target)
            except Exception:
                # An invalid pattern is treated as a non-match, not an error.
                log.error('Invalid regex \'%s\' in match', pattern)
                return False
        else:
            # Default to shell-glob semantics unless an exact comparison
            # was requested.
            return target == pattern if exact_match \
                else fnmatch.fnmatch(target, pattern)

    def _dict_match(target, pattern, regex_match=False, exact_match=False):
        # Match ``pattern`` against a dict. A leading '*:' means the rest of
        # the pattern may match under any key of ``target``.
        wildcard = pattern.startswith('*:')
        if wildcard:
            pattern = pattern[2:]

        if pattern == '*':
            # We are just checking that the key exists
            return True
        elif pattern in target:
            # We might want to search for a key
            return True
        elif subdict_match(target,
                           pattern,
                           regex_match=regex_match,
                           exact_match=exact_match):
            return True
        if wildcard:
            # Try the remaining pattern under every key, recursing into
            # nested dicts and scanning list members.
            for key in target:
                if isinstance(target[key], dict):
                    if _dict_match(target[key],
                                   pattern,
                                   regex_match=regex_match,
                                   exact_match=exact_match):
                        return True
                elif isinstance(target[key], list):
                    for item in target[key]:
                        if _match(item,
                                  pattern,
                                  regex_match=regex_match,
                                  exact_match=exact_match):
                            return True
                elif _match(target[key],
                            pattern,
                            regex_match=regex_match,
                            exact_match=exact_match):
                    return True
        return False

    # NOTE: the original code repeated this split-and-guard block twice
    # verbatim; the duplicate has been removed.
    splits = expr.split(delimiter)
    num_splits = len(splits)
    if num_splits == 1:
        # Delimiter not present, this can't possibly be a match
        return False

    # If we have 4 splits, then we have three delimiters. Thus, the indexes we
    # want to use are 3, 2, and 1, in that order.
    for idx in range(num_splits - 1, 0, -1):
        key = delimiter.join(splits[:idx])
        if key == '*':
            # We are matching on everything under the top level, so we need to
            # treat the match as the entire data being passed in
            matchstr = expr
            match = data
        else:
            matchstr = delimiter.join(splits[idx:])
            match = traverse_dict_and_list(data, key, {}, delimiter=delimiter)
        log.debug("Attempting to match '%s' in '%s' using delimiter '%s'",
                  matchstr, key, delimiter)
        if match == {}:
            # Nothing found at this nesting depth; try a shallower key.
            continue
        if isinstance(match, dict):
            if _dict_match(match,
                           matchstr,
                           regex_match=regex_match,
                           exact_match=exact_match):
                return True
            continue
        if isinstance(match, (list, tuple)):
            # We are matching a single component to a single list member
            for member in match:
                if isinstance(member, dict):
                    if _dict_match(member,
                                   matchstr,
                                   regex_match=regex_match,
                                   exact_match=exact_match):
                        return True
                if _match(member,
                          matchstr,
                          regex_match=regex_match,
                          exact_match=exact_match):
                    return True
            continue
        if _match(match,
                  matchstr,
                  regex_match=regex_match,
                  exact_match=exact_match):
            return True
    return False
Esempio n. 51
0
    def _test_hwclock_sync(self):
        """
        Check that hw and sw clocks are sync'd.

        Runs ``hwclock --compare``, reads the first comparison line, and
        asserts the hardware/software clock difference is within 2 seconds.
        Skips silently (returns ``None``) when the platform has no settable
        hardware clock or ``hwclock`` lacks ``--compare`` support.
        """
        if not self.run_function("system.has_settable_hwclock"):
            return None
        if not self._hwclock_has_compare():
            return None

        # Derived from BaseException (not Exception) so broad ``except
        # Exception`` handlers in intervening code cannot swallow it.
        class CompareTimeout(BaseException):
            pass

        def _alrm_handler(sig, frame):
            # SIGALRM handler: abort the read if hwclock produces no output.
            log.warning(
                "hwclock --compare failed to produce output after 3 seconds")
            raise CompareTimeout

        # Two attempts; the for/else below logs an error only when neither
        # attempt reaches the ``break``.
        for _ in range(2):
            try:
                orig_handler = signal.signal(signal.SIGALRM, _alrm_handler)
                signal.alarm(3)
                # Pipe hwclock's stdout back to this process for parsing.
                rpipeFd, wpipeFd = os.pipe()
                log.debug("Comparing hwclock to sys clock")
                with os.fdopen(rpipeFd, "r") as rpipe:
                    with os.fdopen(wpipeFd, "w") as wpipe:
                        with salt.utils.files.fopen(os.devnull, "r") as nulFd:
                            p = subprocess.Popen(
                                args=["hwclock", "--compare"],
                                stdin=nulFd,
                                stdout=wpipeFd,
                                stderr=subprocess.PIPE,
                            )
                            p.communicate()

                            # read header
                            rpipe.readline()

                            # read first time comparison
                            timeCompStr = rpipe.readline()

                            # stop
                            p.terminate()

                            # Line format: "<hw time> <sys time> ..." —
                            # compare the first two columns.
                            timeComp = timeCompStr.split()
                            hwTime = float(timeComp[0])
                            swTime = float(timeComp[1])
                            diff = abs(hwTime - swTime)

                            self.assertTrue(
                                diff <= 2.0,
                                msg=("hwclock difference too big: " +
                                     six.text_type(timeCompStr)),
                            )
                            break
            except CompareTimeout:
                # Timed out waiting for output; kill hwclock and retry once.
                p.terminate()
            finally:
                # Always cancel the alarm and restore the previous handler.
                signal.alarm(0)
                signal.signal(signal.SIGALRM, orig_handler)
        else:
            log.error("Failed to check hwclock sync")
Esempio n. 52
0
    def test_call_success(self):
        '''
        test module calling inside containers

        Patches out all docker interactions, invokes ``docker_mod.call``
        twice, and verifies that each invocation uses a fresh temporary
        directory inside the container (mkdir / untar / salt-call / rm -rf),
        and that the parsed salt-call JSON output is returned.
        '''
        ret = None
        # run_all always "succeeds" and returns salt-call JSON on stdout.
        docker_run_all_mock = MagicMock(
            return_value={
                'retcode': 0,
                'stdout': '{"retcode": 0, "comment": "container cmd"}',
                'stderr': 'err',
            })
        docker_copy_to_mock = MagicMock(
            return_value={
                'retcode': 0
            })
        docker_config_mock = MagicMock(
            return_value=''
            )
        client = Mock()
        client.put_archive = Mock()
        get_client_mock = MagicMock(return_value=client)

        context = {'docker.exec_driver': 'docker-exec'}
        salt_dunder = {'config.option': docker_config_mock}

        with patch.object(docker_mod, 'run_all', docker_run_all_mock), \
                patch.object(docker_mod, 'copy_to', docker_copy_to_mock), \
                patch.object(docker_mod, '_get_client', get_client_mock), \
                patch.dict(docker_mod.__opts__, {'cachedir': '/tmp'}), \
                patch.dict(docker_mod.__salt__, salt_dunder), \
                patch.dict(docker_mod.__context__, context):
            # call twice to verify tmp path later
            for i in range(2):
                ret = docker_mod.call('ID', 'test.arg', 1, 2, arg1='val1')

        # Check that the directory is different each time
        # [ call(name, [args]), ...
        # Each docker_mod.call() issues four run_all invocations — mkdir,
        # tarfile untar, salt-call, rm -rf — so indices 0-3 belong to the
        # first call and 4-7 to the second; mock_calls[i][1][1] is the
        # command string passed to run_all.
        self.maxDiff = None
        self.assertIn('mkdir', docker_run_all_mock.mock_calls[0][1][1])
        self.assertIn('mkdir', docker_run_all_mock.mock_calls[4][1][1])
        self.assertNotEqual(docker_run_all_mock.mock_calls[0][1][1],
                            docker_run_all_mock.mock_calls[4][1][1])

        self.assertIn('salt-call', docker_run_all_mock.mock_calls[2][1][1])
        self.assertIn('salt-call', docker_run_all_mock.mock_calls[6][1][1])
        self.assertNotEqual(docker_run_all_mock.mock_calls[2][1][1],
                            docker_run_all_mock.mock_calls[6][1][1])

        # check thin untar
        self.assertIn('tarfile', docker_run_all_mock.mock_calls[1][1][1])
        self.assertIn('tarfile', docker_run_all_mock.mock_calls[5][1][1])
        self.assertNotEqual(docker_run_all_mock.mock_calls[1][1][1],
                            docker_run_all_mock.mock_calls[5][1][1])

        # check directory cleanup
        self.assertIn('rm -rf', docker_run_all_mock.mock_calls[3][1][1])
        self.assertIn('rm -rf', docker_run_all_mock.mock_calls[7][1][1])
        self.assertNotEqual(docker_run_all_mock.mock_calls[3][1][1],
                            docker_run_all_mock.mock_calls[7][1][1])

        self.assertEqual({"retcode": 0, "comment": "container cmd"}, ret)
Esempio n. 53
0
def diff(*args, **kwargs):
    '''
    Return the DIFFERENCE of the result sets returned by each matching minion
    pool

    .. versionadded:: 2014.7.0

    These pools are determined from the aggregated and sorted results of
    a salt command.

    This command displays the "diffs" as a series of 2-way differences --
    namely the difference between the FIRST displayed minion pool
    (according to sort order) and EACH SUBSEQUENT minion pool result set.

    Differences are displayed according to the Python ``difflib.unified_diff()``
    as in the case of the salt execution module ``file.get_diff``.

    This command is submitted via a salt runner using the general form::

        salt-run survey.diff [survey_sort=up/down] <target>
                     <salt-execution-module> <salt-execution-module parameters>

    Optionally accept a ``survey_sort=`` parameter. Default:
    ``survey_sort=down``

    CLI Example #1: (Example to display the "differences of files")

    .. code-block:: bash

        salt-run survey.diff survey_sort=up "*" cp.get_file_str file:///etc/hosts
    '''
    # TODO: The salt execution module "cp.get_file_str file:///..." is a
    # non-obvious way to display the differences between files using
    # survey.diff .  A more obvious method needs to be found or developed.

    import difflib

    bulk_ret = _get_pool_results(*args, **kwargs)

    is_first_time = True
    for k in bulk_ret:
        print('minion pool :\n' '------------')
        print(k['pool'])
        print('pool size :\n' '----------')
        print('    ' + str(len(k['pool'])))
        if is_first_time:
            # The first pool (sort order) is the baseline: print its result
            # verbatim and diff every later pool against it.
            is_first_time = False
            print('pool result :\n' '------------')
            print('    ' + bulk_ret[0]['result'])
            print()
            continue

        outs = ('differences from "{0}" results :').format(
            bulk_ret[0]['pool'][0])
        print(outs)
        print('-' * (len(outs) - 1))
        # difflib.unified_diff expects each line to keep its trailing
        # newline; splitlines() strips them, so re-append one per line.
        from_result = [line + '\n'
                       for line in bulk_ret[0]['result'].splitlines()]
        to_result = [line + '\n' for line in k['result'].splitlines()]
        outs = ''.join(
            difflib.unified_diff(from_result,
                                 to_result,
                                 fromfile=bulk_ret[0]['pool'][0],
                                 tofile=k['pool'][0],
                                 n=0))
        print(outs)
        print()

    return bulk_ret
Esempio n. 54
0
def query(params=None,
          setname=None,
          requesturl=None,
          location=None,
          return_url=False,
          return_root=False,
          opts=None,
          provider=None,
          endpoint=None,
          product='ec2',
          sigver='2'):
    '''
    Perform a query against AWS services using Signature Version 2 Signing
    Process. This is documented at:

    http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html

    Regions and endpoints are documented at:

    http://docs.aws.amazon.com/general/latest/gr/rande.html

    Default ``product`` is ``ec2``. Valid ``product`` names are:

    .. code-block: yaml

        - autoscaling (Auto Scaling)
        - cloudformation (CloudFormation)
        - ec2 (Elastic Compute Cloud)
        - elasticache (ElastiCache)
        - elasticbeanstalk (Elastic BeanStalk)
        - elasticloadbalancing (Elastic Load Balancing)
        - elasticmapreduce (Elastic MapReduce)
        - iam (Identity and Access Management)
        - importexport (Import/Export)
        - monitoring (CloudWatch)
        - rds (Relational Database Service)
        - simpledb (SimpleDB)
        - sns (Simple Notification Service)
        - sqs (Simple Queue Service)

    Returns a list of parsed-XML result dicts, or ``{'error': ...}`` on
    failure; when ``return_url`` is True a ``(result, requesturl)`` tuple
    is returned instead.
    '''
    if params is None:
        params = {}

    if opts is None:
        opts = {}

    function = opts.get('function', (None, product))
    providers = opts.get('providers', {})

    # Resolve the provider configuration dict, either from the explicit
    # ``provider`` name or from the first configured driver for ``product``.
    if provider is None:
        prov_dict = providers.get(function[1], {}).get(product, {})
        if prov_dict:
            driver = list(list(prov_dict.keys()))[0]
            provider = providers.get(driver, product)
    else:
        prov_dict = providers.get(provider, {}).get(product, {})

    service_url = prov_dict.get('service_url', 'amazonaws.com')

    if not location:
        location = get_location(opts, provider)

    # Work out the endpoint: explicit > derived from provider config >
    # parsed out of an explicit requesturl.
    if endpoint is None:
        if not requesturl:
            endpoint = prov_dict.get(
                'endpoint', '{0}.{1}.{2}'.format(product, location,
                                                 service_url))

            requesturl = 'https://{0}/'.format(endpoint)
        else:
            endpoint = urlparse(requesturl).netloc
            if endpoint == '':
                endpoint_err = (
                    'Could not find a valid endpoint in the '
                    'requesturl: {0}. Looking for something '
                    'like https://some.aws.endpoint/?args').format(requesturl)
                LOG.error(endpoint_err)
                if return_url is True:
                    return {'error': endpoint_err}, requesturl
                return {'error': endpoint_err}

    LOG.debug('Using AWS endpoint: {0}'.format(endpoint))
    method = 'GET'

    aws_api_version = prov_dict.get(
        'aws_api_version',
        prov_dict.get('{0}_api_version'.format(product),
                      DEFAULT_AWS_API_VERSION))

    # Sign the request: SigV4 puts the signature in headers, SigV2 in the
    # query parameters.
    if sigver == '4':
        headers, requesturl = sig4(method,
                                   endpoint,
                                   params,
                                   prov_dict,
                                   aws_api_version,
                                   location,
                                   product,
                                   requesturl=requesturl)
        params_with_headers = {}
    else:
        params_with_headers = sig2(method, endpoint, params, prov_dict,
                                   aws_api_version)
        headers = {}

    attempts = 5
    while attempts > 0:
        LOG.debug('AWS Request: {0}'.format(requesturl))
        LOG.trace('AWS Request Parameters: {0}'.format(params_with_headers))
        try:
            result = requests.get(requesturl,
                                  headers=headers,
                                  params=params_with_headers)
            LOG.debug('AWS Response Status Code: {0}'.format(
                result.status_code))
            LOG.trace('AWS Response Text: {0}'.format(result.text))
            result.raise_for_status()
            break
        except requests.exceptions.HTTPError as exc:
            root = ET.fromstring(exc.response.content)
            data = xml.to_dict(root)

            # BUGFIX: Python 3 unbinds ``exc`` when this except clause
            # exits, so stash what the while/else below needs for its
            # final error report.
            last_exc = exc
            last_status = exc.response.status_code

            # check to see if we should retry the query
            # (the original also tested ``attempts > 0`` here, which is
            # always true inside the loop)
            err_code = data.get('Errors', {}).get('Error', {}).get('Code', '')
            if err_code and err_code in AWS_RETRY_CODES:
                attempts -= 1
                LOG.error('AWS Response Status Code and Error: [{0} {1}] {2}; '
                          'Attempts remaining: {3}'.format(
                              exc.response.status_code, exc, data, attempts))
                # Wait a bit before continuing to prevent throttling
                time.sleep(2)
                continue

            # Non-retryable error: report it immediately.
            LOG.error(
                'AWS Response Status Code and Error: [{0} {1}] {2}'.format(
                    exc.response.status_code, exc, data))
            if return_url is True:
                return {'error': data}, requesturl
            return {'error': data}
    else:
        # The loop fell through without ``break``: every attempt failed
        # with a retryable error. ``data``/``last_*`` were set by the last
        # iteration's except clause above.
        LOG.error('AWS Response Status Code and Error: [{0} {1}] {2}'.format(
            last_status, last_exc, data))
        if return_url is True:
            return {'error': data}, requesturl
        return {'error': data}

    response = result.text

    root = ET.fromstring(response)
    # By default the second child of the response root is taken as the
    # item set; ``return_root`` keeps the whole document instead.
    items = root[1]
    if return_root is True:
        items = root

    if setname:
        # Locate the child element whose (namespace-stripped) tag matches
        # ``setname`` and use it as the item set.
        if sys.version_info < (2, 7):
            children_len = len(root.getchildren())
        else:
            children_len = len(root)

        for item in range(0, children_len):
            comps = root[item].tag.split('}')
            if comps[1] == setname:
                items = root[item]

    ret = []
    for item in items:
        ret.append(xml.to_dict(item))

    if return_url is True:
        return ret, requesturl

    return ret
Esempio n. 55
0
 def wrap(cls):
     """Run ``caller`` on *cls* ``times`` times, returning *cls* unchanged.

     ``times``, ``caller`` and ``log`` come from the enclosing decorator
     scope.
     """
     # NOTE: the original assigned ``result = None`` but never used it;
     # the dead variable has been removed.
     for attempt in range(1, times + 1):
         log.info('%s test run %d of %s times', cls, attempt, times)
         caller(cls)
     return cls
Esempio n. 56
0
def managed(name,
            dns_proto=None,
            dns_servers=None,
            ip_proto=None,
            ip_addrs=None,
            gateway=None,
            enabled=True,
            **kwargs):
    """
    Ensure that the named interface is configured properly.

    Args:

        name (str):
            The name of the interface to manage

        dns_proto (str): None
            Set to ``static`` and use the ``dns_servers`` parameter to provide a
            list of DNS nameservers. set to ``dhcp`` to use DHCP to get the DNS
            servers.

        dns_servers (list): None
            A list of static DNS servers. To clear the list of DNS servers pass
            an empty list (``[]``). ``None`` will make no changes.

        ip_proto (str): None
            Set to ``static`` and use the ``ip_addrs`` and (optionally)
            ``gateway`` parameters to provide a list of static IP addresses and
            the default gateway. Set to ``dhcp`` to use DHCP.

        ip_addrs (list): None
            A list of static IP addresses with netmask flag, ie: 192.168.0.11/24

        gateway (str): None
            The gateway to set for the interface

        enabled (bool): True
            Set to ``False`` to ensure that this interface is disabled.

    Returns:
        dict: A dictionary of old and new settings

    Example:

    .. code-block:: yaml

        Ethernet1:
          network.managed:
            - dns_proto: static
            - dns_servers:
              - 8.8.8.8
              - 8.8.8.4
            - ip_proto: static
            - ip_addrs:
              - 192.168.0.100/24

    Clear DNS entries example:

    .. code-block:: yaml

        Ethernet1:
          network.managed:
            - dns_proto: static
            - dns_servers: []
            - ip_proto: dhcp
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Interface '{0}' is up to date".format(name),
    }

    # Normalize protocol selectors (turns None into the string 'none') for
    # case-insensitive comparison against __VALID_PROTO.
    dns_proto = six.text_type(dns_proto).lower()
    ip_proto = six.text_type(ip_proto).lower()

    # Validate the protocol selectors before touching the interface.
    errors = []
    if dns_proto not in __VALID_PROTO:
        ret["result"] = False
        errors.append("dns_proto must be one of the following: {0}".format(
            ", ".join(__VALID_PROTO)))

    if ip_proto not in __VALID_PROTO:
        errors.append("ip_proto must be one of the following: {0}".format(
            ", ".join(__VALID_PROTO)))

    if errors:
        ret["result"] = False
        ret["comment"] = "\n".join(errors)
        return ret

    # A failed query (e.g. the interface does not exist yet) is treated the
    # same as "not enabled".
    try:
        currently_enabled = __salt__["ip.is_enabled"](name)
    except CommandExecutionError:
        currently_enabled = False

    if not enabled:
        # Desired state is "disabled": only the enable/disable flag is
        # managed; no addressing changes are applied.
        if currently_enabled:
            if __opts__["test"]:
                ret["result"] = None
                ret["comment"] = "Interface '{0}' will be disabled".format(
                    name)
            else:
                ret["result"] = __salt__["ip.disable"](name)
                if not ret["result"]:
                    ret["comment"] = "Failed to disable interface '{0}'".format(
                        name)
        else:
            ret["comment"] += " (already disabled)"
        return ret
    else:
        # The interface must be enabled before its settings can be changed.
        if not currently_enabled:
            if __opts__["test"]:
                ret["result"] = None
                ret["comment"] = "Interface '{0}' will be enabled".format(name)
            else:
                if not __salt__["ip.enable"](name):
                    ret["result"] = False
                    ret["comment"] = ("Failed to enable interface '{0}' to "
                                      "make changes".format(name))
                    return ret

        # Cross-validate the requested DNS/IP settings combination.
        errors = _validate(dns_proto, dns_servers, ip_proto, ip_addrs, gateway)
        if errors:
            ret["result"] = False
            ret["comment"] = ("The following SLS configuration errors were "
                              "detected:\n- {0}".format("\n- ".join(errors)))
            return ret

        old = __salt__["ip.get_interface"](name)
        if not old:
            ret["result"] = False
            ret["comment"] = ("Unable to get current configuration for "
                              "interface '{0}'".format(name))
            return ret

        # Compute the delta between current and desired settings.
        changes = _changes(old, dns_proto, dns_servers, ip_proto, ip_addrs,
                           gateway)

        # If dns_servers is the default `None` make no changes
        # To clear the list, pass an empty list
        if str(dns_servers).lower() == "none":
            changes.pop("dns_servers", None)

        if not changes:
            return ret

        # Test mode: describe the pending changes without applying them.
        if __opts__["test"]:
            comments = []
            if "dns_proto" in changes:
                comments.append("DNS protocol will be changed to: {0}".format(
                    changes["dns_proto"]))
            if dns_proto == "static" and "dns_servers" in changes:
                if len(changes["dns_servers"]) == 0:
                    comments.append("The list of DNS servers will be cleared")
                else:
                    comments.append(
                        "DNS servers will be set to the following: {0}".format(
                            ", ".join(changes["dns_servers"])))
            if "ip_proto" in changes:
                comments.append("IP protocol will be changed to: {0}".format(
                    changes["ip_proto"]))
            if ip_proto == "static":
                if "ip_addrs" in changes:
                    comments.append(
                        "IP addresses will be set to the following: {0}".
                        format(", ".join(changes["ip_addrs"])))
                if "gateway" in changes:
                    if changes["gateway"] is None:
                        comments.append("Default gateway will be removed")
                    else:
                        comments.append(
                            "Default gateway will be set to {0}".format(
                                changes["gateway"]))

            ret["result"] = None
            ret["comment"] = ("The following changes will be made to "
                              "interface '{0}':\n- {1}".format(
                                  name, "\n- ".join(comments)))
            return ret

        # Apply DNS changes first, then IP addressing.
        if changes.get("dns_proto") == "dhcp":
            __salt__["ip.set_dhcp_dns"](name)

        elif "dns_servers" in changes:
            if len(changes["dns_servers"]) == 0:
                # To clear the list of DNS servers you have to pass []. Later
                # changes gets passed like *args and a single empty list is
                # converted to an empty tuple. So, you have to add [] here
                changes["dns_servers"] = [[]]

            __salt__["ip.set_static_dns"](name, *changes["dns_servers"])

        if changes.get("ip_proto") == "dhcp":
            __salt__["ip.set_dhcp_ip"](name)
        elif (changes.get("ip_addrs") or changes.get("gateway")
              or changes.get("ip_proto") == "static"):
            # If only the gateway or the protocol changed, fall back to the
            # requested address list so there is something to apply.
            if changes.get("gateway") and not changes.get("ip_addrs"):
                changes["ip_addrs"] = ip_addrs
            if changes.get(
                    "ip_proto") == "static" and not changes.get("ip_addrs"):
                changes["ip_addrs"] = ip_addrs
            # First address replaces the existing config (append=False) and
            # carries the gateway; the rest are appended.
            for idx in range(len(changes["ip_addrs"])):
                if idx == 0:
                    __salt__["ip.set_static_ip"](name,
                                                 changes["ip_addrs"][idx],
                                                 gateway=gateway,
                                                 append=False)
                else:
                    __salt__["ip.set_static_ip"](name,
                                                 changes["ip_addrs"][idx],
                                                 gateway=None,
                                                 append=True)

        # Re-read the interface and verify the desired state was reached.
        new = __salt__["ip.get_interface"](name)
        ret["changes"] = salt.utils.data.compare_dicts(old, new)
        if _changes(new, dns_proto, dns_servers, ip_proto, ip_addrs, gateway):
            ret["result"] = False
            ret["comment"] = ("Failed to set desired configuration settings "
                              "for interface '{0}'".format(name))
        else:
            ret["comment"] = "Successfully updated configuration for " "interface '{0}'".format(
                name)
        return ret
Esempio n. 57
0
File: tls.py Progetto: saltyus/salt
def create_ca_signed_cert(ca_name, CN, days=365, cacert_path=None, cert_filename=None, digest='sha256'):
    '''
    Create a Certificate (CERT) signed by a named Certificate Authority (CA)

    If the certificate file already exists, the function just returns assuming
    the CERT already exists.

    The CN *must* match an existing CSR generated by create_csr. If it
    does not, this method does nothing.


    ca_name
        name of the CA

    CN
        common name matching the certificate signing request

    days
        number of days certificate is valid, default is 365 (1 year)

    cacert_path
        absolute path to ca certificates root directory

    cert_filename
        alternative filename for the certificate, useful when using special characters in the CN

    digest
        The message digest algorithm. Must be a string describing a digest
        algorithm supported by OpenSSL (by EVP_get_digestbyname, specifically).
        For example, "md5" or "sha1". Default: 'sha256'

    If the following values were set:

    .. code-block:: text

        ca.cert_base_path='/etc/pki'
        ca_name='koji'
        CN='test.egavas.org'

    the resulting signed certificate would be written in the following
    location:

    .. code-block:: text

        /etc/pki/koji/certs/test.egavas.org.crt

    CLI Example:

    .. code-block:: bash

        salt '*' tls.create_ca_signed_cert test localhost
    '''
    set_ca_path(cacert_path)

    # The on-disk certificate filename defaults to the CN.
    if not cert_filename:
        cert_filename = CN

    # Idempotence: never overwrite an existing signed certificate.
    if os.path.exists(
            '{0}/{1}/{2}.crt'.format(cert_base_path(),
                                     ca_name, cert_filename)
    ):
        return 'Certificate "{0}" already exists'.format(cert_filename)

    # Load the CA's certificate and private key; a missing file means the
    # named CA was never created.
    try:
        maybe_fix_ssl_version(ca_name)
        with salt.utils.fopen('{0}/{1}/{2}_ca_cert.crt'.format(cert_base_path(),
                                                               ca_name,
                                                               ca_name)) as fhr:
            ca_cert = OpenSSL.crypto.load_certificate(
                    OpenSSL.crypto.FILETYPE_PEM, fhr.read()
                )
        with salt.utils.fopen('{0}/{1}/{2}_ca_cert.key'.format(cert_base_path(),
                                                               ca_name,
                                                               ca_name)) as fhr:
            ca_key = OpenSSL.crypto.load_privatekey(
                    OpenSSL.crypto.FILETYPE_PEM,
                    fhr.read()
                )
    except IOError:
        return 'There is no CA named "{0}"'.format(ca_name)

    # Load the pending CSR for this CN/filename.
    try:
        with salt.utils.fopen('{0}/{1}/certs/{2}.csr'.format(cert_base_path(),
                                                             ca_name,
                                                             cert_filename)) as fhr:
            req = OpenSSL.crypto.load_certificate_request(
                    OpenSSL.crypto.FILETYPE_PEM,
                    fhr.read()
                    )
    except IOError:
        return 'There is no CSR that matches the CN "{0}"'.format(cert_filename)

    # Copy any X509v3 extensions from the CSR onto the certificate.
    exts = []
    try:
        # see: http://bazaar.launchpad.net/~exarkun/pyopenssl/master/revision/189
        # support is there from quite a long time, but without API
        # so we mimic the newly get_extensions method present in ultra
        # recent pyopenssl distros
        # NOTE(review): this reaches into pyOpenSSL's private cffi layer
        # (OpenSSL._util.lib); it may break on pyOpenSSL upgrades, hence
        # the broad best-effort except below.
        native_exts_obj = OpenSSL._util.lib.X509_REQ_get_extensions(req._req)
        for i in range(OpenSSL._util.lib.sk_X509_EXTENSION_num(native_exts_obj)):
            ext = OpenSSL.crypto.X509Extension.__new__(OpenSSL.crypto.X509Extension)
            ext._extension = OpenSSL._util.lib.sk_X509_EXTENSION_value(native_exts_obj, i)
            exts.append(ext)
    except Exception:
        log.error('Support for extensions is not available, upgrade PyOpenSSL')

    # Build and sign the certificate from the CSR's subject and public key.
    cert = OpenSSL.crypto.X509()
    # The version field is zero-based: 2 means an X.509v3 certificate.
    cert.set_version(2)
    cert.set_subject(req.get_subject())
    cert.gmtime_adj_notBefore(0)
    cert.gmtime_adj_notAfter(int(days) * 24 * 60 * 60)
    if exts:
        cert.add_extensions(exts)
    cert.set_serial_number(_new_serial(ca_name, CN))
    cert.set_issuer(ca_cert.get_subject())
    cert.set_pubkey(req.get_pubkey())
    cert.sign(ca_key, digest)

    # Write the signed certificate next to the CSR and record it in the
    # CA's database.
    with salt.utils.fopen('{0}/{1}/certs/{2}.crt'.format(cert_base_path(),
                                                         ca_name,
                                                         cert_filename), 'w+') as crt:
        crt.write(
            OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_PEM,
                cert
                )
            )

    _write_cert_to_database(ca_name, cert)

    return ('Created Certificate for "{0}": '
            '"{1}/{2}/certs/{3}.crt"').format(
                    CN,
                    cert_base_path(),
                    ca_name,
                    cert_filename
                    )
Esempio n. 58
0
def keys_present(name, number, save_dir, region=None, key=None, keyid=None, profile=None):
    '''

    .. versionadded:: 2015.8

    Ensure the IAM access keys are present.

    name (string)
        The name of the new user.

    number (int)
        Number of keys that user should have.

    save_dir (string)
        The directory that the key/keys will be saved. Keys are saved to a file named according
        to the username provided.

    region (string)
        Region to connect to.

    key (string)
        Secret key to be used.

    keyid (string)
        Access key to be used.

    profile (dict)
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    # Fail fast if the user does not exist; keys cannot be attached to it.
    if not __salt__['boto_iam.get_user'](name, region, key, keyid, profile):
        ret['result'] = False
        ret['comment'] = 'IAM User {0} does not exist.'.format(name)
        return ret
    if not isinstance(number, int):
        ret['comment'] = 'The number of keys must be an integer.'
        ret['result'] = False
        return ret
    # Validate the destination directory up front, before touching AWS.
    if not os.path.isdir(save_dir):
        ret['comment'] = 'The directory {0} does not exist.'.format(save_dir)
        ret['result'] = False
        return ret
    keys = __salt__['boto_iam.get_all_access_keys'](user_name=name, region=region, key=key,
                                                    keyid=keyid, profile=profile)
    # The boto execution module returns an error string on failure.
    if isinstance(keys, str):
        log.debug('keys are : false {0}'.format(keys))
        error, message = _get_error(keys)
        ret['comment'] = 'Could not get keys.\n{0}\n{1}'.format(error, message)
        ret['result'] = False
        return ret
    keys = keys['list_access_keys_response']['list_access_keys_result']['access_key_metadata']
    log.debug('Keys are : {0}.'.format(keys))
    if len(keys) >= number:
        ret['comment'] = 'The number of keys exist for user {0}'.format(name)
        ret['result'] = True
        return ret
    if __opts__['test']:
        ret['comment'] = 'Access key is set to be created for {0}.'.format(name)
        ret['result'] = None
        return ret
    new_keys = {}
    # Create only the shortfall between the requested and existing key count.
    for i in range(number - len(keys)):
        created = __salt__['boto_iam.create_access_key'](name, region, key, keyid, profile)
        if isinstance(created, str):
            error, message = _get_error(created)
            ret['comment'] = 'Could not create keys.\n{0}\n{1}'.format(error, message)
            ret['result'] = False
            return ret
        log.debug('Created is : {0}'.format(created))
        response = 'create_access_key_response'
        result = 'create_access_key_result'
        # BUGFIX: the labels were previously swapped — 'key-N' held the access
        # key id and 'key_id-N' held the secret. 'key_id-N' is the access key
        # id; 'key-N' is the secret access key.
        new_keys['key_id-{0}'.format(i)] = created[response][result]['access_key']['access_key_id']
        new_keys['key-{0}'.format(i)] = created[response][result]['access_key']['secret_access_key']
    save_path = os.path.join(save_dir, name)
    try:
        # Append so that previously saved credentials are never clobbered.
        with salt.utils.fopen(save_path, 'a') as _wrf:
            for key_id, access_key in new_keys.items():
                _wrf.write('{0}\n{1}\n'.format(key_id, access_key))
        ret['comment'] = 'Keys have been written to file {0}/{1}.'.format(save_dir, name)
        ret['result'] = True
        ret['changes'] = new_keys
        return ret
    except IOError:
        ret['comment'] = 'Could not write to file {0}/{1}.'.format(save_dir, name)
        ret['result'] = False
        return ret
Esempio n. 59
0
def read_certificate(certificate):
    '''
    Returns a dict containing details of a certificate. Input can be a PEM string or file path.

    certificate:
        The certificate to be read. Can be a path to a certificate file, or a string containing
        the PEM formatted text of the certificate.

    CLI Example:

    .. code-block:: bash

        salt '*' x509.read_certificate /etc/pki/mycert.crt
    '''
    # Accept an already-parsed M2Crypto object or load from path/PEM text.
    cert = (certificate if isinstance(certificate, M2Crypto.X509.X509)
            else _get_certificate_obj(certificate))

    subject = cert.get_subject()
    issuer = cert.get_issuer()

    ret = {}
    # The stored version field is zero-based: X509v3 is stored as 2 and
    # X509v2 as 1 (https://tools.ietf.org/html/rfc5280#section-4.1.2.1).
    ret['Version'] = cert.get_version() + 1
    # size() reports bytes; key sizes are conventionally quoted in bits.
    ret['Key Size'] = cert.get_pubkey().size() * 8
    ret['Serial Number'] = _dec2hex(cert.get_serial_number())
    for label, digest in (('SHA-256 Finger Print', 'sha256'),
                          ('MD5 Finger Print', 'md5'),
                          ('SHA1 Finger Print', 'sha1')):
        ret[label] = _pretty_hex(cert.get_fingerprint(md=digest))
    ret['Subject'] = _parse_subject(subject)
    ret['Subject Hash'] = _dec2hex(subject.as_hash())
    ret['Issuer'] = _parse_subject(issuer)
    ret['Issuer Hash'] = _dec2hex(issuer.as_hash())
    ret['Not Before'] = cert.get_not_before().get_datetime().strftime('%Y-%m-%d %H:%M:%S')
    ret['Not After'] = cert.get_not_after().get_datetime().strftime('%Y-%m-%d %H:%M:%S')
    ret['Public Key'] = get_public_key(cert.as_pem())

    # Collect every X509v3 extension, flagging critical ones in the value.
    exts = OrderedDict()
    for idx in range(cert.get_ext_count()):
        extension = cert.get_ext_at(idx)
        value = extension.get_value()
        if extension.get_critical():
            value = 'critical ' + value
        exts[extension.get_name()] = value

    if exts:
        ret['X509v3 Extensions'] = exts

    return ret
Esempio n. 60
0
def create_table(
    table_name,
    region=None,
    key=None,
    keyid=None,
    profile=None,
    read_capacity_units=None,
    write_capacity_units=None,
    hash_key=None,
    hash_key_data_type=None,
    range_key=None,
    range_key_data_type=None,
    local_indexes=None,
    global_indexes=None,
):
    """
    Creates a DynamoDB table.

    Returns True once the table is visible, False if it has not appeared
    after the polling window (about 30 seconds).

    CLI Example:

    .. code-block:: bash

        salt myminion boto_dynamodb.create_table table_name /
        region=us-east-1 /
        hash_key=id /
        hash_key_data_type=N /
        range_key=created_at /
        range_key_data_type=N /
        read_capacity_units=1 /
        write_capacity_units=1
    """
    # Primary key schema: a hash key plus an optional range key.
    # (The previous primary_index_fields/primary_index_name locals were dead
    # code — built but never used — and have been removed.)
    schema = []
    if hash_key:
        schema.append(HashKey(hash_key, data_type=hash_key_data_type))
    if range_key:
        schema.append(RangeKey(range_key, data_type=range_key_data_type))
    throughput = {"read": read_capacity_units, "write": write_capacity_units}

    # Convert user-supplied index definitions into boto index objects.
    local_table_indexes = []
    if local_indexes:
        local_table_indexes = [extract_index(index) for index in local_indexes]
    global_table_indexes = []
    if global_indexes:
        global_table_indexes = [
            extract_index(index, global_index=True) for index in global_indexes
        ]

    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    Table.create(
        table_name,
        schema=schema,
        throughput=throughput,
        indexes=local_table_indexes,
        global_indexes=global_table_indexes,
        connection=conn,
    )

    # Table creation can take several seconds to propagate.
    # Poll once per second, up to MAX_ATTEMPTS times.
    MAX_ATTEMPTS = 30
    for _ in range(MAX_ATTEMPTS):
        if exists(table_name, region, key, keyid, profile):
            return True
        time.sleep(1)  # sleep for one second and try again
    return False