Example #1
def split_input(val, mapper=None):
    '''
    Take an input value and split it into a list, returning the resulting list
    '''
    if mapper is None:
        mapper = lambda x: x
    if isinstance(val, list):
        return list(map(mapper, val))
    try:
        return list(map(mapper, [x.strip() for x in val.split(',')]))
    except AttributeError:
        return list(map(mapper, [x.strip() for x in six.text_type(val).split(',')]))
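A standalone sketch of how split_input behaves for the three input shapes it handles; str() stands in for six.text_type (equivalent on Python 3), and the sample values are invented:

# Standalone sketch, not the Salt module itself.
def split_input(val, mapper=None):
    if mapper is None:
        mapper = lambda x: x
    if isinstance(val, list):
        return list(map(mapper, val))          # lists pass through, mapped
    try:
        return list(map(mapper, [x.strip() for x in val.split(',')]))
    except AttributeError:                     # not a string: coerce first
        return list(map(mapper, [x.strip() for x in str(val).split(',')]))

print(split_input('22, 80 ,443'))              # ['22', '80', '443']
print(split_input(['a', 'b']))                 # ['a', 'b']
print(split_input(8080, mapper=int))           # [8080]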
Example #2
 def targets(self):
     '''
     Return ip addrs based on netmask, sitting in the "glob" spot because
     it is the default
     '''
     addrs = ()
     ret = {}
     ports = __opts__['ssh_scan_ports']
     if not isinstance(ports, list):
         # Comma-separated list of integers
         ports = list(map(int, str(ports).split(',')))
     try:
         addrs = [ipaddress.ip_address(self.tgt)]
     except ValueError:
         try:
             addrs = ipaddress.ip_network(self.tgt).hosts()
         except ValueError:
             pass
     for addr in addrs:
         for port in ports:
             try:
                 sock = salt.utils.network.get_socket(addr, socket.SOCK_STREAM)
                 sock.settimeout(float(__opts__['ssh_scan_timeout']))
                 sock.connect((str(addr), port))
                 sock.shutdown(socket.SHUT_RDWR)
                 sock.close()
                 ret[addr] = {'host': addr, 'port': port}
             except socket.error:
                 pass
     return ret
Example #3
File: ipset.py Project: bryson/salt
def _parse_member(settype, member, strict=False):
    subtypes = settype.split(':')[1].split(',')

    parts = member.split(' ')

    parsed_member = []
    for i in range(len(subtypes)):
        subtype = subtypes[i]
        part = parts[i]

        if subtype in ['ip', 'net']:
            try:
                if '/' in part:
                    part = ipaddress.ip_network(part, strict=strict)
                elif '-' in part:
                    start, end = list(map(ipaddress.ip_address, part.split('-')))

                    part = list(ipaddress.summarize_address_range(start, end))
                else:
                    part = ipaddress.ip_address(part)
            except ValueError:
                pass

        elif subtype == 'port':
            part = int(part)

        parsed_member.append(part)

    if len(parts) > len(subtypes):
        parsed_member.append(' '.join(parts[len(subtypes):]))

    return parsed_member
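For illustration, a rough expectation of what _parse_member produces for an ipset of type hash:ip,port; the member string and addresses are invented:

# Expectation sketch (invented values): the first two space-separated fields
# map onto the subtypes 'ip' and 'port', and any trailing text is kept as a
# single string.
import ipaddress

settype = 'hash:ip,port'
member = '192.0.2.1 8080 some comment'
expected = [ipaddress.ip_address('192.0.2.1'), 8080, 'some comment']
# _parse_member(settype, member) == expected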
Example #4
def launchctl(sub_cmd, *args, **kwargs):
	'''
	Run a launchctl command and raise an error if it fails

	Args:
		sub_cmd (str): Sub command supplied to launchctl
		*args: Additional args are passed to launchctl

	Kwargs:
		return_stdout (bool): If True, return the stdout of the launchctl
			command. All other kwargs are passed to ``cmd.run_all``.

	Returns:
		bool: ``True`` if successful
		str: The stdout of the launchctl command if requested

	Raises:
		CommandExecutionError: If command fails

	CLI Example:

	.. code-block:: bash

		import salt.utils.mac_service
		salt.utils.mac_service.launchctl('debug', 'org.cups.cupsd')
	'''
	# Get return type
	log.debug('Our current kwargs are {}'.format(kwargs))
	return_stdout = kwargs.pop('return_stdout', False)

	# Construct command
	cmd = ['launchctl', sub_cmd]
	cmd.extend(args)

	if 'runas' in kwargs and kwargs.get('runas'):
		# we need to insert the user simulation into the command itself and not
		# just run it from the environment on macOS as that
		# method doesn't work properly when run as root for certain commands.
		runas = kwargs.get('runas')
		if isinstance(cmd, (list, tuple)):
			cmd = ' '.join(map(_cmd_quote, cmd))

		cmd = 'su -l {0} -c "{1}"'.format(runas, cmd)
		# set runas to None, because if you try to run `su -l` as well as
		# simulate the environment macOS will prompt for the password of the
		# user and will cause salt to hang.
		kwargs['runas'] = None

	# Run command
	kwargs['python_shell'] = False
	ret = __salt__['cmd.run_all'](cmd, **kwargs)

	# Raise an error or return successful result
	if ret['retcode']:
		out = 'Failed to {0} service:\n'.format(sub_cmd)
		out += 'stdout: {0}\n'.format(ret['stdout'])
		out += 'stderr: {0}\n'.format(ret['stderr'])
		out += 'retcode: {0}'.format(ret['retcode'])
		raise CommandExecutionError(out)
	else:
		return ret['stdout'] if return_stdout else True
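A hypothetical usage sketch mirroring the CLI example in the docstring; it assumes a macOS minion where salt.utils.mac_service and the launchctl binary are available:

import salt.utils.mac_service

# Returns True on success, raises CommandExecutionError on failure.
salt.utils.mac_service.launchctl('debug', 'org.cups.cupsd')

# Capture stdout instead of the boolean result.
out = salt.utils.mac_service.launchctl('list', return_stdout=True)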
Example #5
def targets(tgt, tgt_type='glob', **kwargs):
    '''
    Return the targets
    '''
    ret = {}
    ports = __opts__['ssh_scan_ports']
    if not isinstance(ports, list):
        # Comma-separated list of integers
        ports = list(map(int, str(ports).split(',')))

    hosts = list(NodeSet(tgt))
    host_addrs = dict([(h, socket.gethostbyname(h)) for h in hosts])

    for host, addr in host_addrs.items():
        addr = str(addr)
        for port in ports:
            try:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.settimeout(float(__opts__['ssh_scan_timeout']))
                sock.connect((addr, port))
                sock.shutdown(socket.SHUT_RDWR)
                sock.close()
                ret[host] = {'host': host, 'port': port}
            except socket.error:
                pass
    return ret
Example #6
File: csf.py Project: bryson/salt
def allow_ports(ports, proto="tcp", direction="in"):
    """
    Fully replace the incoming or outgoing ports
    line in the csf.conf file - e.g. TCP_IN, TCP_OUT,
    UDP_IN, UDP_OUT, etc.

    CLI Example:

    .. code-block:: bash

        salt '*' csf.allow_ports ports="[22,80,443,4505,4506]" proto='tcp' direction='in'
    """

    results = []
    ports = set(ports)
    ports = list(ports)
    proto = proto.upper()
    direction = direction.upper()
    _validate_direction_and_proto(direction, proto)
    ports_csv = ",".join(map(str, ports))
    directions = build_directions(direction)

    for direction in directions:
        result = __salt__["file.replace"](
            "/etc/csf/csf.conf",
            pattern='^{0}_{1}(\ +)?\=(\ +)?".*"$'.format(proto, direction),  # pylint: disable=W1401
            repl='{0}_{1} = "{2}"'.format(proto, direction, ports_csv),
        )
        results.append(result)

    return results
Example #7
def sig2(method, endpoint, params, provider, aws_api_version):
    '''
    Sign a query against AWS services using Signature Version 2 Signing
    Process. This is documented at:

    http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html
    '''
    timenow = datetime.datetime.utcnow()
    timestamp = timenow.strftime('%Y-%m-%dT%H:%M:%SZ')

    params_with_headers = params.copy()
    params_with_headers['AWSAccessKeyId'] = provider.get('id', '')
    params_with_headers['SignatureVersion'] = '2'
    params_with_headers['SignatureMethod'] = 'HmacSHA256'
    params_with_headers['Timestamp'] = '{0}'.format(timestamp)
    params_with_headers['Version'] = aws_api_version
    keys = sorted(params_with_headers.keys())
    values = list(map(params_with_headers.get, keys))
    querystring = urlencode(list(zip(keys, values)))

    canonical = '{0}\n{1}\n/\n{2}'.format(
        method.encode('utf-8'),
        endpoint.encode('utf-8'),
        querystring.encode('utf-8'),
    )

    hashed = hmac.new(provider['key'], canonical, hashlib.sha256)
    sig = binascii.b2a_base64(hashed.digest())
    params_with_headers['Signature'] = sig.strip()
    return params_with_headers
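One hedged note on the HMAC step: this excerpt targets Python 2, where str is already bytes. On Python 3, hmac.new needs explicit bytes, roughly like this (key and canonical string are placeholders):

import binascii
import hashlib
import hmac

secret_key = 'not-a-real-secret'          # placeholder for provider['key']
canonical = 'GET\nec2.amazonaws.com\n/\nAWSAccessKeyId=...'   # placeholder

hashed = hmac.new(secret_key.encode('utf-8'),
                  canonical.encode('utf-8'), hashlib.sha256)
sig = binascii.b2a_base64(hashed.digest()).strip()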
Example #8
def _get_beacon_config_dict(beacon_config):
    beacon_config_dict = {}
    if isinstance(beacon_config, list):
        list(map(beacon_config_dict.update, beacon_config))
    else:
        beacon_config_dict = beacon_config

    return beacon_config_dict
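A minimal standalone sketch of the merge idiom used here (and by several beacons further down): a beacon config arrives from YAML as a list of single-key dicts and is flattened into one dict. The sample config is invented:

beacon_config = [
    {'interval': 10},
    {'files': {'/etc/fstab': {}}},
]

beacon_config_dict = {}
list(map(beacon_config_dict.update, beacon_config))
# list() only forces the lazy map object on Python 3; a plain for-loop that
# calls beacon_config_dict.update(item) would read the same.
print(beacon_config_dict)   # {'interval': 10, 'files': {'/etc/fstab': {}}}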
Example #9
def get_path():
    '''
    Returns the system path
    '''
    ret = __salt__['reg.read_key']('HKEY_LOCAL_MACHINE', 'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment', 'PATH').split(';')

    # Trim ending backslash
    return list(map(_normalize_dir, ret))
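_normalize_dir is not part of the excerpt; judging by the "Trim ending backslash" comment, a hypothetical minimal version would look like this:

# Hypothetical helper, inferred from the comment above; not the Salt source.
def _normalize_dir(path):
    return path.rstrip('\\')

print(_normalize_dir('C:\\Windows\\'))   # C:\Windows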
Example #10
def _send_textmetrics(metrics):
    '''
    Format metrics for the carbon plaintext protocol
    '''

    data = [' '.join(map(str, metric)) for metric in metrics] + ['']

    return '\n'.join(data)
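The carbon plaintext protocol is one "path value timestamp" line per metric, terminated by a trailing newline; a quick sketch with invented metrics:

metrics = [
    ('salt.loadavg.1min', 0.42, 1400000000),
    ('salt.loadavg.5min', 0.35, 1400000000),
]

data = [' '.join(map(str, metric)) for metric in metrics] + ['']
print('\n'.join(data))
# salt.loadavg.1min 0.42 1400000000
# salt.loadavg.5min 0.35 1400000000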
Example #11
def atrm(*args):
    '''
    Remove jobs from the queue.

    CLI Example:

    .. code-block:: bash

        salt '*' at.atrm <jobid> <jobid> .. <jobid>
        salt '*' at.atrm all
        salt '*' at.atrm all [tag]
    '''

    if not args:
        return {'jobs': {'removed': [], 'tag': None}}

    if args[0] == 'all':
        if len(args) > 1:
            opts = list(map(str, [j['job'] for j in atq(args[1])['jobs']]))
            ret = {'jobs': {'removed': opts, 'tag': args[1]}}
        else:
            opts = list(map(str, [j['job'] for j in atq()['jobs']]))
            ret = {'jobs': {'removed': opts, 'tag': None}}
    else:
        opts = list(map(str, [i['job'] for i in atq()['jobs']
            if i['job'] in args]))
        ret = {'jobs': {'removed': opts, 'tag': None}}

    # call atrm for each job in ret['jobs']['removed']
    for job in ret['jobs']['removed']:
        res_job = __salt__['cmd.run_all']('atrm {job}'.format(
            job=job
        ))
        if res_job['retcode'] > 0:
            if 'failed' not in ret['jobs']:
                ret['jobs']['failed'] = {}
            ret['jobs']['failed'][job] = res_job['stderr']

    # remove failed from list
    if 'failed' in ret['jobs']:
        for job in ret['jobs']['failed']:
            ret['jobs']['removed'].remove(job)

    return ret
Example #12
def runSome():
    """
    Unittest runner
    """
    tests = []
    names = ["testParseHostname", "testExtractMastersSingle", "testExtractMastersMultiple"]

    tests.extend(list(map(BasicTestCase, names)))

    suite = unittest.TestSuite(tests)
    unittest.TextTestRunner(verbosity=2).run(suite)
Example #13
 def row_wrapper(row):
     new_rows = [
         self.wrapfunc(item).split('\n')
         for item in row
     ]
     rows = []
     for item in map(None, *new_rows):
         if isinstance(item, (tuple, list)):
             rows.append([substr or '' for substr in item])
         else:
             rows.append([item])
     return rows
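map(None, *new_rows) is a Python 2-only idiom: with None as the function it behaves like a padded zip, filling short columns with None. A Python 3 sketch of the same padding using itertools.zip_longest, with invented cell contents:

from itertools import zip_longest

new_rows = [['alpha', 'beta'], ['one']]    # wrapped cell lines, one list per column

rows = []
for item in zip_longest(*new_rows):        # pads the short column with None
    rows.append([substr or '' for substr in item])
print(rows)   # [['alpha', 'one'], ['beta', '']]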
Example #14
def dependency_information(include_salt_cloud=False):
    '''
    Report versions of library dependencies.
    '''
    libs = [
        ('Python', None, sys.version.rsplit('\n')[0].strip()),
        ('Jinja2', 'jinja2', '__version__'),
        ('M2Crypto', 'M2Crypto', 'version'),
        ('msgpack-python', 'msgpack', 'version'),
        ('msgpack-pure', 'msgpack_pure', 'version'),
        ('pycrypto', 'Crypto', '__version__'),
        ('libnacl', 'libnacl', '__version__'),
        ('PyYAML', 'yaml', '__version__'),
        ('ioflo', 'ioflo', '__version__'),
        ('PyZMQ', 'zmq', '__version__'),
        ('RAET', 'raet', '__version__'),
        ('ZMQ', 'zmq', 'zmq_version'),
        ('Mako', 'mako', '__version__'),
        ('Tornado', 'tornado', 'version'),
        ('timelib', 'timelib', 'version'),
        ('dateutil', 'dateutil', '__version__'),
        ('pygit2', 'pygit2', '__version__'),
        ('libgit2', 'pygit2', 'LIBGIT2_VERSION'),
        ('smmap', 'smmap', '__version__'),
        ('cffi', 'cffi', '__version__'),
        ('pycparser', 'pycparser', '__version__'),
        ('gitdb', 'gitdb', '__version__'),
        ('gitpython', 'git', '__version__'),
        ('python-gnupg', 'gnupg', '__version__'),
        ('mysql-python', 'MySQLdb', '__version__'),
        ('cherrypy', 'cherrypy', '__version__'),
    ]

    if include_salt_cloud:
        libs.append(
            ('Apache Libcloud', 'libcloud', '__version__'),
        )

    for name, imp, attr in libs:
        if imp is None:
            yield name, attr
            continue
        try:
            imp = __import__(imp)
            version = getattr(imp, attr)
            if callable(version):
                version = version()
            if isinstance(version, (tuple, list)):
                version = '.'.join(map(str, version))
            yield name, version
        except Exception:
            yield name, None
Example #15
def runSome():
    '''
    Unittest runner
    '''
    tests = []
    names = ['testAutoAccept',
             'testManualAccept',
             'testDelete']

    tests.extend(list(map(BasicTestCase, names)))

    suite = unittest.TestSuite(tests)
    unittest.TextTestRunner(verbosity=2).run(suite)
Example #16
def __virtual__():
    '''
    Only work on OpenBSD
    '''
    if __grains__['os'] == 'OpenBSD' and os.path.exists('/etc/rc.d/rc.subr'):
        krel = list(map(int, __grains__['kernelrelease'].split('.')))
        # The -f flag, used to force a script to run even if disabled,
        # was added after the 5.0 release.
        # the rcctl(8) command is the preferred way to manage services.
        if krel[0] > 5 or (krel[0] == 5 and krel[1] > 0):
            if not os.path.exists('/usr/sbin/rcctl'):
                return __virtualname__
    return False
Example #17
File: at.py Project: mahak/salt
def atrm(*args):
    '''
    Remove jobs from the queue.

    CLI Example:

    .. code-block:: bash

        salt '*' at.atrm <jobid> <jobid> .. <jobid>
        salt '*' at.atrm all
        salt '*' at.atrm all [tag]
    '''

    # Need to do this here also since we use atq()
    if not salt.utils.which('at'):
        return '\'at.atrm\' is not available.'

    if not args:
        return {'jobs': {'removed': [], 'tag': None}}

    if args[0] == 'all':
        if len(args) > 1:
            opts = list(map(str, [j['job'] for j in atq(args[1])['jobs']]))
            ret = {'jobs': {'removed': opts, 'tag': args[1]}}
        else:
            opts = list(map(str, [j['job'] for j in atq()['jobs']]))
            ret = {'jobs': {'removed': opts, 'tag': None}}
    else:
        opts = list(map(str, [i['job'] for i in atq()['jobs']
            if i['job'] in args]))
        ret = {'jobs': {'removed': opts, 'tag': None}}

    # Shim to produce output similar to what __virtual__() should do
    # but __salt__ isn't available in __virtual__()
    output = _cmd('at', '-d', ' '.join(opts))
    if output is None:
        return '\'at.atrm\' is not available.'

    return ret
Example #18
File: csf.py Project: bryson/salt
def skip_nics(nics, ipv6=False):
    if ipv6:
        ipv6 = "6"
    else:
        ipv6 = ""
    nics_csv = ",".join(map(str, nics))
    result = __salt__["file.replace"](
        "/etc/csf/csf.conf",
        pattern='^ETH{0}_DEVICE_SKIP(\ +)?\=(\ +)?".*"'.format(ipv6),  # pylint: disable=W1401
        repl='ETH{0}_DEVICE_SKIP = "{1}"'.format(ipv6, nics_csv),
    )

    return result
Example #19
def dependency_information(include_salt_cloud=False):
    """
    Report versions of library dependencies.
    """
    libs = [
        ("Python", None, sys.version.rsplit("\n")[0].strip()),
        ("Jinja2", "jinja2", "__version__"),
        ("M2Crypto", "M2Crypto", "version"),
        ("msgpack-python", "msgpack", "version"),
        ("msgpack-pure", "msgpack_pure", "version"),
        ("pycrypto", "Crypto", "__version__"),
        ("libnacl", "libnacl", "__version__"),
        ("PyYAML", "yaml", "__version__"),
        ("ioflo", "ioflo", "__version__"),
        ("PyZMQ", "zmq", "__version__"),
        ("RAET", "raet", "__version__"),
        ("ZMQ", "zmq", "zmq_version"),
        ("Mako", "mako", "__version__"),
        ("Tornado", "tornado", "version"),
        ("timelib", "timelib", "version"),
        ("dateutil", "dateutil", "__version__"),
        ("pygit2", "pygit2", "__version__"),
        ("libgit2", "pygit2", "LIBGIT2_VERSION"),
        ("smmap", "smmap", "__version__"),
        ("cffi", "cffi", "__version__"),
        ("pycparser", "pycparser", "__version__"),
        ("gitdb", "gitdb", "__version__"),
        ("gitpython", "git", "__version__"),
        ("python-gnupg", "gnupg", "__version__"),
        ("mysql-python", "MySQLdb", "__version__"),
        ("cherrypy", "cherrypy", "__version__"),
    ]

    if include_salt_cloud:
        libs.append(("Apache Libcloud", "libcloud", "__version__"))

    for name, imp, attr in libs:
        if imp is None:
            yield name, attr
            continue
        try:
            imp = __import__(imp)
            version = getattr(imp, attr)
            if callable(version):
                version = version()
            if isinstance(version, (tuple, list)):
                version = ".".join(map(str, version))
            yield name, version
        except Exception:
            yield name, None
Example #20
def get_path():
    '''
    Returns a list of items in the SYSTEM path

    CLI Example:

    .. code-block:: bash

        salt '*' win_path.get_path
    '''
    ret = __salt__['reg.read_value']('HKEY_LOCAL_MACHINE',
                                   'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment',
                                   'PATH')['vdata'].split(';')

    # Trim ending backslash
    return list(map(_normalize_dir, ret))
Example #21
def get_path():
    """
    Returns a list of items in the SYSTEM path

    CLI Example:

    .. code-block:: bash

        salt '*' win_path.get_path
    """
    ret = __salt__["reg.read_key"](
        "HKEY_LOCAL_MACHINE", "SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment", "PATH"
    ).split(";")

    # Trim ending backslash
    return list(map(_normalize_dir, ret))
Example #22
File: xapi.py Project: DaveQB/salt
 def cpu_make_map(cpulist):
     cpus = []
     for c in cpulist.split(','):
         if c == '':
             continue
         if '-' in c:
             (x, y) = c.split('-')
             for i in range(int(x), int(y) + 1):
                 cpus.append(int(i))
         else:
             # remove this element from the list
             if c[0] == '^':
                 cpus = [x for x in cpus if x != int(c[1:])]
             else:
                 cpus.append(int(c))
     cpus.sort()
     return ','.join(map(str, cpus))
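A slightly condensed standalone copy to show the expected output for a few invented CPU lists (ranges expand, and '^n' removes a CPU that was already added):

def cpu_make_map(cpulist):
    # condensed copy of the helper above, for demonstration only
    cpus = []
    for c in cpulist.split(','):
        if not c:
            continue
        if '-' in c:
            x, y = c.split('-')
            cpus.extend(range(int(x), int(y) + 1))
        elif c.startswith('^'):
            cpus = [n for n in cpus if n != int(c[1:])]
        else:
            cpus.append(int(c))
    return ','.join(map(str, sorted(cpus)))

print(cpu_make_map('0-3'))        # 0,1,2,3
print(cpu_make_map('0-3,^2'))     # 0,1,3
print(cpu_make_map('0-2,5,^1'))   # 0,2,5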
Example #23
def validate(config):
    '''
    Validate the beacon configuration
    '''
    valid = True
    messages = []

    _config = {}
    if not isinstance(config, list):
        valid = False
        messages.append('[-] Configuration for {0} beacon must be a list'.format(__virtualname__))
    else:
        list(map(_config.update, config))

    try:
        sites = _config.get('sites', {})
    except AttributeError:
        valid = False
        messages.append('[-] Sites for {0} beacon must be a dict'.format(__virtualname__))

    if not sites:
        valid = False
        messages.append('[-] Configuration does not contain sites')

    for site, settings in sites.items():
        if required_site_attributes.isdisjoint(set(settings.keys())):
            valid = False
            messages.append('[-] Sites for {} beacon requires {}'.format(__virtualname__,
                                                                         required_site_attributes))
        log.debug('[+] site: %s', site)
        log.debug('[+] settings: %s', settings)

        for optional_attrs in itertools.chain(settings.get(attr, []) for attr in optional_site_attributes):
            for item in optional_attrs:
                cmp = item.get('comp')
                if cmp and cmp not in comparisons:
                    valid = False
                    messages.append('[-] Invalid comparison operator {0}'.format(cmp))

    messages.append('[+] Valid beacon configuration')
    return valid, messages
Example #24
File: csf.py Project: bryson/salt
def ports_open(name, ports, proto='tcp', direction='in'):
    '''
    Ensure ports are open for a protocol, in a direction.
    e.g. - proto='tcp', direction='in' would set the values
    for TCP_IN in the csf.conf file.

    ports
        A list of ports that should be open.

    proto
        The protocol. May be one of 'tcp', 'udp',
        'tcp6', or 'udp6'.

    direction
        Choose 'in', 'out', or both to indicate the port
        should be opened for inbound traffic, outbound
        traffic, or both.
    '''

    ports = list(map(str, ports))
    diff = False
    ret = {'name': ','.join(ports),
           'changes': {},
           'result': True,
           'comment': 'Ports open.'}

    current_ports = __salt__['csf.get_ports'](proto=proto, direction=direction)
    direction = direction.upper()
    directions = __salt__['csf.build_directions'](direction)
    for direction in directions:
        print(current_ports[direction])  # pylint: disable=C0325
        print(ports)  # pylint: disable=C0325
        if current_ports[direction] != ports:
            diff = True
    if diff:
        result = __salt__['csf.allow_ports'](ports, proto=proto, direction=direction)
        ret['changes']['Ports'] = 'Changed'
        ret['comment'] = result
    return ret
Example #25
File: aws.py Project: DaveQB/salt
def sig2(method, endpoint, params, provider, aws_api_version):
    '''
    Sign a query against AWS services using Signature Version 2 Signing
    Process. This is documented at:

    http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html
    '''
    timenow = datetime.datetime.utcnow()
    timestamp = timenow.strftime('%Y-%m-%dT%H:%M:%SZ')

    # Retrieve access credentials from meta-data, or use provided
    access_key_id, secret_access_key, token = creds(provider)

    params_with_headers = params.copy()
    params_with_headers['AWSAccessKeyId'] = access_key_id
    params_with_headers['SignatureVersion'] = '2'
    params_with_headers['SignatureMethod'] = 'HmacSHA256'
    params_with_headers['Timestamp'] = '{0}'.format(timestamp)
    params_with_headers['Version'] = aws_api_version
    keys = sorted(params_with_headers.keys())
    values = list(map(params_with_headers.get, keys))
    querystring = urlencode(list(zip(keys, values)))

    canonical = '{0}\n{1}\n/\n{2}'.format(
        method.encode('utf-8'),
        endpoint.encode('utf-8'),
        querystring.encode('utf-8'),
    )

    hashed = hmac.new(secret_access_key, canonical, hashlib.sha256)
    sig = binascii.b2a_base64(hashed.digest())
    params_with_headers['Signature'] = sig.strip()

    # Add in security token if we have one
    if token != '':
        params_with_headers['SecurityToken'] = token

    return params_with_headers
Example #26
File: aws.py Project: makearl/salt
def sig2(method, endpoint, params, provider, aws_api_version):
    '''
    Sign a query against AWS services using Signature Version 2 Signing
    Process. This is documented at:

    http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html
    '''
    timenow = datetime.datetime.utcnow()
    timestamp = timenow.strftime('%Y-%m-%dT%H:%M:%SZ')

    # Retrieve access credentials from meta-data, or use provided
    access_key_id, secret_access_key, token = creds(provider)

    params_with_headers = params.copy()
    params_with_headers['AWSAccessKeyId'] = access_key_id
    params_with_headers['SignatureVersion'] = '2'
    params_with_headers['SignatureMethod'] = 'HmacSHA256'
    params_with_headers['Timestamp'] = '{0}'.format(timestamp)
    params_with_headers['Version'] = aws_api_version
    keys = sorted(params_with_headers.keys())
    values = list(map(params_with_headers.get, keys))
    querystring = urlencode(list(zip(keys, values)))

    canonical = '{0}\n{1}\n/\n{2}'.format(
        method.encode('utf-8'),
        endpoint.encode('utf-8'),
        querystring.encode('utf-8'),
    )

    hashed = hmac.new(secret_access_key, canonical, hashlib.sha256)
    sig = binascii.b2a_base64(hashed.digest())
    params_with_headers['Signature'] = sig.strip()

    # Add in security token if we have one
    if token != '':
        params_with_headers['SecurityToken'] = token

    return params_with_headers
Example #27
 def targets(self):
     """
     Return ip addrs based on netmask, sitting in the "glob" spot because
     it is the default
     """
     addrs = ()
     ret = {}
     ports = __opts__["ssh_scan_ports"]
     if not isinstance(ports, list):
         # Comma-separated list of integers
         ports = list(map(int, six.text_type(ports).split(",")))
     try:
         if self.tgt_type == "list":
             addrs = [ipaddress.ip_address(tgt) for tgt in self.tgt]
         else:
             addrs = [ipaddress.ip_address(self.tgt)]
     except ValueError:
         try:
             addrs = ipaddress.ip_network(self.tgt).hosts()
         except ValueError:
             pass
     for addr in addrs:
         addr = six.text_type(addr)
         ret[addr] = copy.deepcopy(__opts__.get("roster_defaults", {}))
         log.trace("Scanning host: %s", addr)
         for port in ports:
             log.trace("Scanning port: %s", port)
             try:
                 sock = salt.utils.network.get_socket(
                     addr, socket.SOCK_STREAM)
                 sock.settimeout(float(__opts__["ssh_scan_timeout"]))
                 sock.connect((addr, port))
                 sock.shutdown(socket.SHUT_RDWR)
                 sock.close()
                 ret[addr].update({"host": addr, "port": port})
             except socket.error:
                 pass
     return ret
Example #28
def runSome():
    '''
    Unittest runner
    '''
    tests = []
    names = [
        'testContextSetup',
        'testPresenceAvailable',
        'testPresenceJoined',
        'testPresenceAllowed',
        'testPresenceAlived',
        'testPresenceReaped',
        'testPresenceNoRequest',
        'testPresenceUnknownSrc',
        'testPresenceAvailableNoMinions',
        'testPresenceAvailableOneMinion',
        'testPresenceAvailableSomeIpUnknown',
        'testPresenceAllowedNoMinions',
        'testPresenceAllowedOneMinion',
    ]
    tests.extend(list(map(PresenterTestCase, names)))
    suite = unittest.TestSuite(tests)
    unittest.TextTestRunner(verbosity=2).run(suite)
Example #29
def list_plugins(path, user):
    '''
    List plugins in an installed wordpress path

    path
        path to wordpress install location

    user
        user to run the command as

    CLI Example:

    .. code-block:: bash

        salt '*' wordpress.list_plugins /var/www/html apache
    '''
    ret = []
    resp = __salt__['cmd.shell']((
        'wp --path={0} plugin list'
    ).format(path), runas=user)
    for line in resp.split('\n')[1:]:
        ret.append(line.split('\t'))
    return [plugin.__dict__ for plugin in map(_get_plugins, ret)]
Example #30
def beacon(config):
    '''
    Called several times each second
    https://docs.saltstack.com/en/latest/topics/beacons/#the-beacon-function

    .. code-block:: yaml

        beacons:
          proxy_example:
            - endpoint: beacon
    '''
    # Important!!!
    # Although this toy example makes an HTTP call
    # to get beacon information
    # please be advised that doing CPU or IO intensive
    # operations in this method will cause the beacon loop
    # to block.
    _config = {}
    list(map(_config.update, config))

    beacon_url = '{0}{1}'.format(__opts__['proxy']['url'], _config['endpoint'])
    ret = salt.utils.http.query(beacon_url, decode_type='json', decode=True)
    return [ret['dict']]
Example #31
    def test_authentication_exception_consistency(self):
        '''
        Test consistency of authentication exception of each clients.
        '''
        valid_response = {'return': ['Failed to authenticate']}

        clients = ['local', 'local_async', 'runner', 'runner_async']
        request_lowstates = map(
            lambda client: {
                "client": client,
                "tgt": "*",
                "fun": "test.fib",
                "arg": ["10"]
            }, clients)

        for request_lowstate in request_lowstates:
            response = self.fetch(
                '/run',
                method='POST',
                body=json.dumps(request_lowstate),
                headers={'Content-Type': self.content_type_map['json']})

            self.assertEqual(valid_response, json.loads(response.body))
Example #32
def validate(config):
    '''
    Validate the beacon configuration
    '''
    if not isinstance(config, list):
        return False, ('Configuration for network_settings '
                       'beacon must be a list.')
    else:
        _config = {}
        list(map(_config.update, config))

        interfaces = _config.get('interfaces', {})
        if isinstance(interfaces, list):
            #Old syntax
            return False, ('interfaces section for network_settings beacon'
                           ' must be a dictionary.')

        for item in interfaces:
            if not isinstance(_config['interfaces'][item], dict):
                return False, ('Interface attributes for network_settings beacon'
                               ' must be a dictionary.')
            if not all(j in ATTRS for j in _config['interfaces'][item]):
                return False, ('Invalid attributes in beacon configuration.')
    return True, 'Valid beacon configuration'
Example #33
def beacon(config):
    '''
    Watch the configured directories

    Example Config

    .. code-block:: yaml

        beacons:
          watchdog:
            - directories:
                /path/to/dir:
                  mask:
                    - create
                    - modify
                    - delete
                    - move

    The mask list can contain the following events (the default mask is create,
    modify, delete, and move):
    * create  - File or directory is created in watched directory
    * modify  - The watched directory is modified
    * delete  - File or directory is deleted from watched directory
    * move    - File or directory is moved or renamed in the watched directory
    '''

    _config = {}
    list(map(_config.update, config))

    queue = _get_queue(_config)

    ret = []
    while queue:
        ret.append(to_salt_event(queue.popleft()))

    return ret
Example #34
def _generate_payload(author_icon, title, report):
    '''
    Prepare the payload for Slack
    :param author_icon: The url for the thumbnail to be displayed
    :param title: The title of the message
    :param report: A dictionary with the report of the Salt function
    :return: The payload ready for Slack
    '''

    title = _sprinkle(title)

    unchanged = {
        'color': 'good',
        'title': 'Unchanged: {unchanged}'.format(unchanged=report['unchanged'].get('counter', None))
    }

    changed = {
        'color': 'warning',
        'title': 'Changed: {changed}'.format(changed=report['changed'].get('counter', None))
    }

    if report['changed'].get('tasks'):
        changed['fields'] = list(
            map(_format_task, report['changed'].get('tasks')))

    failed = {
        'color': 'danger',
        'title': 'Failed: {failed}'.format(failed=report['failed'].get('counter', None))
    }

    if report['failed'].get('tasks'):
        failed['fields'] = list(
            map(_format_task, report['failed'].get('tasks')))

    text = 'Function: {function}\n'.format(function=report.get('function'))
    if report.get('arguments'):
        text += 'Function Args: {arguments}\n'.format(
            arguments=str(list(map(str, report.get('arguments')))))

    text += 'JID: {jid}\n'.format(jid=report.get('jid'))
    text += 'Total: {total}\n'.format(total=report.get('total'))
    text += 'Duration: {duration:.2f} secs'.format(
        duration=float(report.get('duration')))

    payload = {
        'attachments': [
            {
                'fallback': title,
                'color': "#272727",
                'author_name': _sprinkle('{id}'),
                'author_link': _sprinkle('{localhost}'),
                'author_icon': author_icon,
                'title': 'Success: {success}'.format(success=str(report.get('success'))),
                'text': text
            },
            unchanged,
            changed,
            failed
        ]
    }

    return payload
Example #35
File: net.py Project: chonty/salt
def interfaces(
    device=None,
    interface=None,
    title=None,
    pattern=None,
    ipnet=None,
    best=True,
    display=_DEFAULT_DISPLAY,
):
    """
    Search for interfaces details in the following mine functions:

    - net.interfaces
    - net.ipaddrs

    Optional arguments:

    device
        Return interface data from a certain device only.

    interface
        Return data selecting by interface name.

    pattern
        Return interfaces that contain a certain pattern in their description.

    ipnet
        Return interfaces whose IP networks associated include this IP network.

    best: ``True``
        When ``ipnet`` is specified, this argument says if the runner should return only the best match
        (the output will contain at most one row). Default: ``True`` (return only the best match).

    display: True
        Display on the screen or return structured object? Default: ``True`` (return on the CLI).

    title
        Display a custom title for the table.

    CLI Example:

    .. code-block:: bash

        $ sudo salt-run net.interfaces interface=vt-0/0/10

    Output Example:

    .. code-block:: text

        Details for interface xe-0/0/0
        _________________________________________________________________________________________________________________
        |    Device    | Interface | Interface Description |  UP  | Enabled | Speed [Mbps] | MAC Address | IP Addresses |
        _________________________________________________________________________________________________________________
        | edge01.bjm01 | vt-0/0/10 |                       | True |   True  |     1000     |             |              |
        _________________________________________________________________________________________________________________
        | edge01.flw01 | vt-0/0/10 |                       | True |   True  |     1000     |             |              |
        _________________________________________________________________________________________________________________
        | edge01.pos01 | vt-0/0/10 |                       | True |   True  |     1000     |             |              |
        _________________________________________________________________________________________________________________
        | edge01.oua01 | vt-0/0/10 |                       | True |   True  |     1000     |             |              |
        _________________________________________________________________________________________________________________
    """
    def _ipnet_belongs(net):
        """
        Helper to tell if a IP address or network belong to a certain network.
        """
        if net == "0.0.0.0/0":
            return False
        net_obj = _get_network_obj(net)
        if not net_obj:
            return False
        return ipnet in net_obj or net_obj in ipnet

    labels = {
        "device": "Device",
        "interface": "Interface",
        "interface_description": "Interface Description",
        "is_up": "UP",
        "is_enabled": "Enabled",
        "speed": "Speed [Mbps]",
        "mac": "MAC Address",
        "ips": "IP Addresses",
    }
    rows = []

    net_runner_opts = _get_net_runner_opts()

    if pattern:
        title = 'Pattern "{0}" found in the description of the following interfaces'.format(
            pattern)
    if not title:
        title = "Details"
        if interface:
            title += " for interface {0}".format(interface)
        else:
            title += " for all interfaces"
        if device:
            title += " on device {0}".format(device)
        if ipnet:
            title += " that include network {net}".format(
                net=six.text_type(ipnet))
            if best:
                title += " - only best match returned"

    all_interfaces = _get_mine("net.interfaces")
    all_ipaddrs = _get_mine("net.ipaddrs")

    if device:
        all_interfaces = {device: all_interfaces.get(device, {})}

    if ipnet and not isinstance(ipnet, IPNetwork):
        ipnet = _get_network_obj(ipnet)

    best_row = {}
    best_net_match = IPNetwork("0.0.0.0/0")
    for device, net_interfaces_out in six.iteritems(all_interfaces):  # pylint: disable=too-many-nested-blocks
        if not net_interfaces_out:
            continue
        if not net_interfaces_out.get("result", False):
            continue
        selected_device_interfaces = net_interfaces_out.get("out", {})
        if interface:
            selected_device_interfaces = {
                interface: selected_device_interfaces.get(interface, {})
            }
        for interface_name, interface_details in six.iteritems(
                selected_device_interfaces):
            if not interface_details:
                continue
            if ipnet and interface_name in net_runner_opts.get(
                    "ignore_interfaces"):
                continue
            interface_description = interface_details.get("description",
                                                          "") or ""
            if pattern:
                if pattern.lower() not in interface_description.lower():
                    continue
            if not all_ipaddrs.get(device, {}).get("result", False):
                continue
            ips = []
            device_entry = {
                "device":
                device,
                "interface":
                interface_name,
                "interface_description":
                interface_description,
                "is_up": (interface_details.get("is_up", "") or ""),
                "is_enabled": (interface_details.get("is_enabled", "") or ""),
                "speed": (interface_details.get("speed", "") or ""),
                "mac":
                napalm_helpers.convert(
                    napalm_helpers.mac,
                    (interface_details.get("mac_address", "") or "")),
                "ips": [],
            }
            intf_entry_found = False
            for intrf, interface_ips in six.iteritems(
                    all_ipaddrs.get(device, {}).get("out", {})):
                if intrf.split(".")[0] == interface_name:
                    ip_addresses = interface_ips.get("ipv4",
                                                     {})  # all IPv4 addresses
                    ip_addresses.update(interface_ips.get(
                        "ipv6", {}))  # and all IPv6 addresses
                    ips = [
                        "{0}/{1}".format(
                            ip_addr, addr_details.get("prefix_length", "32"))
                        for ip_addr, addr_details in six.iteritems(
                            ip_addresses)
                    ]
                    interf_entry = {}
                    interf_entry.update(device_entry)
                    interf_entry["ips"] = ips
                    if display:
                        interf_entry["ips"] = "\n".join(interf_entry["ips"])
                    if ipnet:
                        inet_ips = [
                            six.text_type(ip) for ip in ips
                            if _ipnet_belongs(ip)
                        ]  # filter and get only IP include ipnet
                        if inet_ips:  # if any
                            if best:
                                # determine the global best match
                                compare = [best_net_match]
                                if not best_net_match:
                                    compare = []
                                compare.extend(
                                    list(map(_get_network_obj, inet_ips)))
                                new_best_net_match = max(compare)
                                if new_best_net_match != best_net_match:
                                    best_net_match = new_best_net_match
                                    best_row = interf_entry
                            else:
                                # or include all
                                intf_entry_found = True
                                rows.append(interf_entry)
                    else:
                        intf_entry_found = True
                        rows.append(interf_entry)
            if not intf_entry_found and not ipnet:
                interf_entry = {}
                interf_entry.update(device_entry)
                if display:
                    interf_entry["ips"] = ""
                rows.append(interf_entry)

    if ipnet and best and best_row:
        rows = [best_row]

    return _display_runner(rows, labels, title, display=display)
Example #36
    def process(self, config, grains):
        '''
        Process the configured beacons

        The config must be a list and looks like this in yaml

        .. code_block:: yaml
            beacons:
              inotify:
                - files:
                    - /etc/fstab: {}
                    - /var/cache/foo: {}
        '''
        ret = []
        b_config = copy.deepcopy(config)
        if 'enabled' in b_config and not b_config['enabled']:
            return
        for mod in config:
            if mod == 'enabled':
                continue

            # Convert beacons that are lists to a dict to make processing easier
            current_beacon_config = None
            if isinstance(config[mod], list):
                current_beacon_config = {}
                list(map(current_beacon_config.update, config[mod]))
            elif isinstance(config[mod], dict):
                current_beacon_config = config[mod]

            if 'enabled' in current_beacon_config:
                if not current_beacon_config['enabled']:
                    log.trace('Beacon %s disabled', mod)
                    continue
                else:
                    # remove 'enabled' item before processing the beacon
                    if isinstance(config[mod], dict):
                        del config[mod]['enabled']
                    else:
                        self._remove_list_item(config[mod], 'enabled')

            log.trace('Beacon processing: %s', mod)
            fun_str = '{0}.beacon'.format(mod)
            validate_str = '{0}.validate'.format(mod)
            if fun_str in self.beacons:
                runonce = self._determine_beacon_config(
                    current_beacon_config, 'run_once')
                interval = self._determine_beacon_config(
                    current_beacon_config, 'interval')
                if interval:
                    b_config = self._trim_config(b_config, mod, 'interval')
                    if not self._process_interval(mod, interval):
                        log.trace('Skipping beacon %s. Interval not reached.',
                                  mod)
                        continue
                if self._determine_beacon_config(current_beacon_config,
                                                 'disable_during_state_run'):
                    log.trace(
                        'Evaluting if beacon %s should be skipped due to a state run.',
                        mod)
                    b_config = self._trim_config(b_config, mod,
                                                 'disable_during_state_run')
                    is_running = False
                    running_jobs = salt.utils.minion.running(self.opts)
                    for job in running_jobs:
                        if re.match('state.*', job['fun']):
                            is_running = True
                    if is_running:
                        close_str = '{0}.close'.format(mod)
                        if close_str in self.beacons:
                            log.info(
                                'Closing beacon %s. State run in progress.',
                                mod)
                            self.beacons[close_str](b_config[mod])
                        else:
                            log.info(
                                'Skipping beacon %s. State run in progress.',
                                mod)
                        continue
                # Update __grains__ on the beacon
                self.beacons[fun_str].__globals__['__grains__'] = grains

                # Run the validate function if it's available,
                # otherwise there is a warning about it being missing
                if validate_str in self.beacons:
                    valid, vcomment = self.beacons[validate_str](b_config[mod])

                    if not valid:
                        log.info(
                            'Beacon %s configuration invalid, '
                            'not running.\n%s', mod, vcomment)
                        continue

                # Fire the beacon!
                raw = self.beacons[fun_str](b_config[mod])
                for data in raw:
                    tag = 'salt/beacon/{0}/{1}/'.format(self.opts['id'], mod)
                    if 'tag' in data:
                        tag += data.pop('tag')
                    if 'id' not in data:
                        data['id'] = self.opts['id']
                    ret.append({'tag': tag, 'data': data})
                if runonce:
                    self.disable_beacon(mod)
            else:
                log.warning('Unable to process beacon %s', mod)
        return ret
Example #37
def interfaces(device=None,
               interface=None,
               title=None,
               pattern=None,
               ipnet=None,
               best=True,
               display=_DEFAULT_DISPLAY):
    '''
    Search for interfaces details in the following mine functions:

    - net.interfaces
    - net.ipaddrs

    Optional arguments:

    device
        Return interface data from a certain device only.

    interface
        Return data selecting by interface name.

    pattern
        Return interfaces that contain a certain pattern in their description.

    ipnet
        Return interfaces whose IP networks associated include this IP network.

    best: ``True``
        When ``ipnet`` is specified, this argument says if the runner should return only the best match
        (the output will contain at most one row). Default: ``True`` (return only the best match).

    display: True
        Display on the screen or return structured object? Default: ``True`` (return on the CLI).

    title
        Display a custom title for the table.

    CLI Example:

    .. code-block:: bash

        $ sudo salt-run net.interfaces interface=vt-0/0/10

    Output Example:

    .. code-block:: text

        Details for interface xe-0/0/0
        _________________________________________________________________________________________________________________
        |    Device    | Interface | Interface Description |  UP  | Enabled | Speed [Mbps] | MAC Address | IP Addresses |
        _________________________________________________________________________________________________________________
        | edge01.bjm01 | vt-0/0/10 |                       | True |   True  |     1000     |             |              |
        _________________________________________________________________________________________________________________
        | edge01.flw01 | vt-0/0/10 |                       | True |   True  |     1000     |             |              |
        _________________________________________________________________________________________________________________
        | edge01.pos01 | vt-0/0/10 |                       | True |   True  |     1000     |             |              |
        _________________________________________________________________________________________________________________
        | edge01.oua01 | vt-0/0/10 |                       | True |   True  |     1000     |             |              |
        _________________________________________________________________________________________________________________
    '''
    def _ipnet_belongs(net):
        '''
        Helper to tell if a IP address or network belong to a certain network.
        '''
        if net == '0.0.0.0/0':
            return False
        net_obj = _get_network_obj(net)
        if not net_obj:
            return False
        return ipnet in net_obj or net_obj in ipnet

    labels = {
        'device': 'Device',
        'interface': 'Interface',
        'interface_description': 'Interface Description',
        'is_up': 'UP',
        'is_enabled': 'Enabled',
        'speed': 'Speed [Mbps]',
        'mac': 'MAC Address',
        'ips': 'IP Addresses'
    }
    rows = []

    net_runner_opts = _get_net_runner_opts()

    if pattern:
        title = 'Pattern "{0}" found in the description of the following interfaces'.format(
            pattern)
    if not title:
        title = 'Details'
        if interface:
            title += ' for interface {0}'.format(interface)
        else:
            title += ' for all interfaces'
        if device:
            title += ' on device {0}'.format(device)
        if ipnet:
            title += ' that include network {net}'.format(
                net=six.text_type(ipnet))
            if best:
                title += ' - only best match returned'

    all_interfaces = _get_mine('net.interfaces')
    all_ipaddrs = _get_mine('net.ipaddrs')

    if device:
        all_interfaces = {device: all_interfaces.get(device, {})}

    if ipnet and not isinstance(ipnet, IPNetwork):
        ipnet = _get_network_obj(ipnet)

    best_row = {}
    best_net_match = None
    for device, net_interfaces_out in six.iteritems(all_interfaces):  # pylint: disable=too-many-nested-blocks
        if not net_interfaces_out:
            continue
        if not net_interfaces_out.get('result', False):
            continue
        selected_device_interfaces = net_interfaces_out.get('out', {})
        if interface:
            selected_device_interfaces = {
                interface: selected_device_interfaces.get(interface, {})
            }
        for interface_name, interface_details in six.iteritems(
                selected_device_interfaces):
            if not interface_details:
                continue
            if ipnet and interface_name in net_runner_opts.get(
                    'ignore_interfaces'):
                continue
            interface_description = (interface_details.get('description', '')
                                     or '')
            if pattern:
                if pattern.lower() not in interface_description.lower():
                    continue
            if not all_ipaddrs.get(device, {}).get('result', False):
                continue
            ips = []
            device_entry = {
                'device':
                device,
                'interface':
                interface_name,
                'interface_description':
                interface_description,
                'is_up': (interface_details.get('is_up', '') or ''),
                'is_enabled': (interface_details.get('is_enabled', '') or ''),
                'speed': (interface_details.get('speed', '') or ''),
                'mac':
                napalm_helpers.convert(
                    napalm_helpers.mac,
                    (interface_details.get('mac_address', '') or '')),
                'ips': []
            }
            intf_entry_found = False
            for intrf, interface_ips in six.iteritems(
                    all_ipaddrs.get(device, {}).get('out', {})):
                if intrf.split('.')[0] == interface_name:
                    ip_addresses = interface_ips.get('ipv4',
                                                     {})  # all IPv4 addresses
                    ip_addresses.update(interface_ips.get(
                        'ipv6', {}))  # and all IPv6 addresses
                    ips = [
                        '{0}/{1}'.format(
                            ip_addr, addr_details.get('prefix_length', '32'))
                        for ip_addr, addr_details in six.iteritems(
                            ip_addresses)
                    ]
                    interf_entry = {}
                    interf_entry.update(device_entry)
                    interf_entry['ips'] = ips
                    if display:
                        interf_entry['ips'] = '\n'.join(interf_entry['ips'])
                    if ipnet:
                        inet_ips = [
                            six.text_type(ip) for ip in ips
                            if _ipnet_belongs(ip)
                        ]  # filter and get only IP include ipnet
                        if inet_ips:  # if any
                            if best:
                                # determine the global best match
                                compare = [best_net_match]
                                compare.extend(
                                    list(map(_get_network_obj, inet_ips)))
                                new_best_net_match = max(compare)
                                if new_best_net_match != best_net_match:
                                    best_net_match = new_best_net_match
                                    best_row = interf_entry
                            else:
                                # or include all
                                intf_entry_found = True
                                rows.append(interf_entry)
                    else:
                        intf_entry_found = True
                        rows.append(interf_entry)
            if not intf_entry_found and not ipnet:
                interf_entry = {}
                interf_entry.update(device_entry)
                if display:
                    interf_entry['ips'] = ''
                rows.append(interf_entry)

    if ipnet and best and best_row:
        rows = [best_row]

    return _display_runner(rows, labels, title, display=display)
Example #38
def sig4(method,
         endpoint,
         params,
         prov_dict,
         aws_api_version=DEFAULT_AWS_API_VERSION,
         location=None,
         product='ec2',
         uri='/',
         requesturl=None,
         data='',
         headers=None,
         role_arn=None,
         payload_hash=None):
    '''
    Sign a query against AWS services using Signature Version 4 Signing
    Process. This is documented at:

    http://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html
    http://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
    http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
    '''
    timenow = datetime.utcnow()

    # Retrieve access credentials from meta-data, or use provided
    if role_arn is None:
        access_key_id, secret_access_key, token = creds(prov_dict)
    else:
        access_key_id, secret_access_key, token = assumed_creds(
            prov_dict, role_arn, location=location)

    if location is None:
        location = get_region_from_metadata()
    if location is None:
        location = DEFAULT_LOCATION

    params_with_headers = params.copy()
    if product not in ('s3', 'ssm'):
        params_with_headers['Version'] = aws_api_version
    keys = sorted(params_with_headers.keys())
    values = list(map(params_with_headers.get, keys))
    querystring = urlencode(list(zip(keys, values))).replace('+', '%20')

    amzdate = timenow.strftime('%Y%m%dT%H%M%SZ')
    datestamp = timenow.strftime('%Y%m%d')
    new_headers = {}
    if isinstance(headers, dict):
        new_headers = headers.copy()

    # Create payload hash (hash of the request body content). For GET
    # requests, the payload is an empty string ('').
    if not payload_hash:
        payload_hash = salt.utils.hashutils.sha256_digest(data)

    new_headers['X-Amz-date'] = amzdate
    new_headers['host'] = endpoint
    new_headers['x-amz-content-sha256'] = payload_hash
    a_canonical_headers = []
    a_signed_headers = []

    if token != '':
        new_headers['X-Amz-security-token'] = token

    for header in sorted(new_headers.keys(), key=six.text_type.lower):
        lower_header = header.lower()
        a_canonical_headers.append('{0}:{1}'.format(
            lower_header, new_headers[header].strip()))
        a_signed_headers.append(lower_header)
    canonical_headers = '\n'.join(a_canonical_headers) + '\n'
    signed_headers = ';'.join(a_signed_headers)

    algorithm = 'AWS4-HMAC-SHA256'

    # Combine elements to create create canonical request
    canonical_request = '\n'.join((method, uri, querystring, canonical_headers,
                                   signed_headers, payload_hash))

    # Create the string to sign
    credential_scope = '/'.join((datestamp, location, product, 'aws4_request'))
    string_to_sign = '\n'.join(
        (algorithm, amzdate, credential_scope,
         salt.utils.hashutils.sha256_digest(canonical_request)))

    # Create the signing key using the function defined above.
    signing_key = _sig_key(secret_access_key, datestamp, location, product)

    # Sign the string_to_sign using the signing_key
    signature = hmac.new(signing_key, string_to_sign.encode('utf-8'),
                         hashlib.sha256).hexdigest()

    # Add signing information to the request
    authorization_header = (
        '{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}').format(
            algorithm,
            access_key_id,
            credential_scope,
            signed_headers,
            signature,
        )

    new_headers['Authorization'] = authorization_header

    requesturl = '{0}?{1}'.format(requesturl, querystring)
    return new_headers, requesturl
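
The function above relies on a ``_sig_key`` helper that is not shown in this snippet. As a hedged sketch only (not necessarily the project's actual implementation), the signing-key derivation chained over date, region, service and the literal ``aws4_request``, as described in the AWS Signature Version 4 documentation, could look like this:

import hashlib
import hmac


def _sig_key(key, datestamp, region, service):
    '''Sketch of the SigV4 signing-key derivation (assumed helper).'''
    def _sign(signing_key, msg):
        # Each step feeds the previous digest back in as the HMAC key
        return hmac.new(signing_key, msg.encode('utf-8'), hashlib.sha256).digest()

    k_date = _sign(('AWS4' + key).encode('utf-8'), datestamp)
    k_region = _sign(k_date, region)
    k_service = _sign(k_region, service)
    return _sign(k_service, 'aws4_request')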
Example #39
File: dracr.py Project: mahak/salt
def inventory(host=None, admin_username=None, admin_password=None):
    def mapit(x, y):
        return {x: y}

    fields = {}
    fields['server'] = ['name', 'idrac_version', 'blade_type', 'gen',
                        'updateable']
    fields['switch'] = ['name', 'model_name', 'hw_version', 'fw_version']
    fields['cmc'] = ['name', 'cmc_version', 'updateable']
    fields['chassis'] = ['name', 'fw_version', 'fqdd']

    rawinv = __execute_ret('getversion', host=host,
                           admin_username=admin_username,
                           admin_password=admin_password)

    if rawinv['retcode'] != 0:
        return rawinv

    in_server = False
    in_switch = False
    in_cmc = False
    in_chassis = False
    ret = {}
    ret['server'] = {}
    ret['switch'] = {}
    ret['cmc'] = {}
    ret['chassis'] = {}
    for l in rawinv['stdout'].splitlines():
        if l.startswith('<Server>'):
            in_server = True
            in_switch = False
            in_cmc = False
            in_chassis = False
            continue

        if l.startswith('<Switch>'):
            in_server = False
            in_switch = True
            in_cmc = False
            in_chassis = False
            continue

        if l.startswith('<CMC>'):
            in_server = False
            in_switch = False
            in_cmc = True
            in_chassis = False
            continue

        if l.startswith('<Chassis Infrastructure>'):
            in_server = False
            in_switch = False
            in_cmc = False
            in_chassis = True
            continue

        if len(l) < 1:
            continue

        line = re.split('  +', l.strip())

        if in_server:
            ret['server'][line[0]] = dict(
                (k, v) for d in map(mapit, fields['server'], line) for (k, v)
                in d.items())
        if in_switch:
            ret['switch'][line[0]] = dict(
                (k, v) for d in map(mapit, fields['switch'], line) for (k, v)
                in d.items())
        if in_cmc:
            ret['cmc'][line[0]] = dict(
                (k, v) for d in map(mapit, fields['cmc'], line) for (k, v) in
                d.items())
        if in_chassis:
            ret['chassis'][line[0]] = dict(
                (k, v) for d in map(mapit, fields['chassis'], line) for k, v in
                d.items())

    return ret
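
The nested generator expressions above zip the expected field names with the whitespace-split columns of each line. With hypothetical column values, one entry is built as if by:

line = ['server-1', '2.63', 'PowerEdgeM630', '13G', '1']   # hypothetical columns
entry = dict(zip(fields['server'], line))
# -> {'name': 'server-1', 'idrac_version': '2.63', 'blade_type': 'PowerEdgeM630',
#     'gen': '13G', 'updateable': '1'}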
Example #40
    def prepare_rows(self,
                     rows,
                     indent,
                     has_header):

        '''Prepare rows content to be displayed.'''

        out = []

        def row_wrapper(row):
            new_rows = [
                self.wrapfunc(item).split('\n')
                for item in row
            ]
            rows = []
            # map(None, ...) only exists on Python 2; zip_longest gives the
            # same padding behaviour (itertools is assumed to be imported at
            # the module level).
            for item in itertools.zip_longest(*new_rows):
                if isinstance(item, (tuple, list)):
                    rows.append([substr or '' for substr in item])
                else:
                    rows.append([item])
            return rows

        logical_rows = [
            row_wrapper(row)
            for row in rows
        ]

        # Transpose the flattened physical rows into columns (functools is
        # assumed to be imported at the module level for Python 3).
        columns = itertools.zip_longest(*functools.reduce(operator.add, logical_rows))

        max_widths = [
            max([len(str(item)) for item in column])
            for column in columns
        ]
        row_separator = self.row_delimiter * (len(self.prefix) + len(self.suffix) + sum(max_widths) +
                                              len(self.delim) * (len(max_widths) - 1))

        justify = self._JUSTIFY_MAP[self.justify.lower()]

        if self.separate_rows:
            out.append(
                self.ustring(
                    indent,
                    self.LIGHT_GRAY,  # pylint: disable=no-member
                    row_separator
                )
            )
        for physical_rows in logical_rows:
            for row in physical_rows:
                line = self.prefix \
                        + self.delim.join([
                                justify(str(item), width)
                                for (item, width) in zip(row, max_widths)
                        ]) + self.suffix
                out.append(
                    self.ustring(
                        indent,
                        self.WHITE,  # pylint: disable=no-member
                        line
                    )
                )
            if self.separate_rows or has_header:
                out.append(
                    self.ustring(
                        indent,
                        self.LIGHT_GRAY,  # pylint: disable=no-member
                        row_separator
                    )
                )
                has_header = False
        return out
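
A quick illustration of the padding performed by ``zip_longest`` in ``row_wrapper`` above (cell values hypothetical):

from itertools import zip_longest

cells = [['alpha', 'beta'], ['gamma']]   # one wrapped cell spans two physical lines
list(zip_longest(*cells))                # [('alpha', 'gamma'), ('beta', None)]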
Example #41
def validate(config):
    """
    Validate the beacon configuration
    """

    # Configuration for load beacon should be a list of dicts
    if not isinstance(config, list):
        return False, ("Configuration for load beacon must be a list.")
    else:
        _config = {}
        list(map(_config.update, config))

        if "emitatstartup" in _config:
            if not isinstance(_config["emitatstartup"], bool):
                return (
                    False,
                    (
                        "Configuration for load beacon option "
                        "emitatstartup must be a boolean."
                    ),
                )

        if "onchangeonly" in _config:
            if not isinstance(_config["onchangeonly"], bool):
                return (
                    False,
                    (
                        "Configuration for load beacon option "
                        "onchangeonly must be a boolean."
                    ),
                )

        if "averages" not in _config:
            return False, ("Averages configuration is required for load beacon.")
        else:

            if not any(j in ["1m", "5m", "15m"] for j in _config.get("averages", {})):
                return (
                    False,
                    (
                        "Averages configuration for load beacon "
                        "must contain 1m, 5m or 15m items."
                    ),
                )

            for item in ["1m", "5m", "15m"]:
                if item not in _config["averages"]:
                    continue
                if not isinstance(_config["averages"][item], list):
                    return (
                        False,
                        (
                            "Averages configuration for load beacon: "
                            "1m, 5m and 15m items must be "
                            "a list of two items."
                        ),
                    )
                else:
                    if len(_config["averages"][item]) != 2:
                        return (
                            False,
                            (
                                "Configuration for load beacon: "
                                "1m, 5m and 15m items must be "
                                "a list of two items."
                            ),
                        )

    return True, "Valid beacon configuration"
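
A hypothetical configuration (a list of single-key dicts, as Salt passes beacon config) that passes the validation above:

config = [
    {"averages": {"1m": [0.0, 2.0], "5m": [0.0, 1.5], "15m": [0.1, 1.0]}},
    {"emitatstartup": True},
    {"onchangeonly": False},
]
assert validate(config) == (True, "Valid beacon configuration")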
Example #42
def present(
    name,
    source,
    aliases=None,
    public=None,
    auto_update=None,
    remote_addr=None,
    cert=None,
    key=None,
    verify_cert=True,
):
    """
    Ensure an image exists, copying it from the source if it does not.

    name :
        An alias of the image; this is used to check whether the image exists
        and it will be added as an alias to the image on copy/create.

    source :
        Source dict.

        For an LXD to LXD copy:

        .. code-block:: yaml

            source:
                type: lxd
                name: ubuntu/xenial/amd64  # This can also be a fingerprint.
                remote_addr: https://images.linuxcontainers.org:8443
                cert: ~/.config/lxd/client.crt
                key: ~/.config/lxd/client.key
                verify_cert: False

        .. attention::

            For this kind of remote you also need to provide:
            - a https:// remote_addr
            - a cert and key
            - verify_cert

        From file:

        .. code-block:: yaml

            source:
                type: file
                filename: salt://lxd/files/busybox.tar.xz
                saltenv: base

        From simplestreams:

        .. code-block:: yaml

            source:
                type: simplestreams
                server: https://cloud-images.ubuntu.com/releases
                name: xenial/amd64

        From a URL:

        .. code-block:: yaml

            source:
                type: url
                url: https://dl.stgraber.org/lxd

    aliases :
        List of aliases to append, can be empty.

    public :
        Make this image publicly available on this instance?
            None on source_type LXD means copy source
            None on source_type file means False

    auto_update :
        Try to auto-update from the original source?
            None on source_type LXD means copy source
            source_type file does not have auto-update.

    remote_addr :
        A URL to a remote server. You must also provide cert and key if you
        specify remote_addr!

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM formatted SSL certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM Formatted SSL Key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the cert. This defaults to True, but in most cases
        you will want to disable it, as LXD normally uses self-signed
        certificates.
    """
    if aliases is None:
        aliases = []

    # Create a copy of aliases, since we're modifying it here
    aliases = aliases[:]
    ret = {
        "name": name,
        "source": source,
        "aliases": aliases,
        "public": public,
        "auto_update": auto_update,
        "remote_addr": remote_addr,
        "cert": cert,
        "key": key,
        "verify_cert": verify_cert,
        "changes": {},
    }

    image = None
    try:
        image = __salt__["lxd.image_get_by_alias"](name,
                                                   remote_addr,
                                                   cert,
                                                   key,
                                                   verify_cert,
                                                   _raw=True)
    except CommandExecutionError as e:
        return _error(ret, six.text_type(e))
    except SaltInvocationError as e:
        # Image not found
        pass

    if image is None:
        if __opts__["test"]:
            # Test is on, just return that we would create the image
            msg = 'Would create the image "{0}"'.format(name)
            ret["changes"] = {"created": msg}
            return _unchanged(ret, msg)

        try:
            if source["type"] == "lxd":
                image = __salt__["lxd.image_copy_lxd"](
                    source["name"],
                    src_remote_addr=source["remote_addr"],
                    src_cert=source["cert"],
                    src_key=source["key"],
                    src_verify_cert=source.get("verify_cert", True),
                    remote_addr=remote_addr,
                    cert=cert,
                    key=key,
                    verify_cert=verify_cert,
                    aliases=aliases,
                    public=public,
                    auto_update=auto_update,
                    _raw=True,
                )

            if source["type"] == "file":
                if "saltenv" not in source:
                    source["saltenv"] = __env__
                image = __salt__["lxd.image_from_file"](
                    source["filename"],
                    remote_addr=remote_addr,
                    cert=cert,
                    key=key,
                    verify_cert=verify_cert,
                    aliases=aliases,
                    public=False if public is None else public,
                    saltenv=source["saltenv"],
                    _raw=True,
                )

            if source["type"] == "simplestreams":
                image = __salt__["lxd.image_from_simplestreams"](
                    source["server"],
                    source["name"],
                    remote_addr=remote_addr,
                    cert=cert,
                    key=key,
                    verify_cert=verify_cert,
                    aliases=aliases,
                    public=False if public is None else public,
                    auto_update=False if auto_update is None else auto_update,
                    _raw=True,
                )

            if source["type"] == "url":
                image = __salt__["lxd.image_from_url"](
                    source["url"],
                    remote_addr=remote_addr,
                    cert=cert,
                    key=key,
                    verify_cert=verify_cert,
                    aliases=aliases,
                    public=False if public is None else public,
                    auto_update=False if auto_update is None else auto_update,
                    _raw=True,
                )
        except CommandExecutionError as e:
            return _error(ret, six.text_type(e))

    # Sync aliases
    if name not in aliases:
        aliases.append(name)

    old_aliases = set([six.text_type(a["name"]) for a in image.aliases])
    new_aliases = set(map(six.text_type, aliases))

    alias_changes = []
    # Removed aliases
    for k in old_aliases.difference(new_aliases):
        if not __opts__["test"]:
            __salt__["lxd.image_alias_delete"](image, k)
            alias_changes.append('Removed alias "{0}"'.format(k))
        else:
            alias_changes.append('Would remove alias "{0}"'.format(k))

    # New aliases
    for k in new_aliases.difference(old_aliases):
        if not __opts__["test"]:
            __salt__["lxd.image_alias_add"](image, k, "")
            alias_changes.append('Added alias "{0}"'.format(k))
        else:
            alias_changes.append('Would add alias "{0}"'.format(k))

    if alias_changes:
        ret["changes"]["aliases"] = alias_changes

    # Set public
    if public is not None and image.public != public:
        if not __opts__["test"]:
            ret["changes"][
                "public"] = "Setting the image public to {0!s}".format(public)
            image.public = public
            __salt__["lxd.pylxd_save_object"](image)
        else:
            ret["changes"]["public"] = "Would set public to {0!s}".format(
                public)

    if __opts__["test"] and ret["changes"]:
        return _unchanged(
            ret, "Would do {0} changes".format(len(ret["changes"].keys())))

    return _success(ret, "{0} changes".format(len(ret["changes"].keys())))
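
The alias synchronisation above is a plain set difference; with hypothetical alias names it behaves like this:

old_aliases = {"ubuntu/xenial", "xenial"}   # aliases already on the image
new_aliases = {"xenial", "my-image"}        # desired aliases, including name
old_aliases.difference(new_aliases)         # {"ubuntu/xenial"} -> would be removed
new_aliases.difference(old_aliases)         # {"my-image"}      -> would be added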
Example #43
def neighbors(*asns, **kwargs):
    """
    Search for BGP neighbors details in the mines of the ``bgp.neighbors`` function.

    Arguments:

    asns
        A list of AS numbers to search for.
        The runner will return only the neighbors of these AS numbers.

    device
        Filter by device name (minion ID).

    ip
        Search for a BGP neighbor using its IP address.
        In multi-VRF environments, the same IP address could be used by
        more than one neighbor, in different routing tables.

    network
        Search neighbors within a certain IP network.

    title
        Custom title.

    display: ``True``
        Display on the screen or return structured object? Default: ``True`` (display on the CLI).

    outputter: ``table``
        Specify the outputter name when displaying on the CLI. Default: :mod:`table <salt.output.table_out>`.

    In addition, any field from the output of the ``neighbors`` function
    from the :mod:`NAPALM BGP module <salt.modules.napalm_bgp.neighbors>` can be used as a filter.

    CLI Example:

    .. code-block:: bash

        salt-run bgp.neighbors 13335 15169
        salt-run bgp.neighbors 13335 ip=172.17.19.1
        salt-run bgp.neighbors multipath=True
        salt-run bgp.neighbors up=False export_policy=my-export-policy multihop=False
        salt-run bgp.neighbors network=192.168.0.0/16

    Output example:

    .. code-block:: text

        BGP Neighbors for 13335, 15169
        ________________________________________________________________________________________________________________________________________________________________
        |    Device    | AS Number |         Neighbor Address        | State|#Active/Received/Accepted/Damped |         Policy IN         |         Policy OUT         |
        ________________________________________________________________________________________________________________________________________________________________
        | edge01.bjm01 |   13335   |          172.17.109.11          |        Established 0/398/398/0         |       import-policy       |        export-policy       |
        ________________________________________________________________________________________________________________________________________________________________
        | edge01.bjm01 |   13335   |          172.17.109.12          |       Established 397/398/398/0        |       import-policy       |        export-policy       |
        ________________________________________________________________________________________________________________________________________________________________
        | edge01.flw01 |   13335   |          192.168.172.11         |        Established 1/398/398/0         |       import-policy       |        export-policy       |
        ________________________________________________________________________________________________________________________________________________________________
        | edge01.oua01 |   13335   |          172.17.109.17          |          Established 0/0/0/0           |       import-policy       |        export-policy       |
        ________________________________________________________________________________________________________________________________________________________________
        | edge01.bjm01 |   15169   |             2001::1             |       Established 102/102/102/0        |       import-policy       |        export-policy       |
        ________________________________________________________________________________________________________________________________________________________________
        | edge01.bjm01 |   15169   |             2001::2             |       Established 102/102/102/0        |       import-policy       |        export-policy       |
        ________________________________________________________________________________________________________________________________________________________________
        | edge01.tbg01 |   13335   |          192.168.172.17         |          Established 0/1/1/0           |       import-policy       |        export-policy       |
        ________________________________________________________________________________________________________________________________________________________________
    """
    opts = _get_bgp_runner_opts()
    title = kwargs.pop("title", None)
    display = kwargs.pop("display", opts["display"])
    outputter = kwargs.pop("outputter", opts["outputter"])

    # cleaning up the kwargs
    # __pub args not used in this runner (yet)
    kwargs_copy = {}
    kwargs_copy.update(kwargs)
    for karg, _ in six.iteritems(kwargs_copy):
        if karg.startswith("__pub"):
            kwargs.pop(karg)
    if not asns and not kwargs:
        if display:
            print("Please specify at least an AS Number or an output filter")
        return []
    device = kwargs.pop("device", None)
    neighbor_ip = kwargs.pop("ip", None)
    ipnet = kwargs.pop("network", None)
    ipnet_obj = IPNetwork(ipnet) if ipnet else None
    # any other key passed on the CLI can be used as a filter

    rows = []
    # building the labels
    labels = {}
    for field in opts["return_fields"]:
        if field in _DEFAULT_LABELS_MAPPING:
            labels[field] = _DEFAULT_LABELS_MAPPING[field]
        else:
            # transform from 'previous_connection_state' to 'Previous Connection State'
            labels[field] = " ".join(
                map(lambda word: word.title(), field.split("_")))
    display_fields = list(
        set(opts["return_fields"]) - set(_DEFAULT_INCLUDED_FIELDS))
    get_bgp_neighbors_all = _get_mine(opts=opts)

    if not title:
        title_parts = []
        if asns:
            title_parts.append("BGP Neighbors for {asns}".format(
                asns=", ".join([six.text_type(asn) for asn in asns])))
        if neighbor_ip:
            title_parts.append(
                "Selecting neighbors having the remote IP address: {ipaddr}".
                format(ipaddr=neighbor_ip))
        if ipnet:
            title_parts.append(
                "Selecting neighbors within the IP network: {ipnet}".format(
                    ipnet=ipnet))
        if kwargs:
            title_parts.append(
                "Searching for BGP neighbors having the attributes: {attrmap}".
                format(attrmap=", ".join(
                    map(
                        lambda key: "{key}={value}".format(key=key,
                                                           value=kwargs[key]),
                        kwargs,
                    ))))
        title = "\n".join(title_parts)
    for minion, get_bgp_neighbors_minion in six.iteritems(
            get_bgp_neighbors_all):  # pylint: disable=too-many-nested-blocks
        if not get_bgp_neighbors_minion.get("result"):
            continue  # ignore empty or failed mines
        if device and minion != device:
            # when requested to display only the neighbors on a certain device
            continue
        get_bgp_neighbors_minion_out = get_bgp_neighbors_minion.get("out", {})
        for vrf, vrf_bgp_neighbors in six.iteritems(
                get_bgp_neighbors_minion_out):  # pylint: disable=unused-variable
            for asn, get_bgp_neighbors_minion_asn in six.iteritems(
                    vrf_bgp_neighbors):
                if asns and asn not in asns:
                    # if filtering by AS number(s),
                    # will ignore if this AS number key not in that list
                    # and continue the search
                    continue
                for neighbor in get_bgp_neighbors_minion_asn:
                    if kwargs and not _compare_match(kwargs, neighbor):
                        # requested filtering by neighbors stats
                        # but this one does not correspond
                        continue
                    if neighbor_ip and neighbor_ip != neighbor.get(
                            "remote_address"):
                        # requested filtering by neighbors IP addr
                        continue
                    if ipnet_obj and neighbor.get("remote_address"):
                        neighbor_ip_obj = IPAddress(
                            neighbor.get("remote_address"))
                        if neighbor_ip_obj not in ipnet_obj:
                            # Neighbor not in this network
                            continue
                    row = {
                        "device": minion,
                        "neighbor_address": neighbor.get("remote_address"),
                        "as_number": asn,
                    }
                    if "vrf" in display_fields:
                        row["vrf"] = vrf
                    if "connection_stats" in display_fields:
                        connection_stats = "{state} {active}/{received}/{accepted}/{damped}".format(
                            state=neighbor.get("connection_state", -1),
                            active=neighbor.get("active_prefix_count", -1),
                            received=neighbor.get("received_prefix_count", -1),
                            accepted=neighbor.get("accepted_prefix_count", -1),
                            damped=neighbor.get("suppressed_prefix_count", -1),
                        )
                        row["connection_stats"] = connection_stats
                    if ("interface_description" in display_fields
                            or "interface_name" in display_fields):
                        net_find = __salt__["net.interfaces"](
                            device=minion,
                            ipnet=neighbor.get("remote_address"),
                            display=False,
                        )
                        if net_find:
                            if "interface_description" in display_fields:
                                row["interface_description"] = net_find[0][
                                    "interface_description"]
                            if "interface_name" in display_fields:
                                row["interface_name"] = net_find[0][
                                    "interface"]
                        else:
                            # if unable to find anything, leave blank
                            if "interface_description" in display_fields:
                                row["interface_description"] = ""
                            if "interface_name" in display_fields:
                                row["interface_name"] = ""
                    for field in display_fields:
                        if field in neighbor:
                            row[field] = neighbor[field]
                    rows.append(row)
    return _display_runner(rows,
                           labels,
                           title,
                           display=display,
                           outputter=outputter)
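
The label generation above simply title-cases each underscore-separated word of a return field, for example:

field = "previous_connection_state"
" ".join(map(lambda word: word.title(), field.split("_")))
# -> 'Previous Connection State'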
Example #44
def sig4(method,
         endpoint,
         params,
         prov_dict,
         aws_api_version,
         location,
         product='ec2',
         uri='/',
         requesturl=None):
    '''
    Sign a query against AWS services using Signature Version 4 Signing
    Process. This is documented at:

    http://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html
    http://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
    http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
    '''
    timenow = datetime.datetime.utcnow()
    timestamp = timenow.strftime('%Y-%m-%dT%H:%M:%SZ')

    # Retrieve access credentials from meta-data, or use provided
    access_key_id, secret_access_key, token = creds(prov_dict)

    params_with_headers = params.copy()
    params_with_headers['Version'] = aws_api_version
    keys = sorted(params_with_headers.keys())
    values = list(map(params_with_headers.get, keys))
    querystring = urlencode(list(zip(keys, values)))

    amzdate = timenow.strftime('%Y%m%dT%H%M%SZ')
    datestamp = timenow.strftime('%Y%m%d')
    payload_hash = hashlib.sha256(b'').hexdigest()

    canonical_headers = 'host:{0}\nx-amz-date:{1}\n'.format(
        endpoint,
        amzdate,
    )
    signed_headers = 'host;x-amz-date'

    request = '\n'.join((method, endpoint, querystring, canonical_headers,
                         signed_headers, payload_hash))

    algorithm = 'AWS4-HMAC-SHA256'

    # Create payload hash (hash of the request body content). For GET
    # requests, the payload is an empty string ('').
    payload_hash = hashlib.sha256(b'').hexdigest()

    # Combine elements to create the canonical request
    canonical_request = '\n'.join((method, uri, querystring, canonical_headers,
                                   signed_headers, payload_hash))

    # Create the string to sign
    credential_scope = '/'.join((datestamp, location, product, 'aws4_request'))
    string_to_sign = '\n'.join((algorithm, amzdate, credential_scope,
                                hashlib.sha256(canonical_request.encode('utf-8')).hexdigest()))

    # Create the signing key using the function defined above.
    signing_key = _sig_key(secret_access_key, datestamp, location, product)

    # Sign the string_to_sign using the signing_key
    signature = hmac.new(signing_key, string_to_sign.encode('utf-8'),
                         hashlib.sha256).hexdigest()

    # Add signing information to the request
    authorization_header = (
        '{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}').format(
            algorithm,
            access_key_id,
            credential_scope,
            signed_headers,
            signature,
        )

    headers = {'x-amz-date': amzdate, 'Authorization': authorization_header}

    # Add in security token if we have one
    if token != '':
        headers['X-Amz-Security-Token'] = token

    requesturl = '{0}?{1}'.format(requesturl, querystring)
    return headers, requesturl
Example #45
File: aws.py Project: DaveQB/salt
def sig4(method, endpoint, params, prov_dict,
         aws_api_version=DEFAULT_AWS_API_VERSION, location=DEFAULT_LOCATION,
         product='ec2', uri='/', requesturl=None, data='', headers=None):
    '''
    Sign a query against AWS services using Signature Version 4 Signing
    Process. This is documented at:

    http://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html
    http://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
    http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
    '''
    timenow = datetime.datetime.utcnow()

    # Retrieve access credentials from meta-data, or use provided
    access_key_id, secret_access_key, token = creds(prov_dict)

    params_with_headers = params.copy()
    if product != 's3':
        params_with_headers['Version'] = aws_api_version
    keys = sorted(params_with_headers.keys())
    values = list(map(params_with_headers.get, keys))
    querystring = urlencode(list(zip(keys, values))).replace('+', '%20')

    amzdate = timenow.strftime('%Y%m%dT%H%M%SZ')
    datestamp = timenow.strftime('%Y%m%d')

    canonical_headers = 'host:{0}\nx-amz-date:{1}'.format(
        endpoint,
        amzdate,
    )
    signed_headers = 'host;x-amz-date'

    if isinstance(headers, dict):
        for header in sorted(headers.keys()):
            canonical_headers += '\n{0}:{1}'.format(header, headers[header])
            signed_headers += ';{0}'.format(header)
    canonical_headers += '\n'

    algorithm = 'AWS4-HMAC-SHA256'

    # Create payload hash (hash of the request body content). For GET
    # requests, the payload is an empty string ('').
    payload_hash = hashlib.sha256(data.encode('utf-8')).hexdigest()

    # Combine elements to create the canonical request
    canonical_request = '\n'.join((
        method,
        uri,
        querystring,
        canonical_headers,
        signed_headers,
        payload_hash
    ))

    # Create the string to sign
    credential_scope = '/'.join((
        datestamp, location, product, 'aws4_request'
    ))
    string_to_sign = '\n'.join((
        algorithm,
        amzdate,
        credential_scope,
        hashlib.sha256(canonical_request.encode('utf-8')).hexdigest()
    ))

    # Create the signing key using the function defined above.
    signing_key = _sig_key(
        secret_access_key,
        datestamp,
        location,
        product
    )

    # Sign the string_to_sign using the signing_key
    signature = hmac.new(
        signing_key,
        string_to_sign.encode('utf-8'),
        hashlib.sha256).hexdigest()

    # Add signing information to the request
    authorization_header = (
            '{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}'
        ).format(
            algorithm,
            access_key_id,
            credential_scope,
            signed_headers,
            signature,
        )

    new_headers = {
        'x-amz-date': amzdate,
        'x-amz-content-sha256': payload_hash,
        'Authorization': authorization_header,
    }
    if isinstance(headers, dict):
        for header in sorted(headers.keys()):
            new_headers[header] = headers[header]

    # Add in security token if we have one
    if token != '':
        new_headers['X-Amz-Security-Token'] = token

    requesturl = '{0}?{1}'.format(requesturl, querystring)
    return new_headers, requesturl
Example #46
def beacon(config):
    """
    Watch the configured files

    Example Config

    .. code-block:: yaml

        beacons:
          inotify:
            - files:
                /path/to/file/or/dir:
                  mask:
                    - open
                    - create
                    - close_write
                  recurse: True
                  auto_add: True
                  exclude:
                    - /path/to/file/or/dir/exclude1
                    - /path/to/file/or/dir/exclude2
                    - /path/to/file/or/dir/regex[a-m]*$:
                        regex: True
            - coalesce: True

    The mask list can contain the following events (the default mask is create,
    delete, and modify):

    * access            - File accessed
    * attrib            - File metadata changed
    * close_nowrite     - Unwritable file closed
    * close_write       - Writable file closed
    * create            - File created in watched directory
    * delete            - File deleted from watched directory
    * delete_self       - Watched file or directory deleted
    * modify            - File modified
    * moved_from        - File moved out of watched directory
    * moved_to          - File moved into watched directory
    * move_self         - Watched file moved
    * open              - File opened

    The mask can also contain the following options:

    * dont_follow       - Don't dereference symbolic links
    * excl_unlink       - Omit events for children after they have been unlinked
    * oneshot           - Remove watch after one event
    * onlydir           - Operate only if name is directory

    recurse:
      Recursively watch files in the directory
    auto_add:
      Automatically start watching files that are created in the watched directory
    exclude:
      Exclude directories or files from triggering events in the watched directory.
      Can use regex if regex is set to True
    coalesce:
      If this coalescing option is enabled, events are filtered based on
      their uniqueness: only unique events are enqueued and duplicates are
      discarded. An event is unique when the combination of its fields
      (wd, mask, cookie, name) is unique among the events of the same batch.
      After a batch of events is processed, any event is accepted again.
      This option is top-level (at the same level as the path) and therefore
      affects all paths that are being watched. This is due to this option
      being at the Notifier level in pyinotify.
    """
    _config = {}
    list(map(_config.update, config))

    ret = []
    notifier = _get_notifier(_config)
    wm = notifier._watch_manager

    # Read in existing events
    if notifier.check_events(1):
        notifier.read_events()
        notifier.process_events()
        queue = __context__["inotify.queue"]
        while queue:
            event = queue.popleft()

            _append = True
            # Find the matching path in config
            path = event.path
            while path != "/":
                if path in _config.get("files", {}):
                    break
                path = os.path.dirname(path)

            excludes = _config["files"][path].get("exclude", "")

            if excludes and isinstance(excludes, list):
                for exclude in excludes:
                    if isinstance(exclude, dict):
                        _exclude = next(iter(exclude))
                        if exclude[_exclude].get("regex", False):
                            try:
                                if re.search(_exclude, event.pathname):
                                    _append = False
                            except Exception:  # pylint: disable=broad-except
                                log.warning("Failed to compile regex: %s", _exclude)
                        else:
                            exclude = _exclude
                    elif "*" in exclude:
                        if fnmatch.fnmatch(event.pathname, exclude):
                            _append = False
                    else:
                        if event.pathname.startswith(exclude):
                            _append = False

            if _append:
                sub = {
                    "tag": event.path,
                    "path": event.pathname,
                    "change": event.maskname,
                }
                ret.append(sub)
            else:
                log.info("Excluding %s from event for %s", event.pathname, path)

    # Get paths currently being watched
    current = set()
    for wd in wm.watches:
        current.add(wm.watches[wd].path)

    # Update existing watches and add new ones
    # TODO: make the config handle more options
    for path in _config.get("files", ()):

        if isinstance(_config["files"][path], dict):
            mask = _config["files"][path].get("mask", DEFAULT_MASK)
            if isinstance(mask, list):
                r_mask = 0
                for sub in mask:
                    r_mask |= _get_mask(sub)
            elif isinstance(mask, salt.ext.six.binary_type):
                r_mask = _get_mask(mask)
            else:
                r_mask = mask
            mask = r_mask
            rec = _config["files"][path].get("recurse", False)
            auto_add = _config["files"][path].get("auto_add", False)
        else:
            mask = DEFAULT_MASK
            rec = False
            auto_add = False

        if path in current:
            for wd in wm.watches:
                if path == wm.watches[wd].path:
                    update = False
                    if wm.watches[wd].mask != mask:
                        update = True
                    if wm.watches[wd].auto_add != auto_add:
                        update = True
                    if update:
                        wm.update_watch(wd, mask=mask, rec=rec, auto_add=auto_add)
        elif os.path.exists(path):
            excludes = _config["files"][path].get("exclude", "")
            excl = None
            if isinstance(excludes, list):
                excl = []
                for exclude in excludes:
                    if isinstance(exclude, dict):
                        excl.append(list(exclude)[0])
                    else:
                        excl.append(exclude)
                excl = pyinotify.ExcludeFilter(excl)

            wm.add_watch(path, mask, rec=rec, auto_add=auto_add, exclude_filter=excl)

    # Return event data
    return ret
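
The watch-update loop above reduces a mask list to a single bitmask with bitwise OR; using hypothetical values in place of the pyinotify constants returned by ``_get_mask``:

masks = {"create": 0x100, "delete": 0x200, "modify": 0x002}   # hypothetical values
r_mask = 0
for sub in ["create", "delete"]:
    r_mask |= masks[sub]
# r_mask == 0x300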
Example #48
def beacon(config):
    '''
    Scan for the configured services and fire events

    Example Config

    .. code-block:: yaml

        beacons:
          service:
            - services:
                salt-master:
                mysql:

    The config above sets up beacons to check for
    the salt-master and mysql services.

    The config also supports two other parameters for each service:

    `onchangeonly`: when `onchangeonly` is True the beacon will fire
    events only when the service status changes.  Otherwise, it will fire an
    event at each beacon interval.  The default is False.

    `delay`: when `delay` is greater than 0 the beacon will fire events only
    after the service status changes, and the delay (in seconds) has passed.
    Applicable only when `onchangeonly` is True.  The default is 0.

    `emitatstartup`: when `emitatstartup` is False the beacon will not fire an
    event when the minion is reloaded. Applicable only when `onchangeonly` is
    True.  The default is True.

    `uncleanshutdown`: If `uncleanshutdown` is present it should point to the
    location of a pid file for the service.  Most services will not clean up
    this pid file if they are shutdown uncleanly (e.g. via `kill -9`) or if they
    are terminated through a crash such as a segmentation fault.  If the file is
    present, then the beacon will add `uncleanshutdown: True` to the event.  If
    not present, the field will be False.  The field is only added when the
    service is NOT running. Omitting the configuration variable altogether will
    turn this feature off.

    Please note that some init systems can remove the pid file if the service
    registers as crashed. One such example is nginx on CentOS 7, where the
    service unit removes the pid file when the service shuts down (i.e. the
    pid file is observed as removed when kill -9 is sent to the nginx master
    process). The 'uncleanshutdown' option might not be of much use there,
    unless the unit file is modified.

    Here is an example that will fire an event 30 seconds after the state of nginx
    changes and report an uncleanshutdown.  This example is for Arch, which
    places nginx's pid file in `/run`.

    .. code-block:: yaml

        beacons:
          service:
            - services:
                nginx:
                  onchangeonly: True
                  delay: 30
                  uncleanshutdown: /run/nginx.pid
    '''
    ret = []
    _config = {}
    list(map(_config.update, config))

    for service in _config.get('services', {}):
        ret_dict = {}

        service_config = _config['services'][service]

        ret_dict[service] = {'running': __salt__['service.status'](service)}
        ret_dict['service_name'] = service
        ret_dict['tag'] = service
        currtime = time.time()

        # If no options are given for the service, fall back to the defaults
        # and assign default values for oncleanshutdown, emitatstartup,
        # onchangeonly and delay. Those key:values are then added to the
        # service dictionary.
        if not service_config:
            service_config = {}
        if 'oncleanshutdown' not in service_config:
            service_config['oncleanshutdown'] = False
        if 'emitatstartup' not in service_config:
            service_config['emitatstartup'] = True
        if 'onchangeonly' not in service_config:
            service_config['onchangeonly'] = False
        if 'delay' not in service_config:
            service_config['delay'] = 0

        # We only want to report the nature of the shutdown
        # if the current running status is False
        # as well as if the config for the beacon asks for it
        if 'uncleanshutdown' in service_config and not ret_dict[service][
                'running']:
            filename = service_config['uncleanshutdown']
            ret_dict[service]['uncleanshutdown'] = True if os.path.exists(
                filename) else False
        if 'onchangeonly' in service_config and service_config[
                'onchangeonly'] is True:
            if service not in LAST_STATUS:
                LAST_STATUS[service] = ret_dict[service]
                if service_config['delay'] > 0:
                    LAST_STATUS[service]['time'] = currtime
                elif not service_config['emitatstartup']:
                    continue
                else:
                    ret.append(ret_dict)

            if LAST_STATUS[service]['running'] != ret_dict[service]['running']:
                LAST_STATUS[service] = ret_dict[service]
                if service_config['delay'] > 0:
                    LAST_STATUS[service]['time'] = currtime
                else:
                    ret.append(ret_dict)

            if 'time' in LAST_STATUS[service]:
                elapsedtime = int(
                    round(currtime - LAST_STATUS[service]['time']))
                if elapsedtime > service_config['delay']:
                    del LAST_STATUS[service]['time']
                    ret.append(ret_dict)
        else:
            ret.append(ret_dict)

    return ret
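
For reference, one item appended to ``ret`` above has the following shape (service name and values hypothetical):

ret_dict = {
    'nginx': {'running': False, 'uncleanshutdown': True},
    'service_name': 'nginx',
    'tag': 'nginx',
}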
Example #49
def beacon(config):
    '''
    Emit the status of all devices returned by adb

    Specify the device states that should emit an event,
    there will be an event for each device with the
    event type and device specified.

    .. code-block:: yaml

        beacons:
          adb:
            - states:
                - offline
                - unauthorized
                - missing
            - no_devices_event: True
            - battery_low: 25

    '''

    log.trace('adb beacon starting')
    ret = []

    _config = {}
    list(map(_config.update, config))

    out = __salt__['cmd.run']('adb devices', runas=_config.get('user', None))

    lines = out.split('\n')[1:]
    last_state_devices = list(last_state.keys())
    found_devices = []

    for line in lines:
        try:
            device, state = line.split('\t')
            found_devices.append(device)
            if device not in last_state_devices or \
                    ('state' in last_state[device] and last_state[device]['state'] != state):
                if state in _config['states']:
                    ret.append({
                        'device': device,
                        'state': state,
                        'tag': state
                    })
                    last_state[device] = {'state': state}

            if 'battery_low' in _config:
                val = last_state.get(device, {})
                cmd = 'adb -s {0} shell cat /sys/class/power_supply/*/capacity'.format(
                    device)
                battery_levels = __salt__['cmd.run'](cmd,
                                                     runas=_config.get(
                                                         'user',
                                                         None)).split('\n')

                for l in battery_levels:
                    battery_level = int(l)
                    if 0 < battery_level < 100:
                        if 'battery' not in val or battery_level != val[
                                'battery']:
                            if ('battery' not in val or val['battery'] > _config['battery_low']) and \
                                            battery_level <= _config['battery_low']:
                                ret.append({
                                    'device': device,
                                    'battery_level': battery_level,
                                    'tag': 'battery_low'
                                })

                        if device not in last_state:
                            last_state[device] = {}

                        last_state[device].update({'battery': battery_level})

        except ValueError:
            continue

    # Find missing devices and remove them / send an event
    for device in last_state_devices:
        if device not in found_devices:
            if 'missing' in _config['states']:
                ret.append({
                    'device': device,
                    'state': 'missing',
                    'tag': 'missing'
                })

            del last_state[device]

    # Maybe send an event if we don't have any devices
    if 'no_devices_event' in _config and _config['no_devices_event'] is True:
        if len(found_devices) == 0 and not last_state_extra['no_devices']:
            ret.append({'tag': 'no_devices'})

    # Did we have no devices listed this time around?

    last_state_extra['no_devices'] = len(found_devices) == 0

    return ret
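
The parsing above assumes the usual ``adb devices`` output: one header line followed by tab-separated device/state pairs. With hypothetical output:

out = ('List of devices attached\n'
       'emulator-5554\tdevice\n'
       '0123456789ABCDEF\tunauthorized')
for line in out.split('\n')[1:]:
    device, state = line.split('\t')
    # ('emulator-5554', 'device'), then ('0123456789ABCDEF', 'unauthorized')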
Example #50
File: at.py Project: mahak/salt
def absent(name, jobid=None, **kwargs):
    '''
    Remove a job from queue
    The 'kwargs' can include hour, minute, day, month, year

    limit
        Target range

    tag
        Job's tag

    runas
        Only target jobs run as the specified user

    .. code-block:: yaml

        example1:
          at.absent:
            - limit: all

    .. code-block:: yaml

        example2:
          at.absent:
            - limit: all
            - year: 13

    .. code-block:: yaml

        example3:
          at.absent:
            - limit: all
            - tag: rose
            - runas: jim

    .. code-block:: yaml

        example4:
          at.absent:
            - limit: all
            - tag: rose
            - day: 13
            - hour: 16
    '''
    if 'limit' in kwargs:
        name = kwargs['limit']
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    binary = salt.utils.which('at')

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Remove jobs()'
        return ret

    if name != 'all':
        ret['comment'] = 'limit parameter not supported {0}'.format(name)
        ret['result'] = False
        return ret

    #if jobid:
    #    output = __salt__['cmd.run']('{0} -d {1}'.format(binary, jobid))
    #    if i in map(str, [j['job'] for j in __salt__['at.atq']()['jobs']]):
    #        ret['result'] = False
    #        return ret
    #    ret['comment'] = 'Remove job({0}) from queue'.format(' '.join(opts))
    #    return ret

    if kwargs:
        opts = list(map(str, [j['job'] for j in __salt__['at.jobcheck'](**kwargs)['jobs']]))
    else:
        opts = list(map(str, [j['job'] for j in __salt__['at.atq']()['jobs']]))

    if not opts:
        ret['result'] = False
        ret['comment'] = 'No match jobs or time format error'
        return ret

    __salt__['cmd.run']('{0} -d {1}'.format(binary, ' '.join(opts)))
    fail = []
    for i in opts:
        if i in list(map(str, [j['job'] for j in __salt__['at.atq']()['jobs']])):
            fail.append(i)

    if fail:
        ret['comment'] = 'Remove job({0}) from queue but ({1}) fail'.format(
            ' '.join(opts), fail
        )
    else:
        ret['comment'] = 'Remove job({0}) from queue'.format(' '.join(opts))

    return ret
Example #51
def beacon(config):
    """
    Emit the load averages of this host.

    Specify thresholds for each load average
    and only emit a beacon if any of them are
    exceeded.

    `onchangeonly`: when `onchangeonly` is True the beacon will fire
    events only when a load average passes one of its thresholds.  Otherwise,
    it will fire an event at each beacon interval.  The default is False.

    `emitatstartup`: when `emitatstartup` is False the beacon will not fire an
    event when the minion is reloaded. Applicable only when `onchangeonly` is
    True.  The default is True.

    .. code-block:: yaml

        beacons:
          load:
            - averages:
                1m:
                  - 0.0
                  - 2.0
                5m:
                  - 0.0
                  - 1.5
                15m:
                  - 0.1
                  - 1.0
            - emitatstartup: True
            - onchangeonly: False

    """
    log.trace("load beacon starting")

    _config = {}
    list(map(_config.update, config))

    # Default config if not present
    if "emitatstartup" not in _config:
        _config["emitatstartup"] = True
    if "onchangeonly" not in _config:
        _config["onchangeonly"] = False

    ret = []
    avgs = os.getloadavg()
    avg_keys = ["1m", "5m", "15m"]
    avg_dict = dict(zip(avg_keys, avgs))

    if _config["onchangeonly"]:
        if not LAST_STATUS:
            for k in ["1m", "5m", "15m"]:
                LAST_STATUS[k] = avg_dict[k]
            if not _config["emitatstartup"]:
                log.debug("Don't emit because emitatstartup is False")
                return ret

    send_beacon = False

    # Check each entry for threshold
    for k in ["1m", "5m", "15m"]:
        if k in _config.get("averages", {}):
            if _config["onchangeonly"]:
                # Emit if current is more that threshold and old value less
                # that threshold
                if float(avg_dict[k]) > float(_config["averages"][k][1]) and float(
                    LAST_STATUS[k]
                ) < float(_config["averages"][k][1]):
                    log.debug(
                        "Emit because %f > %f and last was " "%f",
                        float(avg_dict[k]),
                        float(_config["averages"][k][1]),
                        float(LAST_STATUS[k]),
                    )
                    send_beacon = True
                    break
                # Emit if current is less that threshold and old value more
                # that threshold
                if float(avg_dict[k]) < float(_config["averages"][k][0]) and float(
                    LAST_STATUS[k]
                ) > float(_config["averages"][k][0]):
                    log.debug(
                        "Emit because %f < %f and last was" "%f",
                        float(avg_dict[k]),
                        float(_config["averages"][k][0]),
                        float(LAST_STATUS[k]),
                    )
                    send_beacon = True
                    break
            else:
                # Emit no matter LAST_STATUS
                if float(avg_dict[k]) < float(_config["averages"][k][0]) or float(
                    avg_dict[k]
                ) > float(_config["averages"][k][1]):
                    log.debug(
                        "Emit because %f < %f or > " "%f",
                        float(avg_dict[k]),
                        float(_config["averages"][k][0]),
                        float(_config["averages"][k][1]),
                    )
                    send_beacon = True
                    break

    if _config["onchangeonly"]:
        for k in ["1m", "5m", "15m"]:
            LAST_STATUS[k] = avg_dict[k]

    if send_beacon:
        ret.append(avg_dict)

    return ret
Exemplo n.º 52
0
Arquivo: dns.py Projeto: yvwulei/salt
def query(name,
          rdtype,
          method=None,
          servers=None,
          timeout=None,
          walk=False,
          walk_tld=False,
          secure=None):
    '''
    Query DNS for information
    :param name: name to lookup
    :param rdtype: DNS record type
    :param method: gai (getaddrinfo()), pydns, dig, drill, host, nslookup or auto (default)
    :param servers: (list of) server(s) to try in order
    :param timeout: query timeout or a valiant approximation of that
    :param secure: return only DNSSEC secured response
    :param walk: Find records in parents if they don't exist
    :param walk_tld: Include the top-level domain in the walk
    :return: [] of records
    '''
    rdtype = rdtype.upper()
    qargs = {
        'method': method,
        'servers': servers,
        'timeout': timeout,
        'walk': walk,
        'walk_tld': walk_tld,
        'secure': secure
    }

    if rdtype == 'PTR' and not name.endswith('arpa'):
        name = ptr_name(name)

    qres = lookup(name, rdtype, **qargs)
    if rdtype == 'SPF' and not qres:
        # 'SPF' has become a regular 'TXT' again
        qres = [
            answer for answer in lookup(name, 'TXT', **qargs)
            if answer.startswith('v=spf')
        ]

    rec_map = {
        'A': a_rec,
        'AAAA': aaaa_rec,
        'CAA': caa_rec,
        'MX': mx_rec,
        'SOA': soa_rec,
        'SPF': spf_rec,
        'SRV': srv_rec,
    }

    if rdtype not in rec_map:
        return qres

    caster = rec_map[rdtype]

    if rdtype in ('MX', 'SRV'):
        # Grouped returns
        res = caster(qres)
    else:
        # List of results
        res = list(map(caster, qres))

    return res
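
Assuming this is ``salt/utils/dns.py`` from the Salt tree (as the project path suggests), the function can be exercised directly when Salt is installed; the hostname and resolver below are placeholders, not values from the source:

import salt.utils.dns

# Plain A lookup, letting the module pick a resolution method automatically.
addrs = salt.utils.dns.query('example.com', 'A')

# Force a specific tool and resolver; MX results come back grouped.
mx = salt.utils.dns.query('example.com', 'MX', method='dig', servers=['8.8.8.8'])
print(addrs, mx)
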
Exemplo n.º 53
0
def _clean_salt_variables(params, variable_prefix="__"):
    '''
    Pops out variables from params which start with `variable_prefix`.
    '''
    for key in [k for k in params if k.startswith(variable_prefix)]:
        params.pop(key)
    return params
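
A quick illustration of the helper above, with invented keys: every entry whose name starts with the prefix (``__`` by default) is popped in place and the same dict is returned.

# Illustrative only; the keys below are made up for the example.
params = {'__pub_fun': 'state.apply', '__id__': 'web', 'name': 'nginx', 'enable': True}
cleaned = _clean_salt_variables(params)
print(cleaned)            # {'name': 'nginx', 'enable': True}
print(cleaned is params)  # True: the dict is modified in place
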
Exemplo n.º 54
0
def beacon(config):
    '''
    Broadcast values via zeroconf

    If the announced values are static, it is advised to set run_once: True
    (do not poll) on the beacon configuration.

    The following are required configuration settings:

    - ``servicetype`` - The service type to announce
    - ``port`` - The port of the service to announce
    - ``txt`` - The TXT record of the service being announced as a dict. Grains
      can be used to define TXT values using one of the following two formats:

      - ``grains.<grain_name>``
      - ``grains.<grain_name>[i]`` where i is an integer representing the
        index of the grain to use. If the grain is not a list, the index is
        ignored.

    The following are optional configuration settings:

    - ``servicename`` - Set the name of the service. Will use the hostname from
      the minion's ``host`` grain if this value is not set.
    - ``reset_on_change`` - If ``True`` and there is a change in TXT records
      detected, it will stop announcing the service and then restart announcing
      the service. This interruption in service announcement may be desirable
      if the client relies on changes in the browse records to update its cache
      of TXT records. Defaults to ``False``.
    - ``reset_wait`` - The number of seconds to wait between stopping and
      restarting the announcement when a change in TXT records is detected and
      ``reset_on_change`` is ``True``. Defaults to ``0``.
    - ``copy_grains`` - If ``True``, Salt will copy the grains passed into the
      beacon when it backs them up to check for changes on the next iteration.
      Normally, it would use straight value assignment instead of a copy.
      Copying allows detection of changes to grains that are modified in-place
      rather than completely replaced. In-place grain changes are not currently
      made by the main Salt code but may be made by a custom plug-in. Defaults
      to ``False``.

    Example Config

    .. code-block:: yaml

       beacons:
         avahi_announce:
           - run_once: True
           - servicetype: _demo._tcp
           - port: 1234
           - txt:
               ProdName: grains.productname
               SerialNo: grains.serialnumber
               Comments: 'this is a test'
    '''
    ret = []
    changes = {}
    txt = {}

    global LAST_GRAINS

    _config = {}
    list(map(_config.update, config))

    if 'servicename' in _config:
        servicename = _config['servicename']
    else:
        servicename = __grains__['host']
        # Check for hostname change
        if LAST_GRAINS and LAST_GRAINS['host'] != servicename:
            changes['servicename'] = servicename

    if LAST_GRAINS and _config.get('reset_on_change', False):
        # Check for IP address change in the case when we reset on change
        if LAST_GRAINS.get('ipv4', []) != __grains__.get('ipv4', []):
            changes['ipv4'] = __grains__.get('ipv4', [])
        if LAST_GRAINS.get('ipv6', []) != __grains__.get('ipv6', []):
            changes['ipv6'] = __grains__.get('ipv6', [])

    for item in _config['txt']:
        changes_key = 'txt.' + salt.utils.stringutils.to_unicode(item)
        if _config['txt'][item].startswith('grains.'):
            grain = _config['txt'][item][7:]
            grain_index = None
            square_bracket = grain.find('[')
            if square_bracket != -1 and grain[-1] == ']':
                grain_index = int(grain[square_bracket+1:-1])
                grain = grain[:square_bracket]

            grain_value = __grains__.get(grain, '')
            if isinstance(grain_value, list):
                if grain_index is not None:
                    grain_value = grain_value[grain_index]
                else:
                    grain_value = ','.join(grain_value)
            txt[item] = _enforce_txt_record_maxlen(item, grain_value)
            if LAST_GRAINS and (LAST_GRAINS.get(grain, '') != __grains__.get(grain, '')):
                changes[changes_key] = txt[item]
        else:
            txt[item] = _enforce_txt_record_maxlen(item, _config['txt'][item])

        if not LAST_GRAINS:
            changes[changes_key] = txt[item]

    if changes:
        if not LAST_GRAINS:
            changes['servicename'] = servicename
            changes['servicetype'] = _config['servicetype']
            changes['port'] = _config['port']
            changes['ipv4'] = __grains__.get('ipv4', [])
            changes['ipv6'] = __grains__.get('ipv6', [])
            GROUP.AddService(avahi.IF_UNSPEC, avahi.PROTO_UNSPEC, dbus.UInt32(0),
                             servicename, _config['servicetype'], '', '',
                             dbus.UInt16(_config['port']), avahi.dict_to_txt_array(txt))
            GROUP.Commit()
        elif _config.get('reset_on_change', False) or 'servicename' in changes:
            # A change in 'servicename' requires a reset because we can only
            # directly update TXT records
            GROUP.Reset()
            reset_wait = _config.get('reset_wait', 0)
            if reset_wait > 0:
                time.sleep(reset_wait)
            GROUP.AddService(avahi.IF_UNSPEC, avahi.PROTO_UNSPEC, dbus.UInt32(0),
                             servicename, _config['servicetype'], '', '',
                             dbus.UInt16(_config['port']), avahi.dict_to_txt_array(txt))
            GROUP.Commit()
        else:
            GROUP.UpdateServiceTxt(avahi.IF_UNSPEC, avahi.PROTO_UNSPEC, dbus.UInt32(0),
                                   servicename, _config['servicetype'], '',
                                   avahi.dict_to_txt_array(txt))

        ret.append({'tag': 'result', 'changes': changes})

    if _config.get('copy_grains', False):
        LAST_GRAINS = __grains__.copy()
    else:
        LAST_GRAINS = __grains__

    return ret
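
The ``grains.<name>[i]`` handling in the loop above is easy to miss: the beacon strips the ``grains.`` prefix, peels off an optional trailing ``[i]`` index, and then either picks that element of a list grain or joins the list with commas. A standalone sketch of just that parsing step, against a fake grains dict, could look like this:

def resolve_txt_value(reference, grains):
    # Literal values are used as-is; only 'grains.*' references are resolved.
    if not reference.startswith('grains.'):
        return reference
    grain = reference[7:]
    index = None
    bracket = grain.find('[')
    if bracket != -1 and grain.endswith(']'):
        index = int(grain[bracket + 1:-1])
        grain = grain[:bracket]
    value = grains.get(grain, '')
    if isinstance(value, list):
        value = value[index] if index is not None else ','.join(value)
    return value

fake_grains = {'productname': 'DemoBox', 'ipv4': ['127.0.0.1', '10.0.0.5']}
print(resolve_txt_value('grains.productname', fake_grains))  # DemoBox
print(resolve_txt_value('grains.ipv4[1]', fake_grains))      # 10.0.0.5
print(resolve_txt_value('grains.ipv4', fake_grains))         # 127.0.0.1,10.0.0.5
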
Exemplo n.º 55
0
def webconfiguration_settings(name, settings=None):
    r"""
    Set the value of webconfiguration settings.

    :param str name: The name of the IIS PSPath containing the settings.
        Possible PSPaths are :
        MACHINE, MACHINE/WEBROOT, IIS:\, IIS:\Sites\sitename, ...
    :param dict settings: A dictionary of dictionaries of settings to apply.
        You can match a specific item in a collection with this syntax inside a key:
        'Collection[{name: site0}].logFile.directory'

    Example of usage for the ``MACHINE/WEBROOT`` PSPath:

    .. code-block:: yaml

        MACHINE-WEBROOT-level-security:
          win_iis.webconfiguration_settings:
            - name: 'MACHINE/WEBROOT'
            - settings:
                system.web/authentication/forms:
                  requireSSL: True
                  protection: "All"
                  credentials.passwordFormat: "SHA1"
                system.web/httpCookies:
                  httpOnlyCookies: True

    Example of usage for the ``IIS:\Sites\site0`` PSPath:

    .. code-block:: yaml

        site0-IIS-Sites-level-security:
          win_iis.webconfiguration_settings:
            - name: 'IIS:\Sites\site0'
            - settings:
                system.webServer/httpErrors:
                  errorMode: "DetailedLocalOnly"
                system.webServer/security/requestFiltering:
                  allowDoubleEscaping: False
                  verbs.Collection:
                    - verb: TRACE
                      allowed: False
                  fileExtensions.allowUnlisted: False

    Example of usage for the ``IIS:\`` PSPath with a collection matching:

    .. code-block:: yaml

        site0-IIS-level-security:
          win_iis.webconfiguration_settings:
            - name: 'IIS:\'
            - settings:
                system.applicationHost/sites:
                  'Collection[{name: site0}].logFile.directory': 'C:\logs\iis\site0'

    """

    ret = {"name": name, "changes": {}, "comment": str(), "result": None}

    if not settings:
        ret["comment"] = "No settings to change provided."
        ret["result"] = True
        return ret

    ret_settings = {
        "changes": {},
        "failures": {},
    }

    settings_list = list()

    for filter, filter_settings in settings.items():
        for setting_name, value in filter_settings.items():
            settings_list.append({
                "filter": filter,
                "name": setting_name,
                "value": value
            })

    current_settings_list = __salt__["win_iis.get_webconfiguration_settings"](
        name=name, settings=settings_list)
    for idx, setting in enumerate(settings_list):

        is_collection = setting["name"].split(".")[-1] == "Collection"
        # If this is a new setting and not an update to an existing setting
        if len(current_settings_list) <= idx:
            ret_settings["changes"][setting["filter"] + "." +
                                    setting["name"]] = {
                                        "old": {},
                                        "new": settings_list[idx]["value"],
                                    }
        elif (is_collection and list(map(dict, setting["value"])) != list(
                map(dict, current_settings_list[idx]["value"]))) or (
                    not is_collection and str(setting["value"]) != str(
                        current_settings_list[idx]["value"])):
            ret_settings["changes"][setting["filter"] + "." +
                                    setting["name"]] = {
                                        "old":
                                        current_settings_list[idx]["value"],
                                        "new": settings_list[idx]["value"],
                                    }
    if not ret_settings["changes"]:
        ret["comment"] = "Settings already contain the provided values."
        ret["result"] = True
        return ret
    elif __opts__["test"]:
        ret["comment"] = "Settings will be changed."
        ret["changes"] = ret_settings
        return ret

    success = __salt__["win_iis.set_webconfiguration_settings"](
        name=name, settings=settings_list)

    new_settings_list = __salt__["win_iis.get_webconfiguration_settings"](
        name=name, settings=settings_list)
    for idx, setting in enumerate(settings_list):

        is_collection = setting["name"].split(".")[-1] == "Collection"
        if (is_collection and setting["value"] !=
                new_settings_list[idx]["value"]) or (not is_collection and str(
                    setting["value"]) != str(new_settings_list[idx]["value"])):
            ret_settings["failures"][setting["filter"] + "." +
                                     setting["name"]] = {
                                         "old":
                                         current_settings_list[idx]["value"],
                                         "new":
                                         new_settings_list[idx]["value"],
                                     }
            ret_settings["changes"].get(
                setting["filter"] + "." + setting["name"], None)

    if ret_settings["failures"]:
        ret["comment"] = "Some settings failed to change."
        ret["changes"] = ret_settings
        ret["result"] = False
    else:
        ret["comment"] = "Set settings to contain the provided values."
        ret["changes"] = ret_settings["changes"]
        ret["result"] = success

    return ret
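
The state above first flattens the nested ``settings`` mapping into the flat list of ``{filter, name, value}`` dicts that the ``win_iis`` execution functions expect. That reshaping step is worth seeing in isolation; the values come from the first YAML example in the docstring:

settings = {
    'system.web/authentication/forms': {
        'requireSSL': True,
        'credentials.passwordFormat': 'SHA1',
    },
    'system.web/httpCookies': {'httpOnlyCookies': True},
}

settings_list = []
for filter_, filter_settings in settings.items():
    for setting_name, value in filter_settings.items():
        settings_list.append({'filter': filter_, 'name': setting_name, 'value': value})

for entry in settings_list:
    print(entry)
# e.g. {'filter': 'system.web/authentication/forms', 'name': 'requireSSL', 'value': True}
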
Exemplo n.º 56
0
Arquivo: dracr.py Projeto: kvikas/salt
def inventory(host=None, admin_username=None, admin_password=None):
    def mapit(x, y):
        return {x: y}

    fields = {}
    fields['server'] = [
        'name', 'idrac_version', 'blade_type', 'gen', 'updateable'
    ]
    fields['switch'] = ['name', 'model_name', 'hw_version', 'fw_version']
    fields['cmc'] = ['name', 'cmc_version', 'updateable']
    fields['chassis'] = ['name', 'fw_version', 'fqdd']

    rawinv = __execute_ret('getversion',
                           host=host,
                           admin_username=admin_username,
                           admin_password=admin_password)

    if rawinv['retcode'] != 0:
        return rawinv

    in_server = False
    in_switch = False
    in_cmc = False
    in_chassis = False
    ret = {}
    ret['server'] = {}
    ret['switch'] = {}
    ret['cmc'] = {}
    ret['chassis'] = {}
    for l in rawinv['stdout'].splitlines():
        if l.startswith('<Server>'):
            in_server = True
            in_switch = False
            in_cmc = False
            in_chassis = False
            continue

        if l.startswith('<Switch>'):
            in_server = False
            in_switch = True
            in_cmc = False
            in_chassis = False
            continue

        if l.startswith('<CMC>'):
            in_server = False
            in_switch = False
            in_cmc = True
            in_chassis = False
            continue

        if l.startswith('<Chassis Infrastructure>'):
            in_server = False
            in_switch = False
            in_cmc = False
            in_chassis = True
            continue

        if len(l) < 1:
            continue

        line = re.split('  +', l.strip())

        if in_server:
            ret['server'][line[0]] = dict(
                (k, v) for d in map(mapit, fields['server'], line)
                for (k, v) in d.items())
        if in_switch:
            ret['switch'][line[0]] = dict(
                (k, v) for d in map(mapit, fields['switch'], line)
                for (k, v) in d.items())
        if in_cmc:
            ret['cmc'][line[0]] = dict((k, v)
                                       for d in map(mapit, fields['cmc'], line)
                                       for (k, v) in d.items())
        if in_chassis:
            ret['chassis'][line[0]] = dict(
                (k, v) for d in map(mapit, fields['chassis'], line)
                for k, v in d.items())

    return ret
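
The nested ``map(mapit, fields[...], line)`` expression in ``inventory`` simply pairs a list of field names with the whitespace-split columns of one output line. A small sketch with an invented ``getversion`` line shows that it is equivalent to ``dict(zip(...))``:

fields = ['name', 'idrac_version', 'blade_type', 'gen', 'updateable']
line = ['server-1', '2.63', 'PowerEdgeM630', '13G', '1']  # made-up sample columns

def mapit(x, y):
    return {x: y}

# The style used in inventory(): merge a sequence of one-key dicts.
verbose = dict((k, v) for d in map(mapit, fields, line) for (k, v) in d.items())

# Equivalent and shorter.
concise = dict(zip(fields, line))

print(verbose == concise)  # True
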
Exemplo n.º 57
0
def present(name,
            running=None,
            source=None,
            profiles=None,
            config=None,
            devices=None,
            architecture='x86_64',
            ephemeral=False,
            restart_on_change=False,
            remote_addr=None,
            cert=None,
            key=None,
            verify_cert=True):
    '''
    Create the named container if it does not exist

    name
        The name of the container to be created

    running : None
        * If ``True``, ensure that the container is running
        * If ``False``, ensure that the container is stopped
        * If ``None``, do nothing with regards to the running state of the
          container

    source : None
        Can be either a string containing an image alias:
             "xenial/amd64"
        or a dict with type "image" and an alias:
            {"type": "image",
             "alias": "xenial/amd64"}
        or image with "fingerprint":
            {"type": "image",
             "fingerprint": "SHA-256"}
        or image with "properties":
            {"type": "image",
             "properties": {
                "os": "ubuntu",
                "release": "14.04",
                "architecture": "x86_64"
             }}
        or none:
            {"type": "none"}
        or copy:
            {"type": "copy",
             "source": "my-old-container"}


    profiles : ['default']
        List of profiles to apply on this container

    config :
        A config dict or None (None = unset).

        Can also be a list:
            [{'key': 'boot.autostart', 'value': 1},
             {'key': 'security.privileged', 'value': '1'}]

    devices :
        A device dict or None (None = unset).

    architecture : 'x86_64'
        Can be one of the following:
            * unknown
            * i686
            * x86_64
            * armv7l
            * aarch64
            * ppc
            * ppc64
            * ppc64le
            * s390x

    ephemeral : False
        Destroy this container after stop?

    restart_on_change : False
        Restart the container when we detect changes on the config or
        its devices?

    remote_addr :
        A URL to a remote server. You must also provide cert and key if you
        set remote_addr!

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM formatted SSL certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM formatted SSL key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the cert. This is True by default, but in most cases
        you will want to disable it, as LXD normally uses self-signed
        certificates.
    '''
    if profiles is None:
        profiles = ['default']

    if source is None:
        source = {}

    ret = {
        'name': name,
        'running': running,
        'profiles': profiles,
        'source': source,
        'config': config,
        'devices': devices,
        'architecture': architecture,
        'ephemeral': ephemeral,
        'restart_on_change': restart_on_change,
        'remote_addr': remote_addr,
        'cert': cert,
        'key': key,
        'verify_cert': verify_cert,
        'changes': {}
    }

    container = None
    try:
        container = __salt__['lxd.container_get'](name,
                                                  remote_addr,
                                                  cert,
                                                  key,
                                                  verify_cert,
                                                  _raw=True)
    except CommandExecutionError as e:
        return _error(ret, six.text_type(e))
    except SaltInvocationError:
        # Container not found
        pass

    if container is None:
        if __opts__['test']:
            # Test is on, just return that we would create the container
            msg = 'Would create the container "{0}"'.format(name)
            ret['changes'] = {'created': msg}
            if running is True:
                msg = msg + ' and start it.'
                ret['changes']['started'] = (
                    'Would start the container "{0}"'.format(name))

            return _unchanged(ret, msg)

        # create the container
        try:
            __salt__['lxd.container_create'](
                name,
                source,
                profiles,
                config,
                devices,
                architecture,
                ephemeral,
                True,  # Wait
                remote_addr,
                cert,
                key,
                verify_cert)
        except CommandExecutionError as e:
            return _error(ret, six.text_type(e))

        msg = 'Created the container "{0}"'.format(name)
        ret['changes'] = {'created': msg}

        if running is True:
            try:
                __salt__['lxd.container_start'](name, remote_addr, cert, key,
                                                verify_cert)
            except CommandExecutionError as e:
                return _error(ret, six.text_type(e))

            msg = msg + ' and started it.'
            ret['changes']['started'] = (
                'Started the container "{0}"'.format(name))

        return _success(ret, msg)

    # Container exists, lets check for differences
    new_profiles = set(map(six.text_type, profiles))
    old_profiles = set(map(six.text_type, container.profiles))

    container_changed = False

    profile_changes = []
    # Removed profiles
    for k in old_profiles.difference(new_profiles):
        if not __opts__['test']:
            profile_changes.append('Removed profile "{0}"'.format(k))
            old_profiles.discard(k)
        else:
            profile_changes.append('Would remove profile "{0}"'.format(k))

    # Added profiles
    for k in new_profiles.difference(old_profiles):
        if not __opts__['test']:
            profile_changes.append('Added profile "{0}"'.format(k))
            old_profiles.add(k)
        else:
            profile_changes.append('Would add profile "{0}"'.format(k))

    if profile_changes:
        container_changed = True
        ret['changes']['profiles'] = profile_changes
        container.profiles = list(old_profiles)

    # Config and devices changes
    config, devices = __salt__['lxd.normalize_input_values'](config, devices)
    changes = __salt__['lxd.sync_config_devices'](container, config, devices,
                                                  __opts__['test'])
    if changes:
        container_changed = True
        ret['changes'].update(changes)

    is_running = \
        container.status_code == CONTAINER_STATUS_RUNNING

    if not __opts__['test']:
        try:
            __salt__['lxd.pylxd_save_object'](container)
        except CommandExecutionError as e:
            return _error(ret, six.text_type(e))

    if running != is_running:
        if running is True:
            if __opts__['test']:
                changes['running'] = 'Would start the container'
                return _unchanged(ret, ('Container "{0}" would get changed '
                                        'and started.').format(name))
            else:
                container.start(wait=True)
                changes['running'] = 'Started the container'

        elif running is False:
            if __opts__['test']:
                changes['stopped'] = 'Would stop the container'
                return _unchanged(ret, ('Container "{0}" would get changed '
                                        'and stopped.').format(name))
            else:
                container.stop(wait=True)
                changes['stopped'] = 'Stopped the container'

    if ((running is True or running is None) and is_running
            and restart_on_change and container_changed):

        if __opts__['test']:
            changes['restarted'] = 'Would restart the container'
            return _unchanged(ret,
                              'Would restart the container "{0}"'.format(name))
        else:
            container.restart(wait=True)
            changes['restarted'] = (
                'Container "{0}" has been restarted'.format(name))
            return _success(ret,
                            'Container "{0}" has been restarted'.format(name))

    if not container_changed:
        return _success(ret, 'No changes')

    if __opts__['test']:
        return _unchanged(ret,
                          'Container "{0}" would get changed.'.format(name))

    return _success(ret, '{0} changes'.format(len(ret['changes'].keys())))
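
The profile reconciliation inside ``present`` boils down to two set differences between the requested and the existing profiles. Stripped of the Salt plumbing and with invented profile names, the same bookkeeping looks like this:

new_profiles = {'default', 'web'}
old_profiles = {'default', 'db'}

profile_changes = []
for k in old_profiles - new_profiles:
    profile_changes.append('Removed profile "{0}"'.format(k))
for k in new_profiles - old_profiles:
    profile_changes.append('Added profile "{0}"'.format(k))

print(sorted(profile_changes))
# ['Added profile "web"', 'Removed profile "db"']
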
Exemplo n.º 58
0
    def process(self, config):
        '''
        Process the configured beacons.

        The config must be a list and looks like this in YAML:

        .. code-block:: yaml

            beacons:
                inotify:
                    - /etc/fstab: {}
                    - /var/cache/foo: {}
        '''
        ret = []
        b_config = copy.deepcopy(config)
        if 'enabled' in b_config and not b_config['enabled']:
            return
        for mod in config:
            if mod == 'enabled':
                continue

            # Convert beacons that are lists to a dict to make processing easier
            current_beacon_config = None
            if isinstance(config[mod], list):
                current_beacon_config = {}
                list(map(current_beacon_config.update, config[mod]))
            elif isinstance(config[mod], dict):
                salt.utils.warn_until(
                    'Nitrogen',
                    'Beacon configuration should be a list instead of a dictionary.'
                )
                current_beacon_config = config[mod]

            if 'enabled' in current_beacon_config:
                if not current_beacon_config['enabled']:
                    log.trace('Beacon {0} disabled'.format(mod))
                    continue
                else:
                    # remove 'enabled' item before processing the beacon
                    if isinstance(config[mod], dict):
                        del config[mod]['enabled']
                    else:
                        self._remove_list_item(config[mod], 'enabled')

            log.trace('Beacon processing: {0}'.format(mod))
            fun_str = '{0}.beacon'.format(mod)
            if fun_str in self.beacons:
                interval = self._determine_beacon_config(current_beacon_config, 'interval')
                if interval:
                    b_config = self._trim_config(b_config, mod, 'interval')
                    if not self._process_interval(mod, interval):
                        log.trace('Skipping beacon {0}. Interval not reached.'.format(mod))
                        continue
                if self._determine_beacon_config(current_beacon_config, 'disable_during_state_run'):
                    log.trace('Evaluating if beacon {0} should be skipped due to a state run.'.format(mod))
                    b_config = self._trim_config(b_config, mod, 'disable_during_state_run')
                    is_running = False
                    running_jobs = salt.utils.minion.running(self.opts)
                    for job in running_jobs:
                        if re.match('state.*', job['fun']):
                            is_running = True
                    if is_running:
                        log.info('Skipping beacon {0}. State run in progress.'.format(mod))
                        continue
                # Fire the beacon!
                raw = self.beacons[fun_str](b_config[mod])
                for data in raw:
                    tag = 'salt/beacon/{0}/{1}/'.format(self.opts['id'], mod)
                    if 'tag' in data:
                        tag += data.pop('tag')
                    if 'id' not in data:
                        data['id'] = self.opts['id']
                    ret.append({'tag': tag, 'data': data})
            else:
                log.debug('Unable to process beacon {0}'.format(mod))
        return ret
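
The ``list(map(_config.update, config))`` one-liner used throughout these beacons, and the ``current_beacon_config`` merge in ``process`` above, both collapse the list-of-dicts beacon configuration into a single dict. A minimal sketch, shaped like the load beacon config shown earlier:

config = [
    {'averages': {'1m': [0.0, 2.0], '5m': [0.0, 1.5], '15m': [0.1, 1.0]}},
    {'emitatstartup': True},
    {'onchangeonly': False},
]

_config = {}
list(map(_config.update, config))  # same effect as the explicit loop below

merged = {}
for item in config:
    merged.update(item)

print(_config == merged)  # True
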
Exemplo n.º 59
0
def validate(config):
    """
    Validate the beacon configuration
    """

    VALID_MASK = [
        "access",
        "attrib",
        "close_nowrite",
        "close_write",
        "create",
        "delete",
        "delete_self",
        "excl_unlink",
        "ignored",
        "modify",
        "moved_from",
        "moved_to",
        "move_self",
        "oneshot",
        "onlydir",
        "open",
        "unmount",
    ]

    # Configuration for inotify beacon should be a list of dicts
    if not isinstance(config, list):
        return False, "Configuration for inotify beacon must be a list."
    else:
        _config = {}
        list(map(_config.update, config))

        if "files" not in _config:
            return False, "Configuration for inotify beacon must include files."
        else:
            if not isinstance(_config['files'], dict):
                return False, ('Configuration for inotify beacon invalid, '
                               'files must be a dict.')

            for path in _config.get('files'):

                if not isinstance(_config['files'][path], dict):
                    return False, ('Configuration for inotify beacon must '
                                   'be a list of dictionaries.')
                else:
                    if not any(
                        j in ["mask", "recurse", "auto_add"]
                        for j in _config["files"][path]
                    ):
                        return (
                            False,
                            (
                                "Configuration for inotify beacon must "
                                "contain mask, recurse or auto_add items."
                            ),
                        )

                    if "auto_add" in _config["files"][path]:
                        if not isinstance(_config["files"][path]["auto_add"], bool):
                            return (
                                False,
                                (
                                    "Configuration for inotify beacon "
                                    "auto_add must be boolean."
                                ),
                            )

                    if "recurse" in _config["files"][path]:
                        if not isinstance(_config["files"][path]["recurse"], bool):
                            return (
                                False,
                                (
                                    "Configuration for inotify beacon "
                                    "recurse must be boolean."
                                ),
                            )

                    if "mask" in _config["files"][path]:
                        if not isinstance(_config["files"][path]["mask"], list):
                            return (
                                False,
                                (
                                    "Configuration for inotify beacon "
                                    "mask must be list."
                                ),
                            )
                        for mask in _config["files"][path]["mask"]:
                            if mask not in VALID_MASK:
                                return (
                                    False,
                                    (
                                        "Configuration for inotify beacon "
                                        "invalid mask option {0}.".format(mask)
                                    ),
                                )
    return True, "Valid beacon configuration"
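
A quick check of the validator above with one well-formed and one malformed configuration (the watched path is just an example):

good = [{'files': {'/etc/fstab': {'mask': ['modify', 'delete_self']}}}]
bad = [{'files': {'/etc/fstab': {'mask': ['not_a_real_mask']}}}]

print(validate(good))  # (True, 'Valid beacon configuration')
print(validate(bad))   # (False, 'Configuration for inotify beacon invalid mask option not_a_real_mask.')
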
Exemplo n.º 60
0
Arquivo: at.py Projeto: mjura/salt-1
def absent(name, jobid=None, **kwargs):
    '''
    Remove a job from the queue.
    The ``kwargs`` can include hour, minute, day, month, and year.

    limit
        Target range

    tag
        Job's tag

    runas
        Runs user-specified jobs

    .. code-block:: yaml

        example1:
          at.absent:
            - limit: all

    .. code-block:: yaml

        example2:
          at.absent:
            - limit: all
            - year: 13

    .. code-block:: yaml

        example3:
          at.absent:
            - limit: all
            - tag: rose
            - runas: jim

    .. code-block:: yaml

        example4:
          at.absent:
            - limit: all
            - tag: rose
            - day: 13
            - hour: 16
    '''
    if 'limit' in kwargs:
        name = kwargs['limit']
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}

    binary = salt.utils.which('at')

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Remove jobs()'
        return ret

    if name != 'all':
        ret['comment'] = 'limit parameter not supported {0}'.format(name)
        ret['result'] = False
        return ret

    #if jobid:
    #    output = __salt__['cmd.run']('{0} -d {1}'.format(binary, jobid))
    #    if i in map(str, [j['job'] for j in __salt__['at.atq']()['jobs']]):
    #        ret['result'] = False
    #        return ret
    #    ret['comment'] = 'Remove job({0}) from queue'.format(' '.join(opts))
    #    return ret

    if kwargs:
        opts = [str(j['job']) for j in __salt__['at.jobcheck'](**kwargs)['jobs']]
    else:
        opts = [str(j['job']) for j in __salt__['at.atq']()['jobs']]

    if not opts:
        ret['result'] = False
        ret['comment'] = 'No matching jobs or time format error'
        return ret

    __salt__['cmd.run']('{0} -d {1}'.format(binary, ' '.join(opts)))
    fail = []
    for i in opts:
        if i in [str(j['job']) for j in __salt__['at.atq']()['jobs']]:
            fail.append(i)

    if fail:
        ret['comment'] = 'Removed job({0}) from queue but ({1}) failed'.format(
            ' '.join(opts), fail)
    else:
        ret['comment'] = 'Removed job({0}) from queue'.format(' '.join(opts))

    return ret