Example #1
File: vmware.py Project: scottking2/salt
def get_service_instance(host, username, password, protocol=None, port=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.

    password
        The password used to login to the vCenter server or ESX/ESXi host.

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    try:
        service_instance = SmartConnect(host=host,
                                        user=username,
                                        pwd=password,
                                        protocol=protocol,
                                        port=port)
    except Exception as exc:
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        if isinstance(exc, vim.fault.HostConnectFault) \
                and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg:
            try:
                import ssl
                default_context = ssl._create_default_https_context
                ssl._create_default_https_context = ssl._create_unverified_context
                service_instance = SmartConnect(host=host,
                                                user=username,
                                                pwd=password,
                                                protocol=protocol,
                                                port=port)
                ssl._create_default_https_context = default_context
            except Exception as exc:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.debug(exc)
                raise SaltSystemExit(err_msg)
        else:
            err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
            log.debug(exc)
            raise SaltSystemExit(err_msg)

    atexit.register(Disconnect, service_instance)

    return service_instance
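
A minimal usage sketch for the function above; the host name and credentials are placeholders, and a reachable vCenter or ESXi endpoint is required:

si = get_service_instance('vcenter.example.com', 'root', 'hunter2')
# si is a pyVmomi ServiceInstance; for example, read the product string
print(si.content.about.fullName)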
Example #2
File: verify.py Project: zxstar/salt
def verify_files(files, user):
    """
    Verify that the named files exist and are owned by the named user
    """
    if salt.utils.platform.is_windows():
        return True
    import pwd  # after confirming not running Windows

    try:
        pwnam = pwd.getpwnam(user)
        uid = pwnam[2]
    except KeyError:
        err = ("Failed to prepare the Salt environment for user "
               "{0}. The user is not available.\n").format(user)
        sys.stderr.write(err)
        sys.exit(salt.defaults.exitcodes.EX_NOUSER)

    for fn_ in files:
        dirname = os.path.dirname(fn_)
        try:
            if dirname:
                try:
                    os.makedirs(dirname)
                except OSError as err:
                    if err.errno != errno.EEXIST:
                        raise
            if not os.path.isfile(fn_):
                with salt.utils.files.fopen(fn_, "w"):
                    pass

        except IOError as err:
            if os.path.isfile(dirname):
                msg = "Failed to create path {0}, is {1} a file?".format(
                    fn_, dirname)
                raise SaltSystemExit(msg=msg)
            if err.errno != errno.EACCES:
                raise
            msg = 'No permissions to access "{0}", are you running as the correct user?'.format(
                fn_)
            raise SaltSystemExit(msg=msg)

        except OSError as err:  # pylint: disable=duplicate-except
            msg = 'Failed to create path "{0}" - {1}'.format(fn_, err)
            raise SaltSystemExit(msg=msg)

        stats = os.stat(fn_)
        if uid != stats.st_uid:
            try:
                os.chown(fn_, uid, -1)
            except OSError:
                pass
    return True
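
A usage sketch, assuming the process starts as root and should hand the listed files to an unprivileged 'salt' user (paths are illustrative):

# Creates any missing directories/files and chowns them to the 'salt' user.
verify_files(['/var/log/salt/master', '/var/log/salt/key'], 'salt')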
Example #3
def put_blob(storage_conn=None, **kwargs):
    """
    .. versionadded:: 2015.8.0

    Upload a blob
    """
    if not storage_conn:
        storage_conn = get_storage_conn(opts=kwargs)

    if "container" not in kwargs:
        raise SaltSystemExit(
            code=42, msg='The blob container name must be specified as "container"'
        )

    if "name" not in kwargs:
        raise SaltSystemExit(code=42, msg='The blob name must be specified as "name"')

    if "blob_path" not in kwargs and "blob_content" not in kwargs:
        raise SaltSystemExit(
            code=42,
            msg=(
                'Either a path to a file needs to be passed in as "blob_path" '
                'or the contents of a blob as "blob_content."'
            ),
        )

    blob_kwargs = {
        "container_name": kwargs["container"],
        "blob_name": kwargs["name"],
        "cache_control": kwargs.get("cache_control", None),
        "content_language": kwargs.get("content_language", None),
        "content_md5": kwargs.get("content_md5", None),
        "x_ms_blob_content_type": kwargs.get("blob_content_type", None),
        "x_ms_blob_content_encoding": kwargs.get("blob_content_encoding", None),
        "x_ms_blob_content_language": kwargs.get("blob_content_language", None),
        "x_ms_blob_content_md5": kwargs.get("blob_content_md5", None),
        "x_ms_blob_cache_control": kwargs.get("blob_cache_control", None),
        "x_ms_meta_name_values": kwargs.get("meta_name_values", None),
        "x_ms_lease_id": kwargs.get("lease_id", None),
    }
    if "blob_path" in kwargs:
        data = storage_conn.put_block_blob_from_path(
            file_path=kwargs["blob_path"], **blob_kwargs
        )
    elif "blob_content" in kwargs:
        data = storage_conn.put_block_blob_from_bytes(
            blob=kwargs["blob_content"], **blob_kwargs
        )

    return data
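
A hedged usage sketch; the container, blob name, and paths are placeholders, and the storage connection is built from the cloud provider configuration when storage_conn is omitted:

# upload a local file as a block blob
put_blob(container='backups', name='etc-hosts', blob_path='/etc/hosts')
# or upload in-memory content instead of a file
put_blob(container='backups', name='motd', blob_content=b'maintenance at 02:00 UTC')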
Example #4
def put_blob(storage_conn=None, **kwargs):
    '''
    .. versionadded:: 2015.8.0

    Upload a blob
    '''
    if not storage_conn:
        storage_conn = get_storage_conn(opts=kwargs)

    if 'container' not in kwargs:
        raise SaltSystemExit(
            code=42,
            msg='The blob container name must be specified as "container"')

    if 'name' not in kwargs:
        raise SaltSystemExit(code=42,
                             msg='The blob name must be specified as "name"')

    if 'blob_path' not in kwargs and 'blob_content' not in kwargs:
        raise SaltSystemExit(
            code=42,
            msg='Either a path to a file needs to be passed in as "blob_path" '
            'or the contents of a blob as "blob_content."')

    blob_kwargs = {
        'container_name': kwargs['container'],
        'blob_name': kwargs['name'],
        'cache_control': kwargs.get('cache_control', None),
        'content_language': kwargs.get('content_language', None),
        'content_md5': kwargs.get('content_md5', None),
        'x_ms_blob_content_type': kwargs.get('blob_content_type', None),
        'x_ms_blob_content_encoding': kwargs.get('blob_content_encoding',
                                                 None),
        'x_ms_blob_content_language': kwargs.get('blob_content_language',
                                                 None),
        'x_ms_blob_content_md5': kwargs.get('blob_content_md5', None),
        'x_ms_blob_cache_control': kwargs.get('blob_cache_control', None),
        'x_ms_meta_name_values': kwargs.get('meta_name_values', None),
        'x_ms_lease_id': kwargs.get('lease_id', None),
    }
    if 'blob_path' in kwargs:
        data = storage_conn.put_block_blob_from_path(
            file_path=kwargs['blob_path'], **blob_kwargs)
    elif 'blob_content' in kwargs:
        data = storage_conn.put_block_blob_from_bytes(
            blob=kwargs['blob_content'], **blob_kwargs)

    return data
Example #5
File: esxi.py Project: zhengyu1992/salt
def find_credentials(host):
    '''
    Cycle through all the possible credentials and return the first one that
    works.
    '''
    user_names = [__pillar__['proxy'].get('username', 'root')]
    passwords = __pillar__['proxy']['passwords']
    for user in user_names:
        for password in passwords:
            try:
                # Try to authenticate with the given user/password combination
                ret = __salt__['vsphere.system_info'](host=host,
                                                      username=user,
                                                      password=password)
            except SaltSystemExit:
                # If we can't authenticate, continue on to try the next password.
                continue
            # If we have data returned from above, we've successfully authenticated.
            if ret:
                DETAILS['username'] = user
                DETAILS['password'] = password
                return user, password
    # We've reached the end of the list without successfully authenticating.
    raise SaltSystemExit(
        'Cannot complete login due to an incorrect user name or password.')
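
A sketch of the proxy pillar structure this function expects, with placeholder values, followed by a call; the first working pair is cached in DETAILS and returned:

# __pillar__['proxy'] is assumed to look roughly like:
#     {'proxytype': 'esxi', 'host': 'esxi01.example.com',
#      'username': 'root', 'passwords': ['first-guess', 'second-guess']}
user, password = find_credentials('esxi01.example.com')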
Example #6
def list_blobs(storage_conn=None, **kwargs):
    '''
    .. versionadded:: 2015.8.0

    List blobs associated with the container
    '''
    if not storage_conn:
        storage_conn = get_storage_conn(opts=kwargs)

    if 'container' not in kwargs:
        raise SaltSystemExit(
            code=42,
            msg='A storage container name must be specified as "container"')

    data = storage_conn.list_blobs(
        container_name=kwargs['container'],
        prefix=kwargs.get('prefix', None),
        marker=kwargs.get('marker', None),
        maxresults=kwargs.get('maxresults', None),
        include=kwargs.get('include', None),
        delimiter=kwargs.get('delimiter', None),
    )

    ret = {}
    for item in data.blobs:
        ret[item.name] = object_to_dict(item)
    return ret
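
Usage sketch with placeholder names; the return value maps each blob name to the dictionary produced by object_to_dict():

blobs = list_blobs(container='backups', prefix='2015/')
for name in blobs:
    print(name)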
Example #7
def get_blob(storage_conn=None, **kwargs):
    """
    .. versionadded:: 2015.8.0

    Download a blob
    """
    if not storage_conn:
        storage_conn = get_storage_conn(opts=kwargs)

    if "container" not in kwargs:
        raise SaltSystemExit(
            code=42, msg='The blob container name must be specified as "container"'
        )

    if "name" not in kwargs:
        raise SaltSystemExit(code=42, msg='The blob name must be specified as "name"')

    if "local_path" not in kwargs and "return_content" not in kwargs:
        raise SaltSystemExit(
            code=42,
            msg=(
                'Either a local path needs to be passed in as "local_path", '
                'or "return_content" to return the blob contents directly'
            ),
        )

    blob_kwargs = {
        "container_name": kwargs["container"],
        "blob_name": kwargs["name"],
        "snapshot": kwargs.get("snapshot", None),
        "x_ms_lease_id": kwargs.get("lease_id", None),
        "progress_callback": kwargs.get("progress_callback", None),
        "max_connections": kwargs.get("max_connections", 1),
        "max_retries": kwargs.get("max_retries", 5),
        "retry_wait": kwargs.get("retry_wait", 1),
    }

    if "local_path" in kwargs:
        data = storage_conn.get_blob_to_path(
            file_path=kwargs["local_path"],
            open_mode=kwargs.get("open_mode", "wb"),
            **blob_kwargs
        )
    elif "return_content" in kwargs:
        data = storage_conn.get_blob_to_bytes(**blob_kwargs)

    return data
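
Usage sketch showing both download modes; container, blob, and path names are placeholders:

# write the blob to a local file
get_blob(container='backups', name='etc-hosts', local_path='/tmp/etc-hosts')
# or return the blob contents directly
content = get_blob(container='backups', name='etc-hosts', return_content=True)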
Example #8
def verify_files(files, user):
    """
    Verify that the named files exist and are owned by the named user
    """
    if salt.utils.platform.is_windows():
        return True

    # after confirming not running Windows
    pwnam = _get_pwnam(user)
    uid = pwnam[2]

    for fn_ in files:
        dirname = os.path.dirname(fn_)
        try:
            if dirname:
                try:
                    os.makedirs(dirname)
                except OSError as err:
                    if err.errno != errno.EEXIST:
                        raise
            if not os.path.isfile(fn_):
                with salt.utils.files.fopen(fn_, "w"):
                    pass

        except OSError as err:
            if os.path.isfile(dirname):
                msg = "Failed to create path {}, is {} a file?".format(
                    fn_, dirname)
                raise SaltSystemExit(msg=msg)
            if err.errno != errno.EACCES:
                raise
            msg = 'No permissions to access "{}", are you running as the correct user?'.format(
                fn_)
            raise SaltSystemExit(msg=msg)

        except OSError as err:  # pylint: disable=duplicate-except
            msg = 'Failed to create path "{}" - {}'.format(fn_, err)
            raise SaltSystemExit(msg=msg)

        stats = os.stat(fn_)
        if uid != stats.st_uid:
            try:
                os.chown(fn_, uid, -1)
            except OSError:
                pass
    return True
Example #9
def get_client(client_type, **kwargs):
    """
    Dynamically load the selected client and return a management client object
    """
    client_map = {
        "compute": "ComputeManagement",
        "authorization": "AuthorizationManagement",
        "dns": "DnsManagement",
        "storage": "StorageManagement",
        "managementlock": "ManagementLock",
        "monitor": "MonitorManagement",
        "network": "NetworkManagement",
        "policy": "Policy",
        "resource": "ResourceManagement",
        "subscription": "Subscription",
        "web": "WebSiteManagement",
    }

    if client_type not in client_map:
        raise SaltSystemExit(
            msg="The Azure ARM client_type {} specified can not be found.".format(
                client_type
            )
        )

    map_value = client_map[client_type]

    if client_type in ["policy", "subscription"]:
        module_name = "resource"
    elif client_type in ["managementlock"]:
        module_name = "resource.locks"
    else:
        module_name = client_type

    try:
        client_module = importlib.import_module("azure.mgmt." + module_name)
        # pylint: disable=invalid-name
        Client = getattr(client_module, "{}Client".format(map_value))
    except ImportError:
        raise sys.exit("The azure {} client is not available.".format(client_type))

    credentials, subscription_id, cloud_env = _determine_auth(**kwargs)

    if client_type == "subscription":
        client = Client(
            credentials=credentials,
            base_url=cloud_env.endpoints.resource_manager,
        )
    else:
        client = Client(
            credentials=credentials,
            subscription_id=subscription_id,
            base_url=cloud_env.endpoints.resource_manager,
        )

    client.config.add_user_agent("Salt/{}".format(salt.version.__version__))

    return client
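
A hedged usage sketch; the service principal values are placeholders and would normally come from a provider profile resolved by _determine_auth(), and the enumeration call assumes the azure-mgmt-compute client API shown here:

compute = get_client('compute',
                     client_id='<app-id>',
                     secret='<app-secret>',
                     tenant='<tenant-id>',
                     subscription_id='<subscription-id>')
# e.g. enumerate virtual machines in the subscription
for vm in compute.virtual_machines.list_all():
    print(vm.name)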
Example #10
def get_blob(storage_conn=None, **kwargs):
    '''
    .. versionadded:: 2015.8.0

    Download a blob
    '''
    if not storage_conn:
        storage_conn = get_storage_conn(opts=kwargs)

    if 'container' not in kwargs:
        raise SaltSystemExit(code=42, msg='The blob container name must be specified as "container"')

    if 'name' not in kwargs:
        raise SaltSystemExit(code=42, msg='The blob name must be specified as "name"')

    if 'local_path' not in kwargs and 'return_content' not in kwargs:
        raise SaltSystemExit(
            code=42,
            msg='Either a local path needs to be passed in as "local_path", '
            'or "return_content" to return the blob contents directly'
        )

    blob_kwargs = {
        'container_name': kwargs['container'],
        'blob_name': kwargs['name'],
        'snapshot': kwargs.get('snapshot', None),
        'x_ms_lease_id': kwargs.get('lease_id', None),
        'progress_callback': kwargs.get('progress_callback', None),
        'max_connections': kwargs.get('max_connections', 1),
        'max_retries': kwargs.get('max_retries', 5),
        'retry_wait': kwargs.get('retry_wait', 1),
    }

    if 'local_path' in kwargs:
        data = storage_conn.get_blob_to_path(
            file_path=kwargs['local_path'],
            open_mode=kwargs.get('open_mode', 'wb'),
            **blob_kwargs
        )
    elif 'return_content' in kwargs:
        data = storage_conn.get_blob_to_bytes(
            **blob_kwargs
        )

    return data
Example #11
def get_client(client_type, **kwargs):
    '''
    Dynamically load the selected client and return a management client object
    '''
    client_map = {
        'compute': 'ComputeManagement',
        'authorization': 'AuthorizationManagement',
        'dns': 'DnsManagement',
        'storage': 'StorageManagement',
        'managementlock': 'ManagementLock',
        'monitor': 'MonitorManagement',
        'network': 'NetworkManagement',
        'policy': 'Policy',
        'resource': 'ResourceManagement',
        'subscription': 'Subscription',
        'web': 'WebSiteManagement'
    }

    if client_type not in client_map:
        raise SaltSystemExit(
            'The Azure ARM client_type {0} specified can not be found.'.format(
                client_type))

    map_value = client_map[client_type]

    if client_type in ['policy', 'subscription']:
        module_name = 'resource'
    elif client_type in ['managementlock']:
        module_name = 'resource.locks'
    else:
        module_name = client_type

    try:
        client_module = importlib.import_module('azure.mgmt.' + module_name)
        # pylint: disable=invalid-name
        Client = getattr(client_module, '{0}Client'.format(map_value))
    except ImportError:
        raise sys.exit(
            'The azure {0} client is not available.'.format(client_type))

    credentials, subscription_id, cloud_env = _determine_auth(**kwargs)

    if client_type == 'subscription':
        client = Client(
            credentials=credentials,
            base_url=cloud_env.endpoints.resource_manager,
        )
    else:
        client = Client(
            credentials=credentials,
            subscription_id=subscription_id,
            base_url=cloud_env.endpoints.resource_manager,
        )

    client.config.add_user_agent('Salt/{0}'.format(salt.version.__version__))

    return client
Example #12
def check_ipc_path_max_len(uri):
    # The socket path is limited to 107 characters on Solaris and
    # Linux, and 103 characters on BSD-based systems.
    ipc_path_max_len = getattr(zmq, 'IPC_PATH_MAX_LEN', 103)
    if ipc_path_max_len and len(uri) > ipc_path_max_len:
        raise SaltSystemExit(
            'The socket path is longer than allowed by OS. '
            '{0!r} is longer than {1} characters. '
            'Either try to reduce the length of this setting\'s '
            'path or switch to TCP; in the configuration file, '
            'set "ipc_mode: tcp".'.format(uri, ipc_path_max_len))
Example #13
def check_ipc_path_max_len(uri):
    # The socket path is limited to 107 characters on Solaris and
    # Linux, and 103 characters on BSD-based systems.
    if zmq is None:
        return
    ipc_path_max_len = getattr(zmq, "IPC_PATH_MAX_LEN", 103)
    if ipc_path_max_len and len(uri) > ipc_path_max_len:
        raise SaltSystemExit(
            "The socket path is longer than allowed by OS. "
            "'{}' is longer than {} characters. "
            "Either try to reduce the length of this setting's "
            "path or switch to TCP; in the configuration file, "
            'set "ipc_mode: tcp".'.format(uri, ipc_path_max_len))
Example #14
def _find_credentials(host):
    """
    Cycle through all the possible credentials and return the first one that
    works.
    """
    user_names = [__pillar__["proxy"].get("username", "root")]
    passwords = __pillar__["proxy"]["passwords"]
    for user in user_names:
        for password in passwords:
            try:
                # Try to authenticate with the given user/password combination
                ret = salt.modules.vsphere.system_info(host=host,
                                                       username=user,
                                                       password=password)
            except SaltSystemExit:
                # If we can't authenticate, continue on to try the next password.
                continue
            # If we have data returned from above, we've successfully authenticated.
            if ret:
                return user, password
    # We've reached the end of the list without successfully authenticating.
    raise SaltSystemExit(
        "Cannot complete login due to an incorrect user name or password.")
Example #15
def dns_check(addr, safe=False, ipv6=False):
    '''
    Return the ip resolved by dns, but do not exit on failure, only raise an
    exception. Obeys system preference for IPv4/6 address resolution.
    '''
    error = False
    try:
        hostnames = socket.getaddrinfo(addr, None, socket.AF_UNSPEC,
                                       socket.SOCK_STREAM)
        if not hostnames:
            error = True
        else:
            addr = False
            for h in hostnames:
                if h[0] == socket.AF_INET or (h[0] == socket.AF_INET6
                                              and ipv6):
                    addr = ip_bracket(h[4][0])
                    break
            if not addr:
                error = True
    except socket.error:
        error = True

    if error:
        err = ('This master address: \'{0}\' was previously resolvable '
               'but now fails to resolve! The previously resolved ip addr '
               'will continue to be used').format(addr)
        if safe:
            if salt.log.is_console_configured():
                # If logging is not configured it also means that either
                # the master or minion instance calling this hasn't even
                # started running
                log.error(err)
            raise SaltClientError()
        raise SaltSystemExit(code=42, msg=err)
    return addr
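
Usage sketch with a placeholder hostname; on resolution failure it raises SaltClientError when safe=True, otherwise SaltSystemExit with code 42:

master_ip = dns_check('salt.example.com')               # e.g. '203.0.113.10'
master_ip6 = dns_check('salt.example.com', ipv6=True)   # prefer an IPv6 result when one resolves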
Example #16
File: salt.py Project: eirinikos/salt
    def run(self):
        '''
        Execute the salt command line
        '''
        import salt.client
        self.parse_args()

        if self.config['log_level'] not in ('quiet', ):
            # Setup file logging!
            self.setup_logfile_logger()
            verify_log(self.config)

        try:
            # We don't need to bail on config file permission errors
            # if the CLI process is run with the -a flag
            skip_perm_errors = self.options.eauth != ''

            self.local_client = salt.client.get_local_client(
                self.get_config_file_path(),
                skip_perm_errors=skip_perm_errors,
                auto_reconnect=True)
        except SaltClientError as exc:
            self.exit(2, '{0}\n'.format(exc))
            return

        if self.options.batch or self.options.static:
            # _run_batch() will handle all output and
            # exit with the appropriate error condition
            # Execution will not continue past this point
            # in batch mode.
            self._run_batch()
            return

        if self.options.preview_target:
            minion_list = self._preview_target()
            self._output_ret(minion_list, self.config.get('output', 'nested'))
            return

        if self.options.timeout <= 0:
            self.options.timeout = self.local_client.opts['timeout']

        kwargs = {
            'tgt': self.config['tgt'],
            'fun': self.config['fun'],
            'arg': self.config['arg'],
            'timeout': self.options.timeout,
            'show_timeout': self.options.show_timeout,
            'show_jid': self.options.show_jid
        }

        if 'token' in self.config:
            import salt.utils.files
            try:
                with salt.utils.files.fopen(
                        os.path.join(self.config['cachedir'], '.root_key'),
                        'r') as fp_:
                    kwargs['key'] = fp_.readline()
            except IOError:
                kwargs['token'] = self.config['token']

        kwargs['delimiter'] = self.options.delimiter

        if self.selected_target_option:
            kwargs['tgt_type'] = self.selected_target_option
        else:
            kwargs['tgt_type'] = 'glob'

        # If batch_safe_limit is set, check minions matching target and
        # potentially switch to batch execution
        if self.options.batch_safe_limit > 1:
            if len(self._preview_target()) >= self.options.batch_safe_limit:
                salt.utils.stringutils.print_cli(
                    '\nNOTICE: Too many minions targeted, switching to batch execution.'
                )
                self.options.batch = self.options.batch_safe_size
                self._run_batch()
                return

        if getattr(self.options, 'return'):
            kwargs['ret'] = getattr(self.options, 'return')

        if getattr(self.options, 'return_config'):
            kwargs['ret_config'] = getattr(self.options, 'return_config')

        if getattr(self.options, 'return_kwargs'):
            kwargs['ret_kwargs'] = yamlify_arg(
                getattr(self.options, 'return_kwargs'))

        if getattr(self.options, 'module_executors'):
            kwargs['module_executors'] = yamlify_arg(
                getattr(self.options, 'module_executors'))

        if getattr(self.options, 'executor_opts'):
            kwargs['executor_opts'] = yamlify_arg(
                getattr(self.options, 'executor_opts'))

        if getattr(self.options, 'metadata'):
            kwargs['metadata'] = yamlify_arg(getattr(self.options, 'metadata'))

        # If using eauth and a token hasn't already been loaded into
        # kwargs, prompt the user to enter auth credentials
        if 'token' not in kwargs and 'key' not in kwargs and self.options.eauth:
            # This is expensive. Don't do it unless we need to.
            import salt.auth
            resolver = salt.auth.Resolver(self.config)
            res = resolver.cli(self.options.eauth)
            if self.options.mktoken and res:
                tok = resolver.token_cli(self.options.eauth, res)
                if tok:
                    kwargs['token'] = tok.get('token', '')
            if not res:
                sys.stderr.write('ERROR: Authentication failed\n')
                sys.exit(2)
            kwargs.update(res)
            kwargs['eauth'] = self.options.eauth

        if self.config['async']:
            jid = self.local_client.cmd_async(**kwargs)
            salt.utils.stringutils.print_cli(
                'Executed command with job ID: {0}'.format(jid))
            return

        # local will be None when there was an error
        if not self.local_client:
            return

        retcodes = []
        errors = []

        try:
            if self.options.subset:
                cmd_func = self.local_client.cmd_subset
                kwargs['sub'] = self.options.subset
                kwargs['cli'] = True
            else:
                cmd_func = self.local_client.cmd_cli

            if self.options.progress:
                kwargs['progress'] = True
                self.config['progress'] = True
                ret = {}
                for progress in cmd_func(**kwargs):
                    out = 'progress'
                    try:
                        self._progress_ret(progress, out)
                    except LoaderError as exc:
                        raise SaltSystemExit(exc)
                    if 'return_count' not in progress:
                        ret.update(progress)
                self._progress_end(out)
                self._print_returns_summary(ret)
            elif self.config['fun'] == 'sys.doc':
                ret = {}
                out = ''
                for full_ret in self.local_client.cmd_cli(**kwargs):
                    ret_, out, retcode = self._format_ret(full_ret)
                    ret.update(ret_)
                self._output_ret(ret, out, retcode=retcode)
            else:
                if self.options.verbose:
                    kwargs['verbose'] = True
                ret = {}
                for full_ret in cmd_func(**kwargs):
                    try:
                        ret_, out, retcode = self._format_ret(full_ret)
                        retcodes.append(retcode)
                        self._output_ret(ret_, out, retcode=retcode)
                        ret.update(full_ret)
                    except KeyError:
                        errors.append(full_ret)

            # Returns summary
            if self.config['cli_summary'] is True:
                if self.config['fun'] != 'sys.doc':
                    if self.options.output is None:
                        self._print_returns_summary(ret)
                        self._print_errors_summary(errors)

            # NOTE: Return code is set here based on if all minions
            # returned 'ok' with a retcode of 0.
            # This is the final point before the 'salt' cmd returns,
            # which is why we set the retcode here.
            if retcodes.count(0) < len(retcodes):
                sys.stderr.write(
                    'ERROR: Minions returned with non-zero exit code\n')
                sys.exit(11)

        except (AuthenticationError, AuthorizationError, SaltInvocationError,
                EauthAuthenticationError, SaltClientError) as exc:
            ret = six.text_type(exc)
            self._output_ret(ret, '', retcode=1)
Example #17
File: proxy.py Project: fevrin/salt-sproxy
    def gen_modules(self, initial_load=False):
        '''
        Tell the minion to reload the execution modules.

        CLI Example:

        .. code-block:: bash

            salt '*' sys.reload_modules
        '''
        cached_grains = None
        if self.opts.get('proxy_use_cached_grains', True):
            cached_grains = self.opts.pop('proxy_cached_grains', None)
        if not cached_grains and self.opts.get('proxy_preload_grains', True):
            loaded_grains = salt.loader.grains(self.opts)
            self.opts['grains'].update(loaded_grains)
        elif cached_grains:
            self.opts['grains'].update(cached_grains)

        cached_pillar = None
        if self.opts.get('proxy_use_cached_pillar', True):
            cached_pillar = self.opts.pop('proxy_cached_pillar', None)
        if not cached_pillar and self.opts.get('proxy_load_pillar', True):
            self.opts['pillar'] = salt.pillar.get_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                saltenv=self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv'),
            ).compile_pillar()
        elif cached_pillar:
            self.opts['pillar'].update(cached_pillar)

        if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
            errmsg = (
                'No "proxy" configuration key found in pillar or opts '
                'dictionaries for id {id}. Check your pillar/options '
                'configuration and contents. Salt-proxy aborted.').format(
                    id=self.opts['id'])
            log.error(errmsg)
            self._running = False
            raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC,
                                 msg=errmsg)

        if 'proxy' not in self.opts:
            self.opts['proxy'] = self.opts['pillar']['proxy']

        # Then load the proxy module
        self.utils = salt.loader.utils(self.opts)
        self.proxy = salt.loader.proxy(self.opts, utils=self.utils)
        self.functions = salt.loader.minion_mods(self.opts,
                                                 utils=self.utils,
                                                 notify=False,
                                                 proxy=self.proxy)
        self.functions.pack['__grains__'] = self.opts['grains']
        self.returners = salt.loader.returners(self.opts,
                                               self.functions,
                                               proxy=self.proxy)
        self.functions['sys.reload_modules'] = self.gen_modules

        fq_proxyname = self.opts['proxy']['proxytype']
        self.functions.pack['__proxy__'] = self.proxy
        self.proxy.pack['__salt__'] = self.functions
        self.proxy.pack['__ret__'] = self.returners
        self.proxy.pack['__pillar__'] = self.opts['pillar']

        # No need to inject the proxy into utils, as we don't need scheduler for
        # this sort of short living Minion.
        # self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
        self.proxy.pack['__utils__'] = self.utils

        # Reload all modules so all dunder variables are injected
        self.proxy.reload_modules()

        if ('{0}.init'.format(fq_proxyname) not in self.proxy
                or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
            errmsg = (
                'Proxymodule {0} is missing an init() or a shutdown() or both. '
                .format(fq_proxyname) +
                'Check your proxymodule.  Salt-proxy aborted.')
            log.error(errmsg)
            self._running = False
            raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC,
                                 msg=errmsg)

        proxy_init_fn = self.proxy[fq_proxyname + '.init']
        proxy_init_fn(self.opts)
        if not cached_grains and self.opts.get('proxy_load_grains', True):
            # When the Grains are loaded from the cache, no need to re-load them
            # again.
            loaded_grains = salt.loader.grains(self.opts, proxy=self.proxy)
            self.opts['grains'].update(loaded_grains)
        self.functions.pack['__grains__'] = self.opts['grains']
        self.grains_cache = copy.deepcopy(self.opts['grains'])
        self.ready = True
Example #18
    def gen_modules(self, initial_load=False):
        '''
        Tell the minion to reload the execution modules.

        CLI Example:

        .. code-block:: bash

            salt '*' sys.reload_modules
        '''
        cached_grains = None
        if self.opts.get('proxy_use_cached_grains', True):
            cached_grains = self.opts.pop('proxy_cached_grains', None)

        if not cached_grains and self.opts.get('proxy_preload_grains', True):
            loaded_grains = salt.loader.grains(self.opts)
            self.opts['grains'].update(loaded_grains)
        elif cached_grains:
            self.opts['grains'].update(cached_grains)

        if (
            self.opts['roster_opts']
            and self.opts.get('proxy_merge_roster_grains', True)
            and 'grains' in self.opts['roster_opts']
            and isinstance(self.opts['roster_opts']['grains'], dict)
        ):
            # Merge the Grains from the Roster opts
            log.debug('Merging Grains with the Roster provided ones')
            self.opts['grains'] = salt.utils.dictupdate.merge(
                self.opts['roster_opts']['grains'], self.opts['grains']
            )

        cached_pillar = None
        if self.opts.get('proxy_use_cached_pillar', True):
            cached_pillar = self.opts.pop('proxy_cached_pillar', None)
        if not cached_pillar and self.opts.get('proxy_load_pillar', True):
            self.opts['pillar'] = salt.pillar.get_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                saltenv=self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv'),
            ).compile_pillar()
        elif cached_pillar:
            self.opts['pillar'] = salt.utils.dictupdate.merge(
                cached_pillar, self.opts['pillar']
            )

        if self.opts['roster_opts'] and self.opts.get('proxy_merge_roster_opts', True):
            if 'proxy' not in self.opts['pillar']:
                self.opts['pillar']['proxy'] = {}
            self.opts['pillar']['proxy'] = salt.utils.dictupdate.merge(
                self.opts['pillar']['proxy'], self.opts['roster_opts']
            )
            self.opts['pillar']['proxy'].pop('name', None)

        if self.opts.get('preload_targeting', False) or self.opts.get(
            'invasive_targeting', False
        ):
            log.debug('Loading the Matchers modules')
            self.matchers = salt.loader.matchers(self.opts)

        if self.opts.get('preload_targeting', False):
            log.debug(
                'Preload targeting requested, trying to see if %s matches the target %s (%s)',
                self.opts['id'],
                str(self.opts['__tgt']),
                self.opts['__tgt_type'],
            )
            matched = self._matches_target()
            if not matched:
                return

        if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
            errmsg = (
                'No "proxy" configuration key found in pillar or opts '
                'dictionaries for id {id}. Check your pillar/options '
                'configuration and contents. Salt-proxy aborted.'
            ).format(id=self.opts['id'])
            log.error(errmsg)
            self._running = False
            raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg)

        if 'proxy' not in self.opts:
            self.opts['proxy'] = self.opts['pillar']['proxy']

        # Then load the proxy module
        self.utils = salt.loader.utils(self.opts)
        self.proxy = salt.loader.proxy(self.opts, utils=self.utils)
        self.functions = salt.loader.minion_mods(
            self.opts, utils=self.utils, notify=False, proxy=self.proxy
        )
        self.functions.pack['__grains__'] = self.opts['grains']
        self.returners = None
        if self.opts['returner']:
            self.returners = salt.loader.returners(
                self.opts, self.functions, proxy=self.proxy
            )

        fq_proxyname = self.opts['proxy']['proxytype']
        self.functions.pack['__proxy__'] = self.proxy
        self.proxy.pack['__salt__'] = self.functions
        self.proxy.pack['__ret__'] = self.returners
        self.proxy.pack['__pillar__'] = self.opts['pillar']

        # No need to inject the proxy into utils, as we don't need scheduler for
        # this sort of short living Minion.
        # self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
        self.proxy.pack['__utils__'] = self.utils

        # Reload all modules so all dunder variables are injected
        self.proxy.reload_modules()

        if self.opts.get('proxy_no_connect', False):
            log.info('Requested not to initialize the connection with the device')
        else:
            log.debug('Trying to initialize the connection with the device')
            # When requested --no-connect, don't init the connection, but simply
            # go ahead and execute the function requested.
            if (
                '{0}.init'.format(fq_proxyname) not in self.proxy
                or '{0}.shutdown'.format(fq_proxyname) not in self.proxy
            ):
                errmsg = (
                    '[{0}] Proxymodule {1} is missing an init() or a shutdown() or both. '.format(
                        self.opts['id'], fq_proxyname
                    )
                    + 'Check your proxymodule.  Salt-proxy aborted.'
                )
                log.error(errmsg)
                self._running = False
                if self.unreachable_devices is not None:
                    self.unreachable_devices.append(self.opts['id'])
                raise SaltSystemExit(
                    code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg
                )

            proxy_init_fn = self.proxy[fq_proxyname + '.init']
            try:
                proxy_init_fn(self.opts)
                self.connected = True
            except Exception as exc:
                log.error(
                    'Encountered error when starting up the connection with %s:',
                    self.opts['id'],
                )
                if self.unreachable_devices is not None:
                    self.unreachable_devices.append(self.opts['id'])
                raise
            if not cached_grains and self.opts.get('proxy_load_grains', True):
                # When the Grains are loaded from the cache, no need to re-load them
                # again.
                loaded_grains = salt.loader.grains(self.opts, proxy=self.proxy)
                self.opts['grains'] = salt.utils.dictupdate.merge(
                    self.opts['grains'], loaded_grains
                )
            self.functions.pack['__grains__'] = self.opts['grains']
        self.grains_cache = copy.deepcopy(self.opts['grains'])

        if self.opts.get('invasive_targeting', False):
            log.info(
                'Invasive targeting requested, trying to see if %s matches the target %s (%s)',
                self.opts['id'],
                str(self.opts['__tgt']),
                self.opts['__tgt_type'],
            )
            matched = self._matches_target()
            if not matched:
                # Didn't match, shutting down this Proxy Minion, and exiting.
                log.debug(
                    '%s does not match the target expression, aborting', self.opts['id']
                )
                proxy_shut_fn = self.proxy[fq_proxyname + '.shutdown']
                proxy_shut_fn(self.opts)
                return

        self.ready = True
Example #19
def get_service_instance(host, username, password, protocol=None, port=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.

    password
        The password used to login to the vCenter server or ESX/ESXi host.

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    service_instance = GetSi()
    if service_instance:
        if service_instance._GetStub().host == ':'.join([host, str(port)]):
            service_instance._GetStub().GetConnection()
            return service_instance
        Disconnect(service_instance)

    try:
        service_instance = SmartConnect(host=host,
                                        user=username,
                                        pwd=password,
                                        protocol=protocol,
                                        port=port)
    except Exception as exc:
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault)
                    and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg
                ) or '[SSL: CERTIFICATE_VERIFY_FAILED]' in str(exc):
                import ssl
                default_context = ssl._create_default_https_context
                ssl._create_default_https_context = ssl._create_unverified_context
                service_instance = SmartConnect(host=host,
                                                user=username,
                                                pwd=password,
                                                protocol=protocol,
                                                port=port)
                ssl._create_default_https_context = default_context
            elif (
                    isinstance(exc, vim.fault.HostConnectFault) and
                    'SSL3_GET_SERVER_CERTIFICATE\', \'certificate verify failed'
                    in exc.msg
            ) or 'SSL3_GET_SERVER_CERTIFICATE\', \'certificate verify failed' in str(
                    exc):
                import ssl
                default_context = ssl._create_default_https_context
                ssl._create_default_https_context = ssl._create_unverified_context
                service_instance = SmartConnect(host=host,
                                                user=username,
                                                pwd=password,
                                                protocol=protocol,
                                                port=port)
                ssl._create_default_https_context = default_context
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.debug(exc)
                raise SaltSystemExit(err_msg)

        except Exception as exc:
            if 'certificate verify failed' in str(exc):
                import ssl
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                service_instance = SmartConnect(host=host,
                                                user=username,
                                                pwd=password,
                                                protocol=protocol,
                                                port=port,
                                                sslContext=context)
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.debug(exc)
                raise SaltSystemExit(err_msg)

    atexit.register(Disconnect, service_instance)

    return service_instance
Example #20
    def prepare(self):
        '''
        Run the preparation sequence required to start a salt minion.

        If sub-classed, don't **ever** forget to run:

            super(YourSubClass, self).prepare()
        '''
        super(ProxyMinion, self).prepare()

        if not self.values.proxyid:
            raise SaltSystemExit('salt-proxy requires --proxyid')

        # Proxies get their ID from the command line.  This may need to change in
        # the future.
        self.config['id'] = self.values.proxyid

        try:
            if self.config['verify_env']:
                confd = self.config.get('default_include')
                if confd:
                    # If 'default_include' is specified in config, then use it
                    if '*' in confd:
                        # Value is of the form "minion.d/*.conf"
                        confd = os.path.dirname(confd)
                    if not os.path.isabs(confd):
                        # If configured 'default_include' is not an absolute
                        # path, consider it relative to folder of 'conf_file'
                        # (/etc/salt by default)
                        confd = os.path.join(
                            os.path.dirname(self.config['conf_file']), confd
                        )
                else:
                    confd = os.path.join(
                        os.path.dirname(self.config['conf_file']), 'minion.d'
                    )

                v_dirs = [
                    self.config['pki_dir'],
                    self.config['cachedir'],
                    self.config['sock_dir'],
                    self.config['extension_modules'],
                    confd,
                ]

                if self.config.get('transport') == 'raet':
                    v_dirs.append(os.path.join(self.config['pki_dir'], 'accepted'))
                    v_dirs.append(os.path.join(self.config['pki_dir'], 'pending'))
                    v_dirs.append(os.path.join(self.config['pki_dir'], 'rejected'))
                    v_dirs.append(os.path.join(self.config['cachedir'], 'raet'))

                verify_env(
                    v_dirs,
                    self.config['user'],
                    permissive=self.config['permissive_pki_access'],
                    pki_dir=self.config['pki_dir'],
                )
                if 'proxy_log' in self.config:
                    logfile = self.config['proxy_log']
                else:
                    logfile = self.config['log_file']
                if logfile is not None and not logfile.startswith(('tcp://',
                                                                   'udp://',
                                                                   'file://')):
                    # Logfile is not using Syslog, verify
                    current_umask = os.umask(0o027)
                    verify_files([logfile], self.config['user'])
                    os.umask(current_umask)

        except OSError as err:
            log.exception('Failed to prepare salt environment')
            self.shutdown(err.errno)

        self.setup_logfile_logger()
        verify_log(self.config)
        log.info(
            'Setting up a Salt Proxy Minion "{0}"'.format(
                self.config['id']
            )
        )
        migrations.migrate_paths(self.config)
        # TODO: AIO core is separate from transport
        if self.config['transport'].lower() in ('zeromq', 'tcp'):
            # Late import so logging works correctly
            import salt.minion
            # If the minion key has not been accepted, then Salt enters a loop
            # waiting for it, if we daemonize later then the minion could halt
            # the boot process waiting for a key to be accepted on the master.
            # This is the latest safe place to daemonize
            self.daemonize_if_required()
            self.set_pidfile()
            # TODO Proxy minions don't currently support failover
            self.minion = salt.minion.ProxyMinion(self.config)
        else:
            # For proxy minions, this doesn't work yet.
            import salt.daemons.flo
            self.daemonize_if_required()
            self.set_pidfile()
            self.minion = salt.daemons.flo.IofloMinion(self.config)
Example #21
    def sign_in(self, timeout=60, safe=True, tries=1):
        '''
        Send a sign in request to the master, sets the key information and
        returns a dict containing the master publish interface to bind to
        and the decrypted aes key for transport decryption.

        :param int timeout: Number of seconds to wait before timing out the sign-in request
        :param bool safe: If True, do not raise an exception on timeout. Retry instead.
        :param int tries: The number of times to try to authenticate before giving up.

        :raises SaltReqTimeoutError: If the sign-in request has timed out and :param safe: is not set

        :return: Return a string on failure indicating the reason for failure. On success, return a dictionary
        with the publication port and the shared AES key.

        '''
        auth = {}

        auth_timeout = self.opts.get('auth_timeout', None)
        if auth_timeout is not None:
            timeout = auth_timeout
        auth_safemode = self.opts.get('auth_safemode', None)
        if auth_safemode is not None:
            safe = auth_safemode
        auth_tries = self.opts.get('auth_tries', None)
        if auth_tries is not None:
            tries = auth_tries

        m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub)

        auth['master_uri'] = self.opts['master_uri']

        channel = salt.transport.client.AsyncReqChannel.factory(
            self.opts, crypt='clear', io_loop=self.io_loop)

        try:
            payload = yield channel.send(self.minion_sign_in_payload(),
                                         tries=tries,
                                         timeout=timeout)
        except SaltReqTimeoutError as e:
            if safe:
                log.warning('SaltReqTimeoutError: {0}'.format(e))
                raise tornado.gen.Return('retry')
            raise SaltClientError(
                'Attempt to authenticate with the salt master failed with timeout error'
            )
        if 'load' in payload:
            if 'ret' in payload['load']:
                if not payload['load']['ret']:
                    if self.opts['rejected_retry']:
                        log.error(
                            'The Salt Master has rejected this minion\'s public '
                            'key.\nTo repair this issue, delete the public key '
                            'for this minion on the Salt Master.\nThe Salt '
                            'Minion will attempt to re-authenticate.')
                        raise tornado.gen.Return('retry')
                    else:
                        log.critical(
                            'The Salt Master has rejected this minion\'s public '
                            'key!\nTo repair this issue, delete the public key '
                            'for this minion on the Salt Master and restart this '
                            'minion.\nOr restart the Salt Master in open mode to '
                            'clean out the keys. The Salt Minion will now exit.'
                        )
                        sys.exit(salt.defaults.exitcodes.EX_OK)
                # has the master returned that its maxed out with minions?
                elif payload['load']['ret'] == 'full':
                    raise tornado.gen.Return('full')
                else:
                    log.error(
                        'The Salt Master has cached the public key for this '
                        'node, this salt minion will wait for {0} seconds '
                        'before attempting to re-authenticate'.format(
                            self.opts['acceptance_wait_time']))
                    raise tornado.gen.Return('retry')
        auth['aes'] = self.verify_master(payload)
        if not auth['aes']:
            log.critical(
                'The Salt Master server\'s public key did not authenticate!\n'
                'The master may need to be updated if it is a version of Salt '
                'lower than {0}, or\n'
                'If you are confident that you are connecting to a valid Salt '
                'Master, then remove the master public key and restart the '
                'Salt Minion.\nThe master public key can be found '
                'at:\n{1}'.format(salt.version.__version__, m_pub_fn))
            raise SaltSystemExit('Invalid master key')
        if self.opts.get('syndic_master', False):  # Is syndic
            syndic_finger = self.opts.get(
                'syndic_finger', self.opts.get('master_finger', False))
            if syndic_finger:
                if salt.utils.pem_finger(m_pub_fn) != syndic_finger:
                    self._finger_fail(syndic_finger, m_pub_fn)
        else:
            if self.opts.get('master_finger', False):
                if salt.utils.pem_finger(
                        m_pub_fn) != self.opts['master_finger']:
                    self._finger_fail(self.opts['master_finger'], m_pub_fn)
        auth['publish_port'] = payload['publish_port']
        raise tornado.gen.Return(auth)
Example #22
def _determine_auth(**kwargs):
    """
    Acquire Azure ARM Credentials
    """
    if "profile" in kwargs:
        azure_credentials = __salt__["config.option"](kwargs["profile"])
        kwargs.update(azure_credentials)

    service_principal_creds_kwargs = ["client_id", "secret", "tenant"]
    user_pass_creds_kwargs = ["username", "password"]

    try:
        if kwargs.get("cloud_environment") and kwargs.get(
                "cloud_environment").startswith("http"):
            cloud_env = get_cloud_from_metadata_endpoint(
                kwargs["cloud_environment"])
        else:
            cloud_env_module = importlib.import_module(
                "msrestazure.azure_cloud")
            cloud_env = getattr(
                cloud_env_module,
                kwargs.get("cloud_environment", "AZURE_PUBLIC_CLOUD"))
    except (AttributeError, ImportError, MetadataEndpointError):
        raise sys.exit(
            "The Azure cloud environment {} is not available.".format(
                kwargs["cloud_environment"]))

    if set(service_principal_creds_kwargs).issubset(kwargs):
        if not (kwargs["client_id"] and kwargs["secret"] and kwargs["tenant"]):
            raise SaltInvocationError(
                "The client_id, secret, and tenant parameters must all be "
                "populated if using service principals.")
        else:
            credentials = ServicePrincipalCredentials(
                kwargs["client_id"],
                kwargs["secret"],
                tenant=kwargs["tenant"],
                cloud_environment=cloud_env,
            )
    elif set(user_pass_creds_kwargs).issubset(kwargs):
        if not (kwargs["username"] and kwargs["password"]):
            raise SaltInvocationError(
                "The username and password parameters must both be "
                "populated if using username/password authentication.")
        else:
            credentials = UserPassCredentials(kwargs["username"],
                                              kwargs["password"],
                                              cloud_environment=cloud_env)
    elif "subscription_id" in kwargs:
        try:
            from msrestazure.azure_active_directory import MSIAuthentication

            credentials = MSIAuthentication(cloud_environment=cloud_env)
        except ImportError:
            raise SaltSystemExit(msg=(
                "MSI authentication support not availabe (requires msrestazure >="
                " 0.4.14)"))

    else:
        raise SaltInvocationError(
            "Unable to determine credentials. "
            "A subscription_id with username and password, "
            "or client_id, secret, and tenant or a profile with the "
            "required parameters populated")

    if "subscription_id" not in kwargs:
        raise SaltInvocationError("A subscription_id must be specified")

    subscription_id = salt.utils.stringutils.to_str(kwargs["subscription_id"])

    return credentials, subscription_id, cloud_env
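As a usage sketch, _determine_auth() resolves keyword arguments into a credentials object, the subscription id, and the cloud environment. All values below are hypothetical placeholders, and the call assumes the Azure SDK imports used by this module are installed:

# Hedged usage sketch for _determine_auth(); every value is a placeholder.
credentials, subscription_id, cloud_env = _determine_auth(
    client_id='11111111-2222-3333-4444-555555555555',       # placeholder
    secret='example-secret',                                 # placeholder
    tenant='66666666-7777-8888-9999-000000000000',           # placeholder
    subscription_id='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',  # placeholder
    cloud_environment='AZURE_PUBLIC_CLOUD',
)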
Example #23
0
File: proxy.py Project: waynegemmell/salt
def post_master_init(self, master):
    """
    Function to finish init after a proxy
    minion has finished connecting to a master.

    This is primarily loading modules, pillars, etc. (since they need
    to know which master they connected to)
    """

    log.debug("subclassed LazyLoaded _post_master_init")
    if self.connected:
        self.opts["master"] = master

        self.opts["pillar"] = yield salt.pillar.get_async_pillar(
            self.opts,
            self.opts["grains"],
            self.opts["id"],
            saltenv=self.opts["saltenv"],
            pillarenv=self.opts.get("pillarenv"),
        ).compile_pillar()

    if "proxy" not in self.opts["pillar"] and "proxy" not in self.opts:
        errmsg = (
            "No proxy key found in pillar or opts for id " + self.opts["id"] +
            ". " +
            "Check your pillar/opts configuration and contents.  Salt-proxy aborted."
        )
        log.error(errmsg)
        self._running = False
        raise SaltSystemExit(code=-1, msg=errmsg)

    if "proxy" not in self.opts:
        self.opts["proxy"] = self.opts["pillar"]["proxy"]

    if self.opts.get("proxy_merge_pillar_in_opts"):
        # Override proxy opts with pillar data when the user required.
        self.opts = salt.utils.dictupdate.merge(
            self.opts,
            self.opts["pillar"],
            strategy=self.opts.get("proxy_merge_pillar_in_opts_strategy"),
            merge_lists=self.opts.get("proxy_deep_merge_pillar_in_opts",
                                      False),
        )
    elif self.opts.get("proxy_mines_pillar"):
        # Even when not required, some details such as mine configuration
        # should be merged anyway whenever possible.
        if "mine_interval" in self.opts["pillar"]:
            self.opts["mine_interval"] = self.opts["pillar"]["mine_interval"]
        if "mine_functions" in self.opts["pillar"]:
            general_proxy_mines = self.opts.get("mine_functions", {})
            specific_proxy_mines = self.opts["pillar"]["mine_functions"]
            try:
                self.opts[
                    "mine_functions"] = general_proxy_mines + specific_proxy_mines
            except TypeError as terr:
                log.error(
                    "Unable to merge mine functions from the pillar in the opts, for proxy %s",
                    self.opts["id"],
                )

    fq_proxyname = self.opts["proxy"]["proxytype"]

    # Need to load the modules so they get all the dunder variables
    (
        self.functions,
        self.returners,
        self.function_errors,
        self.executors,
    ) = self._load_modules()

    # we can then sync any proxymodules down from the master
    # we do a sync_all here in case proxy code was installed by
    # SPM or was manually placed in /srv/salt/_modules etc.
    self.functions["saltutil.sync_all"](saltenv=self.opts["saltenv"])

    # Pull in the utils
    self.utils = salt.loader.utils(self.opts)

    # Then load the proxy module
    self.proxy = salt.loader.proxy(self.opts, utils=self.utils)

    # And re-load the modules so the __proxy__ variable gets injected
    (
        self.functions,
        self.returners,
        self.function_errors,
        self.executors,
    ) = self._load_modules()
    self.functions.pack["__proxy__"] = self.proxy
    self.proxy.pack["__salt__"] = self.functions
    self.proxy.pack["__ret__"] = self.returners
    self.proxy.pack["__pillar__"] = self.opts["pillar"]

    # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__)
    self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
    self.proxy.pack["__utils__"] = self.utils

    # Reload all modules so all dunder variables are injected
    self.proxy.reload_modules()

    # Start engines here instead of in the Minion superclass __init__
    # This is because we need to inject the __proxy__ variable but
    # it is not setup until now.
    self.io_loop.spawn_callback(salt.engines.start_engines,
                                self.opts,
                                self.process_manager,
                                proxy=self.proxy)

    if ("{}.init".format(fq_proxyname) not in self.proxy
            or "{}.shutdown".format(fq_proxyname) not in self.proxy):
        errmsg = (
            "Proxymodule {} is missing an init() or a shutdown() or both. ".
            format(fq_proxyname) +
            "Check your proxymodule.  Salt-proxy aborted.")
        log.error(errmsg)
        self._running = False
        raise SaltSystemExit(code=-1, msg=errmsg)

    self.module_executors = self.proxy.get(
        "{}.module_executors".format(fq_proxyname), lambda: [])()
    proxy_init_fn = self.proxy[fq_proxyname + ".init"]
    proxy_init_fn(self.opts)

    self.opts["grains"] = salt.loader.grains(self.opts, proxy=self.proxy)

    self.mod_opts = self._prep_mod_opts()
    self.matchers = salt.loader.matchers(self.opts)
    self.beacons = salt.beacons.Beacon(self.opts, self.functions)
    uid = salt.utils.user.get_uid(user=self.opts.get("user", None))
    self.proc_dir = salt.minion.get_proc_dir(self.opts["cachedir"], uid=uid)

    if self.connected and self.opts["pillar"]:
        # The pillar has changed due to the connection to the master.
        # Reload the functions so that they can use the new pillar data.
        (
            self.functions,
            self.returners,
            self.function_errors,
            self.executors,
        ) = self._load_modules()
        if hasattr(self, "schedule"):
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners

    if not hasattr(self, "schedule"):
        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners,
            cleanup=[salt.minion.master_event(type="alive")],
            proxy=self.proxy,
        )

    # add default scheduling jobs to the minions scheduler
    if self.opts["mine_enabled"] and "mine.update" in self.functions:
        self.schedule.add_job(
            {
                "__mine_interval": {
                    "function": "mine.update",
                    "minutes": self.opts["mine_interval"],
                    "jid_include": True,
                    "maxrunning": 2,
                    "run_on_start": True,
                    "return_job": self.opts.get("mine_return_job", False),
                }
            },
            persist=True,
        )
        log.info("Added mine.update to scheduler")
    else:
        self.schedule.delete_job("__mine_interval", persist=True)

    # add master_alive job if enabled
    if self.opts["transport"] != "tcp" and self.opts[
            "master_alive_interval"] > 0:
        self.schedule.add_job(
            {
                salt.minion.master_event(type="alive",
                                         master=self.opts["master"]): {
                    "function": "status.master",
                    "seconds": self.opts["master_alive_interval"],
                    "jid_include": True,
                    "maxrunning": 1,
                    "return_job": False,
                    "kwargs": {
                        "master": self.opts["master"],
                        "connected": True
                    },
                }
            },
            persist=True,
        )
        if (self.opts["master_failback"] and "master_list" in self.opts
                and self.opts["master"] != self.opts["master_list"][0]):
            self.schedule.add_job(
                {
                    salt.minion.master_event(type="failback"): {
                        "function": "status.ping_master",
                        "seconds": self.opts["master_failback_interval"],
                        "jid_include": True,
                        "maxrunning": 1,
                        "return_job": False,
                        "kwargs": {
                            "master": self.opts["master_list"][0]
                        },
                    }
                },
                persist=True,
            )
        else:
            self.schedule.delete_job(salt.minion.master_event(type="failback"),
                                     persist=True)
    else:
        self.schedule.delete_job(
            salt.minion.master_event(type="alive", master=self.opts["master"]),
            persist=True,
        )
        self.schedule.delete_job(salt.minion.master_event(type="failback"),
                                 persist=True)

    # proxy keepalive
    proxy_alive_fn = fq_proxyname + ".alive"
    if (proxy_alive_fn in self.proxy
            and "status.proxy_reconnect" in self.functions
            and self.opts.get("proxy_keep_alive", True)):
        # `proxy_keep_alive` defaults to True when unspecified; if it is explicitly set to False, do not schedule reconnect attempts
        self.schedule.add_job(
            {
                "__proxy_keepalive": {
                    "function": "status.proxy_reconnect",
                    "minutes":
                    self.opts.get("proxy_keep_alive_interval",
                                  1),  # by default, check once per minute
                    "jid_include": True,
                    "maxrunning": 1,
                    "return_job": False,
                    "kwargs": {
                        "proxy_name": fq_proxyname
                    },
                }
            },
            persist=True,
        )
        self.schedule.enable_schedule()
    else:
        self.schedule.delete_job("__proxy_keepalive", persist=True)

    #  Sync the grains here so the proxy can communicate them to the master
    self.functions["saltutil.sync_grains"](saltenv="base")
    self.grains_cache = self.opts["grains"]
    self.ready = True
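The init()/shutdown() check above requires the loaded proxymodule to expose those two functions, and the keepalive job additionally looks for an alive() function. A minimal proxymodule skeleton that would satisfy those lookups might look like the following sketch; the module name, pillar keys, and connection handling are assumptions, not an actual Salt proxymodule:

# Hedged sketch of a minimal proxymodule satisfying the lookups performed
# in post_master_init(): <proxytype>.init, <proxytype>.shutdown and,
# optionally, <proxytype>.alive for the __proxy_keepalive job.
__proxyenabled__ = ['example_device']    # hypothetical proxytype
__virtualname__ = 'example_device'

_DETAILS = {}


def __virtual__():
    return __virtualname__


def init(opts):
    # Normally opens the connection to the managed device using the
    # 'proxy' pillar data; here we only record that init ran.
    _DETAILS['initialized'] = True
    _DETAILS['host'] = opts['proxy'].get('host')  # hypothetical pillar key
    return True


def alive(opts):
    # Consulted (via status.proxy_reconnect) by the __proxy_keepalive job.
    return _DETAILS.get('initialized', False)


def shutdown(opts):
    _DETAILS.clear()
    return True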
Example #24
0
    def run(self):
        """
        Execute the salt command line
        """
        import salt.client

        self.parse_args()

        if self.config["log_level"] not in ("quiet", ):
            # Setup file logging!
            self.setup_logfile_logger()
            verify_log(self.config)

        try:
            # We don't need to bail on config file permission errors
            # if the CLI process is run with the -a flag
            skip_perm_errors = self.options.eauth != ""

            self.local_client = salt.client.get_local_client(
                self.get_config_file_path(),
                skip_perm_errors=skip_perm_errors,
                auto_reconnect=True,
            )
        except SaltClientError as exc:
            self.exit(2, "{}\n".format(exc))
            return

        if self.options.batch or self.options.static:
            # _run_batch() will handle all output and
            # exit with the appropriate error condition
            # Execution will not continue past this point
            # in batch mode.
            self._run_batch()
            return

        if self.options.preview_target:
            minion_list = self._preview_target()
            self._output_ret(minion_list, self.config.get("output", "nested"))
            return

        if self.options.timeout <= 0:
            self.options.timeout = self.local_client.opts["timeout"]

        kwargs = {
            "tgt": self.config["tgt"],
            "fun": self.config["fun"],
            "arg": self.config["arg"],
            "timeout": self.options.timeout,
            "show_timeout": self.options.show_timeout,
            "show_jid": self.options.show_jid,
        }

        if "token" in self.config:
            import salt.utils.files

            try:
                with salt.utils.files.fopen(
                        os.path.join(self.config["cachedir"], ".root_key"),
                        "r") as fp_:
                    kwargs["key"] = fp_.readline()
            except OSError:
                kwargs["token"] = self.config["token"]

        kwargs["delimiter"] = self.options.delimiter

        if self.selected_target_option:
            kwargs["tgt_type"] = self.selected_target_option
        else:
            kwargs["tgt_type"] = "glob"

        # If batch_safe_limit is set, check minions matching target and
        # potentially switch to batch execution
        if self.options.batch_safe_limit > 1:
            if len(self._preview_target()) >= self.options.batch_safe_limit:
                salt.utils.stringutils.print_cli(
                    "\nNOTICE: Too many minions targeted, switching to batch execution."
                )
                self.options.batch = self.options.batch_safe_size
                try:
                    self._run_batch()
                finally:
                    self.local_client.destroy()
                return

        if getattr(self.options, "return"):
            kwargs["ret"] = getattr(self.options, "return")

        if getattr(self.options, "return_config"):
            kwargs["ret_config"] = getattr(self.options, "return_config")

        if getattr(self.options, "return_kwargs"):
            kwargs["ret_kwargs"] = yamlify_arg(
                getattr(self.options, "return_kwargs"))

        if getattr(self.options, "module_executors"):
            kwargs["module_executors"] = yamlify_arg(
                getattr(self.options, "module_executors"))

        if getattr(self.options, "executor_opts"):
            kwargs["executor_opts"] = yamlify_arg(
                getattr(self.options, "executor_opts"))

        if getattr(self.options, "metadata"):
            kwargs["metadata"] = yamlify_arg(getattr(self.options, "metadata"))

        # If using eauth and a token hasn't already been loaded into
        # kwargs, prompt the user to enter auth credentials
        if "token" not in kwargs and "key" not in kwargs and self.options.eauth:
            # This is expensive. Don't do it unless we need to.
            import salt.auth

            resolver = salt.auth.Resolver(self.config)
            res = resolver.cli(self.options.eauth)
            if self.options.mktoken and res:
                tok = resolver.token_cli(self.options.eauth, res)
                if tok:
                    kwargs["token"] = tok.get("token", "")
            if not res:
                sys.stderr.write("ERROR: Authentication failed\n")
                sys.exit(2)
            kwargs.update(res)
            kwargs["eauth"] = self.options.eauth

        if self.config["async"]:
            jid = self.local_client.cmd_async(**kwargs)
            salt.utils.stringutils.print_cli(
                "Executed command with job ID: {}".format(jid))
            return

        # local will be None when there was an error
        if not self.local_client:
            return

        retcodes = []
        errors = []

        try:
            if self.options.subset:
                cmd_func = self.local_client.cmd_subset
                kwargs["sub"] = self.options.subset
                kwargs["cli"] = True
            else:
                cmd_func = self.local_client.cmd_cli

            if self.options.progress:
                kwargs["progress"] = True
                self.config["progress"] = True
                ret = {}
                for progress in cmd_func(**kwargs):
                    out = "progress"
                    try:
                        self._progress_ret(progress, out)
                    except LoaderError as exc:
                        raise SaltSystemExit(exc)
                    if "return_count" not in progress:
                        ret.update(progress)
                self._progress_end(out)
                self._print_returns_summary(ret)
            elif self.config["fun"] == "sys.doc":
                ret = {}
                out = ""
                for full_ret in self.local_client.cmd_cli(**kwargs):
                    ret_, out, retcode = self._format_ret(full_ret)
                    ret.update(ret_)
                self._output_ret(ret, out, retcode=retcode)
            else:
                if self.options.verbose:
                    kwargs["verbose"] = True
                ret = {}
                for full_ret in cmd_func(**kwargs):
                    try:
                        ret_, out, retcode = self._format_ret(full_ret)
                        retcodes.append(retcode)
                        self._output_ret(ret_, out, retcode=retcode)
                        ret.update(full_ret)
                    except KeyError:
                        errors.append(full_ret)

            # Returns summary
            if self.config["cli_summary"] is True:
                if self.config["fun"] != "sys.doc":
                    if self.options.output is None:
                        self._print_returns_summary(ret)
                        self._print_errors_summary(errors)

            # NOTE: Return code is set here based on if all minions
            # returned 'ok' with a retcode of 0.
            # This is the final point before the 'salt' cmd returns,
            # which is why we set the retcode here.
            if not all(exit_code == salt.defaults.exitcodes.EX_OK
                       for exit_code in retcodes):
                sys.stderr.write(
                    "ERROR: Minions returned with non-zero exit code\n")
                sys.exit(salt.defaults.exitcodes.EX_GENERIC)

        except (
                AuthenticationError,
                AuthorizationError,
                SaltInvocationError,
                EauthAuthenticationError,
                SaltClientError,
        ) as exc:
            ret = str(exc)
            self._output_ret(ret, "", retcode=1)
        finally:
            self.local_client.destroy()
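The kwargs dictionary assembled above is ultimately handed to the LocalClient command methods. For orientation, a stripped-down direct use of salt.client.LocalClient with analogous arguments could look like this sketch; it assumes a running master with accepted minions, and the target and function are placeholders:

# Hedged sketch: calling LocalClient directly with arguments analogous to
# the kwargs dict built in run(). Requires master access (typically root).
import salt.client

local = salt.client.LocalClient()
ret = local.cmd(
    tgt='*',            # kwargs['tgt']
    fun='test.ping',    # kwargs['fun']
    arg=(),             # kwargs['arg']
    timeout=5,          # kwargs['timeout']
    tgt_type='glob',    # kwargs['tgt_type']
)
print(ret)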
Example #25
0
def post_master_init(self, master):
    """
    Function to finish init after a deltaproxy proxy
    minion has finished connecting to a master.

    This is primarily loading modules, pillars, etc. (since they need
    to know which master they connected to)
    """

    if self.connected:
        self.opts["pillar"] = yield salt.pillar.get_async_pillar(
            self.opts,
            self.opts["grains"],
            self.opts["id"],
            saltenv=self.opts["saltenv"],
            pillarenv=self.opts.get("pillarenv"),
        ).compile_pillar()

        # Ensure that the value of master is the one we passed in.
        # if pillar_opts is enabled then master could be overwritten
        # when compile_pillar is run.
        self.opts["master"] = master

        tag = "salt/deltaproxy/start"
        self._fire_master(tag=tag)

    if "proxy" not in self.opts["pillar"] and "proxy" not in self.opts:
        errmsg = (
            "No proxy key found in pillar or opts for id {}. Check your pillar/opts "
            "configuration and contents.  Salt-proxy aborted.".format(
                self.opts["id"]))
        log.error(errmsg)
        self._running = False
        raise SaltSystemExit(code=-1, msg=errmsg)

    if "proxy" not in self.opts:
        self.opts["proxy"] = self.opts["pillar"]["proxy"]

    self.opts = salt.utils.dictupdate.merge(
        self.opts,
        self.opts["pillar"],
        strategy=self.opts.get("proxy_merge_pillar_in_opts_strategy"),
        merge_lists=self.opts.get("proxy_deep_merge_pillar_in_opts", False),
    )

    if self.opts.get("proxy_mines_pillar"):
        # Even when not required, some details such as mine configuration
        # should be merged anyway whenever possible.
        if "mine_interval" in self.opts["pillar"]:
            self.opts["mine_interval"] = self.opts["pillar"]["mine_interval"]
        if "mine_functions" in self.opts["pillar"]:
            general_proxy_mines = self.opts.get("mine_functions", [])
            specific_proxy_mines = self.opts["pillar"]["mine_functions"]
            try:
                self.opts[
                    "mine_functions"] = general_proxy_mines + specific_proxy_mines
            except TypeError as terr:
                log.error(
                    "Unable to merge mine functions from the pillar in the opts, for proxy %s",
                    self.opts["id"],
                )

    fq_proxyname = self.opts["proxy"]["proxytype"]

    # Need to load the modules so they get all the dunder variables
    (
        self.functions,
        self.returners,
        self.function_errors,
        self.executors,
    ) = self._load_modules()

    # we can then sync any proxymodules down from the master
    # we do a sync_all here in case proxy code was installed by
    # SPM or was manually placed in /srv/salt/_modules etc.
    self.functions["saltutil.sync_all"](saltenv=self.opts["saltenv"])

    # Pull in the utils
    self.utils = salt.loader.utils(self.opts)

    # Then load the proxy module
    self.proxy = salt.loader.proxy(self.opts, utils=self.utils)

    # And re-load the modules so the __proxy__ variable gets injected
    (
        self.functions,
        self.returners,
        self.function_errors,
        self.executors,
    ) = self._load_modules()
    self.functions.pack["__proxy__"] = self.proxy
    self.proxy.pack["__salt__"] = self.functions
    self.proxy.pack["__ret__"] = self.returners
    self.proxy.pack["__pillar__"] = self.opts["pillar"]

    # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__)
    self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
    self.proxy.pack["__utils__"] = self.utils

    # Reload all modules so all dunder variables are injected
    self.proxy.reload_modules()

    # Start engines here instead of in the Minion superclass __init__
    # This is because we need to inject the __proxy__ variable but
    # it is not setup until now.
    self.io_loop.spawn_callback(salt.engines.start_engines,
                                self.opts,
                                self.process_manager,
                                proxy=self.proxy)

    proxy_init_func_name = "{}.init".format(fq_proxyname)
    proxy_shutdown_func_name = "{}.shutdown".format(fq_proxyname)
    if (proxy_init_func_name not in self.proxy
            or proxy_shutdown_func_name not in self.proxy):
        errmsg = (
            "Proxymodule {} is missing an init() or a shutdown() or both. "
            "Check your proxymodule.  Salt-proxy aborted.".format(fq_proxyname)
        )
        log.error(errmsg)
        self._running = False
        raise SaltSystemExit(code=-1, msg=errmsg)

    self.module_executors = self.proxy.get(
        "{}.module_executors".format(fq_proxyname), lambda: [])()
    proxy_init_fn = self.proxy[proxy_init_func_name]
    proxy_init_fn(self.opts)

    self.opts["grains"] = salt.loader.grains(self.opts, proxy=self.proxy)

    self.mod_opts = self._prep_mod_opts()
    self.matchers = salt.loader.matchers(self.opts)
    self.beacons = salt.beacons.Beacon(self.opts, self.functions)
    uid = salt.utils.user.get_uid(user=self.opts.get("user", None))
    self.proc_dir = salt.minion.get_proc_dir(self.opts["cachedir"], uid=uid)

    if self.connected and self.opts["pillar"]:
        # The pillar has changed due to the connection to the master.
        # Reload the functions so that they can use the new pillar data.
        (
            self.functions,
            self.returners,
            self.function_errors,
            self.executors,
        ) = self._load_modules()
        if hasattr(self, "schedule"):
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners

    if not hasattr(self, "schedule"):
        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners,
            cleanup=[salt.minion.master_event(type="alive")],
            proxy=self.proxy,
            _subprocess_list=self.subprocess_list,
        )

    # add default scheduling jobs to the minions scheduler
    if self.opts["mine_enabled"] and "mine.update" in self.functions:
        self.schedule.add_job(
            {
                "__mine_interval": {
                    "function": "mine.update",
                    "minutes": self.opts["mine_interval"],
                    "jid_include": True,
                    "maxrunning": 2,
                    "run_on_start": True,
                    "return_job": self.opts.get("mine_return_job", False),
                }
            },
            persist=True,
        )
        log.info("Added mine.update to scheduler")
    else:
        self.schedule.delete_job("__mine_interval", persist=True)

    # add master_alive job if enabled
    if self.opts["transport"] != "tcp" and self.opts[
            "master_alive_interval"] > 0:
        self.schedule.add_job(
            {
                salt.minion.master_event(type="alive",
                                         master=self.opts["master"]): {
                    "function": "status.master",
                    "seconds": self.opts["master_alive_interval"],
                    "jid_include": True,
                    "maxrunning": 1,
                    "return_job": False,
                    "kwargs": {
                        "master": self.opts["master"],
                        "connected": True
                    },
                }
            },
            persist=True,
        )
        if (self.opts["master_failback"] and "master_list" in self.opts
                and self.opts["master"] != self.opts["master_list"][0]):
            self.schedule.add_job(
                {
                    salt.minion.master_event(type="failback"): {
                        "function": "status.ping_master",
                        "seconds": self.opts["master_failback_interval"],
                        "jid_include": True,
                        "maxrunning": 1,
                        "return_job": False,
                        "kwargs": {
                            "master": self.opts["master_list"][0]
                        },
                    }
                },
                persist=True,
            )
        else:
            self.schedule.delete_job(salt.minion.master_event(type="failback"),
                                     persist=True)
    else:
        self.schedule.delete_job(
            salt.minion.master_event(type="alive", master=self.opts["master"]),
            persist=True,
        )
        self.schedule.delete_job(salt.minion.master_event(type="failback"),
                                 persist=True)

    # proxy keepalive
    proxy_alive_fn = fq_proxyname + ".alive"
    if (proxy_alive_fn in self.proxy
            and "status.proxy_reconnect" in self.functions
            and self.opts.get("proxy_keep_alive", True)):
        # `proxy_keep_alive` defaults to True when unspecified; if it is explicitly set to False, do not schedule reconnect attempts
        self.schedule.add_job(
            {
                "__proxy_keepalive": {
                    "function": "status.proxy_reconnect",
                    "minutes":
                    self.opts.get("proxy_keep_alive_interval",
                                  1),  # by default, check once per minute
                    "jid_include": True,
                    "maxrunning": 1,
                    "return_job": False,
                    "kwargs": {
                        "proxy_name": fq_proxyname
                    },
                }
            },
            persist=True,
        )
        self.schedule.enable_schedule()
    else:
        self.schedule.delete_job("__proxy_keepalive", persist=True)

    #  Sync the grains here so the proxy can communicate them to the master
    self.functions["saltutil.sync_grains"](saltenv="base")
    self.grains_cache = self.opts["grains"]
    # Now setup the deltaproxies
    self.deltaproxy = {}
    self.deltaproxy_opts = {}
    self.deltaproxy_objs = {}
    self.proxy_grains = {}
    self.proxy_pillar = {}
    self.proxy_context = {}
    self.add_periodic_callback("cleanup", self.cleanup_subprocesses)
    for _id in self.opts["proxy"].get("ids", []):
        control_id = self.opts["id"]
        proxyopts = self.opts.copy()
        proxyopts["id"] = _id

        proxyopts = salt.config.proxy_config(self.opts["conf_file"],
                                             defaults=proxyopts,
                                             minion_id=_id)
        proxyopts["id"] = proxyopts["proxyid"] = _id

        proxyopts["subproxy"] = True

        self.proxy_context[_id] = {"proxy_id": _id}

        # We need grains first to be able to load pillar, which is where we keep the proxy
        # configurations
        self.proxy_grains[_id] = salt.loader.grains(
            proxyopts, proxy=self.proxy, context=self.proxy_context[_id])
        self.proxy_pillar[_id] = yield salt.pillar.get_async_pillar(
            proxyopts,
            self.proxy_grains[_id],
            _id,
            saltenv=proxyopts["saltenv"],
            pillarenv=proxyopts.get("pillarenv"),
        ).compile_pillar()

        proxyopts["proxy"] = self.proxy_pillar[_id].get("proxy", {})
        if not proxyopts["proxy"]:
            log.warning(
                "Pillar data for proxy minion %s could not be loaded, skipping.",
                _id)
            continue

        # Remove ids
        proxyopts["proxy"].pop("ids", None)

        proxyopts["pillar"] = self.proxy_pillar[_id]
        proxyopts["grains"] = self.proxy_grains[_id]

        proxyopts["hash_id"] = self.opts["id"]

        _proxy_minion = ProxyMinion(proxyopts)
        _proxy_minion.proc_dir = salt.minion.get_proc_dir(
            proxyopts["cachedir"], uid=uid)

        _proxy_minion.proxy = salt.loader.proxy(
            proxyopts, utils=self.utils, context=self.proxy_context[_id])
        _proxy_minion.subprocess_list = self.subprocess_list

        # And load the modules
        (
            _proxy_minion.functions,
            _proxy_minion.returners,
            _proxy_minion.function_errors,
            _proxy_minion.executors,
        ) = _proxy_minion._load_modules(opts=proxyopts,
                                        grains=proxyopts["grains"],
                                        context=self.proxy_context[_id])

        # we can then sync any proxymodules down from the master
        # we do a sync_all here in case proxy code was installed by
        # SPM or was manually placed in /srv/salt/_modules etc.
        _proxy_minion.functions["saltutil.sync_all"](
            saltenv=self.opts["saltenv"])

        # And re-load the modules so the __proxy__ variable gets injected
        (
            _proxy_minion.functions,
            _proxy_minion.returners,
            _proxy_minion.function_errors,
            _proxy_minion.executors,
        ) = _proxy_minion._load_modules(opts=proxyopts,
                                        grains=proxyopts["grains"],
                                        context=self.proxy_context[_id])

        _proxy_minion.functions.pack["__proxy__"] = _proxy_minion.proxy
        _proxy_minion.proxy.pack["__salt__"] = _proxy_minion.functions
        _proxy_minion.proxy.pack["__ret__"] = _proxy_minion.returners
        _proxy_minion.proxy.pack["__pillar__"] = proxyopts["pillar"]
        _proxy_minion.proxy.pack["__grains__"] = proxyopts["grains"]

        # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__)
        _proxy_minion.proxy.utils = salt.loader.utils(
            proxyopts,
            proxy=_proxy_minion.proxy,
            context=self.proxy_context[_id])

        _proxy_minion.proxy.pack["__utils__"] = _proxy_minion.proxy.utils

        # Reload all modules so all dunder variables are injected
        _proxy_minion.proxy.reload_modules()

        _proxy_minion.connected = True

        _fq_proxyname = proxyopts["proxy"]["proxytype"]

        proxy_init_fn = _proxy_minion.proxy[_fq_proxyname + ".init"]
        try:
            proxy_init_fn(proxyopts)
        except Exception as exc:  # pylint: disable=broad-except
            log.error(
                "An exception occured during the initialization of minion %s: %s",
                _id,
                exc,
                exc_info=True,
            )
            continue

        # Reload the grains
        self.proxy_grains[_id] = salt.loader.grains(
            proxyopts,
            proxy=_proxy_minion.proxy,
            context=self.proxy_context[_id])
        proxyopts["grains"] = self.proxy_grains[_id]

        if not hasattr(_proxy_minion, "schedule"):
            _proxy_minion.schedule = salt.utils.schedule.Schedule(
                proxyopts,
                _proxy_minion.functions,
                _proxy_minion.returners,
                cleanup=[salt.minion.master_event(type="alive")],
                proxy=_proxy_minion.proxy,
                new_instance=True,
                _subprocess_list=_proxy_minion.subprocess_list,
            )

        self.deltaproxy_objs[_id] = _proxy_minion
        self.deltaproxy_opts[_id] = copy.deepcopy(proxyopts)

        # proxy keepalive
        _proxy_alive_fn = _fq_proxyname + ".alive"
        if (_proxy_alive_fn in _proxy_minion.proxy and "status.proxy_reconnect"
                in self.deltaproxy_objs[_id].functions
                and proxyopts.get("proxy_keep_alive", True)):
            # `proxy_keep_alive` defaults to True when unspecified; if it is explicitly set to False, do not schedule reconnect attempts
            _proxy_minion.schedule.add_job(
                {
                    "__proxy_keepalive": {
                        "function": "status.proxy_reconnect",
                        "minutes":
                        proxyopts.get("proxy_keep_alive_interval",
                                      1),  # by default, check once per minute
                        "jid_include": True,
                        "maxrunning": 1,
                        "return_job": False,
                        "kwargs": {
                            "proxy_name": _fq_proxyname
                        },
                    }
                },
                persist=True,
            )
            _proxy_minion.schedule.enable_schedule()
        else:
            _proxy_minion.schedule.delete_job("__proxy_keepalive",
                                              persist=True)

    self.ready = True
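The per-id loop above expects the controlling deltaproxy minion's configuration to list the sub-proxy ids under its proxy key, and each id's compiled pillar to carry its own proxy configuration. A rough sketch of those shapes as Python dictionaries; everything beyond the proxy, proxytype, and ids keys is an illustrative assumption:

# Hedged sketch of the data shapes consumed by the deltaproxy loop above.
controlling_pillar = {
    'proxy': {
        'proxytype': 'deltaproxy',
        'ids': ['device-1', 'device-2'],
    },
}

# Pillar compiled for one sub-proxy id; its 'proxy' key becomes
# proxyopts['proxy'] for that ProxyMinion instance.
subproxy_pillar_device_1 = {
    'proxy': {
        'proxytype': 'example_device',   # hypothetical proxymodule name
        'host': '198.51.100.10',         # placeholder connection detail
    },
}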
Example #26
0
File: net.py Project: xbglowx/salt
from __future__ import unicode_literals

# Import salt lib
import salt.output
from salt.ext import six
from salt.ext.six.moves import map
from salt.exceptions import SaltSystemExit

# Import third party libs
try:
    from netaddr import IPNetwork  # netaddr is already required by napalm-base
    from netaddr.core import AddrFormatError
    from napalm_base import helpers as napalm_helpers
except ImportError:
    # sorry
    raise SaltSystemExit('Please install napalm-base')

# ----------------------------------------------------------------------------------------------------------------------
# module properties
# ----------------------------------------------------------------------------------------------------------------------

_DEFAULT_TARGET = '*'
_DEFAULT_EXPR_FORM = 'glob'
_DEFAULT_IGNORE_INTF = []
# 'lo0', 'em1', 'em0', 'jsrv', 'fxp0'
_DEFAULT_DISPLAY = True
_DEFAULT_OUTPUTTER = 'table'

# ----------------------------------------------------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
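The imports above bring in netaddr's IPNetwork and AddrFormatError, which this runner's helpers typically use to parse and match interface prefixes. A minimal, hedged sketch of that pattern with placeholder addresses:

# Hedged sketch of the IPNetwork / AddrFormatError usage pattern; the
# prefix and address below are placeholders.
from netaddr import IPAddress, IPNetwork
from netaddr.core import AddrFormatError

try:
    net = IPNetwork('10.10.10.0/24')
    addr = IPAddress('10.10.10.7')
except AddrFormatError:
    net = addr = None

if net is not None and addr in net:
    pass  # the address falls inside the candidate network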
Example #27
0
File: azurearm.py Project: morinap/salt-1
def _determine_auth(**kwargs):
    '''
    Acquire Azure ARM Credentials
    '''
    if 'profile' in kwargs:
        azure_credentials = __salt__['config.option'](kwargs['profile'])
        kwargs.update(azure_credentials)

    service_principal_creds_kwargs = ['client_id', 'secret', 'tenant']
    user_pass_creds_kwargs = ['username', 'password']

    try:
        if kwargs.get('cloud_environment') and kwargs.get(
                'cloud_environment').startswith('http'):
            cloud_env = get_cloud_from_metadata_endpoint(
                kwargs['cloud_environment'])
        else:
            cloud_env_module = importlib.import_module(
                'msrestazure.azure_cloud')
            cloud_env = getattr(
                cloud_env_module,
                kwargs.get('cloud_environment', 'AZURE_PUBLIC_CLOUD'))
    except (AttributeError, ImportError, MetadataEndpointError):
        sys.exit(
            'The Azure cloud environment {0} is not available.'.format(
                kwargs['cloud_environment']))

    if set(service_principal_creds_kwargs).issubset(kwargs):
        if not (kwargs['client_id'] and kwargs['secret'] and kwargs['tenant']):
            raise SaltInvocationError(
                'The client_id, secret, and tenant parameters must all be '
                'populated if using service principals.')
        else:
            credentials = ServicePrincipalCredentials(
                kwargs['client_id'],
                kwargs['secret'],
                tenant=kwargs['tenant'],
                cloud_environment=cloud_env)
    elif set(user_pass_creds_kwargs).issubset(kwargs):
        if not (kwargs['username'] and kwargs['password']):
            raise SaltInvocationError(
                'The username and password parameters must both be '
                'populated if using username/password authentication.')
        else:
            credentials = UserPassCredentials(kwargs['username'],
                                              kwargs['password'],
                                              cloud_environment=cloud_env)
    elif 'subscription_id' in kwargs:
        try:
            from msrestazure.azure_active_directory import (MSIAuthentication)
            credentials = MSIAuthentication(cloud_environment=cloud_env)
        except ImportError:
            raise SaltSystemExit(
                msg='MSI authentication support not available '
                    '(requires msrestazure >= 0.4.14)'
            )

    else:
        raise SaltInvocationError(
            'Unable to determine credentials. '
            'A subscription_id with username and password, '
            'or client_id, secret, and tenant, or a profile with the '
            'required parameters must be populated.')

    if 'subscription_id' not in kwargs:
        raise SaltInvocationError('A subscription_id must be specified')

    subscription_id = salt.utils.stringutils.to_str(kwargs['subscription_id'])

    return credentials, subscription_id, cloud_env
Example #28
0
File: proxy.py Project: jodok/salt
def post_master_init(self, master):
    log.debug("subclassed LazyLoaded _post_master_init")
    if self.connected:
        self.opts['master'] = master

        self.opts['pillar'] = yield salt.pillar.get_async_pillar(
            self.opts,
            self.opts['grains'],
            self.opts['id'],
            saltenv=self.opts['saltenv'],
            pillarenv=self.opts.get('pillarenv'),
        ).compile_pillar()

    if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
        errmsg = 'No proxy key found in pillar or opts for id ' + self.opts['id'] + '. ' + \
                 'Check your pillar/opts configuration and contents.  Salt-proxy aborted.'
        log.error(errmsg)
        self._running = False
        raise SaltSystemExit(code=-1, msg=errmsg)

    if 'proxy' not in self.opts:
        self.opts['proxy'] = self.opts['pillar']['proxy']

    if self.opts.get('proxy_merge_pillar_in_opts'):
        # Override proxy opts with pillar data when the user required.
        self.opts = salt.utils.dictupdate.merge(self.opts,
                                                self.opts['pillar'],
                                                strategy=self.opts.get('proxy_merge_pillar_in_opts_strategy'),
                                                merge_lists=self.opts.get('proxy_deep_merge_pillar_in_opts', False))
    elif self.opts.get('proxy_mines_pillar'):
        # Even when not required, some details such as mine configuration
        # should be merged anyway whenever possible.
        if 'mine_interval' in self.opts['pillar']:
            self.opts['mine_interval'] = self.opts['pillar']['mine_interval']
        if 'mine_functions' in self.opts['pillar']:
            general_proxy_mines = self.opts.get('mine_functions', [])
            specific_proxy_mines = self.opts['pillar']['mine_functions']
            try:
                self.opts['mine_functions'] = general_proxy_mines + specific_proxy_mines
            except TypeError as terr:
                log.error('Unable to merge mine functions from the pillar in the opts, for proxy {}'.format(
                    self.opts['id']))

    fq_proxyname = self.opts['proxy']['proxytype']

    # Need to load the modules so they get all the dunder variables
    self.functions, self.returners, self.function_errors, self.executors = self._load_modules()

    # we can then sync any proxymodules down from the master
    # we do a sync_all here in case proxy code was installed by
    # SPM or was manually placed in /srv/salt/_modules etc.
    self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])

    # Pull in the utils
    self.utils = salt.loader.utils(self.opts)

    # Then load the proxy module
    self.proxy = salt.loader.proxy(self.opts, utils=self.utils)

    # And re-load the modules so the __proxy__ variable gets injected
    self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
    self.functions.pack['__proxy__'] = self.proxy
    self.proxy.pack['__salt__'] = self.functions
    self.proxy.pack['__ret__'] = self.returners
    self.proxy.pack['__pillar__'] = self.opts['pillar']

    # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__)
    self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
    self.proxy.pack['__utils__'] = self.utils

    # Reload all modules so all dunder variables are injected
    self.proxy.reload_modules()

    # Start engines here instead of in the Minion superclass __init__
    # This is because we need to inject the __proxy__ variable but
    # it is not setup until now.
    self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
                                self.process_manager, proxy=self.proxy)

    if ('{0}.init'.format(fq_proxyname) not in self.proxy
        or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
        errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
                 'Check your proxymodule.  Salt-proxy aborted.'
        log.error(errmsg)
        self._running = False
        raise SaltSystemExit(code=-1, msg=errmsg)

    self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
    proxy_init_fn = self.proxy[fq_proxyname + '.init']
    proxy_init_fn(self.opts)

    self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)

    self.serial = salt.payload.Serial(self.opts)
    self.mod_opts = self._prep_mod_opts()
    self.matchers = salt.loader.matchers(self.opts)
    self.beacons = salt.beacons.Beacon(self.opts, self.functions)
    uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
    self.proc_dir = salt.minion.get_proc_dir(self.opts['cachedir'], uid=uid)

    if self.connected and self.opts['pillar']:
        # The pillar has changed due to the connection to the master.
        # Reload the functions so that they can use the new pillar data.
        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
        if hasattr(self, 'schedule'):
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners

    if not hasattr(self, 'schedule'):
        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners,
            cleanup=[salt.minion.master_event(type='alive')],
            proxy=self.proxy)

    # add default scheduling jobs to the minions scheduler
    if self.opts['mine_enabled'] and 'mine.update' in self.functions:
        self.schedule.add_job({
            '__mine_interval':
                {
                    'function': 'mine.update',
                    'minutes': self.opts['mine_interval'],
                    'jid_include': True,
                    'maxrunning': 2,
                    'run_on_start': True,
                    'return_job': self.opts.get('mine_return_job', False)
                }
        }, persist=True)
        log.info('Added mine.update to scheduler')
    else:
        self.schedule.delete_job('__mine_interval', persist=True)

    # add master_alive job if enabled
    if (self.opts['transport'] != 'tcp' and
        self.opts['master_alive_interval'] > 0):
        self.schedule.add_job({
            salt.minion.master_event(type='alive', master=self.opts['master']):
                {
                    'function': 'status.master',
                    'seconds': self.opts['master_alive_interval'],
                    'jid_include': True,
                    'maxrunning': 1,
                    'return_job': False,
                    'kwargs': {'master': self.opts['master'],
                               'connected': True}
                }
        }, persist=True)
        if self.opts['master_failback'] and \
            'master_list' in self.opts and \
            self.opts['master'] != self.opts['master_list'][0]:
            self.schedule.add_job({
                salt.minion.master_event(type='failback'):
                    {
                        'function': 'status.ping_master',
                        'seconds': self.opts['master_failback_interval'],
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master_list'][0]}
                    }
            }, persist=True)
        else:
            self.schedule.delete_job(salt.minion.master_event(type='failback'), persist=True)
    else:
        self.schedule.delete_job(salt.minion.master_event(type='alive', master=self.opts['master']), persist=True)
        self.schedule.delete_job(salt.minion.master_event(type='failback'), persist=True)

    # proxy keepalive
    proxy_alive_fn = fq_proxyname+'.alive'
    if (proxy_alive_fn in self.proxy
        and 'status.proxy_reconnect' in self.functions
        and self.opts.get('proxy_keep_alive', True)):
        # `proxy_keep_alive` defaults to True when unspecified; if it is explicitly set to False, do not schedule reconnect attempts
        self.schedule.add_job({
            '__proxy_keepalive':
                {
                    'function': 'status.proxy_reconnect',
                    'minutes': self.opts.get('proxy_keep_alive_interval', 1),  # by default, check once per minute
                    'jid_include': True,
                    'maxrunning': 1,
                    'return_job': False,
                    'kwargs': {
                        'proxy_name': fq_proxyname
                    }
                }
        }, persist=True)
        self.schedule.enable_schedule()
    else:
        self.schedule.delete_job('__proxy_keepalive', persist=True)

    #  Sync the grains here so the proxy can communicate them to the master
    self.functions['saltutil.sync_grains'](saltenv='base')
    self.grains_cache = self.opts['grains']
    self.ready = True
Example #29
0
    def tune_in(self):
        '''
        Lock onto the publisher. This is the main event loop for the minion
        '''
        log.info('{0} is starting as user \'{1}\''.format(
            self.__class__.__name__, getpass.getuser()))
        log.debug('Minion "{0}" trying to tune in'.format(self.opts['id']))
        self.context = zmq.Context()

        # Prepare the minion event system
        #
        # Start with the publish socket
        id_hash = hashlib.md5(self.opts['id']).hexdigest()
        epub_sock_path = os.path.join(
            self.opts['sock_dir'], 'minion_event_{0}_pub.ipc'.format(id_hash))
        epull_sock_path = os.path.join(
            self.opts['sock_dir'], 'minion_event_{0}_pull.ipc'.format(id_hash))
        self.epub_sock = self.context.socket(zmq.PUB)
        if self.opts.get('ipc_mode', '') == 'tcp':
            epub_uri = 'tcp://127.0.0.1:{0}'.format(self.opts['tcp_pub_port'])
            epull_uri = 'tcp://127.0.0.1:{0}'.format(
                self.opts['tcp_pull_port'])
        else:
            epub_uri = 'ipc://{0}'.format(epub_sock_path)
            epull_uri = 'ipc://{0}'.format(epull_sock_path)
            for uri in (epub_uri, epull_uri):
                if uri.startswith('tcp://'):
                    # This check only applies to IPC sockets
                    continue
                # The socket path is limited to 107 characters on Solaris and
                # Linux, and 103 characters on BSD-based systems.
                # Let's fail at the lower level so no system checks are
                # required.
                if len(uri) > 103:
                    raise SaltSystemExit(
                        'The socket path length is more than what ZMQ allows. '
                        'The length of {0!r} is more than 103 characters. '
                        'Either try to reduce the length of this setting\'s '
                        'path or switch to TCP; in the configuration file set '
                        '"ipc_mode: tcp"'.format(uri))
        log.debug('{0} PUB socket URI: {1}'.format(self.__class__.__name__,
                                                   epub_uri))
        log.debug('{0} PULL socket URI: {1}'.format(self.__class__.__name__,
                                                    epull_uri))

        # Create the pull socket
        self.epull_sock = self.context.socket(zmq.PULL)
        # Bind the event sockets
        self.epub_sock.bind(epub_uri)
        self.epull_sock.bind(epull_uri)
        # Restrict access to the sockets
        if not self.opts.get('ipc_mode', '') == 'tcp':
            os.chmod(epub_sock_path, 448)
            os.chmod(epull_sock_path, 448)

        self.poller = zmq.Poller()
        self.epoller = zmq.Poller()
        self.socket = self.context.socket(zmq.SUB)
        self.socket.setsockopt(zmq.SUBSCRIBE, '')
        self.socket.setsockopt(zmq.IDENTITY, self.opts['id'])
        if hasattr(zmq, 'RECONNECT_IVL_MAX'):
            self.socket.setsockopt(zmq.RECONNECT_IVL_MAX,
                                   self.opts['recon_max'])
        if hasattr(zmq, 'TCP_KEEPALIVE'):
            self.socket.setsockopt(zmq.TCP_KEEPALIVE,
                                   self.opts['tcp_keepalive'])
            self.socket.setsockopt(zmq.TCP_KEEPALIVE_IDLE,
                                   self.opts['tcp_keepalive_idle'])
            self.socket.setsockopt(zmq.TCP_KEEPALIVE_CNT,
                                   self.opts['tcp_keepalive_cnt'])
            self.socket.setsockopt(zmq.TCP_KEEPALIVE_INTVL,
                                   self.opts['tcp_keepalive_intvl'])
        self.socket.connect(self.master_pub)
        self.poller.register(self.socket, zmq.POLLIN)
        self.epoller.register(self.epull_sock, zmq.POLLIN)
        # Send an event to the master that the minion is live
        self._fire_master(
            'Minion {0} started at {1}'.format(self.opts['id'],
                                               time.asctime()), 'minion_start')

        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()

        # On first startup execute a state run if configured to do so
        self._state_run()

        while True:
            try:
                self.schedule.eval()
                socks = dict(
                    self.poller.poll(self.opts['loop_interval'] * 1000))
                if self.socket in socks and socks[self.socket] == zmq.POLLIN:
                    payload = self.serial.loads(self.socket.recv())
                    self._handle_payload(payload)
                time.sleep(0.05)
                # Clean up the minion processes which have been executed and
                # have finished
                multiprocessing.active_children()
                # Check if modules and grains need to be refreshed
                self.passive_refresh()
                # Check the event system
                if self.epoller.poll(1):
                    try:
                        package = self.epull_sock.recv(zmq.NOBLOCK)
                        self.epub_sock.send(package)
                    except Exception:
                        pass
            except Exception:
                log.critical(traceback.format_exc())