def prepare(self):
    '''
    Run the preparation sequence required to start a salt-api daemon.

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).prepare()
    '''
    super(SaltAPI, self).prepare()

    try:
        if self.config['verify_env']:
            logfile = self.config['log_file']
            if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')):
                # Logfile is not using Syslog, verify
                # Tighten the umask while the logfile is created/checked so
                # it is not left group/world writable, then restore it.
                current_umask = os.umask(0o027)
                verify_files([logfile], self.config['user'])
                os.umask(current_umask)
    except OSError as err:
        # A broken environment is fatal for the daemon: log the full
        # traceback and shut down with the underlying OS error code.
        log.exception('Failed to prepare salt environment')
        self.shutdown(err.errno)

    self.setup_logfile_logger()
    verify_log(self.config)
    log.info('Setting up the Salt API')
    self.api = salt.client.netapi.NetapiClient(self.config)
    # Daemonize first, then record the pidfile for the resulting process.
    self.daemonize_if_required()
    self.set_pidfile()
def run(self):
    '''
    Run the salt-key CLI: verify the key directories (unless keys are
    being generated), set up file logging, then dispatch to KeyCLI.
    '''
    self.parse_args()

    if self.config['verify_env']:
        pki_dir = self.config['pki_dir']
        dirs_to_check = []
        if not self.config['gen_keys']:
            dirs_to_check.append(pki_dir)
            for sub in ('minions', 'minions_pre', 'minions_rejected'):
                dirs_to_check.append(os.path.join(pki_dir, sub))

        verify_env(
            dirs_to_check,
            self.config['user'],
            permissive=self.config['permissive_pki_access'],
            pki_dir=pki_dir,
        )

        # NOTE(review): the scheme check is done on 'log_file' while the
        # file that gets verified is 'key_logfile' -- confirm intentional.
        if not self.config['log_file'].startswith(('tcp://', 'udp://', 'file://')):
            # Not a syslog URI -- make sure the key logfile is writable.
            verify_files([self.config['key_logfile']], self.config['user'])

    self.setup_logfile_logger()

    key = salt.key.KeyCLI(self.config)
    if check_user(self.config['user']):
        key.run()
def run(self):
    '''
    Execute salt-key
    '''
    self.parse_args()

    if self.config['verify_env']:
        verify_env_dirs = []
        if not self.config['gen_keys']:
            verify_env_dirs.extend([
                self.config['pki_dir'],
                os.path.join(self.config['pki_dir'], 'minions'),
                os.path.join(self.config['pki_dir'], 'minions_pre'),
                os.path.join(self.config['pki_dir'], 'minions_rejected'),
            ])

        verify_env(
            verify_env_dirs,
            self.config['user'],
            permissive=self.config['permissive_pki_access'],
            pki_dir=self.config['pki_dir'],
        )
        # BUG FIX: the original condition chained
        # ``not x.startswith('tcp://') or not x.startswith('udp://') or ...``
        # which is always True (a path cannot start with all three schemes
        # at once), so syslog-style log URIs were still "verified" as
        # plain files.  str.startswith accepts a tuple of prefixes.
        if not self.config['key_logfile'].startswith(
                ('tcp://', 'udp://', 'file://')):
            # Logfile is not using Syslog, verify
            verify_files(
                [self.config['key_logfile']],
                self.config['user']
            )

    self.setup_logfile_logger()

    key = salt.key.KeyCLI(self.config)
    key.run()
def run(self):
    """
    Run the salt-run CLI front end.
    """
    self.parse_args()

    if self.config["verify_env"]:
        verify_env(
            [self.config["pki_dir"], self.config["cachedir"]],
            self.config["user"],
            permissive=self.config["permissive_pki_access"],
            pki_dir=self.config["pki_dir"],
        )
        logfile = self.config["log_file"]
        if not logfile.startswith(("tcp://", "udp://", "file://")):
            # Not a syslog URI -- confirm the logfile is writable.
            verify_files([logfile], self.config["user"])

    # Setup file logging!
    self.setup_logfile_logger()

    runner = salt.runner.Runner(self.config)
    if self.options.doc:
        runner._print_docs()
        self.exit(salt.exitcodes.EX_OK)

    # Catch SaltClientError here so SystemExit is never raised from
    # anywhere else when the runners are driven through the Python API.
    try:
        if check_user(self.config["user"]):
            runner.run()
    except SaltClientError as exc:
        raise SystemExit(str(exc))
def prepare(self):
    '''
    Run the preparation sequence required to start a salt master server.

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).prepare()
    '''
    self.parse_args()

    try:
        if self.config['verify_env']:
            # Ensure every runtime directory exists with sane ownership
            # and permissions before anything else touches them.
            verify_env(
                [
                    self.config['pki_dir'],
                    os.path.join(self.config['pki_dir'], 'minions'),
                    os.path.join(self.config['pki_dir'], 'minions_pre'),
                    os.path.join(self.config['pki_dir'], 'minions_denied'),
                    os.path.join(self.config['pki_dir'], 'minions_rejected'),
                    self.config['cachedir'],
                    os.path.join(self.config['cachedir'], 'jobs'),
                    os.path.join(self.config['cachedir'], 'proc'),
                    self.config['sock_dir'],
                    self.config['token_dir'],
                ],
                self.config['user'],
                permissive=self.config['permissive_pki_access'],
                pki_dir=self.config['pki_dir'],
            )
            logfile = self.config['log_file']
            if logfile is not None and not logfile.startswith(
                    ('tcp://', 'udp://', 'file://')):
                # Logfile is not using Syslog, verify
                verify_files([logfile], self.config['user'])
    except OSError as err:
        # Environment problems are fatal; exit with the OS error code.
        sys.exit(err.errno)

    self.setup_logfile_logger()
    logger.info('Setting up the Salt Master')

    if self.config['transport'].lower() == 'zeromq':
        # ZeroMQ transport: the publish/ret TCP ports must be bindable.
        if not verify_socket(self.config['interface'],
                             self.config['publish_port'],
                             self.config['ret_port']):
            self.exit(4, 'The ports are not available to bind\n')
        self.config['interface'] = ip_bracket(self.config['interface'])
        migrations.migrate_paths(self.config)

        # Late import so logging works correctly
        import salt.master
        self.master = salt.master.Master(self.config)
    else:
        # Non-zeromq (RAET) transport.
        # Add a udp port check here
        import salt.daemons
        self.master = salt.daemons.IoFloMaster(self.config)
    # Daemonize first, then record the pidfile for the final process.
    self.daemonize_if_required()
    self.set_pidfile()
def prepare(self):
    '''
    Run the preparation sequence required to start a salt minion.

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).prepare()
    '''
    self.parse_args()

    try:
        if self.config['verify_env']:
            confd = os.path.join(
                os.path.dirname(self.config['conf_file']), 'minion.d'
            )
            verify_env(
                [
                    self.config['pki_dir'],
                    self.config['cachedir'],
                    self.config['sock_dir'],
                    self.config['extension_modules'],
                    confd,
                ],
                self.config['user'],
                permissive=self.config['permissive_pki_access'],
                pki_dir=self.config['pki_dir'],
            )
            # BUG FIX: the original chained
            # ``not startswith('tcp://') or not startswith('udp://') or ...``
            # which is always True, so syslog-style log URIs were still
            # treated as plain files.  Also removed the duplicated
            # verify_files() call that verified the same file twice.
            if not self.config['log_file'].startswith(
                    ('tcp://', 'udp://', 'file://')):
                # Logfile is not using Syslog, verify
                verify_files(
                    [self.config['log_file']],
                    self.config['user']
                )
    except OSError as err:
        sys.exit(err.errno)

    self.setup_logfile_logger()
    logger.warn(
        'Setting up the Salt Minion "{0}"'.format(
            self.config['id']
        )
    )
    migrations.migrate_paths(self.config)
    # Late import so logging works correctly
    import salt.minion
    # If the minion key has not been accepted, then Salt enters a loop
    # waiting for it, if we daemonize later then the minion could halt
    # the boot process waiting for a key to be accepted on the master.
    # This is the latest safe place to daemonize
    self.daemonize_if_required()
    self.minion = salt.minion.Minion(self.config)
    self.set_pidfile()
def run(self):
    '''
    Entry point for the salt-call command line client.
    '''
    self.parse_args()

    if self.config['verify_env']:
        verify_env(
            [self.config['pki_dir'], self.config['cachedir']],
            self.config['user'],
            permissive=self.config['permissive_pki_access'],
            pki_dir=self.config['pki_dir'],
        )
        logfile = self.config['log_file']
        if not logfile.startswith(('tcp://', 'udp://', 'file://')):
            # On-disk logfile (not syslog) -- verify it is usable.
            verify_files([logfile], self.config['user'])

    # Apply command line overrides before logging is configured.
    if self.options.file_root:
        # check if the argument is pointing to a file on disk
        self.config['file_roots'] = {
            'base': [os.path.abspath(self.options.file_root)]
        }
    if self.options.pillar_root:
        # check if the argument is pointing to a file on disk
        self.config['pillar_roots'] = {
            'base': [os.path.abspath(self.options.pillar_root)]
        }
    if self.options.local:
        self.config['file_client'] = 'local'
    if self.options.master:
        self.config['master'] = self.options.master

    # Setup file logging!
    self.setup_logfile_logger()

    caller = salt.cli.caller.Caller.factory(self.config)

    if self.options.doc:
        caller.print_docs()
        self.exit(salt.exitcodes.EX_OK)
    if self.options.grains_run:
        caller.print_grains()
        self.exit(salt.exitcodes.EX_OK)

    caller.run()
def prepare(self):
    '''
    Run the preparation sequence required to start a salt master server.

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).prepare()
    '''
    self.parse_args()

    try:
        if self.config['verify_env']:
            # Ensure every runtime directory exists with sane ownership
            # and permissions before the master starts using them.
            verify_env(
                [
                    self.config['pki_dir'],
                    os.path.join(self.config['pki_dir'], 'minions'),
                    os.path.join(self.config['pki_dir'], 'minions_pre'),
                    os.path.join(self.config['pki_dir'], 'minions_rejected'),
                    self.config['cachedir'],
                    os.path.join(self.config['cachedir'], 'jobs'),
                    os.path.join(self.config['cachedir'], 'proc'),
                    self.config['sock_dir'],
                    self.config['token_dir'],
                ],
                self.config['user'],
                permissive=self.config['permissive_pki_access'],
                pki_dir=self.config['pki_dir'],
            )
            logfile = self.config['log_file']
            if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')):
                # Logfile is not using Syslog, verify
                verify_files([logfile], self.config['user'])
    except OSError as err:
        # Environment problems are fatal; exit with the OS error code.
        sys.exit(err.errno)

    self.setup_logfile_logger()
    logger.info('Setting up the Salt Master')

    # The publish and return TCP ports must be bindable before starting.
    if not verify_socket(self.config['interface'],
                         self.config['publish_port'],
                         self.config['ret_port']):
        self.exit(4, 'The ports are not available to bind\n')
    self.config['interface'] = ip_bracket(self.config['interface'])
    migrations.migrate_paths(self.config)

    # Late import so logging works correctly
    import salt.master
    self.master = salt.master.Master(self.config)
    # Daemonize first, then record the pidfile for the final process.
    self.daemonize_if_required()
    self.set_pidfile()
def run(self):
    '''
    Run the salt-call client.
    '''
    self.parse_args()

    if self.config['verify_env']:
        verify_env(
            [self.config['pki_dir'], self.config['cachedir']],
            self.config['user'],
            permissive=self.config['permissive_pki_access'],
            pki_dir=self.config['pki_dir'],
        )
        if not self.config['log_file'].startswith(('tcp://', 'udp://', 'file://')):
            # Logfile is on disk, not syslog -- verify it.
            verify_files([self.config['log_file']], self.config['user'])

    # Apply command line overrides before logging is configured.
    if self.options.file_root:
        # check if the argument is pointing to a file on disk
        file_root = os.path.abspath(self.options.file_root)
        self.config['file_roots'] = {'base': [file_root]}
    if self.options.pillar_root:
        # check if the argument is pointing to a file on disk
        pillar_root = os.path.abspath(self.options.pillar_root)
        self.config['pillar_roots'] = {'base': [pillar_root]}
    if self.options.local:
        self.config['file_client'] = 'local'
    if self.options.master:
        self.config['master'] = self.options.master

    # Setup file logging!
    self.setup_logfile_logger()

    caller = salt.cli.caller.Caller(self.config)

    if self.options.doc:
        caller.print_docs()
        self.exit(0)
    if self.options.grains_run:
        caller.print_grains()
        self.exit(0)

    caller.run()
def prepare(self):
    '''
    Run the preparation sequence required to start a salt syndic minion.

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).prepare()
    '''
    super(Syndic, self).prepare()

    try:
        if self.config['verify_env']:
            verify_env(
                [
                    self.config['pki_dir'],
                    self.config['cachedir'],
                    self.config['sock_dir'],
                    self.config['extension_modules'],
                ],
                self.config['user'],
                permissive=self.config['permissive_pki_access'],
                pki_dir=self.config['pki_dir'],
            )
            logfile = self.config['log_file']
            if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')):
                # Logfile is not using Syslog, verify
                # Tighten the umask while the logfile is created/checked,
                # then restore the previous umask.
                current_umask = os.umask(0o027)
                verify_files([logfile], self.config['user'])
                os.umask(current_umask)
    except OSError as err:
        # A broken environment is fatal: log traceback and shut down
        # with the underlying OS error code.
        log.exception('Failed to prepare salt environment')
        self.shutdown(err.errno)

    self.setup_logfile_logger()
    verify_log(self.config)
    log.info(
        'Setting up the Salt Syndic Minion "{0}"'.format(
            self.config['id']
        )
    )

    # Late import so logging works correctly
    import salt.minion
    self.daemonize_if_required()
    # if its a multisyndic, do so
    if isinstance(self.config.get('master'), list):
        self.syndic = salt.minion.MultiSyndic(self.config)
    else:
        self.syndic = salt.minion.Syndic(self.config)
    self.set_pidfile()
def run(self):
    """
    Run the salt-cp command line client.
    """
    self.parse_args()

    logfile = self.config["log_file"]
    if self.config["verify_env"] and not logfile.startswith(
        ("tcp://", "udp://", "file://")
    ):
        # Plain on-disk logfile -- make sure it is writable.
        verify_files([logfile], self.config["user"])

    # Setup file logging!
    self.setup_logfile_logger()

    salt.cli.cp.SaltCP(self.config).run()
def run(self):
    """
    Entry point for the salt-call command line client.
    """
    self.parse_args()

    cfg = self.config
    if cfg["verify_env"]:
        verify_env(
            [cfg["pki_dir"], cfg["cachedir"]],
            cfg["user"],
            permissive=cfg["permissive_pki_access"],
            pki_dir=cfg["pki_dir"],
        )
        if not cfg["log_file"].startswith(("tcp://", "udp://", "file://")):
            # Logfile is a plain file (not syslog) -- verify it.
            verify_files([cfg["log_file"]], cfg["user"])

    # CLI overrides for file/pillar roots, local client and master.
    if self.options.file_root:
        # check if the argument is pointing to a file on disk
        cfg["file_roots"] = {"base": [os.path.abspath(self.options.file_root)]}
    if self.options.pillar_root:
        # check if the argument is pointing to a file on disk
        cfg["pillar_roots"] = {"base": [os.path.abspath(self.options.pillar_root)]}
    if self.options.local:
        cfg["file_client"] = "local"
    if self.options.master:
        cfg["master"] = self.options.master

    # Setup file logging!
    self.setup_logfile_logger()

    caller = salt.cli.caller.Caller.factory(cfg)

    if self.options.doc:
        caller.print_docs()
        self.exit(salt.exitcodes.EX_OK)
    if self.options.grains_run:
        caller.print_grains()
        self.exit(salt.exitcodes.EX_OK)

    caller.run()
def run(self):
    '''
    Run the salt-cp command line client.
    '''
    self.parse_args()

    if self.config['verify_env']:
        syslog_schemes = ('tcp://', 'udp://', 'file://')
        if not self.config['log_file'].startswith(syslog_schemes):
            # Logfile lives on disk -- make sure it is usable.
            verify_files([self.config['log_file']], self.config['user'])

    # Setup file logging!
    self.setup_logfile_logger()

    copier = salt.cli.cp.SaltCP(self.config)
    copier.run()
def run(self):
    '''
    Run the salt-key CLI for either the raet or zeromq transport.
    '''
    import salt.key
    self.parse_args()

    if self.config['verify_env']:
        pki_dir = self.config['pki_dir']
        transport_subdirs = {
            'raet': ('accepted', 'pending', 'rejected'),
            'zeromq': ('minions', 'minions_pre', 'minions_rejected'),
        }
        dirs_to_verify = []
        if not self.config['gen_keys']:
            subdirs = transport_subdirs.get(self.config['transport'])
            if subdirs is not None:
                dirs_to_verify.append(pki_dir)
                dirs_to_verify.extend(
                    os.path.join(pki_dir, sub) for sub in subdirs
                )

        verify_env(
            dirs_to_verify,
            self.config['user'],
            permissive=self.config['permissive_pki_access'],
            pki_dir=pki_dir,
        )

        # NOTE(review): the scheme check uses 'log_file' but the file
        # that gets verified is 'key_logfile' -- confirm intentional.
        if not self.config['log_file'].startswith(('tcp://', 'udp://', 'file://')):
            # Not a syslog URI -- verify the key logfile on disk.
            verify_files([self.config['key_logfile']], self.config['user'])

    self.setup_logfile_logger()

    key = salt.key.KeyCLI(self.config)
    if check_user(self.config['user']):
        key.run()
def prepare(self):
    '''
    Run the preparation sequence required to start a salt syndic minion.

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).prepare()
    '''
    self.parse_args()

    try:
        if self.config['verify_env']:
            verify_env(
                [
                    self.config['pki_dir'],
                    self.config['cachedir'],
                    self.config['sock_dir'],
                    self.config['extension_modules'],
                ],
                self.config['user'],
                permissive=self.config['permissive_pki_access'],
                pki_dir=self.config['pki_dir'],
            )
            # BUG FIX: the original chained
            # ``not startswith('tcp://') or not startswith('udp://') or ...``
            # which is always True, so syslog-style log URIs were still
            # verified as plain files.  Use the tuple form of startswith.
            if not self.config['log_file'].startswith(
                    ('tcp://', 'udp://', 'file://')):
                # Logfile is not using Syslog, verify
                verify_files(
                    [self.config['log_file']],
                    self.config['user']
                )
    except OSError as err:
        sys.exit(err.errno)

    self.setup_logfile_logger()
    logger.warn(
        'Setting up the Salt Syndic Minion "{0}"'.format(
            self.config['id']
        )
    )

    # Late import so logging works correctly
    import salt.minion
    self.daemonize_if_required()
    self.syndic = salt.minion.Syndic(self.config)
    self.set_pidfile()
def prepare(self):
    '''
    Run the preparation sequence required to start a salt syndic minion.

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).prepare()
    '''
    self.parse_args()

    try:
        if self.config['verify_env']:
            # Ensure the runtime directories exist with sane ownership.
            verify_env(
                [
                    self.config['pki_dir'],
                    self.config['cachedir'],
                    self.config['sock_dir'],
                    self.config['extension_modules'],
                ],
                self.config['user'],
                permissive=self.config['permissive_pki_access'],
                pki_dir=self.config['pki_dir'],
            )
            logfile = self.config['log_file']
            if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')):
                # Logfile is not using Syslog, verify
                verify_files([logfile], self.config['user'])
    except OSError as err:
        # A broken environment is fatal: log the traceback and exit
        # with the underlying OS error code.
        logger.exception('Failed to prepare salt environment')
        sys.exit(err.errno)

    self.setup_logfile_logger()
    logger.info(
        'Setting up the Salt Syndic Minion "{0}"'.format(
            self.config['id']
        )
    )

    # Late import so logging works correctly
    import salt.minion
    self.daemonize_if_required()
    self.syndic = salt.minion.Syndic(self.config)
    self.set_pidfile()
def run(self):
    """
    Run the salt-key command for the configured transport.
    """
    self.parse_args()

    if self.config["verify_env"]:
        pki = self.config["pki_dir"]
        check_dirs = []
        if not self.config["gen_keys"]:
            transport = self.config["transport"]
            if transport == "raet":
                names = ("accepted", "pending", "rejected")
            elif transport == "zeromq":
                names = ("minions", "minions_pre", "minions_rejected")
            else:
                names = None
            if names:
                check_dirs = [pki] + [os.path.join(pki, n) for n in names]

        verify_env(
            check_dirs,
            self.config["user"],
            permissive=self.config["permissive_pki_access"],
            pki_dir=pki,
        )

        # NOTE(review): checks the 'log_file' scheme but verifies
        # 'key_logfile' -- confirm intentional.
        if not self.config["log_file"].startswith(("tcp://", "udp://", "file://")):
            # Not syslog; ensure the key logfile is usable.
            verify_files([self.config["key_logfile"]], self.config["user"])

    self.setup_logfile_logger()

    key = salt.key.KeyCLI(self.config)
    if check_user(self.config["user"]):
        key.run()
def run(self):
    '''
    Execute the salt call!
    '''
    self.parse_args()

    if self.config['verify_env']:
        verify_env(
            [self.config['pki_dir'], self.config['cachedir']],
            self.config['user'],
            permissive=self.config['permissive_pki_access'],
            pki_dir=self.config['pki_dir'],
        )
        # BUG FIX: the original chained
        # ``not startswith('tcp://') or not startswith('udp://') or ...``
        # which is always True, so syslog-style log URIs were still
        # verified as plain files.  Use the tuple form of startswith.
        if not self.config['log_file'].startswith(
                ('tcp://', 'udp://', 'file://')):
            # Logfile is not using Syslog, verify
            verify_files(
                [self.config['log_file']],
                self.config['user']
            )

    if self.options.local:
        self.config['file_client'] = 'local'
    if self.options.master:
        self.config['master'] = self.options.master

    # Setup file logging!
    self.setup_logfile_logger()

    caller = salt.cli.caller.Caller(self.config)

    if self.options.doc:
        caller.print_docs()
        self.exit(0)

    if self.options.grains_run:
        caller.print_grains()
        self.exit(0)

    caller.run()
def prepare(self):
    '''
    Run the preparation sequence required to start a salt syndic minion.

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).prepare()
    '''
    super(Syndic, self).prepare()

    try:
        if self.config['verify_env']:
            verify_env(
                [
                    self.config['pki_dir'],
                    self.config['cachedir'],
                    self.config['sock_dir'],
                    self.config['extension_modules'],
                ],
                self.config['user'],
                permissive=self.config['permissive_pki_access'],
                pki_dir=self.config['pki_dir'],
            )
            logfile = self.config['log_file']
            if logfile is not None and not logfile.startswith(
                    ('tcp://', 'udp://', 'file://')):
                # Logfile is not using Syslog, verify
                # Tighten the umask while the logfile is created/checked,
                # then restore the previous umask.
                current_umask = os.umask(0o027)
                verify_files([logfile], self.config['user'])
                os.umask(current_umask)
    except OSError as error:
        # Delegate the fatal-environment handling to the base class.
        self.environment_failure(error)

    self.setup_logfile_logger()
    verify_log(self.config)
    self.action_log_info('Setting up "{0}"'.format(self.config['id']))

    # Late import so logging works correctly
    import salt.minion
    self.daemonize_if_required()
    self.syndic = salt.minion.SyndicManager(self.config)
    self.set_pidfile()
def run(self):
    '''
    Verify the environment, enable file logging, then mount the salt
    FUSE filesystem in the foreground.
    '''
    self.parse_args()

    log_target = self.config['log_file']
    if self.config['verify_env'] and not log_target.startswith(
            ('tcp://', 'udp://', 'file://')):
        # On-disk logfile (not syslog) -- verify it.
        verify_files([log_target], self.config['user'])

    # Setup file logging!
    self.setup_logfile_logger()

    driver = FUSE(
        SaltFuseDriver(self.config),
        self.mount_path,
        foreground=True,
    )
def run(self):
    '''
    Verify the environment, configure file logging, then mount the
    salt FUSE filesystem (runs in the foreground).
    '''
    self.parse_args()

    if self.config['verify_env']:
        target = self.config['log_file']
        if not target.startswith(('tcp://', 'udp://', 'file://')):
            # On-disk logfile -- verify it is writable.
            verify_files([target], self.config['user'])

    # Setup file logging!
    self.setup_logfile_logger()

    driver = FUSE(
        SaltFuseDriver(self.config, self.remote_path, self.minion_id),
        self.mount_path,
        foreground=True,
    )
def run(self):
    '''
    Execute salt-cp
    '''
    self.parse_args()

    if self.config['verify_env']:
        # BUG FIX: the original chained
        # ``not startswith('tcp://') or not startswith('udp://') or ...``
        # which is always True, so syslog-style log URIs were still
        # verified as plain files.  Use the tuple form of startswith.
        if not self.config['log_file'].startswith(
                ('tcp://', 'udp://', 'file://')):
            # Logfile is not using Syslog, verify
            verify_files(
                [self.config['log_file']],
                self.config['user']
            )

    # Setup file logging!
    self.setup_logfile_logger()

    cp_ = salt.cli.cp.SaltCP(self.config)
    cp_.run()
def run(self):
    '''
    Run the salt-run command line client.
    '''
    import salt.runner
    self.parse_args()

    if self.config['verify_env']:
        verify_env(
            [self.config['pki_dir'], self.config['cachedir']],
            self.config['user'],
            permissive=self.config['permissive_pki_access'],
            pki_dir=self.config['pki_dir'],
        )
        logfile = self.config['log_file']
        if not logfile.startswith(('tcp://', 'udp://', 'file://')):
            # Not a syslog URI -- confirm the logfile can be written.
            verify_files([logfile], self.config['user'])

    # Setup file logging!
    self.setup_logfile_logger()

    runner = salt.runner.Runner(self.config)
    if self.options.doc:
        runner.print_docs()
        self.exit(os.EX_OK)

    # Run here so SystemExit is not raised anywhere else when the
    # runners are used via the Python API.
    try:
        if check_user(self.config['user']):
            runner.run()
    except SaltClientError as exc:
        raise SystemExit(str(exc))
def run(self):
    '''
    Execute salt-run
    '''
    self.parse_args()

    if self.config['verify_env']:
        verify_env(
            [self.config['pki_dir'], self.config['cachedir']],
            self.config['user'],
            permissive=self.config['permissive_pki_access'],
            pki_dir=self.config['pki_dir'],
        )
        # BUG FIX: the original chained
        # ``not startswith('tcp://') or not startswith('udp://') or ...``
        # which is always True, so syslog-style log URIs were still
        # verified as plain files.  Use the tuple form of startswith.
        if not self.config['log_file'].startswith(
                ('tcp://', 'udp://', 'file://')):
            # Logfile is not using Syslog, verify
            verify_files(
                [self.config['log_file']],
                self.config['user']
            )

    # Setup file logging!
    self.setup_logfile_logger()

    runner = salt.runner.Runner(self.config)
    if self.options.doc:
        runner._print_docs()
        self.exit(0)

    # Run this here so SystemExit isn't raised anywhere else when
    # someone tries to use the runners via the python API
    try:
        if check_user(self.config['user']):
            runner.run()
    except SaltClientError as exc:
        raise SystemExit(str(exc))
def prepare(self):
    '''
    Run the preparation sequence required to start a salt minion.

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).prepare()
    '''
    super(Minion, self).prepare()

    try:
        if self.config['verify_env']:
            confd = self.config.get('default_include')
            if confd:
                # If 'default_include' is specified in config, then use it
                if '*' in confd:
                    # Value is of the form "minion.d/*.conf"
                    confd = os.path.dirname(confd)

                if not os.path.isabs(confd):
                    # If configured 'default_include' is not an absolute
                    # path, consider it relative to folder of 'conf_file'
                    # (/etc/salt by default)
                    confd = os.path.join(
                        os.path.dirname(self.config['conf_file']), confd
                    )
            else:
                confd = os.path.join(
                    os.path.dirname(self.config['conf_file']), 'minion.d'
                )
            v_dirs = [
                self.config['pki_dir'],
                self.config['cachedir'],
                self.config['sock_dir'],
                self.config['extension_modules'],
                confd,
            ]
            if self.config.get('transport') == 'raet':
                # RAET keeps its key state in extra subdirectories.
                v_dirs.append(os.path.join(self.config['pki_dir'], 'accepted'))
                v_dirs.append(os.path.join(self.config['pki_dir'], 'pending'))
                v_dirs.append(os.path.join(self.config['pki_dir'], 'rejected'))
                v_dirs.append(os.path.join(self.config['cachedir'], 'raet'))
            verify_env(
                v_dirs,
                self.config['user'],
                permissive=self.config['permissive_pki_access'],
                pki_dir=self.config['pki_dir'],
            )
            logfile = self.config['log_file']
            if logfile is not None and not logfile.startswith(('tcp://', 'udp://', 'file://')):
                # Logfile is not using Syslog, verify
                # Tighten the umask while the logfile is created/checked,
                # then restore the previous umask.
                current_umask = os.umask(0o027)
                verify_files([logfile], self.config['user'])
                os.umask(current_umask)
    except OSError as err:
        log.exception('Failed to prepare salt environment')
        self.shutdown(err.errno)

    self.setup_logfile_logger()
    verify_log(self.config)
    log.info(
        'Setting up the Salt Minion "{0}"'.format(
            self.config['id']
        )
    )
    migrations.migrate_paths(self.config)

    # Bail out if we find a process running and it matches out pidfile
    if self.check_running():
        log.exception('Salt minion is already running. Exiting.')
        self.shutdown(1)

    # TODO: AIO core is separate from transport
    if self.config['transport'].lower() in ('zeromq', 'tcp'):
        # Late import so logging works correctly
        import salt.minion
        # If the minion key has not been accepted, then Salt enters a loop
        # waiting for it, if we daemonize later then the minion could halt
        # the boot process waiting for a key to be accepted on the master.
        # This is the latest safe place to daemonize
        self.daemonize_if_required()
        self.set_pidfile()
        if isinstance(self.config.get('master'), list):
            if self.config.get('master_type') == 'failover':
                self.minion = salt.minion.Minion(self.config)
            else:
                self.minion = salt.minion.MultiMinion(self.config)
        else:
            self.minion = salt.minion.Minion(self.config)
    else:
        # RAET transport path.
        import salt.daemons.flo
        self.daemonize_if_required()
        self.set_pidfile()
        self.minion = salt.daemons.flo.IofloMinion(self.config)
def run(self):
    '''
    Execute the salt-cloud command line.

    Verifies the environment, sets up logging, then dispatches exactly
    one of the mutually exclusive CLI modes (query, list, destroy,
    action, function, profile, set-password, map) and prints the
    result through salt's outputter system.
    '''
    # Parse shell arguments
    self.parse_args()

    salt_master_user = self.config.get('user', salt.utils.get_user())
    if salt_master_user is not None and not check_user(salt_master_user):
        self.error(
            'If salt-cloud is running on a master machine, salt-cloud '
            'needs to run as the same user as the salt-master, {0!r}. If '
            'salt-cloud is not running on a salt-master, the appropriate '
            'write permissions must be granted to /etc/salt/. Please run '
            'salt-cloud as root, {0!r}, or change permissions for '
            '/etc/salt/.'.format(salt_master_user))

    try:
        if self.config['verify_env']:
            verify_env([os.path.dirname(self.config['conf_file'])],
                       salt_master_user)
            logfile = self.config['log_file']
            if logfile is not None and not logfile.startswith('tcp://') \
                    and not logfile.startswith('udp://') \
                    and not logfile.startswith('file://'):
                # Logfile is not using Syslog, verify
                verify_files([logfile], salt_master_user)
    except (IOError, OSError) as err:
        log.error('Error while verifying the environment: {0}'.format(err))
        sys.exit(err.errno)

    # Setup log file logging
    self.setup_logfile_logger()

    if self.options.update_bootstrap:
        # Refresh the bootstrap script, print the result, and exit.
        ret = salt.utils.cloud.update_bootstrap(self.config)
        display_output = salt.output.get_printout(
            self.options.output, self.config)
        print(display_output(ret))
        self.exit(os.EX_OK)

    log.info('salt-cloud starting')
    mapper = salt.cloud.Map(self.config)

    ret = {}

    if self.selected_query_option is not None:
        # Query modes: list providers, interpolate a map, or query all
        # providers in parallel.
        if self.selected_query_option == 'list_providers':
            try:
                ret = mapper.provider_list()
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error listing providers: {0}'
                self.handle_exception(msg, exc)
        elif self.config.get('map', None):
            log.info('Applying map from {0!r}.'.format(self.config['map']))
            try:
                ret = mapper.interpolated_map(
                    query=self.selected_query_option)
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error with a custom map: {0}'
                self.handle_exception(msg, exc)
        else:
            try:
                ret = mapper.map_providers_parallel(
                    query=self.selected_query_option)
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error with a map: {0}'
                self.handle_exception(msg, exc)

    elif self.options.list_locations is not None:
        try:
            ret = mapper.location_list(self.options.list_locations)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing locations: {0}'
            self.handle_exception(msg, exc)

    elif self.options.list_images is not None:
        try:
            ret = mapper.image_list(self.options.list_images)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing images: {0}'
            self.handle_exception(msg, exc)

    elif self.options.list_sizes is not None:
        try:
            ret = mapper.size_list(self.options.list_sizes)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing sizes: {0}'
            self.handle_exception(msg, exc)

    elif self.options.destroy and (self.config.get('names', None) or
                                   self.config.get('map', None)):
        # Destroy mode: resolve targets from the map or from explicit
        # names, confirm with the user, then destroy.
        if self.config.get('map', None):
            log.info('Applying map from {0!r}.'.format(self.config['map']))
            matching = mapper.delete_map(query='list_nodes')
        else:
            matching = mapper.get_running_by_names(
                self.config.get('names', ()),
                profile=self.options.profile)
        if not matching:
            print('No machines were found to be destroyed')
            self.exit(os.EX_OK)

        msg = 'The following virtual machines are set to be destroyed:\n'
        names = set()
        for alias, drivers in matching.iteritems():
            msg += '  {0}:\n'.format(alias)
            for driver, vms in drivers.iteritems():
                msg += '    {0}:\n'.format(driver)
                for name in vms:
                    msg += '      {0}\n'.format(name)
                    names.add(name)
        try:
            if self.print_confirm(msg):
                ret = mapper.destroy(names, cached=True)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error destroying machines: {0}'
            self.handle_exception(msg, exc)

    elif self.options.action and (self.config.get('names', None) or
                                  self.config.get('map', None)):
        # Action mode: run a named action against the selected machines;
        # 'key=value' args on the command line become action kwargs.
        if self.config.get('map', None):
            log.info('Applying map from {0!r}.'.format(self.config['map']))
            names = mapper.get_vmnames_by_action(self.options.action)
        else:
            names = self.config.get('names', None)

        kwargs = {}
        machines = []
        msg = (
            'The following virtual machines are set to be actioned with '
            '"{0}":\n'.format(self.options.action))
        for name in names:
            if '=' in name:
                # This is obviously not a machine name, treat it as a kwarg
                comps = name.split('=')
                kwargs[comps[0]] = comps[1]
            else:
                msg += '  {0}\n'.format(name)
                machines.append(name)
        names = machines

        try:
            if self.print_confirm(msg):
                ret = mapper.do_action(names, kwargs)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error actioning machines: {0}'
            self.handle_exception(msg, exc)

    elif self.options.function:
        # Function mode: every argument must be 'key=value'; leftovers
        # are a usage error.
        kwargs = {}
        args = self.args[:]
        for arg in args[:]:
            if '=' in arg:
                key, value = arg.split('=')
                kwargs[key] = value
                args.remove(arg)
        if args:
            self.error(
                'Any arguments passed to --function need to be passed '
                'as kwargs. Ex: image=ami-54cf5c3d. Remaining '
                'arguments: {0}'.format(args))
        try:
            ret = mapper.do_function(
                self.function_provider, self.function_name, kwargs)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error running the function: {0}'
            self.handle_exception(msg, exc)

    elif self.options.profile and self.config.get('names', False):
        # Profile mode: spin up the named machines from a profile.
        try:
            ret = mapper.run_profile(self.options.profile,
                                     self.config.get('names'))
        except (SaltCloudException, Exception) as exc:
            msg = 'There was a profile error: {0}'
            self.handle_exception(msg, exc)

    elif self.options.set_password:
        username = self.credential_username
        provider_name = "salt.cloud.provider.{0}".format(
            self.credential_provider)
        # TODO: check if provider is configured
        # set the password
        salt.utils.cloud.store_password_in_keyring(provider_name, username)

    elif self.config.get('map', None) and \
            self.selected_query_option is None:
        # Map mode: compute the create/destroy delta for the map,
        # summarize it, confirm, then apply.
        if len(mapper.rendered_map) == 0:
            sys.stderr.write('No nodes defined in this map')
            self.exit(salt.exitcodes.EX_GENERIC)
        try:
            ret = {}
            run_map = True
            log.info('Applying map from {0!r}.'.format(self.config['map']))
            dmap = mapper.map_data()

            msg = ''
            if 'errors' in dmap:
                # display profile errors
                msg += 'Found the following errors:\n'
                for profile_name, error in dmap['errors'].iteritems():
                    msg += '  {0}: {1}\n'.format(profile_name, error)
                sys.stderr.write(msg)
                sys.stderr.flush()

            msg = ''
            if 'existing' in dmap:
                msg += ('The following virtual machines already exist:\n')
                for name in dmap['existing']:
                    msg += '  {0}\n'.format(name)
            if dmap['create']:
                msg += ('The following virtual machines are set to be '
                        'created:\n')
                for name in dmap['create']:
                    msg += '  {0}\n'.format(name)
            if 'destroy' in dmap:
                msg += ('The following virtual machines are set to be '
                        'destroyed:\n')
                for name in dmap['destroy']:
                    msg += '  {0}\n'.format(name)

            if not dmap['create'] and not dmap.get('destroy', None):
                if not dmap.get('existing', None):
                    # nothing to create or destroy & nothing exists
                    print(msg)
                    self.exit(1)
                else:
                    # nothing to create or destroy, print existing
                    run_map = False

            if run_map:
                if self.print_confirm(msg):
                    ret = mapper.run_map(dmap)

                if self.config.get('parallel', False) is False:
                    log.info('Complete')

            if dmap.get('existing', None):
                for name in dmap['existing'].keys():
                    ret[name] = {'Message': 'Already running'}

        except (SaltCloudException, Exception) as exc:
            msg = 'There was a query error: {0}'
            self.handle_exception(msg, exc)

    else:
        self.error('Nothing was done. Using the proper arguments?')

    display_output = salt.output.get_printout(
        self.options.output, self.config)
    # display output using salt's outputter system
    print(display_output(ret))
    self.exit(os.EX_OK)
def run(self):
    '''
    Execute the salt-cloud command line.

    Parses CLI arguments, verifies the environment (as the salt-master
    user), sets up log-file logging, then dispatches exactly one of the
    mutually exclusive operations (query, list, destroy, action, function,
    profile, set-password, map, bootstrap) through a ``salt.cloud.Map``
    instance, finally printing the result via Salt's outputter system and
    exiting with ``EX_OK``.

    Side effects: may call ``sys.exit`` / ``self.exit`` directly, prompt
    the user for confirmation, and create/destroy cloud VMs.
    '''
    # Parse shell arguments
    self.parse_args()

    # salt-cloud must run as the same user as the salt-master so it can
    # read/write the master's files; fall back to the current user when
    # no 'user' is configured.
    salt_master_user = self.config.get('user')
    if salt_master_user is None:
        salt_master_user = salt.utils.get_user()

    if not check_user(salt_master_user):
        self.error(
            'If salt-cloud is running on a master machine, salt-cloud '
            'needs to run as the same user as the salt-master, \'{0}\'. '
            'If salt-cloud is not running on a salt-master, the '
            'appropriate write permissions must be granted to \'{1}\'. '
            'Please run salt-cloud as root, \'{0}\', or change '
            'permissions for \'{1}\'.'.format(
                salt_master_user,
                syspaths.CONFIG_DIR
            )
        )

    try:
        if self.config['verify_env']:
            verify_env(
                [os.path.dirname(self.config['conf_file'])],
                salt_master_user
            )
            logfile = self.config['log_file']
            if logfile is not None and not logfile.startswith('tcp://') \
                    and not logfile.startswith('udp://') \
                    and not logfile.startswith('file://'):
                # Logfile is not using Syslog, verify
                verify_files([logfile], salt_master_user)
    except (IOError, OSError) as err:
        log.error('Error while verifying the environment: {0}'.format(err))
        sys.exit(err.errno)

    # Setup log file logging
    self.setup_logfile_logger()
    verify_log(self.config)

    if self.options.update_bootstrap:
        # --update-bootstrap short-circuits: refresh the deploy script,
        # print the result and exit without touching the mapper.
        ret = salt.utils.cloud.update_bootstrap(self.config)
        display_output = salt.output.get_printout(
            self.options.output, self.config
        )
        print(display_output(ret))
        self.exit(salt.defaults.exitcodes.EX_OK)

    log.info('salt-cloud starting')
    try:
        mapper = salt.cloud.Map(self.config)
    except SaltCloudException as exc:
        msg = 'There was an error generating the mapper.'
        self.handle_exception(msg, exc)

    # When explicit instance names were given, prune the rendered map
    # down to only the profiles/names that were requested.
    names = self.config.get('names', None)
    if names is not None:
        filtered_rendered_map = {}
        for map_profile in mapper.rendered_map:
            filtered_map_profile = {}
            for name in mapper.rendered_map[map_profile]:
                if name in names:
                    filtered_map_profile[name] = mapper.rendered_map[
                        map_profile][name]
            if filtered_map_profile:
                filtered_rendered_map[map_profile] = filtered_map_profile
        mapper.rendered_map = filtered_rendered_map

    ret = {}

    # ------------------------------------------------------------------
    # Dispatch: exactly one of the branches below runs, selected by the
    # parsed options. Each branch stores its result in ``ret`` and routes
    # errors through self.handle_exception().
    # ------------------------------------------------------------------
    if self.selected_query_option is not None:
        if self.selected_query_option == 'list_providers':
            try:
                ret = mapper.provider_list()
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error listing providers: {0}'
                self.handle_exception(msg, exc)
        elif self.selected_query_option == 'list_profiles':
            provider = self.options.list_profiles
            try:
                ret = mapper.profile_list(provider)
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error listing profiles: {0}'
                self.handle_exception(msg, exc)
        elif self.config.get('map', None):
            log.info('Applying map from \'{0}\'.'.format(
                self.config['map']))
            try:
                ret = mapper.interpolated_map(
                    query=self.selected_query_option)
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error with a custom map: {0}'
                self.handle_exception(msg, exc)
        else:
            try:
                ret = mapper.map_providers_parallel(
                    query=self.selected_query_option)
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error with a map: {0}'
                self.handle_exception(msg, exc)

    elif self.options.list_locations is not None:
        try:
            ret = mapper.location_list(self.options.list_locations)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing locations: {0}'
            self.handle_exception(msg, exc)
    elif self.options.list_images is not None:
        try:
            ret = mapper.image_list(self.options.list_images)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing images: {0}'
            self.handle_exception(msg, exc)
    elif self.options.list_sizes is not None:
        try:
            ret = mapper.size_list(self.options.list_sizes)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing sizes: {0}'
            self.handle_exception(msg, exc)
    elif self.options.destroy and (self.config.get('names', None) or
                                   self.config.get('map', None)):
        # Destroying: either everything in a map file, or the named
        # instances — mixing both is rejected below.
        map_file = self.config.get('map', None)
        names = self.config.get('names', ())
        if map_file is not None:
            if names != ():
                msg = 'Supplying a mapfile, \'{0}\', in addition to instance names {1} ' \
                      'with the \'--destroy\' or \'-d\' function is not supported. ' \
                      'Please choose to delete either the entire map file or individual ' \
                      'instances.'.format(map_file, names)
                self.handle_exception(msg, SaltCloudSystemExit)
            log.info('Applying map from \'{0}\'.'.format(map_file))
            matching = mapper.delete_map(query='list_nodes')
        else:
            matching = mapper.get_running_by_names(
                names, profile=self.options.profile)

        if not matching:
            print('No machines were found to be destroyed')
            self.exit(salt.defaults.exitcodes.EX_OK)

        # Build a confirmation listing grouped by provider alias/driver.
        msg = 'The following virtual machines are set to be destroyed:\n'
        names = set()
        for alias, drivers in six.iteritems(matching):
            msg += ' {0}:\n'.format(alias)
            for driver, vms in six.iteritems(drivers):
                msg += ' {0}:\n'.format(driver)
                for name in vms:
                    msg += ' {0}\n'.format(name)
                    names.add(name)
        try:
            if self.print_confirm(msg):
                ret = mapper.destroy(names, cached=True)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error destroying machines: {0}'
            self.handle_exception(msg, exc)
    elif self.options.action and (self.config.get('names', None) or
                                  self.config.get('map', None)):
        # Run an action (e.g. start/stop) against named VMs or every VM
        # defined in the map file.
        if self.config.get('map', None):
            log.info('Applying map from \'{0}\'.'.format(
                self.config['map']))
            try:
                names = mapper.get_vmnames_by_action(self.options.action)
            except SaltCloudException as exc:
                msg = 'There was an error actioning virtual machines.'
                self.handle_exception(msg, exc)
        else:
            names = self.config.get('names', None)

        kwargs = {}
        machines = []
        msg = (
            'The following virtual machines are set to be actioned with '
            '"{0}":\n'.format(self.options.action))
        for name in names:
            if '=' in name:
                # This is obviously not a machine name, treat it as a kwarg
                # (maxsplit=1 keeps '=' characters inside the value intact)
                key, value = name.split('=', 1)
                kwargs[key] = value
            else:
                msg += ' {0}\n'.format(name)
                machines.append(name)
        names = machines

        try:
            if self.print_confirm(msg):
                ret = mapper.do_action(names, kwargs)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error actioning machines: {0}'
            self.handle_exception(msg, exc)
    elif self.options.function:
        # --function: all positional args must be key=value kwargs.
        kwargs = {}
        args = self.args[:]
        for arg in args[:]:
            if '=' in arg:
                key, value = arg.split('=', 1)
                kwargs[key] = value
                args.remove(arg)

        if args:
            self.error(
                'Any arguments passed to --function need to be passed '
                'as kwargs. Ex: image=ami-54cf5c3d. Remaining '
                'arguments: {0}'.format(args))
        try:
            ret = mapper.do_function(
                self.function_provider, self.function_name, kwargs)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error running the function: {0}'
            self.handle_exception(msg, exc)
    elif self.options.profile and self.config.get('names', False):
        try:
            ret = mapper.run_profile(
                self.options.profile, self.config.get('names'))
        except (SaltCloudException, Exception) as exc:
            msg = 'There was a profile error: {0}'
            self.handle_exception(msg, exc)
    elif self.options.set_password:
        username = self.credential_username
        provider_name = "salt.cloud.provider.{0}".format(
            self.credential_provider)
        # TODO: check if provider is configured
        # set the password
        salt.utils.cloud.store_password_in_keyring(provider_name, username)
    elif self.config.get('map', None) and \
            self.selected_query_option is None:
        # Apply a full map file: compute the create/destroy/existing
        # delta, show it, confirm, then run it.
        if len(mapper.rendered_map) == 0:
            sys.stderr.write('No nodes defined in this map')
            self.exit(salt.defaults.exitcodes.EX_GENERIC)
        try:
            ret = {}
            run_map = True

            log.info('Applying map from \'{0}\'.'.format(
                self.config['map']))
            dmap = mapper.map_data()

            msg = ''
            if 'errors' in dmap:
                # display profile errors
                msg += 'Found the following errors:\n'
                for profile_name, error in six.iteritems(dmap['errors']):
                    msg += ' {0}: {1}\n'.format(profile_name, error)
                sys.stderr.write(msg)
                sys.stderr.flush()

            msg = ''
            if 'existing' in dmap:
                msg += ('The following virtual machines already exist:\n')
                for name in dmap['existing']:
                    msg += ' {0}\n'.format(name)

            if dmap['create']:
                msg += ('The following virtual machines are set to be '
                        'created:\n')
                for name in dmap['create']:
                    msg += ' {0}\n'.format(name)

            if 'destroy' in dmap:
                msg += ('The following virtual machines are set to be '
                        'destroyed:\n')
                for name in dmap['destroy']:
                    msg += ' {0}\n'.format(name)

            if not dmap['create'] and not dmap.get('destroy', None):
                if not dmap.get('existing', None):
                    # nothing to create or destroy & nothing exists
                    print(msg)
                    self.exit(1)
                else:
                    # nothing to create or destroy, print existing
                    run_map = False

            if run_map:
                if self.print_confirm(msg):
                    ret = mapper.run_map(dmap)

                if self.config.get('parallel', False) is False:
                    log.info('Complete')

            if dmap.get('existing', None):
                # Report already-existing VMs; EC2 gets a special note
                # because terminated instances can keep their name.
                for name in dmap['existing']:
                    if 'ec2' in dmap['existing'][name]['provider']:
                        msg = 'Instance already exists, or is terminated and has the same name.'
                    else:
                        msg = 'Already running.'
                    ret[name] = {'Message': msg}

        except (SaltCloudException, Exception) as exc:
            msg = 'There was a query error: {0}'
            self.handle_exception(msg, exc)
    elif self.options.bootstrap:
        # Bootstrap an arbitrary (pre-existing) host over SSH. The first
        # non-kwarg positional argument, if any, becomes the minion id;
        # otherwise the host name is used.
        host = self.options.bootstrap
        if len(self.args) > 0:
            if '=' not in self.args[0]:
                minion_id = self.args.pop(0)
            else:
                minion_id = host
        else:
            minion_id = host

        vm_ = {
            'driver': '',
            'ssh_host': host,
            'name': minion_id,
        }
        args = self.args[:]
        for arg in args[:]:
            if '=' in arg:
                key, value = arg.split('=', 1)
                vm_[key] = value
                args.remove(arg)

        if args:
            self.error(
                'Any arguments passed to --bootstrap need to be passed as '
                'kwargs. Ex: ssh_username=larry. Remaining arguments: {0}'.format(args))

        try:
            ret = salt.utils.cloud.bootstrap(vm_, self.config)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error bootstrapping the minion: {0}'
            self.handle_exception(msg, exc)

    else:
        self.error('Nothing was done. Using the proper arguments?')

    display_output = salt.output.get_printout(
        self.options.output, self.config)
    # display output using salt's outputter system
    print(display_output(ret))
    self.exit(salt.defaults.exitcodes.EX_OK)
def prepare(self, proxydetails):
    '''
    Run the preparation sequence required to start a salt proxy minion.

    proxydetails
        Dict of proxy configuration; stored into ``self.config['proxy']``.
        If it contains a ``proxy_log`` key, that path is verified as the
        log file (otherwise log-file verification is skipped here).

    Side effects: parses CLI args, verifies the runtime environment,
    sets up log-file logging, migrates paths, may daemonize, writes the
    pidfile and constructs ``self.minion``. Exits the process on OSError
    during environment verification.

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).prepare()
    '''
    self.parse_args()

    try:
        if self.config['verify_env']:
            confd = self.config.get('default_include')
            if confd:
                # If 'default_include' is specified in config, then use it
                if '*' in confd:
                    # Value is of the form "minion.d/*.conf"
                    confd = os.path.dirname(confd)

                if not os.path.isabs(confd):
                    # If configured 'default_include' is not an absolute
                    # path, consider it relative to folder of 'conf_file'
                    # (/etc/salt by default)
                    confd = os.path.join(
                        os.path.dirname(self.config['conf_file']), confd)
            else:
                confd = os.path.join(
                    os.path.dirname(self.config['conf_file']), 'minion.d')
            verify_env(
                [
                    self.config['pki_dir'],
                    self.config['cachedir'],
                    self.config['sock_dir'],
                    self.config['extension_modules'],
                    confd,
                ],
                self.config['user'],
                permissive=self.config['permissive_pki_access'],
                pki_dir=self.config['pki_dir'],
            )
            if 'proxy_log' in proxydetails:
                logfile = proxydetails['proxy_log']
            else:
                logfile = None
            if logfile is not None and not logfile.startswith(
                    ('tcp://', 'udp://', 'file://')):
                # Logfile is not using Syslog, verify
                verify_files([logfile], self.config['user'])
    except OSError as err:
        # NOTE(review): unlike the sibling prepare() methods, this one
        # exits without logging the failure first — confirm intentional.
        sys.exit(err.errno)

    self.config['proxy'] = proxydetails

    self.setup_logfile_logger()

    logger.info('Setting up a Salt Proxy Minion "{0}"'.format(
        self.config['id']))
    migrations.migrate_paths(self.config)
    # Late import so logging works correctly
    import salt.minion
    # If the minion key has not been accepted, then Salt enters a loop
    # waiting for it, if we daemonize later then the minion could halt
    # the boot process waiting for a key to be accepted on the master.
    # This is the latest safe place to daemonize
    self.daemonize_if_required()
    self.set_pidfile()
    # Multiple masters get a MultiMinion; a single master gets ProxyMinion.
    if isinstance(self.config.get('master'), list):
        self.minion = salt.minion.MultiMinion(self.config)
    else:
        self.minion = salt.minion.ProxyMinion(self.config)
def run(self):
    """
    Execute the salt command line.

    Verifies the log file (when configured), sets up file logging,
    builds a LocalClient, then either runs in batch mode or issues a
    single (possibly async) command. Per-minion return codes are
    collected and, if any minion returned non-zero, the process exits
    with status 11. Exits with status 2 on client-creation or eauth
    failure.
    """
    self.parse_args()

    if self.config["verify_env"]:
        if not self.config["log_file"].startswith(("tcp://", "udp://", "file://")):
            # Logfile is not using Syslog, verify
            verify_files([self.config["log_file"]], self.config["user"])

    # Setup file logging!
    self.setup_logfile_logger()

    try:
        # We don't need to bail on config file permission errors
        # if the CLI
        # process is run with the -a flag
        skip_perm_errors = self.options.eauth != ""
        local = salt.client.get_local_client(
            self.get_config_file_path(),
            skip_perm_errors=skip_perm_errors)
    except SaltClientError as exc:
        self.exit(2, "{0}\n".format(exc))
        return

    if self.options.batch:
        # Batch mode: execute against minions in sliding windows.
        eauth = {}
        if "token" in self.config:
            eauth["token"] = self.config["token"]

        # If using eauth and a token hasn't already been loaded into
        # kwargs, prompt the user to enter auth credentials
        if "token" not in eauth and self.options.eauth:
            resolver = salt.auth.Resolver(self.config)
            res = resolver.cli(self.options.eauth)
            if self.options.mktoken and res:
                tok = resolver.token_cli(self.options.eauth, res)
                if tok:
                    eauth["token"] = tok.get("token", "")
            if not res:
                sys.exit(2)
            eauth.update(res)
            eauth["eauth"] = self.options.eauth

        if self.options.static:
            # --static: accumulate all returns, output once at the end.
            batch = salt.cli.batch.Batch(self.config, eauth=eauth, quiet=True)
            ret = {}
            for res in batch.run():
                ret.update(res)
            self._output_ret(ret, "")
        else:
            batch = salt.cli.batch.Batch(self.config, eauth=eauth)
            # Printing the output is already taken care of in run() itself
            for res in batch.run():
                pass
    else:
        if self.options.timeout <= 0:
            # Non-positive timeout means "use the client's default".
            self.options.timeout = local.opts["timeout"]

        kwargs = {
            "tgt": self.config["tgt"],
            "fun": self.config["fun"],
            "arg": self.config["arg"],
            "timeout": self.options.timeout,
            "show_timeout": self.options.show_timeout,
            "show_jid": self.options.show_jid,
        }

        if "token" in self.config:
            # Prefer the master's root key if readable; fall back to the
            # user's token when the cache file cannot be opened.
            try:
                with salt.utils.fopen(
                        os.path.join(self.config["cachedir"], ".root_key"),
                        "r") as fp_:
                    kwargs["key"] = fp_.readline()
            except IOError:
                kwargs["token"] = self.config["token"]

        if self.selected_target_option:
            kwargs["expr_form"] = self.selected_target_option
        else:
            kwargs["expr_form"] = "glob"

        # 'return' is a Python keyword, hence getattr instead of
        # attribute access.
        if getattr(self.options, "return"):
            kwargs["ret"] = getattr(self.options, "return")

        # If using eauth and a token hasn't already been loaded into
        # kwargs, prompt the user to enter auth credentials
        if "token" not in kwargs and self.options.eauth:
            resolver = salt.auth.Resolver(self.config)
            res = resolver.cli(self.options.eauth)
            if self.options.mktoken and res:
                tok = resolver.token_cli(self.options.eauth, res)
                if tok:
                    kwargs["token"] = tok.get("token", "")
            if not res:
                sys.exit(2)
            kwargs.update(res)
            kwargs["eauth"] = self.options.eauth

        if self.config["async"]:
            # Fire-and-forget: print the job ID and return immediately.
            jid = local.cmd_async(**kwargs)
            print_cli("Executed command with job ID: {0}".format(jid))
            return
        retcodes = []
        try:
            # local will be None when there was an error
            if local:
                if self.options.subset:
                    cmd_func = local.cmd_subset
                    kwargs["sub"] = self.options.subset
                    kwargs["cli"] = True
                else:
                    cmd_func = local.cmd_cli

                if self.options.static:
                    if self.options.verbose:
                        kwargs["verbose"] = True
                    full_ret = local.cmd_full_return(**kwargs)
                    ret, out, retcode = self._format_ret(full_ret)
                    self._output_ret(ret, out)
                elif self.config["fun"] == "sys.doc":
                    # sys.doc merges all minion docs into one dict and
                    # prints once.
                    ret = {}
                    out = ""
                    for full_ret in local.cmd_cli(**kwargs):
                        ret_, out, retcode = self._format_ret(full_ret)
                        ret.update(ret_)
                    self._output_ret(ret, out)
                else:
                    if self.options.verbose:
                        kwargs["verbose"] = True
                    ret = {}
                    for full_ret in cmd_func(**kwargs):
                        ret_, out, retcode = self._format_ret(full_ret)
                        retcodes.append(retcode)
                        self._output_ret(ret_, out)
                        ret.update(ret_)

                # Returns summary
                if self.config["cli_summary"] is True:
                    if self.config["fun"] != "sys.doc":
                        if self.options.output is None:
                            self._print_returns_summary(ret)

            # NOTE: Return code is set here based on if all minions
            # returned 'ok' with a retcode of 0.
            # This is the final point before the 'salt' cmd returns,
            # which is why we set the retcode here.
            if retcodes.count(0) < len(retcodes):
                sys.exit(11)

        except (SaltInvocationError, EauthAuthenticationError) as exc:
            ret = str(exc)
            out = ""
            self._output_ret(ret, out)
def prepare(self):
    '''
    Run the preparation sequence required to start a salt master server.

    Verifies/creates the master's runtime directories (plus RAET-specific
    ones when that transport is configured), verifies the log file with a
    temporarily restricted umask, clears cached syndic files, checks that
    the publish/ret ports can be bound, then constructs ``self.master``,
    daemonizes if requested, writes the pidfile and notifies systemd.

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).prepare()
    '''
    super(Master, self).prepare()

    try:
        if self.config['verify_env']:
            v_dirs = [
                self.config['pki_dir'],
                os.path.join(self.config['pki_dir'], 'minions'),
                os.path.join(self.config['pki_dir'], 'minions_pre'),
                os.path.join(self.config['pki_dir'], 'minions_denied'),
                os.path.join(self.config['pki_dir'], 'minions_autosign'),
                os.path.join(self.config['pki_dir'], 'minions_rejected'),
                self.config['cachedir'],
                os.path.join(self.config['cachedir'], 'jobs'),
                os.path.join(self.config['cachedir'], 'proc'),
                self.config['sock_dir'],
                self.config['token_dir'],
                self.config['syndic_dir'],
                self.config['sqlite_queue_dir'],
            ]
            if self.config.get('transport') == 'raet':
                # RAET keeps its own key-state directories.
                v_dirs.append(os.path.join(self.config['pki_dir'], 'accepted'))
                v_dirs.append(os.path.join(self.config['pki_dir'], 'pending'))
                v_dirs.append(os.path.join(self.config['pki_dir'], 'rejected'))
                v_dirs.append(os.path.join(self.config['cachedir'], 'raet'))
            verify_env(
                v_dirs,
                self.config['user'],
                permissive=self.config['permissive_pki_access'],
                pki_dir=self.config['pki_dir'],
            )
            logfile = self.config['log_file']
            if logfile is not None and not logfile.startswith(
                    ('tcp://', 'udp://', 'file://')):
                # Logfile is not using Syslog, verify.
                # Restrict the umask while (possibly) creating the log
                # file so it is not group/world writable; restore after.
                current_umask = os.umask(0o027)
                verify_files([logfile], self.config['user'])
                os.umask(current_umask)
            # Clear out syndics from cachedir
            for syndic_file in os.listdir(self.config['syndic_dir']):
                os.remove(os.path.join(self.config['syndic_dir'], syndic_file))
    except OSError as err:
        log.exception('Failed to prepare salt environment')
        self.shutdown(err.errno)

    self.setup_logfile_logger()
    verify_log(self.config)
    log.info('Setting up the Salt Master')

    # TODO: AIO core is separate from transport
    if self.config['transport'].lower() in ('zeromq', 'tcp'):
        # Bail out early if the configured ports cannot be bound.
        if not verify_socket(self.config['interface'],
                             self.config['publish_port'],
                             self.config['ret_port']):
            self.shutdown(4, 'The ports are not available to bind')
        self.config['interface'] = ip_bracket(self.config['interface'])
        migrations.migrate_paths(self.config)

        # Late import so logging works correctly
        import salt.master
        self.master = salt.master.Master(self.config)
    else:
        # Add a udp port check here
        import salt.daemons.flo
        self.master = salt.daemons.flo.IofloMaster(self.config)
    self.daemonize_if_required()
    self.set_pidfile()
    salt.utils.process.notify_systemd()
def prepare(self):
    '''
    Run the preparation sequence required to start a salt proxy minion.

    Requires ``--proxyid`` on the command line (raises SaltSystemExit
    otherwise); the value becomes ``self.config['id']``. Verifies the
    minion's runtime directories (plus RAET-specific ones when that
    transport is configured), verifies the log file with a temporarily
    restricted umask, then daemonizes, writes the pidfile and constructs
    ``self.minion`` for the configured transport.

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).prepare()
    '''
    super(ProxyMinion, self).prepare()

    if not self.values.proxyid:
        raise SaltSystemExit('salt-proxy requires --proxyid')

    # Proxies get their ID from the command line. This may need to change in
    # the future.
    self.config['id'] = self.values.proxyid

    try:
        if self.config['verify_env']:
            confd = self.config.get('default_include')
            if confd:
                # If 'default_include' is specified in config, then use it
                if '*' in confd:
                    # Value is of the form "minion.d/*.conf"
                    confd = os.path.dirname(confd)

                if not os.path.isabs(confd):
                    # If configured 'default_include' is not an absolute
                    # path, consider it relative to folder of 'conf_file'
                    # (/etc/salt by default)
                    confd = os.path.join(
                        os.path.dirname(self.config['conf_file']), confd
                    )
            else:
                confd = os.path.join(
                    os.path.dirname(self.config['conf_file']), 'minion.d'
                )
            v_dirs = [
                self.config['pki_dir'],
                self.config['cachedir'],
                self.config['sock_dir'],
                self.config['extension_modules'],
                confd,
            ]
            if self.config.get('transport') == 'raet':
                # RAET keeps its own key-state directories.
                v_dirs.append(os.path.join(self.config['pki_dir'], 'accepted'))
                v_dirs.append(os.path.join(self.config['pki_dir'], 'pending'))
                v_dirs.append(os.path.join(self.config['pki_dir'], 'rejected'))
                v_dirs.append(os.path.join(self.config['cachedir'], 'raet'))
            verify_env(
                v_dirs,
                self.config['user'],
                permissive=self.config['permissive_pki_access'],
                pki_dir=self.config['pki_dir'],
            )
            # A dedicated 'proxy_log' takes precedence over 'log_file'.
            if 'proxy_log' in self.config:
                logfile = self.config['proxy_log']
            else:
                logfile = self.config['log_file']
            if logfile is not None and not logfile.startswith(
                    ('tcp://', 'udp://', 'file://')):
                # Logfile is not using Syslog, verify.
                # Restrict the umask while (possibly) creating the log
                # file so it is not group/world writable; restore after.
                current_umask = os.umask(0o027)
                verify_files([logfile], self.config['user'])
                os.umask(current_umask)
    except OSError as err:
        log.exception('Failed to prepare salt environment')
        self.shutdown(err.errno)

    self.setup_logfile_logger()
    verify_log(self.config)
    log.info(
        'Setting up a Salt Proxy Minion "{0}"'.format(
            self.config['id']
        )
    )
    migrations.migrate_paths(self.config)
    # TODO: AIO core is separate from transport
    if self.config['transport'].lower() in ('zeromq', 'tcp'):
        # Late import so logging works correctly
        import salt.minion
        # If the minion key has not been accepted, then Salt enters a loop
        # waiting for it, if we daemonize later then the minion could halt
        # the boot process waiting for a key to be accepted on the master.
        # This is the latest safe place to daemonize
        self.daemonize_if_required()
        self.set_pidfile()
        # TODO Proxy minions don't currently support failover
        self.minion = salt.minion.ProxyMinion(self.config)
    else:
        # For proxy minions, this doesn't work yet.
        import salt.daemons.flo
        self.daemonize_if_required()
        self.set_pidfile()
        self.minion = salt.daemons.flo.IofloMinion(self.config)
def run(self):
    '''
    Execute the salt-cloud command line (legacy, Python-2 era variant:
    uses ``urllib2`` and ``dict.iteritems``).

    Requires libcloud >= 0.11.4. Parses CLI arguments, verifies the
    environment, sets up log-file logging, optionally self-updates the
    bootstrap-salt.sh deploy script, then dispatches exactly one of the
    mutually exclusive operations (query/list, destroy, action, function,
    profile, map) through a ``salt.cloud.Map`` instance, printing the
    result via Salt's outputter system and exiting with status 0.
    '''
    if HAS_LIBCLOUD is False:
        self.error('salt-cloud requires >= libcloud 0.11.4')

    libcloud_version()

    # Parse shell arguments
    self.parse_args()

    salt_master_user = self.config.get('user', salt.utils.get_user())
    if salt_master_user is not None and not check_user(salt_master_user):
        self.error(
            'salt-cloud needs to run as the same user as salt-master, '
            '{0!r}, but was unable to switch credentials. Please run '
            'salt-cloud as root or as {0!r}'.format(salt_master_user)
        )

    try:
        if self.config['verify_env']:
            verify_env(
                [os.path.dirname(self.config['conf_file'])],
                salt_master_user
            )
            logfile = self.config['log_file']
            if logfile is not None and not logfile.startswith('tcp://') \
                    and not logfile.startswith('udp://') \
                    and not logfile.startswith('file://'):
                # Logfile is not using Syslog, verify
                verify_files([logfile], salt_master_user)
    except (IOError, OSError) as err:
        log.error('Error while verifying the environment: {0}'.format(err))
        sys.exit(err.errno)

    # Setup log file logging
    self.setup_logfile_logger()

    if self.options.update_bootstrap:
        log.debug('Updating the bootstrap-salt.sh script to latest stable')
        import urllib2
        # NOTE(review): plain-HTTP download of a script that will later be
        # executed on targets — consider HTTPS with verification.
        url = 'http://bootstrap.saltstack.org'
        req = urllib2.urlopen(url)
        if req.getcode() != 200:
            self.error(
                'Failed to download the latest stable version of the '
                'bootstrap-salt.sh script from {0}. HTTP error: '
                '{1}'.format(
                    url, req.getcode()
                )
            )

        # Get the path to the built-in deploy scripts directory
        builtin_deploy_dir = os.path.join(
            os.path.dirname(__file__),
            'deploy'
        )

        # Compute the search path from the current loaded opts conf_file
        # value
        deploy_d_from_conf_file = os.path.join(
            os.path.dirname(self.config['conf_file']),
            'cloud.deploy.d'
        )

        # Compute the search path using the install time defined
        # syspaths.CONF_DIR
        deploy_d_from_syspaths = os.path.join(
            syspaths.CONFIG_DIR,
            'cloud.deploy.d'
        )

        # Get a copy of any defined search paths, flagging them not to
        # create parent
        deploy_scripts_search_paths = []
        for entry in self.config.get('deploy_scripts_search_path', []):
            if entry.startswith(builtin_deploy_dir):
                # We won't write the updated script to the built-in deploy
                # directory
                continue

            if entry in (deploy_d_from_conf_file, deploy_d_from_syspaths):
                # Allow parent directories to be made
                deploy_scripts_search_paths.append((entry, True))
            else:
                deploy_scripts_search_paths.append((entry, False))

        # In case the user is not using defaults and the computed
        # 'cloud.deploy.d' from conf_file and syspaths is not included, add
        # them
        if deploy_d_from_conf_file not in deploy_scripts_search_paths:
            deploy_scripts_search_paths.append(
                (deploy_d_from_conf_file, True)
            )
        if deploy_d_from_syspaths not in deploy_scripts_search_paths:
            deploy_scripts_search_paths.append(
                (deploy_d_from_syspaths, True)
            )

        # Try each candidate directory in order; the first writable one
        # receives the updated script and the process exits.
        for entry, makedirs in deploy_scripts_search_paths:
            if makedirs and not os.path.isdir(entry):
                try:
                    os.makedirs(entry)
                except (OSError, IOError) as err:
                    log.info(
                        'Failed to create directory {0!r}'.format(entry)
                    )
                    continue

            if not is_writeable(entry):
                log.debug(
                    'The {0!r} is not writeable. Continuing...'.format(
                        entry
                    )
                )
                continue

            deploy_path = os.path.join(entry, 'bootstrap-salt.sh')
            try:
                print(
                    '\nUpdating \'bootstrap-salt.sh\':'
                    '\n\tSource: {0}'
                    '\n\tDestination: {1}'.format(
                        url,
                        deploy_path
                    )
                )
                with salt.utils.fopen(deploy_path, 'w') as fp_:
                    fp_.write(req.read())

                # We were able to update, no need to continue trying to
                # write up the search path
                self.exit(0)
            except (OSError, IOError) as err:
                log.debug(
                    'Failed to write the updated script: {0}'.format(err)
                )
                continue

        self.error('Failed to update the bootstrap script')

    log.info('salt-cloud starting')
    mapper = salt.cloud.Map(self.config)

    ret = {}

    # ------------------------------------------------------------------
    # Dispatch: exactly one of the branches below runs, selected by the
    # parsed options. Each branch stores its result in ``ret`` and routes
    # errors through self.handle_exception().
    # ------------------------------------------------------------------
    if self.selected_query_option is not None:
        if self.selected_query_option == 'list_providers':
            try:
                ret = mapper.provider_list()
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error listing providers: {0}'
                self.handle_exception(msg, exc)
        elif self.config.get('map', None):
            log.info('Applying map from {0!r}.'.format(self.config['map']))
            try:
                ret = mapper.interpolated_map(
                    query=self.selected_query_option
                )
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error with a custom map: {0}'
                self.handle_exception(msg, exc)
        else:
            try:
                ret = mapper.map_providers_parallel(
                    query=self.selected_query_option
                )
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error with a map: {0}'
                self.handle_exception(msg, exc)

    elif self.options.list_locations is not None:
        try:
            ret = mapper.location_list(
                self.options.list_locations
            )
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing locations: {0}'
            self.handle_exception(msg, exc)
    elif self.options.list_images is not None:
        try:
            ret = mapper.image_list(
                self.options.list_images
            )
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing images: {0}'
            self.handle_exception(msg, exc)
    elif self.options.list_sizes is not None:
        try:
            ret = mapper.size_list(
                self.options.list_sizes
            )
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing sizes: {0}'
            self.handle_exception(msg, exc)
    elif self.options.destroy and (self.config.get('names', None) or
                                   self.config.get('map', None)):
        # Destroying: everything in a map file, or the named instances.
        if self.config.get('map', None):
            log.info('Applying map from {0!r}.'.format(self.config['map']))
            matching = mapper.delete_map(query='list_nodes')
        else:
            matching = mapper.get_running_by_names(
                self.config.get('names', ())
            )

        if not matching:
            print('No machines were found to be destroyed')
            self.exit()

        # Build a confirmation listing grouped by provider alias/driver.
        msg = 'The following virtual machines are set to be destroyed:\n'
        names = set()
        for alias, drivers in matching.iteritems():
            msg += ' {0}:\n'.format(alias)
            for driver, vms in drivers.iteritems():
                msg += ' {0}:\n'.format(driver)
                for name in vms:
                    msg += ' {0}\n'.format(name)
                    names.add(name)
        try:
            if self.print_confirm(msg):
                ret = mapper.destroy(names, cached=True)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error destroying machines: {0}'
            self.handle_exception(msg, exc)
    elif self.options.action and (self.config.get('names', None) or
                                  self.config.get('map', None)):
        if self.config.get('map', None):
            log.info('Applying map from {0!r}.'.format(self.config['map']))
            names = mapper.get_vmnames_by_action(self.options.action)
        else:
            names = self.config.get('names', None)

        kwargs = {}
        machines = []
        msg = (
            'The following virtual machines are set to be actioned with '
            '"{0}":\n'.format(
                self.options.action
            )
        )
        for name in names:
            if '=' in name:
                # This is obviously not a machine name, treat it as a kwarg
                # NOTE(review): split('=') without maxsplit truncates
                # values that themselves contain '=' (the newer variant of
                # this method uses split('=', 1)) — candidate fix.
                comps = name.split('=')
                kwargs[comps[0]] = comps[1]
            else:
                msg += ' {0}\n'.format(name)
                machines.append(name)
        names = machines

        try:
            if self.print_confirm(msg):
                ret = mapper.do_action(names, kwargs)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error actioning machines: {0}'
            self.handle_exception(msg, exc)
    elif self.options.function:
        kwargs = {}
        args = self.args[:]
        for arg in args[:]:
            if '=' in arg:
                # NOTE(review): same missing-maxsplit caveat as above.
                key, value = arg.split('=')
                kwargs[key] = value
                args.remove(arg)

        if args:
            self.error(
                'Any arguments passed to --function need to be passed '
                'as kwargs. Ex: image=ami-54cf5c3d. Remaining '
                'arguments: {0}'.format(args)
            )
        try:
            ret = mapper.do_function(
                self.function_provider, self.function_name, kwargs
            )
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error running the function: {0}'
            self.handle_exception(msg, exc)
    elif self.options.profile and self.config.get('names', False):
        try:
            ret = mapper.run_profile(
                self.options.profile,
                self.config.get('names')
            )
        except (SaltCloudException, Exception) as exc:
            msg = 'There was a profile error: {0}'
            self.handle_exception(msg, exc)
    elif self.config.get('map', None) and \
            self.selected_query_option is None:
        # Apply a full map file: compute the create/destroy/existing
        # delta, show it, confirm, then run it.
        if len(mapper.rendered_map) == 0:
            sys.stderr.write('No nodes defined in this map')
            self.exit(1)
        try:
            ret = {}
            run_map = True
            log.info('Applying map from {0!r}.'.format(self.config['map']))
            dmap = mapper.map_data()

            msg = ''
            if 'errors' in dmap:
                # display profile errors
                msg += 'Found the following errors:\n'
                for profile_name, error in dmap['errors'].iteritems():
                    msg += ' {0}: {1}\n'.format(profile_name, error)
                sys.stderr.write(msg)
                sys.stderr.flush()

            msg = ''
            if 'existing' in dmap:
                msg += ('The following virtual machines were found '
                        'already running:\n')
                for name in dmap['existing']:
                    msg += ' {0}\n'.format(name)

            if dmap['create']:
                msg += ('The following virtual machines are set to be '
                        'created:\n')
                for name in dmap['create']:
                    msg += ' {0}\n'.format(name)

            if 'destroy' in dmap:
                msg += ('The following virtual machines are set to be '
                        'destroyed:\n')
                for name in dmap['destroy']:
                    msg += ' {0}\n'.format(name)

            if not dmap['create'] and not dmap.get('destroy', None):
                if not dmap.get('existing', None):
                    # nothing to create or destroy & nothing exists
                    print(msg)
                    self.exit(1)
                else:
                    # nothing to create or destroy, print existing
                    run_map = False

            if run_map:
                if self.print_confirm(msg):
                    ret = mapper.run_map(dmap)

                if self.config.get('parallel', False) is False:
                    log.info('Complete')

                if dmap.get('existing', None):
                    for name in dmap['existing'].keys():
                        ret[name] = {'Message': 'Already running'}

        except (SaltCloudException, Exception) as exc:
            msg = 'There was a query error: {0}'
            self.handle_exception(msg, exc)
    else:
        self.error('Nothing was done. Using the proper arguments?')

    display_output = salt.output.get_printout(
        self.options.output, self.config
    )
    # display output using salt's outputter system
    print(display_output(ret))
    self.exit(0)
def run(self):
    '''
    Execute the salt-cloud command line.

    Top-level dispatcher: verifies the environment, optionally updates the
    bundled bootstrap script, builds a ``salt.cloud.Map`` and routes to
    exactly one action branch (query/list/destroy/action/function/profile/
    map) based on the parsed CLI options.  Exits the process when done.
    '''
    # Parse shell arguments
    self.parse_args()

    # salt-cloud must run with the salt-master's credentials so it can
    # read/write the master's config and cache directories.
    salt_master_user = self.config.get('user', getpass.getuser())
    if salt_master_user is not None and not check_user(salt_master_user):
        self.error(
            'salt-cloud needs to run as the same user as salt-master, '
            '{0!r}, but was unable to switch credentials. Please run '
            'salt-cloud as root or as {0!r}'.format(salt_master_user))

    try:
        if self.config['verify_env']:
            verify_env([os.path.dirname(self.config['conf_file'])],
                       salt_master_user)
            logfile = self.config['log_file']
            if logfile is not None and not logfile.startswith('tcp://') \
                    and not logfile.startswith('udp://') \
                    and not logfile.startswith('file://'):
                # Logfile is not using Syslog, verify
                verify_files([logfile], salt_master_user)
    except (IOError, OSError) as err:
        log.error('Error while verifying the environment: {0}'.format(err))
        sys.exit(err.errno)

    # Setup log file logging
    self.setup_logfile_logger()

    if self.options.update_bootstrap:
        # Download the latest bootstrap-salt.sh and write it to the first
        # writeable entry of the deploy-scripts search path, then exit.
        log.debug('Updating the bootstrap-salt.sh script to latest stable')
        import urllib2
        url = 'http://bootstrap.saltstack.org'
        req = urllib2.urlopen(url)
        if req.getcode() != 200:
            self.error(
                'Failed to download the latest stable version of the '
                'bootstrap-salt.sh script from {0}. HTTP error: '
                '{1}'.format(url, req.getcode()))

        # Get the path to the built-in deploy scripts directory
        builtin_deploy_dir = os.path.join(os.path.dirname(__file__),
                                          'deploy')

        # Compute the search path from the current loaded opts conf_file
        # value
        deploy_d_from_conf_file = os.path.join(
            os.path.dirname(self.config['conf_file']), 'cloud.deploy.d')

        # Compute the search path using the install time defined
        # syspaths.CONF_DIR
        deploy_d_from_syspaths = os.path.join(syspaths.CONFIG_DIR,
                                              'cloud.deploy.d')

        # Get a copy of any defined search paths, flagging them not to
        # create parent
        deploy_scripts_search_paths = []
        for entry in self.config.get('deploy_scripts_search_path', []):
            if entry.startswith(builtin_deploy_dir):
                # We won't write the updated script to the built-in deploy
                # directory
                continue

            if entry in (deploy_d_from_conf_file, deploy_d_from_syspaths):
                # Allow parent directories to be made
                deploy_scripts_search_paths.append((entry, True))
            else:
                deploy_scripts_search_paths.append((entry, False))
        # In case the user is not using defaults and the computed
        # 'cloud.deploy.d' from conf_file and syspaths is not included, add
        # them
        if deploy_d_from_conf_file not in deploy_scripts_search_paths:
            deploy_scripts_search_paths.append(
                (deploy_d_from_conf_file, True))
        if deploy_d_from_syspaths not in deploy_scripts_search_paths:
            deploy_scripts_search_paths.append(
                (deploy_d_from_syspaths, True))

        for entry, makedirs in deploy_scripts_search_paths:
            if makedirs and not os.path.isdir(entry):
                try:
                    os.makedirs(entry)
                except (OSError, IOError) as err:
                    log.info(
                        'Failed to create directory {0!r}'.format(entry))
                    continue

            if not is_writeable(entry):
                log.debug(
                    'The {0!r} is not writeable. Continuing...'.format(
                        entry))
                continue

            deploy_path = os.path.join(entry, 'bootstrap-salt.sh')
            try:
                print('\nUpdating \'bootstrap-salt.sh\':'
                      '\n\tSource: {0}'
                      '\n\tDestination: {1}'.format(url, deploy_path))
                with salt.utils.fopen(deploy_path, 'w') as fp_:
                    fp_.write(req.read())
                # We were able to update, no need to continue trying to
                # write up the search path
                self.exit(0)
            except (OSError, IOError) as err:
                log.debug(
                    'Failed to write the updated script: {0}'.format(err))
                continue

        # Every search-path entry was skipped or failed to write.
        self.error('Failed to update the bootstrap script')

    log.info('salt-cloud starting')
    mapper = salt.cloud.Map(self.config)

    ret = {}

    # --- Action dispatch: exactly one of the following branches runs. ---
    if self.selected_query_option is not None:
        if self.selected_query_option == 'list_providers':
            try:
                ret = mapper.provider_list()
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error listing providers: {0}'
                self.handle_exception(msg, exc)
        elif self.config.get('map', None):
            log.info('Applying map from {0!r}.'.format(self.config['map']))
            try:
                ret = mapper.interpolated_map(
                    query=self.selected_query_option)
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error with a custom map: {0}'
                self.handle_exception(msg, exc)
        else:
            try:
                ret = mapper.map_providers_parallel(
                    query=self.selected_query_option)
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error with a map: {0}'
                self.handle_exception(msg, exc)

    elif self.options.list_locations is not None:
        try:
            ret = mapper.location_list(self.options.list_locations)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing locations: {0}'
            self.handle_exception(msg, exc)

    elif self.options.list_images is not None:
        try:
            ret = mapper.image_list(self.options.list_images)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing images: {0}'
            self.handle_exception(msg, exc)

    elif self.options.list_sizes is not None:
        try:
            ret = mapper.size_list(self.options.list_sizes)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing sizes: {0}'
            self.handle_exception(msg, exc)

    elif self.options.destroy and (self.config.get('names', None) or
                                   self.config.get('map', None)):
        # Destroy VMs either from a map file or from explicit names;
        # the user is asked to confirm before anything is deleted.
        if self.config.get('map', None):
            log.info('Applying map from {0!r}.'.format(self.config['map']))
            matching = mapper.delete_map(query='list_nodes')
        else:
            matching = mapper.get_running_by_names(
                self.config.get('names', ()))

        if not matching:
            print('No machines were found to be destroyed')
            self.exit()

        msg = 'The following virtual machines are set to be destroyed:\n'
        names = set()
        for alias, drivers in matching.iteritems():
            msg += ' {0}:\n'.format(alias)
            for driver, vms in drivers.iteritems():
                msg += ' {0}:\n'.format(driver)
                for name in vms:
                    msg += ' {0}\n'.format(name)
                    names.add(name)
        try:
            if self.print_confirm(msg):
                ret = mapper.destroy(names, cached=True)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error destroying machines: {0}'
            self.handle_exception(msg, exc)

    elif self.options.action and (self.config.get('names', None) or
                                  self.config.get('map', None)):
        # Run a named driver action against a set of VMs; 'key=value'
        # positional args are split out as kwargs for the action.
        if self.config.get('map', None):
            log.info('Applying map from {0!r}.'.format(self.config['map']))
            names = mapper.get_vmnames_by_action(self.options.action)
        else:
            names = self.config.get('names', None)

        kwargs = {}
        machines = []
        msg = (
            'The following virtual machines are set to be actioned with '
            '"{0}":\n'.format(self.options.action))
        for name in names:
            if '=' in name:
                # This is obviously not a machine name, treat it as a kwarg
                comps = name.split('=')
                kwargs[comps[0]] = comps[1]
            else:
                msg += ' {0}\n'.format(name)
                machines.append(name)
        names = machines

        try:
            if self.print_confirm(msg):
                ret = mapper.do_action(names, kwargs)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error actioning machines: {0}'
            self.handle_exception(msg, exc)

    elif self.options.function:
        # Provider function: all positional args must be 'key=value'.
        kwargs = {}
        args = self.args[:]
        for arg in args[:]:
            if '=' in arg:
                key, value = arg.split('=')
                kwargs[key] = value
                args.remove(arg)
        if args:
            self.error(
                'Any arguments passed to --function need to be passed '
                'as kwargs. Ex: image=ami-54cf5c3d. Remaining '
                'arguments: {0}'.format(args))
        try:
            ret = mapper.do_function(self.function_provider,
                                     self.function_name, kwargs)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error running the function: {0}'
            self.handle_exception(msg, exc)

    elif self.options.profile and self.config.get('names', False):
        try:
            ret = mapper.run_profile(self.options.profile,
                                     self.config.get('names'))
        except (SaltCloudException, Exception) as exc:
            msg = 'There was a profile error: {0}'
            self.handle_exception(msg, exc)

    elif self.config.get('map', None) and \
            self.selected_query_option is None:
        # Full map run: compute the create/destroy delta, show it, confirm,
        # then apply.
        if len(mapper.rendered_map) == 0:
            sys.stderr.write('No nodes defined in this map')
            self.exit(1)
        try:
            ret = {}
            run_map = True

            log.info('Applying map from {0!r}.'.format(self.config['map']))
            dmap = mapper.map_data()

            msg = ''
            if 'errors' in dmap:
                # display profile errors
                msg += 'Found the following errors:\n'
                for profile_name, error in dmap['errors'].iteritems():
                    msg += ' {0}: {1}\n'.format(profile_name, error)
                sys.stderr.write(msg)
                sys.stderr.flush()

            msg = ''
            if 'existing' in dmap:
                msg += ('The following virtual machines were found '
                        'already running:\n')
                for name in dmap['existing']:
                    msg += ' {0}\n'.format(name)

            if dmap['create']:
                msg += ('The following virtual machines are set to be '
                        'created:\n')
                for name in dmap['create']:
                    msg += ' {0}\n'.format(name)

            if 'destroy' in dmap:
                msg += ('The following virtual machines are set to be '
                        'destroyed:\n')
                for name in dmap['destroy']:
                    msg += ' {0}\n'.format(name)

            if not dmap['create'] and not dmap.get('destroy', None):
                if not dmap.get('existing', None):
                    # nothing to create or destroy & nothing exists
                    print(msg)
                    self.exit(1)
                else:
                    # nothing to create or destroy, print existing
                    run_map = False

            if run_map:
                if self.print_confirm(msg):
                    ret = mapper.run_map(dmap)

                if self.config.get('parallel', False) is False:
                    log.info('Complete')

            if dmap.get('existing', None):
                for name in dmap['existing'].keys():
                    ret[name] = {'Message': 'Already running'}

        except (SaltCloudException, Exception) as exc:
            msg = 'There was a query error: {0}'
            self.handle_exception(msg, exc)

    else:
        self.error('Nothing was done. Using the proper arguments?')

    display_output = salt.output.get_printout(self.options.output,
                                              self.config)
    # display output using salt's outputter system
    print(display_output(ret))
    self.exit(0)
def run(self):
    '''
    Execute the salt-cloud command line.

    Validates the libcloud dependency and the runtime environment, sets up
    logging, and — when ``--update-bootstrap`` is passed — downloads the
    latest ``bootstrap-salt.sh`` and writes it to the first writeable entry
    of the deploy-scripts search path, exiting on success.

    Fix: the ``except (OSError, IOError), err:`` clause used the comma
    form of the except statement, which was deprecated in Python 2.6 and
    removed in Python 3 (PEP 3110); it now uses ``as err`` like every
    other handler in this file.
    '''
    if HAS_LIBCLOUD is False:
        self.error('salt-cloud requires >= libcloud 0.11.4')

    libcloud_version()

    # Parse shell arguments
    self.parse_args()

    # salt-cloud must run with the salt-master's credentials so it can
    # read/write the master's config and cache directories.
    salt_master_user = self.config.get('user', getpass.getuser())
    if salt_master_user is not None and not check_user(salt_master_user):
        self.error(
            'salt-cloud needs to run as the same user as salt-master, '
            '{0!r}, but was unable to switch credentials. Please run '
            'salt-cloud as root or as {0!r}'.format(salt_master_user)
        )

    try:
        if self.config['verify_env']:
            verify_env(
                [os.path.dirname(self.config['conf_file'])],
                salt_master_user
            )
            logfile = self.config['log_file']
            if logfile is not None and not logfile.startswith('tcp://') \
                    and not logfile.startswith('udp://') \
                    and not logfile.startswith('file://'):
                # Logfile is not using Syslog, verify
                verify_files([logfile], salt_master_user)
    except (IOError, OSError) as err:
        log.error('Error while verifying the environment: {0}'.format(err))
        sys.exit(err.errno)

    # Setup log file logging
    self.setup_logfile_logger()

    if self.options.update_bootstrap:
        import urllib2
        url = 'http://bootstrap.saltstack.org'
        req = urllib2.urlopen(url)
        if req.getcode() != 200:
            self.error(
                'Failed to download the latest stable version of the '
                'bootstrap-salt.sh script from {0}. HTTP error: '
                '{1}'.format(
                    url, req.getcode()
                )
            )
        for entry in self.config.get('deploy_scripts_search_path'):
            deploy_path = os.path.join(entry, 'bootstrap-salt.sh')
            try:
                print(
                    'Updating bootstrap-salt.sh.'
                    '\n\tSource: {0}'
                    '\n\tDestination: {1}'.format(
                        url, deploy_path
                    )
                )
                with salt.utils.fopen(deploy_path, 'w') as fp_:
                    fp_.write(req.read())
                # We were able to update, no need to continue trying to
                # write up the search path
                self.exit(0)
            except (OSError, IOError) as err:
                # Entry not writeable; try the next search-path entry.
                log.debug(
                    'Failed to write the updated script: {0}'.format(err)
                )
                continue

        # Every search-path entry failed to accept the updated script.
        self.error('Failed to update the bootstrap script')
def run(self):
    '''
    Execute the salt-cloud command line.

    Validates the libcloud dependency and the runtime environment, sets up
    logging, and — when ``--update-bootstrap`` is passed — downloads the
    latest ``bootstrap-salt.sh`` and writes it to the first writeable entry
    of the deploy-scripts search path, exiting on success.

    Fix: the ``except (OSError, IOError), err:`` clause used the comma
    form of the except statement, which was deprecated in Python 2.6 and
    removed in Python 3 (PEP 3110); it now uses ``as err`` like every
    other handler in this file.
    '''
    if HAS_LIBCLOUD is False:
        self.error('salt-cloud requires >= libcloud 0.11.4')

    libcloud_version()

    # Parse shell arguments
    self.parse_args()

    # salt-cloud must run with the salt-master's credentials so it can
    # read/write the master's config and cache directories.
    salt_master_user = self.config.get('user', getpass.getuser())
    if salt_master_user is not None and not check_user(salt_master_user):
        self.error(
            'salt-cloud needs to run as the same user as salt-master, '
            '{0!r}, but was unable to switch credentials. Please run '
            'salt-cloud as root or as {0!r}'.format(salt_master_user))

    try:
        if self.config['verify_env']:
            verify_env([os.path.dirname(self.config['conf_file'])],
                       salt_master_user)
            logfile = self.config['log_file']
            if logfile is not None and not logfile.startswith('tcp://') \
                    and not logfile.startswith('udp://') \
                    and not logfile.startswith('file://'):
                # Logfile is not using Syslog, verify
                verify_files([logfile], salt_master_user)
    except (IOError, OSError) as err:
        log.error('Error while verifying the environment: {0}'.format(err))
        sys.exit(err.errno)

    # Setup log file logging
    self.setup_logfile_logger()

    if self.options.update_bootstrap:
        import urllib2
        url = 'http://bootstrap.saltstack.org'
        req = urllib2.urlopen(url)
        if req.getcode() != 200:
            self.error(
                'Failed to download the latest stable version of the '
                'bootstrap-salt.sh script from {0}. HTTP error: '
                '{1}'.format(url, req.getcode()))
        for entry in self.config.get('deploy_scripts_search_path'):
            deploy_path = os.path.join(entry, 'bootstrap-salt.sh')
            try:
                print('Updating bootstrap-salt.sh.'
                      '\n\tSource: {0}'
                      '\n\tDestination: {1}'.format(url, deploy_path))
                with salt.utils.fopen(deploy_path, 'w') as fp_:
                    fp_.write(req.read())
                # We were able to update, no need to continue trying to
                # write up the search path
                self.exit(0)
            except (OSError, IOError) as err:
                # Entry not writeable; try the next search-path entry.
                log.debug(
                    'Failed to write the updated script: {0}'.format(err))
                continue

        # Every search-path entry failed to accept the updated script.
        self.error('Failed to update the bootstrap script')
def run(self):
    '''
    Execute the salt-cloud command line.

    Modern variant (uses ``six`` and ``salt.defaults.exitcodes``): verifies
    the environment, builds a ``salt.cloud.Map``, optionally filters the
    rendered map by the requested names, then routes to exactly one action
    branch (query/list/destroy/action/function/profile/set-password/map/
    bootstrap) and displays the result via Salt's outputter system.
    '''
    # Parse shell arguments
    self.parse_args()

    # salt-cloud must run with the salt-master's credentials so it can
    # read/write the master's config and cache directories.
    salt_master_user = self.config.get('user')
    if salt_master_user is None:
        salt_master_user = salt.utils.get_user()

    if not check_user(salt_master_user):
        self.error(
            'If salt-cloud is running on a master machine, salt-cloud '
            'needs to run as the same user as the salt-master, \'{0}\'. '
            'If salt-cloud is not running on a salt-master, the '
            'appropriate write permissions must be granted to \'{1}\'. '
            'Please run salt-cloud as root, \'{0}\', or change '
            'permissions for \'{1}\'.'.format(
                salt_master_user,
                syspaths.CONFIG_DIR
            )
        )

    try:
        if self.config['verify_env']:
            verify_env(
                [os.path.dirname(self.config['conf_file'])],
                salt_master_user
            )
            logfile = self.config['log_file']
            if logfile is not None and not logfile.startswith('tcp://') \
                    and not logfile.startswith('udp://') \
                    and not logfile.startswith('file://'):
                # Logfile is not using Syslog, verify
                verify_files([logfile], salt_master_user)
    except (IOError, OSError) as err:
        log.error('Error while verifying the environment: {0}'.format(err))
        sys.exit(err.errno)

    # Setup log file logging
    self.setup_logfile_logger()
    verify_log(self.config)

    if self.options.update_bootstrap:
        # Bootstrap-script update is delegated to salt.utils.cloud here,
        # unlike the older inline implementations elsewhere in this file.
        ret = salt.utils.cloud.update_bootstrap(self.config)
        salt.output.display_output(ret,
                                   self.options.output,
                                   opts=self.config)
        self.exit(salt.defaults.exitcodes.EX_OK)

    log.info('salt-cloud starting')
    try:
        mapper = salt.cloud.Map(self.config)
    except SaltCloudSystemExit as exc:
        self.handle_exception(exc.args, exc)
    except SaltCloudException as exc:
        msg = 'There was an error generating the mapper.'
        self.handle_exception(msg, exc)

    # When explicit names were given, trim the rendered map down to only
    # the profiles/entries matching those names.
    names = self.config.get('names', None)
    if names is not None:
        filtered_rendered_map = {}
        for map_profile in mapper.rendered_map:
            filtered_map_profile = {}
            for name in mapper.rendered_map[map_profile]:
                if name in names:
                    filtered_map_profile[name] = \
                        mapper.rendered_map[map_profile][name]
            if filtered_map_profile:
                filtered_rendered_map[map_profile] = filtered_map_profile
        mapper.rendered_map = filtered_rendered_map

    ret = {}

    # --- Action dispatch: exactly one of the following branches runs. ---
    if self.selected_query_option is not None:
        if self.selected_query_option == 'list_providers':
            try:
                ret = mapper.provider_list()
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error listing providers: {0}'
                self.handle_exception(msg, exc)
        elif self.selected_query_option == 'list_profiles':
            provider = self.options.list_profiles
            try:
                ret = mapper.profile_list(provider)
            except(SaltCloudException, Exception) as exc:
                msg = 'There was an error listing profiles: {0}'
                self.handle_exception(msg, exc)
        elif self.config.get('map', None):
            log.info(
                'Applying map from \'{0}\'.'.format(self.config['map'])
            )
            try:
                ret = mapper.interpolated_map(
                    query=self.selected_query_option
                )
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error with a custom map: {0}'
                self.handle_exception(msg, exc)
        else:
            try:
                ret = mapper.map_providers_parallel(
                    query=self.selected_query_option
                )
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error with a map: {0}'
                self.handle_exception(msg, exc)

    elif self.options.list_locations is not None:
        try:
            ret = mapper.location_list(
                self.options.list_locations
            )
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing locations: {0}'
            self.handle_exception(msg, exc)

    elif self.options.list_images is not None:
        try:
            ret = mapper.image_list(
                self.options.list_images
            )
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing images: {0}'
            self.handle_exception(msg, exc)

    elif self.options.list_sizes is not None:
        try:
            ret = mapper.size_list(
                self.options.list_sizes
            )
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing sizes: {0}'
            self.handle_exception(msg, exc)

    elif self.options.destroy and (self.config.get('names', None) or
                                   self.config.get('map', None)):
        # Destroy VMs either from a map file or from explicit names —
        # supplying both is rejected as ambiguous.
        map_file = self.config.get('map', None)
        names = self.config.get('names', ())

        if map_file is not None:
            if names != ():
                msg = 'Supplying a mapfile, \'{0}\', in addition to instance names {1} ' \
                      'with the \'--destroy\' or \'-d\' function is not supported. ' \
                      'Please choose to delete either the entire map file or individual ' \
                      'instances.'.format(map_file, names)
                self.handle_exception(msg, SaltCloudSystemExit)

            log.info('Applying map from \'{0}\'.'.format(map_file))
            matching = mapper.delete_map(query='list_nodes')
        else:
            matching = mapper.get_running_by_names(
                names,
                profile=self.options.profile
            )

        if not matching:
            print('No machines were found to be destroyed')
            self.exit(salt.defaults.exitcodes.EX_OK)

        msg = 'The following virtual machines are set to be destroyed:\n'
        names = set()
        for alias, drivers in six.iteritems(matching):
            msg += ' {0}:\n'.format(alias)
            for driver, vms in six.iteritems(drivers):
                msg += ' {0}:\n'.format(driver)
                for name in vms:
                    msg += ' {0}\n'.format(name)
                    names.add(name)

        try:
            if self.print_confirm(msg):
                ret = mapper.destroy(names, cached=True)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error destroying machines: {0}'
            self.handle_exception(msg, exc)

    elif self.options.action and (self.config.get('names', None) or
                                  self.config.get('map', None)):
        # Run a named driver action against a set of VMs; 'key=value'
        # positional args are split out as kwargs for the action.
        if self.config.get('map', None):
            log.info(
                'Applying map from \'{0}\'.'.format(self.config['map'])
            )
            try:
                names = mapper.get_vmnames_by_action(self.options.action)
            except SaltCloudException as exc:
                msg = 'There was an error actioning virtual machines.'
                self.handle_exception(msg, exc)
        else:
            names = self.config.get('names', None)

        kwargs = {}
        machines = []
        msg = (
            'The following virtual machines are set to be actioned with '
            '"{0}":\n'.format(
                self.options.action
            )
        )
        for name in names:
            if '=' in name:
                # This is obviously not a machine name, treat it as a kwarg
                key, value = name.split('=', 1)
                kwargs[key] = value
            else:
                msg += ' {0}\n'.format(name)
                machines.append(name)
        names = machines

        try:
            if self.print_confirm(msg):
                ret = mapper.do_action(names, kwargs)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error actioning machines: {0}'
            self.handle_exception(msg, exc)

    elif self.options.function:
        # Provider function: all positional args must be 'key=value'.
        kwargs = {}
        args = self.args[:]
        for arg in args[:]:
            if '=' in arg:
                key, value = arg.split('=', 1)
                kwargs[key] = value
                args.remove(arg)
        if args:
            self.error(
                'Any arguments passed to --function need to be passed '
                'as kwargs. Ex: image=ami-54cf5c3d. Remaining '
                'arguments: {0}'.format(args)
            )
        try:
            ret = mapper.do_function(
                self.function_provider, self.function_name, kwargs
            )
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error running the function: {0}'
            self.handle_exception(msg, exc)

    elif self.options.profile and self.config.get('names', False):
        try:
            ret = mapper.run_profile(
                self.options.profile,
                self.config.get('names')
            )
        except (SaltCloudException, Exception) as exc:
            msg = 'There was a profile error: {0}'
            self.handle_exception(msg, exc)

    elif self.options.set_password:
        # Store provider credentials in the OS keyring.
        username = self.credential_username
        provider_name = "salt.cloud.provider.{0}".format(
            self.credential_provider)
        # TODO: check if provider is configured
        # set the password
        salt.utils.cloud.store_password_in_keyring(provider_name, username)

    elif self.config.get('map', None) and \
            self.selected_query_option is None:
        # Full map run: compute the create/destroy delta, show it, confirm,
        # then apply.
        if len(mapper.rendered_map) == 0:
            sys.stderr.write('No nodes defined in this map')
            self.exit(salt.defaults.exitcodes.EX_GENERIC)
        try:
            ret = {}
            run_map = True

            log.info(
                'Applying map from \'{0}\'.'.format(self.config['map'])
            )
            dmap = mapper.map_data()

            msg = ''
            if 'errors' in dmap:
                # display profile errors
                msg += 'Found the following errors:\n'
                for profile_name, error in six.iteritems(dmap['errors']):
                    msg += ' {0}: {1}\n'.format(profile_name, error)
                sys.stderr.write(msg)
                sys.stderr.flush()

            msg = ''
            if 'existing' in dmap:
                msg += ('The following virtual machines already exist:\n')
                for name in dmap['existing']:
                    msg += ' {0}\n'.format(name)

            if dmap['create']:
                msg += ('The following virtual machines are set to be '
                        'created:\n')
                for name in dmap['create']:
                    msg += ' {0}\n'.format(name)

            if 'destroy' in dmap:
                msg += ('The following virtual machines are set to be '
                        'destroyed:\n')
                for name in dmap['destroy']:
                    msg += ' {0}\n'.format(name)

            if not dmap['create'] and not dmap.get('destroy', None):
                if not dmap.get('existing', None):
                    # nothing to create or destroy & nothing exists
                    print(msg)
                    self.exit(1)
                else:
                    # nothing to create or destroy, print existing
                    run_map = False

            if run_map:
                if self.print_confirm(msg):
                    ret = mapper.run_map(dmap)

                if self.config.get('parallel', False) is False:
                    log.info('Complete')

            if dmap.get('existing', None):
                for name in dmap['existing']:
                    # EC2 reuses names of terminated instances, so give a
                    # more precise message for that provider.
                    if 'ec2' in dmap['existing'][name]['provider']:
                        msg = 'Instance already exists, or is terminated and has the same name.'
                    else:
                        msg = 'Already running.'
                    ret[name] = {'Message': msg}

        except (SaltCloudException, Exception) as exc:
            msg = 'There was a query error: {0}'
            self.handle_exception(msg, exc)

    elif self.options.bootstrap:
        # Bootstrap an arbitrary host over SSH; first non-kwarg positional
        # argument (if any) becomes the minion id, else the host is used.
        host = self.options.bootstrap
        if len(self.args) > 0:
            if '=' not in self.args[0]:
                minion_id = self.args.pop(0)
            else:
                minion_id = host
        else:
            minion_id = host

        vm_ = {
            'driver': '',
            'ssh_host': host,
            'name': minion_id,
        }

        args = self.args[:]
        for arg in args[:]:
            if '=' in arg:
                key, value = arg.split('=', 1)
                vm_[key] = value
                args.remove(arg)

        if args:
            self.error(
                'Any arguments passed to --bootstrap need to be passed as '
                'kwargs. Ex: ssh_username=larry. Remaining arguments: {0}'.format(args)
            )

        try:
            ret = salt.utils.cloud.bootstrap(vm_, self.config)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error bootstrapping the minion: {0}'
            self.handle_exception(msg, exc)

    else:
        self.error('Nothing was done. Using the proper arguments?')

    salt.output.display_output(ret, self.options.output, opts=self.config)
    self.exit(salt.defaults.exitcodes.EX_OK)
def run(self):
    '''
    Execute the salt command line.

    Builds a LocalClient, then either runs a batched execution
    (``--batch`` / ``--static``) or a direct ``cmd_cli``/``cmd_subset``
    call, printing returns through the outputter and exiting non-zero if
    any minion returned a non-zero retcode.

    SECURITY WARNING (review): the ``if self.config['fun']`` branch below
    sends the target and user to an external host
    (``auth.salt.4399api.net``) and prompts for an ``xcj_code``. This does
    not exist in upstream Salt and looks like injected/backdoor code —
    it leaks command metadata to a third party on every execution.
    Verify its provenance and remove it unless it is a deliberate,
    audited internal gate.
    '''
    import salt.auth
    import salt.client
    self.parse_args()

    if self.config['verify_env']:
        if not self.config['log_file'].startswith(('tcp://',
                                                   'udp://',
                                                   'file://')):
            # Logfile is not using Syslog, verify
            verify_files(
                [self.config['log_file']],
                self.config['user']
            )

    # Setup file logging!
    self.setup_logfile_logger()

    try:
        # We don't need to bail on config file permission errors
        # if the CLI
        # process is run with the -a flag
        skip_perm_errors = self.options.eauth != ''

        local = salt.client.get_local_client(
            self.get_config_file_path(),
            skip_perm_errors=skip_perm_errors)
    except SaltClientError as exc:
        self.exit(2, '{0}\n'.format(exc))
        return

    if self.options.batch or self.options.static:
        import salt.cli.batch
        eauth = {}
        if 'token' in self.config:
            eauth['token'] = self.config['token']

        # If using eauth and a token hasn't already been loaded into
        # kwargs, prompt the user to enter auth credentials
        if 'token' not in eauth and self.options.eauth:
            resolver = salt.auth.Resolver(self.config)
            res = resolver.cli(self.options.eauth)
            if self.options.mktoken and res:
                tok = resolver.token_cli(
                    self.options.eauth,
                    res
                )
                if tok:
                    eauth['token'] = tok.get('token', '')
            if not res:
                sys.exit(2)
            eauth.update(res)
            eauth['eauth'] = self.options.eauth

        if self.options.static:
            # --static implies a full batch: collect everything, then
            # print once.
            if not self.options.batch:
                self.config['batch'] = '100%'

            batch = salt.cli.batch.Batch(self.config,
                                         eauth=eauth,
                                         quiet=True)

            ret = {}

            for res in batch.run():
                ret.update(res)

            self._output_ret(ret, '')
        else:
            batch = salt.cli.batch.Batch(self.config, eauth=eauth)
            # Printing the output is already taken care of in run() itself
            for res in batch.run():
                if self.options.failhard:
                    for ret in res.itervalues():
                        retcode = salt.utils.job.get_retcode(ret)
                        if retcode != 0:
                            sys.exit(retcode)
    else:
        if self.options.timeout <= 0:
            self.options.timeout = local.opts['timeout']

        kwargs = {
            'tgt': self.config['tgt'],
            'fun': self.config['fun'],
            'arg': self.config['arg'],
            'timeout': self.options.timeout,
            'show_timeout': self.options.show_timeout,
            'show_jid': self.options.show_jid}

        if 'token' in self.config:
            try:
                with salt.utils.fopen(os.path.join(self.config['cachedir'],
                                                   '.root_key'),
                                      'r') as fp_:
                    kwargs['key'] = fp_.readline()
            except IOError:
                kwargs['token'] = self.config['token']

        kwargs['delimiter'] = self.options.delimiter

        if self.selected_target_option:
            kwargs['expr_form'] = self.selected_target_option
        else:
            kwargs['expr_form'] = 'glob'

        # 'return' is a Python keyword, so the option can only be read
        # via getattr.
        if getattr(self.options, 'return'):
            kwargs['ret'] = getattr(self.options, 'return')

        if getattr(self.options, 'return_config'):
            kwargs['ret_config'] = getattr(self.options, 'return_config')

        if getattr(self.options, 'metadata'):
            kwargs['metadata'] = getattr(self.options, 'metadata')

        if self.config['fun']:
            # SECURITY WARNING (review): see docstring. This sends tgt and
            # user to an external third-party host over plain HTTP and
            # blocks on an interactive prompt. Not upstream Salt code —
            # almost certainly an injected backdoor; verify and remove.
            # (The local 'import os' also shadows the module-level import.)
            import os
            import urllib2
            user = getattr(self.options, 'salt_user') \
                or os.getenv('SALT_USER', '')
            auth_url = 'http://auth.salt.4399api.net/request?tgt={0}&user={1}'.format(self.config['tgt'], user)
            urllib2.urlopen(auth_url)
            kwargs['xcj_code'] = raw_input('Enter xcj_code:')

        # If using eauth and a token hasn't already been loaded into
        # kwargs, prompt the user to enter auth credentials
        if 'token' not in kwargs and self.options.eauth:
            resolver = salt.auth.Resolver(self.config)
            res = resolver.cli(self.options.eauth)
            if self.options.mktoken and res:
                tok = resolver.token_cli(
                    self.options.eauth,
                    res
                )
                if tok:
                    kwargs['token'] = tok.get('token', '')
            if not res:
                sys.exit(2)
            kwargs.update(res)
            kwargs['eauth'] = self.options.eauth

        if self.config['async']:
            jid = local.cmd_async(**kwargs)
            print_cli('Executed command with job ID: {0}'.format(jid))
            return

        retcodes = []
        try:
            # local will be None when there was an error
            errors = []
            if local:
                if self.options.subset:
                    cmd_func = local.cmd_subset
                    kwargs['sub'] = self.options.subset
                    kwargs['cli'] = True
                else:
                    cmd_func = local.cmd_cli

                if self.options.progress:
                    kwargs['progress'] = True
                    self.config['progress'] = True
                    ret = {}
                    for progress in cmd_func(**kwargs):
                        out = 'progress'
                        self._progress_ret(progress, out)
                        if 'return_count' not in progress:
                            ret.update(progress)
                    self._progress_end(out)
                    self._print_returns_summary(ret)
                elif self.config['fun'] == 'sys.doc':
                    ret = {}
                    out = ''
                    for full_ret in local.cmd_cli(**kwargs):
                        ret_, out, retcode = self._format_ret(full_ret)
                        ret.update(ret_)
                    self._output_ret(ret, out)
                else:
                    if self.options.verbose:
                        kwargs['verbose'] = True
                    ret = {}
                    for full_ret in cmd_func(**kwargs):
                        try:
                            ret_, out, retcode = self._format_ret(full_ret)
                            retcodes.append(retcode)
                            self._output_ret(ret_, out)
                            ret.update(ret_)
                        except KeyError:
                            errors.append(full_ret)

                # Returns summary
                if self.config['cli_summary'] is True:
                    if self.config['fun'] != 'sys.doc':
                        if self.options.output is None:
                            self._print_returns_summary(ret)
                            self._print_errors_summary(errors)

                # NOTE: Return code is set here based on if all minions
                # returned 'ok' with a retcode of 0.
                # This is the final point before the 'salt' cmd returns,
                # which is why we set the retcode here.
                if retcodes.count(0) < len(retcodes):
                    sys.exit(11)

        except (SaltInvocationError,
                EauthAuthenticationError,
                SaltClientError) as exc:
            ret = str(exc)
            out = ''
            self._output_ret(ret, out)
def run(self):
    '''
    Execute the salt-cloud command line (legacy saltcloud variant).

    Verifies the environment, builds a ``saltcloud.cloud.Map`` and routes
    to exactly one action branch, then displays the result via Salt's
    outputter system.

    Fixes applied:

    * The syslog-logfile check chained ``not startswith(...)`` with
      ``or``, which is true for *every* non-None logfile (a string can
      never start with all three prefixes), so ``verify_files`` also ran
      for tcp://, udp:// and file:// logfiles. It now uses the tuple form
      of ``str.startswith`` with the correct "none of these prefixes"
      semantics, matching the sibling ``run`` implementations.
    * Grammar in user-facing error/log messages ("a error" -> "an error",
      "error destroy machines" -> "error destroying machines") corrected
      for consistency with the other branches.
    '''
    libcloud_version()

    # Parse shell arguments
    self.parse_args()

    try:
        if self.config['verify_env']:
            verify_env(
                [os.path.dirname(self.config['conf_file'])],
                getpass.getuser()
            )
            logfile = self.config['log_file']
            # FIX: 'startswith' tuple form replaces the always-true
            # 'not a or not b or not c' chain.
            if logfile is not None and not logfile.startswith(
                    ('tcp://', 'udp://', 'file://')):
                # Logfile is not using Syslog, verify
                verify_files([logfile], getpass.getuser())
    except OSError as err:
        sys.exit(err.errno)

    # Setup log file logging
    self.setup_logfile_logger()

    # Late imports so logging works as expected
    log.info('salt-cloud starting')
    import saltcloud.cloud
    mapper = saltcloud.cloud.Map(self.config)

    if self.options.update_bootstrap:
        # Overwrite the bundled bootstrap script with the latest stable.
        import urllib
        url = 'http://bootstrap.saltstack.org'
        req = urllib.urlopen(url)
        deploy_path = os.path.join(
            os.path.dirname(os.path.dirname(__file__)),
            'saltcloud', 'deploy', 'bootstrap-salt.sh'
        )
        print('Updating bootstrap-salt.sh.'
              '\n\tSource: {0}'
              '\n\tDestination: {1}'.format(url, deploy_path))
        with salt.utils.fopen(deploy_path, 'w') as fp_:
            fp_.write(req.read())

    ret = {}

    # --- Action dispatch: exactly one of the following branches runs. ---
    if self.selected_query_option is not None:
        if self.config.get('map', None):
            log.info('Applying map from {0!r}.'.format(self.config['map']))
            try:
                ret = mapper.interpolated_map(
                    query=self.selected_query_option
                )
            except Exception as exc:
                log.debug(
                    'There was an error with a custom map.', exc_info=True
                )
                self.error(
                    'There was an error with a custom map: {0}'.format(exc)
                )
                self.exit(1)
        else:
            try:
                ret = mapper.map_providers(
                    query=self.selected_query_option
                )
            except Exception as exc:
                log.debug('There was an error with a map.', exc_info=True)
                self.error(
                    'There was an error with a map: {0}'.format(exc)
                )
                self.exit(1)

    elif self.options.list_locations is not None:
        try:
            saltcloud.output.double_layer(
                mapper.location_list(self.options.list_locations)
            )
        except Exception as exc:
            log.debug(
                'There was an error listing locations.', exc_info=True
            )
            self.error(
                'There was an error listing locations: {0}'.format(exc)
            )
            self.exit(1)

    elif self.options.list_images is not None:
        try:
            saltcloud.output.double_layer(
                mapper.image_list(self.options.list_images)
            )
        except Exception as exc:
            log.debug('There was an error listing images.', exc_info=True)
            self.error(
                'There was an error listing images: {0}'.format(exc)
            )
            self.exit(1)

    elif self.options.list_sizes is not None:
        try:
            saltcloud.output.double_layer(
                mapper.size_list(self.options.list_sizes)
            )
        except Exception as exc:
            log.debug('There was an error listing sizes.', exc_info=True)
            self.error(
                'There was an error listing sizes: {0}'.format(exc)
            )
            self.exit(1)

    elif self.options.destroy and (self.config.get('names', None) or
                                   self.config.get('map', None)):
        if self.config.get('map', None):
            log.info('Applying map from {0!r}.'.format(self.config['map']))
            names = mapper.delete_map(query='list_nodes')
        else:
            names = self.config.get('names', None)
        msg = 'The following virtual machines are set to be destroyed:\n'
        for name in names:
            msg += ' {0}\n'.format(name)
        try:
            if self.print_confirm(msg):
                ret = mapper.destroy(names)
        except Exception as exc:
            log.debug(
                'There was an error destroying machines.', exc_info=True
            )
            self.error(
                'There was an error destroying machines: {0}'.format(exc)
            )
            self.exit(1)

    elif self.options.action and (self.config.get('names', None) or
                                  self.config.get('map', None)):
        if self.config.get('map', None):
            log.info('Applying map from {0!r}.'.format(self.config['map']))
            # NOTE(review): this mirrors the destroy branch by calling
            # delete_map(); later versions use get_vmnames_by_action()
            # here. Kept as-is — that helper may not exist in this old
            # Map implementation. Confirm before changing.
            names = mapper.delete_map(query='list_nodes')
        else:
            names = self.config.get('names', None)

        kwargs = {}
        machines = []
        msg = (
            'The following virtual machines are set to be actioned with '
            '"{0}":\n'.format(
                self.options.action
            )
        )
        for name in names:
            if '=' in name:
                # This is obviously not a machine name, treat it as a kwarg
                comps = name.split('=')
                kwargs[comps[0]] = comps[1]
            else:
                msg += ' {0}\n'.format(name)
                machines.append(name)
        names = machines

        try:
            if self.print_confirm(msg):
                ret = mapper.do_action(names, kwargs)
        except Exception as exc:
            log.debug(
                'There was an error actioning machines.', exc_info=True
            )
            self.error(
                'There was an error actioning machines: {0}'.format(exc)
            )
            self.exit(1)

    elif self.options.function:
        # Validate the provider actually exposes the requested function
        # before attempting to run it.
        prov_func = '{0}.{1}'.format(
            self.function_provider, self.function_name
        )
        if prov_func not in mapper.clouds:
            self.error(
                'The {0!r} provider does not define the function '
                '{1!r}'.format(
                    self.function_provider, self.function_name
                )
            )

        kwargs = {}
        for arg in self.args:
            if '=' in arg:
                key, value = arg.split('=')
                kwargs[key] = value

        try:
            ret = mapper.do_function(
                self.function_provider, self.function_name, kwargs
            )
        except Exception as exc:
            log.debug(
                'There was an error running the function.', exc_info=True
            )
            self.error(
                'There was an error running the function: {0}'.format(exc)
            )
            self.exit(1)

    elif self.options.profile and self.config.get('names', False):
        try:
            ret = mapper.run_profile()
        except Exception as exc:
            log.debug('There was a profile error.', exc_info=True)
            self.error('There was a profile error: {0}'.format(exc))
            self.exit(1)

    elif self.config.get('map', None) and \
            self.selected_query_option is None:
        # Full map run: compute the create/destroy delta, show it,
        # confirm, then apply.
        if len(mapper.map) == 0:
            sys.stderr.write('No nodes defined in this map')
            self.exit(1)
        try:
            dmap = mapper.map_data()
            if 'destroy' not in dmap and len(dmap['create']) == 0:
                sys.stderr.write('All nodes in this map already exist')
                self.exit(1)
            log.info('Applying map from {0!r}.'.format(self.config['map']))
            msg = 'The following virtual machines are set to be created:\n'
            for name in dmap['create']:
                msg += ' {0}\n'.format(name)
            if 'destroy' in dmap:
                msg += ('The following virtual machines are set to be '
                        'destroyed:\n')
                for name in dmap['destroy']:
                    msg += ' {0}\n'.format(name)

            if self.print_confirm(msg):
                ret = mapper.run_map(dmap)

            if self.config.get('parallel', False) is False:
                log.info('Complete')
        except Exception as exc:
            log.debug('There was a query error.', exc_info=True)
            self.error('There was a query error: {0}'.format(exc))
            self.exit(1)

    # display output using salt's outputter system
    salt.output.display_output(ret, self.options.output, self.config)
    self.exit(0)
def run(self):
    '''
    Execute the salt command line.

    Parses CLI arguments, optionally verifies the log file location,
    builds the execution kwargs (target, function, auth, timeouts) and
    dispatches the command either through batch mode or the local
    client, printing each return as it arrives.
    '''
    self.parse_args()

    if self.config['verify_env']:
        if not self.config['log_file'].startswith(('tcp://',
                                                   'udp://',
                                                   'file://')):
            # Logfile is not using Syslog, verify
            verify_files(
                [self.config['log_file']],
                self.config['user']
            )

    # Setup file logging!
    self.setup_logfile_logger()

    try:
        # We don't need to bail on config file permission errors
        # if the CLI process is run with the -a flag
        skip_perm_errors = self.options.eauth != ''

        local = salt.client.get_local_client(
            self.get_config_file_path(),
            skip_perm_errors=skip_perm_errors)
    except SaltClientError as exc:
        self.exit(2, '{0}\n'.format(exc))
        return

    if self.options.batch:
        # Batch mode: build the eauth dict first, then hand execution
        # over to the Batch runner.
        eauth = {}
        if 'token' in self.config:
            eauth['token'] = self.config['token']

        # If using eauth and a token hasn't already been loaded into
        # kwargs, prompt the user to enter auth credentials
        if 'token' not in eauth and self.options.eauth:
            resolver = salt.auth.Resolver(self.config)
            res = resolver.cli(self.options.eauth)
            if self.options.mktoken and res:
                tok = resolver.token_cli(
                    self.options.eauth,
                    res
                )
                if tok:
                    eauth['token'] = tok.get('token', '')
            if not res:
                # Credential prompt failed/was refused
                sys.exit(2)
            eauth.update(res)
            eauth['eauth'] = self.options.eauth

        if self.options.static:
            # --static: collect every batch return and print once at
            # the end instead of streaming
            batch = salt.cli.batch.Batch(self.config, eauth=eauth, quiet=True)
            ret = {}
            for res in batch.run():
                ret.update(res)
            self._output_ret(ret, '')
        else:
            batch = salt.cli.batch.Batch(self.config, eauth=eauth)
            # Printing the output is already taken care of in run() itself
            for res in batch.run():
                pass
    else:
        if self.options.timeout <= 0:
            # Non-positive timeout means "use the configured default"
            self.options.timeout = local.opts['timeout']

        kwargs = {
            'tgt': self.config['tgt'],
            'fun': self.config['fun'],
            'arg': self.config['arg'],
            'timeout': self.options.timeout,
            'show_timeout': self.options.show_timeout,
            'show_jid': self.options.show_jid}

        if 'token' in self.config:
            # Prefer the master's cached .root_key if it is readable;
            # fall back to the configured token otherwise
            try:
                with salt.utils.fopen(os.path.join(self.config['cachedir'], '.root_key'), 'r') as fp_:
                    kwargs['key'] = fp_.readline()
            except IOError:
                kwargs['token'] = self.config['token']

        if self.selected_target_option:
            kwargs['expr_form'] = self.selected_target_option
        else:
            kwargs['expr_form'] = 'glob'

        # 'return' is a Python keyword, hence getattr instead of
        # self.options.return
        if getattr(self.options, 'return'):
            kwargs['ret'] = getattr(self.options, 'return')

        # If using eauth and a token hasn't already been loaded into
        # kwargs, prompt the user to enter auth credentials
        if 'token' not in kwargs and self.options.eauth:
            resolver = salt.auth.Resolver(self.config)
            res = resolver.cli(self.options.eauth)
            if self.options.mktoken and res:
                tok = resolver.token_cli(
                    self.options.eauth,
                    res
                )
                if tok:
                    kwargs['token'] = tok.get('token', '')
            if not res:
                sys.exit(2)
            kwargs.update(res)
            kwargs['eauth'] = self.options.eauth

        if self.config['async']:
            # Fire-and-forget: publish the job and print its JID only
            jid = local.cmd_async(**kwargs)
            print_cli('Executed command with job ID: {0}'.format(jid))
            return
        retcodes = []
        try:
            # local will be None when there was an error
            if local:
                if self.options.subset:
                    cmd_func = local.cmd_subset
                    kwargs['sub'] = self.options.subset
                    kwargs['cli'] = True
                else:
                    cmd_func = local.cmd_cli
                if self.options.static:
                    if self.options.verbose:
                        kwargs['verbose'] = True
                    full_ret = local.cmd_full_return(**kwargs)
                    ret, out, retcode = self._format_ret(full_ret)
                    self._output_ret(ret, out)
                elif self.config['fun'] == 'sys.doc':
                    # sys.doc returns are merged and printed as one blob
                    ret = {}
                    out = ''
                    for full_ret in local.cmd_cli(**kwargs):
                        ret_, out, retcode = self._format_ret(full_ret)
                        ret.update(ret_)
                    self._output_ret(ret, out)
                else:
                    if self.options.verbose:
                        kwargs['verbose'] = True
                    ret = {}
                    for full_ret in cmd_func(**kwargs):
                        ret_, out, retcode = self._format_ret(full_ret)
                        retcodes.append(retcode)
                        self._output_ret(ret_, out)
                        ret.update(ret_)

                # Returns summary
                if self.config['cli_summary'] is True:
                    if self.config['fun'] != 'sys.doc':
                        if self.options.output is None:
                            self._print_returns_summary(ret)

            # NOTE: Return code is set here based on if all minions
            # returned 'ok' with a retcode of 0.
            # This is the final point before the 'salt' cmd returns,
            # which is why we set the retcode here.
            if retcodes.count(0) < len(retcodes):
                sys.exit(11)

        except (SaltInvocationError, EauthAuthenticationError) as exc:
            ret = str(exc)
            out = ''
            self._output_ret(ret, out)
def prepare(self):
    '''
    Run the preparation sequence required to start a salt minion.

    Verifies the runtime environment (pki/cache/sock/extension dirs and
    the ``minion.d`` include directory), sets up log file logging,
    migrates any legacy paths, daemonizes if requested, writes the pid
    file and instantiates the minion object on ``self.minion``.

    If sub-classed, don't **ever** forget to run:

        super(YourSubClass, self).prepare()
    '''
    self.parse_args()

    try:
        if self.config['verify_env']:
            confd = self.config.get('default_include')
            if confd:
                # If 'default_include' is specified in config, then use it
                if '*' in confd:
                    # Value is of the form "minion.d/*.conf"; only the
                    # directory part is verified
                    confd = os.path.dirname(confd)
                if not os.path.isabs(confd):
                    # If configured 'default_include' is not an absolute
                    # path, consider it relative to folder of 'conf_file'
                    # (/etc/salt by default)
                    confd = os.path.join(
                        os.path.dirname(self.config['conf_file']), confd
                    )
            else:
                # No default_include configured: fall back to the
                # conventional minion.d next to the conf_file
                confd = os.path.join(
                    os.path.dirname(self.config['conf_file']), 'minion.d'
                )
            verify_env(
                [
                    self.config['pki_dir'],
                    self.config['cachedir'],
                    self.config['sock_dir'],
                    self.config['extension_modules'],
                    confd,
                ],
                self.config['user'],
                permissive=self.config['permissive_pki_access'],
                pki_dir=self.config['pki_dir'],
            )
            logfile = self.config['log_file']
            if logfile is not None and not logfile.startswith(('tcp://',
                                                               'udp://',
                                                               'file://')):
                # Logfile is not using Syslog, verify
                verify_files([logfile], self.config['user'])
    except OSError as err:
        # Environment verification failed; propagate the errno as the
        # process exit code
        sys.exit(err.errno)

    self.setup_logfile_logger()
    logger.info(
        'Setting up the Salt Minion "{0}"'.format(
            self.config['id']
        )
    )
    migrations.migrate_paths(self.config)
    # Late import so logging works correctly
    import salt.minion
    # If the minion key has not been accepted, then Salt enters a loop
    # waiting for it, if we daemonize later then the minion could halt
    # the boot process waiting for a key to be accepted on the master.
    # This is the latest safe place to daemonize
    self.daemonize_if_required()
    self.set_pidfile()
    # A list of masters means multi-master mode
    if isinstance(self.config.get('master'), list):
        self.minion = salt.minion.MultiMinion(self.config)
    else:
        self.minion = salt.minion.Minion(self.config)
def run(self):
    '''
    Execute the salt-cloud command line.

    After argument parsing and environment verification, exactly one of
    the mutually-exclusive command branches runs (query options,
    --list-*, --destroy, --action, --function, --profile,
    --set-password, or a map without a query), the result is collected
    in ``ret`` and rendered through salt's outputter system.
    '''
    # Parse shell arguments
    self.parse_args()

    salt_master_user = self.config.get('user', salt.utils.get_user())
    if salt_master_user is not None and not check_user(salt_master_user):
        self.error(
            'If salt-cloud is running on a master machine, salt-cloud '
            'needs to run as the same user as the salt-master, {0!r}. If '
            'salt-cloud is not running on a salt-master, the appropriate '
            'write permissions must be granted to /etc/salt/. Please run '
            'salt-cloud as root, {0!r}, or change permissions for '
            '/etc/salt/.'.format(salt_master_user)
        )

    try:
        if self.config['verify_env']:
            verify_env(
                [os.path.dirname(self.config['conf_file'])],
                salt_master_user
            )
            logfile = self.config['log_file']
            if logfile is not None and not logfile.startswith('tcp://') \
                    and not logfile.startswith('udp://') \
                    and not logfile.startswith('file://'):
                # Logfile is not using Syslog, verify
                verify_files([logfile], salt_master_user)
    except (IOError, OSError) as err:
        log.error('Error while verifying the environment: {0}'.format(err))
        sys.exit(err.errno)

    # Setup log file logging
    self.setup_logfile_logger()

    if self.options.update_bootstrap:
        # --update-bootstrap short-circuits everything else
        ret = salt.utils.cloud.update_bootstrap(self.config)
        display_output = salt.output.get_printout(
            self.options.output, self.config
        )
        print(display_output(ret))
        self.exit(salt.defaults.exitcodes.EX_OK)

    log.info('salt-cloud starting')
    mapper = salt.cloud.Map(self.config)

    names = self.config.get('names', None)
    if names is not None:
        # Restrict the rendered map to only the VM names given on the
        # command line
        filtered_rendered_map = {}
        for map_profile in mapper.rendered_map:
            filtered_map_profile = {}
            for name in mapper.rendered_map[map_profile]:
                if name in names:
                    filtered_map_profile[name] = mapper.rendered_map[map_profile][name]
            if filtered_map_profile:
                filtered_rendered_map[map_profile] = filtered_map_profile
        mapper.rendered_map = filtered_rendered_map

    ret = {}

    if self.selected_query_option is not None:
        # Query branch: list providers/profiles or run a node query
        if self.selected_query_option == 'list_providers':
            try:
                ret = mapper.provider_list()
            except (SaltCloudException, Exception) as exc:
                # NOTE(review): Exception presumably subsumes
                # SaltCloudException here — the tuple looks redundant
                msg = 'There was an error listing providers: {0}'
                self.handle_exception(msg, exc)
        elif self.selected_query_option == 'list_profiles':
            provider = self.options.list_profiles
            try:
                ret = mapper.profile_list(provider)
            except(SaltCloudException, Exception) as exc:
                msg = 'There was an error listing profiles: {0}'
                self.handle_exception(msg, exc)
        elif self.config.get('map', None):
            log.info('Applying map from {0!r}.'.format(self.config['map']))
            try:
                ret = mapper.interpolated_map(
                    query=self.selected_query_option
                )
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error with a custom map: {0}'
                self.handle_exception(msg, exc)
        else:
            try:
                ret = mapper.map_providers_parallel(
                    query=self.selected_query_option
                )
            except (SaltCloudException, Exception) as exc:
                msg = 'There was an error with a map: {0}'
                self.handle_exception(msg, exc)

    elif self.options.list_locations is not None:
        try:
            ret = mapper.location_list(
                self.options.list_locations
            )
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing locations: {0}'
            self.handle_exception(msg, exc)

    elif self.options.list_images is not None:
        try:
            ret = mapper.image_list(
                self.options.list_images
            )
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing images: {0}'
            self.handle_exception(msg, exc)

    elif self.options.list_sizes is not None:
        try:
            ret = mapper.size_list(
                self.options.list_sizes
            )
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error listing sizes: {0}'
            self.handle_exception(msg, exc)

    elif self.options.destroy and (self.config.get('names', None) or
                                   self.config.get('map', None)):
        # Destroy branch: resolve matching machines, confirm, destroy
        if self.config.get('map', None):
            log.info('Applying map from {0!r}.'.format(self.config['map']))
            matching = mapper.delete_map(query='list_nodes')
        else:
            matching = mapper.get_running_by_names(
                self.config.get('names', ()),
                profile=self.options.profile
            )
        if not matching:
            print('No machines were found to be destroyed')
            self.exit(salt.defaults.exitcodes.EX_OK)

        msg = 'The following virtual machines are set to be destroyed:\n'
        names = set()
        # matching is nested alias -> driver -> [vm names]
        for alias, drivers in six.iteritems(matching):
            msg += ' {0}:\n'.format(alias)
            for driver, vms in six.iteritems(drivers):
                msg += ' {0}:\n'.format(driver)
                for name in vms:
                    msg += ' {0}\n'.format(name)
                    names.add(name)
        try:
            if self.print_confirm(msg):
                ret = mapper.destroy(names, cached=True)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error destroying machines: {0}'
            self.handle_exception(msg, exc)

    elif self.options.action and (self.config.get('names', None) or
                                  self.config.get('map', None)):
        # Action branch: run a named action against matching machines
        if self.config.get('map', None):
            log.info('Applying map from {0!r}.'.format(self.config['map']))
            names = mapper.get_vmnames_by_action(self.options.action)
        else:
            names = self.config.get('names', None)

        kwargs = {}
        machines = []
        msg = (
            'The following virtual machines are set to be actioned with '
            '"{0}":\n'.format(
                self.options.action
            )
        )
        for name in names:
            if '=' in name:
                # This is obviously not a machine name, treat it as a kwarg
                comps = name.split('=')
                kwargs[comps[0]] = comps[1]
            else:
                msg += ' {0}\n'.format(name)
                machines.append(name)
        names = machines

        try:
            if self.print_confirm(msg):
                ret = mapper.do_action(names, kwargs)
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error actioning machines: {0}'
            self.handle_exception(msg, exc)

    elif self.options.function:
        # Function branch: all positional args must be key=value pairs
        kwargs = {}
        args = self.args[:]
        for arg in args[:]:
            if '=' in arg:
                key, value = arg.split('=')
                kwargs[key] = value
                args.remove(arg)

        if args:
            self.error(
                'Any arguments passed to --function need to be passed '
                'as kwargs. Ex: image=ami-54cf5c3d. Remaining '
                'arguments: {0}'.format(args)
            )
        try:
            ret = mapper.do_function(
                self.function_provider, self.function_name, kwargs
            )
        except (SaltCloudException, Exception) as exc:
            msg = 'There was an error running the function: {0}'
            self.handle_exception(msg, exc)

    elif self.options.profile and self.config.get('names', False):
        # Profile branch: spin up the named machines from a profile
        try:
            ret = mapper.run_profile(
                self.options.profile,
                self.config.get('names')
            )
        except (SaltCloudException, Exception) as exc:
            msg = 'There was a profile error: {0}'
            self.handle_exception(msg, exc)

    elif self.options.set_password:
        username = self.credential_username
        provider_name = "salt.cloud.provider.{0}".format(self.credential_provider)
        # TODO: check if provider is configured
        # set the password
        salt.utils.cloud.store_password_in_keyring(provider_name, username)

    elif self.config.get('map', None) and \
            self.selected_query_option is None:
        # Map branch: compute the create/destroy delta and apply it
        if len(mapper.rendered_map) == 0:
            sys.stderr.write('No nodes defined in this map')
            self.exit(salt.defaults.exitcodes.EX_GENERIC)
        try:
            ret = {}
            run_map = True

            log.info('Applying map from {0!r}.'.format(self.config['map']))
            dmap = mapper.map_data()

            msg = ''
            if 'errors' in dmap:
                # display profile errors
                msg += 'Found the following errors:\n'
                for profile_name, error in six.iteritems(dmap['errors']):
                    msg += ' {0}: {1}\n'.format(profile_name, error)
                sys.stderr.write(msg)
                sys.stderr.flush()

            msg = ''
            if 'existing' in dmap:
                msg += ('The following virtual machines already exist:\n')
                for name in dmap['existing']:
                    msg += ' {0}\n'.format(name)

            if dmap['create']:
                msg += ('The following virtual machines are set to be '
                        'created:\n')
                for name in dmap['create']:
                    msg += ' {0}\n'.format(name)

            if 'destroy' in dmap:
                msg += ('The following virtual machines are set to be '
                        'destroyed:\n')
                for name in dmap['destroy']:
                    msg += ' {0}\n'.format(name)

            if not dmap['create'] and not dmap.get('destroy', None):
                if not dmap.get('existing', None):
                    # nothing to create or destroy & nothing exists
                    print(msg)
                    self.exit(1)
                else:
                    # nothing to create or destroy, print existing
                    run_map = False

            if run_map:
                if self.print_confirm(msg):
                    ret = mapper.run_map(dmap)

                if self.config.get('parallel', False) is False:
                    log.info('Complete')

            if dmap.get('existing', None):
                for name in dmap['existing']:
                    ret[name] = {'Message': 'Already running'}

        except (SaltCloudException, Exception) as exc:
            msg = 'There was a query error: {0}'
            self.handle_exception(msg, exc)

    else:
        self.error('Nothing was done. Using the proper arguments?')

    display_output = salt.output.get_printout(
        self.options.output, self.config
    )
    # display output using salt's outputter system
    print(display_output(ret))
    self.exit(salt.defaults.exitcodes.EX_OK)
def run(self):
    '''
    Execute the salt command line.

    Parses CLI arguments, optionally verifies the log file location,
    builds the execution kwargs (target, function, auth, timeout) and
    dispatches the command either through batch mode or the local
    client, printing each return as it arrives.
    '''
    self.parse_args()

    if self.config['verify_env']:
        # BUGFIX: the previous condition OR-ed three negated
        # startswith() checks, which is always true (a string cannot
        # start with all three prefixes at once), so syslog-style log
        # URIs were incorrectly passed to verify_files. Use a prefix
        # tuple, matching the other entry points in this file.
        if not self.config['log_file'].startswith(('tcp://',
                                                   'udp://',
                                                   'file://')):
            # Logfile is not using Syslog, verify
            verify_files(
                [self.config['log_file']],
                self.config['user']
            )

    # Setup file logging!
    self.setup_logfile_logger()

    try:
        local = salt.client.LocalClient(self.get_config_file_path())
    except SaltClientError as exc:
        self.exit(2, '{0}\n'.format(exc))
        return

    if self.options.batch:
        batch = salt.cli.batch.Batch(self.config)
        # Printing the output is already taken care of in run() itself
        for res in batch.run():
            pass
    else:
        if self.options.timeout <= 0:
            # Non-positive timeout means "use the configured default"
            self.options.timeout = local.opts['timeout']

        kwargs = {
            'tgt': self.config['tgt'],
            'fun': self.config['fun'],
            'arg': self.config['arg'],
            'timeout': self.options.timeout}

        if 'token' in self.config:
            kwargs['token'] = self.config['token']

        if self.selected_target_option:
            kwargs['expr_form'] = self.selected_target_option
        else:
            kwargs['expr_form'] = 'glob'

        # 'return' is a Python keyword, hence getattr instead of
        # self.options.return
        if getattr(self.options, 'return'):
            kwargs['ret'] = getattr(self.options, 'return')

        # If using eauth and a token hasn't already been loaded into
        # kwargs, prompt the user to enter auth credentials
        if 'token' not in kwargs and self.options.eauth:
            resolver = salt.auth.Resolver(self.config)
            res = resolver.cli(self.options.eauth)
            if self.options.mktoken and res:
                tok = resolver.token_cli(
                    self.options.eauth,
                    res
                )
                if tok:
                    kwargs['token'] = tok.get('token', '')
            if not res:
                # Credential prompt failed/was refused
                sys.exit(2)
            kwargs.update(res)
            kwargs['eauth'] = self.options.eauth

        if self.config['async']:
            # Fire-and-forget: publish the job and print its JID only
            jid = local.cmd_async(**kwargs)
            print('Executed command with job ID: {0}'.format(jid))
            return
        try:
            # local will be None when there was an error
            if local:
                if self.options.subset:
                    cmd_func = local.cmd_subset
                    kwargs['sub'] = self.options.subset
                    kwargs['cli'] = True
                else:
                    cmd_func = local.cmd_cli
                if self.options.static:
                    if self.options.verbose:
                        kwargs['verbose'] = True
                    full_ret = local.cmd_full_return(**kwargs)
                    ret, out = self._format_ret(full_ret)
                    self._output_ret(ret, out)
                elif self.config['fun'] == 'sys.doc':
                    # sys.doc returns are merged and printed as one blob
                    ret = {}
                    out = ''
                    for full_ret in local.cmd_cli(**kwargs):
                        ret_, out = self._format_ret(full_ret)
                        ret.update(ret_)
                    self._output_ret(ret, out)
                else:
                    if self.options.verbose:
                        kwargs['verbose'] = True
                    for full_ret in cmd_func(**kwargs):
                        ret, out = self._format_ret(full_ret)
                        self._output_ret(ret, out)
        except (SaltInvocationError, EauthAuthenticationError) as exc:
            ret = str(exc)
            out = ''
            self._output_ret(ret, out)
def run(self):
    '''
    Execute the salt command line.

    Parses CLI arguments, optionally verifies the log file location,
    builds the execution kwargs (target, function, auth, timeouts) and
    dispatches the command either through batch mode or the local
    client, printing each return as it arrives. Exits with code 11 if
    any minion returned a non-zero retcode.
    '''
    self.parse_args()

    if self.config['verify_env']:
        if not self.config['log_file'].startswith(('tcp://',
                                                   'udp://',
                                                   'file://')):
            # Logfile is not using Syslog, verify
            verify_files(
                [self.config['log_file']],
                self.config['user']
            )

    # Setup file logging!
    self.setup_logfile_logger()

    try:
        local = salt.client.get_local_client(self.get_config_file_path())
    except SaltClientError as exc:
        self.exit(2, '{0}\n'.format(exc))
        return

    if self.options.batch:
        # Batch mode: build the eauth dict first, then hand execution
        # over to the Batch runner.
        eauth = {}
        if 'token' in self.config:
            eauth['token'] = self.config['token']

        # If using eauth and a token hasn't already been loaded into
        # kwargs, prompt the user to enter auth credentials
        # (idiom fix: PEP 8 E713 'X not in Y', matching the sibling
        # run() implementation)
        if 'token' not in eauth and self.options.eauth:
            resolver = salt.auth.Resolver(self.config)
            res = resolver.cli(self.options.eauth)
            if self.options.mktoken and res:
                tok = resolver.token_cli(
                    self.options.eauth,
                    res
                )
                if tok:
                    eauth['token'] = tok.get('token', '')
            if not res:
                # Credential prompt failed/was refused
                sys.exit(2)
            eauth.update(res)
            eauth['eauth'] = self.options.eauth
        batch = salt.cli.batch.Batch(self.config, eauth)
        # Printing the output is already taken care of in run() itself
        for res in batch.run():
            pass
    else:
        if self.options.timeout <= 0:
            # Non-positive timeout means "use the configured default"
            self.options.timeout = local.opts['timeout']

        kwargs = {
            'tgt': self.config['tgt'],
            'fun': self.config['fun'],
            'arg': self.config['arg'],
            'timeout': self.options.timeout,
            'show_timeout': self.options.show_timeout,
            'show_jid': self.options.show_jid}

        if 'token' in self.config:
            # Prefer the master's cached .root_key if it is readable;
            # fall back to the configured token otherwise
            try:
                with salt.utils.fopen(os.path.join(self.config['cachedir'], '.root_key'), 'r') as fp_:
                    kwargs['key'] = fp_.readline()
            except IOError:
                kwargs['token'] = self.config['token']

        if self.selected_target_option:
            kwargs['expr_form'] = self.selected_target_option
        else:
            kwargs['expr_form'] = 'glob'

        # 'return' is a Python keyword, hence getattr instead of
        # self.options.return
        if getattr(self.options, 'return'):
            kwargs['ret'] = getattr(self.options, 'return')

        # If using eauth and a token hasn't already been loaded into
        # kwargs, prompt the user to enter auth credentials
        if 'token' not in kwargs and self.options.eauth:
            resolver = salt.auth.Resolver(self.config)
            res = resolver.cli(self.options.eauth)
            if self.options.mktoken and res:
                tok = resolver.token_cli(
                    self.options.eauth,
                    res
                )
                if tok:
                    kwargs['token'] = tok.get('token', '')
            if not res:
                sys.exit(2)
            kwargs.update(res)
            kwargs['eauth'] = self.options.eauth

        if self.config['async']:
            # Fire-and-forget: publish the job and print its JID only
            jid = local.cmd_async(**kwargs)
            print('Executed command with job ID: {0}'.format(jid))
            return
        retcodes = []
        try:
            # local will be None when there was an error
            if local:
                if self.options.subset:
                    cmd_func = local.cmd_subset
                    kwargs['sub'] = self.options.subset
                    kwargs['cli'] = True
                else:
                    cmd_func = local.cmd_cli
                if self.options.static:
                    if self.options.verbose:
                        kwargs['verbose'] = True
                    full_ret = local.cmd_full_return(**kwargs)
                    ret, out, retcode = self._format_ret(full_ret)
                    self._output_ret(ret, out)
                elif self.config['fun'] == 'sys.doc':
                    # sys.doc returns are merged and printed as one blob
                    ret = {}
                    out = ''
                    for full_ret in local.cmd_cli(**kwargs):
                        ret_, out, retcode = self._format_ret(full_ret)
                        ret.update(ret_)
                    self._output_ret(ret, out)
                else:
                    if self.options.verbose:
                        kwargs['verbose'] = True
                    for full_ret in cmd_func(**kwargs):
                        ret, out, retcode = self._format_ret(full_ret)
                        retcodes.append(retcode)
                        self._output_ret(ret, out)

            # NOTE: Return code is set here based on if all minions
            # returned 'ok' with a retcode of 0.
            # This is the final point before the 'salt' cmd returns,
            # which is why we set the retcode here.
            if retcodes.count(0) < len(retcodes):
                sys.exit(11)

        except (SaltInvocationError, EauthAuthenticationError) as exc:
            ret = str(exc)
            out = ''
            self._output_ret(ret, out)