def process_queue(queue, quantity=1, backend='sqlite'):
    '''
    Pop items off a queue and create an event on the Salt event bus to be
    processed by a Reactor.

    CLI Example:

    .. code-block:: bash

        salt-run queue.process_queue myqueue
        salt-run queue.process_queue myqueue 6
        salt-run queue.process_queue myqueue all backend=sqlite
    '''
    # get ready to send an event
    event = salt.utils.event.get_event('master',
                                       __opts__['sock_dir'],
                                       __opts__['transport'],
                                       opts=__opts__,
                                       listen=False)
    try:
        items = pop(queue=queue, quantity=quantity, backend=backend)
    except SaltInvocationError as exc:
        error_txt = '{0}'.format(exc)
        salt.output.display_output(error_txt, 'nested', __opts__)
        return False

    data = {'items': items,
            'backend': backend,
            'queue': queue,
            }
    event.fire_event(data, tagify([queue, 'process'], prefix='queue'))

def reboot(name, conn=None):
    '''
    Reboot a single VM
    '''
    if not conn:
        conn = get_conn()   # pylint: disable-msg=E0602

    node = get_node(conn, name)
    if node is None:
        log.error('Unable to find the VM {0}'.format(name))
        return False
    log.info('Rebooting VM: {0}'.format(name))
    ret = conn.reboot_node(node)
    if ret:
        log.info('Rebooted VM: {0}'.format(name))
        # Fire reboot action
        event = salt.utils.event.SaltEvent('master', __opts__['sock_dir'])
        try:
            event.fire_event(
                '{0} has been rebooted'.format(name), 'salt-cloud'
            )
        except ValueError:
            # We're using develop or a 0.17.x version of salt
            event.fire_event(
                {name: '{0} has been rebooted'.format(name)}, 'salt-cloud'
            )
        return True

    log.error('Failed to reboot VM: {0}'.format(name))
    return False

def master(master_ip=None, connected=True):
    '''
    .. versionadded:: 2014.7.0

    Fire an event if the minion gets disconnected from its master. This
    function is meant to be run via a scheduled job from the minion.

    CLI Example:

    .. code-block:: bash

        salt '*' status.master
    '''
    # the default publishing port
    port = 4505

    if __salt__['config.get']('publish_port') != '':
        port = int(__salt__['config.get']('publish_port'))

    ips = _remote_port_tcp(port)

    if connected:
        if master_ip not in ips:
            event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
            event.fire_event({'master': master_ip}, '__master_disconnected')
    else:
        if master_ip in ips:
            event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
            event.fire_event({'master': master_ip}, '__master_connected')

def _proc_function(self, fun, low, user, tag, jid):
    '''
    Run this method in a multiprocess target to execute the function in a
    multiprocess and fire the return data on the event bus
    '''
    salt.utils.daemonize()

    data = {'fun': '{0}.{1}'.format(self.client, fun),
            'jid': jid,
            'user': user,
            }
    event = salt.utils.event.get_event(
        'master',
        self.opts['sock_dir'],
        self.opts['transport'],
        opts=self.opts,
        listen=False)
    event.fire_event(data, tagify('new', base=tag))

    try:
        data['return'] = self.low(fun, low)
        data['success'] = True
    except Exception as exc:
        data['return'] = 'Exception occurred in {0} {1}: {2}: {3}'.format(
            self.client,
            fun,
            exc.__class__.__name__,
            exc,
        )
        data['success'] = False
    data['user'] = user

    event.fire_event(data, tagify('ret', base=tag))
    # if we fired an event, make sure to delete the event object.
    # This will ensure that we call destroy, which will do the 0MQ linger
    del event

def _proc_runner(self, fun, low, user, tag, jid):
    '''
    Run this method in a multiprocess target to execute the runner in a
    multiprocess and fire the return data on the event bus
    '''
    salt.utils.daemonize()
    event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
    data = {'fun': 'runner.{0}'.format(fun),
            'jid': jid,
            'user': user,
            }
    event.fire_event(data, tagify('new', base=tag))

    try:
        data['return'] = self.low(fun, low)
        data['success'] = True
    except Exception as exc:
        data['return'] = 'Exception occurred in runner {0}: {1}: {2}'.format(
            fun,
            exc.__class__.__name__,
            exc,
        )
        data['success'] = False
    data['user'] = user
    event.fire_event(data, tagify('ret', base=tag))
    # this is a workaround because process reaping is defeating 0MQ linger
    time.sleep(2.0)  # delay so 0MQ event gets out before runner process reaped

def _proc_runner(self, fun, low, user, tag, jid):
    '''
    Run this method in a multiprocess target to execute the runner in a
    multiprocess and fire the return data on the event bus
    '''
    salt.utils.daemonize()
    event = salt.utils.event.get_event('master',
                                       self.opts['sock_dir'],
                                       self.opts['transport'],
                                       listen=False)
    data = {'fun': 'runner.{0}'.format(fun),
            'jid': jid,
            'user': user,
            }
    event.fire_event(data, tagify('new', base=tag))

    try:
        data['return'] = self.low(fun, low)
        data['success'] = True
    except Exception as exc:
        data['return'] = 'Exception occurred in runner {0}: {1}: {2}'.format(
            fun,
            exc.__class__.__name__,
            exc,
        )
        data['success'] = False
    data['user'] = user
    event.fire_event(data, tagify('ret', base=tag))
    # this is a workaround because process reaping is defeating 0MQ linger
    time.sleep(2.0)  # delay so 0MQ event gets out before runner process reaped

def reboot(name, conn=None):
    '''
    Reboot a single VM
    '''
    if not conn:
        conn = get_conn()   # pylint: disable-msg=E0602

    node = get_node(conn, name)
    if node is None:
        log.error('Unable to find the VM {0}'.format(name))
        return False
    log.info('Rebooting VM: {0}'.format(name))
    ret = conn.reboot_node(node)
    if ret:
        log.info('Rebooted VM: {0}'.format(name))
        # Fire reboot action
        event = salt.utils.event.SaltEvent('master', __opts__['sock_dir'])
        try:
            event.fire_event('{0} has been rebooted'.format(name), 'salt-cloud')
        except ValueError:
            # We're using develop or a 0.17.x version of salt
            event.fire_event({name: '{0} has been rebooted'.format(name)},
                             'salt-cloud')
        return True

    log.error('Failed to reboot VM: {0}'.format(name))
    return False

def returner(ret):
    '''
    Send the return data to the Salt Master over the encrypted 0MQ bus with a
    custom tag for 3rd party script filtering.
    '''
    # get opts from minion config file, supports minion.d drop dir!
    opts = minion_config(os.path.join(syspaths.CONFIG_DIR, 'minion'))

    # TODO: this needs to be customizable!
    tag = 'third-party'

    # add custom tag to return data for filtering
    ret['tag'] = tag

    # multi event example, supports a list of event ret objects.
    # single event does not currently expand/filter properly on Master side.
    package = {
        #'id': opts['id'],
        'events': [ret],
        'tag': None,
        'pretag': None,
        'data': None,
    }

    # opts must contain a valid minion ID, else it binds to an invalid 0MQ socket.
    event = salt.utils.event.SaltEvent('minion', **opts)

    # Fire the event payload with the 'fire_master' tag, which triggers the
    # salt-minion daemon to forward the payload to the master event bus!
    event.fire_event(package, 'fire_master')

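# Hedged companion sketch (not part of the snippet above): a master-side
# consumer filtering on the custom 'third-party' tag. It assumes the default
# master sock_dir and that the master re-fires each entry of the 'events'
# list under its own 'tag'; adjust both for your Salt version.
import salt.utils.event

master_event = salt.utils.event.MasterEvent('/var/run/salt/master')
while True:
    ret = master_event.get_event(wait=30, tag='third-party', full=True)
    if ret is None:
        continue  # no matching event within the wait window; keep polling
    print(ret['tag'], ret['data'])
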
def update(self):
    """
    COPIED FROM SALT

    changed: salt.utils.fopen() call opens the file in binary mode instead.
    """
    # data for the fileserver event
    data = {"changed": self.clear_old_remotes(), "backend": "gitfs"}
    if self.fetch_remotes():
        data["changed"] = True

    if data["changed"] is True or not os.path.isfile(self.env_cache):
        env_cachedir = os.path.dirname(self.env_cache)
        if not os.path.exists(env_cachedir):
            os.makedirs(env_cachedir)
        new_envs = self.envs(ignore_cache=True)
        serial = salt.payload.Serial(self.opts)
        with salt.utils.fopen(self.env_cache, "wb+") as fp_:
            fp_.write(serial.dumps(new_envs))
            logger.trace("Wrote env cache data to {0}".format(self.env_cache))

    # if there is a change, fire an event
    if self.opts.get("fileserver_events", False):
        event = salt.utils.event.get_event(
            "master",
            self.opts["sock_dir"],
            self.opts["transport"],
            opts=self.opts,
            listen=False,
        )
        event.fire_event(data, tagify(["gitfs", "update"], prefix="fileserver"))
    try:
        salt.fileserver.reap_fileserver_cache_dir(self.hash_cachedir, self.find_file)
    except (OSError, IOError):
        # Hash file won't exist if no files have yet been served up
        pass

def destroy(name, conn=None):
    '''
    Delete a single VM
    '''
    if not conn:
        conn = get_conn()

    node = get_node(conn, name)
    if node is None:
        log.error('Unable to find the VM {0}'.format(name))
        return False
    log.info('Destroying VM: {0}'.format(name))
    ret = conn.destroy_node(node)
    if ret:
        log.info('Destroyed VM: {0}'.format(name))
        # Fire destroy action
        event = salt.utils.event.SaltEvent(
            'master', __opts__['sock_dir']
        )
        event.fire_event('{0} has been destroyed'.format(name), 'salt-cloud')
        if __opts__['delete_sshkeys'] is True:
            saltcloud.utils.remove_sshkey(node.public_ips[0])
        return True

    log.error('Failed to destroy VM: {0}'.format(name))
    return False

def process_queue(queue, quantity=1, backend='sqlite'):
    '''
    Pop items off a queue and create an event on the Salt event bus to be
    processed by a Reactor.

    CLI Example:

    .. code-block:: bash

        salt-run queue.process_queue myqueue
        salt-run queue.process_queue myqueue 6
        salt-run queue.process_queue myqueue all backend=sqlite
    '''
    # get ready to send an event
    event = salt.utils.event.get_event(
        'master',
        __opts__['sock_dir'],
        __opts__['transport'],
        opts=__opts__,
        listen=False)
    try:
        items = pop(queue=queue, quantity=quantity, backend=backend)
    except SaltInvocationError as exc:
        error_txt = '{0}'.format(exc)
        __progress__(error_txt)
        return False

    data = {'items': items,
            'backend': backend,
            'queue': queue,
            }
    event.fire_event(data, tagify([queue, 'process'], prefix='queue'))

def fire_exception(exc, opts, job=None, node="minion"):
    """
    Fire raw exception across the event bus
    """
    if job is None:
        job = {}
    event = salt.utils.event.SaltEvent(node, opts=opts, listen=False)
    event.fire_event(pack_exception(exc), "_salt_error")

def fire_exception(exc, opts, job=None, node='minion'):
    '''
    Fire raw exception across the event bus
    '''
    if job is None:
        job = {}
    event = salt.utils.event.SaltEvent(node, opts=opts, listen=False)
    event.fire_event(pack_exception(exc), '_salt_error')

def fire_exception(exc, opts, job=None, node='minion'):
    '''
    Fire raw exception across the event bus
    '''
    if job is None:
        job = {}
    event = salt.utils.event.SaltEvent(node, opts=opts)
    event.fire_event(pack_exception(exc), '_salt_error')

def test_ping_reaction(self):
    """
    Fire an event on the master and ensure that it pings the minion
    """
    # Create event bus connection
    with salt.utils.event.get_event(
        "minion", sock_dir=self.minion_opts["sock_dir"], opts=self.minion_opts
    ) as event:
        event.fire_event({"a": "b"}, "/test_event")

        self.assertMinionEventReceived({"a": "b"}, timeout=30)

def fire_event(key, msg, tag, args=None, sock_dir='/var/run/salt/master'):
    # Fire deploy action
    event = salt.utils.event.SaltEvent('master', sock_dir)
    try:
        event.fire_event(msg, tag)
    except ValueError:
        # We're using develop or a 0.17.x version of salt
        if isinstance(args, dict):
            args[key] = msg
        else:
            args = {key: msg}
        event.fire_event(args, tag)

def _fire_event(self, operation):
    """
    Fire optional Salt event for some operations
    """
    settings = _skip_dunder(self.settings)
    tags = settings['event'].split('/')
    if self.include_operation:
        tags += operation
    log.info("firing event for {}".format("/".join(tags)))
    event = salt.utils.event.SaltEvent('master', __opts__['sock_dir'])
    event.fire_event(settings, "/".join(tags))

def render(dict_data, saltenv="", sls="", **kwargs):
    event = salt.utils.event.get_master_event(
        __opts__, __opts__["sock_dir"], listen=False
    )
    lowstate = {}
    body = json.loads(kwargs["data"]["body"])
    headers = kwargs["data"]["headers"]
    if headers["X-Github-Event"] != "push":
        log.warning("Skipping %s event", headers["X-Github-Event"])
        return lowstate
    try:
        event.fire_event(
            {
                "repo": body["repository"]["full_name"],
                "ref": body["ref"],
                "sls": sls,
                "saltenv": saltenv,
            },
            "autodeploy/check/github",
        )
    except Exception:
        log.exception("Unable to fire check event")
    for repo in dict_data:
        if repo != body["repository"]["full_name"]:
            log.debug("%s != %s", repo, body["repository"]["full_name"])
            continue
        for ref in dict_data[repo]:
            if ref != body["ref"]:
                log.debug("%s != %s", ref, body["ref"])
                continue
            for state in dict_data[repo][ref]:
                lowstate["%s:%s:%s" % (repo, ref, state)] = dict_data[repo][ref][state]
                event.fire_event(
                    {
                        "repo": repo,
                        "ref": ref,
                        "state": dict_data[repo][ref][state],
                        "sls": sls,
                        "saltenv": saltenv,
                    },
                    "autodeploy/found/github",
                )
    return lowstate

def master():
    '''
    Fire an event if the minion gets disconnected from its master. This
    function is meant to be run via a scheduled job from the minion.
    '''
    ip = __salt__['config.option']('master')
    port = int(__salt__['config.option']('publish_port'))
    ips = remote_port_tcp(port)

    if ip not in ips:
        event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
        event.fire_event({'master': ip}, '__master_disconnected')

def master(master=None, connected=True):
    '''
    .. versionadded:: 2014.7.0

    Return the connection status with master. Fire an event if the
    connection to master is not as expected. This function is meant to be
    run via a scheduled job from the minion. If master_ip is an
    FQDN/Hostname, it must be resolvable to a valid IPv4 address.

    CLI Example:

    .. code-block:: bash

        salt '*' status.master
    '''
    # the default publishing port
    port = 4505
    master_ips = None

    if __salt__['config.get']('publish_port') != '':
        port = int(__salt__['config.get']('publish_port'))

    # Check if we have FQDN/hostname defined as master
    # address and try resolving it first. _remote_port_tcp
    # only works with IP-addresses.
    if master is not None:
        master_ips = _host_to_ips(master)

    master_connection_status = False
    if master_ips:
        ips = _remote_port_tcp(port)
        for master_ip in master_ips:
            if master_ip in ips:
                master_connection_status = True
                break

    if master_connection_status is not connected:
        event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
        if master_connection_status:
            event.fire_event({'master': master},
                             salt.minion.master_event(type='connected'))
        else:
            event.fire_event({'master': master},
                             salt.minion.master_event(type='disconnected'))

    return master_connection_status

def fire(data, tag, timeout=None):
    '''
    Fire an event on the local minion event bus. Data must be formed as a dict.

    CLI Example:

    .. code-block:: bash

        salt '*' event.fire '{"data":"my event data"}' 'tag'
    '''
    if timeout is None:
        timeout = 60000
    else:
        timeout = timeout * 1000

    try:
        with salt.utils.event.get_event(__opts__.get('__role', 'minion'),
                                        sock_dir=__opts__['sock_dir'],
                                        transport=__opts__['transport'],
                                        opts=__opts__,
                                        keep_loop=True,
                                        listen=False) as event:
            return event.fire_event(data, tag, timeout=timeout)
    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        log.debug(lines)
        return False

def update(self):
    """
    COPIED FROM SALT

    changed: salt.utils.fopen() call opens the file in binary mode instead.
    """
    # data for the fileserver event
    data = {'changed': self.clear_old_remotes(),
            'backend': 'gitfs'}
    if self.fetch_remotes():
        data['changed'] = True

    if data['changed'] is True or not os.path.isfile(self.env_cache):
        env_cachedir = os.path.dirname(self.env_cache)
        if not os.path.exists(env_cachedir):
            os.makedirs(env_cachedir)
        new_envs = self.envs(ignore_cache=True)
        serial = salt.payload.Serial(self.opts)
        with salt.utils.fopen(self.env_cache, 'wb+') as fp_:
            fp_.write(serial.dumps(new_envs))
            logger.trace('Wrote env cache data to {0}'.format(self.env_cache))

    # if there is a change, fire an event
    if self.opts.get('fileserver_events', False):
        event = salt.utils.event.get_event(
            'master',
            self.opts['sock_dir'],
            self.opts['transport'],
            opts=self.opts,
            listen=False,
        )
        event.fire_event(
            data,
            tagify(['gitfs', 'update'], prefix='fileserver')
        )
    try:
        salt.fileserver.reap_fileserver_cache_dir(
            self.hash_cachedir,
            self.find_file
        )
    except (OSError, IOError):
        # Hash file won't exist if no files have yet been served up
        pass

def master(master=None, connected=True):
    '''
    .. versionadded:: 2014.7.0

    Return the connection status with master. Fire an event if the
    connection to master is not as expected. This function is meant to be
    run via a scheduled job from the minion. If master_ip is an
    FQDN/Hostname, it must be resolvable to a valid IPv4 address.

    CLI Example:

    .. code-block:: bash

        salt '*' status.master
    '''
    master_ips = None

    if master:
        master_ips = _host_to_ips(master)

    if not master_ips:
        return

    master_connection_status = False
    port = __salt__['config.get']('publish_port', default=4505)
    connected_ips = _remote_port_tcp(port)

    # Get connection status for master
    for master_ip in master_ips:
        if master_ip in connected_ips:
            master_connection_status = True
            break

    # Connection to master is not as expected
    if master_connection_status is not connected:
        event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
        if master_connection_status:
            event.fire_event({'master': master},
                             salt.minion.master_event(type='connected'))
        else:
            event.fire_event({'master': master},
                             salt.minion.master_event(type='disconnected'))

    return master_connection_status

def ping_master(master):
    '''
    .. versionadded:: 2016.3.0

    Sends ping request to the given master. Fires '__master_failback' event
    on success. Returns bool result.

    CLI Example:

    .. code-block:: bash

        salt '*' status.ping_master localhost
    '''
    if master is None or master == '':
        return False

    opts = copy.deepcopy(__opts__)
    opts['master'] = master
    if 'master_ip' in opts:  # avoid 'master ip changed' warning
        del opts['master_ip']
    opts.update(salt.minion.prep_ip_port(opts))
    try:
        opts.update(salt.minion.resolve_dns(opts, fallback=False))
    except Exception:
        return False

    timeout = opts.get('auth_timeout', 60)
    load = {'cmd': 'ping'}

    result = False
    channel = salt.transport.client.ReqChannel.factory(opts, crypt='clear')
    try:
        channel.send(load, tries=0, timeout=timeout)
        result = True
    except Exception:
        pass

    if result:
        event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
        event.fire_event({'master': master},
                         salt.minion.master_event(type='failback'))

    return result

def master(master=None, connected=True):
    '''
    .. versionadded:: 2014.7.0

    Fire an event if the minion gets disconnected from its master. This
    function is meant to be run via a scheduled job from the minion. If
    master_ip is an FQDN/Hostname, it must be resolvable to a valid IPv4
    address.

    CLI Example:

    .. code-block:: bash

        salt '*' status.master
    '''
    # the default publishing port
    port = 4505
    master_ip = None

    if __salt__['config.get']('publish_port') != '':
        port = int(__salt__['config.get']('publish_port'))

    # Check if we have FQDN/hostname defined as master
    # address and try resolving it first. _remote_port_tcp
    # only works with IP-addresses.
    if master is not None:
        tmp_ip = _host_to_ip(master)
        if tmp_ip is not None:
            master_ip = tmp_ip

    ips = _remote_port_tcp(port)

    if connected:
        if master_ip not in ips:
            event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
            event.fire_event({'master': master}, '__master_disconnected')
    else:
        if master_ip in ips:
            event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
            event.fire_event({'master': master}, '__master_connected')

def fire_event(key, msg, tag, args=None, sock_dir=None):
    # Fire deploy action
    if sock_dir is None:
        sock_dir = os.path.join(syspaths.SOCK_DIR, 'master')
    event = salt.utils.event.SaltEvent('master', sock_dir)
    try:
        event.fire_event(msg, tag)
    except ValueError:
        # We're using develop or a 0.17.x version of salt
        if isinstance(args, dict):
            args[key] = msg
        else:
            args = {key: msg}
        event.fire_event(args, tag)

    # https://github.com/zeromq/pyzmq/issues/173#issuecomment-4037083
    # Assertion failed: get_load () == 0 (poller_base.cpp:32)
    time.sleep(0.025)

def _fire_event(self, result, operation):
    """
    Fire optional Salt event for some operations. Always send an event
    unless 'fire_on' is set; then, only send an event when the result
    matches.
    """
    if 'fire' in self.settings and not self.settings['fire']:
        return
    settings = _skip_dunder(self.settings)
    tags = settings['event'].split('/')
    if self.include_operation:
        tags += operation
    log.info("firing event for {}".format("/".join(tags)))
    if 'fire_on' not in settings or settings['fire_on'] == result:
        event = salt.utils.event.SaltEvent('master', __opts__['sock_dir'])
        event.fire_event(settings, "/".join(tags))

def queue(**kwargs):
    """
    Fire an event if a queue is empty
    """
    # The same event drives the deletion and this check. Give sqlite two
    # seconds to complete its update.
    time.sleep(2)

    # defaults
    settings = {
        'backend': 'sqlite',
        'queue': 'prep',
        'next': 'discovery'
    }
    settings.update(kwargs)

    queue_funcs = salt.loader.queues(__opts__)
    cmd = '{0}.list_length'.format(settings['backend'])
    if cmd not in queue_funcs:
        raise SaltInvocationError('Function "{0}" is not available'.format(cmd))
    ret = queue_funcs[cmd](queue=settings['queue'])
    if ret == 0:
        event = salt.utils.event.get_event(
            'master',
            __opts__['sock_dir'],
            __opts__['transport'],
            opts=__opts__,
            listen=False)
        # skip dunder keys
        settings = {k: v for k, v in settings.iteritems()
                    if not k.startswith('__')}
        event.fire_event(settings,
                         tagify(['start', settings['next'], 'stage'], prefix='ceph'))
        log.info("firing event for stage {}".format(settings['next']))
    else:
        log.info("size of queue {} is {}".format(settings['queue'], ret))
    return ret

def destroy(name, conn=None):
    '''
    Delete a single VM
    '''
    salt.cloud.utils.fire_event(
        'event',
        'destroying instance',
        'salt.cloud.destroy',
        {'name': name},
    )

    if not conn:
        conn = get_conn()   # pylint: disable-msg=E0602

    node = get_node(conn, name)
    if node is None:
        log.error('Unable to find the VM {0}'.format(name))
        return False
    log.info('Destroying VM: {0}'.format(name))
    ret = conn.destroy_node(node)
    if ret:
        log.info('Destroyed VM: {0}'.format(name))
        # Fire destroy action
        event = salt.utils.event.SaltEvent('master', __opts__['sock_dir'])
        try:
            salt.cloud.utils.fire_event(
                'event',
                'destroyed instance',
                'salt.cloud.destroy',
                {'name': name},
            )
        except ValueError:
            # We're using develop or a 0.17.x version of salt
            event.fire_event(
                {name: '{0} has been destroyed'.format(name)}, 'salt-cloud'
            )

        if __opts__['delete_sshkeys'] is True:
            salt.cloud.utils.remove_sshkey(node.public_ips[0])
        return True

    log.error('Failed to destroy VM: {0}'.format(name))
    return False

def master(master=None, connected=True):
    '''
    .. versionadded:: 2014.7.0

    Return the connection status with master. Fire an event if the
    connection to master is not as expected. This function is meant to be
    run via a scheduled job from the minion. If master_ip is an
    FQDN/Hostname, it must be resolvable to a valid IPv4 address.

    CLI Example:

    .. code-block:: bash

        salt '*' status.master
    '''
    master_ips = None

    if master:
        master_ips = _host_to_ips(master)

    if not master_ips:
        return

    master_connection_status = False
    connected_ips = _connected_masters()

    # Get connection status for master
    for master_ip in master_ips:
        if master_ip in connected_ips:
            master_connection_status = True
            break

    # Connection to master is not as expected
    if master_connection_status is not connected:
        event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
        if master_connection_status:
            event.fire_event({'master': master},
                             salt.minion.master_event(type='connected'))
        else:
            event.fire_event({'master': master},
                             salt.minion.master_event(type='disconnected'))

    return master_connection_status

def master(master=None, connected=True):
    '''
    .. versionadded:: 2014.7.0

    Fire an event if the minion gets disconnected from its master. This
    function is meant to be run via a scheduled job from the minion. If
    master_ip is an FQDN/Hostname, it must be resolvable to a valid IPv4
    address.

    CLI Example:

    .. code-block:: bash

        salt '*' status.master
    '''
    # the default publishing port
    port = 4505
    master_ip = None

    if __salt__['config.get']('publish_port') != '':
        port = int(__salt__['config.get']('publish_port'))

    # Check if we have FQDN/hostname defined as master
    # address and try resolving it first. _remote_port_tcp
    # only works with IP-addresses.
    if master is not None:
        tmp_ip = _host_to_ip(master)
        if tmp_ip is not None:
            master_ip = tmp_ip

    ips = _remote_port_tcp(port)

    if connected:
        if master_ip not in ips:
            event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
            event.fire_event({'master': master}, '__master_disconnected')
    else:
        if master_ip in ips:
            event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
            event.fire_event({'master': master}, '__master_connected')

def test_ping_reaction(event_listener, salt_minion):
    """
    Fire an event on the master and ensure that it pings the minion
    """
    event_tag = "/test_event"
    start_time = time.time()

    # Create event bus connection
    with salt.utils.event.get_event(
        "minion",
        sock_dir=salt_minion.config["sock_dir"],
        opts=salt_minion.config.copy(),
    ) as event:
        event.fire_event({"a": "b"}, event_tag)

    event_pattern = (salt_minion.id, event_tag)
    matched_events = event_listener.wait_for_events(
        [event_pattern], after_time=start_time, timeout=90
    )
    assert matched_events.found_all_events
    for event in matched_events:
        assert event.data == {"a": "b"}

def update(self):
    """
    COPIED FROM SALT

    changed: salt.utils.fopen() call opens the file in binary mode instead.
    """
    # data for the fileserver event
    data = {'changed': self.clear_old_remotes(), 'backend': 'gitfs'}
    if self.fetch_remotes():
        data['changed'] = True

    if data['changed'] is True or not os.path.isfile(self.env_cache):
        env_cachedir = os.path.dirname(self.env_cache)
        if not os.path.exists(env_cachedir):
            os.makedirs(env_cachedir)
        new_envs = self.envs(ignore_cache=True)
        serial = salt.payload.Serial(self.opts)
        with salt.utils.fopen(self.env_cache, 'wb+') as fp_:
            fp_.write(serial.dumps(new_envs))
            logger.trace('Wrote env cache data to {0}'.format(self.env_cache))

    # if there is a change, fire an event
    if self.opts.get('fileserver_events', False):
        event = salt.utils.event.get_event(
            'master',
            self.opts['sock_dir'],
            self.opts['transport'],
            opts=self.opts,
            listen=False,
        )
        event.fire_event(data, tagify(['gitfs', 'update'], prefix='fileserver'))
    try:
        salt.fileserver.reap_fileserver_cache_dir(self.hash_cachedir,
                                                  self.find_file)
    except (OSError, IOError):
        # Hash file won't exist if no files have yet been served up
        pass

def queue(**kwargs):
    """
    Fire an event if a queue is empty
    """
    # The same event drives the deletion and this check. Give sqlite two
    # seconds to complete its update.
    time.sleep(2)

    # defaults
    settings = {'backend': 'sqlite', 'queue': 'prep', 'next': 'discovery'}
    settings.update(kwargs)

    queue_funcs = salt.loader.queues(__opts__)
    cmd = '{0}.list_length'.format(settings['backend'])
    if cmd not in queue_funcs:
        raise SaltInvocationError(
            'Function "{0}" is not available'.format(cmd))
    ret = queue_funcs[cmd](queue=settings['queue'])
    if ret == 0:
        event = salt.utils.event.get_event('master',
                                           __opts__['sock_dir'],
                                           __opts__['transport'],
                                           opts=__opts__,
                                           listen=False)
        # skip dunder keys
        settings = {
            k: v
            for k, v in settings.iteritems() if not k.startswith('__')
        }
        event.fire_event(
            settings,
            tagify(['start', settings['next'], 'stage'], prefix='ceph'))
        log.info("firing event for stage {}".format(settings['next']))
    else:
        log.info("size of queue {} is {}".format(settings['queue'], ret))
    return ret

def _proc_runner(self, tag, fun, low):
    '''
    Run this method in a multiprocess target to execute the runner in a
    multiprocess and fire the return data on the event bus
    '''
    salt.utils.daemonize()
    event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
    data = {'fun': 'runner.{0}'.format(fun),
            'jid': low['jid'],
            }
    event.fire_event(data, tagify('new', base=tag))
    try:
        data['ret'] = self.low(fun, low)
        data['success'] = True
    except Exception as exc:
        data['ret'] = 'Exception occurred in runner {0}: {1}'.format(
            fun,
            exc,
        )
        data['success'] = False
    event.fire_event(data, tagify('ret', base=tag))

def master():
    '''
    .. versionadded:: Helium

    Fire an event if the minion gets disconnected from its master. This
    function is meant to be run via a scheduled job from the minion.

    CLI Example:

    .. code-block:: bash

        salt '*' status.master
    '''
    ip = __salt__['config.option']('master')
    port = int(__salt__['config.option']('publish_port'))
    ips = _remote_port_tcp(port)

    if ip not in ips:
        event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
        event.fire_event({'master': ip}, '__master_disconnected')

def _proc_runner(self, tag, fun, low, user):
    '''
    Run this method in a multiprocess target to execute the runner in a
    multiprocess and fire the return data on the event bus
    '''
    salt.utils.daemonize()
    event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
    data = {'fun': 'runner.{0}'.format(fun),
            'jid': low['jid'],
            'user': user,
            }
    event.fire_event(data, tagify('new', base=tag))
    try:
        data['ret'] = self.low(fun, low)
        data['success'] = True
    except Exception as exc:
        data['ret'] = 'Exception occurred in runner {0}: {1}'.format(
            fun,
            exc,
        )
        data['success'] = False
    data['user'] = user
    event.fire_event(data, tagify('ret', base=tag))

def run(self):
    """
    Enter into the server loop
    """
    salt.utils.process.appendproctitle(self.__class__.__name__)

    # instantiate some classes inside our new process
    with salt.utils.event.get_event(
        self.opts["__role"],
        self.opts["sock_dir"],
        self.opts["transport"],
        opts=self.opts,
        listen=True,
    ) as event:
        self.wrap = ReactWrap(self.opts)

        for data in event.iter_events(full=True):
            # skip all events fired by ourselves
            if data["data"].get("user") == self.wrap.event_user:
                continue

            if data["tag"].endswith("salt/reactors/manage/add"):
                _data = data["data"]
                res = self.add_reactor(_data["event"], _data["reactors"])
                event.fire_event(
                    {"reactors": self.list_all(), "result": res},
                    "salt/reactors/manage/add-complete",
                )
            elif data["tag"].endswith("salt/reactors/manage/delete"):
                _data = data["data"]
                res = self.delete_reactor(_data["event"])
                event.fire_event(
                    {"reactors": self.list_all(), "result": res},
                    "salt/reactors/manage/delete-complete",
                )
            elif data["tag"].endswith("salt/reactors/manage/list"):
                event.fire_event(
                    {"reactors": self.list_all()},
                    "salt/reactors/manage/list-results",
                )
            else:
                reactors = self.list_reactors(data["tag"])
                if not reactors:
                    continue
                chunks = self.reactions(data["tag"], data["data"], reactors)
                if chunks:
                    try:
                        self.call_reactions(chunks)
                    except SystemExit:
                        log.warning("Exit ignored by reactor")

def reboot(name, conn=None):
    '''
    Reboot a single VM
    '''
    if not conn:
        conn = get_conn()

    node = get_node(conn, name)
    if node is None:
        log.error('Unable to find the VM {0}'.format(name))
        return False
    log.info('Rebooting VM: {0}'.format(name))
    ret = conn.reboot_node(node)
    if ret:
        log.info('Rebooted VM: {0}'.format(name))
        # Fire reboot action
        event = salt.utils.event.SaltEvent(
            'master', __opts__['sock_dir']
        )
        event.fire_event('{0} has been rebooted'.format(name), 'salt-cloud')
        return True

    log.error('Failed to reboot VM: {0}'.format(name))
    return False

def fire(data, tag):
    '''
    Fire an event on the local minion event bus. Data must be formed as a dict.

    CLI Example:

    .. code-block:: bash

        salt '*' event.fire '{"data":"my event data"}' 'tag'
    '''
    try:
        event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
        return event.fire_event(data, tag)
    except Exception:
        return False

def run(self):
    '''
    Enter into the server loop
    '''
    salt.utils.process.appendproctitle(self.__class__.__name__)

    # instantiate some classes inside our new process
    with salt.utils.event.get_event(self.opts['__role'],
                                    self.opts['sock_dir'],
                                    self.opts['transport'],
                                    opts=self.opts,
                                    listen=True) as event:
        self.wrap = ReactWrap(self.opts)

        for data in event.iter_events(full=True):
            # skip all events fired by ourselves
            if data['data'].get('user') == self.wrap.event_user:
                continue

            if data['tag'].endswith('salt/reactors/manage/add'):
                _data = data['data']
                res = self.add_reactor(_data['event'], _data['reactors'])
                event.fire_event(
                    {'reactors': self.list_all(), 'result': res},
                    'salt/reactors/manage/add-complete')
            elif data['tag'].endswith('salt/reactors/manage/delete'):
                _data = data['data']
                res = self.delete_reactor(_data['event'])
                event.fire_event(
                    {'reactors': self.list_all(), 'result': res},
                    'salt/reactors/manage/delete-complete')
            elif data['tag'].endswith('salt/reactors/manage/list'):
                event.fire_event(
                    {'reactors': self.list_all()},
                    'salt/reactors/manage/list-results')
            else:
                reactors = self.list_reactors(data['tag'])
                if not reactors:
                    continue
                chunks = self.reactions(data['tag'], data['data'], reactors)
                if chunks:
                    try:
                        self.call_reactions(chunks)
                    except SystemExit:
                        log.warning('Exit ignored by reactor')

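# Hedged driver sketch for the management branch above: fire a
# 'salt/reactors/manage/list' event on the master bus and read back the
# 'list-results' reply. The sock_dir path is an assumption, and this relies
# on the Reactor process above actually consuming the master bus.
import salt.utils.event

bus = salt.utils.event.MasterEvent('/var/run/salt/master')
bus.fire_event({}, 'salt/reactors/manage/list')
reply = bus.get_event(wait=10, tag='salt/reactors/manage/list-results')
print(reply)
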
def fire(data, tag):
    '''
    Fire an event on the local minion event bus. Data must be formed as a dict.

    CLI Example:

    .. code-block:: bash

        salt '*' event.fire '{"data":"my event data"}' 'tag'
    '''
    try:
        with salt.utils.event.get_event('minion',  # was __opts__['id']
                                        sock_dir=__opts__['sock_dir'],
                                        transport=__opts__['transport'],
                                        opts=__opts__,
                                        listen=False) as event:
            return event.fire_event(data, tag)
    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        log.debug(lines)
        return False

def fire(data, tag):
    """
    Fire an event on the local minion event bus. Data must be formed as a dict.

    CLI Example:

    .. code-block:: bash

        salt '*' event.fire '{"data":"my event data"}' 'tag'
    """
    try:
        with salt.utils.event.get_event(
            "minion",  # was __opts__['id']
            sock_dir=__opts__["sock_dir"],
            opts=__opts__,
            listen=False,
        ) as event:
            return event.fire_event(data, tag)
    except Exception:  # pylint: disable=broad-except
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        log.debug(lines)
        return False

def fire(data, tag):
    '''
    Fire an event on the local minion event bus. Data must be formed as a dict.

    CLI Example:

    .. code-block:: bash

        salt '*' event.fire '{"data":"my event data"}' 'tag'
    '''
    try:
        event = salt.utils.event.get_event('minion',  # was __opts__['id']
                                           sock_dir=__opts__['sock_dir'],
                                           transport=__opts__['transport'],
                                           opts=__opts__,
                                           listen=False)
        return event.fire_event(data, tag)
    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        log.debug(lines)
        return False

import salt.utils.event

sock_dir = "/var/run/salt/master"
payload = {"sample_msg": "This is a test."}

event = salt.utils.event.SaltEvent("master", sock_dir)
event.fire_event(payload, "salt/mycustomtag")

# In another file: send an event with the same tag through the minion,
# using the salt.client.Caller interface instead of the raw event bus.
import salt.client

caller = salt.client.Caller()
caller.function("event.send", "salt/mycustomtag", {"foo": "bar"})

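# Minimal listener counterpart for the example above -- a sketch assuming the
# default master sock_dir; get_event() blocks for up to `wait` seconds and
# returns None on timeout.
import salt.utils.event

listener = salt.utils.event.MasterEvent("/var/run/salt/master")
data = listener.get_event(wait=10, tag="salt/mycustomtag")
if data is not None:
    print(data)
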
def store_job(opts, load, event=None, mminion=None):
    '''
    Store job information using the configured master_job_cache
    '''
    # Generate EndTime
    endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid())
    # If the return data is invalid, just ignore it
    if any(key not in load for key in ('return', 'jid', 'id')):
        return False
    if not salt.utils.verify.valid_id(opts, load['id']):
        return False

    if mminion is None:
        mminion = salt.minion.MasterMinion(opts, states=False, rend=False)

    job_cache = opts['master_job_cache']
    if load['jid'] == 'req':
        # The minion is returning a standalone job, request a jobid
        load['arg'] = load.get('arg', load.get('fun_args', []))
        load['tgt_type'] = 'glob'
        load['tgt'] = load['id']

        prep_fstr = '{0}.prep_jid'.format(opts['master_job_cache'])
        try:
            load['jid'] = mminion.returners[prep_fstr](nocache=load.get('nocache', False))
        except KeyError:
            emsg = "Returner '{0}' does not support function prep_jid".format(job_cache)
            log.error(emsg)
            raise KeyError(emsg)

        # save the load, since we don't have it
        saveload_fstr = '{0}.save_load'.format(job_cache)
        try:
            mminion.returners[saveload_fstr](load['jid'], load)
        except KeyError:
            emsg = "Returner '{0}' does not support function save_load".format(job_cache)
            log.error(emsg)
            raise KeyError(emsg)
    elif salt.utils.jid.is_jid(load['jid']):
        # Store the jid
        jidstore_fstr = '{0}.prep_jid'.format(job_cache)
        try:
            mminion.returners[jidstore_fstr](False, passed_jid=load['jid'])
        except KeyError:
            emsg = "Returner '{0}' does not support function prep_jid".format(job_cache)
            log.error(emsg)
            raise KeyError(emsg)

    if event:
        log.info('Got return from {id} for job {jid}'.format(**load))
        event.fire_event(load, salt.utils.event.tagify([load['jid'], 'ret', load['id']], 'job'))
        event.fire_ret_load(load)

    # if you have a job_cache, or an ext_job_cache, don't write to
    # the regular master cache
    if not opts['job_cache'] or opts.get('ext_job_cache'):
        return

    # do not cache job results if explicitly requested
    if load.get('jid') == 'nocache':
        log.debug('Ignoring job return with jid for caching {jid} from {id}'.format(**load))
        return

    # otherwise, write to the master cache
    savefstr = '{0}.save_load'.format(job_cache)
    getfstr = '{0}.get_load'.format(job_cache)
    fstr = '{0}.returner'.format(job_cache)
    if 'fun' not in load and load.get('return', {}):
        ret_ = load.get('return', {})
        if 'fun' in ret_:
            load.update({'fun': ret_['fun']})
        if 'user' in ret_:
            load.update({'user': ret_['user']})
    try:
        if 'jid' in load \
                and 'get_load' in mminion.returners \
                and not mminion.returners[getfstr](load.get('jid', '')):
            mminion.returners[savefstr](load['jid'], load)
        mminion.returners[fstr](load)

        updateetfstr = '{0}.update_endtime'.format(job_cache)
        if (opts.get('job_cache_store_endtime')
                and updateetfstr in mminion.returners):
            mminion.returners[updateetfstr](load['jid'], endtime)
    except KeyError:
        emsg = "Returner '{0}' does not support function returner".format(job_cache)
        log.error(emsg)
        raise KeyError(emsg)

def store_job(opts, load, event=None, mminion=None):
    '''
    Store job information using the configured master_job_cache
    '''
    # Generate EndTime
    endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid(opts))
    # If the return data is invalid, just ignore it
    if any(key not in load for key in ('return', 'jid', 'id')):
        return False
    if not salt.utils.verify.valid_id(opts, load['id']):
        return False

    if mminion is None:
        mminion = salt.minion.MasterMinion(opts, states=False, rend=False)

    job_cache = opts['master_job_cache']
    if load['jid'] == 'req':
        # The minion is returning a standalone job, request a jobid
        load['arg'] = load.get('arg', load.get('fun_args', []))
        load['tgt_type'] = 'glob'
        load['tgt'] = load['id']

        prep_fstr = '{0}.prep_jid'.format(opts['master_job_cache'])
        try:
            load['jid'] = mminion.returners[prep_fstr](
                nocache=load.get('nocache', False))
        except KeyError:
            emsg = "Returner '{0}' does not support function prep_jid".format(
                job_cache)
            log.error(emsg)
            raise KeyError(emsg)

        # save the load, since we don't have it
        saveload_fstr = '{0}.save_load'.format(job_cache)
        try:
            mminion.returners[saveload_fstr](load['jid'], load)
        except KeyError:
            emsg = "Returner '{0}' does not support function save_load".format(
                job_cache)
            log.error(emsg)
            raise KeyError(emsg)
    elif salt.utils.jid.is_jid(load['jid']):
        # Store the jid
        jidstore_fstr = '{0}.prep_jid'.format(job_cache)
        try:
            mminion.returners[jidstore_fstr](False, passed_jid=load['jid'])
        except KeyError:
            emsg = "Returner '{0}' does not support function prep_jid".format(
                job_cache)
            log.error(emsg)
            raise KeyError(emsg)

    if event:
        log.info('Got return from {id} for job {jid}'.format(**load))
        event.fire_event(
            load,
            salt.utils.event.tagify([load['jid'], 'ret', load['id']], 'job'))
        event.fire_ret_load(load)

    # if you have a job_cache, or an ext_job_cache, don't write to
    # the regular master cache
    if not opts['job_cache'] or opts.get('ext_job_cache'):
        return

    # do not cache job results if explicitly requested
    if load.get('jid') == 'nocache':
        log.debug(
            'Ignoring job return with jid for caching {jid} from {id}'.format(
                **load))
        return

    # otherwise, write to the master cache
    savefstr = '{0}.save_load'.format(job_cache)
    getfstr = '{0}.get_load'.format(job_cache)
    fstr = '{0}.returner'.format(job_cache)
    updateetfstr = '{0}.update_endtime'.format(job_cache)
    if 'fun' not in load and load.get('return', {}):
        ret_ = load.get('return', {})
        if 'fun' in ret_:
            load.update({'fun': ret_['fun']})
        if 'user' in ret_:
            load.update({'user': ret_['user']})

    # Try to reach returner methods
    try:
        savefstr_func = mminion.returners[savefstr]
        getfstr_func = mminion.returners[getfstr]
        fstr_func = mminion.returners[fstr]
    except KeyError as error:
        emsg = "Returner '{0}' does not support function {1}".format(
            job_cache, error)
        log.error(emsg)
        raise KeyError(emsg)

    if 'jid' in load \
            and 'get_load' in mminion.returners \
            and not mminion.returners[getfstr](load.get('jid', '')):
        mminion.returners[savefstr](load['jid'], load)
    mminion.returners[fstr](load)

    if (opts.get('job_cache_store_endtime')
            and updateetfstr in mminion.returners):
        mminion.returners[updateetfstr](load['jid'], endtime)

def master(master=None, connected=True):
    '''
    .. versionadded:: 2015.5.0

    Fire an event if the minion gets disconnected from its master. This
    function is meant to be run via a scheduled job from the minion. If
    master_ip is an FQDN/Hostname, it must be resolvable to a valid IPv4
    address.

    CLI Example:

    .. code-block:: bash

        salt '*' status.master
    '''
    def _win_remotes_on(port):
        '''
        Windows specific helper function.
        Returns set of ipv4 host addresses of remote established connections
        on local or remote tcp port.

        Parses output of shell 'netstat' to get connections

        PS C:> netstat -n -p TCP

        Active Connections

          Proto  Local Address          Foreign Address        State
          TCP    10.1.1.26:3389         10.1.1.1:4505          ESTABLISHED
          TCP    10.1.1.26:56862        10.1.1.10:49155        TIME_WAIT
          TCP    10.1.1.26:56868        169.254.169.254:80     CLOSE_WAIT
          TCP    127.0.0.1:49197        127.0.0.1:49198        ESTABLISHED
          TCP    127.0.0.1:49198        127.0.0.1:49197        ESTABLISHED
        '''
        remotes = set()
        try:
            data = subprocess.check_output(['netstat', '-n', '-p', 'TCP'])  # pylint: disable=minimum-python-version
        except subprocess.CalledProcessError:
            log.error('Failed netstat')
            raise

        lines = salt.utils.stringutils.to_str(data).split('\n')
        for line in lines:
            if 'ESTABLISHED' not in line:
                continue
            chunks = line.split()
            remote_host, remote_port = chunks[2].rsplit(':', 1)
            if int(remote_port) != port:
                continue
            remotes.add(remote_host)
        return remotes

    # the default publishing port
    port = 4505
    master_ips = None

    if master:
        master_ips = _host_to_ips(master)

    if not master_ips:
        return

    if __salt__['config.get']('publish_port') != '':
        port = int(__salt__['config.get']('publish_port'))

    master_connection_status = False
    connected_ips = _win_remotes_on(port)

    # Get connection status for master
    for master_ip in master_ips:
        if master_ip in connected_ips:
            master_connection_status = True
            break

    # Connection to master is not as expected
    if master_connection_status is not connected:
        event = salt.utils.event.get_event('minion', opts=__opts__, listen=False)
        if master_connection_status:
            event.fire_event({'master': master},
                             salt.minion.master_event(type='connected'))
        else:
            event.fire_event({'master': master},
                             salt.minion.master_event(type='disconnected'))

    return master_connection_status

def master(master=None, connected=True):
    '''
    .. versionadded:: 2015.5.0

    Fire an event if the minion gets disconnected from its master. This
    function is meant to be run via a scheduled job from the minion. If
    master_ip is an FQDN/Hostname, it must be resolvable to a valid IPv4
    address.

    CLI Example:

    .. code-block:: bash

        salt '*' status.master
    '''
    def _win_remotes_on(port):
        '''
        Windows specific helper function.
        Returns set of ipv4 host addresses of remote established connections
        on local or remote tcp port.

        Parses output of shell 'netstat' to get connections

        PS C:> netstat -n -p TCP

        Active Connections

          Proto  Local Address          Foreign Address        State
          TCP    10.1.1.26:3389         10.1.1.1:4505          ESTABLISHED
          TCP    10.1.1.26:56862        10.1.1.10:49155        TIME_WAIT
          TCP    10.1.1.26:56868        169.254.169.254:80     CLOSE_WAIT
          TCP    127.0.0.1:49197        127.0.0.1:49198        ESTABLISHED
          TCP    127.0.0.1:49198        127.0.0.1:49197        ESTABLISHED
        '''
        remotes = set()
        try:
            data = subprocess.check_output(['netstat', '-n', '-p', 'TCP'])  # pylint: disable=minimum-python-version
        except subprocess.CalledProcessError:
            log.error('Failed netstat')
            raise

        lines = salt.utils.to_str(data).split('\n')
        for line in lines:
            if 'ESTABLISHED' not in line:
                continue
            chunks = line.split()
            remote_host, remote_port = chunks[2].rsplit(':', 1)
            if int(remote_port) != port:
                continue
            remotes.add(remote_host)
        return remotes

    # the default publishing port
    port = 4505
    master_ip = None

    if __salt__['config.get']('publish_port') != '':
        port = int(__salt__['config.get']('publish_port'))

    # Check if we have FQDN/hostname defined as master
    # address and try resolving it first. _remote_port_tcp
    # only works with IP-addresses.
    if master is not None:
        tmp_ip = _host_to_ip(master)
        if tmp_ip is not None:
            master_ip = tmp_ip

    ips = _win_remotes_on(port)

    if connected:
        if master_ip not in ips:
            event = salt.utils.event.get_event(
                'minion', opts=__opts__, listen=False
            )
            event.fire_event({'master': master}, '__master_disconnected')
    else:
        if master_ip in ips:
            event = salt.utils.event.get_event(
                'minion', opts=__opts__, listen=False
            )
            event.fire_event({'master': master}, '__master_connected')

def deploy_script(host, port=22, timeout=900, username='******', password=None,
                  key_filename=None, script=None,
                  deploy_command='/tmp/deploy.sh', sudo=False, tty=None,
                  name=None, pub_key=None, sock_dir=None, provider=None,
                  conf_file=None, start_action=None, make_master=False,
                  master_pub=None, master_pem=None, master_conf=None,
                  minion_pub=None, minion_pem=None, minion_conf=None,
                  keep_tmp=False, script_args=None, ssh_timeout=15,
                  display_ssh_output=True, make_syndic=False):
    '''
    Copy a deploy script to a remote server, execute it, and remove it
    '''
    starttime = time.mktime(time.localtime())
    log.debug('Deploying {0} at {1}'.format(host, starttime))
    if wait_for_ssh(host=host, port=port, timeout=timeout):
        log.debug('SSH port {0} on {1} is available'.format(port, host))
        newtimeout = timeout - (time.mktime(time.localtime()) - starttime)
        if wait_for_passwd(host, port=port, username=username,
                           password=password, key_filename=key_filename,
                           ssh_timeout=ssh_timeout):
            log.debug(
                'Logging into {0}:{1} as {2}'.format(host, port, username)
            )
            newtimeout = timeout - (time.mktime(time.localtime()) - starttime)
            kwargs = {'hostname': host,
                      'port': port,
                      'username': username,
                      'timeout': ssh_timeout,
                      'display_ssh_output': display_ssh_output}
            if key_filename:
                log.debug('Using {0} as the key_filename'.format(key_filename))
                kwargs['key_filename'] = key_filename
            elif password:
                log.debug('Using {0} as the password'.format(password))
                kwargs['password'] = password

            try:
                log.debug('SSH connection to {0} successful'.format(host))
            except Exception as exc:
                log.error(
                    'There was an error in deploy_script: {0}'.format(exc)
                )

            if provider == 'ibmsce':
                subsys_command = (
                    'sed -i "s/#Subsystem/Subsystem/" '
                    '/etc/ssh/sshd_config'
                )
                root_cmd(subsys_command, tty, sudo, **kwargs)
                root_cmd('service sshd restart', tty, sudo, **kwargs)

            # Minion configuration
            if minion_pem:
                scp_file('/tmp/minion.pem', minion_pem, kwargs)
                root_cmd('chmod 600 /tmp/minion.pem', tty, sudo, **kwargs)

            if minion_pub:
                scp_file('/tmp/minion.pub', minion_pub, kwargs)

            if minion_conf:
                scp_file('/tmp/minion', minion_conf, kwargs)

            # Master configuration
            if master_pem:
                scp_file('/tmp/master.pem', master_pem, kwargs)
                root_cmd('chmod 600 /tmp/master.pem', tty, sudo, **kwargs)

            if master_pub:
                scp_file('/tmp/master.pub', master_pub, kwargs)

            if master_conf:
                scp_file('/tmp/master', master_conf, kwargs)

            # The actual deploy script
            if script:
                scp_file('/tmp/deploy.sh', script, kwargs)
                root_cmd('chmod +x /tmp/deploy.sh', tty, sudo, **kwargs)

            newtimeout = timeout - (time.mktime(time.localtime()) - starttime)
            queue = None
            process = None
            # Consider this code experimental. It causes Salt Cloud to wait
            # for the minion to check in, and then fire a startup event.
            if start_action:
                queue = multiprocessing.Queue()
                process = multiprocessing.Process(
                    target=lambda: check_auth(name=name, pub_key=pub_key,
                                              sock_dir=sock_dir,
                                              timeout=newtimeout, queue=queue)
                )
                log.debug('Starting new process to wait for salt-minion')
                process.start()

            # Run the deploy script
            if script:
                log.debug('Executing /tmp/deploy.sh')
                if make_syndic:
                    deploy_command += ' -S'
                if make_master:
                    deploy_command += ' -M'
                if 'bootstrap-salt' in script:
                    deploy_command += ' -c /tmp/'
                if script_args:
                    deploy_command += ' {0}'.format(script_args)
                root_cmd(deploy_command, tty, sudo, **kwargs)
                log.debug('Executed command {0}'.format(deploy_command))

                # Remove the deploy script
                if not keep_tmp:
                    root_cmd('rm /tmp/deploy.sh', tty, sudo, **kwargs)
                    log.debug('Removed /tmp/deploy.sh')

            if keep_tmp:
                log.debug('Not removing deployment files from /tmp/')

            # Remove minion configuration
            if not keep_tmp:
                if minion_pub:
                    root_cmd('rm /tmp/minion.pub', tty, sudo, **kwargs)
                    log.debug('Removed /tmp/minion.pub')
                if minion_pem:
                    root_cmd('rm /tmp/minion.pem', tty, sudo, **kwargs)
                    log.debug('Removed /tmp/minion.pem')
                if minion_conf:
                    root_cmd('rm /tmp/minion', tty, sudo, **kwargs)
                    log.debug('Removed /tmp/minion')

            # Remove master configuration
            if not keep_tmp:
                if master_pub:
                    root_cmd('rm /tmp/master.pub', tty, sudo, **kwargs)
                    log.debug('Removed /tmp/master.pub')
                if master_pem:
                    root_cmd('rm /tmp/master.pem', tty, sudo, **kwargs)
                    log.debug('Removed /tmp/master.pem')
                if master_conf:
                    root_cmd('rm /tmp/master', tty, sudo, **kwargs)
                    log.debug('Removed /tmp/master')

            if start_action:
                queuereturn = queue.get()
                process.join()
                if queuereturn and start_action:
                    #client = salt.client.LocalClient(conf_file)
                    #output = client.cmd_iter(
                    #    host, 'state.highstate', timeout=timeout
                    #)
                    #for line in output:
                    #    print(line)
                    log.info(
                        'Executing {0} on the salt-minion'.format(start_action)
                    )
                    root_cmd(
                        'salt-call {0}'.format(start_action), tty, sudo, **kwargs
                    )
                    log.info(
                        'Finished executing {0} on the salt-minion'.format(
                            start_action
                        )
                    )

            # Fire deploy action
            event = salt.utils.event.SaltEvent('master', sock_dir)
            event.fire_event(
                '{0} has been created at {1}'.format(name, host), 'salt-cloud'
            )
            return True
    return False

def send(tag, data=None):
    '''
    Send an event with the given tag and data.

    This is useful for sending events directly to the master from the shell
    with salt-run. It is also quite useful for sending events in orchestration
    states where the ``fire_event`` requisite isn't sufficient because it does
    not support sending custom data with the event.

    Note that event tags will *not* be namespaced like events sent with the
    ``fire_event`` requisite! Whereas events produced from ``fire_event`` are
    prefixed with ``salt/state_result/<jid>/<minion_id>/<name>``, events sent
    using this runner module will have no such prefix. Make sure your reactors
    don't expect a prefix!

    :param tag: the tag to send with the event
    :param data: an optional dictionary of data to send with the event

    CLI Example:

    .. code-block:: bash

        salt-run event.send my/custom/event '{"foo": "bar"}'

    Orchestration Example:

    .. code-block:: yaml

        # orch/command.sls

        run_a_command:
          salt.function:
            - name: cmd.run
            - tgt: my_minion
            - arg:
              - exit {{ pillar['exit_code'] }}

        send_success_event:
          salt.runner:
            - name: event.send
            - tag: my_event/success
            - data:
                foo: bar
            - require:
              - salt: run_a_command

        send_failure_event:
          salt.runner:
            - name: event.send
            - tag: my_event/failure
            - data:
                baz: qux
            - onfail:
              - salt: run_a_command

    .. code-block:: bash

        salt-run state.orchestrate orch.command pillar='{"exit_code": 0}'
        salt-run state.orchestrate orch.command pillar='{"exit_code": 1}'
    '''
    data = data or {}
    event = salt.utils.event.get_master_event(__opts__,
                                              __opts__['sock_dir'],
                                              listen=False)
    return event.fire_event(data, tag)

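# Hedged usage sketch for the runner above: calling event.send from Python via
# RunnerClient instead of the salt-run CLI. The master config path is an
# assumption; adjust to your deployment.
import salt.config
import salt.runner

opts = salt.config.master_config('/etc/salt/master')
runner = salt.runner.RunnerClient(opts)
# equivalent to: salt-run event.send my/custom/event '{"foo": "bar"}'
runner.cmd('event.send', ['my/custom/event', {'foo': 'bar'}])
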
# Import the proper library
import salt.utils.event

# Fire deploy action
sock_dir = '/var/run/salt/minion'
payload = {'sample-msg': 'this is a test',
           'example': 'this is the same test'}

event = salt.utils.event.SaltEvent('master', sock_dir)
event.fire_event(payload, 'tag')