def down(removekeys=False, tgt='*', tgt_type='glob',
         timeout=None, gather_job_timeout=None):
    '''
    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Print a list of all the down or unresponsive salt minions
    Optionally remove keys of down minions

    CLI Example:

    .. code-block:: bash

        salt-run manage.down
        salt-run manage.down removekeys=True
        salt-run manage.down tgt="webservers" tgt_type="nodegroup"
    '''
    ret = status(output=False, tgt=tgt, tgt_type=tgt_type,
                 timeout=timeout,
                 gather_job_timeout=gather_job_timeout).get('down', [])
    for minion in ret:
        if removekeys:
            wheel = salt.wheel.Wheel(__opts__)
            wheel.call_func('key.delete', match=minion)
    return ret
def down(removekeys=False, tgt='*', tgt_type='glob', expr_form=None):
    '''
    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Print a list of all the down or unresponsive salt minions
    Optionally remove keys of down minions

    CLI Example:

    .. code-block:: bash

        salt-run manage.down
        salt-run manage.down removekeys=True
        salt-run manage.down tgt="webservers" tgt_type="nodegroup"
    '''
    # remember to remove the expr_form argument from this function when
    # performing the cleanup on this deprecation.
    if expr_form is not None:
        _warn_expr_form()
        tgt_type = expr_form

    ret = status(output=False, tgt=tgt, tgt_type=tgt_type).get('down', [])
    for minion in ret:
        if removekeys:
            wheel = salt.wheel.Wheel(__opts__)
            wheel.call_func('key.delete', match=minion)
    return ret
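# The snippet above calls a module-level helper, _warn_expr_form(), that is
# not shown here. A minimal sketch of what such a helper might look like,
# assuming Salt's standard warn_until deprecation utility; the target release
# name ('Fluorine') and the exact wording are assumptions, not source code.
import salt.utils.versions


def _warn_expr_form():
    # Emit the usual Salt deprecation warning for the renamed argument.
    salt.utils.versions.warn_until(
        'Fluorine',
        'the target type should be passed using the \'tgt_type\' '
        'argument instead of \'expr_form\'. Support for using '
        '\'expr_form\' will be removed in Salt Fluorine.'
    )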
def process_minion_request(minion_id,
                           url="http://localhost:8001/workernode?state=1"):
    try:
        r = requests.get(url)
    except requests.exceptions.RequestException:
        log.error("Error when connecting to Dynamic Cluster.")
        return False
    else:
        wns = r.json()
        log.debug("wns %s" % wns)
        starting_ids = [wn['instance']['instance_name'] for wn in wns]
        wheel = salt.wheel.Wheel(__opts__)
        if minion_id in starting_ids:
            # The minion belongs to a node the cluster is starting: accept it.
            wheel.call_func('key.accept', match=minion_id)
        else:
            wheel.call_func('key.reject', match=minion_id)
        return True
def process_auth_event(self, data):
    global info
    if 'act' in data and data['act'] == "pend":
        minion_id = data['id']
        # A pending minion is only valid if it matches a worker node that is
        # currently starting or being configured.
        workernodes = [w for w in info.worker_nodes
                       if w.instance.instance_name == minion_id
                       and (w.state == WorkerNode.Starting
                            or w.state == WorkerNode.Configuring)]
        import salt.config
        import salt.wheel
        opts = salt.config.master_config('/etc/salt/master')
        wheel = salt.wheel.Wheel(opts)
        argument = {'match': minion_id}
        if workernodes:
            log.debug("node %s is a valid node, accept it" % minion_id)
            wheel.call_func('key.accept', **argument)
        else:
            log.debug("node %s is not a valid node, reject it" % minion_id)
            wheel.call_func('key.reject', **argument)
def down(removekeys=False):
    '''
    Print a list of all the down or unresponsive salt minions
    Optionally remove keys of down minions

    CLI Example:

    .. code-block:: bash

        salt-run manage.down
        salt-run manage.down removekeys=True
    '''
    ret = status(output=False).get('down', [])
    for minion in ret:
        if removekeys:
            wheel = salt.wheel.Wheel(__opts__)
            wheel.call_func('key.delete', match=minion)
    return ret
def down(removekeys=False): """ Print a list of all the down or unresponsive salt minions Optionally remove keys of down minions CLI Example: .. code-block:: bash salt-run manage.down salt-run manage.down removekeys=True """ ret = status(output=False).get("down", []) for minion in ret: if removekeys: wheel = salt.wheel.Wheel(__opts__) wheel.call_func("key.delete", match=minion) return ret
def clear_minions():
    try:
        master, err = config.get_salt_master_config()
        if err:
            raise Exception(err)
        opts = salt.config.master_config(master)
        wheel = salt.wheel.Wheel(opts)
        keys = wheel.call_func('key.list_all')
        minions = keys['minions']
        minion_list = []
        if minions:
            print "The following GRIDCells are currently connected :"
            for i, minion in enumerate(minions):
                print "%d. %s" % (i + 1, minion)
            done = False
            while not done:
                node_list_str = raw_input(
                    "Select the GRIDCells that you would like to remove "
                    "(Enter a comma separated list of the numbers) :")
                if node_list_str:
                    node_list = node_list_str.split(',')
                    if node_list:
                        error = False
                        for node_num_str in node_list:
                            try:
                                node_num = int(node_num_str)
                            except Exception:
                                print "Please enter only a comma separated list of numbers"
                                error = True
                                break
                        if not error:
                            done = True
                        if done:
                            for node_num in node_list:
                                # minion_list.append(minions[int(node_num)-1])
                                print "Removing GRIDCell %s" % minions[int(node_num) - 1]
                                wheel.call_func(
                                    'key.delete',
                                    match=minions[int(node_num) - 1])
                    else:
                        print "Please enter a comma separated list of numbers"
            # print 'selected minions : ', minion_list
        else:
def down(removekeys=False, tgt='*', expr_form='glob'):
    '''
    Print a list of all the down or unresponsive salt minions
    Optionally remove keys of down minions

    CLI Example:

    .. code-block:: bash

        salt-run manage.down
        salt-run manage.down removekeys=True
        salt-run manage.down tgt="webservers" expr_form="nodegroup"
    '''
    ret = status(output=False, tgt=tgt, expr_form=expr_form).get('down', [])
    for minion in ret:
        if removekeys:
            wheel = salt.wheel.Wheel(__opts__)
            wheel.call_func('key.delete', match=minion)
    return ret
def down(removekeys=False):
    '''
    Print a list of all the down or unresponsive salt minions
    Optionally remove keys of down minions

    CLI Example:

    .. code-block:: bash

        salt-run manage.down
        salt-run manage.down removekeys=True
    '''
    ret = status(output=False).get('down', [])
    for minion in ret:
        if removekeys:
            wheel = salt.wheel.Wheel(__opts__)
            wheel.call_func('key.delete', match=minion)
        else:
            salt.output.display_output(minion, '', __opts__)
    return ret
def destroy(**kwargs):
    cli = salt.client.LocalClient(__opts__['conf_file'])
    wheel = salt.wheel.Wheel(salt.config.master_config('/etc/salt/master'))
    event = salt.utils.event.MasterEvent('/var/run/salt/master')

    __id = kwargs['id']
    int_name = __id + '.ngw'

    _log("Destroy event caught, id: <%s>." % __id, tag="NGW-MANAGE")

    conn = psycopg2.connect(database="front", user="******",
                            password="******", host="db-precise")
    cur = conn.cursor()
    cur.execute(
        '''update instances set instanceeventaccepted = True
           where instanceid = %s''', [__id])
    conn.commit()
    if cur.rowcount == 1:
        _log("An instance <%s> tagged as accepted." % __id)
    else:
        _log('Something strange happened: %s instances '
             'were tagged as accepted while trying %s.'
             % (cur.rowcount, __id))

    _cmd_run(cli, 'proxy.ngw', 'nginx-rmconf.sh %s' % __id)
    _cmd_run(cli, 'proxy.ngw', 'service nginx reload')
    _cmd_run(cli, 'master-18', 'lxc-rmconf.sh %s' % __id)
    _cmd_run(cli, 'db-precise.ngw', 'pg_erase.sh %s %s' % ((__id,) * 2))

    wheel.call_func('key.delete', match=int_name)

    conn.close()
    del cli
    del wheel
    del event

    _log("Destroy event finished, id: <%s>." % __id, tag="NGW-MANAGE")
    return True
def down(removekeys=False, tgt='*', tgt_type='glob', expr_form=None):
    '''
    .. versionchanged:: Nitrogen
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Print a list of all the down or unresponsive salt minions
    Optionally remove keys of down minions

    CLI Example:

    .. code-block:: bash

        salt-run manage.down
        salt-run manage.down removekeys=True
        salt-run manage.down tgt="webservers" tgt_type="nodegroup"
    '''
    ret = status(output=False, tgt=tgt, tgt_type=tgt_type).get('down', [])
    for minion in ret:
        if removekeys:
            wheel = salt.wheel.Wheel(__opts__)
            wheel.call_func('key.delete', match=minion)
    return ret
def generate_manifest_info():
    manifest_dict = {}
    try:
        # pp = pprint.PrettyPrinter(indent=4)
        use_salt, err = common.use_salt()
        if err:
            raise Exception(err)
        fqdn = socket.getfqdn()
        if use_salt:
            import salt.modules.network
            import salt.modules.ps
            import salt.modules.status
            import salt.client
            import salt.wheel
            import salt.config
            local = salt.client.LocalClient()
            cfg, err = common.get_salt_master_config()
            if err:
                raise Exception(err)
            opts = salt.config.master_config(cfg)
            wheel = salt.wheel.Wheel(opts)
            keys = wheel.call_func('key.list_all')
            if not keys:
                raise Exception('No GRIDCells found!')
            nodes = keys['minions']
            # print nodes
            for node in nodes:
                manifest_dict[node] = {}
            roles = local.cmd('*', 'grains.item', ['roles'])
            for node, info in roles.items():
                if node not in manifest_dict:
                    manifest_dict[node] = {}
                manifest_dict[node]['roles'] = info['roles']
            ret = local.cmd('*', 'integralstor.status')
            for node, info in ret.items():
                if node not in manifest_dict:
                    manifest_dict[node] = {}
                manifest_dict[node]['cpu_model'] = info['cpu_model']
                manifest_dict[node]['disks'] = info['disks']
                for dn, dv in manifest_dict[node]['disks'].items():
                    if 'pool' in dv:
                        dv.pop('pool')
                manifest_dict[node]['interfaces'] = info['interfaces']
                manifest_dict[node]['memory'] = info['memory']
                manifest_dict[node]['fqdn'] = info['fqdn']
                if 'hardware_specific_dict' in info:
                    manifest_dict[node]['hardware_specific_dict'] = \
                        info['hardware_specific_dict']
        else:
            # Single node so get the info using a direct call and just bung
            # it into the fqdn key!
            manifest_dict[fqdn] = {}
            status_dict, err = status()
            if err:
                raise Exception(err)
            manifest_dict[fqdn]['cpu_model'] = status_dict['cpu_model']
            manifest_dict[fqdn]['disks'] = status_dict['disks']
            for dn, dv in manifest_dict[fqdn]['disks'].items():
                if 'pool' in dv:
                    dv.pop('pool')
            manifest_dict[fqdn]['interfaces'] = status_dict['interfaces']
            manifest_dict[fqdn]['memory'] = status_dict['memory']
            manifest_dict[fqdn]['fqdn'] = fqdn
        # Remove transitory info and only keep the actual hardware info
        for node in manifest_dict.keys():
            if 'interfaces' in manifest_dict[node]:
                for int_name, interface in manifest_dict[node]['interfaces'].items():
                    if 'up' in interface:
                        interface.pop('up')
                    if 'inet' in interface:
                        interface.pop('inet')
            if 'disks' in manifest_dict[node]:
                for disk_name, diskinfo in manifest_dict[node]['disks'].items():
                    if 'status' in diskinfo:
                        diskinfo.pop('status')
            if 'memory' in manifest_dict[node]:
                if 'mem_free' in manifest_dict[node]['memory']:
                    manifest_dict[node]['memory'].pop('mem_free')
        if not manifest_dict:
            raise Exception('Error getting manifest information')
    except Exception as e:
        return None, 'Error generating the manifest dictionary : %s' % str(e)
    return manifest_dict, None
    help='AWS EC2 region to query'
)
parser.add_argument(
    '--force',
    default=False,
    choices=['False', 'True'],
    help='Forces salt to delete keys'
)
ARGS = parser.parse_args()

logging.info("Starting key check")

# Get all minion names
opts = salt.config.master_config('/etc/salt/master')
wheel = salt.wheel.Wheel(opts)
keys = wheel.call_func('key.list_all')['minions']

# Get ec2 name tags
try:
    ec2 = boto.ec2.connect_to_region(region_name=ARGS.region)
    my_instance_id = boto.utils.get_instance_metadata()["instance-id"]
    filter_list = {'vpc-id': ec2.get_only_instances([my_instance_id])[0].vpc_id}  # get vpc filter
    chain = itertools.chain.from_iterable
    all_instances = chain([res.instances
                           for res in ec2.get_all_instances(filters=filter_list)])
except Exception as e:
    logging.error(e)

names = map(lambda instance: instance.tags.get('Name'), all_instances)

# make a list of keys to revoke
# anything that is not listed in ec2 by name tag
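# The fragment above ends before the revocation step it describes. A minimal
# sketch of how it might continue, reusing only the names already shown
# (keys, names, ARGS, wheel); the revoke_list variable and the dry-run
# behaviour are assumptions, not the original code.
revoke_list = sorted(set(keys) - set(names))
for key in revoke_list:
    if ARGS.force == 'True':
        # Delete the stale key via the wheel interface.
        wheel.call_func('key.delete', match=key)
        logging.info("Deleted key for %s", key)
    else:
        logging.info("Would delete key for %s (pass --force True)", key)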
def create(**kwargs):
    cli = salt.client.LocalClient(__opts__['conf_file'])
    wheel = salt.wheel.Wheel(salt.config.master_config('/etc/salt/master'))
    event = salt.utils.event.MasterEvent('/var/run/salt/master')

    # _cmd_run(cli, 'master-18', 'touch /tmp/touch.me', 'touch /tmp/touch_2.me')
    # yaml.dump(cli.cmd('salt.ngw', 'state.highstate'), file('/tmp/salt.yaml', 'w'))
    # ret = cli.cmd('salt.ngw', 'state.highstate')
    # for target in ret:
    #     for key in data if key == "result" and data["key"]:
    #         file('/tmp/salt.log', 'w').write(...)

    __id = kwargs['id']
    __class = kwargs['class']
    __name = kwargs['name']
    int_name = __id + '.ngw'
    ext_name = __name + '.gis.to'

    _log("Create event caught, id: <%s>, class: <%s>, name: <%s>."
         % (__id, __class, __name), tag="NGW-MANAGE")

    conn = psycopg2.connect(database="front", user="******",
                            password="******", host="db-precise")
    cur = conn.cursor()
    cur.execute(
        '''update instances set instanceeventaccepted = True
           where instanceid = %s''', [__id])
    conn.commit()
    if cur.rowcount == 1:
        _log("An instance <%s> tagged as accepted." % __id)
    else:
        _log('Something strange happened: %s instances '
             'were tagged as accepted while trying %s.'
             % (cur.rowcount, __id))

    cur.execute(
        '''select instanceactive from instances '''
        '''where instanceid = %s and instanceactive = True''', [__id])
    if cur.rowcount == 1:
        _log("An instance <%s> already marked as active. Interrupt." % __id)
        return False

    _cmd_run(cli, 'master-18', 'lxc-genconf.sh %s %s' % (__id, __class))
    _cmd_run(cli, 'master-18',
             'ngw-instance-configure.sh %s %s %s %s' % ((__id,) * 4))
    _cmd_run(cli, 'db-precise.ngw', 'pg_setup.sh %s %s %s' % ((__id,) * 3))
    _cmd_run(cli, 'proxy.ngw',
             'nginx-genconf.sh %s %s %s' % (__id, ext_name, int_name))
    _cmd_run(cli, 'master-18', 'lxc-start --daemon --name ngw-%s' % __id)

    # Wait for the new container's minion to submit its key, accept it, then
    # wait for the minion to come up before running the highstate.
    _sleep_on_event(event, tag='salt/auth', target=int_name, keys={'act': 'pend'})
    wheel.call_func('key.accept', match=int_name)
    _sleep_on_event(event, tag='salt/minion/%s/start' % int_name, target=int_name)

    cli.cmd(int_name, 'state.highstate')
    _cmd_run(cli, int_name, 'initctl stop ngw-uwsgi')
    _cmd_run(cli, int_name,
             '~ngw/env/bin/nextgisweb --config ~ngw/config.ini initialize_db')
    _cmd_run(cli, int_name, 'initctl start ngw-uwsgi')
    _cmd_run(cli, 'proxy.ngw', 'service nginx reload')

    _log("Create event finished, id: <%s>, class: <%s>, name: <%s>."
         % (__id, __class, __name), tag="NGW-MANAGE")

    response = requests.get("http://proxy/activate?instanceid=%s" % __id,
                            headers={'host': 'console.gis.to'})
    if response.content == "True":
        _log("Instance <%s> activated." % __id)
    else:
        _log("Failed to activate instance <%s>." % __id)

    del cli
def generate_manifest_info(): """Generate a dictionary containing all manifest information. Will be dumped into the master.manifest file in a json format.""" manifest_dict = {} try: #pp = pprint.PrettyPrinter(indent=4) use_salt, err = config.use_salt() if err: raise Exception(err) fqdn = socket.getfqdn() if use_salt: import salt.modules.network import salt.modules.ps import salt.modules.status import salt.client import salt.wheel import salt.config local = salt.client.LocalClient() cfg, err = config.get_salt_master_config() if err: raise Exception(err) opts = salt.config.master_config(cfg) wheel = salt.wheel.Wheel(opts) keys = wheel.call_func('key.list_all') if not keys: raise Exception('No GRIDCells found!') nodes = keys['minions'] # print nodes for node in nodes: manifest_dict[node] = {} roles = local.cmd('*', 'grains.item', ['roles']) for node, info in roles.items(): if node not in manifest_dict: manifest_dict[node] = {} manifest_dict[node]['roles'] = info['roles'] ret = local.cmd('*', 'integralstor.status') for node, info in ret.items(): if node not in manifest_dict: manifest_dict[node] = {} manifest_dict[node]['cpu_model'] = info['cpu_model'] manifest_dict[node]['disks'] = info['disks'] for dn, dv in manifest_dict[node]['disks'].items(): if 'pool' in dv: dv.pop('pool') manifest_dict[node]['interfaces'] = info['interfaces'] manifest_dict[node]['memory'] = info['memory'] manifest_dict[node]['fqdn'] = info['fqdn'] if 'hardware_specific_dict' in info: manifest_dict[node]['hardware_specific_dict'] = info['hardware_specific_dict'] else: # Single node so get the info using a direct call and just bung it # into the fqdn key! manifest_dict[fqdn] = {} status_dict, err = get_status() if err: raise Exception(err) manifest_dict[fqdn]['cpu_model'] = status_dict['cpu_model'] manifest_dict[fqdn]['disks'] = status_dict['disks'] for dn, dv in manifest_dict[fqdn]['disks'].items(): if 'pool' in dv: dv.pop('pool') manifest_dict[fqdn]['interfaces'] = status_dict['interfaces'] manifest_dict[fqdn]['memory'] = status_dict['memory'] manifest_dict[fqdn]['fqdn'] = fqdn # Remove transitory info and only keep the actual hardware info for node in manifest_dict.keys(): if 'interfaces' in manifest_dict[node]: for int_name, interface in manifest_dict[node]['interfaces'].items(): if 'up' in interface: interface.pop('up') if 'inet' in interface: interface.pop('inet') if 'disks' in manifest_dict[node]: for disk_name, diskinfo in manifest_dict[node]['disks'].items(): if 'status' in diskinfo: diskinfo.pop('status') if 'memory' in manifest_dict[node]: if 'mem_free' in manifest_dict[node]['memory']: manifest_dict[node]['memory'].pop('mem_free') if not manifest_dict: raise Exception('Error getting manifest information') except Exception, e: return None, 'Error generating the manifest dictionary : %s' % str(e)