Example #1
def syncAsset():
    salt = LocalClient()
    grains = salt.cmd('*', 'grains.items')
    obj = Asset.objects.all()
    host_list = []
    for i in obj:
        host_list.append(i.hostname)

    try:
        for host in grains.keys():
            ip = grains[host]['ipv4'][-1]
            hostname_id = grains[host]['id']
            cpu = grains[host]['cpu_model']
            memory = grains[host]['mem_total']
            if 'virtual' in grains[host]:
                asset_type = grains[host]['virtual']
            else:
                asset_type = 'physical'
            if 'osfinger' in grains[host]:
                os = grains[host]['osfinger']
            else:
                os = grains[host]['osfullname']

            if host not in host_list:
                try:
                    Asset.objects.create(ip=ip,
                                         hostname=hostname_id,
                                         system_type=os,
                                         cpu=cpu,
                                         memory=memory,
                                         asset_type=asset_type)
                except Exception as e:
                    print(e)
    except Exception as e:
        print(e)
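
Every example on this page follows the same basic pattern: LocalClient().cmd() publishes a function to the targeted minions and collects their returns as a dict keyed by minion id. A minimal sketch of that pattern, assuming a reachable salt-master and at least one accepted minion (the target and grains shown are only illustrative):

from salt.client import LocalClient

client = LocalClient()
# cmd() blocks until the timeout and returns {minion_id: return_value};
# grains.items yields the full grains dict for each minion
grains = client.cmd('*', 'grains.items', timeout=30)
for minion_id, minion_grains in grains.items():
    print(minion_id, minion_grains.get('os'), minion_grains.get('ipv4'))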
Example #2
def flushMemcached(request):
    data = request.POST.getlist('mcName')
    project = 'memcache flush'
    username = request.user
    ip = request.META['REMOTE_ADDR']
    saltCmd = LocalClient()
    result = []

    for name in data:
        for info in memcache.objects.filter(memcacheName=name):
            try:
                cmd = saltCmd.cmd(
                    info.saltMinion.saltname, 'cmd.run',
                    ['echo "flush_all" | nc %s %s' % (info.ip, info.port)])
                result.append(cmd)
                if cmd[info.saltMinion.saltname] == 'OK':
                    msg = 'Success'
                else:
                    msg = 'error'
                host = info.ip + ":" + info.port
                dingding_robo(host, project, msg, username,
                              request.POST['phone_number'])
                print(result)
            except Exception as e:
                print(e)
Example #3
def get_panel_pdf(handler,
                  panel,
                  server_name,
                  dash_user,
                  pdf_file='/tmp/table.pdf',
                  args=[],
                  provider=None,
                  kwargs={},
                  filter_field=''):
    if not args:
        args = list(args)
    cl = LocalClient()
    panel = yield get_panel_for_user(handler=handler,
                                     panel=panel,
                                     server_name=server_name,
                                     dash_user=dash_user,
                                     args=args,
                                     provider=provider,
                                     kwargs=kwargs)
    result = cl.cmd('va-master',
                    'va_utils.get_pdf',
                    kwarg={
                        'panel': panel,
                        'pdf_file': pdf_file,
                        'filter_field': filter_field
                    })
    if not result['va-master']:
        yield handler.serve_file(pdf_file)
        raise tornado.gen.Return({'data_type': 'file'})
    raise Exception('PDF returned a value - probably because of an error. ')
Example #4
def get_all_roles():
    cl = LocalClient()
    result = cl.cmd('*', 'grains.get', arg=['role'])
    return {
        x: result[x]
        for x in result if result[x] not in error_msgs and result[x]
    }
Example #5
def get_all_monitoring_data(datastore_handler):
    """
        description: Returns all icinga data from connected monitoring minions. 
    """
    cl = LocalClient()
    result = cl.cmd('G@role:monitoring',
                    fun='va_monitoring.icinga2',
                    tgt_type='compound')
    monitoring_errors = []

    for minion in result:
        if type(result[minion]) == str:
            print(
                'Error getting monitoring data for %s, salt returned %s, but will go on as usual. '
                % (minion, result[minion]))
            monitoring_errors.append(minion)
            continue
        for host in result[minion]:
            if 'va_master' in host['host_name']:
                panel = {'icon': 'fa-circle'}
            else:
                panel = yield datastore_handler.find_panel_for_server(
                    host['host_name'])
            host['icon'] = panel['icon']

    if monitoring_errors:
        monitoring_errors = 'There was an error with the monitoring server(s): ' + ', '.join(
            monitoring_errors)
        result = {
            'success': True,
            'data': result,
            'message': monitoring_errors
        }
    raise tornado.gen.Return(result)
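
The examples differ mainly in how they target minions: by glob (Example #1), by an explicit list of minion ids (Example #9), by grain (Example #47), or by a compound matcher on a grain (Example #5, Example #18, Example #20). Older snippets pass the matcher type as expr_form, newer ones as tgt_type; the parameter was renamed in later Salt releases. A short sketch of the variants, with illustrative targets:

from salt.client import LocalClient

client = LocalClient()
# glob targeting (the default)
client.cmd('web*', 'test.ping')
# grain targeting; older Salt uses expr_form='grain' instead of tgt_type
client.cmd('role:monitoring', 'test.ping', tgt_type='grain')
# compound targeting mixes matchers; G@ selects on grains
client.cmd('G@role:monitoring and not web*', 'test.ping', tgt_type='compound')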
Example #6
def export_table(handler,
                 panel,
                 server_name,
                 dash_user,
                 export_type='pdf',
                 table_file='/tmp/table',
                 args=[],
                 provider=None,
                 kwargs={},
                 filter_field=''):
    table_func = 'va_utils.get_%s' % export_type
    table_file = table_file + '.' + export_type
    if not args:
        args = list(args)
    cl = LocalClient()
    panel = yield get_panel_for_user(handler=handler,
                                     panel=panel,
                                     server_name=server_name,
                                     dash_user=dash_user,
                                     args=args,
                                     provider=provider,
                                     kwargs=kwargs)
    print('Getting ', export_type, '  with filter : ', filter_field)
    result = cl.cmd('G@role:va-master',
                    fun=table_func,
                    tgt_type='compound',
                    kwarg={
                        'panel': panel,
                        'table_file': table_file,
                        'filter_field': filter_field
                    })
    yield handler.serve_file(table_file)
Example #7
    def __init__(self, main_config):
        self.opts = master_config(main_config)
        self.wheel = Wheel(self.opts)
        self.client = LocalClient()
        self.connected_minions_list = self.wheel.call_func('minions.connected')
        self.key_dict = self.wheel.call_func('key.list_all')
        self.total = len(self.key_dict['minions']) + len(
            self.key_dict['minions_pre']) + len(
                self.key_dict['minions_denied']) + len(
                    self.key_dict['minions_rejected'])

    def run(self, quiet=False):
        '''
        do all the work

        note that 'quiet' applies only to remotely
        run, and the same is true for returning the contents.
        maybe we want to fix that
        '''

        while True:
            client = LocalClient()
            module_args = [self.path, self.batchno,
                           self.batchsize, quiet]

            result = client.cmd([self.host],
                                "retentionaudit.examine_dir",
                                module_args, expr_form='list',
                                timeout=self.timeout)

            if self.host in result:
                lines = result[self.host].split("\n")

                maxlen = 0
                for line in lines:
                    if (line.startswith("WARNING:") or
                            line.startswith("INFO:")):
                        continue
                    else:
                        try:
                            entry = json.loads(
                                line, object_hook=JsonHelper.decode_dict)
                            if len(entry['path']) > maxlen:
                                maxlen = len(entry['path'])
                        except:
                            continue

                if not quiet:
                    for line in lines:
                        if (line.startswith("WARNING:") or
                                line.startswith("INFO:")):
                            print(line)
                        else:
                            try:
                                entry = json.loads(
                                    line,
                                    object_hook=JsonHelper.decode_dict)
                                EntryInfo.display_from_dict(
                                    entry, True, maxlen)
                            except:
                                print(line)
                return result[self.host]
            else:
                print "Failed to retrieve dir content for", self.path, "on", self.host
                continuing = ("Try again? Y/N [N]: ")
                if continuing == "":
                    continuing = "N"
                if continuing.upper() != "Y":
                    return None
Example #9
class SaltByLocalApi(object):
    '''
    Saltapi class: uses the local Salt API, so it must run on the same machine as the salt-master service
    '''
    def __init__(self, main_config):
        self.opts = master_config(main_config)
        self.wheel = Wheel(self.opts)
        self.client = LocalClient()
        self.connected_minions_list = self.wheel.call_func('minions.connected')
        self.key_dict = self.wheel.call_func('key.list_all')
        self.total = len(self.key_dict['minions']) + len(
            self.key_dict['minions_pre']) + len(
                self.key_dict['minions_denied']) + len(
                    self.key_dict['minions_rejected'])

    def get_minions_key_status(self):
        reject = len(self.key_dict['minions_rejected'])
        unaccept = len(self.key_dict['minions_pre'])
        accept = len(self.key_dict['minions'])
        return [accept, reject, unaccept]

    def get_minions_status(self):
        return [
            self.total,
            len(self.connected_minions_list),
            self.total - len(self.connected_minions_list)
        ]

    def get_host_info(self):
        minions = self.connected_minions_list
        ret = self.client.cmd(
            minions,
            'grains.item', [
                'mem_total', 'osfullname', 'host', 'osrelease', 'num_cpus',
                'ipv4', 'group', 'area', 'usage'
            ],
            expr_form='list')
        host_info_dict = {}
        for k, v in ret.items():
            v['ipv4'].remove('127.0.0.1')
            ips = '/'.join(v['ipv4']) if len(v['ipv4']) > 1 else v['ipv4'][0]
            values = [
                v['host'], ips, v['osfullname'] + v['osrelease'],
                str(v['num_cpus']) + ' cores',
                str(v['mem_total']) + ' MB', v['group'], v['area'], v['usage']
            ]
            host_info_dict[k] = values
        return host_info_dict

    def get_master_config(self):
        return self.opts

    def get_grains(self):
        if self.connected_minions_list is None or len(
                self.connected_minions_list) < 1:
            return None
        return self.client.cmd(self.connected_minions_list[0], 'grains.items',
                               [])
Example #10
def salt_runner(*args, **kwargs):
    print("In salt_runner")
    client = LocalClient()
    t = client.cmd(target, instruction, arg=cmd_arg, timeout=120)
    if instruction in ("state.sls", "state.sls_id"):
        ans = formatter_state(t)
    else:
        ans = formatter_raw(t)
    return ans
Example #11
def get_all_salt_functions(datastore_handler):
    """ Gets all salt functions for all minions. """
    cl = LocalClient()
    states = yield datastore_handler.get_states_and_apps()

    functions = cl.cmd('*', 'sys.doc')
    result = {[i for i in x if x[states[x]['module']] in i] for x in functions}

    raise tornado.gen.Return(result)
    def run(self, quiet=False):
        '''
        do all the work

        note that 'quiet' applies only to remotely
        run, and the same is true for returning the contents.
        maybe we want to fix that
        '''

        while True:
            client = LocalClient()
            module_args = [self.path, self.batchno, self.batchsize, quiet]

            result = client.cmd([self.host],
                                "retentionaudit.examine_dir",
                                module_args,
                                expr_form='list',
                                timeout=self.timeout)

            if self.host in result:
                lines = result[self.host].split("\n")

                maxlen = 0
                for line in lines:
                    if (line.startswith("WARNING:")
                            or line.startswith("INFO:")):
                        continue
                    else:
                        try:
                            entry = json.loads(
                                line, object_hook=JsonHelper.decode_dict)
                            if len(entry['path']) > maxlen:
                                maxlen = len(entry['path'])
                        except:
                            continue

                if not quiet:
                    for line in lines:
                        if (line.startswith("WARNING:")
                                or line.startswith("INFO:")):
                            print(line)
                        else:
                            try:
                                entry = json.loads(
                                    line, object_hook=JsonHelper.decode_dict)
                                EntryInfo.display_from_dict(
                                    entry, True, maxlen)
                            except:
                                print(line)
                return result[self.host]
            else:
                print "Failed to retrieve dir content for", self.path, "on", self.host
                continuing = ("Try again? Y/N [N]: ")
                if continuing == "":
                    continuing = "N"
                if continuing.upper() != "Y":
                    return None
Example #13
def salt_runner(*args, **kwargs):
    print("In salt_runner")
    client = LocalClient()
    t = client.cmd(target, instruction, arg=cmd_arg, timeout=120)
    if instruction == "state.sls":
        ans = formatter_state(t)
    else:
        ans = formatter_raw(t)
    return ans
Example #14
def system(hostname):
    try:
        local = LocalClient()
        sys = local.cmd(hostname,'grains.item',['kernel'])
        if sys[hostname]['kernel'] == 'Linux':
            return 'linux'
        elif sys[hostname]['kernel'] == 'Windows':
            return 'windows'
    except Exception as e:
        print(e)
Example #15
    def install_zfs(cls, data):
        client = LocalClient()
        subprocess.call(["mkdir",
                         "-p",
                         "/srv/salt"])
        subprocess.call(["mv",
                         "%(app_root)s/application/utils/install_zfs.sh" % {'app_root': CONFIGS['APP_ROOT']},
                         "/srv/salt/"])
        response = client.cmd(data['Storage-Node'],
                              "cmd.script",
                              ["salt://install_zfs.sh"])
        return response
Example #16
def run_cmd(command, retcodes=[0], **kwargs):
    from salt.client import LocalClient
    global salt_client

    if not salt_client:
        salt_client = LocalClient()

    res = salt_client.cmd('*', 'cmd.run', [command], full_return=True)
    if retcodes:
        for _id, _res in res.items():
            assert _res['retcode'] in retcodes

    return res
Example #17
def do_macro(registry, user, instruction, match):
    macro = match.group('target')
    if (macro not in deployments.keys()):
        return {'success': False, 'answer': "I don't currently know about %s" % macro}
    tgt = deployments[macro]
    print "Starting macro %s"%macro
    client = LocalClient()
    t = client.cmd(tgt['target'], tgt['instruction'], arg=tgt['arg'], timeout=120)
    if (tgt['instruction'] == "state.sls"):
        ans = formatter_state(t)
    else:
        ans = formatter_raw(t)
    return {'success': True, 'answer': ans}
Example #18
def cache_ost_grains_item(ost, args=['default_addr'], timeout=60):
    from salt.client import LocalClient
    client = LocalClient()
    ret = {}
    try:
        ost = int(ost)
    except:
        return False
    if not isinstance(args, list):
        return False
    jid = client.cmd_async('G@osts:%d'% ost, 'grains.item', args, expr_form='compound')
    ret = wait_return_salt_cmd(jid, timeout=timeout)
    return ret
Example #19
class SaltByLocalApi(object):
    '''
    Saltapi class: uses the local Salt API, so it must run on the same machine as the salt-master service
    '''
    def __init__(self, main_config):
        self.opts = master_config(main_config)
        self.wheel = Wheel(self.opts)
        self.client = LocalClient()
        self.connected_minions_list = self.wheel.call_func('minions.connected')
        self.key_dict = self.wheel.call_func('key.list_all')
        self.total = len(self.key_dict['minions']) + len(
            self.key_dict['minions_pre']) + len(
                self.key_dict['minions_denied']) + len(
                    self.key_dict['minions_rejected'])

    def get_minions_key_status(self):
        reject = len(self.key_dict['minions_rejected'])
        unaccept = len(self.key_dict['minions_pre'])
        accept = len(self.key_dict['minions'])
        return [accept, reject, unaccept]

    def get_minions_status(self):
        online = len(self.get_host_info())
        return [self.total, online, self.total - online]

    def get_host_info(self):
        minions = self.connected_minions_list
        ret = self.client.cmd(
            minions,
            'grains.item', [
                'mem_total', 'osfullname', 'host', 'osrelease', 'num_cpus',
                'ipv4', 'group', 'area', 'usage'
            ],
            expr_form='list')
        host_info_dict = {}
        for k, v in ret.items():
            v['ipv4'].remove('127.0.0.1')
            ips = '/'.join(v['ipv4']) if len(v['ipv4']) > 1 else v['ipv4'][0]
            values = [
                v['host'], ips, v['osfullname'] + v['osrelease'],
                str(v['num_cpus']) + ' cores',
                str(v['mem_total']) + ' MB', v['group'], v['area'], v['usage']
            ]
            host_info_dict[k] = values
        return host_info_dict

    def get_master_config(self):
        return self.opts

    def get_grains(self):
        if self.connected_minions_list is None or len(
                self.connected_minions_list) < 1:
            return None
        return self.client.cmd(self.connected_minions_list[0], 'grains.items',
                               [])
Example #20
def call_master_cmd(fun, arg=[], kwarg={}):
    ''' Calls the salt function on the va-master. Used to work with salt-call but a recent salt version made it incompatible with tornado, so we resort to using the `role` grain to find the va-master and call the function that way. '''

    cl = LocalClient()
    result = cl.cmd('G@role:va-master',
                    fun=fun,
                    tgt_type='compound',
                    arg=arg,
                    kwarg=kwarg)
    result = [result[i] for i in result if result[i]]
    if not result:
        raise Exception('Tried to run ' + str(fun) +
                        ' on va-master, but there was no response. arg was ' +
                        str(arg) + ' and kwarg was ' + str(kwarg))
    return result[0]
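
The docstring above explains why this goes through the role grain instead of salt-call; a hypothetical call (the module function and argument are only illustrative) would look like this:

# fetch the 'role' grain from whichever minion carries the va-master role
master_role = call_master_cmd('grains.item', arg=['role'])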
Example #21
def wait_return_salt_cmd(jid, timeout, interval = 5):
    from salt.client import LocalClient

    client = LocalClient()
    result = {}
    start = time.time()
    while True:
        result = client.get_cache_returns(jid)
        if len(result) == 0:
            wait = int(time.time() - start)
            if wait > timeout:
                return {}
        else:
            return result
        time.sleep(interval)
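
This helper is the other half of the asynchronous pattern used in Example #18: cmd_async() only publishes the job and returns its job id (JID), and the caller then polls the master's job cache until a return appears or the timeout expires. A usage sketch combining the two, with an illustrative target and timeout:

client = LocalClient()
# publish the job without waiting for the minions to answer
jid = client.cmd_async('G@osts:3', 'grains.item', ['default_addr'], expr_form='compound')
# poll the job cache for up to 60 seconds
ret = wait_return_salt_cmd(jid, timeout=60)
if not ret:
    print('no return received before the timeout')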
Example #22
    def tick(self):
        """
        For walltime-based monitoring of running requests.  Long-running requests
        get a periodic call to saltutil.running to verify that things really
        are still happening.
        """

        if not self._by_jid:
            return
        else:
            log.debug("RequestCollection.tick: %s JIDs underway" %
                      len(self._by_jid))

        # Identify JIDs that haven't had a saltutil.running response for too long.
        # Kill requests in a separate phase because request:JID is not 1:1
        stale_jobs = set()
        _now = now()
        for request in self._by_jid.values():
            if _now - request.alive_at > datetime.timedelta(
                    seconds=TICK_PERIOD * 3):
                log.error("Request %s JID %s stale: now=%s, alive_at=%s" %
                          (request.id, request.jid, _now, request.alive_at))
                stale_jobs.add(request)

        # Any identified stale jobs are errored out.
        for request in stale_jobs:
            with self._update_index(request):
                request.set_error("Lost contact")
                request.jid = None
                request.complete()

        # Identify minions associated with JIDs in flight
        query_minions = set()
        for jid, request in self._by_jid.items():
            query_minions.add(request.minion_id)

        # Attempt to emit a saltutil.running to ping jobs, next tick we
        # will see if we got updates to the alive_at attribute to indicate non-staleness
        if query_minions:
            log.info("RequestCollection.tick: sending saltutil.running to {0}".
                     format(query_minions))
            client = LocalClient(config.get('cthulhu', 'salt_config_path'))
            pub_data = client.run_job(list(query_minions),
                                      'saltutil.running', [],
                                      expr_form="list")
            if not pub_data:
                log.warning("Failed to publish saltutil.running to {0}".format(
                    query_minions))
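
Unlike cmd(), run_job() only publishes the command and returns the publication data (notably the 'jid' and the list of targeted minions) without waiting for any returns, which is why tick() checks alive_at on a later pass instead. A minimal sketch, assuming a master config at the path shown and the newer tgt_type keyword (older releases use expr_form, as in the example above):

from salt.client import LocalClient

client = LocalClient('/etc/salt/master')
pub_data = client.run_job(['minion1', 'minion2'], 'saltutil.running', [], tgt_type='list')
if pub_data:
    print('published job', pub_data['jid'], 'to', pub_data.get('minions'))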
Example #23
def get_salt_functions():
    cl = LocalClient()

    salt_functions = cl.cmd('G@role:va-master',
                            fun='va_utils.get_documented_module_functions',
                            tgt_type='compound')
    salt_functions = salt_functions.items()[0][1]
    print('Salt : ', salt_functions)
    salt_functions = {
        method: [[function[0], yaml.load(function[1])]
                 for function in salt_functions[method]
                 if function_is_documented(function[1], func_name=function[0])]
        for method in salt_functions
    }

    return salt_functions
Example #24
    def serve_file(self, source, chunk_size = 10**6, salt_source = {}, url_source = ''):

        self.set_header('Content-Type', 'application/octet-stream')
        self.set_header('Content-Disposition', 'attachment; filename=test.zip')

        try:
            offset = 0

            if salt_source:
                client = LocalClient()
                source = client.cmd
                kwargs = salt_source
                kwargs['kwarg'] = kwargs.get('kwarg', {})
                kwargs['kwarg']['range_from'] = 0
            elif url_source:
                def streaming_callback(chunk):
                    self.write(chunk)
                    self.flush()
                source = AsyncHTTPClient().fetch
                request = HTTPRequest(url = url_source, streaming_callback = streaming_callback)
                request = url_source
                kwargs = {"request" : request, 'streaming_callback' : streaming_callback}
            else:
                f = open(source, 'r')
                source = f.read
                kwargs = {"source_args" : [chunk_size]}

            print ('Serving file with : ', source, kwargs, chunk_size)
            result = yield self.send_data(source, kwargs, chunk_size)
            print ('Sent data and result is : ', result)
            self.finish()
        except:
            import traceback
            traceback.print_exc()
Example #25
    def _submit(self, commands):
        self.log.debug("Request._submit: %s/%s/%s" % (self._minion_id, self._cluster_name, commands))

        client = LocalClient(config.get('cthulhu', 'salt_config_path'))
        pub_data = client.run_job(self._minion_id, 'ceph.rados_commands',
                                  [self._fsid, self._cluster_name, commands])
        if not pub_data:
            # FIXME: LocalClient uses 'print' to record the
            # details of what went wrong :-(
            raise PublishError("Failed to publish job")

        self.log.info("Request %s started job %s" % (self.id, pub_data['jid']))

        self.alive_at = now()
        self.jid = pub_data['jid']

        return self.jid
    def run(self):
        '''
        do all the work
        '''
        client = LocalClient()
        module_args = [self.path, self.num_lines]

        result = client.cmd([self.host],
                            "retentionaudit.examine_file",
                            module_args,
                            expr_form='list',
                            timeout=self.timeout)

        if self.host in result:
            if not self.quiet:
                print(result[self.host])
            return result[self.host]
Example #27
def restart_infra_impl(infra_name, target_server):
    # check if infra is supported
    if infra_name not in supported_infra:
        raise NoSuchInfrastructureException('', 1)

    logger.info('restart infra:%s on %s' % (infra_name, target_server))

    sls_arg = supported_infra[infra_name]
    # send saltstack cmd to restart infra
    client = LocalClient()
    client.cmd(target_server, 'state.apply', [sls_arg])

    # check if the infrastructure is up
    if not check_module_is_up(target_server, infra_name):
        raise InfraIsNotUpException(infra_name)

    logger.info('restart infra:%s on %s success' % (infra_name, target_server))
Example #28
def salt_local_client():
    global _salt_local_client
    # TODO IMPROVE in case of minion restart old handler will
    #      lead to Authentication error, so always recreate it
    #      as a workaround for now
    if not _salt_local_client or True:
        _salt_local_client = LocalClient()
    return _salt_local_client
    def run(self):
        '''
        do all the work
        '''
        client = LocalClient()
        module_args = [self.path,
                       self.num_lines]

        result = client.cmd([self.host],
                            "retentionaudit.examine_file",
                            module_args, expr_form='list',
                            timeout=self.timeout)

        if self.host in result:
            if not self.quiet:
                print(result[self.host])
            return result[self.host]
Example #30
def run(config, argv):
    args = docopt(__doc__, argv=argv)

    profile_name = args['--profile-name']
    state = args['--state']

    data = {'salt': {}}

    master = config.master_config_data(files=['/etc/salt/master'])

    instance_name = instances.instance_name_for_state(state, config)
    instance_id = instance_name.rsplit('-')[-1]
    minion_id = '{0}{1}'.format(state, instance_id)

    pem, pub = salt.gen_keys()

    client = LocalClient()
    grains = client.cmd('master', 'grains.item', ['fqdn'])

    try:
        master_fqdn = grains['master']['fqdn']
    except KeyError:
        return

    minion_data = {
        'master': master_fqdn,
        'id': minion_id
    }

    profile = config.profile_for_key(profile_name)
    minion_data.update(profile.get('minion', {}))

    data['salt']['minion_pub'] = pub
    data['salt']['minion_pem'] = pem
    data['salt']['minion'] = Filesystem.encode(
        config.minion_config_data(minion_data))

    salt.accept_key(master['pki_dir'], pub, minion_id)

    instances.create_instance(
        config=config,
        profile_name=profile_name,
        state=state,
        data=data,
        instance_name=instance_name)
Example #31
    def run(self):
        try:
            from salt.client import LocalClient
            client = LocalClient(c_path=get_config().getstring(
                'salt', 'master_config_path', '/etc/salt/master'))
            try:
                log.debug('Running action against "%s": %s args: %s',
                          self.hostname, self.action, self.args)
                data = client.cmd(self.hostname, self.action, arg=self.args)
            except SystemExit as e:
                log.error('Failed action %s on host: %s (%s)', self.action, self.hostname, e)
                self.q.put(cPickle.dumps({}))
            else:
                pdata = cPickle.dumps(data)
                self.q.put(pdata)
        except Exception as e:
            log.err(system='salt-local')
            self.q.put(cPickle.dumps({'_error': e}))
Example #32
    def cancel(self, request_id):
        """
        Immediately mark a request as cancelled, and in the background
        try and cancel any outstanding JID for it.
        """
        request = self._by_request_id[request_id]
        with self._update_index(request):
            request.set_error("Cancelled")
            request.complete()

            if request.jid:
                client = LocalClient(config.get('cthulhu', 'salt_config_path'))
                client.run_job(request.minion_id, 'saltutil.kill_job',
                               [request.jid])
                # We don't check for completion or errors from kill_job, it's a best-effort thing.  If we're
                # cancelling something we will do our best to kill any subprocess but can't
                # make any guarantees because running nodes may be out of touch with the calamari server.
                request.jid = None
Example #33
def getServices(request):
    action = request.POST.get('action')
    data = request.POST.getlist('id')
    username = request.user
    saltCmd = LocalClient()
    result = []
    ip = request.META['REMOTE_ADDR']

    for v in data:
        goName, host = v.split(',')
        getMes = saltCmd.cmd('%s' % host, 'cmd.run',
                             ['supervisorctl %s %s' % (action, goName)])
        result.append(getMes)
        info = action + ' ' + goName
        dingding_robo(host, info, getMes, username, request.POST['phone_number'])
    logs(username, ip, '%s services' % action, result)

    return render(request, 'getdata.html', {'result': result})
Example #34
    def cancel(self, request_id):
        """
        Immediately mark a request as cancelled, and in the background
        try and cancel any outstanding JID for it.
        """
        request = self._by_request_id[request_id]
        with self._update_index(request):
            request.set_error("Cancelled")
            request.complete()

            if request.jid:
                client = LocalClient(config.get('cthulhu', 'salt_config_path'))
                client.run_job(request.minion_id, 'saltutil.kill_job',
                               [request.jid])
                # We don't check for completion or errors from kill_job, it's a best-effort thing.  If we're
                # cancelling something we will do our best to kill any subprocess but can't
                # make any guarantees because running nodes may be out of touch with the calamari server.
                request.jid = None
Example #35
def do_run(cmd, channel, body):
    # {'cid': 'blahblah', 'target': '*', 'instruction': 'test.ping', cmd_arg: ['']}
    try:
        cmd_arg = body.get('cmd_arg', ())
        instruction = body.get('instruction', 'test.ping')
        target = body.get('target', '*')
        client = LocalClient()
        t = client.cmd(target, instruction, arg=cmd_arg, timeout=240)
        if (instruction == "state.sls"):
            ans = formatter_state(t)
        else:
            ans = formatter_raw(t)
        result = {'cid': body.get('cid', None), 'result': ans}
        zmq_sock.send_multipart('MSG', 'input/salt/run', json.dumps(result))
    except:
        traceback.print_exc(None)
        result = {'cid': body.get('cid', None), 'result': 'An exception occurred'}
        zmq_sock.send_multipart('MSG', 'input/salt/run', json.dumps(result))
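
The comment at the top of do_run() documents the message shape it expects; a hypothetical body (the cid, target and arguments are purely illustrative, and zmq_sock plus the formatter helpers must already be defined) would look like this:

body = {
    'cid': 'abc123',
    'target': 'web*',
    'instruction': 'state.sls',
    'cmd_arg': ['nginx'],
}
do_run(None, 'input/salt/run', body)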
Example #36
    def on_tick(self):
        # This procedure is to catch the annoying case of AES key changes (#7836), which are otherwise
        # ignored by minions which are doing only minion->master messaging.  To ensure they
        # pick up on key changes, we actively send them something (doesn't matter what).  To
        # avoid doing this constantly, we only send things to minions which seem to be a little
        # late

        # After this length of time, doubt a minion enough to send it a message in case
        # it needs a kick to update its key
        def _ping_period(fqdn):
            return datetime.timedelta(seconds=self.get_contact_period(fqdn) * 2)

        t = now()
        late_servers = [s.fqdn for s in self.servers.values() if s.last_contact and (t - s.last_contact) > _ping_period(s.fqdn)]
        log.debug("late servers: %s" % late_servers)
        if late_servers:
            client = LocalClient(config.get('cthulhu', 'salt_config_path'))
            pub = client.pub(late_servers, "test.ping", expr_form='list')
            log.debug(pub)
Example #37
    def run(self):
        try:
            from salt.client import LocalClient
            client = LocalClient(c_path=get_config().getstring(
                'salt', 'master_config_path', '/etc/salt/master'))
            try:
                log.debug('Running action against "%s": %s args: %s',
                          self.hostname, self.action, self.args)
                data = client.cmd(self.hostname, self.action, arg=self.args)
            except SystemExit as e:
                log.error('Failed action %s on host: %s (%s)', self.action,
                          self.hostname, e)
                self.q.put(cPickle.dumps({}))
            else:
                pdata = cPickle.dumps(data)
                self.q.put(pdata)
        except Exception as e:
            log.err(system='salt-local')
            self.q.put(cPickle.dumps({'_error': e}))
Example #38
def do_macro(registry, user, instruction, match):
    macro = match.group('target')
    if (macro not in deployments.keys()):
        return {
            'success': False,
            'answer': "I don't currently know about %s" % macro
        }
    tgt = deployments[macro]
    print "Starting macro %s" % macro
    client = LocalClient()
    t = client.cmd(tgt['target'],
                   tgt['instruction'],
                   arg=tgt['arg'],
                   timeout=120)
    if (tgt['instruction'] == "state.sls"):
        ans = formatter_state(t)
    else:
        ans = formatter_raw(t)
    return {'success': True, 'answer': ans}
Example #39
    def tick(self):
        """
        For walltime-based monitoring of running requests.  Long-running requests
        get a periodic call to saltutil.running to verify that things really
        are still happening.
        """

        if not self._by_jid:
            return
        else:
            log.debug("RequestCollection.tick: %s JIDs underway" % len(self._by_jid))

        # Identify JIDs that haven't had a saltutil.running response for too long.
        # Kill requests in a separate phase because request:JID is not 1:1
        stale_jobs = set()
        _now = now()
        for request in self._by_jid.values():
            if _now - request.alive_at > datetime.timedelta(seconds=TICK_PERIOD * 3):
                log.error("Request %s JID %s stale: now=%s, alive_at=%s" % (
                    request.id, request.jid, _now, request.alive_at
                ))
                stale_jobs.add(request)

        # Any identified stale jobs are errored out.
        for request in stale_jobs:
            with self._update_index(request):
                request.set_error("Lost contact")
                request.jid = None
                request.complete()

        # Identify minions associated with JIDs in flight
        query_minions = set()
        for jid, request in self._by_jid.items():
            query_minions.add(request.minion_id)

        # Attempt to emit a saltutil.running to ping jobs, next tick we
        # will see if we got updates to the alive_at attribute to indicate non-staleness
        if query_minions:
            log.info("RequestCollection.tick: sending saltutil.running to {0}".format(query_minions))
            client = LocalClient(config.get('cthulhu', 'salt_config_path'))
            pub_data = client.run_job(list(query_minions), 'saltutil.running', [], expr_form="list")
            if not pub_data:
                log.warning("Failed to publish saltutil.running to {0}".format(query_minions))
Example #40
def do_run(cmd, channel, body):
    # {'cid': 'blahblah', 'target': '*', 'instruction': 'test.ping', cmd_arg: ['']}
    try:
        cmd_arg = body.get('cmd_arg', ())
        instruction = body.get('instruction', 'test.ping')
        target = body.get('target', '*')
        client = LocalClient()
        t = client.cmd(target, instruction, arg=cmd_arg, timeout=240)
        if (instruction == "state.sls"):
            ans = formatter_state(t)
        else:
            ans = formatter_raw(t)
        result = {'cid': body.get('cid', None), 'result': ans}
        zmq_sock.send_multipart('MSG', 'input/salt/run', json.dumps(result))
    except:
        traceback.print_exc(None)
        result = {
            'cid': body.get('cid', None),
            'result': 'An exception occurred'
        }
        zmq_sock.send_multipart('MSG', 'input/salt/run', json.dumps(result))
Example #41
def restart_module_impl(module_name, target_server, settings):
    module_settings = settings[module_name]
    # container info
    container_info = ContainerInfoParser(module_settings)
    container_name = container_info.parse_container_name()

    # remove old container
    remove_container(target_server, container_name)

    logger.info('restart module:%s on %s' % (module_name, target_server))

    # send docker cmd
    docker_cmd = compose_docker_cmd(module_settings)
    client = LocalClient()
    client.cmd(target_server, 'cmd.run', [docker_cmd])

    # check if the module is up
    if not check_module_is_up(target_server, container_name):
        raise ChatModuleIsNotUpException(module_name)

    logger.info('restart module:%s on %s success' %
                (module_name, target_server))
Example #42
def server_list():
    try:
        linux_k_list=[]
        local = LocalClient()
        for item in hostname('/etc/salt/pki/master/minions'):
            sys = local.cmd(item, 'grains.item', ['kernel'])
            if sys[item]['kernel'] == 'Linux':
                for k,v in sys.items():
                    linux_k = unicode.encode(k)
                    #os_list_path = os.path.abspath(os.path.dirname(os.getcwd()) + os.path.sep + ".." + os.path.sep + "data")
                    os_list_path = os.path.abspath(os.getcwd() + os.path.sep + "data")
                    os_list_file_linux = os.path.join(os_list_path,"linux_list.txt")
                    f = open(os_list_file_linux,'a')
                    f.write("|")
                    f.write(linux_k)
                    f.close()
            elif sys[item]['kernel'] == 'Windows':
                pass
            else:
                print 'other os'
    except Exception,e:
        print e
Example #43
    def on_tick(self):
        # This procedure is to catch the annoying case of AES key changes (#7836), which are otherwise
        # ignored by minions which are doing only minion->master messaging.  To ensure they
        # pick up on key changes, we actively send them something (doesn't matter what).  To
        # avoid doing this constantly, we only send things to minions which seem to be a little
        # late

        # After this length of time, doubt a minion enough to send it a message in case
        # it needs a kick to update its key
        def _ping_period(fqdn):
            return datetime.timedelta(seconds=self.get_contact_period(fqdn) *
                                      2)

        t = now()
        late_servers = [
            s.fqdn for s in self.servers.values()
            if s.last_contact and (t - s.last_contact) > _ping_period(s.fqdn)
        ]
        log.debug("late servers: %s" % late_servers)
        if late_servers:
            client = LocalClient(config.get('cthulhu', 'salt_config_path'))
            pub = client.pub(late_servers, "test.ping", expr_form='list')
            log.debug(pub)
Example #44
def test_cmd():
    local = LocalClient()
    ret = local.cmd('*', 'test.ping')
    assert ret == {'master': True}
Example #45
def test1():
    client = LocalClient()
    # target minions by the 'oss:centos' grain and run a shell command on them
    result = client.cmd('oss:centos', 'cmd.run', ['ls /root'], expr_form='grain')
    print(result)
Example #46
def move_files():
    '''Migrate file in Migration collection'''
    from salt.client import LocalClient
    cv = Condition()
    app = mongo.app
    tmp_dst_path = '/var/www/STO-%d/temp'
    dst_path     = '/var/www/STO-%d/FshareFS/mega/%s/%s'
    client = LocalClient()
    aria = None
    migratings = {}
    files_moving = {}
    resum = {}

    def res_grains_to_dict(salt_res):
        ret = {}
        for i in salt_res:
            osts = salt_res[i]['ret']['osts']
            if len(osts) > 1:
                for ost in osts:
                    ret[ost] = [i, salt_res[i]['ret']['default_addr']]
                continue
            ret[osts[0]]=[i, salt_res[i]['ret']['default_addr']]
        return ret

    def check_file_done(dst_id, file_id, file_path):
        sto_id = None
        new_file_name = None
        folder_path = None
        src_move = None
        dst_move = None
        minion_id = None
        with app.test_request_context():
            res = cache_ost_grains_item(dst_id, args=['default_addr', 'osts'])
            if not res or len(res) == 0 or type(res[res.keys()[0]]['ret'])=='str':
                return
            minion_id = res_grains_to_dict(res)
            minion_id = minion_id[dst_id][0]
        file_name = file_path.rsplit('/',1)[1]
        with app.test_request_context():
            try_connect_mysql()
            sysFile   = SystemFile.query.filter_by(id = file_id).first()
            # Record in DB is deleted before moving file
            if sysFile == None:
                client.cmd(minion_id, 'storage.delete_files', [dst_id] + [file_path], timeout = 60*10, expr_form='glob')
                mongo.Migration.collection.update({'_id': file_id}, {'$set': {'stat': "REMOVE_BEFORE"}})
                return

            folder_path = sysFile.folder_path
            sub_folder = folder_path.split('/')
            if len(sub_folder) == 1 and len(folder_path) > 6:
                folder_path = folder_path[:6] + '/' + folder_path[6:]

            src_move  = '%s/%s' % (tmp_dst_path % dst_id, file_name)
            if file_name.startswith('migrate_'):
                file_name = file_name[8:]
            dst_move  = dst_path % (dst_id, folder_path, file_name)
        while True:
            jid = client.cmd_async(minion_id, 'storage.move_file', [src_move, dst_move, 400, dst_id], expr_form='glob')
            ret = wait_return_salt_cmd(jid, timeout=60*10)
            if len(ret) == 0:
                continue

            new_file_name = ret[ret.keys()[0]]['ret']
            with app.test_request_context():
                if ' ' in new_file_name:  # a valid filename must not contain spaces
                    mongo.Migration.collection.update({'_id': file_id}, {'$set': {'stat': "MOVE_ERR"}})
                    return

                try_connect_mysql()
                sysFile   = SystemFile.query.filter_by(id = file_id).first()
                ost = dst_id
                sto_id = sysFile.storage_id
                file_path = dst_path % (sysFile.storage_id, sysFile.folder_path, sysFile.name)

                if sysFile == None:# Record in DB is deleted after moving file
                    new_file_path = dst_path % (ost, folder_path, new_file_name)
                    client.cmd(minion_id, 'storage.delete_files', [ost] + [new_file_path],
                        timeout=60*10, expr_form='glob')
                    mongo.Migration.collection.update({'_id': sid}, {'$set': {'stat': "REMOVE_AFTER"}})
                    return

            with app.test_request_context():
                # Update DB
                count = 3
                try_connect_mysql() # Update record in DB
                while count > 0:
                    try:
                        sess = sql.session()
                        sess.query(SystemFile).filter(SystemFile.id == file_id).update({"storage_id": long(dst_id),
                            "name": new_file_name, "folder_path": folder_path })
                        sess.commit()
                        break
                    except OperationalError as e:
                        if '1205' in e[0]:
                            time.sleep(1)
                            count -= 1
                            if count == 0:
                                return
                        else:
                            sess.rollback()
                            raise e

                mongo.FileRemoveSchedule.collection.insert({
                    'file_path'     : file_path,
                    'ost'           : sto_id,
                    'schedule_time' : datetime.datetime.utcnow() + datetime.timedelta(hours=24),
                    })
                mongo.Migration.collection.remove({'_id': file_id})
            break

    def get_result(migratings):
        while True:
            wait = 60
            for dst_id in migratings.keys():
                ret = {}
                with app.test_request_context():
                    res = cache_ost_grains_item(dst_id, args=['default_addr', 'osts'])
                    if not res or len(res) == 0 or type(res[res.keys()[0]]['ret']) == 'str':
                        continue
                    ret = res_grains_to_dict(res)
                jid = client.cmd_async(ret[dst_id][0], 'aria2.tellStatus', [migratings[dst_id][0]], expr_form='glob')
                aria = wait_return_salt_cmd(jid, timeout=20)
                if len(aria) == 0 or not isinstance(aria[aria.keys()[0]]['ret'], dict):
                    with app.test_request_context():
                        current_app.logger.error('salt %s aria2.tellStatus(%s) not return (jid: %s)',
                        migratings[dst_id][2], migratings[dst_id][0], jid)
                    continue
                stat = aria[aria.keys()[0]]['ret']
                if stat['status'] == 'complete':
                    check_file_done(dst_id, migratings[dst_id][1], stat['files'][0]['path'])
                    # remove system file ids that finished or errored so other system file ids can be queued for download
                    del migratings[dst_id]
                    # remove result
                    client.cmd_async(aria.keys()[0], 'aria2.removeDownloadResult', [stat['gid']])
                    cv.acquire()
                    cv.notify()
                    cv.release()
                elif stat['status'] == 'error':
                    # Update stat -> ERROR & gid in mongo
                    with app.test_request_context():
                        mongo.Migration.collection.update({'_id': migratings[dst_id][1]} , {'$set': {'stat': "ARIA_ERROR_" + stat['errorCode']}})
                        current_app.logger.error('gid %s error %s: fid %d', stat['gid'], stat['errorCode'], migratings[dst_id][1])
                    # remove system file ids that finished or errored so other system file ids can be queued for download
                    del migratings[dst_id]
                    cv.acquire()
                    cv.notify()
                    cv.release()
                else:
                    # Maximum time waiting is 60s
                    if (long(stat['downloadSpeed']) == 0):
                        wait = 60
                    else:
                        wait = (long(stat['files'][0]['length']) - long(stat['files'][0]['completedLength'])) / long(stat['downloadSpeed'])
                        wait = min(wait, 60)
            if wait < 10:
                wait = 10
            time.sleep(wait)

    if not write_pid('move_files', 30*60):
        return

    th = Thread(target = get_result, args = (migratings,))
    th.daemon = True
    th.start()

    count = 3
    while count > 0:
        try:
            ret = mongo.Migration.find({'stat': "MIGRATING"})
            for m in ret:
                migratings[m['dst_id']] = [m['gid'], m['_id']]
            break
        except OperationFailure as e:
            patt = re.compile(r"cursor id '(\d+)' not valid at server")
            if patt.match(e.message):
                time.sleep(1)
                count -= 1
                if count == 0:
                    return
            else:
                raise e
    while True:
        if not th.isAlive():
            return
        count = 3
        while count > 0:
            try:
                list_queue = mongo.Migration.find({'stat': "QUEUE"})
                for m in list_queue:
                    if m['dst_id'] in migratings:
                        continue

                    try_connect_mysql()
                    q = sql.session.query(SystemFile.name, SystemFile._checksum, SystemFile.storage_id, SystemFile.folder_path, UserFile.name). \
                        join(UserFile, SystemFile.id==UserFile.pid).filter(SystemFile.id == m['_id']).first()
                    # Record in DB is deleted before move file from source to dest's temp dir
                    if q == None or q[2] != m['src_id']:
                        mongo.Migration.collection.remove({'_id': m['_id']})
                        continue

                    src_id = cache_ost_grains_item(m['src_id'], args=['default_addr', 'osts'])
                    dst_id = cache_ost_grains_item(m['dst_id'], args=['default_addr', 'osts'])
                    if (not src_id or len(src_id) == 0 or type(src_id[src_id.keys()[0]]['ret']) == 'str') or \
                       (not dst_id or len(dst_id) == 0 or type(dst_id[dst_id.keys()[0]]['ret']) == 'str'):
                        continue
                    src_id = res_grains_to_dict(src_id)
                    dst_id = res_grains_to_dict(dst_id)

                    url = 'http://%s:89/private/STO-%d/FshareFS/mega/%s/%s' % (src_id[m['src_id']][1] , m['src_id'], q[3], q[0])
                    checksum = hexlify(q[1])
                    file_name = q[4]
                    file_name = filename_shortcut(file_name, lenght = 70, lower = True)
                    if file_name.startswith('_'):
                        file_name = checksum + file_name
                    else:
                        file_name = checksum + '_'+ file_name
                    file_name = 'migrate_' + file_name
                    ret = client.cmd(dst_id[m['dst_id']][0], 'aria2.addUri',
                            [url, 'dir=%s' % tmp_dst_path % m['dst_id'], 'out=%s' % file_name],
                            timeout = 60, expr_form='glob')

                    if len(ret) == 0 or len(ret[ret.keys()[0]]) > 16 or len(ret[ret.keys()[0]]) == 0:
                        continue
                    gid = ret[ret.keys()[0]]
                    mongo.Migration.collection.update({'_id': m['_id']}, {'$set': {'stat': "MIGRATING", 'gid': gid}})
                    migratings[m['dst_id']] = [gid, m['_id']]
                break

            except OperationFailure as e:
                patt = re.compile(r"cursor id '(\d+)' not valid at server")
                if patt.match(e.message):
                    time.sleep(1)
                    count -= 1
                    if count == 0:
                        return
                else:
                    raise e

        update_pid_expire('move_files', 30*60)
        cv.acquire()
        cv.wait(60*2)
        cv.release()
Example #47
def delete_schedule():
    """ Delete physical file """

    from salt.client import LocalClient

    client = LocalClient()

    def get_result(deletings):

        while True:
            for ost in deletings.keys():
                result = client.get_cache_returns(deletings[ost][0])

                if len(result) == 0 or len(result[result.keys()[0]]['ret']) == 0:
                    wait = int(time.time() - deletings[ost][2])
                    if wait > 900 :
                        del deletings[ost]
                    else:
                        continue

                else:
                    ret = result[result.keys()[0]]['ret']
                    if type(ret) == str:
                        with mongo.app.test_request_context():
                            current_app.logger.fatal(ret)
                            print ost, ret
                    else:
                        for f in deletings[ost][1]:
                            with mongo.app.test_request_context():
                                mongo.FileRemoveSchedule.collection.remove({'_id': f._id})

                    del deletings[ost]

            time.sleep(0.3)

    if not write_pid('delete_schedule'):
        return

    system_files = {}  # system files to be removed from the DB
    deletings = {}  # physical files waiting to be deleted
    del_file = {}

    th = Thread(target = get_result, args = (deletings,))
    th.daemon = True
    th.start()

    while True:
        remove_schedule_files = mongo.FileRemoveSchedule.find({'schedule_time': {'$lte': datetime.datetime.utcnow()}}).limit(5000)
        if remove_schedule_files.count() == 0: break
        for f in remove_schedule_files:
            if f.ost in system_files:
                system_files[f.ost].append(f)
            else:
                system_files[f.ost] = [f]

        while len(system_files) > 0:
            for ost in system_files:
                if ost in deletings:
                    continue

                paths = []
                del_file[ost] = system_files[ost][:5]

                for f in del_file[ost]:
                    paths.append(f.file_path)

                jid = client.cmd_async('osts:%s' % ost, 'storage.delete_files', [ost] + paths, expr_form='grain')
                if jid:
                    deletings[ost] = [jid, del_file[ost], time.time()]
                else:
                    current_app.logger.error("Can't run delete_files for ost %s", ost)
                system_files[ost] = system_files[ost][5:]

            for ost in system_files.keys():
                if len(system_files[ost]) == 0:
                    del system_files[ost]

            time.sleep(30)