for host in hosts:  # 'hosts' comes from an earlier zapi.host.get() call (not shown in this excerpt)
    matchObj = re.search(r'_(\d{6})\d+_', host['name'], re.M | re.I)
    if matchObj:
        # Host name carries a YYMMDD timestamp: compute its age in days
        host_date = datetime.strptime('20%s' % matchObj.group(1), '%Y%m%d')
        timediff = (date_curr - host_date).days
        if timediff >= int(args.max_age):
            host['timediff'] = timediff
            hosts_exclude.append(host)
    else:
        logger.debug("No matches for host: %s" % host)
        hosts_no_match.append(host)

"""
Perform (or not >> --no-run) the removal of previously identified hosts
"""
total = len(hosts_exclude)
logger.info("Hosts to remove: %d" % total)
if args.run and total > 0:
    x = 0
    bar = ProgressBar(maxval=total,
                      widgets=[Percentage(), ReverseBar(), ETA(),
                               RotatingMarker(), Timer()]).start()
    logger.echo = False
    for host in hosts_exclude:
        x += 1
        bar.update(x)
        logger.debug("(%d/%d) >> Removing >> %s" % (x, total, host))
        out = zapi.host.delete(host['hostid'])
    bar.finish()
    logger.echo = args.verbose

total = len(hosts_no_match)
logger.info("Other hosts without timestamp to remove: %d" % total)
if args.run and total > 0 and args.matches:
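# --- Shared setup assumed by these excerpts (illustrative sketch only) ---
# The fragments in this file rely on an authenticated `zapi` client, a `logger`,
# parsed CLI `args`, `date_curr`, and the hosts/hosts_exclude/hosts_no_match
# collections. The sketch below shows one way that preamble could look, using
# pyzabbix, progressbar and argparse; the flag names and defaults are assumptions,
# and the original scripts use a custom logger with print_json()/echo, which the
# plain `logging` module substituted here does not provide.
import re
import argparse
import logging
from datetime import datetime

from pyzabbix import ZabbixAPI
from progressbar import ProgressBar, Percentage, ReverseBar, ETA, RotatingMarker, Timer

parser = argparse.ArgumentParser()
parser.add_argument('--url', default='http://localhost/zabbix')  # assumed default
parser.add_argument('--user', default='Admin')                   # assumed default
parser.add_argument('--password', default='zabbix')              # assumed default
parser.add_argument('--max-age', type=int, default=30)
parser.add_argument('--run', action='store_true')
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()

logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
logger = logging.getLogger(__name__)

zapi = ZabbixAPI(args.url)
zapi.login(args.user, args.password)

date_curr = datetime.now()
hosts = zapi.host.get(output=['hostid', 'name'])  # the original query is likely narrower
hosts_exclude, hosts_no_match = [], []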
def proxy():
    """Create one 'Zabbix::Proxy::<name>' hostgroup per proxy and sync its membership."""
    proxies = zapi.proxy.get(output=['host'], sortfield=['host'])
    for p in proxies:
        hg_name = 'Zabbix::Proxy::{0}'.format(p['host'])
        logger.info('Starting \'{0}\''.format(hg_name))
        hg_proxy = hg_search(hg_name)
        if not hg_proxy:
            logger.debug('Creating hostgroup: {0}'.format(hg_name))
            out = zapi.hostgroup.create(name=hg_name)
            logger.print_json(out)
            hg_proxy = out['groupids'][0]
        hosts_proxy = zapi.host.get(output=['hostid'], proxyids=[p['proxyid']])
        hg_cleangroup(hostgroupId=hg_proxy)
        hg_massupdate(hostsJson=hosts_proxy, hostgroupId=hg_proxy)
    return


try:
    hg_names = hg_cache()
    operacao()
except Exception as e:
    logger.error('Failed to organize operacao hostgroups: {0}'.format(e))

try:
    hg_names = hg_cache()
    proxy()
except Exception as e:
    logger.error('Failed to organize proxy hostgroups: {0}'.format(e))

logger.info('Done')
zapi.user.logout()
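# The helpers hg_cache(), hg_search(), hg_cleangroup() and hg_massupdate() are not
# part of this excerpt. As an illustration only, hg_search() could resolve a
# hostgroup name to its groupid roughly as sketched below; the original
# implementation may differ.
def hg_search(name):
    """Return the groupid of the hostgroup with exactly this name, or None."""
    groups = zapi.hostgroup.get(output=['groupid'], filter={'name': [name]})
    return groups[0]['groupid'] if groups else None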
    # Inside a per-proxy loop: 'proxy', 'fake', 'out' and 'loglevels' are defined by
    # the surrounding code, which is not shown in this excerpt.
    if not fake:
        query = {
            'proxy_hostid': proxy['proxyid'],
            'hosts': [{'hostid': x} for x in proxy['newhosts']]
        }
        out = zapi.host.massupdate(**query)
    if fake or out:  # check 'fake' first so a dry run never references 'out' before assignment
        logger.debug('\tSUCCESS: updated proxy with {0} hosts: {1}'.format(
            len(proxy['newhosts']), proxy['host']))
        if loglevels[args.loglevel.upper()] < loglevels['INFO']:
            logger.debug('Detailed hosts at proxy {0}'.format(proxy['host']))
            logger.print_json(out)
    else:
        logger.warning('\tFAILED: updated proxy with {0} hosts: {1}'.format(
            len(proxy['newhosts']), proxy['host']))

# 1) Identify the networks via the network API
logger.info('1) Discover the networks:')
network_ranges = network_api_get_ranges()
for local in network_ranges:
    # subtract two to skip the 'disabled' and 'total_weight' entries
    logger.info('Found {0} networks for {1}'.format(len(network_ranges[local]) - 2, local))

# 2) Identify the available proxies
logger.info('2) Discover the registered proxies:')
proxies = proxies_get()
for local in proxies:
    # logger.print_json(proxies[local])
    logger.info('Found {0} proxies for {1}'.format(len(proxies[local]), local))

# 2.1) Compute the distribution of networks per proxy
# 2.2) Assign a weight for the maximum number of hosts per network, and compute it per proxy
discovery_rule_per_proxy()
logger.debug('2.1) Total ranges per proxy:')
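# discovery_rule_per_proxy() itself is not shown. One possible reading of steps
# 2.1/2.2 is a greedy, weight-balanced split of the network ranges across the
# proxies of a site; the sketch below is purely illustrative and its names
# (distribute_ranges, the (cidr, weight) tuples) are assumptions, not the
# original implementation.
def distribute_ranges(ranges, proxy_names):
    """Assign each (cidr, weight) range to the proxy with the lowest accumulated
    weight so far, so the expected host count stays balanced per proxy."""
    load = {p: 0 for p in proxy_names}
    assignment = {p: [] for p in proxy_names}
    for cidr, weight in sorted(ranges, key=lambda r: r[1], reverse=True):
        target = min(load, key=load.get)
        assignment[target].append(cidr)
        load[target] += weight
    return assignment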
from time import localtime, mktime  # needed for the time_from calculation below

groupids = zapi.hostgroup.get(output=['groupid'], search={'name': args.group})
itens = zapi.item.get(output=['name', 'itemid', 'value_type', 'delay'],
                      groupids=[x['groupid'] for x in groupids],
                      search={'name': args.item},
                      filter={'status': 0, 'state': 0},
                      selectHosts=['name'],
                      sortorder='ASC', sortfield='itemid')

# history type must match the item's value_type (0 float, 1 char, 2 log, 3 unsigned, 4 text)
value_type = itens[0]['value_type']
time_from = mktime(localtime()) - int(itens[0]['delay']) - 15
history = zapi.history.get(output='extend', history=value_type,
                           itemids=[x['itemid'] for x in itens],
                           time_from=time_from)


def get_last_history(itemid, history):
    """Return the newest value/clock pair for an item from the fetched history."""
    value = None
    lastclock = 0
    for h in history:
        # clocks come back as strings from the API, so compare them as integers
        if h['itemid'] == itemid and int(h['clock']) > lastclock:
            value = h['value']
            lastclock = int(h['clock'])
    return {'value': value, 'clock': lastclock}


for item in itens:
    x = get_last_history(item['itemid'], history)
    logger.info("Host {0}, itemid {1}, value {2}, clock {3}".format(
        item['hosts'][0]['name'], item['itemid'], x['value'], x['clock']))

logger.info("Done")
zapi.user.logout()
    # executed when the login check fails (the surrounding 'if' is not shown in this excerpt)
    logger.error('Unable to login. Check your credentials.')
    exit(1)

lista = [
    {'host': 'HOST A', 'trigger': 'eth0'},
    {'host': 'HOST B', 'trigger': 'eth0'},
]

maintenance_triggers_ids = []
for host in lista:
    h = zapi.host.get(output=['hostid', 'name'], search={'name': host['host']})
    if len(h) == 0:
        logger.warning('Host {0} not found!'.format(host['host']))
        continue
    triggers = zapi.trigger.get(output=['description', 'triggerid'],
                                hostids=[h[0]['hostid']],
                                expandDescription=1,
                                search={'description': ': {0}'.format(host['trigger'])})
    logger.info('Found {0} triggers for host {1}'.format(len(triggers), host['host']))
    logger.print_json(triggers)
    for t in triggers:
        maintenance_triggers_ids.append(t['triggerid'])

i = 0
logger.info('Found {0} triggers'.format(len(maintenance_triggers_ids)))
bar = ProgressBar(maxval=len(maintenance_triggers_ids),
                  widgets=[Percentage(), ReverseBar(), ETA(),
                           RotatingMarker(), Timer()]).start()
for t in maintenance_triggers_ids:
    if args.run:
        # Zabbix trigger status: 0 = enabled, 1 = disabled
        out = zapi.trigger.update(triggerid=t, status=args.status)
        i += 1
        bar.update(i)
    else:
        logger.warning('Should change triggerid {0} to status {1}'.format(t, args.status))
bar.finish()