def connects_to_cachet(self):
    """Connect to the Cachet status page through the Cachet SDK.

    Stores a ``cachet.Components`` client on ``self.components`` using
    ``self.statuspage`` as endpoint and ``self.cachet_token`` as API token.
    Any failure is fatal: the error is printed and the process exits.
    """
    try:
        self.components = cachet.Components(endpoint=self.statuspage,
                                            api_token=self.cachet_token)
    except Exception as ex:
        # A broad catch is deliberate here: any SDK/connection problem
        # means the service cannot do useful work, so terminate.
        print('Error:', ex)
        # Fixed typo in the original message ("Cacht" -> "Cachet").
        exit('Failed to connect to Cachet using Cachet SDK, terminating')
def update(self, db):
    """Sync cluster statuses and the running-job count to Cachet.

    First ensures all groups/components/metrics exist (``self._create``),
    then sets each cluster's component to status 1 (operational) or
    4 (major outage) based on its active/enabled flags, and finally posts
    the number of currently running jobs as a metric point.

    :param db: database handle providing execute_many_dict / execute_one_dict
    """
    self._create(db)

    clusters = db.execute_many_dict("""
        SELECT name, active, enabled
        FROM cluster
    """)

    # Build the API client once; the original re-created it on every
    # loop iteration, which was pure overhead.
    components = cachet.Components(endpoint=self.endpoint,
                                   api_token=self.api_token)
    for c in clusters:
        cid = self.components[c['name']]
        # Cachet status codes: 1 = operational, 4 = major outage.
        status = 1
        if not c['active'] or not c['enabled']:
            status = 4
        components.put(id=cid, status=status, enabled=True)

    jobs = db.execute_one_dict("""
        SELECT count(*) as count
        FROM job
        WHERE state = 'running'
    """)

    mid = self.metrics['Running Jobs']
    points = cachet.Points(endpoint=self.endpoint, api_token=self.api_token)
    logger.info('Running jobs: %s', jobs['count'])
    points.post(id=mid, value=jobs['count'], timestamp=int(time.time()))
def get_component(id):
    """Fetch a single Cachet component by its id.

    :param id: numeric component id on the status page
    :return: the ``data`` dict from the API response
    """
    client = cachet.Components(endpoint=ENDPOINT)
    raw = client.get(id=id)
    return json.loads(raw)['data']
def updatecomponent(component_id, component_status):
    """Set the status of a single Cachet component.

    :param component_id: id of the component to update
    :param component_status: new Cachet status code for the component
    """
    import cachetclient.cachet as cachet

    component = cachet.Components(endpoint=Uptime.ENDPOINT,
                                  api_token=Uptime.API_TOKEN)
    # The API response was bound to an unused local in the original;
    # the binding has been dropped.
    component.put(id=component_id, status=component_status)
    print('component status updated')
def get_components():
    """Return every Cachet component, keyed by its numeric id.

    TLS verification is disabled (verify=False), matching the original.
    """
    client = cachet.Components(endpoint=API_ENDPOINT,
                               api_token=API_TOKEN,
                               verify=False)
    payload = json.loads(client.get())
    return {entry['id']: entry for entry in payload['data']}
def _get_component_id(self, name):
    """Look up the id of the Cachet component called *name*.

    :param name: component name to search for
    :return: the component id, or None when no component matches
    """
    client = cachet.Components(endpoint=self.endpoint,
                               api_token=self.api_token)
    listing = json.loads(client.get())
    for entry in listing['data']:
        if entry['name'] == name:
            return entry['id']
    return None
def updateComponent(id, status):
    """Apply *status* to CachetHQ component *id*, forcing updated_at to change."""
    logger.info("Update CachetHQ component %s with status %s ..." % (id, status))
    client = cachet.Components(endpoint=CACHETHQ_ENDPOINT,
                               api_token=CACHETHQ_API_TOKEN)
    # Workaround: first write status 0 so the updated_at field is always
    # refreshed, then write the real status.
    # See https://github.com/CachetHQ/Cachet/issues/2802
    client.put(id=str(id), status=str(0))
    client.put(id=str(id), status=str(status))
def _create(self, db):
    """Ensure the Cachet 'Clusters' group, one component per cluster and the
    'Running Jobs' metric all exist, caching their ids on ``self.components``
    and ``self.metrics``.

    :param db: database handle providing execute_many_dict
    """
    # Create Group
    groups = cachet.Groups(endpoint=self.endpoint, api_token=self.api_token)
    group_list = json.loads(groups.get(name='Clusters'))
    gid = None
    if not group_list['data']:
        # Group does not exist yet - create it and remember its id.
        group = json.loads(groups.post(name='Clusters'))
        gid = group['data']['id']
    else:
        gid = group_list['data'][0]['id']

    # Create Components
    components = cachet.Components(endpoint=self.endpoint, api_token=self.api_token)
    clusters = db.execute_many_dict("""
        SELECT name
        FROM cluster
    """)
    for i, c in enumerate(clusters):
        if c['name'] in self.components:
            # Id already cached from a previous call; skip the API round-trip.
            continue
        cid = self._get_component_id(c['name'])
        if not cid:
            # Not present on the status page either - create it.
            a = components.post(name=c['name'],
                                description=c['name'],
                                status=1,
                                order=i,
                                group_id=gid,
                                enabled=True)
            component = json.loads(a)
            cid = component['data']['id']
        self.components[c['name']] = cid

    # Create metrics
    mid = self._get_metric_id('Running Jobs')
    if not mid:
        metrics = cachet.Metrics(endpoint=self.endpoint, api_token=self.api_token)
        m = metrics.post(name='Running Jobs',
                         suffix='jobs',
                         description='Currently active jobs',
                         default_value=0,
                         places=0,
                         calc_type=1)
        mid = json.loads(m)['data']['id']
    self.metrics['Running Jobs'] = mid
def monitor(conf):
    """Probe LXC containers and HTTP endpoints, then sync results to Cachet.

    Any component listed on the status page that no check covers is marked
    KO; only components whose status actually changed are written back.

    :param conf: configuration dict with 'api_url', 'api_token',
                 'lxc' and 'url' sections
    """
    # retrieve component list
    components = cachet.Components(endpoint=conf['api_url'],
                                   api_token=conf['api_token'])
    json_components = json.loads(components.get())

    # dict keyed by component NAME, value is the component struct
    # (the original comment claimed the key was the id)
    component_dict = dict()
    for component in json_components['data']:
        component['processed'] = False
        component['newstatus'] = component['status']
        component_dict[component['name']] = component

    # check LXC
    if conf['lxc']['check']:
        output = str(
            subprocess.check_output(['lxc-ls', '-f'], universal_newlines=True))
        for line in output.splitlines():
            r = re.match(lxcls_pattern, line)
            if r:
                # Component name is the prefix plus the container name.
                name = conf['lxc']['component_prefix'] + r.group(1)
                state = r.group(2)
                component = component_dict.get(name, None)
                if component:
                    component['processed'] = True
                    if state == 'RUNNING':
                        component['newstatus'] = COMPONENT_STATUS_OK
                    else:
                        component['newstatus'] = COMPONENT_STATUS_KO

    # check URLs
    if conf['url']['check']:
        for endpoint in conf['url']['endpoints']:
            component = component_dict.get(endpoint['component'], None)
            if component:
                component['processed'] = True
                r = check_url(endpoint['url'], endpoint['regex'])
                if r['success']:
                    component['newstatus'] = COMPONENT_STATUS_OK
                else:
                    component['newstatus'] = COMPONENT_STATUS_KO

    # send status to CacheHQ for changed and unprocessed components
    for component in component_dict.values():
        if not component['processed']:
            # No check covered this component: treat it as down.
            component['newstatus'] = COMPONENT_STATUS_KO
        if component['newstatus'] != component['status']:
            # print(component)
            components.put(id=component['id'], status=component['newstatus'])
def createComponent(name, status, groupId):
    """Create a CachetHQ component inside a group and apply its status.

    :param name: display name for the new component
    :param status: initial Cachet status code
    :param groupId: id of the group the component belongs to
    """
    logger.info("Create CachetHQ component %s with status %s in group %s ..." % (name, status, groupId))
    client = cachet.Components(endpoint=CACHETHQ_ENDPOINT,
                               api_token=CACHETHQ_API_TOKEN)
    response = client.post(name=str(name),
                           status=status,
                           description=str('Automatic component creation'),
                           group_id=str(groupId))
    component_id = json.loads(response)['data']['id']
    # Re-apply the status after creation, presumably to force the
    # component's updated_at field to be set - TODO confirm.
    client.put(id=component_id, status=status)
def components(self):
    """Fetch components from Cachet and strip every field that is not
    listed in COMPONENT_DATA; the result is cached on the instance."""
    if self._loaded:
        # Already fetched once - serve the cached tuple.
        return self._components
    raw = cachet.Components(endpoint=self.endpoint).get()
    fetched = tuple(json.loads(raw)['data'])
    logging.info('retrieved status of %d components from Cachet', len(fetched))
    for entry in fetched:
        for field in list(entry.keys()):
            if field not in COMPONENT_DATA:
                del entry[field]
    self._components = fetched
    self._loaded = True
    return self._components
def _get_or_create_component(self, group_id, component_name):
    """Return the id of *component_name* within *group_id*, creating the
    component (status 1) when it does not exist yet.

    :raises Exception: when the component name is empty
    """
    if len(component_name) == 0:
        raise Exception('Invalid component name')
    client = cachet.Components(endpoint=self.ENDPOINT,
                               api_token=self.API_TOKEN)
    # First see whether the component already exists in this group.
    lookup = json.loads(
        client.get(params={'name': component_name, 'group_id': group_id}))
    if len(lookup['data']) > 0:
        existing_id = lookup['data'][0]['id']
        self.logger.debug('Component %s already exists with id %d',
                          component_name, existing_id)
        return existing_id
    created = json.loads(
        client.post(name=component_name, group_id=group_id, status=1))
    new_id = created['data']['id']
    self.logger.debug('Create new component %s, component id is %d',
                      component_name, new_id)
    return new_id
def _update_component(self, component_id, state, description):
    """Push *state* and *description* to Cachet component *component_id*."""
    client = cachet.Components(endpoint=self.ENDPOINT,
                               api_token=self.API_TOKEN)
    client.put(id=component_id, status=state, description=description)
    self.logger.debug('Update component #%d status to %d',
                      component_id, state)
def main(script, component_id, config_file, script_args=None, retries=5,
         interval=0.5, debug=False):
    """Script execution.

    Runs a Nagios-style monitoring plugin (retrying soft failures up to
    *retries* times with *interval* seconds between attempts) and pushes
    the mapped status to the Cachet component *component_id*.

    :param script: path of the monitoring plugin to execute
    :param component_id: id of the Cachet component to update
    :param config_file: ini file with the Cachet base-url and api-key
    :param script_args: extra arguments passed to the plugin
    :param retries: number of additional attempts after a soft failure
    :param interval: seconds to sleep between attempts
    :param debug: enable debug-level logging
    :raises ValueError: when base-url or api-key is missing from the config
    """
    if script_args is None:
        script_args = []
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    if debug:  # pragma: no cover
        logger.setLevel(logging.DEBUG)
        logger.info('debug logging enabled')

    # Read Cachet API parameters from config file
    config_parser = settings.RunnerConfigParser()
    config_parser.read(config_file)
    base_url = config_parser.get('Cachet', 'base-url', fallback=None)
    api_key = config_parser.get('Cachet', 'api-key', fallback=None)
    if not api_key or not base_url:
        logger.critical('one or both mandatory config parameters are missing')
        raise ValueError(
            'invalid configuration, use config.ini.dist file as reference')
    logger.info('configuration parameters loaded')

    # Run monitoring script
    script_args.insert(0, script)
    logger.debug('invocation arguments %s', script_args)
    for retry_num in range(retries + 1, 0, -1):
        if retry_num < retries + 1:
            # Not first run, script is in soft failure status, wait before retrying
            time.sleep(interval)
        logger.info('attempt n. %d', retries - retry_num + 1)
        completed_execution = subprocess.run(script_args)
        logger.debug('execution completed, plugin exit code is %d',
                     completed_execution.returncode)
        # Bug fix: the original logged the value itself under a message that
        # claimed to log its type.
        logger.debug('type of plugin exit code is %s',
                     type(completed_execution.returncode))
        try:
            script_return_code = nagios_common.Codes(
                completed_execution.returncode)
            if script_return_code == nagios_common.Codes.OK:
                break
        except ValueError:
            # Exit code outside the Nagios OK/WARNING/CRITICAL/UNKNOWN set.
            logger.critical(
                'script plugin exit code is not compatible with Nagios standards, '
                'return code is %d', completed_execution.returncode)
            nagios_common.plugin_exit(code=nagios_common.Codes.CRITICAL)
        logger.info('script return code is: %s, %d',
                    script_return_code.name, script_return_code.value)
        # The original decremented retry_num here; the for statement
        # reassigns the loop variable each iteration, so the decrement
        # was dead code and has been removed.

    # Update Cachet
    cachet_status_code = codes_mapping[script_return_code]
    if cachet_status_code:
        components = cachet.Components(endpoint=base_url, api_token=api_key)
        components.put(id=component_id, status=cachet_status_code)
        logger.info('updated component %d with status %d', component_id,
                    cachet_status_code)
        nagios_common.plugin_exit(code=nagios_common.Codes.OK)
    else:
        logger.warning('no updates were sent to Cachet')
        nagios_common.plugin_exit(code=nagios_common.Codes.UNKNOWN)
def __check_service(self):
    """
    External Component Dependency Check
    Throws exception if all components are not UP

    Polls each monitored service's Cachet component up to 5 times
    (30 seconds apart) and raises TefloError when any component is
    DOWN or does not exist on the status page.

    :param scenario: teflo scenario object
    :param config: teflo config object
    """
    # External Dependency Check
    # Available components to check ci-rhos, zabbix-sysops, brew, covscan
    # polarion, rpmdiff, umb, errata, rdo-cloud
    # gerrit
    # Verify dependency check components are supported/valid then
    # Check status (UP/DOWN)
    # Only check if dependency check endpoint set and components given
    # Else it is ignored
    LOG.info('Running external resource validation')
    if self.config['RESOURCE_CHECK_ENDPOINT']:
        endpoint = self.config['RESOURCE_CHECK_ENDPOINT']
        ext_resources_avail = True
        # component_names = self.scenario.resource_check['service']
        component_names = getattr(self.scenario, 'resource_check').get(
            'monitored_services', None)
        # TLS verification is disabled below, so silence urllib3 warnings.
        urllib3.disable_warnings()
        components = cachet.Components(endpoint=endpoint, verify=False)
        LOG.info(' DEPENDENCY CHECK '.center(64, '-'))
        for comp in component_names:
            comp_resource_invalid = False
            comp_resource_avail = False
            # Retry loop: up to 5 polls per component, 30s apart.
            for attempts in range(1, 6):
                component_data = components.get(params={'name': comp})
                if json.loads(component_data)['data']:
                    comp_status = json.loads(
                        component_data)['data'][0]['status']
                    if comp_status == 4:
                        # Status 4 = major outage; wait and poll again.
                        comp_resource_avail = False
                        time.sleep(30)
                        continue
                    else:
                        comp_resource_avail = True
                        break
                else:
                    # No component with this name on the status page.
                    comp_resource_invalid = True
            if comp_resource_avail is not True or comp_resource_invalid is True:
                ext_resources_avail = False
            if comp_resource_invalid:
                LOG.info('{:>40} {:<9} - Attempts {}'.format(
                    comp.upper(), ': INVALID', attempts))
            else:
                LOG.info('{:>40} {:<9} - Attempts {}'.format(
                    comp.upper(),
                    ': UP' if comp_resource_avail else ': DOWN',
                    attempts))
        warnings.resetwarnings()
        LOG.info(''.center(64, '-'))
        if ext_resources_avail is not True:
            LOG.error(
                "ERROR: Not all external resources are available or valid. Not running scenario"
            )
            raise TefloError(
                'Scenario %s will not be run! Not all external resources are available or valid'
                % getattr(self.scenario, 'name'))
# Demo script exercising the python-cachetclient API end to end:
# ping, version, component CRUD, and component-group CRUD.
import cachetclient.cachet as cachet
import json

ENDPOINT = 'http://status.domain.tld/api/v1'
API_TOKEN = 'token'

# /ping - basic connectivity check (no auth required)
ping = cachet.Ping(endpoint=ENDPOINT)
print(ping.get())

# /version - report the Cachet API version
version = cachet.Version(endpoint=ENDPOINT)
print(version.get())

# /components - create, read, update, then delete a test component
components = cachet.Components(endpoint=ENDPOINT, api_token=API_TOKEN)
new_component = json.loads(
    components.post(name='Test component', status=1,
                    description='Test component'))
print(components.get())
components.put(id=new_component['data']['id'], description='Updated component')
print(components.get(id=new_component['data']['id']))
components.delete(id=new_component['data']['id'])

# /components/groups - create a test group, then rename it
groups = cachet.Groups(endpoint=ENDPOINT, api_token=API_TOKEN)
new_group = json.loads(groups.post(name='Test group'))
print(groups.get())
groups.put(id=new_group['data']['id'], name='Updated group')
print(groups.get(id=new_group['data']['id']))