def __init__(self, tid):
    super(CollectdPluginManager, self).__init__(CollectdPluginManager.NAME, tid)
    # Initialize logger object
    self.expanded_template = {}
    self.logger = get_logger('CollectdPluginManager')
    self.collector_type = COLLECTD
    self.config_info, self.node_state, self.nodes_cfg_list = [], [], []
    self.all_conf = CollectdPluginDestDir + ALL_CONF
def __init__(self, template, sub_template):
    self.plugin_src_dir = CollectdPluginDestDir
    self.plugin_conf_dir = CollectdPluginConfDir
    self.collectd_conf_dir = CollectdConfDir
    self.interval = 10
    self.template = template
    self.sub_template = sub_template
    self.cfg_list, self.tag_list, self.target_list = [], [], []
    self.tags, self.targets = {}, {}
    self.logger = get_logger(COLLECTD_MGR)
    self.logger.debug('Template: %s', json.dumps(self.template))
    self.logger.debug('SubTemplate: %s', json.dumps(self.sub_template))
class JobMonitor:
    logger = get_logger('JobMonitor')

    def __init__(self):
        pass

    @classmethod
    @csrf_exempt
    def perform(self, request, tid, oper):
        self.logger.info('API Call - Job Monitoring. ( operation = %s )', oper)
        response = dict()
        curr_temp = Template.objects.filter(id=tid)
        if len(curr_temp) == 0:
            response['status'] = 'Template Not Found.'
            self.logger.error('Template Not Found.')
            return HttpResponseBadRequest(json.dumps(response))
        data = json.loads(request.body)
        self.logger.info('Data Input: %s', json.dumps(data))
        pm_obj = PollerManager(tid)
        # start_job returns a JSON string; wrap it so the view returns a response.
        return HttpResponse(pm_obj.start_job(data))
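# A hedged sketch of the request body this endpoint forwards to
# PollerManager.start_job. The key names are inferred from
# start_yarn_application (tag, job_details, job_type, nrfiles, filesize);
# the values below are illustrative only:
#
#   {
#       "tag": "run-42",
#       "job_details": {
#           "job_type": "dfsiowrite",
#           "nrfiles": 10,
#           "filesize": 128
#       }
#   }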
def __init__(self, tid):
    super(FluentdPluginManager, self).__init__(FluentdPluginManager.NAME, tid)
    # Initialize logger object
    self.logger = get_logger('FluentdPluginManager')
    # Initialize defaults
    self.plugin_path = os.path.sep + os.path.join('etc', 'td-agent')
    self.service_name = 'td-agent'
    self.plugins, self.target = [], []
    # self.logger_user_input = template_data
    config_file = os.path.join(settings.PROFILE_DIR, self.name, 'plugin_config.json')
    with open(config_file, 'r') as f:
        self.plugin_config = json.load(f)
    self.plugin_post_data, self.status = [], []
    self.logger.info('Logger Object Successfully Initialized.')
    self.logger.info('Target Nodes: %s', str(self.nodelist))
    self.logger.info('User Input: %s', str(self.sub_template))
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning

from deepInsight.util import get_logger

__author__ = 'Anand Nevase'

requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

# Numeric service-state codes reported by the cluster, mapped to readable labels.
service_status = {
    0: 'Not configured',
    1: 'Configured',
    2: 'Running',
    3: 'Stopped',
    4: 'Failed',
    5: 'Stand by'
}

logger = get_logger("Mapr Hadoop Cluster Poller")


class HadoopClusterPoller():
    _NAME_ = "Mapr Hadoop Cluster Poller"

    def __init__(self, config):
        logger.info('Initialise {}'.format(self.get_name()))
        self.config = config
        self.configure()
        self.result = {}

    def load_config(self, config):
        logger.debug('Loading config: {}'.format(config))
        self.config = config
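# A minimal construction sketch, assuming this poller is driven the same way
# as HadoopJobPoller (a config dict passed to __init__, swappable at runtime
# via load_config). The config keys are placeholders, not a documented schema:
#
#   poller = HadoopClusterPoller({'resource_manager_address': '10.20.30.40',
#                                 'port': 8443})
#   poller.load_config({'resource_manager_address': '10.20.30.41', 'port': 8443})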
class ClusterMonitor:
    logger = get_logger('ClusterMonitor')

    def __init__(self):
        pass

    @classmethod
    def monitor(self, request, tid, oper):
        self.logger.info('API Call - Cluster Monitoring. ( operation = %s )', oper)
        self.logger.debug('Input template id - %s', tid)
        response, curr_temp = dict(), dict()
        curr_temp = Template.objects.filter(id=tid)
        if len(curr_temp) == 0:
            response['status'] = 'Template Not Found.'
            self.logger.error('Template Not Found')
            return HttpResponseBadRequest(json.dumps(response))
        else:
            if oper.lower() == 'start':
                active_temps = Template.objects.filter(state='Active')
                for item in active_temps:
                    if item.id == tid:
                        response['status'] = 'Template Already Active.'
                        self.logger.error('Monitoring is already started for this template.')
                        return HttpResponse(json.dumps(response))
                    else:
                        item.state = 'Stopped'
                        item.save()
                        self.logger.warning('Template: %s - Monitoring overwritten by Template: %s.'
                                            % (item.id, tid))
            if oper.lower() == 'stop':
                not_active_temps = Template.objects.exclude(state='Active')
                for item in not_active_temps:
                    if item.id == tid:
                        response['status'] = 'Template Not Active.'
                        self.logger.error('Monitoring is not started for this template.')
                        return HttpResponse(json.dumps(response))
            template = {}
            with open(settings.DATA_DIR + os.path.sep + tid) as f:
                template = json.load(f)
            sub_templates = [key for key, val in template.get('sub_template')[0].iteritems()
                             if key != 'name']
            self.logger.debug('Available sub-templates in template - %s', str(sub_templates))
            watchdogs = {}
            for watchdog_class in BaseMonitor.__subclasses__():
                watch_type = watchdog_class.NAME
                if watch_type in sub_templates:
                    self.logger.debug('Initializing Object - %s', watch_type)
                    watchdogs[watch_type] = watchdog_class(tid)
                    self.logger.debug('Initialization Complete.')
                else:
                    self.logger.warning('%s - Not available in Template.', watch_type)
                    continue
            '''
            watchdogs = {}
            for item in sub_templates:
                if item == 'loggers':
                    watchdogs['loggers'] = FluentdPluginManager(tid)
                    self.logger.debug('Initializing Object Loggers.')
                if item == 'collectors':
                    watchdogs['collectors'] = CollectdPluginManager(tid)
                    self.logger.debug('Initializing Object Collectors.')
                if item == 'pollers':
                    watchdogs['pollers'] = PollerManager(tid)
                    self.logger.debug('Initializing Object Pollers.')
            '''
            # Provision the Template for each of the watch dogs.
            response = dict()
            self.logger.debug('Perform Operation(%s) for : %s'
                              % (oper.upper(), str(watchdogs.keys())))
            for name, obj in watchdogs.iteritems():
                status = obj.deploy(oper.lower())
                self.logger.debug('%s: Provision Status: %s' % (name, json.dumps(status)))
            # Update template status as Active.
            if oper.lower() == 'start':
                curr_temp[0].state = 'Active'
                curr_temp[0].save()
            elif oper.lower() == 'stop':
                curr_temp[0].state = 'Stopped'
                curr_temp[0].save()
            elif oper.lower() == 'teardown':
                curr_temp[0].delete()
                self.logger.debug('Template Record Deleted.')
                response["Status"] = "Monitoring " + oper.capitalize() + "ed Successfully"
                return HttpResponse(json.dumps(response))
            else:
                pass
            self.logger.info("Template State Changed.")
            # Create Response
            response["Status"] = "Monitoring " + oper.capitalize() + "ed Successfully"
            response["template_id"] = tid
            self.logger.info("Hadoop Monitoring " + oper.capitalize() + "ed Successfully.")
            return HttpResponse(json.dumps(response))
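# Sketch of the template JSON this view consumes, inferred from
# BaseMonitor.read_template_data and the sub_template scan above; the values
# are illustrative:
#
#   {
#       "name": "my-cluster",
#       "node_list": ["node1.example.com", "node2.example.com"],
#       "tags": ["prod"],
#       "sub_template": [{
#           "name": "default",
#           "collectors": {...},
#           "loggers": {...},
#           "pollers": {...}
#       }]
#   }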
{
    'resource_manager_address': <mapr-cluster-ip>,
    'port': <mapr-cluster-port>,           # Port where the cluster webserver is running
    'application_tag': <tag>,              # Fetch jobs by the tag given when the application was started
    'application_ids': <job-id-list>,      # Fetch jobs by application id
    'application_names': <job-name-list>,  # Fetch jobs by application name
    'application_status': 'running'        # Fetch running jobs only
}
'''

from yarn_api_client import ApplicationMaster, HistoryServer, NodeManager, ResourceManager
import json
import time

from deepInsight.util import get_logger

logger = get_logger("Mapr Hadoop Job Poller")


class HadoopJobPoller:
    _NAME_ = "Mapr Hadoop Job Poller"

    def __init__(self, config):
        logger.info('Initialise {}'.format(self.get_name()))
        self.config = config
        self.configure()
        self.result = []

    def load_config(self, config):
        logger.debug('Loading config: {}'.format(config))
        self.config = config
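# Example config assembled from the docstring above; every value is
# illustrative:
#
#   job_poller = HadoopJobPoller({
#       'resource_manager_address': '10.20.30.40',
#       'port': 8088,
#       'application_tag': 'run-42',
#       'application_ids': ['application_1500000000000_0001'],
#       'application_names': ['teragen_ab12cd34'],
#       'application_status': 'running',
#   })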
class PollerManager(BaseMonitor):
    NAME = "pollers"
    logger = get_logger(POLLERM)
    controller_id = -1
    template = {}
    orch_comm = ""
    pcon_comm = ""
    orch_comm_producer = None
    pcon_comm_consumer = None
    kafka_client = ""
    pcon_consumer_p = None
    check_point = {}
    plugin_id_map = {}
    plugin_count_map = {}
    mapr_user = '******'
    mapr_home = os.path.sep + os.path.join('home', 'mapr')

    def __init__(self, tid):
        super(PollerManager, self).__init__(PollerManager.NAME, tid)
        self.pcontroller = CONTROLLER
        self.controller_id = 0
        self.orch_comm = ORCH_COMM
        self.pcon_comm = PCON_COMM
        self.kafka_client = KAFKA_CLIENT
        self.pcon_consumer_p = None

    def get_name(self):
        return PollerManager.NAME

    def _start_producer(self):
        try:
            producer = KafkaProducer(
                bootstrap_servers=self.kafka_client,
                value_serializer=lambda v: json.dumps(v).encode("utf-8"))
            return producer
        except Exception as e:
            self.logger.error('Exception Caused: %s.', str(e))
            self.logger.error("Error: Failed to start kafka producer for client- %s",
                              self.kafka_client)
            exit(0)

    def _start_consumer(self):
        try:
            consumer = KafkaConsumer(
                self.pcon_comm,
                bootstrap_servers=[self.kafka_client],
                value_deserializer=lambda m: json.loads(m.decode("ascii")))
            return consumer
        except Exception as e:
            self.logger.error('Exception Caused: %s.', str(e))
            self.logger.error("Error: Failed to start Consumer: %s, topic: %s"
                              % (self.kafka_client, self.pcon_comm))
            exit(0)

    def deploy(self, oper, dirty_list=[]):
        # Start Producer
        self.orch_comm_producer = self._start_producer()
        # Start Consumer
        self.pcon_consumer_p = multiprocessing.Process(target=pcon_consumer, args=())
        self.pcon_consumer_p.start()
        controller = ('python' + ' ' + self.pcontroller + ' ' + self.kafka_client + ' ' +
                      self.orch_comm + ' ' + self.pcon_comm + ' &')
        self.logger.debug("Controller: %s", controller)
        os.system(controller)
        # GAMGAM: Dirty list ( Yet to Handle )
        # self.logger.debug("Template: %s", json.dumps(self.template))
        self.logger.debug("Sub-Template: %s", json.dumps(self.sub_template))
        self.logger.debug("dirty_list- %s", dirty_list)
        for item in self.sub_template.get(TARGETS, []):
            if (not dirty_list) or (item[NAME] in dirty_list):
                msg = self.build_msg(item, inst_type=TARGET, op=oper)
                self.logger.debug('Target# Message sent to controller: %s', msg)
                self.send_to_controller(msg)
        for item in self.sub_template.get(PLUGINS, []):
            if (not dirty_list) or (item[NAME] in dirty_list):
                msg = self.build_msg(item, inst_type=PLUGIN, op=oper)
                self.logger.debug('Plugin# Message sent to controller: %s', msg)
                self.send_to_controller(msg)

    def teardownall(self, template):
        # Teardown the poller controller and all of its plugins.
        self.logger.debug("Received teardown-all.")
        msg = self.build_msg(template, op=TEARDOWNALL)
        self.logger.debug('Teardown# Message built: %s', msg)
        if msg:
            resp = self.send_to_controller(msg)
            if resp:
                if self.pcon_consumer_p and self.pcon_consumer_p.is_alive():
                    self.pcon_consumer_p.terminate()
                    self.logger.debug("Terminated- pcon_consumer_p")
            else:
                self.logger.debug("Failed to teardown.")

    def teardown_inst(self):
        pass

    def check_state(self, plugin_id=""):
        # Get the state of the poller, or of a single plugin, from the state file.
        self.logger.debug("check state- 1# plugin_id- %s", plugin_id)
        state = {}
        with open(STATE_FILE_PATH, 'r') as fh:
            state = json.load(fh)
        self.logger.debug("State File: %s", json.dumps(state))
        if plugin_id:
            if CHECK_POINT in state.keys():
                if STATE in state[CHECK_POINT].keys():
                    if plugin_id in state[CHECK_POINT][STATE].keys():
                        ret = state[CHECK_POINT][STATE][plugin_id]
                        self.logger.debug("Returning: %s", ret)
                        return ret
        else:
            if CHECK_POINT in state.keys():
                if STATE in state[CHECK_POINT].keys():
                    ret = state.get(CHECK_POINT, {}).get(STATE, {})
                    self.logger.debug("Returning: %s", ret)
                    return ret
        return state

    def check_status(self, plugin_id=""):
        # Get the status of the poller, or of a single plugin, from the pcon topic.
        self.logger.debug("check status- 1# plugin_id- %s", plugin_id)
        status = {}
        pcon_comm_consumer_t = self.pcon_comm_consumer
        if not pcon_comm_consumer_t:
            self.logger.debug("check status: self.pcon_comm_consumer is None")
            pcon_comm_consumer_t = self._start_consumer()
            self.logger.debug("check status: Initialized a temp consumer- pcon_comm_consumer_t")
        partitions = pcon_comm_consumer_t.poll(POLL_WAIT_TIME_MS)
        if len(partitions) > 0:
            for p in partitions:
                if not plugin_id:
                    try:
                        status = json.loads(partitions[p][-1].value)[CHECK_POINT][STATUS]
                        self.logger.debug("check status- 2: Status: %s", status)
                        return status
                    except Exception as e:
                        self.logger.exception("Exception: %s", str(e))
                else:
                    try:
                        status = json.loads(partitions[p][-1].value)[CHECK_POINT][STATUS][plugin_id]
                        self.logger.debug("check status- 3: Status: %s", status)
                        return status
                    except Exception as e:
                        self.logger.exception("Exception: %s", str(e))
        self.logger.debug("check status- 4: Status: %s", status)
        return status

    def build_msg(self, msg, inst_type=PLUGIN, op=START):
        self.logger.debug("build msg: msg- %s, inst_type- %s, op- %s" % (msg, inst_type, op))
        if op in [TEARDOWNALL, STOPALL]:
            msg = {MSG_TYPE: op}
        if op in [START, UPDATE, STOP, RESUME, TEARDOWN]:
            msg.update({MSG_TYPE: CNTRL})
            msg.update({CMD: op})
            msg.update({PLUGIN_ID: msg[NAME]})
            if inst_type == PLUGIN:
                msg.update({PLUGIN_TYPE: READER})
                msg.update({DEST_LIST: msg[TARGETS]})
            if inst_type == TARGET:
                msg.update({PLUGIN_TYPE: WRITER})
        msg["id"] = self.controller_id
        self.logger.debug("Message Built: %s", msg)
        return msg

    def send_to_controller(self, msg):
        orch_comm_producer_t = self.orch_comm_producer
        if not orch_comm_producer_t:
            self.logger.debug("self.orch_comm_producer is None")
            orch_comm_producer_t = self._start_producer()
            self.logger.debug("Initialized a temp producer- orch_comm_producer_t")
        try:
            orch_comm_producer_t.send(self.orch_comm, json.dumps(msg))
            orch_comm_producer_t.flush()
            return True
        except Exception as e:
            self.logger.error("Failed to send to- orch_comm")
            self.logger.error("Exception: %s", str(e))
            return False

    def generate_id(self, plugin_name):
        return (plugin_name + "_" + str(self.plugin_count_map[plugin_name]) +
                "_" + str(self.controller_id))

    def start_yarn_application(self, job_meta):
        job_timeout = get_job_time_out()
        self.logger.info('Job Timeout in seconds: %s', str(job_timeout))
        self.logger.debug("Job Details: %s", json.dumps(job_meta.get('job_details')))
        resource_manager = str()
        if self.template.get('node_list'):
            resource_manager = self.template.get('node_list')[0]
        self.logger.debug("Resource Manager: %s", resource_manager or "None")
        job_tag = job_meta.get('tag', None)
        job_details = job_meta.get("job_details", {})
        job_type = job_details.get('job_type', None)
        job_name = job_type + '_' + os.urandom(4).encode('hex')
        self.logger.info("job_type: %s, job_name: %s" % (job_type, job_name))
        application_id = None
        try:
            job_cmd = str()
            if job_type:
                job_name_option, job_tag_option = str(), str()
                job_name_option = '-Dmapreduce.job.name={}'.format(job_name)
                if job_tag:
                    job_tag_option = '-Dmapreduce.job.tags={}'.format(job_tag)
                # TestDFSIO Read & Write
                jar_name = '/opt/mapr/hadoop/hadoop-0.20.2/hadoop-0.20.2-dev-test.jar'
                if job_type.lower() in ['dfsioread', 'dfsiowrite']:
                    file_count = job_details.get('nrfiles', 0)
                    file_size = job_details.get('filesize', 0)
                    test_name = 'TestDFSIO'
                    if job_tag:
                        if job_type.lower() == "dfsioread":
                            job_cmd = 'yarn jar {} {} {} {} -read -nrFiles {} -fileSize {}'.format(
                                jar_name, test_name, job_name_option, job_tag_option,
                                file_count, file_size)
                        else:
                            job_cmd = 'yarn jar {} {} {} {} -write -nrFiles {} -fileSize {}'.format(
                                jar_name, test_name, job_name_option, job_tag_option,
                                file_count, file_size)
                    else:
                        if job_type.lower() == "dfsioread":
                            job_cmd = 'yarn jar {} {} {} -read -nrFiles {} -fileSize {}'.format(
                                jar_name, test_name, job_name_option, file_count, file_size)
                        else:
                            job_cmd = 'yarn jar {} {} {} -write -nrFiles {} -fileSize {}'.format(
                                jar_name, test_name, job_name_option, file_count, file_size)
                    self.logger.debug("job_cmd: %s", job_cmd)
                # Teragen
                if job_type.lower() == 'teragen':
                    file_size = job_details.get('filesize', 0)
                    jar_name = '/opt/mapr/hadoop/hadoop-0.20.2/hadoop-0.20.2-dev-examples.jar'
                    test_name = 'teragen'
                    if job_tag:
                        job_cmd = ('hadoop fs -rm -r -f -skipTrash /terasort-input; '
                                   'yarn jar {} {} {} {} {} /terasort-input').format(
                                       jar_name, test_name, job_name_option,
                                       job_tag_option, file_size)
                    else:
                        # Output directory added to match the tagged branch above.
                        job_cmd = ('hadoop fs -rm -r -f -skipTrash /terasort-input; '
                                   'yarn jar {} {} {} {} /terasort-input').format(
                                       jar_name, test_name, job_name_option, file_size)
                # Terasort
                if job_type.lower() == 'terasort':
                    # file_size = job_details.get('file_size')
                    jar_name = '/opt/mapr/hadoop/hadoop-0.20.2/hadoop-0.20.2-dev-examples.jar'
                    test_name = 'terasort'
                    if job_tag:
                        job_cmd = ('hadoop fs -rm -r -f -skipTrash /terasort-output; '
                                   'yarn jar {} {} {} {} /terasort-input /terasort-output').format(
                                       jar_name, test_name, job_name_option, job_tag_option)
                    else:
                        job_cmd = ('hadoop fs -rm -r -f -skipTrash /terasort-output; '
                                   'yarn jar {} {} {} /terasort-input /terasort-output').format(
                                       jar_name, test_name, job_name_option)
                log_file = self.mapr_home + os.path.sep + job_name + '.log'
                yarn_command = 'su -l {} -c "{} &> {}"'.format(self.mapr_user, job_cmd, log_file)
                print "#" * 50
                print "YARN Command : ", yarn_command
                print "#" * 50
                if yarn_command:
                    salt_client = salt.client.LocalClient()
                    job_exec_status = salt_client.cmd(resource_manager,
                                                      fun="cmd.run_bg",
                                                      arg=[yarn_command])
                    print 'Job exec status :' + json.dumps(job_exec_status)
                    self.logger.debug("Job Execution Status: %s", json.dumps(job_exec_status))
                    counter, result = job_timeout, {}
                    while True:
                        counter -= 1
                        job_started_expr = ' Submitted application application_'
                        result = salt_client.cmd(resource_manager,
                                                 fun="file.grep",
                                                 arg=[log_file, job_started_expr])
                        if (not result.get(resource_manager).get('retcode')) or (not counter):
                            break
                        time.sleep(1)
                    print "\nYARN Result: ", result
                    if result:
                        application_id = result.get(resource_manager).get('stdout').split()[-1]
                        self.logger.info("YARN application Started Successfully")
                        return application_id
                    else:
                        raise Exception('Unable to execute yarn application')
                else:
                    raise Exception('Unable to execute yarn application')
        except Exception as e:
            self.logger.exception("Exception while starting job %s, Exception: %s"
                                  % (job_name, str(e)))
            raise e

    def monitor_poller_job(self, app_id, test_var):
        self.logger.info("Starting hadoop application {} polling for template {}.".format(
            app_id, self.tid))
        job_status_flag = 0
        temp, job_plugin_status = None, None
        while True:
            time.sleep(5)
            temp = self.check_state(plugin_id=JOB_POLLER)
            print "=" * 50
            print "APP {} STATUS:{} ".format(app_id, temp)
            print "=" * 50
            if temp != {}:
                job_plugin_status = temp
            else:
                print "******LAST STATUS :", str(job_plugin_status)
                continue
            for app in job_plugin_status.get('applications_status'):
                if app['application_id'] == app_id:
                    if app['status'] in ['FINISHED', 'KILLED']:
                        job_meta = self.sub_template.get(PLUGINS).get(JOB_POLLER)
                        if app_id in job_meta.get('application_ids'):
                            job_meta.get('application_ids').remove(app_id)
                        self.sub_template[PLUGINS][JOB_POLLER][META] = job_meta
                        print "APP {} REMOVED".format(app_id)
                        # GAMGAM
                        self.logger.info("Got to set profile to idle here.")
                        # profile_manager.set_idle_profile(template_id)
                        self.logger.info("For template %s, stopped hadoop application [%s] polling."
                                         % (self.tid, app_id))
                        return
        return None
        # Code ends here.

    def start_job(self, job_meta):
        # Update poller Meta data
        self.logger.debug("Template before starting Job: %s" % (self.sub_template))
        self.logger.debug("Updating Poller Job plugin meta data.")
        dirty_list, app_ids = [], []
        print 'subtemplate: ', json.dumps(self.sub_template)
        for item in self.sub_template.get(PLUGINS, []):
            if item.get(NAME) == JOB_POLLER:
                dirty_list.append(JOB_POLLER)
                # item[META] = job_meta
                app_ids = item.get(META).get("application_names", [])
                self.deploy(START, dirty_list)
                break
        self.logger.debug("Template updated to: %s" % (self.sub_template))
        app_id = self.start_yarn_application(job_meta)
        app_ids.append(app_id)
        self.logger.info("Application Started successfully: %s" % (app_id))
        # Set Active Profile.
        thread.start_new_thread(self.monitor_poller_job, (app_id, 'test'))
        response = dict()
        response["application_id"] = app_id
        response["message"] = "Successfully initiated the yarn application"
        return json.dumps(response)
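# For reference, the shape of a control message as assembled by build_msg().
# The upper-case names are constants from poller_watch; their concrete string
# values are defined there, not here:
#
#   {MSG_TYPE: CNTRL, CMD: START, PLUGIN_ID: <plugin name>,
#    PLUGIN_TYPE: READER, DEST_LIST: [<target names>], "id": 0}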
import json
import multiprocessing
import os
import sys
import time

from kafka import KafkaConsumer, KafkaProducer

sys.path.append(os.getcwd())

from deepInsight.util import get_logger
# from deepInsight.monitor.constants import *
from deepInsight.monitor.poller_watch import *
from django.conf import settings

# Logger Handle
log = get_logger(PCONTROLLER)

# Take kafka topics as arguments
if len(sys.argv) != 4:
    log.error("Provide - kafka_client, orch_comm and pcon_comm")
    exit(0)

kafka_client = sys.argv[1]
orch_comm = sys.argv[2]
pcon_comm = sys.argv[3]

'''
#kafka_client = '192.168.101.12:9092'
#orch_comm = 'orch_comm'
#pcon_comm = 'pcon_comm'
'''
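# Example launch, mirroring the command PollerManager.deploy() issues via
# os.system (the script name and broker address are illustrative):
#
#   python pcontroller.py 192.168.101.12:9092 orch_comm pcon_comm &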
class TemplateManager:
    __name__ = 'TemplateManager'
    logger = get_logger(__name__)

    def __init__(self, id):
        self.id = id

    @classmethod
    def list(self, request):
        self.logger.info('API Call - List Templates.')
        all_templates = Template.objects.all()
        if not len(all_templates):
            self.logger.debug('Application has no template Added.')
        response = []
        for x_temp in all_templates:
            response.append({"Id": x_temp.id,
                             "State": x_temp.state,
                             "Cluster": x_temp.cluster,
                             "Created": x_temp.created.isoformat()})
        return HttpResponse(json.dumps(response))

    @classmethod
    @csrf_exempt
    def show(self, request, id):
        self.logger.info('API Call - Show Template.')
        x_temp = Template.objects.filter(id=id)
        response = dict()
        if len(x_temp) == 0:
            self.logger.error('Template not found for id - %s.', id)
            response['status'] = 'In-Valid Template-Id.'
            return HttpResponseNotFound(json.dumps(response))
        else:
            with open(settings.DATA_DIR + os.path.sep + id) as f:
                response = json.load(f)
            return HttpResponse(json.dumps(response))

    @classmethod
    @csrf_exempt
    def add(self, request):
        self.logger.info('API Call - Add a new Template.')
        # Read the uploaded file and write it to the data directory.
        response = dict()
        filedata = request.FILES.get('file', None)
        if not filedata:
            self.logger.error('Invalid input ( file not attached as File-Filedata Key-Value pair ).')
            response['Status'] = 'In-valid input.'
            return HttpResponseBadRequest(json.dumps(response))
        tid = os.urandom(4).encode('hex')
        filepath = settings.DATA_DIR + os.path.sep + tid
        with open(filepath, 'w') as destination:
            for chunk in filedata.chunks():
                destination.write(chunk)
        try:
            with open(filepath, 'r') as f:
                filedata = json.load(f)
        except ValueError:
            self.logger.error('Invalid input ( Not JSON ).')
            os.remove(filepath)
            response['Status'] = 'In-valid input.'
            return HttpResponseBadRequest(json.dumps(response))
        self.logger.debug('Template file successfully Stored.')
        # Update record to the database.
        record = Template()
        record.id, record.state = tid, 'Init'
        record.cluster = filedata.get('name', 'Not Given')
        response['id'] = tid
        record.save()
        self.logger.debug('Template record successfully Added.')
        self.logger.info('Template successfully Added.')
        return HttpResponse(json.dumps(response))

    @classmethod
    @csrf_exempt
    def status(self, request, tid):
        self.logger.info('API Call - Template Status.')
        x_temp = Template.objects.filter(id=tid, state='Active')
        response, template = dict(), dict()
        if len(x_temp) == 0:
            self.logger.error('Template currently not Active.')
            response['status'] = 'Template Not Active.'
            return HttpResponse(json.dumps(response))
        else:
            with open(settings.DATA_DIR + os.path.sep + tid) as f:
                template = json.load(f)
            response = {}
            sub_templates = [key for key, val in template.get('sub_template')[0].iteritems()
                             if key != 'name']
            for watchdog_class in BaseMonitor.__subclasses__():
                if watchdog_class.NAME in sub_templates:
                    obj = watchdog_class(tid)
                    response[watchdog_class.NAME] = obj.check_status()
                else:
                    continue
            return HttpResponse(json.dumps(response))

    @classmethod
    def delete(self, request, id):
        self.logger.info('API Call - Delete Template.')
        x_temp = Template.objects.filter(id=id)
        response = dict()
        if len(x_temp) == 0:
            self.logger.error('Template does not exist for id - %s.', id)
            response['status'] = 'In-Valid Template-Id.'
            return HttpResponseNotFound(json.dumps(response))
        else:
            # Delete stored file.
            filepath = settings.DATA_DIR + os.path.sep + id
            if os.path.isfile(filepath):
                os.remove(filepath)
                self.logger.debug('Template file successfully Deleted.')
            # Delete database record.
            x_temp.delete()
            self.logger.debug('Template record successfully Deleted.')
            response['status'] = 'Template Deleted.'
            return HttpResponse(json.dumps(response))
class BaseMonitor:
    __metaclass__ = abc.ABCMeta
    logger = get_logger('BaseMonitor')

    def __init__(self, name, tid):
        self.name = name
        self.tid = tid
        self.tags, self.nodelist = [], []
        self.template, self.sub_template = {}, {}
        self.read_template_data()
        self.status = []

    @abc.abstractmethod
    def get_name(self):
        return

    @abc.abstractmethod
    def deploy(self, oper):
        # Signature matches the call sites (obj.deploy(oper.lower())).
        raise NotImplementedError()

    def read_template_data(self):
        self.logger.debug('Reading sub-template for specific watch utility.')
        filepath = settings.DATA_DIR + os.path.sep + self.tid
        try:
            with open(filepath, 'r') as f:
                self.template = json.load(f)
            self.tags = self.template.get('tags', [])
            self.nodelist = self.template.get('node_list', [])
            self.sub_template = self.template.get('sub_template')[0].get(self.name)
        except Exception as e:
            self.logger.debug('Exception: %s', str(e))
            self.logger.error('Expected sub-template Not Found.')
        return

    def change_service_status(self, operation):
        try:
            salt_obj = SaltManager()
            salt_status = salt_obj.change_service_status(self.nodelist,
                                                         [self.service_name],
                                                         operation)
            self.logger.debug('salt_status :' + str(salt_status))
            self.status = []
            for node in self.nodelist:
                temp = dict()
                temp['node_name'], temp['status'] = node, "Not Running"
                if node in salt_status:
                    if salt_status[node]:
                        temp['status'] = "Running"
                else:
                    temp['status'] = "No Response"
                self.status.append(temp)
        except Exception as e:
            self.logger.error('Exception Caused: %s.', str(e))
        return

    def start(self):
        self.logger.debug('Server - td-agent - start call.')
        self.change_service_status("start")

    def restart(self):
        self.logger.debug('Server - td-agent - restart call.')
        self.change_service_status("restart")

    def stop(self):
        self.logger.debug('Server - td-agent - stop call.')
        self.change_service_status("stop")

    def teardown(self):
        self.logger.debug('Server - td-agent - Teardown call.')
        self.change_service_status("stop")

    def check_status(self):
        self.logger.debug('Server - td-agent - check_status call.')
        self.change_service_status("status")
        return self.status
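# Minimal sketch of the subclass contract that ClusterMonitor relies on when
# it scans BaseMonitor.__subclasses__(): a NAME matching a sub-template key
# and a deploy() that accepts the operation string. Illustrative only, not
# part of the codebase:
#
#   class ExampleMonitor(BaseMonitor):
#       NAME = 'examples'
#
#       def get_name(self):
#           return ExampleMonitor.NAME
#
#       def deploy(self, oper):
#           self.logger.debug('deploy called with - %s', oper)
#           return []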
def __init__(self):
    self.logger = get_logger('BaseMonitor')
    self.local = salt.client.LocalClient()