def __init__(self, logger, vnc_api, job_input, job_log_utils): """Initializer.""" self._logger = logger self.vnc_api = vnc_api self.vnc_api_init_params = None self.api_server_host = None self.auth_token = None self.contrail_cluster_id = None self.sandesh_args = None self.job_log_utils = job_log_utils self.job_input = job_input self.job_utils = None self.executable_timeout = 1800 self.job_template = None self.job_execution_id = None self.device_name = "" self.job_description = None self.job_transaction_id = None self.job_transaction_descr = None self.job_template_id = None self.result_handler = None self.parse_job_input(job_input) self.job_utils = JobUtils(self.job_execution_id, self.job_template_id, self._logger, self.vnc_api) self.job_template = self.job_utils.read_job_template() self.job_file_write = JobFileWrite(self._logger)
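Every snippet in this section constructs a JobFileWrite from a logger and later calls write_to_file(exec_id, unique_pb_id, file_type, data). The class itself is not shown here, so the sketch below is a minimal stand-in inferred from those call sites; the on-disk record layout is an assumption (the playbook helper further down does show the file lives at /tmp/<exec_id> and is opened in append mode).

# Minimal sketch of the JobFileWrite interface inferred from the call
# sites in this section. The signature matches the calls below; the
# marker layout written to disk is an assumption, not the real format.
class JobFileWriteSketch(object):
    JOB_PROGRESS = 'JOB_PROGRESS'
    JOB_LOG = 'JOB_LOG'
    PROUTER_LOG = 'PROUTER_LOG'
    GEN_DEV_OP_RES = 'GEN_DEV_OP_RES'
    PLAYBOOK_OUTPUT = 'PLAYBOOK_OUTPUT'

    def __init__(self, logger):
        self._logger = logger

    def write_to_file(self, exec_id, unique_pb_id, file_type, data):
        # One append-only streaming file per job execution; prefixing
        # each record with the playbook id and record type lets a single
        # reader demultiplex progress, logs, and device-operation output.
        with open('/tmp/' + str(exec_id), 'a') as f:
            f.write('%s%s%s\n' % (unique_pb_id, file_type, data))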
def __init__(self, job_template_id, execution_id, fabric_fq_name, logger, job_utils, job_log_utils, device_name, job_description, transaction_id, transaction_descr): """Initializes JobResultHandler.""" self._job_template_id = job_template_id self._execution_id = execution_id self._fabric_fq_name = fabric_fq_name self._logger = logger self._job_utils = job_utils self.job_log_utils = job_log_utils self._device_name = device_name self._job_description = job_description self._transaction_id = transaction_id self._transaction_descr = transaction_descr # job result data self.job_result_status = None # cumulative status self.job_result_message = "" # job result msg when not device-specific self.job_warning_message = "" # job warning msg when not device-specific self.job_result = dict() # map of the device_id to job result msg self.job_summary_message = None self.failed_device_jobs = list() self.warning_device_jobs = set() # device_management_ip, device_username, etc self.playbook_output = None # marked output from the playbook stdout self.percentage_completed = 0.0 self._job_file_write = JobFileWrite(self._logger)
def __init__(self, module): """Discover device utility initialization.""" self.module = module self.logger = module.logger self.job_ctx = module.job_ctx self.fabric_uuid = module.params['fabric_uuid'] self.total_retry_timeout = float(module.params['total_retry_timeout']) self._job_file_write = JobFileWrite(self.logger)
def report_playbook_results(self, job_ctx, pb_results): logger = FilterLog.instance("WritePbResultsToFileFilter").logger() job_file_write = JobFileWrite(logger) exec_id, unique_pb_id = self.get_job_ctx_details(job_ctx) job_file_write.write_to_file( exec_id, unique_pb_id, JobFileWrite.GEN_DEV_OP_RES, str(pb_results) ) return { 'status': 'success', 'write_to_file_log': 'Successfully wrote command results to streaming file'}
def report_percentage_completion(self, job_ctx, percentage): logger = FilterLog.instance("WritePercentToFileFilter").logger() job_file_write = JobFileWrite(logger) exec_id, unique_pb_id = self.get_job_ctx_details(job_ctx) job_file_write.write_to_file( exec_id, unique_pb_id, JobFileWrite.JOB_PROGRESS, str(percentage) ) return { 'status': 'success', 'write_to_file_log': 'Successfully wrote progress to streaming file'}
class FilterModule(object): @staticmethod def _init_logging(): logger = logging.getLogger('WriteToFileFilter') console_handler = logging.StreamHandler() console_handler.setLevel(logging.INFO) formatter = logging.Formatter( '%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y/%m/%d %H:%M:%S') console_handler.setFormatter(formatter) logger.addHandler(console_handler) return logger # end _init_logging def __init__(self): self._logger = FilterModule._init_logging() self._job_file_write = JobFileWrite(self._logger) # end __init__ def filters(self): return { 'report_percentage_completion': self.report_percentage_completion, 'report_playbook_results': self.report_playbook_results, } def get_job_ctx_details(self, job_ctx): return job_ctx.get('job_execution_id'), job_ctx.get('unique_pb_id') def report_percentage_completion(self, job_ctx, percentage): exec_id, unique_pb_id = self.get_job_ctx_details(job_ctx) self._job_file_write.write_to_file(exec_id, unique_pb_id, JobFileWrite.JOB_PROGRESS, str(percentage)) return { 'status': 'success', 'write_to_file_log': 'Successfully wrote progress to streaming file' } def report_playbook_results(self, job_ctx, pb_results): exec_id, unique_pb_id = self.get_job_ctx_details(job_ctx) self._job_file_write.write_to_file(exec_id, unique_pb_id, JobFileWrite.GEN_DEV_OP_RES, str(pb_results)) return { 'status': 'success', 'write_to_file_log': 'Successfully wrote command results to streaming file' }
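For a quick sanity check outside Ansible, the filter class above can be driven directly; the job_ctx keys match what get_job_ctx_details() reads, and the values here are placeholders.

# Hypothetical direct invocation of the filter plugin, useful for unit
# testing; only the two keys read by get_job_ctx_details() are needed.
fm = FilterModule()
job_ctx = {'job_execution_id': 'exec-0001', 'unique_pb_id': 'pb-0001'}
print(fm.report_percentage_completion(job_ctx, 42.5)['status'])   # success
print(fm.report_playbook_results(job_ctx, {'ok': 5})['status'])   # success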
def __init__(self, sandesh_instance_id=None, config_args=None, sandesh=True): self.sandesh_instance_id = sandesh_instance_id self.args = None self.config_logger = self.initialize_sandesh_logger( config_args, sandesh) self._job_file_write = JobFileWrite(self.config_logger)
def __init__(self, sandesh_instance_id=None, config_args=None, sandesh=True, sandesh_instance=None): """Initialize JobLogUtils. Initialize sandesh instance.""" self.sandesh_instance_id = sandesh_instance_id self.args = None self.config_logger = self.initialize_sandesh_logger(config_args, sandesh, sandesh_instance) self._job_file_write = JobFileWrite(self.config_logger)
class JobResultHandler(object): def __init__(self, job_template_id, execution_id, fabric_fq_name, logger, job_utils, job_log_utils): self._job_template_id = job_template_id self._execution_id = execution_id self._fabric_fq_name = fabric_fq_name self._logger = logger self._job_utils = job_utils self.job_log_utils = job_log_utils # job result data self.job_result_status = None # cumulative status self.job_result_message = "" # job result msg when not device-specific self.job_result = dict() # map of the device_id to job result msg self.job_summary_message = None self.failed_device_jobs = list() # device_management_ip, device_username, etc self.playbook_output = None # marked output from the playbook stdout self.percentage_completed = 0.0 self._job_file_write = JobFileWrite(self._logger) def update_job_status(self, status, message=None, device_id=None, device_name=None): # update cumulative job status if self.job_result_status is None or \ self.job_result_status != JobStatus.FAILURE: self.job_result_status = status # collect failed device ids if status == JobStatus.FAILURE and device_id is not None: self.failed_device_jobs.append(device_id) # collect the result message if message is not None: if device_id is not None: self.job_result.update({ device_id: { "message": message, "device_name": device_name } }) else: self.job_result_message = message # end update_job_status def update_playbook_output(self, pb_output): if self.playbook_output: self.playbook_output.update(pb_output) else: self.playbook_output = pb_output # end update_playbook_output def create_job_summary_log(self, job_template_fqname): # generate job result summary self.job_summary_message = self.create_job_summary_message() timestamp = int(round(time.time() * 1000)) # create the job log self._logger.debug("%s" % self.job_summary_message) job_status = None if self.job_result_status: job_status = self.job_result_status.value # write to the file as well file_write_data = {"job_status": job_status} self._job_file_write.write_to_file(self._execution_id, "job_summary", JobFileWrite.JOB_LOG, file_write_data) self.job_log_utils.send_job_log(job_template_fqname, self._execution_id, self._fabric_fq_name, self.job_summary_message, job_status, 100, timestamp=timestamp) # end create_job_summary_log def create_job_summary_message(self): job_summary_message = MsgBundle.getMessage( MsgBundle.JOB_SUMMARY_MESSAGE_HDR) failed_device_jobs_len = len(self.failed_device_jobs) if self.job_result_status is None: job_summary_message += MsgBundle.getMessage( MsgBundle.JOB_RESULT_STATUS_NONE) elif self.job_result_status == JobStatus.FAILURE: if failed_device_jobs_len > 0: job_summary_message += MsgBundle.getMessage( MsgBundle.JOB_MULTI_DEVICE_FAILED_MESSAGE_HDR) for failed_device in self.failed_device_jobs: msg = failed_device + ',' job_summary_message += msg else: job_summary_message += "Job failed. " job_summary_message += "\n" elif self.job_result_status == JobStatus.SUCCESS: job_summary_message += MsgBundle.getMessage( MsgBundle.JOB_EXECUTION_COMPLETE) device_job_result_len = len(self.job_result) if device_job_result_len > 0: job_summary_message += MsgBundle.getMessage( MsgBundle.PLAYBOOK_RESULTS_MESSAGE) job_summary_message += "Successfully completed "\ "job for %s devices.\n"\ % (device_job_result_len - failed_device_jobs_len) # result_summary would in fact be the failed_devices # result summary result_summary = "" for entry in self.job_result: if entry in self.failed_device_jobs: result_summary += \ "%s:%s \n" % (self.job_result[entry]['device_name'], self.job_result[entry]['message']) if result_summary != "": failed_device_msg = "Job execution failed for %s devices.\n"\ % len(self.failed_device_jobs) result_summary = failed_device_msg + result_summary job_summary_message += result_summary if self.job_result_message is not None: job_summary_message += self.job_result_message return job_summary_message
class FabricAnsibleModule(AnsibleModule): def __init__(self, argument_spec={}, **kwargs): super(FabricAnsibleModule, self).__init__(argument_spec=argument_spec, **kwargs) self.module_name = self._name self.job_ctx = self.params.get('job_ctx') self.logger = fabric_ansible_logger(self.module_name) self.results = dict() self.results['failed'] = False self.logger.debug("Module params: {}".format(self.params)) self.job_log_util = None self._job_file_write = JobFileWrite(self.logger) def _validate_job_ctx(self): required_job_ctx_keys = [ 'job_template_fqname', 'job_execution_id', 'config_args', 'job_input'] for key in required_job_ctx_keys: if key not in self.job_ctx or self.job_ctx.get(key) is None: raise ValueError("Missing job context param: %s" % key) @handle_sandesh def execute(self, function, *args, **kwargs): return function(self, *args, **kwargs) def send_prouter_object_log(self, prouter_fqname, onboarding_state, os_version, serial_num): exec_id = self.job_ctx.get('job_execution_id') pb_id = self.job_ctx.get('unique_pb_id') prouter_log = { 'prouter_fqname': prouter_fqname, 'job_execution_id': exec_id, 'job_input': None, 'job_template_fqname': self.job_ctx['job_template_fqname'], 'onboarding_state': onboarding_state, 'os_version': os_version, 'serial_num': serial_num } self._job_file_write.write_to_file( exec_id, pb_id, JobFileWrite.PROUTER_LOG, json.dumps(prouter_log) ) def send_job_object_log(self, message, status, job_result, log_error_percent=False, job_success_percent=None, job_error_percent=None, device_name=None, details=None): if job_success_percent is None or (log_error_percent and job_error_percent is None): try: total_percent = self.job_ctx.get('playbook_job_percentage') if total_percent: total_percent = float(total_percent) self.logger.debug( "Calculating the job completion percentage. " "total_task_count: %s, current_task_index: %s, " "playbook_job_percentage: %s," " task_weightage_array: %s", self.job_ctx.get('total_task_count'), self.job_ctx.get('current_task_index'), total_percent, self.job_ctx.get('task_weightage_array')) job_success_percent, job_error_percent = \ self.job_log_util.calculate_job_percentage( self.job_ctx.get('total_task_count'), buffer_task_percent=False, task_seq_number=self.job_ctx.get('current_task_index'), total_percent=total_percent, task_weightage_array= self.job_ctx.get('task_weightage_array')) except Exception as e: self.logger.error("Exception while calculating the job " "percentage %s", str(e)) if log_error_percent: job_percentage = job_error_percent else: job_percentage = job_success_percent self.results['percentage_completed'] = job_percentage self.logger.debug("Job complete percentage is %s" % job_percentage) exec_id = self.job_ctx.get('job_execution_id') pb_id = self.job_ctx.get('unique_pb_id') job_log = { 'job_template_fqname': self.job_ctx.get('job_template_fqname'), 'job_execution_id': exec_id, 'fabric_fq_name': self.job_ctx.get('fabric_fqname'), 'message': message, 'status': status, 'completion_percent': job_percentage, 'result': job_result, 'device_name': device_name, 'details': details } self._job_file_write.write_to_file( exec_id, pb_id, JobFileWrite.JOB_LOG, json.dumps(job_log) ) def calculate_job_percentage(self, num_tasks, buffer_task_percent=False, task_seq_number=None, total_percent=100, task_weightage_array=None): return self.job_log_util.calculate_job_percentage( num_tasks, buffer_task_percent, task_seq_number, total_percent, task_weightage_array)
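A hedged sketch of how a concrete module would sit on top of FabricAnsibleModule: validate the job context, run the task body through execute() so the @handle_sandesh decorator manages Sandesh setup and teardown, then exit with the accumulated results. The argument names and task body are illustrative, not taken from a real module.

# Illustrative module entry point built on FabricAnsibleModule.
def task_body(module):
    # Placeholder work; real modules talk to devices/VNC here.
    module.results['msg'] = 'noop'
    return module.results

def main():
    module = FabricAnsibleModule(argument_spec=dict(
        job_ctx=dict(required=True, type='dict'),   # illustrative spec
    ))
    module._validate_job_ctx()
    results = module.execute(task_body)
    module.exit_json(**results)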
class DeviceInfo(object): output = {} def __init__(self, module): self.module = module self.logger = module.logger self.job_ctx = module.job_ctx self.fabric_uuid = module.params['fabric_uuid'] self.total_retry_timeout = float(module.params['total_retry_timeout']) self._job_file_write = JobFileWrite(self.logger) def initial_processing(self, concurrent): self.serial_num_flag = False self.all_serial_num = [] serial_num = [] self.per_greenlet_percentage = None self.job_ctx['current_task_index'] = 2 try: total_percent = self.job_ctx.get('playbook_job_percentage') if total_percent: total_percent = float(total_percent) # Calculate the total percentage of this entire greenlet-based task # This will be equal to the percentage allotted to this task in the # weightage array off the total job percentage. For example: # if the task weightage array is [10, 85, 5] and total job % # is 95. Then the 2nd task's effective total percentage is 85% of # 95% total_task_percentage = self.module.calculate_job_percentage( self.job_ctx.get('total_task_count'), task_seq_number=self.job_ctx.get('current_task_index'), total_percent=total_percent, task_weightage_array=self.job_ctx.get( 'task_weightage_array'))[0] # Based on the number of greenlets spawned (i.e. num of sub tasks) # split the total_task_percentage equally amongst the greenlets. self.logger.info("Number of greenlets: {} and total_percent: " "{}".format(concurrent, total_task_percentage)) self.per_greenlet_percentage = \ self.module.calculate_job_percentage( concurrent, total_percent=total_task_percentage)[0] self.logger.info("Per greenlet percent: " "{}".format(self.per_greenlet_percentage)) self.vncapi = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY, auth_token=self.job_ctx.get('auth_token')) except Exception as ex: self.logger.info("Percentage calculation failed with error " "{}".format(str(ex))) try: self.vncapi = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY, auth_token=self.job_ctx.get('auth_token')) except Exception as ex: self.module.results['failed'] = True self.module.results['msg'] = "Failed to connect to API server " \ "due to error: %s"\ % str(ex) self.module.exit_json(**self.module.results) # get credentials and serial number if greenfield if self.total_retry_timeout: # get device credentials fabric = self.vncapi.fabric_read(id=self.fabric_uuid) fabric_object = self.vncapi.obj_to_dict(fabric) self.credentials = fabric_object.get('fabric_credentials').get( 'device_credential') # get serial numbers fabric_namespace_obj_list = self.vncapi.fabric_namespaces_list( parent_id=self.fabric_uuid, detail=True) fabric_namespace_list = self.vncapi.obj_to_dict( fabric_namespace_obj_list) for namespace in fabric_namespace_list: if namespace.get('fabric_namespace_type') == "SERIAL_NUM": self.serial_num_flag = True serial_num.append(namespace.get( 'fabric_namespace_value').get('serial_num')) if len(serial_num) > 1: for outer_list in serial_num: for sn in outer_list: self.all_serial_num.append(sn) else: self.credentials = self.module.params['credentials'] for cred in self.credentials: if cred.get('credential', {}).get('password'): cred['credential']['password'] = JobVncApi.decrypt_password( encrypted_password=cred.get('credential', {}).get('password'), admin_password=self.job_ctx.get('vnc_api_init_params').get( 'admin_password')) def ping_sweep(self, host): try: ping_output = subprocess.Popen( ['ping', '-W', '1', '-c', '1', host], stdout=subprocess.PIPE) ping_output.wait() return ping_output.returncode == 0 except Exception as ex: self.logger.error("ERROR: SUBPROCESS.POPEN failed with error {}" .format(str(ex))) return False # end _ping_sweep def _get_device_vendor(self, oid, vendor_mapping): for vendor in vendor_mapping: if vendor.get('oid') in oid: return vendor.get('vendor') return None # end _get_device_vendor def oid_mapping(self, host, pysnmp_output): matched_oid_mapping = {} matched_oid = None device_family_info = self.module.params['device_family_info'] vendor_mapping = self.module.params['vendor_mapping'] if pysnmp_output.get('ansible_sysobjectid'): vendor = self._get_device_vendor( pysnmp_output['ansible_sysobjectid'], vendor_mapping) if not vendor: self.logger.info("Vendor for host {} not supported".format( host)) else: device_family = next( element for element in device_family_info if element['vendor'] == vendor) if device_family: try: matched_oid = next( item for item in device_family['snmp_probe'] if item['oid'] == pysnmp_output[ 'ansible_sysobjectid']) except StopIteration: pass if matched_oid: matched_oid_mapping = matched_oid.copy() matched_oid_mapping['hostname'] = \ pysnmp_output['ansible_sysname'] matched_oid_mapping['host'] = host matched_oid_mapping['vendor'] = vendor else: self.logger.info( "OID {} not present in the given list of device " "info for the host {}".format( pysnmp_output['ansible_sysobjectid'], host)) return matched_oid_mapping # end _oid_mapping def _parse_xml_response(self, xml_response, oid_mapped): xml_response = xml_response.split('">') output = xml_response[1].split('<cli') final = etree.fromstring(output[0]) if final.find('hardware-model') is not None: oid_mapped['product'] = final.find('hardware-model').text if final.find('os-name') is not None: oid_mapped['family'] = final.find('os-name').text if final.find('os-version') is not None: oid_mapped['os-version'] = final.find('os-version').text if final.find('serial-number') is not None: oid_mapped['serial-number'] = final.find('serial-number').text if final.find('host-name') is not None: oid_mapped['hostname'] = final.find('host-name').text # end _parse_xml_response def _ssh_connect(self, ssh_conn, username, password, hostname, commands, oid_mapped): try: ssh_conn.connect( username=username, password=password, hostname=hostname) oid_mapped['username'] = username oid_mapped['password'] = password oid_mapped['host'] = hostname except Exception as ex: self.logger.info( "Could not connect to host {}: {}".format( hostname, str(ex))) return False try: if commands: num_commands = len(commands) - 1 for index, command in enumerate(commands): stdin, stdout, stderr = ssh_conn.exec_command( command['command']) response = stdout.read() if (not stdout and stderr) or ( response is None) or ('error' in response): self.logger.info( "Command {} failed on host {}:{}" .format(command['command'], hostname, stderr)) if index == num_commands: raise RuntimeError("All commands failed on host {}" .format(hostname)) else: break self._parse_xml_response(response, oid_mapped) return True except RuntimeError as rex: self.logger.info("RuntimeError: {}".format(str(rex))) return False except Exception as ex: self.logger.info("SSH failed for host {}: {}".format(hostname, str(ex))) return False # end _ssh_connect def get_device_info_ssh(self, host, oid_mapped, credentials): # find a credential that matches this host status = False device_family_info = self.module.params['device_family_info'] sshconn = paramiko.SSHClient() sshconn.set_missing_host_key_policy(paramiko.AutoAddPolicy()) try: for info in device_family_info: for cred in credentials: status = self._ssh_connect( sshconn, cred['credential']['username'], cred['credential']['password'], host, info['ssh_probe'], oid_mapped) if status: oid_mapped['vendor'] = info['vendor'] break finally: sshconn.close() return status # end _get_device_info_ssh def _detailed_cred_check(self, host, oid_mapped, credentials): remove_null = [] ssh_conn = paramiko.SSHClient() ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy()) index = 0 # check if credentials dict has both username and password defined. # If neither is available, remove the entire entry from the list. # Cannot check ssh connectivity with just the username or password. for creds in credentials[index:]: for user_pwd in creds.values(): if isinstance(user_pwd, dict): if user_pwd.get('username') and user_pwd.get('password'): index += 1 break else: credentials.remove(creds) break # In a list of dict for credentials, if a dict value is None # remove the key from the dict. Only keys with values are retained. for single_dict in credentials: remove_null.append( dict([(dkey, ddata) for dkey, ddata in single_dict.iteritems() if ddata])) # Sorting based on number of keys in a dict. Max-min sorting done here # resulting list would have dict with max keys as first entry # and min as the last prioritized_creds = sorted(remove_null, key=len, reverse=True) try: for device_cred in prioritized_creds: oid_vendor = oid_mapped['vendor'] oid_family = oid_mapped['family'] device_family = device_cred.get('device_family', None) vendor = device_cred.get('vendor', None) cred = device_cred.get('credential', None) username = cred.get('username', None) if cred else None password = cred.get('password', None) if cred else None if device_family and not vendor: continue if vendor and vendor.lower() != oid_vendor.lower(): continue if vendor and device_family and device_family not in \ oid_family: continue if not username or not password: continue response = self._ssh_connect( ssh_conn, username, password, host, None, oid_mapped) if response: return True self.logger.info( "Credential for '{}' didn't work on host '{}'".format( username, host)) return False finally: ssh_conn.close() # end _ssh_check def _pr_object_create_update( self, oid_mapped, fq_name, update): pr_uuid = None msg = None try: os_version = oid_mapped.get('os-version', None) serial_num = oid_mapped.get('serial-number', None) physicalrouter = PhysicalRouter( parent_type='global-system-config', fq_name=fq_name, physical_router_management_ip=oid_mapped.get('host'), physical_router_vendor_name=oid_mapped.get('vendor'), physical_router_product_name=oid_mapped.get('product'), physical_router_device_family=oid_mapped.get('family'), physical_router_vnc_managed=True, physical_router_serial_number=serial_num, physical_router_managed_state='active', physical_router_user_credentials={ 'username': oid_mapped.get('username'), 'password': oid_mapped.get('password') } ) if update: pr_unicode_obj = self.vncapi.physical_router_update( physicalrouter) if pr_unicode_obj: pr_obj_dict = ast.literal_eval(pr_unicode_obj) pr_uuid = pr_obj_dict['physical-router']['uuid'] msg = "Discovered %s:\n Host name: %s\n Vendor: %s\n" \ " Model: %s" % ( oid_mapped.get('host'), fq_name[1], oid_mapped.get('vendor'), oid_mapped.get('product') ) self.logger.info("Discovered {} : {}".format( oid_mapped.get('host'), pr_uuid )) else: # underlay_managed flag should only be set at physical-router # creation time physicalrouter.set_physical_router_underlay_managed( self.job_ctx.get('job_input').get('manage_underlay', True) ) pr_uuid = self.vncapi.physical_router_create(physicalrouter) msg = "Discovered device details: {} : {} : {}".format( oid_mapped.get('host'), fq_name[1], oid_mapped.get( 'product')) self.logger.info("Device created with uuid- {} : {}".format( oid_mapped.get( 'host'), pr_uuid)) self.module.send_prouter_object_log(fq_name, "DISCOVERED", os_version, serial_num) except (RefsExistError, Exception) as ex: if isinstance(ex, RefsExistError): return REF_EXISTS_ERROR, None self.logger.error("VNC create failed with error: {}".format(str( ex))) return False, None self.module.send_job_object_log( msg, JOB_IN_PROGRESS, None, job_success_percent=self.per_greenlet_percentage) self.discovery_percentage_write() return True, pr_uuid def device_info_processing(self, host, oid_mapped): valid_creds = False return_code = True if not oid_mapped.get('family') or not oid_mapped.get('vendor'): self.logger.info("Could not retrieve family/vendor info for " "the host: {}, not creating PR " "object".format(host)) self.logger.info("vendor: {}, family: {}".format( oid_mapped.get('vendor'), oid_mapped.get('family'))) oid_mapped = {} if oid_mapped.get('host'): valid_creds = self._detailed_cred_check(host, oid_mapped, self.credentials) if not valid_creds and oid_mapped: self.logger.info("No credentials matched for host: {}, nothing " "to update in DB".format(host)) oid_mapped = {} if oid_mapped: if self.serial_num_flag: if oid_mapped.get('serial-number') not in \ self.all_serial_num: self.logger.info( "Serial number {} for host {} not present " "in fabric_namespace, nothing to update " "in DB".format( oid_mapped.get('serial-number'), host)) return if oid_mapped.get('hostname') is None: oid_mapped['hostname'] = oid_mapped.get('serial-number') fq_name = [ 'default-global-system-config', oid_mapped.get('hostname')] return_code, pr_uuid = self._pr_object_create_update( oid_mapped, fq_name, False) if return_code == REF_EXISTS_ERROR: physicalrouter = self.vncapi.physical_router_read( fq_name=fq_name) phy_router = self.vncapi.obj_to_dict(physicalrouter) if (phy_router.get('physical_router_management_ip') == oid_mapped.get('host')): self.logger.info( "Device with same mgmt ip already exists {}".format( phy_router.get('physical_router_management_ip'))) return_code, pr_uuid = self._pr_object_create_update( oid_mapped, fq_name, True) else: fq_name = [ 'default-global-system-config', oid_mapped.get('hostname') + '_' + oid_mapped.get('host')] return_code, pr_uuid = self._pr_object_create_update( oid_mapped, fq_name, False) if return_code == REF_EXISTS_ERROR: self.logger.debug("Object already exists") if return_code is True: self.vncapi.ref_update( "physical_router", pr_uuid, "fabric", self.fabric_uuid, None, "ADD") self.logger.info( "Fabric updated with physical router info for " "host: {}".format(host)) temp = {} temp['device_management_ip'] = oid_mapped.get('host') temp['device_fqname'] = fq_name temp['device_username'] = oid_mapped.get('username') temp['device_password'] = oid_mapped.get('password') temp['device_family'] = oid_mapped.get('family') temp['device_vendor'] = oid_mapped.get('vendor') temp['device_product'] = oid_mapped.get('product') temp['device_serial_number'] = oid_mapped.get('serial-number') DeviceInfo.output.update({pr_uuid: temp}) def discovery_percentage_write(self): if self.module.results.get('percentage_completed'): exec_id = self.job_ctx.get('job_execution_id') pb_id = self.job_ctx.get('unique_pb_id') self._job_file_write.write_to_file( exec_id, pb_id, JobFileWrite.JOB_PROGRESS, str(self.module.results.get('percentage_completed')) )
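The comment in initial_processing() above carries the weightage arithmetic: with a task weightage array of [10, 85, 5] and a playbook_job_percentage of 95, the second task's effective share is 85% of 95%, which is then split evenly across the greenlets. A standalone version of that arithmetic, assuming calculate_job_percentage() reduces to exactly this in the success case:

# Worked example of the percentage split described in the comments above;
# assumes calculate_job_percentage() reduces to this weightage arithmetic.
task_weightage_array = [10, 85, 5]   # percent per task, sums to 100
total_percent = 95.0                 # playbook_job_percentage
task_index = 2                       # this task's 1-based position

task_share = total_percent * task_weightage_array[task_index - 1] / 100.0
print(task_share)                # 80.75, i.e. 85% of 95%

concurrent = 10                  # greenlets spawned for this task
print(task_share / concurrent)   # 8.075 percent credited per greenlet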
class FabricAnsibleModule(AnsibleModule): """Class for fabricansiblemodule.""" def __init__(self, argument_spec={}, **kwargs): """Init routine for custom ansible module.""" super(FabricAnsibleModule, self).__init__(argument_spec=argument_spec, **kwargs) self.module_name = self._name self.job_ctx = self.params.get('job_ctx') self.logger = fabric_ansible_logger(self.module_name) self.results = dict() self.results['failed'] = False self.logger.debug("Module params: {}".format(self.params)) self.job_log_util = None self._job_file_write = JobFileWrite(self.logger) def _validate_job_ctx(self): required_job_ctx_keys = [ 'job_template_fqname', 'job_execution_id', 'config_args', 'job_input' ] for key in required_job_ctx_keys: if key not in self.job_ctx or self.job_ctx.get(key) is None: raise ValueError("Missing job context param: %s" % key) @handle_sandesh def execute(self, function, *args, **kwargs): """Handle Sandesh for fabric ansible module.""" return function(self, *args, **kwargs) def send_prouter_object_log(self, prouter_fqname, onboarding_state, os_version, serial_num): """Prouter object log.""" exec_id = self.job_ctx.get('job_execution_id') pb_id = self.job_ctx.get('unique_pb_id') prouter_log = { 'prouter_fqname': prouter_fqname, 'job_execution_id': exec_id, 'job_input': None, 'job_template_fqname': self.job_ctx['job_template_fqname'], 'onboarding_state': onboarding_state, 'os_version': os_version, 'serial_num': serial_num } self._job_file_write.write_to_file(exec_id, pb_id, JobFileWrite.PROUTER_LOG, json.dumps(prouter_log)) def send_job_object_log(self, message, status, job_result, log_error_percent=False, job_success_percent=None, job_error_percent=None, device_name=None, details=None): """Job object log.""" if (job_success_percent is None or (log_error_percent and job_error_percent is None)): try: total_percent = self.job_ctx.get('playbook_job_percentage') if total_percent: total_percent = float(total_percent) self.logger.debug( "Calculating the job completion percentage. 
" "total_task_count: %s, current_task_index: %s, " "playbook_job_percentage: %s," " task_weightage_array: %s", self.job_ctx.get('total_task_count'), self.job_ctx.get('current_task_index'), total_percent, self.job_ctx.get('task_weightage_array')) job_success_percent, job_error_percent = \ self.job_log_util.calculate_job_percentage( self.job_ctx.get('total_task_count'), buffer_task_percent=False, task_seq_number=self.job_ctx.get('current_task_index'), total_percent=total_percent, task_weightage_array=self.job_ctx.get( 'task_weightage_array')) except Exception as e: self.logger.error( "Exception while calculating the job " "percentage %s", str(e)) if log_error_percent: job_percentage = job_error_percent else: job_percentage = job_success_percent self.results['percentage_completed'] = job_percentage self.logger.debug("Job complete percentage is %s" % job_percentage) exec_id = self.job_ctx.get('job_execution_id') pb_id = self.job_ctx.get('unique_pb_id') job_log = { 'job_template_fqname': self.job_ctx.get('job_template_fqname'), 'job_execution_id': exec_id, 'fabric_fq_name': self.job_ctx.get('fabric_fqname'), 'message': message, 'status': status, 'completion_percent': job_percentage, 'result': job_result, 'device_name': device_name, 'details': details, 'description': self.job_ctx.get('job_description', ''), 'transaction_id': self.job_ctx.get('job_transaction_id', ''), 'transaction_descr': self.job_ctx.get('job_transaction_descr', ''), } self._job_file_write.write_to_file(exec_id, pb_id, JobFileWrite.JOB_LOG, json.dumps(job_log)) def calculate_job_percentage(self, num_tasks, buffer_task_percent=False, task_seq_number=None, total_percent=100, task_weightage_array=None): """Job stats.""" return self.job_log_util.calculate_job_percentage( num_tasks, buffer_task_percent, task_seq_number, total_percent, task_weightage_array)
def __init__(self): """Playbook helper initializer. Creates the playbook log util class.""" self._job_file_write = JobFileWrite(logger)
class PlaybookHelper(object): def __init__(self): self._job_file_write = JobFileWrite(logger) def get_plugin_output(self, pbex): output_json = pbex._tqm._variable_manager._nonpersistent_fact_cache[ 'localhost'].get('output') return output_json def execute_playbook(self, playbook_info): output = None try: loader = DataLoader() inventory = InventoryManager(loader=loader, sources=['localhost']) variable_manager = VariableManager(loader=loader, inventory=inventory) Options = namedtuple('Options', ['listtags', 'listtasks', 'listhosts', 'syntax', 'connection', 'module_path', 'forks', 'remote_user', 'private_key_file', 'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args', 'scp_extra_args', 'become', 'become_method', 'become_user', 'verbosity', 'check', 'diff']) options = Options(listtags=False, listtasks=False, listhosts=False, syntax=False, connection='ssh', module_path=None, forks=100, remote_user=None, private_key_file=None, ssh_common_args=None, ssh_extra_args=None, sftp_extra_args=None, scp_extra_args=None, become=None, become_method=None, become_user=None, verbosity=None, check=False, diff=False) variable_manager.extra_vars = playbook_info['extra_vars'] pbex = PlaybookExecutor(playbooks=[playbook_info['uri']], inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=None) ret_val = pbex.run() output = self.get_plugin_output(pbex) if ret_val != 0: msg = MsgBundle.getMessage(MsgBundle. PLAYBOOK_RETURN_WITH_ERROR) raise Exception(msg) if output is None or output.get('status') is None: msg = MsgBundle.getMessage(MsgBundle. PLAYBOOK_OUTPUT_MISSING) raise Exception(msg) if output.get('status').lower() == "failure": msg = MsgBundle.getMessage(MsgBundle. PLAYBOOK_STATUS_FAILED) raise Exception(msg) return output except Exception as exp: msg = MsgBundle.getMessage(MsgBundle.PLAYBOOK_EXECUTE_ERROR, playbook_uri=playbook_info['uri'], execution_id=playbook_info['extra_vars'] ['playbook_input']['job_execution_id'], exc_msg=repr(exp)) if exp.message: msg = msg + "\n" + exp.message JM_LOGGER.error(msg) # after handling exception, write an END # to stop listening to the file if created unique_pb_id = playbook_info['extra_vars'][ 'playbook_input']['unique_pb_id'] exec_id = playbook_info['extra_vars']['playbook_input'][ 'job_execution_id'] self._job_file_write.write_to_file( exec_id, unique_pb_id, JobFileWrite.PLAYBOOK_OUTPUT, json.dumps(output) ) with open("/tmp/"+exec_id, "a") as f: f.write(unique_pb_id + 'END' + PLAYBOOK_EOL_PATTERN) sys.exit(msg)
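On failure, execute_playbook() writes the marked playbook output and then appends unique_pb_id + 'END' + PLAYBOOK_EOL_PATTERN to /tmp/<exec_id>; that marker is what tells whoever is tailing the streaming file to stop. A hedged sketch of the consumer side; the polling loop is an assumption and the real job manager's reader may differ:

import time

def follow_job_file(exec_id, unique_pb_id):
    # Assumed reader for the streaming file written above; yields raw
    # lines until the playbook's END marker shows up.
    end_marker = unique_pb_id + 'END'
    with open('/tmp/' + exec_id) as f:
        while True:
            line = f.readline()
            if not line:
                time.sleep(0.5)    # nothing new yet; keep polling
                continue
            if end_marker in line:
                break              # playbook signalled completion
            yield line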
class JobResultHandler(object): def __init__(self, job_template_id, execution_id, fabric_fq_name, logger, job_utils, job_log_utils): """Initializes JobResultHandler.""" self._job_template_id = job_template_id self._execution_id = execution_id self._fabric_fq_name = fabric_fq_name self._logger = logger self._job_utils = job_utils self.job_log_utils = job_log_utils # job result data self.job_result_status = None # cumulative status self.job_result_message = "" # job result msg when not device-specific self.job_warning_message = "" # job warning msg when not device-specific self.job_result = dict() # map of the device_id to job result msg self.job_summary_message = None self.failed_device_jobs = list() self.warning_device_jobs = set() # device_management_ip, device_username, etc self.playbook_output = None # marked output from the playbook stdout self.percentage_completed = 0.0 self._job_file_write = JobFileWrite(self._logger) def get_retry_devices(self): return (self.playbook_output or {}).get('retry_devices') def update_job_status(self, status, message=None, device_id=None, device_name=None, pb_results=None): # update cumulative job status if self.job_result_status is None or \ self.job_result_status != JobStatus.FAILURE: self.job_result_status = status # collect failed device ids if status == JobStatus.FAILURE and device_id is not None: self.failed_device_jobs.append(device_id) if status == JobStatus.WARNING: if device_id is not None: self.warning_device_jobs.add(device_id) if device_id in self.job_result: exis_warn_mess = self.job_result[device_id].get( "warning_message", "") self.job_result[device_id].update( {"warning_message": exis_warn_mess + message}) else: self.job_result[device_id] = {"warning_message": message} else: self.job_warning_message += message + "\n" # collect the result message if message is not None: if device_id is not None: if device_id in self.job_result: self.job_result[device_id].update({ "message": message, "device_name": device_name, "device_op_result": pb_results }) else: self.job_result.update({ device_id: { "message": message, "device_name": device_name, "device_op_result": pb_results } }) else: self.job_result_message = message # end update_job_status def update_playbook_output(self, pb_output): if self.playbook_output: self.playbook_output.update(pb_output) else: self.playbook_output = pb_output # end update_playbook_output def create_job_summary_log(self, job_template_fqname): # generate job result summary self.job_summary_message, device_op_results, failed_device_names = \ self.create_job_summary_message() result = {"gen_dev_job_op": json.dumps(device_op_results)} \ if device_op_results else None timestamp = int(round(time.time() * 1000)) # create the job log self._logger.debug("%s" % self.job_summary_message) job_status = None if self.job_result_status: job_status = self.job_result_status.value # write to the file as well file_write_data = { "job_status": job_status, "failed_devices_list": failed_device_names } self._job_file_write.write_to_file(self._execution_id, "job_summary", JobFileWrite.JOB_LOG, file_write_data) self.job_log_utils.send_job_log(job_template_fqname, self._execution_id, self._fabric_fq_name, self.job_summary_message, job_status, 100, result=result, timestamp=timestamp) # end create_job_summary_log def create_job_summary_message(self): job_summary_message = MsgBundle.getMessage( MsgBundle.JOB_SUMMARY_MESSAGE_HDR) failed_device_jobs_len = len(self.failed_device_jobs) if self.job_result_status is None: job_summary_message += MsgBundle.getMessage( MsgBundle.JOB_RESULT_STATUS_NONE) elif self.job_result_status == JobStatus.FAILURE: if failed_device_jobs_len > 0: job_summary_message += MsgBundle.getMessage( MsgBundle.JOB_MULTI_DEVICE_FAILED_MESSAGE_HDR) for failed_device in self.failed_device_jobs: msg = failed_device + ',' job_summary_message += msg else: job_summary_message += "Job failed. " job_summary_message += "\n" elif self.job_result_status == JobStatus.SUCCESS: job_summary_message += MsgBundle.getMessage( MsgBundle.JOB_EXECUTION_COMPLETE) device_job_result_len = len(self.job_result) if device_job_result_len > 0: job_summary_message += MsgBundle.getMessage( MsgBundle.PLAYBOOK_RESULTS_MESSAGE) job_summary_message += "Successfully completed "\ "job for %s devices.\n"\ % (device_job_result_len - failed_device_jobs_len) # result_summary would in fact be the failed_devices # result summary # warning_summary is warning for multi device jobs result_summary = "" device_op_results = [] failed_device_names = [] warning_summary = "" for entry in self.job_result: if entry in self.failed_device_jobs: result_summary += \ "%s:%s \n" % (self.job_result[entry]['device_name'], self.job_result[entry]['message']) failed_device_names.append( self.job_result[entry]['device_name']) elif self.job_result[entry]['device_op_result']: # could be other device jobs such as device import, topology device_op_results.append( self.job_result[entry]['device_op_result']) if entry in self.warning_device_jobs: warning_summary += \ "%s: %s \n" % (self.job_result[entry]['device_name'], self.job_result[entry]['warning_message']) if result_summary != "": failed_device_msg = "Job execution failed for %s devices.\n"\ % len(self.failed_device_jobs) result_summary = failed_device_msg + result_summary job_summary_message += result_summary if self.job_result_message is not None: job_summary_message += self.job_result_message if self.job_warning_message or self.warning_device_jobs: job_summary_message += "\nJob execution had the following" \ " warnings: \n" job_summary_message += self.job_warning_message job_summary_message += warning_summary return job_summary_message, device_op_results, failed_device_names
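Everything the summary builder walks hangs off job_result, keyed by device uuid. For reference, the shape that update_job_status() accumulates over a mixed run looks roughly like this (ids, names, and messages are placeholders):

# Placeholder illustration of JobResultHandler.job_result contents;
# create_job_summary_message() iterates exactly this structure.
job_result = {
    'dev-uuid-1': {'message': 'config push failed',
                   'device_name': 'leaf-1',
                   'device_op_result': None},
    'dev-uuid-2': {'message': 'done',
                   'device_name': 'leaf-2',
                   'device_op_result': {'interfaces': 48},
                   'warning_message': 'image older than fabric default\n'},
}
failed_device_jobs = ['dev-uuid-1']    # drives the failure summary
warning_device_jobs = {'dev-uuid-2'}   # drives the warning summary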
class DeviceInfo(object): output = {} def __init__(self, module): self.module = module self.logger = module.logger self.job_ctx = module.job_ctx self.fabric_uuid = module.params['fabric_uuid'] self.total_retry_timeout = float(module.params['total_retry_timeout']) self._job_file_write = JobFileWrite(self.logger) def initial_processing(self, concurrent): self.serial_num_flag = False self.all_serial_num = [] serial_num = [] self.per_greenlet_percentage = None self.job_ctx['current_task_index'] = 2 try: total_percent = self.job_ctx.get('playbook_job_percentage') if total_percent: total_percent = float(total_percent) # Calculate the total percentage of this entire greenlet-based task # This will be equal to the percentage allotted to this task in the # weightage array off the total job percentage. For example: # if the task weightage array is [10, 85, 5] and total job % # is 95. Then the 2nd task's effective total percentage is 85% of # 95% total_task_percentage = self.module.calculate_job_percentage( self.job_ctx.get('total_task_count'), task_seq_number=self.job_ctx.get('current_task_index'), total_percent=total_percent, task_weightage_array=self.job_ctx.get( 'task_weightage_array'))[0] # Based on the number of greenlets spawned (i.e. num of sub tasks) # split the total_task_percentage equally amongst the greenlets. self.logger.info("Number of greenlets: {} and total_percent: " "{}".format(concurrent, total_task_percentage)) self.per_greenlet_percentage = \ self.module.calculate_job_percentage( concurrent, total_percent=total_task_percentage)[0] self.logger.info("Per greenlet percent: " "{}".format(self.per_greenlet_percentage)) self.vncapi = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY, auth_token=self.job_ctx.get('auth_token')) except Exception as ex: self.logger.info("Percentage calculation failed with error " "{}".format(str(ex))) try: self.vncapi = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY, auth_token=self.job_ctx.get('auth_token')) except Exception as ex: self.module.results['failed'] = True self.module.results['msg'] = "Failed to connect to API server " \ "due to error: %s"\ % str(ex) self.module.exit_json(**self.module.results) # get credentials and serial number if greenfield if self.total_retry_timeout: # get device credentials fabric = self.vncapi.fabric_read(id=self.fabric_uuid) fabric_object = self.vncapi.obj_to_dict(fabric) self.credentials = fabric_object.get('fabric_credentials').get( 'device_credential') # get serial numbers fabric_namespace_obj_list = self.vncapi.fabric_namespaces_list( parent_id=self.fabric_uuid, detail=True) fabric_namespace_list = self.vncapi.obj_to_dict( fabric_namespace_obj_list) for namespace in fabric_namespace_list: if namespace.get('fabric_namespace_type') == "SERIAL_NUM": self.serial_num_flag = True serial_num.append(namespace.get( 'fabric_namespace_value').get('serial_num')) if len(serial_num) > 1: for outer_list in serial_num: for sn in outer_list: self.all_serial_num.append(sn) else: self.credentials = self.module.params['credentials'] for cred in self.credentials: if cred.get('credential', {}).get('password'): cred['credential']['password'] = JobVncApi.decrypt_password( encrypted_password=cred.get('credential', {}).get('password'), admin_password=self.job_ctx.get('vnc_api_init_params').get( 'admin_password')) def ping_sweep(self, host): try: ping_output = subprocess.Popen( ['ping', '-W', '1', '-c', '1', host], stdout=subprocess.PIPE) ping_output.wait() return ping_output.returncode == 0 except Exception as ex: self.logger.error("ERROR: SUBPROCESS.POPEN failed with error {}" .format(str(ex))) return False # end _ping_sweep def _get_device_vendor(self, oid, vendor_mapping): for vendor in vendor_mapping: if vendor.get('oid') in oid: return vendor.get('vendor') return None # end _get_device_vendor def oid_mapping(self, host, pysnmp_output): matched_oid_mapping = {} matched_oid = None device_family_info = self.module.params['device_family_info'] vendor_mapping = self.module.params['vendor_mapping'] if pysnmp_output.get('ansible_sysobjectid'): vendor = self._get_device_vendor( pysnmp_output['ansible_sysobjectid'], vendor_mapping) if not vendor: self.logger.info("Vendor for host {} not supported".format( host)) else: device_family = next( element for element in device_family_info if element['vendor'] == vendor) if device_family: try: matched_oid = next( item for item in device_family['snmp_probe'] if item['oid'] == pysnmp_output[ 'ansible_sysobjectid']) except StopIteration: pass if matched_oid: matched_oid_mapping = matched_oid.copy() matched_oid_mapping['hostname'] = \ pysnmp_output['ansible_sysname'] matched_oid_mapping['host'] = host matched_oid_mapping['vendor'] = vendor else: self.logger.info( "OID {} not present in the given list of device " "info for the host {}".format( pysnmp_output['ansible_sysobjectid'], host)) return matched_oid_mapping # end _oid_mapping def _parse_xml_response(self, xml_response, oid_mapped): xml_response = xml_response.split('">') output = xml_response[1].split('<cli') final = etree.fromstring(output[0]) if final.find('hardware-model') is not None: oid_mapped['product'] = final.find('hardware-model').text if final.find('os-name') is not None: oid_mapped['family'] = final.find('os-name').text if final.find('os-version') is not None: oid_mapped['os-version'] = final.find('os-version').text if final.find('serial-number') is not None: oid_mapped['serial-number'] = final.find('serial-number').text if final.find('host-name') is not None: oid_mapped['hostname'] = final.find('host-name').text # end _parse_xml_response def _ssh_connect(self, ssh_conn, username, password, hostname, commands, oid_mapped): try: ssh_conn.connect( username=username, password=password, hostname=hostname) oid_mapped['username'] = username oid_mapped['password'] = password oid_mapped['host'] = hostname except Exception as ex: self.logger.info( "Could not connect to host {}: {}".format( hostname, str(ex))) return False try: if commands: num_commands = len(commands) - 1 for index, command in enumerate(commands): stdin, stdout, stderr = ssh_conn.exec_command( command['command']) response = stdout.read() if (not stdout and stderr) or ( response is None) or ('error' in response): self.logger.info( "Command {} failed on host {}:{}" .format(command['command'], hostname, stderr)) if index == num_commands: raise RuntimeError("All commands failed on host {}" .format(hostname)) else: break self._parse_xml_response(response, oid_mapped) return True except RuntimeError as rex: self.logger.info("RuntimeError: {}".format(str(rex))) return False except Exception as ex: self.logger.info("SSH failed for host {}: {}".format(hostname, str(ex))) return False # end _ssh_connect def get_device_info_ssh(self, host, oid_mapped, credentials): # find a credential that matches this host status = False device_family_info = self.module.params['device_family_info'] sshconn = paramiko.SSHClient() sshconn.set_missing_host_key_policy(paramiko.AutoAddPolicy()) try: for info in device_family_info: for cred in credentials: status = self._ssh_connect( sshconn, cred['credential']['username'], cred['credential']['password'], host, info['ssh_probe'], oid_mapped) if status: oid_mapped['vendor'] = info['vendor'] break finally: sshconn.close() return status # end _get_device_info_ssh def _detailed_cred_check(self, host, oid_mapped, credentials): remove_null = [] ssh_conn = paramiko.SSHClient() ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy()) index = 0 # check if credentials dict has both username and password defined. # If neither is available, remove the entire entry from the list. # Cannot check ssh connectivity with just the username or password. for creds in credentials[index:]: for user_pwd in creds.values(): if isinstance(user_pwd, dict): if user_pwd.get('username') and user_pwd.get('password'): index += 1 break else: credentials.remove(creds) break # In a list of dict for credentials, if a dict value is None # remove the key from the dict. Only keys with values are retained. for single_dict in credentials: remove_null.append( dict([(dkey, ddata) for dkey, ddata in single_dict.iteritems() if ddata])) # Sorting based on number of keys in a dict. Max-min sorting done here # resulting list would have dict with max keys as first entry # and min as the last prioritized_creds = sorted(remove_null, key=len, reverse=True) try: for device_cred in prioritized_creds: oid_vendor = oid_mapped['vendor'] oid_family = oid_mapped['family'] device_family = device_cred.get('device_family', None) vendor = device_cred.get('vendor', None) cred = device_cred.get('credential', None) username = cred.get('username', None) if cred else None password = cred.get('password', None) if cred else None if device_family and not vendor: continue if vendor and vendor.lower() != oid_vendor.lower(): continue if vendor and device_family and device_family not in \ oid_family: continue if not username or not password: continue response = self._ssh_connect( ssh_conn, username, password, host, None, oid_mapped) if response: return True self.logger.info( "Credential for '{}' didn't work on host '{}'".format( username, host)) return False finally: ssh_conn.close() # end _ssh_check def _pr_object_create_update( self, oid_mapped, fq_name, update): pr_uuid = None msg = None try: os_version = oid_mapped.get('os-version', None) serial_num = oid_mapped.get('serial-number', None) physicalrouter = PhysicalRouter( parent_type='global-system-config', fq_name=fq_name, physical_router_management_ip=oid_mapped.get('host'), physical_router_vendor_name=oid_mapped.get('vendor'), physical_router_product_name=oid_mapped.get('product'), physical_router_device_family=oid_mapped.get('family'), physical_router_vnc_managed=True, physical_router_hostname=fq_name[-1], display_name=fq_name[-1], physical_router_serial_number=serial_num, physical_router_managed_state='active', physical_router_user_credentials={ 'username': oid_mapped.get('username'), 'password': oid_mapped.get('password') } ) if update: pr_unicode_obj = self.vncapi.physical_router_update( physicalrouter) if pr_unicode_obj: pr_obj_dict = ast.literal_eval(pr_unicode_obj) pr_uuid = pr_obj_dict['physical-router']['uuid'] msg = "Discovered %s:\n Host name: %s\n Vendor: %s\n" \ " Model: %s" % ( oid_mapped.get('host'), fq_name[1], oid_mapped.get('vendor'), oid_mapped.get('product') ) self.logger.info("Discovered {} : {}".format( oid_mapped.get('host'), pr_uuid )) else: # underlay_managed flag should only be set at physical-router # creation time physicalrouter.set_physical_router_underlay_managed( self.job_ctx.get('job_input').get('manage_underlay', True) ) pr_uuid = self.vncapi.physical_router_create(physicalrouter) msg = "Discovered device details: {} : {} : {}".format( oid_mapped.get('host'), fq_name[1], oid_mapped.get( 'product')) self.logger.info("Device created with uuid- {} : {}".format( oid_mapped.get( 'host'), pr_uuid)) self.module.send_prouter_object_log(fq_name, "DISCOVERED", os_version, serial_num) except (RefsExistError, Exception) as ex: if isinstance(ex, RefsExistError): return REF_EXISTS_ERROR, None self.logger.error("VNC create failed with error: {}".format(str( ex))) return False, None self.module.send_job_object_log( msg, JOB_IN_PROGRESS, None, job_success_percent=self.per_greenlet_percentage) self.discovery_percentage_write() return True, pr_uuid def get_hostname_from_job_input(self, serial_num): hostname = None devices_to_ztp = self.job_ctx.get('job_input').get('device_to_ztp') for device_info in devices_to_ztp: if device_info.get('serial_number') == serial_num: hostname = device_info.get('hostname') break return hostname def device_info_processing(self, host, oid_mapped): valid_creds = False return_code = True if not oid_mapped.get('family') or not oid_mapped.get('vendor'): self.logger.info("Could not retrieve family/vendor info for " "the host: {}, not creating PR " "object".format(host)) self.logger.info("vendor: {}, family: {}".format( oid_mapped.get('vendor'), oid_mapped.get('family'))) oid_mapped = {} if oid_mapped.get('host'): valid_creds = self._detailed_cred_check(host, oid_mapped, self.credentials) if not valid_creds and oid_mapped: self.logger.info("No credentials matched for host: {}, nothing " "to update in DB".format(host)) oid_mapped = {} if oid_mapped: if self.serial_num_flag: if oid_mapped.get('serial-number') not in \ self.all_serial_num: self.logger.info( "Serial number {} for host {} not present " "in fabric_namespace, nothing to update " "in DB".format( oid_mapped.get('serial-number'), host)) return # use the user-input hostname if there is one. If it is none, check # for a hostname derived from the device system info. If # that is also missing, set the hostname to the serial num user_input_hostname = None if self.job_ctx.get('job_input').get('device_to_ztp') is not None: user_input_hostname = \ self.get_hostname_from_job_input(oid_mapped.get( 'serial-number')) if user_input_hostname is not None: oid_mapped['hostname'] = user_input_hostname elif oid_mapped.get('hostname') is None: oid_mapped['hostname'] = oid_mapped.get('serial-number') fq_name = [ 'default-global-system-config', oid_mapped.get('hostname')] return_code, pr_uuid = self._pr_object_create_update( oid_mapped, fq_name, False) if return_code == REF_EXISTS_ERROR: physicalrouter = self.vncapi.physical_router_read( fq_name=fq_name) phy_router = self.vncapi.obj_to_dict(physicalrouter) if (phy_router.get('physical_router_management_ip') == oid_mapped.get('host')): self.logger.info( "Device with same mgmt ip already exists {}".format( phy_router.get('physical_router_management_ip'))) return_code, pr_uuid = self._pr_object_create_update( oid_mapped, fq_name, True) else: fq_name = [ 'default-global-system-config', oid_mapped.get('hostname') + '_' + oid_mapped.get('host')] return_code, pr_uuid = self._pr_object_create_update( oid_mapped, fq_name, False) if return_code == REF_EXISTS_ERROR: self.logger.debug("Object already exists") if return_code is True: self.vncapi.ref_update( "physical_router", pr_uuid, "fabric", self.fabric_uuid, None, "ADD") self.logger.info( "Fabric updated with physical router info for " "host: {}".format(host)) temp = {} temp['device_management_ip'] = oid_mapped.get('host') temp['device_fqname'] = fq_name temp['device_username'] = oid_mapped.get('username') temp['device_password'] = oid_mapped.get('password') temp['device_family'] = oid_mapped.get('family') temp['device_vendor'] = oid_mapped.get('vendor') temp['device_product'] = oid_mapped.get('product') temp['device_serial_number'] = oid_mapped.get('serial-number') DeviceInfo.output.update({pr_uuid: temp}) def discovery_percentage_write(self): if self.module.results.get('percentage_completed'): exec_id = self.job_ctx.get('job_execution_id') pb_id = self.job_ctx.get('unique_pb_id') self._job_file_write.write_to_file( exec_id, pb_id, JobFileWrite.JOB_PROGRESS, str(self.module.results.get('percentage_completed')) )
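device_info_processing() resolves physical-router name collisions in two steps: create under ['default-global-system-config', hostname]; on REF_EXISTS_ERROR, update in place when the existing object has the same management IP, otherwise retry with hostname_mgmtip. A compressed restatement of that decision with the VNC calls stubbed out:

# Compressed restatement of the collision handling above; the create/read
# callables stand in for the VNC API and REF_EXISTS_ERROR for the module
# sentinel.
REF_EXISTS_ERROR = 'REF_EXISTS'

def resolve_pr_fq_name(hostname, mgmt_ip, create, read):
    fq_name = ['default-global-system-config', hostname]
    rc, uuid = create(fq_name, update=False)
    if rc == REF_EXISTS_ERROR:
        existing = read(fq_name)
        if existing.get('physical_router_management_ip') == mgmt_ip:
            rc, uuid = create(fq_name, update=True)    # same box: update
        else:
            fq_name[1] = hostname + '_' + mgmt_ip      # new box: rename
            rc, uuid = create(fq_name, update=False)
    return rc, uuid, fq_name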
class ExecutableManager(object):
    def __init__(self, logger, vnc_api, job_input, job_log_utils):
        self._logger = logger
        self.vnc_api = vnc_api
        self.vnc_api_init_params = None
        self.api_server_host = None
        self.auth_token = None
        self.contrail_cluster_id = None
        self.sandesh_args = None
        self.job_log_utils = job_log_utils
        self.job_input = job_input
        self.job_utils = None
        self.executable_timeout = 1800
        self.job_template = None
        self.job_execution_id = None
        self.job_template_id = None
        self.result_handler = None
        self.parse_job_input(job_input)
        self.job_utils = JobUtils(self.job_execution_id,
                                  self.job_template_id,
                                  self._logger, self.vnc_api)
        self.job_template = self.job_utils.read_job_template()
        self.job_file_write = JobFileWrite(self._logger)

    def parse_job_input(self, job_input_json):
        # job input should have job_template_id and execution_id fields
        self.job_template_id = job_input_json.get('job_template_id')
        self.job_execution_id = job_input_json.get('job_execution_id')
        self.job_data = job_input_json.get('input')
        self.fabric_fq_name = job_input_json.get('fabric_fq_name')
        self.auth_token = job_input_json.get('auth_token')
        self.contrail_cluster_id = job_input_json.get('contrail_cluster_id')
        self.sandesh_args = job_input_json.get('args')
        self.vnc_api_init_params = job_input_json.get('vnc_api_init_params')
        self.api_server_host = job_input_json.get('api_server_host')

    def _validate_job_input(self, input_schema, ip_json):
        if ip_json is None:
            msg = MsgBundle.getMessage(
                MsgBundle.INPUT_SCHEMA_INPUT_NOT_FOUND)
            raise JobException(msg, self.job_execution_id)
        try:
            ip_schema_json = input_schema
            if isinstance(input_schema, basestring):
                ip_schema_json = json.loads(input_schema)
            jsonschema.validate(ip_json, ip_schema_json)
            self._logger.info("Input schema validation successful "
                              "for template %s" % self.job_template_id)
        except Exception as exp:
            msg = MsgBundle.getMessage(MsgBundle.INVALID_SCHEMA,
                                       job_template_id=self.job_template_id,
                                       exc_obj=exp)
            raise JobException(msg, self.job_execution_id)

    def gather_job_args(self):
        extra_vars = {
            'input': self.job_data,
            'job_template_id': self.job_template.get_uuid(),
            'job_template_fqname': self.job_template.fq_name,
            'fabric_fq_name': self.fabric_fq_name,
            'auth_token': self.auth_token,
            'contrail_cluster_id': self.contrail_cluster_id,
            'api_server_host': self.api_server_host,
            'job_execution_id': self.job_execution_id,
            'sandesh_args': self.sandesh_args,
            'vnc_api_init_params': self.vnc_api_init_params,
        }
        return extra_vars

    def start_job(self):
        self._logger.info("Starting Executable")
        job_error_msg = None
        job_template = self.job_template
        try:
            # create job UVE and log
            self.result_handler = JobResultHandler(self.job_template_id,
                                                   self.job_execution_id,
                                                   self.fabric_fq_name,
                                                   self._logger,
                                                   self.job_utils,
                                                   self.job_log_utils)
            msg = MsgBundle.getMessage(
                MsgBundle.START_JOB_MESSAGE,
                job_execution_id=self.job_execution_id,
                job_template_name=job_template.fq_name[-1])
            self._logger.debug(msg)
            timestamp = int(round(time.time() * 1000))
            self.job_log_utils.send_job_log(job_template.fq_name,
                                            self.job_execution_id,
                                            self.fabric_fq_name, msg,
                                            JobStatus.STARTING.value,
                                            timestamp=timestamp)

            # validate job input if required by the job_template input_schema
            input_schema = job_template.get_job_template_input_schema()
            if input_schema:
                self._validate_job_input(input_schema, self.job_data)

            executable_list = job_template.get_job_template_executables()\
                .get_executable_info()
            for executable in executable_list:
                exec_path = executable.get_executable_path()
                exec_args = executable.get_executable_args()
                job_input_args = self.gather_job_args()
                try:
                    # spawn the executable and stream status markers to the
                    # job_summary file while it runs
                    exec_process = subprocess32.Popen(
                        [exec_path, "--job-input",
                         json.dumps(job_input_args), '--debug', 'True'],
                        close_fds=True, cwd='/',
                        stdout=subprocess32.PIPE,
                        stderr=subprocess32.PIPE)
                    self.job_file_write.write_to_file(
                        self.job_execution_id, "job_summary",
                        JobFileWrite.JOB_LOG,
                        {"job_status": "INPROGRESS"})
                    msg = "Child process pid = " + str(exec_process.pid)
                    self._logger.info(msg)
                    (out, err) = exec_process.communicate(
                        timeout=self.executable_timeout)
                    self._logger.notice(str(out))
                    self._logger.notice(str(err))
                except subprocess32.TimeoutExpired as timeout_exp:
                    if exec_process is not None:
                        os.kill(exec_process.pid, 9)
                    msg = MsgBundle.getMessage(
                        MsgBundle.RUN_EXECUTABLE_PROCESS_TIMEOUT,
                        exec_path=exec_path,
                        exc_msg=repr(timeout_exp))
                    raise JobException(msg, self.job_execution_id)

                self._logger.info(exec_process.returncode)
                self._logger.info("Executable Completed")
                if exec_process.returncode != 0:
                    self.job_file_write.write_to_file(
                        self.job_execution_id, "job_summary",
                        JobFileWrite.JOB_LOG,
                        {"job_status": "FAILED"})
                    msg = MsgBundle.getMessage(
                        MsgBundle.EXECUTABLE_RETURN_WITH_ERROR,
                        exec_uri=exec_path)
                    self._logger.error(msg)
                else:
                    self.job_file_write.write_to_file(
                        self.job_execution_id, "job_summary",
                        JobFileWrite.JOB_LOG,
                        {"job_status": "COMPLETED"})
        except JobException as exp:
            err_msg = "Job Exception received: %s" % repr(exp)
            self._logger.error(err_msg)
            self._logger.error("%s" % traceback.format_exc())
            self.result_handler.update_job_status(JobStatus.FAILURE, err_msg)
            if job_template:
                self.result_handler.create_job_summary_log(
                    job_template.fq_name)
            job_error_msg = err_msg
        except Exception as exp:
            err_msg = "Error while executing job %s" % repr(exp)
            self._logger.error(err_msg)
            self._logger.error("%s" % traceback.format_exc())
            self.result_handler.update_job_status(JobStatus.FAILURE, err_msg)
            self.result_handler.create_job_summary_log(job_template.fq_name)
            job_error_msg = err_msg
        finally:
            # need to wait for the last job log and UVE update to complete
            # via sandesh and then close the sandesh connection
            sandesh_util = SandeshUtils(self._logger)
            sandesh_util.close_sandesh_connection()
            self._logger.info("Closed Sandesh connection")
        if job_error_msg is not None:
            sys.exit(job_error_msg)
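# A minimal usage sketch for ExecutableManager, assuming hypothetical
# logger, vnc_api and job_log_utils objects already wired up by the
# surrounding job-manager process; the template/execution ids and the
# fabric name below are hypothetical placeholders, and a real run also
# needs a live VNC API and Sandesh environment.
def run_executable_job(logger, vnc_api, job_log_utils):
    sample_job_input = {
        'job_template_id': 'f1b9ae32-aaaa-bbbb-cccc-000000000000',  # hypothetical
        'job_execution_id': '1548901200000_abcd',                   # hypothetical
        'input': {},
        'fabric_fq_name': 'default-fabric',                         # hypothetical
        'auth_token': None,
        'contrail_cluster_id': None,
        'args': None,
        'vnc_api_init_params': None,
        'api_server_host': None,
    }
    exec_mgr = ExecutableManager(logger, vnc_api, sample_job_input,
                                 job_log_utils)
    # start_job() spawns each executable listed in the job template and
    # streams INPROGRESS/COMPLETED/FAILED markers to the job_summary file
    exec_mgr.start_job()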
class JobResultHandler(object):
    def __init__(self, job_template_id, execution_id, fabric_fq_name,
                 logger, job_utils, job_log_utils):
        self._job_template_id = job_template_id
        self._execution_id = execution_id
        self._fabric_fq_name = fabric_fq_name
        self._logger = logger
        self._job_utils = job_utils
        self.job_log_utils = job_log_utils
        # job result data
        self.job_result_status = None  # cumulative status
        self.job_result_message = ""   # job result msg when not device specific
        self.job_result = dict()       # map of the device_id to job result msg
        self.job_summary_message = None
        self.failed_device_jobs = list()
        # device_management_ip, device_username, etc
        self.playbook_output = None    # marked output from the playbook stdout
        self.percentage_completed = 0.0
        self._job_file_write = JobFileWrite(self._logger)

    def get_retry_devices(self):
        return (self.playbook_output or {}).get('retry_devices')

    def update_job_status(self, status, message=None, device_id=None,
                          device_name=None, pb_results=None):
        # update cumulative job status; FAILURE is sticky once set
        if self.job_result_status is None or \
                self.job_result_status != JobStatus.FAILURE:
            self.job_result_status = status

        # collect failed device ids
        if status == JobStatus.FAILURE and device_id is not None:
            self.failed_device_jobs.append(device_id)

        # collect the result message
        if message is not None:
            if device_id is not None:
                self.job_result.update(
                    {device_id: {"message": message,
                                 "device_name": device_name,
                                 "device_op_result": pb_results}})
            else:
                self.job_result_message = message
    # end update_job_status

    def update_playbook_output(self, pb_output):
        if self.playbook_output:
            self.playbook_output.update(pb_output)
        else:
            self.playbook_output = pb_output
    # end update_playbook_output

    def create_job_summary_log(self, job_template_fqname):
        # generate job result summary
        self.job_summary_message, device_op_results, \
            failed_device_names = self.create_job_summary_message()
        result = {"gen_dev_job_op": json.dumps(device_op_results)} \
            if device_op_results else None
        timestamp = int(round(time.time() * 1000))

        # create the job log
        self._logger.debug("%s" % self.job_summary_message)
        job_status = None
        if self.job_result_status:
            job_status = self.job_result_status.value

        # write to the file as well
        file_write_data = {
            "job_status": job_status,
            "failed_devices_list": failed_device_names
        }
        self._job_file_write.write_to_file(
            self._execution_id, "job_summary",
            JobFileWrite.JOB_LOG, file_write_data)

        self.job_log_utils.send_job_log(
            job_template_fqname,
            self._execution_id,
            self._fabric_fq_name,
            self.job_summary_message,
            job_status, 100,
            result=result,
            timestamp=timestamp)
    # end create_job_summary_log

    def create_job_summary_message(self):
        job_summary_message = MsgBundle.getMessage(
            MsgBundle.JOB_SUMMARY_MESSAGE_HDR)

        failed_device_jobs_len = len(self.failed_device_jobs)

        if self.job_result_status is None:
            job_summary_message += MsgBundle.getMessage(
                MsgBundle.JOB_RESULT_STATUS_NONE)
        elif self.job_result_status == JobStatus.FAILURE:
            if failed_device_jobs_len > 0:
                job_summary_message += MsgBundle.getMessage(
                    MsgBundle.JOB_MULTI_DEVICE_FAILED_MESSAGE_HDR)
                for failed_device in self.failed_device_jobs:
                    msg = failed_device + ','
                    job_summary_message += msg
            else:
                job_summary_message += "Job failed. "
            job_summary_message += "\n"
        elif self.job_result_status == JobStatus.SUCCESS:
            job_summary_message += MsgBundle.getMessage(
                MsgBundle.JOB_EXECUTION_COMPLETE)

        device_job_result_len = len(self.job_result)
        if device_job_result_len > 0:
            job_summary_message += MsgBundle.getMessage(
                MsgBundle.PLAYBOOK_RESULTS_MESSAGE)
            job_summary_message += \
                "Successfully completed job for %s devices.\n" % \
                (device_job_result_len - failed_device_jobs_len)

        # result_summary is in fact the failed-devices result summary
        result_summary = ""
        device_op_results = []
        failed_device_names = []

        for entry in self.job_result:
            if entry in self.failed_device_jobs:
                result_summary += \
                    "%s:%s \n" % (self.job_result[entry]['device_name'],
                                  self.job_result[entry]['message'])
                failed_device_names.append(
                    self.job_result[entry]['device_name'])
            elif self.job_result[entry]['device_op_result']:
                # could be other device jobs such as device import, topology
                device_op_results.append(
                    self.job_result[entry]['device_op_result'])
        if result_summary != "":
            failed_device_msg = "Job execution failed for %s devices.\n" \
                % len(self.failed_device_jobs)
            result_summary = failed_device_msg + result_summary
        job_summary_message += result_summary

        if self.job_result_message is not None:
            job_summary_message += self.job_result_message

        return job_summary_message, device_op_results, failed_device_names
class PlaybookHelper(object):

    def __init__(self):
        """Playbook helper initializer. Creates the playbook log util class."""
        self._job_file_write = JobFileWrite(JM_LOGGER)

    def get_plugin_output(self, pbex):
        output_json = pbex._tqm._variable_manager._nonpersistent_fact_cache[
            'localhost'].get('output')
        return output_json

    def execute_playbook(self, playbook_info):
        output = None
        try:
            loader = DataLoader()
            inventory = InventoryManager(loader=loader, sources=['localhost'])
            variable_manager = VariableManager(loader=loader,
                                               inventory=inventory)

            Options = namedtuple('Options', [
                'listtags', 'listtasks', 'listhosts', 'syntax', 'connection',
                'module_path', 'forks', 'remote_user', 'private_key_file',
                'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args',
                'scp_extra_args', 'become', 'become_method', 'become_user',
                'verbosity', 'check', 'diff'])
            options = Options(listtags=False, listtasks=False,
                              listhosts=False, syntax=False,
                              connection='ssh', module_path=None, forks=100,
                              remote_user=None, private_key_file=None,
                              ssh_common_args=None, ssh_extra_args=None,
                              sftp_extra_args=None, scp_extra_args=None,
                              become=None, become_method=None,
                              become_user=None, verbosity=None, check=False,
                              diff=False)

            variable_manager.extra_vars = playbook_info['extra_vars']

            pbex = PlaybookExecutor(playbooks=[playbook_info['uri']],
                                    inventory=inventory,
                                    variable_manager=variable_manager,
                                    loader=loader, options=options,
                                    passwords=None)
            ret_val = pbex.run()

            output = self.get_plugin_output(pbex)

            if ret_val != 0:
                msg = MsgBundle.getMessage(
                    MsgBundle.PLAYBOOK_RETURN_WITH_ERROR)
                raise Exception(msg)

            if output is None or output.get('status') is None:
                msg = MsgBundle.getMessage(
                    MsgBundle.PLAYBOOK_OUTPUT_MISSING)
                raise Exception(msg)

            if output.get('status').lower() == "failure":
                msg = MsgBundle.getMessage(
                    MsgBundle.PLAYBOOK_STATUS_FAILED)
                raise Exception(msg)

            return output
        except Exception as exp:
            msg = MsgBundle.getMessage(
                MsgBundle.PLAYBOOK_EXECUTE_ERROR,
                playbook_uri=playbook_info['uri'],
                execution_id=playbook_info['extra_vars']['playbook_input'][
                    'job_execution_id'],
                exc_msg=repr(exp))
            if exp.message:
                msg = msg + "\n" + exp.message

            JM_LOGGER.error(msg)

            # after handling the exception, write an END marker to stop
            # listening to the file if created
            unique_pb_id = playbook_info['extra_vars']['playbook_input'][
                'unique_pb_id']
            exec_id = playbook_info['extra_vars']['playbook_input'][
                'job_execution_id']
            self._job_file_write.write_to_file(
                exec_id, unique_pb_id, JobFileWrite.PLAYBOOK_OUTPUT,
                json.dumps(output))
            with open("/tmp/" + exec_id, "a") as f:
                f.write(unique_pb_id + 'END' + PLAYBOOK_EOL_PATTERN)
            sys.exit(msg)
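# A minimal sketch of the playbook_info structure that execute_playbook()
# reads; the playbook path and the playbook_input values shown here are
# hypothetical placeholders for illustration only.
def run_sample_playbook():
    playbook_info = {
        'uri': '/opt/contrail/playbooks/sample.yml',       # hypothetical path
        'extra_vars': {
            'playbook_input': {
                'job_execution_id': '1548901200000_abcd',  # hypothetical
                'unique_pb_id': 'pb-0001',                 # hypothetical
            }
        }
    }
    helper = PlaybookHelper()
    # returns the 'output' fact published by the playbook on localhost,
    # or exits the process with an error message on failure
    return helper.execute_playbook(playbook_info)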