def _log(self, context, message, level='info'):
    """Log *message* through a lazily created QS logger.

    :param ResourceCommandContext context: command context; only used on the
        first call to derive the log group (reservation id or 'Unreserved').
    :param str message: text to log.
    :param str level: one of 'info', 'debug', 'error', 'critical'.
    :return: None
    """
    if self.logger is None:
        # BUG FIX: hasattr() takes (object, name); the original called
        # hasattr(context.reservation), which raises
        # "TypeError: hasattr expected 2 arguments".
        if hasattr(context, 'reservation'):
            self.logger = qs_logger.get_qs_logger(
                context.reservation.reservation_id, 'PureStorageFlashArray', context.resource.name)
        else:
            self.logger = qs_logger.get_qs_logger('Unreserved', 'PureStorageFlashArray', context.resource.name)
    # Dispatch on level; unknown levels are silently ignored (as before).
    if level == 'info':
        self.logger.info(message)
    elif level == 'debug':
        self.logger.debug(message)
    elif level == 'error':
        self.logger.error(message)
    elif level == 'critical':
        self.logger.critical(message)
def test_get_qs_logger_stream_handler(self):
    """get_qs_logger falls back to a StreamHandler when LOG_PATH is unset."""
    os.environ.pop("LOG_PATH", None)
    qs_logger.get_settings = cut_settings
    first_handler = qs_logger.get_qs_logger(log_group='test2').handlers[0]
    self.assertTrue(isinstance(first_handler, logging.StreamHandler))
def create_logger_for_context(self, logger_name, context):
    """Build a QS logger for an AutoLoad/Resource/RemoteResource command context.

    :param logger_name: log category name.
    :type logger_name: str
    :param context: command context object.
    :return: configured QS logger.
    """
    if self._is_instance_of(context, 'AutoLoadCommandContext'):
        group, prefix = 'Autoload', context.resource.name
    else:
        group = self._get_reservation_id(context)
        if self._is_instance_of(context, 'ResourceCommandContext'):
            prefix = context.resource.name
        elif self._is_instance_of(context, 'ResourceRemoteCommandContext'):
            prefix = context.remote_endpoints[0].name
        else:
            raise Exception(ContextBasedLoggerFactory.UNSUPPORTED_CONTEXT_PROVIDED, context)
    return get_qs_logger(log_file_prefix=prefix, log_group=group, log_category=logger_name)
def run_driver(self, driver_name):
    """Bootstrap and start one driver: config, loggers, commands, listener.

    :param str driver_name: name of the driver package to run.
    """
    # Runtime configuration lives next to the driver package.
    runtime_config = RuntimeConfiguration(
        os.path.join(self._driver_path, driver_name + '_runtime_config.yml'))

    # XML logger: one timestamped file per driver run.
    xml_file_name = driver_name + '--' + datetime.now().strftime('%d-%b-%Y--%H-%M-%S') + '.xml'
    xml_logger = XMLLogger(os.path.join(self._log_path, driver_name, xml_file_name))

    # Command logger; level comes from the runtime configuration (default INFO).
    command_logger = get_qs_logger(log_group=driver_name,
                                   log_file_prefix=driver_name + '_commands',
                                   log_category='COMMANDS')
    log_level = runtime_config.read_key('LOGGING.LEVEL', 'INFO')
    command_logger.setLevel(log_level)
    command_logger.info('Starting driver {0} on port {1}, PID: {2}'.format(driver_name, self._port, os.getpid()))

    # Import the driver's command module and wire everything together.
    driver_commands = importlib.import_module('{}.driver_commands'.format(driver_name), package=None)
    driver_instance = driver_commands.DriverCommands(command_logger, runtime_config)
    command_executor = CommandExecutor(driver_instance, command_logger)
    server = DriverListener(command_executor, xml_logger, command_logger)
    server.start_listening(port=self._port)
def __init__(self, method_ame='runTest'):
    """Connect to vCenter and create a QS logger for the perf test.

    :param str method_ame: test method name forwarded to TestCase.__init__.
        NOTE(review): looks like a typo of 'method_name'; kept as-is since
        renaming would break keyword callers.
    """
    super(SearchObjectsPerfTest, self).__init__(method_ame)
    cred = TestCredentials()
    self.pv_service = pyVmomiService(SmartConnect, Disconnect)
    # Live vCenter connection established at construction time.
    self.si = self.pv_service.connect(cred.host, cred.username, cred.password)
    self.logger = get_qs_logger()
def _log(self, context, message):
    """Best-effort logging: try the QS logger, fall back to a local file.

    :param context: command context; reservation id / resource name are read
        defensively because either may be absent.
    :param str message: text to log.
    """
    # (debug trace, intentionally left disabled)
    # with open(r'c:\programdata\qualisystems\gigamon.log', 'a') as f:
    #     f.write(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) + ' GigamonDriver _log called\r\n')
    try:
        # Use placeholder group/resource names when the context lacks a
        # reservation or resource (e.g. out-of-reservation commands).
        try:
            resid = context.reservation.reservation_id
        except:
            resid = 'out-of-reservation'
        try:
            resourcename = context.resource.fullname
        except:
            resourcename = 'no-resource'
        logger = get_qs_logger(resid, 'GigaVUE-OS-L2', resourcename)
        logger.info(message)
    except Exception as e:
        # QS logger itself failed -- last resort: append to a fixed local
        # file; swallow any error so logging never breaks the command.
        try:
            with open(r'c:\programdata\qualisystems\gigamon.log', 'a') as f:
                f.write(
                    time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) + ' qs_logger failed: ' + str(e) + '\r\n')
                f.write(
                    time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) + ' (QS LOGGER NOT WORKING): ' + message + '\r\n')
        except:
            pass
def get_snmp(self, context, snmp_module_name, miboid):
    """Fetch an SNMP table and echo each row to the reservation output.

    :param InitCommandContext context: this is the context passed by cloudshell automatically
    :param str snmp_module_name: MIB name
    :param str miboid: 'management information base object id' test two
    :return: completion message.
    """
    session = CloudShellAPISession(
        host=context.connectivity.server_address,
        token_id=context.connectivity.admin_auth_token,
        domain=context.reservation.domain)
    reservation_id = context.reservation.reservation_id
    logger = get_qs_logger()

    # The SNMP community string is stored encrypted on the resource.
    community = session.DecryptPassword(
        context.resource.attributes['LinuxServerShell.SNMP Read Community']).Value
    snmp_service = QualiSnmp(
        SNMPV2ReadParameters(ip=context.resource.address, snmp_read_community=community),
        logger)

    for index, info in snmp_service.get_table(snmp_module_name, miboid).items():
        session.WriteMessageToReservationOutput(reservation_id, "[{0}]".format(index))
        for key, value in info.items():
            session.WriteMessageToReservationOutput(
                reservation_id, " {0}: {1}".format(key, value))
    return "\nEnd of execution"
def __init__(self):
    """Collect sandbox context, reservation details and helpers for orchestration."""
    self.automation_api = api_helpers.get_api_session()
    self.workflow = Workflow(self)
    self.suppress_exceptions = True
    self._exception = None
    # Context blobs injected by CloudShell for the current execution.
    self.connectivityContextDetails = helpers.get_connectivity_context_details()
    self.reservationContextDetails = helpers.get_reservation_context_details()
    self.reservationLifecycleDetails = helpers.get_lifecycle_context_details()
    self.global_inputs = helpers.get_global_inputs()
    self.additional_info_inputs = helpers.get_resource_additional_info_inputs()
    self.requirement_inputs = helpers.get_resource_requirement_inputs()
    self.id = self.reservationContextDetails.id
    # disableCache=True: always fetch the live reservation description.
    reservation_description = self.automation_api.GetReservationDetails(reservationId=self.id,
                                                                        disableCache=True).ReservationDescription
    self.name = reservation_description.Name
    self.components = Components(reservation_description.Resources,
                                 reservation_description.Services,
                                 reservation_description.Apps)
    # One log file per sandbox, grouped by reservation id.
    self.logger = get_qs_logger(log_file_prefix='CloudShell Sandbox Orchestration',
                                log_group=self.id,
                                log_category='Orchestration')
    self.apps_configuration = AppsConfiguration(sandbox=self)
def create_logger_for_context(self, logger_name, context):
    """Build a QS logger for AutoLoad / Unreserved / Resource / RemoteResource contexts.

    :param logger_name: log category name.
    :type logger_name: str
    :param context: command context object.
    :return: configured QS logger.
    """
    if self._is_instance_of(context, 'AutoLoadCommandContext'):
        group, prefix = 'Autoload', context.resource.name
    elif self._is_instance_of(context, 'UnreservedResourceCommandContext'):
        group, prefix = 'DeleteArtifacts', context.resource.name
    else:
        group = self._get_reservation_id(context)
        if self._is_instance_of(context, 'ResourceCommandContext'):
            prefix = context.resource.name
        elif self._is_instance_of(context, 'ResourceRemoteCommandContext'):
            prefix = context.remote_endpoints[0].name
        else:
            raise Exception(
                ContextBasedLoggerFactory.UNSUPPORTED_CONTEXT_PROVIDED, context)
    return get_qs_logger(log_file_prefix=prefix, log_group=group, log_category=logger_name)
def test_get_qs_logger_stream_handler(self):
    """Without LOG_PATH, get_qs_logger attaches a StreamHandler."""
    os.environ.pop("LOG_PATH", None)
    qs_logger.get_settings = cut_settings
    created = qs_logger.get_qs_logger(log_group="test2")
    self.assertTrue(isinstance(created.handlers[0], logging.StreamHandler))
def __init__(self):
    """Wire up the helpers and services used by the VM autoload flow."""
    self.cs_helper = CloudshellDriverHelper()
    self.model_parser = ResourceModelParser()
    self.ip_manager = VMIPManager()
    self.task_waiter = SynchronousTaskWaiter()
    self.logger = get_qs_logger('VM AutoLoad')
    # pyVmomi service shares the synchronous task waiter created above.
    self.pv_service = pyVmomiService(SmartConnect, Disconnect, self.task_waiter)
def myexcepthook(exctype, value, tb):
    """Global excepthook: log uncaught exceptions via the QS logger (with a
    local-file fallback), then chain to the default hook.

    :param type exctype: exception class.
    :param value: exception instance.
    :param tb: traceback object.
    """
    x = []
    if issubclass(exctype, Exception):
        x.append("Exception type: " + str(exctype))
        x.append("Value called: " + str(value))
        x.append("Stacktrace: ")
        for trace in traceback.format_tb(tb):
            x.append(trace)
        try:
            logger = get_qs_logger('out-of-reservation', 'GigaVUE-OS-L2', 'no-resource')
            logger.error('\r\n'.join(x))
        except Exception as e:
            # QS logger unavailable -- best-effort append to a local file;
            # swallow any error so the hook itself never raises.
            try:
                with open(r'c:\programdata\qualisystems\gigamon.log', 'a') as f:
                    f.write(
                        time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) + ' qs_logger failed: ' + str(e) + '\r\n')
                    f.write(
                        time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) + ' (QS LOGGER NOT WORKING): ' + '\r\n'.join(x) + '\r\n')
            except:
                pass
    # BUG FIX: the original passed the 'traceback' MODULE as the third
    # argument; the default hook needs the actual traceback object 'tb'.
    sys.__excepthook__(exctype, value, tb)
def discover(self, ip, model, vendor, snmp_params):
    """Autoload the device over SNMP and build the resource/attribute model.

    Python 2 module ('print' statements below).

    :param str ip: The device IP address
    :param str model: The device model in CloudShell
    :param str vendor: The device vendor
    :param SNMPParams snmp_params: SNMP connection parameters (v2 community
        and/or v3 credentials are forwarded to QualiSnmp)
    :return: The loaded resources and attributes
    :rtype: AutoLoadDetails
        NOTE(review): the built AutoLoadDetails is printed but never
        returned -- confirm whether a 'return result' is missing.
    """
    logger = get_qs_logger()
    self.snmp = QualiSnmp(ip=ip, snmp_version=snmp_params.snmp_version, snmp_user=snmp_params.snmp_v3_user,
                          snmp_password=snmp_params.snmp_v3_password, snmp_community=snmp_params.snmp_read_community,
                          snmp_private_key=snmp_params.snmp_v3_privatekey, logger=logger)
    # Reset per-discovery state before walking the device.
    self.attributes = []
    self.exclusion_list = []
    self.chassis_list = []
    self.port_exclude_pattern = 'TEST'
    self.port_mapping = {}
    self.port_list = []
    self.power_supply_list = []
    self.entity_table_black_list = []
    self._excluded_models = []
    self.relative_path = {}
    self.module_list = []
    self.resources = []
    self._get_device_details(model, vendor)
    self._load_snmp_tables()
    # Abort the process when no chassis was discovered at all.
    if (len(self.chassis_list) < 1):
        print 'Empty Chasis List Found'
        exit(1)
    for chassis in self.chassis_list:
        if chassis not in self.exclusion_list:
            chassis_id = self._get_resource_id(chassis)
            # '-1' means "no id reported"; normalize it to '0'.
            if chassis_id == '-1':
                chassis_id = '0'
            self.relative_path[chassis] = chassis_id
    # to add custom MIBSs
    # snmp_handler.update_mib_sources()
    self._filter_lower_bay_containers()
    self.get_module_list()
    self.add_relative_paths()
    self._get_chassis_attributes(self.chassis_list)
    self._get_ports_attributes()
    self._get_module_attributes()
    result = AutoLoadDetails(resources=self.resources, attributes=self.attributes)
    print result  # an Object you can play with it
def __init__(self):
    """Initialize setup state and a per-reservation QS logger."""
    self.reservation_description = None
    # Reservation id comes from the CloudShell execution context.
    self.reservation_id = helpers.get_reservation_context_details().id
    self.resource = None
    self.logger = qs_logger.get_qs_logger(
        log_file_prefix="CloudShell Sandbox Setup",
        log_group=self.reservation_id,
        log_category='Setup')
def __init__(self, action, name='', logger=None):
    """Timing/counting wrapper around *action*.

    :param action: callable being measured.
    :param str name: label used in the report line.
    :param logger: optional logger; a default 'performance' QS logger is
        created when omitted.
    """
    self.action = action
    self.name = name
    self._print_format = '{0} ran: {1} times - total: {2} (sec) avg: {3} (sec)'
    self.logger = logger if logger is not None else get_qs_logger('performance')
def __init__(self):
    """Read connection settings from configuration and create the internal logger."""
    DriverHandlerBase.__init__(self)
    self._port = ConfigurationParser.get("common_variable", "connection_port")
    self._driver_name = ConfigurationParser.get("common_variable", "driver_name")
    # Internal (non-command) log stream, named after the driver.
    self._logger = get_qs_logger(log_group=self._driver_name + '_internal',
                                 log_file_prefix=self._driver_name + '_internal',
                                 log_category='INTERNAL')
def retrieving_snmp_table(self, ip):
    """Fetch the IF-MIB ifDescr table from *ip* over SNMP v2 (demo helper)."""
    qs_log = get_qs_logger()
    service = QualiSnmp(ip=ip,
                        snmp_version='2',
                        snmp_community="Community String",
                        logger=qs_log)
    # Result intentionally unused -- the call itself is the demonstration.
    if_table = service.get_table('IF-MIB', 'ifDescr')
def test_get_qs_logger_full_settings(self):
    """With full settings the first handler is a MultiProcessingLog."""
    qs_logger.get_settings = full_settings
    handler = qs_logger.get_qs_logger(log_group="test1").handlers[0]
    self.assertTrue(isinstance(handler, MultiProcessingLog))
def __init__(self, host, port, request_manager, exe_folder_str): def __export_log_path(): os.environ['LOG_PATH'] = os.path.join(exe_folder_str, '..', 'Logs') __export_log_path() driver_name = ConfigurationParser.get("common_variable", "driver_name") self._xml_logger = get_qs_logger(log_group=driver_name + '_xml', log_file_prefix=driver_name + '_xml', log_category='XML') self._command_logger = get_qs_logger( log_group=driver_name + '_commands', log_file_prefix=driver_name + '_commands', log_category='COMMANDS') print 'Logger created with path {}'.format( self._command_logger.handlers[0]._handler.baseFilename) self._command_logger.info("Driver name: " + driver_name) self._command_logger.info("Driver host: " + host) self._command_logger.info("Driver port: " + str(port)) self._is_running = True self._host = host self._port = port self._request_manager = request_manager self._server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._command_logger.debug('New socket created') try: self._server_socket.bind((self._host, self._port)) except socket.error as error_object: # log will be here self._command_logger.error(str(error_object)) raise Exception( 'ServerConnection', 'Can\'t bind host and port: ' + self._host + ':' + self._port + '!') self._command_logger.debug('Start listenning ...') self._server_socket.listen(100)
def retrieving_snmp_properties(self, ip, community_string):
    """Return the SNMPv2 sysName property of the device at *ip*."""
    qs_log = get_qs_logger()
    service = QualiSnmp(ip=ip,
                        snmp_version='2',
                        snmp_community=community_string,
                        logger=qs_log)
    return service.get_property('SNMPv2-MIB', 'sysName', 0)
def initialize(self, context):
    """
    Initialize the driver session; called for every new driver instance.
    A good place to load/cache configuration and open sessions.

    :param InitCommandContext context: the context the command runs on
    """
    self.logger = get_qs_logger()
def __init__(self):
    """Bootstrap the driver project: logger, config module and bindings."""
    self._logger = get_qs_logger('Bootstrap', 'QS', 'Generic resource')
    self._logger.debug('Initializing project')
    # Package-provided configuration path wins over the baked-in constant.
    self._modules_configuration_path = configuration_path.__path__ or CONFIGURATION_PATH
    self._configuration_file_name_pattern = r'configuration.py$'
    self._bindings_file_name_pattern = r'bindings.py$'
    self._bindings_func_name = 'bindings'
    # Fresh in-memory module that accumulates merged configuration.
    self._config = types.ModuleType('config')
    self._bindings = []
    self.add_config(DriverBootstrap.BASE_CONFIG)
def __init__(self):
    """Read connection settings, create the internal logger and log the version."""
    DriverHandlerBase.__init__(self)
    self._port = ConfigurationParser.get("common_variable", "connection_port")
    self._driver_name = ConfigurationParser.get("common_variable", "driver_name")
    self._logger = get_qs_logger(log_group=self._driver_name + '_internal',
                                 log_file_prefix=self._driver_name + '_internal',
                                 log_category='INTERNAL')
    self.log('Driver Version: 1.0.2')
    # Timestamp of last activity; 0 means "never".
    self._last_active_time = 0
def example_command_with_cancellation(self, context, cancellation_token, user_param1):
    """
    :type context: cloudshell.shell.core.driver_context.ResourceCommandContext
    :type cancellation_token: cloudshell.shell.core.driver_context.CancellationContext
    """
    # NOTE(review): the 'reservation_id' keyword differs from the
    # log_group/log_file_prefix style used elsewhere -- verify against the
    # installed qs_logger API.
    logger = get_qs_logger("my_log",
                           context.resource.name,
                           reservation_id=context.reservation.reservation_id)
    logger.info("This is a test log")
    return self._helper_method(user_param1)
def __init__(self, host, port, request_manager, exe_folder_str): def __export_log_path(): os.environ['LOG_PATH'] = os.path.join(exe_folder_str, '..', 'Logs') __export_log_path() driver_name = ConfigurationParser.get("common_variable", "driver_name") self._xml_logger = get_qs_logger(log_group=driver_name + '_xml', log_file_prefix=driver_name + '_xml', log_category='XML') self._command_logger = get_qs_logger(log_group=driver_name + '_commands', log_file_prefix=driver_name + '_commands', log_category='COMMANDS') print 'Logger created with path {}'.format(self._command_logger.handlers[0]._handler.baseFilename) self._command_logger.info("Driver name: " + driver_name) self._command_logger.info("Driver host: " + host) self._command_logger.info("Driver port: " + str(port)) self._is_running = True self._host = host self._port = port self._request_manager = request_manager self._server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._command_logger.debug('New socket created') try: self._server_socket.bind((self._host, self._port)) except socket.error as error_object: # log will be here self._command_logger.error(str(error_object)) raise Exception('ServerConnection', 'Can\'t bind host and port: ' + self._host + ':' + self._port + '!') self._command_logger.debug('Start listenning ...') self._server_socket.listen(100)
def __init__(self):
    """Load backup configuration and attach a per-sandbox QS logger."""
    self.cwd = os.getcwd()
    # NOTE(review): backslashes in this Windows path are not escaped; none
    # form recognized escape sequences, but a raw string would be safer.
    self.config_file = 'C:\ProgramData\QualiSystems\QBlueprintsBackup\config.json'
    self.configs = json.loads(open(self.config_file).read())
    self.sandbox = Sandbox()
    # Lightweight record describing one file to back up.
    self.FileDescription = namedtuple('FileDescription', 'path contents executable')
    self.logger = get_qs_logger(
        log_file_prefix="CloudShell Sandbox Backup",
        log_group=self.sandbox.id,
        log_category='BluePrintBackup')
def _get_handler(self, ip, shell_name="", shell_type="", community="public"):
    """Build a Cisco SNMP autoload handler for the device at *ip*."""
    qs_log = get_qs_logger(log_file_prefix=ip)
    snmp_service = QualiSnmp(
        SNMPV2ReadParameters(ip=ip, snmp_read_community=community),
        logger=qs_log,
    )
    return CiscoGenericSNMPAutoload(
        shell_name=shell_name,
        shell_type=shell_type,
        logger=qs_log,
        snmp_handler=snmp_service,
        resource_name=ip,
    )
def send_email(subject='', message=''):
    """Send a plain-text email via the configured SMTP relay.

    Failures are logged, not raised, so callers are never broken by mail
    problems.

    :param str subject: message subject.
    :param str message: message body.
    """
    msg = MIMEText(message)
    msg['Subject'] = subject
    msg['From'] = SMTP_USER
    msg['To'] = SMTP_MAIL_LIST
    try:
        s = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)
        s.sendmail(SMTP_USER, SMTP_MAIL_LIST, msg.as_string())
        s.quit()
    except Exception as e:
        logger = qs_logger.get_qs_logger()
        logger.warning('Unable to send email message')
        # BUG FIX: Exception.message does not exist on Python 3 (and is
        # deprecated on Python 2); str(e) is portable.
        logger.warning(str(e))
        logger.warning(message)
def get_inventory(self, context): """ Discovers the resource structure and attributes. :param AutoLoadCommandContext context: the context the command runs on :return Attribute and sub-resource information for the Shell resource you can return an AutoLoadDetails object :rtype: AutoLoadDetails """ # See below some example code demonstrating how to return the resource structure and attributes # In real life, this code will be preceded by SNMP/other calls to the resource details and will not be static # run 'shellfoundry generate' in order to create classes that represent your data model self._logger = self._get_logger(context) resource = LinuxServerShell.create_from_context(context) session = CloudShellAPISession( host=context.connectivity.server_address, token_id=context.connectivity.admin_auth_token, domain='Global') logger = get_qs_logger() address = context.resource.address snmp_read_community = session.DecryptPassword( context.resource.attributes['LinuxServerShell.SNMP Read Community'] ).Value snmp_v2_parameters = SNMPV2ReadParameters( ip=address, snmp_read_community=snmp_read_community) snmp_service = QualiSnmp(snmp_v2_parameters, logger) for if_table in snmp_service.get_table('IF-MIB', 'ifTable').values(): port = ResourcePort(if_table['ifDescr']) port.model_name = if_table['ifType'] port.mac_address = if_table['ifPhysAddress'] port.port_speed = if_table['ifSpeed'] for ip_table in snmp_service.get_table('IP-MIB', 'ipAddrTable').values(): if ip_table['ipAdEntIfIndex'] == if_table['ifIndex']: port.ipv4_address = ip_table['ipAdEntAddr'] resource.add_sub_resource(if_table['ifIndex'], port) autoload_details = resource.create_autoload_details() self._logger.info( 'autoload attributes: ' + ','.join([str(vars(x)) for x in autoload_details.attributes])) self._logger.info( 'autoload resources: ' + ','.join([str(vars(x)) for x in autoload_details.resources])) return autoload_details
def test_get_qs_logger_container_filling(self):
    """Loggers are cached in _LOGGER_CONTAINER keyed by log group."""
    qs_logger.get_settings = full_settings
    qs_logger.get_qs_logger()
    qs_logger.get_qs_logger(log_group='test1')
    os.environ.pop("LOG_PATH", None)
    qs_logger.get_settings = cut_settings
    qs_logger.get_qs_logger(log_group='test2')
    expected = sorted(["Ungrouped", "test1", "test2"])
    self.assertEqual(sorted(qs_logger._LOGGER_CONTAINER.keys()), expected)
def test_get_qs_logger_container_filling(self):
    """Each distinct log group gets its own cached logger entry."""
    qs_logger.get_settings = full_settings
    qs_logger.get_qs_logger()
    qs_logger.get_qs_logger(log_group="test1")
    os.environ.pop("LOG_PATH", None)
    qs_logger.get_settings = cut_settings
    qs_logger.get_qs_logger(log_group="test2")
    cached_groups = sorted(qs_logger._LOGGER_CONTAINER.keys())
    self.assertEqual(cached_groups, sorted(["Ungrouped", "test1", "test2"]))
def initialize(self, context):
    """
    Initialize the driver session; called for every new instance of the
    driver. Caches resource identity and decrypted iDRAC credentials.

    :param InitCommandContext context: the context the command runs on
    """
    warnings.filterwarnings("ignore")
    self.logger = qs_logger.get_qs_logger('Canvas', context.resource.name)
    self.logger.info(str(context.resource.attributes))
    self.resourcename = context.resource.name
    self.resourceaddress = context.resource.address
    self.idrac_ip = context.resource.attributes[
        'Dellredfishshell1.iDRAC_ip']
    self.idrac_username = context.resource.attributes[
        'Dellredfishshell1.iDRAC_username']
    api = CloudShellSessionContext(context).get_api()
    self.idrac_password = \
        api.DecryptPassword(context.resource.attributes['Dellredfishshell1.iDRAC_password']).Value
    # SECURITY FIX: the original logged the decrypted password in plaintext;
    # never write credentials to the log file.
    self.logger.info("idrac: ip {0}, user {1}".format(
        self.idrac_ip, self.idrac_username))
def _get_logger(self, context):
    """Build a QS logger for *context*, tolerating missing reservation/resource.

    :param context: command context.
    :return: the logger object, or None if logger creation itself failed.
    :rtype: logging.Logger
    """
    res_id = 'out-of-reservation'
    resource_name = 'no-resource'
    try:
        try:
            res_id = context.reservation.reservation_id
        except:
            pass
        try:
            resource_name = context.resource.fullname
        except:
            pass
        return get_qs_logger(res_id, 'PutShellDriver', resource_name)
    except Exception:
        # Never let logging setup break the command.
        return None
def create_logger_for_context(self, logger_name, context):
    """Build a QS logger for an AutoLoad/Resource/RemoteResource command context.

    :param logger_name: logger name.
    :type logger_name: str
    :param context: command context object.
    :return: configured QS logger.
    """
    if self._is_instance_of(context, 'AutoLoadCommandContext'):
        group, handler = 'Autoload', 'Default'
    elif self._is_instance_of(context, 'ResourceCommandContext'):
        group, handler = context.reservation.reservation_id, context.resource.name
    elif self._is_instance_of(context, 'ResourceRemoteCommandContext'):
        group, handler = context.remote_reservation.reservation_id, context.remote_endpoints[0].name
    else:
        raise Exception(ContextBasedLoggerFactory.UNSUPPORTED_CONTEXT_PROVIDED, context)
    return get_qs_logger(name=logger_name, handler_name=handler, reservation_id=group)
def get_logger_for_context(context):
    """Create a QS logger whose group/prefix depend on the context type.

    :param context: command context.
    :return: the logger object
    :rtype: logging.Logger
    """
    if is_instance_of(context, 'AutoLoadCommandContext'):
        log_group, resource_name = INVENTORY, context.resource.name
    elif is_instance_of(context, 'ResourceCommandContext'):
        # Fall back to the inventory group when run outside a reservation.
        log_group = context.reservation.reservation_id if context.reservation else INVENTORY
        resource_name = context.resource.name
    elif is_instance_of(context, 'ResourceRemoteCommandContext'):
        log_group = context.remote_reservation.reservation_id if context.remote_reservation else INVENTORY
        resource_name = context.remote_endpoints[0].name
    else:
        raise Exception('get_logger_for_context', 'Unsupported command context provided {0}'.format(context))
    exec_info = get_execution_info(context)
    created_logger = get_qs_logger(log_group=log_group, log_category='QS', log_file_prefix=resource_name)
    log_execution_info(created_logger, exec_info)
    return created_logger
def get_logger_for_driver(context=None, config=None):
    """
    Create QS Logger for command context AutoLoadCommandContext,
    ResourceCommandContext or ResourceRemoteCommandContext.

    :param context: command context.
    :param config: driver config; may declare HANDLER_CLASS (class or str).
    :return: configured QS logger.
    """
    if hasattr(config, 'HANDLER_CLASS'):
        handler_class = config.HANDLER_CLASS
    else:
        handler_class = None
    # BUG FIX: types.ClassType only exists on Python 2 (old-style classes);
    # fall back to 'type' so this also runs on Python 3 while keeping the
    # original Python 2 behavior.
    class_type = getattr(types, 'ClassType', type)
    if handler_class and isinstance(handler_class, class_type):
        logger_name = handler_class.__name__
    elif handler_class and isinstance(handler_class, str):
        logger_name = handler_class
    else:
        logger_name = 'QS'
    if is_instance_of(context, config.AUTOLOAD_COMMAND_CONTEXT):
        reservation_id = 'Autoload'
        resource_name = context.resource.name
    elif is_instance_of(context, config.RESOURCE_COMMAND_CONTEXT):
        reservation_id = context.reservation.reservation_id
        resource_name = context.resource.name
    elif is_instance_of(context, config.RESOURCE_REMOTE_COMMAND_CONTEXT):
        reservation_id = context.remote_reservation.reservation_id
        resource_name = context.remote_endpoints[0].name
    else:
        raise Exception('get_context_based_logger', 'Unsupported command context provided {0}'.format(context))
    exec_info = get_execution_info(context)
    qs_logger = get_qs_logger(reservation_id, logger_name, resource_name)
    log_execution_info(qs_logger, exec_info)
    return qs_logger
""" Read data from device :param timeout: time between retries :return: str """ # Set the channel timeout timeout = timeout if timeout else self._timeout self._current_channel.settimeout(timeout) return self._current_channel.recv(self._buffer_size) if __name__ == "__main__": from collections import OrderedDict from cloudshell.core.logger.qs_logger import get_qs_logger logger = get_qs_logger() session = SSHSession(username='******', password='******', host='192.168.42.235', logger=logger) #session = SSHSession(username='******', password='******', host='192.168.42.193', logger=logger, timeout=2) prompt = '[$#] *$' session.connect(prompt) actions = OrderedDict() actions["--[Mm]ore--"] = lambda: session.send_line('') output = session.hardware_expect('cd /', re_string=prompt, expect_map=actions) output = session.hardware_expect('ls', re_string=prompt, expect_map=actions) output = output
def test_get_qs_logger_full_settings(self):
    """With full settings, get_qs_logger attaches a MultiProcessingLog handler."""
    qs_logger.get_settings = full_settings
    created = qs_logger.get_qs_logger(log_group="test1")
    self.assertTrue(isinstance(created.handlers[0], MultiProcessingLog))
def vmx_orch_hook_during_provisioning(self, context):
    """Deploy a multi-VM Juniper vMX (one VCP + N VFP cards) from app
    templates, autoload it, and swap it in for the template resource.

    Python 2 module (iteritems / print-era code).

    :param ResourceCommandContext context: template-resource context whose
        attributes describe the apps, credentials and sizing to deploy.
    """
    logger = get_qs_logger(log_group=context.reservation.reservation_id, log_file_prefix='vMX')
    logger.info('deploy called')
    api = CloudShellAPISession(host=context.connectivity.server_address,
                               token_id=context.connectivity.admin_auth_token,
                               domain=context.reservation.domain)
    resid = context.reservation.reservation_id
    vmxtemplate_resource = context.resource.name
    logger.info('context attrs: ' + str(context.resource.attributes))
    # Pull deployment parameters off the template resource's attributes.
    vmxuser = context.resource.attributes['User']
    vmxpassword = api.DecryptPassword(context.resource.attributes['Password']).Value
    vcp_app_template_name = context.resource.attributes['Chassis App']
    vfp_app_template_name_template = context.resource.attributes['Module App']
    internal_vlan_service_name = context.resource.attributes['Internal Network Service'] or 'VLAN Auto'
    vlantype = context.resource.attributes.get('Vlan Type') or 'VLAN'
    ncards = int(context.resource.attributes.get('Number of modules', '1'))
    router_family = context.resource.attributes['Deployed Resource Family']
    router_model = context.resource.attributes['Deployed Resource Model']
    router_driver = context.resource.attributes['Deployed Resource Driver']
    chassis_deployed_model_name = context.resource.attributes['Controller App Resource Model']
    card_deployed_model_name = context.resource.attributes['Card App Resource Model']
    requested_vmx_ip = context.resource.attributes.get('Management IP', 'dhcp')
    username = context.resource.attributes.get('User', 'user')
    userpassword = api.DecryptPassword(context.resource.attributes.get('Password', '')).Value
    rootpassword = userpassword
    userfullname = context.resource.attributes.get('User Full Name', username)
    # Fail fast when required attributes are absent.
    missing = []
    for a in ['Chassis App', 'Module App', 'Deployed Resource Family', 'Deployed Resource Model']:
        if a not in context.resource.attributes:
            missing.append(a)
    if missing:
        raise Exception('Template resource missing values for attributes: %s' % ', '.join(missing))
    # The module-app name must contain a %d slot for the card index.
    if '%d' not in vfp_app_template_name_template:
        vfp_app_template_name_template += '%d'
    px, py = get_resource_position(api, resid, vmxtemplate_resource)
    # Unique name for this deployment, derived from the template resource.
    vmx_resource = vmxtemplate_resource.replace('Template ', '').replace('Template', '') + '_' + str(randint(1, 10000))
    fakel2name = '%s L2' % vmx_resource
    # (template, alias, x, y) tuples: one VCP plus ncards VFPs, stacked below.
    todeploy = [
        (vcp_app_template_name, '%s_vcp' % vmx_resource, px, py + 100)
    ] + [
        (vfp_app_template_name_template % i, '%s_vfp%d' % (vmx_resource, i), px, py+100+100+100*i)
        for i in range(ncards)
    ]
    # Up to 5 attempts: deploy the VMs, wait for SSH and all ge- interfaces;
    # on failure delete the VMs and retry.
    for _ in range(5):
        with Mutex(api, resid, logger):
            for template, alias, x, y in todeploy:
                add_app(api, resid, template, alias, x, y)
            app_aliases = [alias for template, alias, x, y in todeploy]
            api.DeployAppToCloudProviderBulk(resid, app_aliases)
        with Mutex(api, resid, logger):
            logger.info('original app aliases = %s' % str(app_aliases))
            vmname2details = get_details_of_deployed_app_resources(api, resid, app_aliases)
        deployed_vcp = sorted([x for x in vmname2details if 'vcp' in x])
        deployed_vfp = sorted([x for x in vmname2details if 'vfp' in x])
        deployed = deployed_vcp + deployed_vfp
        logger.info('deployed apps = %s' % str(deployed))
        vmxip, mac2nicname, netid50, cpname = post_creation_vm_setup(api, resid, deployed, deployed_vcp,
                                                                     deployed_vfp, internal_vlan_service_name,
                                                                     requested_vmx_ip, rootpassword, userfullname,
                                                                     username, userpassword, vmname2details,
                                                                     vmx_resource, logger)
        if not vmxip:
            raise Exception('VCP did not receive an IP (requested %s)' % (requested_vmx_ip))
        if not wait_for_ssh_up(api, resid, vmxip, vmxuser, vmxpassword, logger):
            raise Exception('VCP not reachable via SSH within 5 minutes at IP %s -- check management network' % vmxip)
        if ssh_wait_for_ge_interfaces(api, resid, vmxip, vmxpassword, ncards, logger):
            logger.info('All expected ge- interfaces found')
            break
        msg = '%d card(s) not discovered within 3 minutes - recreating VMs' % ncards
        logger.info(msg)
        api.WriteMessageToReservationOutput(resid, msg)
        api.DeleteResources(deployed)
        sleep(10)
    else:
        # for/else: all 5 attempts exhausted without a break.
        raise Exception('%d cards were not discovered after 10 minutes in 5 attempts' % ncards)
    # Give each VFP VM its own name as its address.
    for kj in deployed_vfp:
        api.UpdateResourceAddress(kj, kj)
    # Create the logical router resource that represents the whole vMX.
    api.CreateResource(router_family, router_model, vmx_resource, vmxip)
    api.AddResourcesToReservation(resid, [vmx_resource])
    api.SetReservationResourcePosition(resid, vmxtemplate_resource, px, py-50)
    api.SetReservationResourcePosition(resid, vmx_resource, px, py)
    if router_driver:
        api.UpdateResourceDriver(vmx_resource, router_driver)
    # Replace any stale cleanup service for this resource, then add a fresh one.
    try:
        api.RemoveServicesFromReservation(resid, [vmx_resource + ' vMX resource cleanup'])
    except:
        pass
    api.AddServiceToReservation(resid, 'VNF Cleanup Service', vmx_resource + ' vMX resource cleanup', [
        AttributeNameValue('Resources to Delete', ','.join([
            vmx_resource,
        ])),
    ])
    copy_resource_attributes(api, vmxtemplate_resource, vmx_resource)
    # Up to 5 autoload attempts; success when every card has visible ports.
    for _ in range(5):
        logger.info('Running autoload...')
        try:
            api.AutoLoad(vmx_resource)
            children_flat = get_all_child_resources(api, vmx_resource)
            # Keep only port-like children (path contains '/' and a '-' leaf).
            ge_children_flat = {a: b for a, b in children_flat.iteritems()
                                if '/' in a and '-' in a.split('/')[-1]}
            foundcards2ports = defaultdict(list)
            for fullpath, attrs in ge_children_flat.iteritems():
                foundcards2ports[attrs['ResourceBasename'].split('-')[1]].append(attrs['ResourceBasename'])
            if len(foundcards2ports) >= ncards:
                logger.info('Autoload found ports: %s' % (foundcards2ports))
                break
            logger.info('Autoload did not find all cards (%d) or ports per card (10). Retrying in 10 seconds. Found: %s' % (ncards, foundcards2ports))
            sleep(10)
        except Exception as ek:
            logger.info('Autoload error: %s. Retrying in 30 seconds.' % str(ek))
            sleep(30)
    else:
        raise Exception('Autoload did not discover all expected ports - unhandled vMX failure')
    post_autoload_cleanup(api, resid, deployed_vfp, vmname2details, netid50, logger)
    # Map card-id string (from the VFP VM name) back to the deployed app name.
    vfpcardidstr2deployedapp3 = {vfpname.split('_')[2].replace('vfp', '').split('-')[0]: vfpname
                                 for vfpname in deployed_vfp}

    def vm_from_ge_port(portname):
        # Resolve an autoloaded ge- port name to its backing VFP VM.
        if '/' in portname:
            portname = portname.split('/')[-1]
        return vfpcardidstr2deployedapp3[portname.split('-')[1]]

    logger.info('vfpcardidstr2deployedapp = %s' % str(vfpcardidstr2deployedapp3))
    # For each autoloaded port, record (owning VM, vNIC name) -- matched by MAC.
    autoloadport2vmname_nicname = {}
    for ch, attrs in ge_children_flat.iteritems():
        for attr, val in attrs.iteritems():
            if 'mac' in attr.lower() and 'address' in attr.lower():
                autoloadport2vmname_nicname[ch] = (vm_from_ge_port(ch),
                                                   mac2nicname.get(val, attrs['ResourceBasename'].split('-')[-1]))
    create_fake_L2(api, fakel2name, vlantype, autoloadport2vmname_nicname)
    # Replace any stale L2-cleanup service, then add a fresh one.
    try:
        api.RemoveServicesFromReservation(resid, [vmx_resource + ' L2 cleanup'])
    except:
        pass
    api.AddServiceToReservation(resid, 'VNF Cleanup Service', vmx_resource + ' L2 cleanup', [
        AttributeNameValue('Resources to Delete', ','.join([
            fakel2name,
        ])),
    ])
    logger.info('deployed_vcp=%s deployed_vfp=%s deployed=%s' % (deployed_vcp, deployed_vfp, deployed))
    # Re-point the template's connectors at the autoloaded ports, then drop
    # the template resource from the reservation.
    with Mutex(api, resid, logger):
        basename2fullpath = {fullpath.split('/')[-1]: fullpath for fullpath in autoloadport2vmname_nicname}

        def mapfunc(oldpath):
            basename = oldpath.split('/')[-1]
            return basename2fullpath[basename]

        move_connectors_of(api, resid, vmxtemplate_resource, mapfunc, logger)
        api.RemoveResourcesFromReservation(resid, [vmxtemplate_resource])
    logger.info('SUCCESS deploying vMX %s' % vmx_resource)
def test_get_qs_logger_full_settings_default_params(self):
    """With a full settings file and all-default arguments, get_qs_logger
    must attach a MultiProcessingLog handler as the first handler."""
    qs_logger.get_settings = full_settings
    created_logger = qs_logger.get_qs_logger()
    first_handler = created_logger.handlers[0]
    self.assertTrue(isinstance(first_handler, MultiProcessingLog))
def __init__(self):
    """Capture the current reservation id and build a sandbox-scoped teardown logger."""
    self.reservation_id = helpers.get_reservation_context_details().id
    # BUG FIX: get_qs_logger has no `name` or `reservation_id` parameters --
    # its signature is (log_group, log_category, log_file_prefix), so the
    # previous keywords raised TypeError at construction time. Group the log
    # by reservation id, matching the Setup/Connect-All orchestration scripts.
    self.logger = qs_logger.get_qs_logger(
        log_file_prefix="CloudShell Sandbox Teardown",
        log_group=self.reservation_id,
        log_category='Teardown')
def __init__(self):
    """Resolve the active reservation and create a 'Connect All' logger grouped by its id."""
    reservation_details = helpers.get_reservation_context_details()
    self.reservation_id = reservation_details.id
    self.logger = qs_logger.get_qs_logger(
        log_file_prefix='Connect_All',
        log_group=self.reservation_id,
        log_category="Connect All",
    )
def do_action(cli, mode, attrs): session_type = SSHSession with cli.get_session(session_type, attrs, mode, cli.logger) as default_session: out = default_session.send_command('show interfaces', logger=cli.logger) # print(out) # with default_session.enter_mode(CONFIG_MODE) as config_session: # out = config_session.send_command('show interfaces', logger=cli.logger) # print(out) # out = config_session.send_command('show interfaces', logger=cli.logger) # print(out) if __name__ == '__main__': logger = get_qs_logger() pool = SessionPoolManager(max_pool_size=1) cli = Cli(logger=logger, session_pool=pool) connection_attrs = { 'host': '192.168.28.150', 'username': '******', 'password': '******' } connection_attrs1 = { 'host': '192.168.28.150', 'username': '******', 'password': '******' } Thread(target=do_action,
from cloudshell.networking.juniper.autoload.juniper_snmp_autoload import JuniperSnmpAutoload
from cloudshell.networking.juniper.utils import FakeSnmpHandler
# Alternative canned data sets for the fake SNMP handler:
# from cloudshell.networking.juniper.examples.autoload_test_data import MIB_DATA_MAP
# from cloudshell.networking.juniper.examples.autoload_srx220h_data import MIB_DATA_MAP
# BUG FIX: QualiSnmp was imported twice; the exact-duplicate import removed.
from cloudshell.snmp.quali_snmp import QualiSnmp
from cloudshell.networking.juniper.examples.autoload_debug_data import G_DATA
from cloudshell.core.logger.qs_logger import get_qs_logger

#
# Live-device settings (only used by the commented-out QualiSnmp handler below).
ip = "192.168.28.150"
community = "public"
logger = get_qs_logger('autoload_test_logger')
# snmp_handler = QualiSnmp(ip, community=community, logger=logger)
#
# Map MIB table identifiers onto pre-captured walk data (G_DATA) so the
# autoload can be exercised offline without a reachable device.
MIB_DATA_MAP = {
    'JUNIPER-MIB::jnxContainersTable': G_DATA['jnxContainersTable'],
    'JUNIPER-MIB::jnxContentsTable': G_DATA['jnxContentsTable'],
    'SNMPv2-MIB::system': G_DATA['system'],
    'JUNIPER-IF-MIB::ifChassisTable': G_DATA['ifChassisTable'],
    'IF-MIB::interfaces': G_DATA['interfaces'],
    'IP-MIB::ipAddrTable': G_DATA['ipAddrTable'],
    'IEEE8023-LAG-MIB::dot3adAggPortAttachedAggID': G_DATA['dot3adAggPortTable'],
    'EtherLike-MIB::dot3StatsDuplexStatus': G_DATA['dot3StatsDuplexStatus'],
}
snmp_handler = FakeSnmpHandler(MIB_DATA_MAP)
command = "dlt-crs-fiber::{0}&{1}:{2};".format(src_in_port, dst_in_port, self._incr_ctag()) self._session.send_command(command, re_string=self._prompt) else: self.map_clear_to(src_port, dst_port, command_logger) else: raise Exception(self.__class__.__name__, "Selected '{}' connection type is not supported".format(self._service_mode)) def set_speed_manual(self, command_logger=None): pass if __name__ == '__main__': import sys from cloudshell.core.logger.qs_logger import get_qs_logger from common.xml_wrapper import XMLWrapper ConfigurationParser.set_root_folder(get_file_folder(sys.argv[0].replace("/glimmerglass/", "/"))) gglass = GlimmerglassDriverHandler() plogger = get_qs_logger('Autoload', 'GlimmerGlass', 'GlimmerGlass') gglass.login('localhost:1023', 'admin', '********', plogger) result = gglass.get_resource_description('localhost:1023') result1 = gglass.get_resource_description('localhost:1023') print XMLWrapper.get_string_from_xml(result) print XMLWrapper.get_string_from_xml(result1)
def __init__(self):
    """Capture the current reservation id and create a reservation-scoped 'Connect All' logger."""
    self.reservation_id = helpers.get_reservation_context_details().id
    # BUG FIX: get_qs_logger has no `name` or `reservation_id` parameters --
    # its signature is (log_group, log_category, log_file_prefix), so the
    # previous keywords raised TypeError. Use log_file_prefix/log_group as
    # the sibling orchestration scripts do.
    self.logger = qs_logger.get_qs_logger(
        log_file_prefix="Connect All",
        log_group=self.reservation_id,
        log_category="Connect All")
# CloudShell L1 driver entry point -- it is not necessary to edit this file.
#
# This file is the entry point of PolatisPython.exe and is invoked by
# CloudShell as: PolatisPython.exe <listening port number>
import os
import sys

from cloudshell.core.logger.qs_logger import get_qs_logger
from l1_driver import l1_driver_main_loop
from polatis_l1_handler import PolatisL1Handler

# qs_logger requires LOG_PATH to be set before any logger is created; point
# it at the Logs directory next to the executable.
log_dir = os.path.join(os.path.dirname(sys.argv[0]), '..', 'Logs')
os.environ['LOG_PATH'] = log_dir

logger = get_qs_logger(log_group='Polatis', log_file_prefix='Polatis', log_category='INTERNAL')

# Concrete L1HandlerBase implementation for the Polatis switch.
handler = PolatisL1Handler(logger=logger)

# Serve CloudShell commands on the requested port forever - never returns.
listen_port = int(sys.argv[1])
l1_driver_main_loop(handler=handler, listen_port=listen_port, logger=logger)
def __init__(self):
    """Look up the active reservation and open a setup logger grouped by its id."""
    context_details = helpers.get_reservation_context_details()
    self.reservation_id = context_details.id
    self.logger = qs_logger.get_qs_logger(
        log_group=self.reservation_id,
        log_file_prefix="CloudShell Sandbox Setup",
        log_category='Setup',
    )