def setUpClass(cls):
    """Set up fixture-level logging and metrics before any tests run.

    Installs a root log handler once (only if none exists), attaches a
    per-fixture log handler, starts the fixture timer, and writes a
    banner to the fixture log.
    """
    super(BaseTestFixture, cls).setUpClass()

    # Master Config Provider
    # Set up the root log handler only if the root logger doesn't
    # already have one. (Idiomatic truthiness check instead of `== []`.)
    if not cclogging.getLogger('').handlers:
        cclogging.getLogger('').addHandler(
            cclogging.setup_new_cchandler('cc.master'))

    # Set up the fixture log, which is really just a copy of the master
    # log for the duration of this test fixture.
    cls.fixture_log = cclogging.getLogger('')
    cls._fixture_log_handler = cclogging.setup_new_cchandler(
        cclogging.get_object_namespace(cls))
    cls.fixture_log.addHandler(cls._fixture_log_handler)

    # TODO: Upgrade the metrics to be more unittest compatible.
    # Currently the unittest results are not available at the fixture
    # level, only at the test case or the test suite and runner level.
    # (Was a stray triple-quoted string; made a real comment so it is
    # not evaluated as a no-op expression statement.)

    # Setup the fixture level metrics
    cls.fixture_metrics = TestRunMetrics()
    cls.fixture_metrics.timer.start()

    # Report
    cls.fixture_log.info("{0}".format('=' * 56))
    cls.fixture_log.info("Fixture...: {0}".format(
        str(cclogging.get_object_namespace(cls))))
    cls.fixture_log.info("Created At: {0}".format(
        cls.fixture_metrics.timer.start_time))
    cls.fixture_log.info("{0}".format('=' * 56))
def setUpClass(cls):
    """Set up fixture-level logging and metrics before any tests run.

    Installs a root log handler once (only if none exists), attaches a
    per-fixture log handler, starts the fixture timer, and writes a
    banner to the fixture log.
    """
    super(BaseTestFixture, cls).setUpClass()

    # Master Config Provider
    # Set up the root log handler only if the root logger doesn't
    # already have one. (Idiomatic truthiness check instead of `== []`.)
    if not cclogging.getLogger('').handlers:
        cclogging.getLogger('').addHandler(
            cclogging.setup_new_cchandler('cc.master'))

    # Set up the fixture log, which is really just a copy of the master
    # log for the duration of this test fixture.
    cls.fixture_log = cclogging.getLogger('')
    cls._fixture_log_handler = cclogging.setup_new_cchandler(
        cclogging.get_object_namespace(cls))
    cls.fixture_log.addHandler(cls._fixture_log_handler)

    # TODO: Upgrade the metrics to be more unittest compatible.
    # Currently the unittest results are not available at the fixture
    # level, only at the test case or the test suite and runner level.
    # (Was a bare docstring-expression in the middle of the function;
    # converted to a comment.)

    # Setup the fixture level metrics
    cls.fixture_metrics = TestRunMetrics()
    cls.fixture_metrics.timer.start()

    # Report
    cls.fixture_log.info("{0}".format('=' * 56))
    cls.fixture_log.info("Fixture...: {0}".format(
        str(cclogging.get_object_namespace(cls))))
    cls.fixture_log.info("Created At: {0}".format(
        cls.fixture_metrics.timer.start_time))
    cls.fixture_log.info("{0}".format('=' * 56))
def __init__(self):
    """Bootstrap the brew runner: parse CLI args, finalize the test
    environment, initialize logging, and pre-build the suite list from
    the runfile.
    """
    self.print_mug()
    self.cl_args = ArgumentParser().parse_args()
    self.test_env = TestEnvManager(
        "", self.cl_args.config, test_repo_package_name="")
    # NOTE(review): self-assignment — presumably exists to trigger a
    # TestEnvManager property-setter side effect; confirm before
    # removing.
    self.test_env.test_data_directory = self.test_env.test_data_directory
    self.test_env.finalize()
    cclogging.init_root_log_handler()

    # This is where things diverge from the regular parallel runner:
    # extract the runfile contents.
    self._log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    self.datagen_start = time.time()
    self.run_file = BrewFile(self.cl_args.runfiles)

    # Log the runfile here so that it appears in the logs before any
    # tests do.
    self._log.debug("\n" + str(self.run_file))

    # TODO: Once the parallel_runner is changed to a yielding model,
    # change this to yielding brews instead of generating a list
    suite_builder = SuiteBuilder(
        testrepos=self.run_file.brew_modules(),
        dry_run=self.cl_args.dry_run,
        exit_on_error=True)
    self.suites = suite_builder.get_suites()

    self.print_configuration(self.test_env, brewfile=self.run_file)
def __init__(self, ip_address, username='******', password=None, key=None, connection_timeout=600, retry_interval=10): self.client_log = cclogging.getLogger( cclogging.get_object_namespace(self.__class__)) # Verify the IP address has a valid format try: IP(ip_address) except ValueError: raise ServerUnreachable(ip_address) if not self._is_instance_reachable(ip_address=ip_address, retry_interval=retry_interval, timeout=connection_timeout): raise ServerUnreachable(ip_address) self.ip_address = ip_address self.username = username self.password = password self.client = WinRMClient(username=username, password=password, host=ip_address) self.client.connect_with_retries()
def __init__(self, ip_address, username='******', password=None, key=None,
             connection_timeout=600, retry_interval=10):
    """Connect a WinRM client to a remote instance.

    Raises:
        InvalidAddressFormat: if ip_address is not a parseable address.
        WinRMConnectionException: if the connection cannot be made.
    """
    self.client_log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))

    # Reject addresses that do not parse as IPv4/IPv6 before pinging.
    try:
        IP(ip_address)
    except ValueError:
        raise InvalidAddressFormat(ip_address)

    # Block until the server answers pings (or the timeout elapses).
    PingClient.ping_until_reachable(
        ip_address, timeout=connection_timeout,
        interval_time=retry_interval)

    self.ip_address = ip_address
    self.username = username
    self.password = password

    self.client = WinRMClient(username=username, password=password,
                              host=ip_address)
    if not self.client.connect_with_retries():
        raise WinRMConnectionException(ip_address=ip_address)
def __init__(self, ip_address=None, password=None, os_distro=None,
             config=None, username=None, server_id=None):
    """Thin facade over the distro-specific instance client produced
    by InstanceClientFactory; keeps its own logger for client-level
    messages.
    """
    self.client_log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    self._client = InstanceClientFactory.get_instance_client(
        ip_address=ip_address, password=password, os_distro=os_distro,
        username=username, server_id=server_id, config=config)
def __init__(self, ip_address=None, server_id=None, username=None,
             password=None, config=None, os_distro=None, key=None):
    """SSH-backed remote instance client.

    Pings the server until it is reachable (bounded by
    config.connection_timeout), then authenticates an SSHBaseClient.

    Raises:
        ServerUnreachable: if no address is given or pings time out.
        SshConnectionException: if SSH authentication fails.
    """
    self.client_log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    ssh_timeout = config.connection_timeout
    if ip_address is None:
        raise ServerUnreachable("None")
    self.ip_address = ip_address
    self.username = username
    if self.username is None:
        self.username = '******'
    self.password = password
    self.server_id = server_id

    # Poll with ping until the server answers or the timeout elapses.
    start = int(time.time())
    reachable = False
    while not reachable:
        reachable = PingClient.ping(ip_address,
                                    config.ip_address_version_for_ssh)
        if reachable:
            # Fixed: the original slept one full retry interval even
            # after the server had already answered the ping.
            break
        time.sleep(config.connection_retry_interval)
        if int(time.time()) - start >= config.connection_timeout:
            raise ServerUnreachable(ip_address)

    self.ssh_client = SSHBaseClient(self.ip_address, self.username,
                                    self.password, timeout=ssh_timeout,
                                    key=key)
    if not self.ssh_client.test_connection_auth():
        # SECURITY(review): this logs the password in clear text;
        # consider redacting it.
        self.client_log.error(
            "Ssh connection failed for: IP:{0} Username:{1} "
            "Password: {2}".format(self.ip_address, self.username,
                                   self.password))
        raise SshConnectionException("ssh connection failed")
def __init__(self, ip_address, username='******', password=None, key=None,
             connection_timeout=600, retry_interval=10):
    """Build a WinRM client for a remote instance, failing fast on a
    malformed address, an unreachable host, or a failed connection.

    Raises:
        InvalidAddressFormat: if ip_address does not parse.
        WinRMConnectionException: if connecting (with retries) fails.
    """
    self.client_log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))

    # Address must be a valid IPv4/IPv6 literal.
    try:
        IP(ip_address)
    except ValueError:
        raise InvalidAddressFormat(ip_address)

    # Wait for the host to answer pings before attempting WinRM.
    PingClient.ping_until_reachable(
        ip_address, timeout=connection_timeout,
        interval_time=retry_interval)

    self.ip_address = ip_address
    self.username = username
    self.password = password
    self.client = WinRMClient(
        username=username, password=password, host=ip_address)
    if not self.client.connect_with_retries():
        raise WinRMConnectionException(ip_address=ip_address)
def deserialize(cls, serialized_str, format_type):
    """Deserialize `serialized_str` into a model object via the
    `_<format_type>_to_obj` classmethod.

    Returns the model object, or None when the input is empty or
    deserialization fails (failures are logged, never raised).
    """
    cls._log = cclogging.getLogger(cclogging.get_object_namespace(cls))
    model_object = None
    deserialization_exception = None
    # `len(...) > 0` was redundant with truthiness.
    if serialized_str:
        try:
            deserialize_method = '_{0}_to_obj'.format(format_type)
            model_object = getattr(cls, deserialize_method)(serialized_str)
        except Exception as exc:
            # Bind to a separate name: in Python 3 the `except ... as`
            # target is deleted when the handler exits, which made the
            # `is not None` check below raise NameError.
            deserialization_exception = exc
            cls._log.exception(exc)

    # Try to log string and format_type if deserialization broke
    if deserialization_exception is not None:
        try:
            # NOTE(review): .decode() assumes bytes input here —
            # a str would raise (caught and logged below); confirm.
            cls._log.debug(
                "Deserialization Error: Attempted to deserialize type"
                " using type: {0}".format(
                    format_type.decode(encoding='UTF-8',
                                       errors='ignore')))
            cls._log.debug(
                "Deserialization Error: Unable to deserialize the "
                "following:\n{0}".format(
                    serialized_str.decode(encoding='UTF-8',
                                          errors='ignore')))
        except Exception as exception:
            cls._log.exception(exception)
            cls._log.debug("Unable to log information regarding the "
                           "deserialization exception")
    return model_object
def decorator(cls):
    """Creates classes with variables named after datasets.

    Names of classes are equal to (class_name without fixture) + ds_name
    """
    module = import_module(cls.__module__)
    cls = DataDrivenFixture(cls)
    class_name = re.sub("fixture", "", cls.__name__, flags=re.IGNORECASE)

    if not re.match(".*fixture", cls.__name__, flags=re.IGNORECASE):
        cls.__name__ = "{0}Fixture".format(cls.__name__)

    unittest_driver_config = DriverConfig()

    def _make_method(original_method, tag_names):
        """Build the tagged wrapper with original_method bound NOW.

        Fixed: the original defined the wrapper inline in the member
        loop and called the loop variable from the wrapper body, so
        every generated method late-bound to the LAST test method seen.
        Binding through this factory's parameter freezes each method.
        """
        @functools.wraps(original_method)
        @tags(*tag_names)
        def new_method(*args, **kwargs):
            return original_method(*args, **kwargs)
        return new_method

    for i, dataset_list in enumerate(dataset_lists):
        if all([not dataset_list,
                not unittest_driver_config.ignore_empty_datasets,
                not getattr(cls, '__unittest_skip__', False)]):
            # The DSL did not generate anything
            class_name_new = "{class_name}_{exception}_{index}".format(
                class_name=class_name, exception="DSL_EXCEPTION", index=i)
            # We are creating a new, special class here that will allow
            # us to force an error during test set up that contains
            # information useful for triaging the DSL failure.
            # Additionally this should surface any tests that did not
            # run due to the DSL issue.
            new_cls = DataDrivenFixture(_FauxDSLFixture)
            new_class = type(class_name_new, (new_cls,), {})
            dsl_namespace = cclogging.get_object_namespace(
                dataset_list.__class__)
            test_ls = [test for test in dir(cls) if test.startswith(
                'test_') or test.startswith(DATA_DRIVEN_TEST_PREFIX)]
            new_class.dsl_namespace = dsl_namespace
            new_class.original_test_list = test_ls
            new_class.__module__ = cls.__module__
            setattr(module, class_name_new, new_class)

        for dataset in dataset_list:
            class_name_new = "{0}_{1}".format(class_name, dataset.name)
            new_class = type(class_name_new, (cls,), dataset.data)
            new_class.__module__ = cls.__module__
            if dataset.metadata["tags"] or dataset.metadata["decorators"]:
                # Find all test methods, add tags and other decorators,
                # then set the appropriate test method on the new class
                for member_name, member in inspect.getmembers(cls):
                    if member_name.startswith("test_"):
                        new_method = _make_method(
                            member, dataset.metadata["tags"])
                        for decorator_ in dataset.metadata["decorators"]:
                            new_method = decorator_(new_method)
                        setattr(new_class, member_name, new_method)
            setattr(module, class_name_new, new_class)
    return cls
def deserialize(cls, serialized_str, format_type):
    """Deserialize `serialized_str` via the `_<format_type>_to_obj`
    classmethod; return None (after logging) on any failure.
    """
    cls._log = cclogging.getLogger(
        cclogging.get_object_namespace(cls))
    model_object = None
    deserialization_exception = None
    # `len(...) > 0` was redundant with truthiness.
    if serialized_str:
        try:
            deserialize_method = '_{0}_to_obj'.format(format_type)
            model_object = getattr(cls, deserialize_method)(serialized_str)
        except Exception as exc:
            # Bind to a separate name: Python 3 deletes the
            # `except ... as` target when the handler exits, so the
            # `is not None` check below raised NameError.
            deserialization_exception = exc
            cls._log.exception(exc)

    # Try to log string and format_type if deserialization broke
    if deserialization_exception is not None:
        try:
            cls._log.debug(
                "Deserialization Error: Attempted to deserialize type"
                " using type: {0}".format(format_type.decode(
                    encoding='UTF-8', errors='ignore')))
            cls._log.debug(
                "Deserialization Error: Unable to deserialize the "
                "following:\n{0}".format(serialized_str.decode(
                    encoding='UTF-8', errors='ignore')))
        except Exception as exception:
            cls._log.exception(exception)
            cls._log.debug(
                "Unable to log information regarding the "
                "deserialization exception")
    return model_object
def deserialize(cls, serialized_str):
    """Deserialize a prettytable-formatted string into a model object.

    Runs the model's optional `_postprocess()` hook afterwards.
    Returns None on failure; all errors are logged rather than raised.
    """
    cls._log = cclogging.getLogger(cclogging.get_object_namespace(cls))
    model_object = None
    deserialization_exception = None
    try:
        model_object = cls._prettytable_str_to_obj(serialized_str)
    except Exception as exc:
        # Bind to a separate name: in Python 3 the `except ... as`
        # target is unbound after the handler, which broke the
        # `is not None` check below with a NameError.
        deserialization_exception = exc
        cls._log.exception(exc)

    # Best-effort post-processing hook on the deserialized model.
    try:
        if hasattr(model_object, '_postprocess'):
            model_object._postprocess()
    except Exception as post_deserialization_exception:
        cls._log.error("Unable to run post-deserialization process")
        cls._log.exception(post_deserialization_exception)

    if deserialization_exception is not None:
        try:
            cls._log.debug(
                "Deserialization Error: Attempted to deserialize string "
                "as a prettytable:")
            cls._log.debug("\n{0}".format(serialized_str.decode(
                encoding='UTF-8', errors='ignore')))
        except Exception as exception:
            cls._log.exception(exception)
            cls._log.warning(
                "Unable to log information regarding the deserialization "
                "exception")
    return model_object
def __init__(self):
    """Set up the brew runner from CLI arguments: finalize the test
    environment, turn on logging, read the runfile, and build the
    suite list up front.
    """
    self.print_mug()
    self.cl_args = ArgumentParser().parse_args()
    self.test_env = TestEnvManager("", self.cl_args.config,
                                   test_repo_package_name="")
    # NOTE(review): self-assignment — likely relies on a property
    # setter side effect in TestEnvManager; confirm before removing.
    self.test_env.test_data_directory = self.test_env.test_data_directory
    self.test_env.finalize()
    cclogging.init_root_log_handler()

    # This is where things diverge from the regular parallel runner:
    # extract the runfile contents.
    self._log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    self.datagen_start = time.time()
    self.run_file = BrewFile(self.cl_args.runfiles)

    # Log the runfile here so that it appears in the logs before any
    # tests do.
    self._log.debug("\n" + str(self.run_file))

    # TODO: Once the parallel_runner is changed to a yielding model,
    # change this to yielding brews instead of generating a list
    builder = SuiteBuilder(testrepos=self.run_file.brew_modules(),
                           dry_run=self.cl_args.dry_run,
                           exit_on_error=True)
    self.suites = builder.get_suites()
    self.print_configuration(self.test_env, brewfile=self.run_file)
def __init__(self, config_file_path, section_name):
    """Layered config section: environment-variable overrides sit in
    front of a config-file data source for `section_name`."""
    self._log = cclogging.logging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    self._section_name = section_name
    self._override = EnvironmentVariableDataSource(section_name)
    self._data_source = ConfigParserDataSource(
        config_file_path, section_name)
def tearDownClass(cls):
    """Finalize fixture metrics, emit the summary banner, and detach
    the fixture-level log handler before the superclass teardown."""
    # Stop the fixture timer and derive the overall result.
    cls.fixture_metrics.timer.stop()
    cls.fixture_metrics.result = (
        TestResultTypes.PASSED
        if cls.fixture_metrics.total_passed ==
        cls.fixture_metrics.total_tests
        else TestResultTypes.FAILED)

    # Report
    cls.fixture_log.info("{0}".format('=' * 56))
    cls.fixture_log.info("Fixture.....: {0}".format(
        str(cclogging.get_object_namespace(cls))))
    cls.fixture_log.info("Result......: {0}".format(
        cls.fixture_metrics.result))
    cls.fixture_log.info("Start Time..: {0}".format(
        cls.fixture_metrics.timer.start_time))
    cls.fixture_log.info("Elapsed Time: {0}".format(
        cls.fixture_metrics.timer.get_elapsed_time()))
    cls.fixture_log.info("Total Tests.: {0}".format(
        cls.fixture_metrics.total_tests))
    cls.fixture_log.info("Total Passed: {0}".format(
        cls.fixture_metrics.total_passed))
    cls.fixture_log.info("Total Failed: {0}".format(
        cls.fixture_metrics.total_failed))
    cls.fixture_log.info("{0}".format('=' * 56))

    # Remove the fixture log handler from the fixture log.
    cls.fixture_log.removeHandler(cls._fixture_log_handler)

    # Call super teardown after we've finished our additions.
    super(BaseTestFixture, cls).tearDownClass()
def __init__(self, ip_address, username='******', password=None, key=None,
             connection_timeout=600, retry_interval=10):
    """WinRM instance client: validate the address, wait for the host
    to come up, then connect with retries.

    NOTE(review): an address that fails to parse raises
    ServerUnreachable here, while sibling clients raise
    InvalidAddressFormat — confirm which one callers expect.
    """
    self.client_log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))

    # Address must parse as IPv4/IPv6 before we try to reach it.
    try:
        IP(ip_address)
    except ValueError:
        raise ServerUnreachable(ip_address)

    reachable = self._is_instance_reachable(
        ip_address=ip_address, retry_interval=retry_interval,
        timeout=connection_timeout)
    if not reachable:
        raise ServerUnreachable(ip_address)

    self.ip_address = ip_address
    self.username = username
    self.password = password
    self.client = WinRMClient(
        username=username, password=password, host=ip_address)
    if not self.client.connect_with_retries():
        raise WinRMConnectionException(ip_address=ip_address)
def __init__(self, ip_address=None, password=None, os_distro=None,
             config=None, username=None, server_id=None, key=None):
    """Thin facade: delegate all work to the distro-appropriate
    instance client built by InstanceClientFactory (key forwarded for
    SSH-based clients)."""
    self.client_log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    self._client = InstanceClientFactory.get_instance_client(
        ip_address=ip_address, password=password, os_distro=os_distro,
        username=username, server_id=server_id, config=config, key=key)
def tearDownClass(cls):
    """Stop the fixture timer, compute the pass/fail result, log the
    fixture summary, and remove the fixture log handler."""
    # Kill the timers and calculate the metrics objects.
    cls.fixture_metrics.timer.stop()
    all_passed = (cls.fixture_metrics.total_passed ==
                  cls.fixture_metrics.total_tests)
    if all_passed:
        cls.fixture_metrics.result = TestResultTypes.PASSED
    else:
        cls.fixture_metrics.result = TestResultTypes.FAILED

    # Report
    cls.fixture_log.info("{0}".format('=' * 56))
    cls.fixture_log.info("Fixture.....: {0}".format(
        str(cclogging.get_object_namespace(cls))))
    cls.fixture_log.info("Result......: {0}".format(
        cls.fixture_metrics.result))
    cls.fixture_log.info("Start Time..: {0}".format(
        cls.fixture_metrics.timer.start_time))
    cls.fixture_log.info("Elapsed Time: {0}".format(
        cls.fixture_metrics.timer.get_elapsed_time()))
    cls.fixture_log.info("Total Tests.: {0}".format(
        cls.fixture_metrics.total_tests))
    cls.fixture_log.info("Total Passed: {0}".format(
        cls.fixture_metrics.total_passed))
    cls.fixture_log.info("Total Failed: {0}".format(
        cls.fixture_metrics.total_failed))
    cls.fixture_log.info("{0}".format('=' * 56))

    # Remove the fixture log handler from the fixture log.
    cls.fixture_log.removeHandler(cls._fixture_log_handler)

    # Call super teardown after we've finished our additions.
    super(BaseTestFixture, cls).tearDownClass()
def deserialize(cls, serialized_str):
    """Deserialize a prettytable-formatted string into a model object,
    then run the optional `_postprocess()` hook.

    Returns None on failure; errors are logged rather than raised.
    """
    cls._log = cclogging.getLogger(cclogging.get_object_namespace(cls))
    model_object = None
    deserialization_exception = None
    try:
        model_object = cls._prettytable_str_to_obj(serialized_str)
    except Exception as exc:
        # Bind to a separate name: Python 3 unbinds the
        # `except ... as` target after the handler, so the
        # `is not None` check below raised NameError.
        deserialization_exception = exc
        cls._log.exception(exc)

    # Best-effort post-processing hook on the deserialized model.
    try:
        if hasattr(model_object, '_postprocess'):
            model_object._postprocess()
    except Exception as post_deserialization_exception:
        cls._log.error("Unable to run post-deserialization process")
        cls._log.exception(post_deserialization_exception)

    if deserialization_exception is not None:
        try:
            cls._log.debug(
                "Deserialization Error: Attempted to deserialize string "
                "as a prettytable:")
            cls._log.debug("\n{0}".format(
                serialized_str.decode(encoding='UTF-8', errors='ignore')))
        except Exception as exception:
            cls._log.exception(exception)
            cls._log.warning(
                "Unable to log information regarding the deserialization "
                "exception")
    return model_object
def __init__(self, config_file_path, section_name):
    """Config section backed by a config file, with environment
    variables taking precedence as overrides."""
    self._log = cclogging.logging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    self._section_name = section_name
    self._override = EnvironmentVariableDataSource(section_name)
    self._data_source = ConfigParserDataSource(
        config_file_path, section_name)
def __init__(self, ip_address=None, username='******', password=None,
             key=None, connection_timeout=600, retry_interval=10):
    """Linux SSH instance client.

    Validates the address, pings until reachable, then opens an SSH
    session (key-based when `key` is given, password otherwise).

    Raises:
        InvalidAddressFormat: if ip_address is not a valid address.
        SshConnectionException: if the SSH session cannot be opened
            within connection_timeout seconds.
    """
    self.client_log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    self.ip_address = ip_address
    self.username = username
    self.password = password
    self.connection_timeout = connection_timeout

    # Verify the IP address has a valid format
    try:
        IP(ip_address)
    except ValueError:
        raise InvalidAddressFormat(ip_address)

    # Verify the server can be pinged before attempting to connect
    PingClient.ping_until_reachable(
        ip_address, timeout=connection_timeout,
        interval_time=retry_interval)

    # NOTE(review): strategy tests `is not None` while agent use tests
    # truthiness, so key='' selects key auth but no agent — presumed
    # intentional; confirm.
    if key is not None:
        auth_strategy = SSHAuthStrategy.KEY_STRING
    else:
        auth_strategy = SSHAuthStrategy.PASSWORD
    # Simplified from the set-True-then-reset-to-False pattern:
    # use the SSH agent only when a key was actually supplied.
    allow_agent = bool(key)

    self.ssh_client = SSHClient(
        username=self.username, password=self.password,
        host=self.ip_address, tcp_timeout=20,
        auth_strategy=auth_strategy, look_for_keys=False, key=key,
        allow_agent=allow_agent)
    self.ssh_client.connect_with_timeout(
        cooldown=20, timeout=connection_timeout)
    if not self.ssh_client.is_connected():
        message = ('SSH timeout after {timeout} seconds: '
                   'Could not connect to {ip_address}.')
        raise SshConnectionException(message.format(
            timeout=connection_timeout, ip_address=ip_address))
def __init__(self, ip_address=None, server_id=None, username=None,
             password=None, config=None, os_distro=None, key=None):
    """SSH-behaviors remote instance client.

    Pings the server until reachable (bounded by
    config.connection_timeout), then opens an SSH session.

    Raises:
        ServerUnreachable: if no address is given or pings time out.
        SshConnectionException: if the SSH connect times out.
    """
    self.client_log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    ssh_timeout = config.connection_timeout
    if ip_address is None:
        raise ServerUnreachable("None")
    self.ip_address = ip_address
    self.username = username
    if self.username is None:
        self.username = '******'
    self.password = password
    self.server_id = server_id

    # Poll with ping until the server answers or the timeout elapses.
    start = int(time.time())
    reachable = False
    while not reachable:
        reachable = PingClient.ping(ip_address,
                                    config.ip_address_version_for_ssh)
        if reachable:
            # Fixed: the original slept one extra retry interval even
            # after the server had already answered.
            break
        time.sleep(config.connection_retry_interval)
        if int(time.time()) - start >= config.connection_timeout:
            raise ServerUnreachable(ip_address)

    if key is not None:
        auth_strategy = SSHAuthStrategy.KEY_STRING
    else:
        auth_strategy = SSHAuthStrategy.PASSWORD
    self.ssh_client = SSHBehaviors(
        username=self.username, password=self.password,
        host=self.ip_address, tcp_timeout=20,
        auth_strategy=auth_strategy, look_for_keys=False, key=key)
    self.ssh_client.connect_with_timeout(cooldown=20, timeout=ssh_timeout)
    if not self.ssh_client.is_connected():
        message = ('SSH timeout after {timeout} seconds: '
                   'Could not connect to {ip_address}.')
        raise SshConnectionException(
            message.format(timeout=ssh_timeout, ip_address=ip_address))
def __init__(self, config_file_path, section_name):
    """JSON-backed config data source.

    Raises:
        NonExistentConfigPathError: if config_file_path doesn't exist.
        Exception: re-raised (with its original traceback) when the
            file cannot be parsed as JSON; logged first.
    """
    self._log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    self._section_name = section_name

    # Check if file path exists
    if not os.path.exists(config_file_path):
        msg = "Could not verify the existence of config file at {0}".format(
            config_file_path)
        raise NonExistentConfigPathError(msg)

    with open(config_file_path) as config_file:
        config_data = config_file.read()

    try:
        self._data_source = json.loads(config_data)
    except Exception as exception:
        self._log.exception(exception)
        # Bare raise keeps the original traceback; `raise exception`
        # rebuilt it from here (and resets it entirely on Python 2).
        raise
def __init__(self):
    """Unittest runner bootstrap: parse CLI args, finalize the test
    environment, and initialize root logging."""
    self.cl_args = _UnittestRunnerCLI().get_cl_args()
    self.test_env = TestEnvManager(
        self.cl_args.product, self.cl_args.config,
        test_repo_package_name=self.cl_args.test_repo)

    # CLI overrides of environment defaults (e.g. the data directory)
    # must be applied before finalize() is called.
    self.test_env.test_data_directory = (
        self.test_env.test_data_directory or self.cl_args.data_directory)
    self.test_env.finalize()
    cclogging.init_root_log_handler()

    self._log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    self.product = self.cl_args.product
    self.print_mug_and_paths(self.test_env)
def __init__(self, config_file_path, section_name):
    """ConfigParser-backed data source for `section_name`.

    Raises:
        NonExistentConfigPathError: if config_file_path doesn't exist.
        Exception: re-raised (with its original traceback) if the file
            cannot be parsed; logged first.
    """
    self._log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    self._data_source = ConfigParser.SafeConfigParser()
    self._section_name = section_name

    # Check if the path exists
    if not os.path.exists(config_file_path):
        msg = "Could not verify the existence of config file at {0}".format(
            config_file_path)
        raise NonExistentConfigPathError(msg)

    # Read the file in and turn it into a SafeConfigParser instance
    try:
        self._data_source.read(config_file_path)
    except Exception as exception:
        self._log.exception(exception)
        # Bare raise keeps the original traceback; `raise exception`
        # rebuilt it from here (and resets it entirely on Python 2).
        raise
def __init__(self):
    """Set up the unittest runner: CLI args, finalized test
    environment, and root logging."""
    self.cl_args = _UnittestRunnerCLI().get_cl_args()
    test_env = TestEnvManager(
        self.cl_args.product, self.cl_args.config,
        test_repo_package_name=self.cl_args.test_repo)

    # If something in the cl_args is supposed to override a default —
    # say, the data directory — it needs to happen before finalize()
    # is called.
    test_env.test_data_directory = (test_env.test_data_directory or
                                    self.cl_args.data_directory)
    test_env.finalize()
    self.test_env = test_env
    cclogging.init_root_log_handler()

    self._log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    self.product = self.cl_args.product
    self.print_mug_and_paths(self.test_env)
def decorator(cls):
    """Creates classes with variables named after datasets.

    Names of classes are equal to (class_name without fixture) + ds_name
    """
    target_module = import_module(cls.__module__)
    cls = DataDrivenFixture(cls)
    base_name = re.sub("fixture", "", cls.__name__, flags=re.IGNORECASE)

    if not re.match(".*fixture", cls.__name__, flags=re.IGNORECASE):
        cls.__name__ = "{0}Fixture".format(cls.__name__)

    driver_config = DriverConfig()

    for index, dataset_list in enumerate(dataset_lists):
        if (not dataset_list and
                not driver_config.ignore_empty_datasets):
            # The DSL produced no datasets. Register a special fixture
            # that forces an error during test set up carrying the
            # information needed to triage the DSL failure, so the
            # tests that never ran are surfaced rather than silently
            # skipped.
            generated_name = "{class_name}_{exception}_{index}".format(
                class_name=base_name, exception="DSL_EXCEPTION",
                index=index)
            faux_base = DataDrivenFixture(_FauxDSLFixture)
            generated_class = type(generated_name, (faux_base,), {})
            generated_class.dsl_namespace = cclogging.get_object_namespace(
                dataset_list.__class__)
            generated_class.original_test_list = [
                name for name in dir(cls)
                if name.startswith('test_') or
                name.startswith(DATA_DRIVEN_TEST_PREFIX)]
            generated_class.__module__ = cls.__module__
            setattr(target_module, generated_name, generated_class)

        # One concrete fixture class per dataset, carrying the
        # dataset's data as class attributes.
        for dataset in dataset_list:
            generated_name = "{0}_{1}".format(base_name, dataset.name)
            generated_class = type(generated_name, (cls,), dataset.data)
            generated_class.__module__ = cls.__module__
            setattr(target_module, generated_name, generated_class)
    return cls
def __init__(self, config_file_path, section_name):
    """configparser-backed data source for `section_name`.

    Raises:
        NonExistentConfigPathError: if config_file_path doesn't exist.
        Exception: re-raised (with its original traceback) if the file
            cannot be parsed; logged first.
    """
    self._log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    # SafeConfigParser is a deprecated alias of ConfigParser in
    # Python 3 (removed in 3.12); use the canonical class.
    self._data_source = configparser.ConfigParser()
    self._section_name = section_name

    # Check if the path exists
    if not os.path.exists(config_file_path):
        msg = 'Could not verify the existence of config file at {0}'\
            .format(config_file_path)
        raise NonExistentConfigPathError(msg)

    # Read the file in and populate the parser instance.
    try:
        self._data_source.read(config_file_path)
    except Exception as exception:
        self._log.exception(exception)
        # Bare raise keeps the original traceback; `raise exception`
        # rebuilt it from here.
        raise
def __init__(self, config_file_path, section_name):
    """JSON-backed config data source.

    Raises:
        NonExistentConfigPathError: if config_file_path doesn't exist.
        Exception: re-raised (with its original traceback) when the
            file cannot be parsed as JSON; logged first.
    """
    self._log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    self._section_name = section_name

    # Check if file path exists
    if not os.path.exists(config_file_path):
        msg = 'Could not verify the existence of config file at {0}'\
            .format(config_file_path)
        raise NonExistentConfigPathError(msg)

    with open(config_file_path) as config_file:
        config_data = config_file.read()

    try:
        self._data_source = json.loads(config_data)
    except Exception as exception:
        self._log.exception(exception)
        # Bare raise keeps the original traceback; `raise exception`
        # rebuilt it from here (and resets it entirely on Python 2).
        raise
def __init__(self, use_proxy=True, proxy_os=LINUX, ip_version=4,
             logger=None, debug=False):
    """Proxy Server Constructor

    @param use_proxy: (Boolean) - Is there a proxy/bastion that should
        execute commands or be used as a hop to another address?
        True - Yes; False - No, execute cmds from the localhost.
    @param proxy_os: (ENUM) - Support for multiple OSs. A hook for
        future functionality. Only supports Linux currently.
    @param ip_version: Version to use by default, if utilities differ
        across IP versions.
    @param logger: Logging functionality.
    @param debug: (Boolean) Used for debugging system and mixin
        utilities.
    @return: None
    """
    self.use_proxy = use_proxy
    self.debug = debug
    self.logger = logger or cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))

    # Proxy/hop bookkeeping — nothing is connected yet.
    self._proxy_svr = None
    self._proxy_ip = None
    self._proxy_os = proxy_os
    self._ip_version = ip_version
    self.connection = None
    self.session_password = None
    self.prompt_pattern = self.PROMPT_PATTERN
    self.last_response = None
    # Track the IPs (hops) currently connected to.
    self._conn_path = []
    # Delay between commands when iterating a list of commands.
    self._pexpect_cmd_delay = self.STANDARD_CMD_DELAY
def __init__(self, ip_address=None, username='******', password=None,
             key=None, connection_timeout=600, retry_interval=10):
    """Linux SSH instance client: validate the address, ping until
    reachable, then open an SSH session.

    Raises:
        InvalidAddressFormat: if ip_address is not a valid address.
        SshConnectionException: if the SSH session cannot be opened
            within connection_timeout seconds.
    """
    self.client_log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    self.ip_address = ip_address
    self.username = username
    self.password = password
    self.connection_timeout = connection_timeout

    # Verify the IP address has a valid format
    try:
        IP(ip_address)
    except ValueError:
        raise InvalidAddressFormat(ip_address)

    # Verify the server can be pinged before attempting to connect
    PingClient.ping_until_reachable(
        ip_address, timeout=connection_timeout,
        interval_time=retry_interval)

    if key is not None:
        auth_strategy = SSHAuthStrategy.KEY_STRING
    else:
        auth_strategy = SSHAuthStrategy.PASSWORD
    # Simplified from the set-True-then-reset-to-False pattern:
    # use the SSH agent only when a key was actually supplied.
    allow_agent = bool(key)

    self.ssh_client = SSHClient(
        username=self.username, password=self.password,
        host=self.ip_address, tcp_timeout=20,
        auth_strategy=auth_strategy, look_for_keys=False, key=key,
        allow_agent=allow_agent)
    self.ssh_client.connect_with_timeout(
        cooldown=20, timeout=connection_timeout)
    if not self.ssh_client.is_connected():
        message = ('SSH timeout after {timeout} seconds: '
                   'Could not connect to {ip_address}.')
        raise SshConnectionException(message.format(
            timeout=connection_timeout, ip_address=ip_address))
def __init__(self, ip_address=None, username='******', password=None,
             key=None, connection_timeout=600, retry_interval=10):
    """Open an SSH session to `ip_address`, first waiting for the host
    to answer pings.

    Raises:
        ServerUnreachable: if no address is given or pings time out.
        SshConnectionException: if the SSH connect times out.
    """
    self.client_log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    if ip_address is None:
        raise ServerUnreachable("None")
    self.ip_address = ip_address
    self.username = username
    self.password = password
    self.connection_timeout = connection_timeout

    # Poll with ping until the server answers or the timeout elapses.
    poll_start = int(time.time())
    while True:
        if PingClient.ping(ip_address):
            break
        time.sleep(retry_interval)
        if int(time.time()) - poll_start >= connection_timeout:
            raise ServerUnreachable(
                'Could not reach the server at {ip_address}'.format(
                    ip_address=ip_address))

    auth_strategy = (SSHAuthStrategy.KEY_STRING if key is not None
                     else SSHAuthStrategy.PASSWORD)
    self.ssh_client = SSHClient(
        username=self.username, password=self.password,
        host=self.ip_address, tcp_timeout=20,
        auth_strategy=auth_strategy, look_for_keys=False, key=key)
    self.ssh_client.connect_with_timeout(
        cooldown=20, timeout=connection_timeout)
    if not self.ssh_client.is_connected():
        message = ('SSH timeout after {timeout} seconds: '
                   'Could not connect to {ip_address}.')
        raise SshConnectionException(message.format(
            timeout=connection_timeout, ip_address=ip_address))
def __call__(self, *args):
    """Memoizing wrapper: return the cached result when the argument
    tuple is hashable and already seen; otherwise call through (and
    cache when possible)."""
    log_name = "{0}.{1}".format(
        cclogging.get_object_namespace(args[0]), self.__name__)
    self._start_logging(log_name)

    # EAFP: probe hashability of the whole argument tuple up front.
    try:
        hash(args)
    except TypeError:
        # Unhashable arguments — compute without caching.
        value = self.func(*args)
        message = "Uncacheable. Data returned"
    else:
        if args in self.cache:
            value = self.cache[args]
            message = "Cached data returned."
        else:
            value = self.cache[args] = self.func(*args)
            message = "Data cached for future calls"

    self.func._log.debug(message)
    self._stop_logging()
    return value
def __call__(self, *args):
    """Memoizing wrapper around self.func.

    Results are cached by the positional-argument tuple; calls whose
    arguments cannot be hashed are computed but not cached.
    """
    self._start_logging(cclogging.get_object_namespace(args[0]))
    # Fixed: `isinstance(args, collections.Hashable)` always passed,
    # because `args` is a tuple and tuples are Hashable instances even
    # when they CONTAIN unhashable items (e.g. a list) — so the guard
    # never fired and the cache lookup raised TypeError. Hashing the
    # tuple is the reliable test; `collections.Hashable` is also gone
    # in Python 3.10+ (moved to collections.abc).
    try:
        hash(args)
    except TypeError:
        # uncacheable. a list, for instance.
        # better to not cache than blow up.
        value = self.func(*args)
        self.func._log.debug("Uncacheable. Data returned")
        self._stop_logging()
        return value
    if args in self.cache:
        self.func._log.debug("Cached data returned.")
        self._stop_logging()
        return self.cache[args]
    value = self.func(*args)
    self.cache[args] = value
    self.func._log.debug("Data cached for future calls")
    self._stop_logging()
    return value
def __init__(self, ip_address=None, username='******', password=None,
             key=None, connection_timeout=600, retry_interval=10):
    """Ping `ip_address` until it answers, then open an SSH session
    using key or password authentication.

    Raises:
        ServerUnreachable: if no address is given or pings time out.
        SshConnectionException: if the SSH connect times out.
    """
    self.client_log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    if ip_address is None:
        raise ServerUnreachable("None")
    self.ip_address = ip_address
    self.username = username
    self.password = password
    self.connection_timeout = connection_timeout

    # Poll with ping until the server answers or the timeout elapses.
    poll_start = int(time.time())
    while True:
        if PingClient.ping(ip_address):
            break
        time.sleep(retry_interval)
        if int(time.time()) - poll_start >= connection_timeout:
            raise ServerUnreachable(ip_address)

    auth_strategy = (SSHAuthStrategy.KEY_STRING if key is not None
                     else SSHAuthStrategy.PASSWORD)
    self.ssh_client = SSHClient(
        username=self.username, password=self.password,
        host=self.ip_address, tcp_timeout=20,
        auth_strategy=auth_strategy, look_for_keys=False, key=key)
    self.ssh_client.connect_with_timeout(
        cooldown=20, timeout=connection_timeout)
    if not self.ssh_client.is_connected():
        message = ('SSH timeout after {timeout} seconds: '
                   'Could not connect to {ip_address}.')
        raise SshConnectionException(message.format(
            timeout=connection_timeout, ip_address=ip_address))
def __init__(self, ip_address, server_id, os_distro, username, password):
    """Build a legacy SSH-backed client and verify authentication.

    Raises:
        ServerUnreachable: if no ``ip_address`` was supplied.
        SshConnectionException: if the SSH auth test fails.
    """
    # Fixed: the logger lookup previously used a backslash line
    # continuation (PEP 8 discourages it; parentheses suffice).
    self.client_log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    ssh_timeout = 600
    if ip_address is None:
        raise ServerUnreachable("None")
    self.ip_address = ip_address
    self.username = username
    if self.username is None:
        self.username = '******'
    self.password = password
    self.server_id = server_id
    self.ssh_client = SSHBaseClient(
        self.ip_address, self.username, self.password,
        timeout=ssh_timeout)
    if not self.ssh_client.test_connection_auth():
        # Security fix: the previous message interpolated the plaintext
        # password into the log; credentials must not be written to
        # logs, so the password is no longer included.
        self.client_log.error(
            "Ssh connection failed for: IP:{0} Username:{1}".format(
                self.ip_address, self.username))
        raise SshConnectionException("ssh connection failed")
def __init__(self, ip_address=None, server_id=None, username=None,
             password=None, config=None, os_distro=None, key=None):
    """Build an SSH-behaviors client for a remote server.

    Waits (up to ``config.connection_timeout`` seconds, probing every
    ``config.connection_retry_interval`` seconds) for the host to
    answer a ping, then connects over SSH using key-string auth when
    ``key`` is given, password auth otherwise.

    Raises:
        ServerUnreachable: no address given, or ping never succeeded.
        SshConnectionException: ping succeeded but SSH never connected.
    """
    self.client_log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    ssh_timeout = config.connection_timeout
    if ip_address is None:
        raise ServerUnreachable("None")
    self.ip_address = ip_address
    self.username = username
    if self.username is None:
        self.username = '******'
    self.password = password
    self.server_id = server_id
    start = int(time.time())
    reachable = False
    while not reachable:
        reachable = PingClient.ping(ip_address,
                                    config.ip_address_version_for_ssh)
        # BUG FIX: on a successful ping the loop previously still slept
        # a full retry interval and could raise ServerUnreachable when
        # the timeout had elapsed even though the host answered; exit
        # immediately instead (matches the sibling constructor).
        if reachable:
            break
        time.sleep(config.connection_retry_interval)
        if int(time.time()) - start >= config.connection_timeout:
            raise ServerUnreachable(ip_address)
    if key is not None:
        auth_strategy = SSHAuthStrategy.KEY_STRING
    else:
        auth_strategy = SSHAuthStrategy.PASSWORD
    self.ssh_client = SSHBehaviors(
        username=self.username, password=self.password,
        host=self.ip_address, tcp_timeout=20,
        auth_strategy=auth_strategy, look_for_keys=False, key=key)
    self.ssh_client.connect_with_timeout(cooldown=20, timeout=ssh_timeout)
    if not self.ssh_client.is_connected():
        message = ('SSH timeout after {timeout} seconds: '
                   'Could not connect to {ip_address}.')
        raise SshConnectionException(message.format(
            timeout=ssh_timeout, ip_address=ip_address))
def __call__(self, *args):
    """Invoke the wrapped function, returning a cached result when the
    call arguments have been seen before.

    Uncacheable (unhashable) argument sets fall through to a direct
    call — better to skip the cache than blow up.
    """
    log_name = "{0}.{1}".format(cclogging.get_object_namespace(args[0]),
                                self.__name__)
    self._start_logging(log_name)
    # BUG FIX: isinstance(args, collections.Hashable) was always True
    # because *args arrives as a tuple and tuple defines __hash__
    # regardless of its contents; an unhashable element (a list, for
    # instance) then raised TypeError at the cache lookup below.
    # Actually hashing the tuple is the reliable test, and avoids the
    # collections.Hashable alias removed in Python 3.10.
    try:
        hash(args)
    except TypeError:
        # uncacheable. a list, for instance.
        # better to not cache than blow up.
        value = self.func(*args)
        self.func._log.debug("Uncacheable. Data returned")
        self._stop_logging()
        return value
    if args in self.cache:
        self.func._log.debug("Cached data returned.")
        self._stop_logging()
        return self.cache[args]
    value = self.func(*args)
    self.cache[args] = value
    self.func._log.debug("Data cached for future calls")
    self._stop_logging()
    return value
def __init__(self, files):
    """Accept multiple (config-like) run files and generate a
    consolidated representation of them, enforcing rules during
    parsing.

    A BrewFile is a SafeConfigParser file, except:

    The section 'cli-defaults' is special and can only be used for
    defining defaults for optional command-line arguments.
    (NOTE: This feature is not yet implemented)

    All keys in any given section must be unique.

    All section names across all files passed into BrewFile must be
    unique, with the exception of 'defaults' and 'cli-defaults', which
    are special and not vetted.

    The section 'cli-defaults' should only appear once across all
    files passed into BrewFile.
    """
    self._log = cclogging.getLogger(
        cclogging.get_object_namespace(self.__class__))
    # Keep the raw file list; _validate_runfiles both vets the files
    # against the rules above and builds the consolidated data.
    self.files = files
    self._data = self._validate_runfiles(files)
def __init__(self):
    """Attach a logger named after this (sub)class's namespace."""
    namespace = cclogging.get_object_namespace(self.__class__)
    self._log = cclogging.getLogger(namespace)
def __init__(self, parent_object):
    """Set up per-fixture reporting: a fixture logger, run metrics,
    and a report name derived from the parent object's namespace.
    """
    namespace = get_object_namespace(parent_object)
    self.report_name = str(namespace)
    self.metrics = TestRunMetrics()
    self.logger = _FixtureLogger(parent_object)
def __init__(self, parent_object):
    """Bind the root logger plus a handler named for the parent
    object's namespace; logging starts disabled.
    """
    handler_name = get_object_namespace(parent_object)
    self.log_handler = setup_new_cchandler(handler_name)
    self.log = getLogger('')
    self._is_logging = False
def __init__(self, section_name):
    """Remember the config section this provider reads and attach a
    logger named after this (sub)class's namespace.
    """
    namespace = cclogging.get_object_namespace(self.__class__)
    self._log = cclogging.getLogger(namespace)
    self._section_name = section_name
def __init__(self):
    """Run the base-model initializer, then attach a logger named
    after this (sub)class's namespace.
    """
    super(AutoMarshallingModel, self).__init__()
    log_namespace = cclogging.get_object_namespace(self.__class__)
    self._log = cclogging.getLogger(log_namespace)
def _log(cls):
    """Return a logger named after the class's namespace."""
    namespace = cclogging.get_object_namespace(cls)
    return cclogging.getLogger(namespace)