def make_host_reachable(self):
    """Expose the service outside the guest.

    Binds RPC on all interfaces and advertises/listens/seeds on this
    host's own IPv4 address.
    """
    my_ip = netutils.get_my_ipv4()
    updates = {"rpc_address": "0.0.0.0"}
    for key in ("broadcast_rpc_address", "listen_address", "seed"):
        updates[key] = my_ip
    self.update_conf_with_group(updates)
def make_host_reachable(self):
    """Make the database reachable from outside the guest.

    rpc binds on all interfaces; broadcast/listen/seed use this
    host's IPv4 address.
    """
    address = netutils.get_my_ipv4()
    self.update_conf_with_group({
        'rpc_address': "0.0.0.0",
        'broadcast_rpc_address': address,
        'listen_address': address,
        'seed': address
    })
def __enable_remote_access(self):
    """Apply a system override so the node is remotely accessible."""
    ip = netutils.get_my_ipv4()
    overrides = {
        'rpc_address': "0.0.0.0",
        'broadcast_rpc_address': ip,
        'listen_address': ip,
        'seed_provider': {'parameters': [{'seeds': ip}]},
    }
    self.configuration_manager.apply_system_override(overrides)
def do_mongo(self, db_cmd):
    """Evaluate a JS snippet via the mongo shell; return (out, err).

    NOTE(security): db_cmd is interpolated into a shell=True command
    line — callers must never pass untrusted input here.
    """
    # TODO(ramashri) see if hardcoded values can be removed
    cmd = ('mongo --host ' + netutils.get_my_ipv4()
           + ' --quiet --eval \'printjson(%s)\'' % db_cmd)
    out, err = utils.execute_with_timeout(cmd, shell=True, timeout=100)
    LOG.debug(out.strip())
    return (out, err)
def get_master_ref(self, service, snapshot_info):
    """Return the connection info a replica needs to reach this master."""
    return {
        'host': netutils.get_my_ipv4(),
        'port': service.get_port(),
        'requirepass': service.get_auth_password(),
    }
def get_initiator():
    """Get the initiator connector dict."""
    # Collect the initiator-side connector properties for this host.
    initiator = connector.get_connector_properties(
        'sudo', netutils.get_my_ipv4(), False, False)
    LOG.debug("initiator = %s", initiator)
    return initiator
def initial_setup(self):
    """One-time Couchbase node and cluster initialization.

    Re-homes the data directory onto the configured mount point,
    initializes the node and a single-node cluster via the Couchbase
    CLI, and applies kernel tuning.

    :raises RuntimeError: if any of the setup commands fail.
    """
    self.ip_address = netutils.get_my_ipv4()
    mount_point = CONF.couchbase.mount_point
    try:
        LOG.info(_('Couchbase Server change data dir path.'))
        # Data dir must be owned by the couchbase service user.
        operating_system.chown(mount_point, 'couchbase', 'couchbase',
                               as_root=True)
        pwd = CouchbaseRootAccess.get_password()
        utils.execute_with_timeout(
            (system.cmd_node_init % {'data_path': mount_point,
                                     'IP': self.ip_address,
                                     'PWD': pwd}), shell=True)
        # Remove the stock data dir so the node re-initializes cleanly.
        operating_system.remove(system.INSTANCE_DATA_DIR, force=True,
                                as_root=True)
        LOG.debug('Couchbase Server initialize cluster.')
        utils.execute_with_timeout(
            (system.cmd_cluster_init % {'IP': self.ip_address,
                                        'PWD': pwd}), shell=True)
        # Kernel tuning for Couchbase (swappiness + sysctl.conf).
        utils.execute_with_timeout(system.cmd_set_swappiness, shell=True)
        utils.execute_with_timeout(system.cmd_update_sysctl_conf,
                                   shell=True)
        LOG.info(_('Couchbase Server initial setup finished.'))
    except exception.ProcessExecutionError:
        LOG.exception(_('Error performing initial Couchbase setup.'))
        raise RuntimeError("Couchbase Server initial setup failed")
def _get_actual_db_status(self):
    """Probe Couchbase; any probe failure is reported as SHUTDOWN."""
    self.ip_address = netutils.get_my_ipv4()
    pwd = None
    try:
        pwd = CouchbaseRootAccess.get_password()
        return self._get_status_from_couchbase(pwd)
    except exception.ProcessExecutionError:
        LOG.exception(_("Error getting the Couchbase status."))
    return rd_instance.ServiceStatuses.SHUTDOWN
def update_vertica(self, command, members=None):
    """Run an admintools update command against the given members.

    :param command: admintools sub-command to execute.
    :param members: comma-separated host list; defaults to this host's
                    IPv4 address, resolved at call time.
    :raises RuntimeError: if the underlying command fails.
    """
    # BUG FIX: the default was `members=netutils.get_my_ipv4()` in the
    # signature, which Python evaluates exactly once at import time and
    # can capture a stale/wrong address. Resolve it per call instead.
    if members is None:
        members = netutils.get_my_ipv4()
    LOG.info(_("Calling update_vertica with command %s") % command)
    try:
        update_vertica_cmd = (system.UPDATE_VERTICA
                              % (command, members, MOUNT_POINT))
        system.shell_execute(update_vertica_cmd)
    except exception.ProcessExecutionError:
        LOG.exception(_("update_vertica failed."))
        raise RuntimeError(_("update_vertica failed."))
    # self._generate_database_password()
    LOG.info(_("update_vertica completed."))
def _configure_network(self, port=None):
    """Make the service accessible at a given (or default if not) port."""
    instance_ip = netutils.get_my_ipv4()
    options = {
        'net.bindIp': ','.join([instance_ip, '127.0.0.1']),
    }
    if port is not None:
        guestagent_utils.update_dict({'net.port': port}, options)
    self.configuration_manager.apply_system_override(options)
    self.status.set_host(instance_ip, port=port)
def _get_actual_db_status(self):
    """Probe the local mongod; any failure maps to SHUTDOWN."""
    try:
        if self._is_config_server() is True:
            # Config servers listen on the dedicated 27019 port.
            target = netutils.get_my_ipv4() + ' --port 27019'
        else:
            target = netutils.get_my_ipv4()
        out, err = utils.execute_with_timeout(system.CMD_STATUS % target,
                                              shell=True)
        if err:
            return ds_instance.ServiceStatuses.SHUTDOWN
        return ds_instance.ServiceStatuses.RUNNING
    except exception.ProcessExecutionError as e:
        LOG.exception(_("Process execution %s.") % e)
        return ds_instance.ServiceStatuses.SHUTDOWN
    except OSError as e:
        LOG.exception(_("OS Error %s.") % e)
        return ds_instance.ServiceStatuses.SHUTDOWN
def install_vertica(self, members=None):
    """Prepare the guest machine with a Vertica db creation.

    :param members: comma-separated host list; defaults to this host's
                    IPv4 address, resolved at call time.
    """
    # BUG FIX: `members=netutils.get_my_ipv4()` in the signature is
    # evaluated once at import time; resolve the default per call.
    if members is None:
        members = netutils.get_my_ipv4()
    LOG.info(_("Installing Vertica Server."))
    try:
        # Create db after install
        install_vertica_cmd = (system.INSTALL_VERTICA
                               % (members, MOUNT_POINT))
        system.shell_execute(install_vertica_cmd)
    except exception.ProcessExecutionError:
        # NOTE(review): failure is logged but not re-raised and setup
        # continues — unlike update_vertica; presumably intentional.
        LOG.exception(_("install_vertica failed."))
    self._generate_database_password()
    LOG.info(_("install_vertica completed."))
def create_admin_user(self, password):
    """Create the admin user while the localhost exception is active."""
    LOG.debug("Creating the admin user.")
    creds = self.store_admin_password(password)
    admin = models.MongoDBUser(name="admin.%s" % creds.username,
                               password=creds.password)
    admin.roles = system.MONGO_ADMIN_ROLES
    # the driver engine is already cached, but we need to change it
    with MongoDBClient(None, host="localhost",
                       port=MONGODB_PORT) as client:
        MongoDBAdmin().create_user(admin, client=client)
    # now revert to the normal engine
    self.status.set_host(host=netutils.get_my_ipv4(),
                         port=MONGODB_PORT)
    LOG.debug("Created admin user.")
def create_db(self, members=None):
    """Prepare the guest machine with a Vertica db creation.

    :param members: comma-separated host list; defaults to this host's
                    IPv4 address, resolved at call time.
    """
    # BUG FIX: a netutils call in the signature default is frozen at
    # import time; resolve it per call instead.
    if members is None:
        members = netutils.get_my_ipv4()
    LOG.info(_("Creating database on Vertica host."))
    try:
        # Create db after install
        db_password = self._get_database_password()
        create_db_command = (system.CREATE_DB % (members, DB_NAME,
                                                 MOUNT_POINT, MOUNT_POINT,
                                                 db_password))
        system.shell_execute(create_db_command, "dbadmin")
    except Exception:
        # Deliberately broad: create failures are logged, not propagated.
        LOG.exception(_("Vertica database create failed."))
    LOG.info(_("Vertica database create completed."))
def get_master_ref(self, service, snapshot_info):
    """Capture information from a master node"""
    # Scratch files used to seed a standby: a standby parameter file,
    # the password file, a standby controlfile, and a tarball that
    # bundles everything a slave needs.
    pfile = '/tmp/init%s_stby.ora' % self.ORA_CONF.db_name
    pwfile = ('%(ora_home)s/dbs/orapw%(db_name)s' %
              {'ora_home': CONF.get(MANAGER).oracle_home,
               'db_name': self.ORA_CONF.db_name})
    ctlfile = '/tmp/%s_stby.ctl' % self.ORA_CONF.db_name
    oratabfile = '/etc/oratab'
    oracnffile = CONF.get(MANAGER).conf_file
    datafile = '/tmp/oradata.tar.gz'

    def _cleanup_tmp_files():
        # Best-effort removal of scratch files (force ignores missing).
        operating_system.remove(ctlfile, force=True, as_root=True)
        operating_system.remove(pfile, force=True, as_root=True)
        operating_system.remove(datafile, force=True, as_root=True)

    _cleanup_tmp_files()

    with ora_service.LocalOracleClient(self.ORA_CONF.db_name,
                                       service=True) as client:
        # Have the live instance emit a standby controlfile and a
        # parameter file for the future slave.
        client.execute("ALTER DATABASE CREATE STANDBY CONTROLFILE AS "
                       "'%s'" % ctlfile)
        ora_service.OracleAdmin().create_parameter_file(target=pfile,
                                                       client=client)
        # fal_server lists databases already in the Data Guard
        # configuration, if any.
        q = sql_query.Query()
        q.columns = ["value"]
        q.tables = ["v$parameter"]
        q.where = ["name = 'fal_server'"]
        client.execute(str(q))
        row = client.fetchone()
        db_list = []
        if row is not None and row[0] is not None:
            db_list = str(row[0]).split(",")
        # This (master) database always heads the list.
        db_list.insert(0, self.ORA_CONF.db_name)

    # Create a tar file containing files needed for slave creation
    utils.execute_with_timeout('tar', '-Pczvf', datafile, ctlfile,
                               pwfile, pfile, oratabfile, oracnffile,
                               run_as_root=True, root_helper='sudo')
    oradata_encoded = operating_system.read_file(
        datafile, codec=stream_codecs.Base64Codec(), as_root=True)
    _cleanup_tmp_files()
    master_ref = {
        'host': netutils.get_my_ipv4(),
        'db_name': self.ORA_CONF.db_name,
        'db_list': db_list,
        'post_processing': True,
        'oradata': oradata_encoded,
    }
    return master_ref
def get_config_changes(self, cluster_config, mount_point=None):
    """Build the configuration overrides for this node's cluster role."""
    LOG.debug("Getting configuration changes.")
    config_changes = {}
    if cluster_config is not None:
        config_changes['bind_ip'] = netutils.get_my_ipv4()
        role = cluster_config["instance_type"]
        if role == "config_server":
            config_changes["configsvr"] = "true"
        elif role == "member":
            config_changes["replSet"] = cluster_config["replica_set_name"]
    is_query_router = (cluster_config is not None
                       and cluster_config['instance_type'] == "query_router")
    if mount_point is not None and not is_query_router:
        config_changes['dbpath'] = mount_point
    return config_changes
def add_db_to_node(self, members=None):
    """Add db to host with admintools.

    :param members: comma-separated host list; defaults to this host's
                    IPv4 address, resolved at call time.
    """
    # BUG FIX: the default was evaluated at import time via a call in
    # the signature; resolve it per call instead.
    if members is None:
        members = netutils.get_my_ipv4()
    LOG.info(_("Calling admintools to add DB to host"))
    try:
        # Create db after install
        db_password = self._get_database_password()
        create_db_command = (system.ADD_DB_TO_NODE % (members, DB_NAME,
                                                      db_password))
        system.shell_execute(create_db_command, "dbadmin")
    except exception.ProcessExecutionError:
        # Give vertica some time to get the node up, won't be available
        # by the time adminTools -t db_add_node completes
        LOG.info(_("adminTools failed as expected - wait for node"))
    self.wait_for_node_status()
    LOG.info(_("Vertica add db to host completed."))
def get_master_ref(self, service, snapshot_info):
    """Capture information from a master node"""
    # Back up the newest controlfile under a .bak name in TMP_DIR and
    # bundle it, plus the password/oratab/conf files, into a tarball
    # the slave can restore from.
    ctlfile = path.basename(sorted(operating_system.list_files_in_directory(
        service.paths.ctlfile1_dir, recursive=True,
        as_root=True))[0]) + '.bak'
    ctlfile = path.join(TMP_DIR, ctlfile)
    datafile = path.join(TMP_DIR, 'oradata.tar.gz')

    def _cleanup_tmp_files():
        # Best-effort removal of scratch files (force ignores missing).
        operating_system.remove(ctlfile, force=True, as_root=True)
        operating_system.remove(datafile, force=True, as_root=True)

    _cleanup_tmp_files()

    with service.cursor(service.admin.database_name) as cursor:
        # Standby controlfile written by the live instance.
        cursor.execute(str(sql_query.AlterDatabase(
            "CREATE STANDBY CONTROLFILE AS '%s'" % ctlfile)))
        # fal_server enumerates databases already in the Data Guard
        # configuration, if any.
        cursor.execute(str(sql_query.Query(
            columns=['VALUE'],
            tables=['V$PARAMETER'],
            where=["NAME = 'fal_server'"])))
        row = cursor.fetchone()
    db_list = []
    if row is not None and row[0] is not None:
        db_list = str(row[0]).split(",")
    # This (master) database always heads the list.
    db_list.insert(0, service.admin.database_name)
    # Create a tar file containing files needed for slave creation
    utils.execute_with_timeout('tar', '-Pczvf', datafile, ctlfile,
                               service.paths.orapw_file,
                               service.paths.oratab_file,
                               CONF.get(MANAGER).conf_file,
                               run_as_root=True, root_helper='sudo')
    oradata_encoded = operating_system.read_file(
        datafile, codec=stream_codecs.Base64Codec(), as_root=True,
        decode=False)
    _cleanup_tmp_files()
    master_ref = {
        'host': netutils.get_my_ipv4(),
        'db_name': service.admin.database_name,
        'db_list': db_list,
        'oradata': oradata_encoded,
    }
    return master_ref
def _get_actual_db_status(self):
    """Use mongostat to probe the local mongod/config server."""
    try:
        port = CONFIGSVR_PORT if self._is_config_server() else MONGODB_PORT
        out, err = utils.execute_with_timeout(
            'mongostat', '--host', str(netutils.get_my_ipv4()),
            '--port', str(port), '-n', str(1),
            check_exit_code=[0, 1])
        if err:
            return ds_instance.ServiceStatuses.SHUTDOWN
        return ds_instance.ServiceStatuses.RUNNING
    except exception.ProcessExecutionError as e:
        LOG.exception(_("Process execution %s.") % e)
        return ds_instance.ServiceStatuses.SHUTDOWN
    except OSError as e:
        LOG.exception(_("OS Error %s.") % e)
        return ds_instance.ServiceStatuses.SHUTDOWN
def get_config_changes(self, cluster_config, mount_point=None):
    """Assemble the mongod configuration overrides for this node."""
    LOG.debug("Getting configuration changes.")
    changes = {
        "bind_ip": ",".join([netutils.get_my_ipv4(), "127.0.0.1"]),
    }
    # todo mvandijk: uncomment the following when auth is being enabled
    # changes['auth'] = 'true'
    if cluster_config is not None:
        # todo mvandijk: uncomment the following when auth is being enabled
        # changes['keyFile'] = self.app.get_key_file()
        role = cluster_config["instance_type"]
        if role == "config_server":
            changes["configsvr"] = "true"
        elif role == "member":
            changes["replSet"] = cluster_config["replica_set_name"]
    not_query_router = (cluster_config is None
                        or cluster_config["instance_type"] != "query_router")
    if mount_point is not None and not_query_router:
        changes["dbpath"] = mount_point
    return changes
def __init__(self, reactor, hpepluginconfig):
    """Initialize the volume plugin: driver, etcd client, connector.

    :param IReactorTime reactor: Reactor time interface implementation.
    :param Ihpepluginconfig : hpedefaultconfig configuration
    :raises exception.HPEPluginNotInitializedException: if the driver
        cannot be imported or its setup fails.
    """
    LOG.info(_LI('Initialize Volume Plugin'))
    self._reactor = reactor
    self._hpepluginconfig = hpepluginconfig
    # Import the configured backend driver by dotted path.
    hpeplugin_driver = hpepluginconfig.hpedockerplugin_driver
    self.hpeplugin_driver = \
        importutils.import_object(hpeplugin_driver, self._hpepluginconfig)
    if self.hpeplugin_driver is None:
        msg = (_('hpeplugin_driver import driver failed'))
        LOG.error(msg)
        raise exception.HPEPluginNotInitializedException(reason=msg)
    try:
        self.hpeplugin_driver.do_setup()
        self.hpeplugin_driver.check_for_setup_error()
    except Exception as ex:
        msg = (_('hpeplugin_driver do_setup failed, error is: %s'),
               six.text_type(ex))
        LOG.error(msg)
        raise exception.HPEPluginNotInitializedException(reason=msg)
    # In-memory volume tracking state.
    self._voltracker = {}
    self._path_info = []
    self._my_ip = netutils.get_my_ipv4()
    # etcd client used to persist volume metadata.
    self._etcd = util.EtcdUtil(
        self._hpepluginconfig.host_etcd_ip_address,
        self._hpepluginconfig.host_etcd_port_number,
        self._hpepluginconfig.host_etcd_client_cert,
        self._hpepluginconfig.host_etcd_client_key)
    # TODO: make device_scan_attempts configurable
    # see nova/virt/libvirt/volume/iscsi.py
    root_helper = 'sudo'
    self.use_multipath = self._hpepluginconfig.use_multipath
    self.enforce_multipath = self._hpepluginconfig.enforce_multipath
    self.connector = connector.InitiatorConnector.factory(
        'ISCSI', root_helper, use_multipath=self.use_multipath,
        device_scan_attempts=5, transport='default')
def apply_initial_guestagent_configuration(self, cluster_config=False):
    """Configure this node.

    Initialize the node as a single-server cluster if no cluster
    configuration is provided.

    If cluster configuration is provided retrieve the cluster password
    and store it on the filesystem. Skip the cluster initialization as
    it will be performed later from the task manager.
    """
    self.ip_address = netutils.get_my_ipv4()
    mount_point = CONF.couchbase.mount_point
    self.run_node_init(mount_point, mount_point, self.ip_address)
    if cluster_config:
        CouchbaseRootAccess().write_password_to_file(
            cluster_config['cluster_password'])
    else:
        self.initialize_cluster()
def remove_db_from_node(self, members=None):
    """Remove db from node with admintools.

    :param members: comma-separated host list; defaults to this host's
                    IPv4 address, resolved at call time.
    """
    # BUG FIX: the default was evaluated at import time via a call in
    # the signature; resolve it per call instead.
    if members is None:
        members = netutils.get_my_ipv4()
    LOG.info(_("Removing db from node"))
    try:
        # Create db after install
        db_password = self._get_database_password()
        create_db_command = (system.REMOVE_DB_FROM_NODE % (members,
                                                           DB_NAME,
                                                           db_password))
        system.shell_execute(create_db_command, "dbadmin")
    except exception.ProcessExecutionError:
        LOG.info(_("adminTools failed as expected - wait for node"))
    # Give vertica some time to take the node down - it won't be available
    # by the time adminTools -t db_add_node completes
    self.wait_for_node_status()
    LOG.info(_("Vertica remove host from db completed."))
def set_password(self, root_password):
    """Reset the Couchbase root password via the interactive CLI.

    Drives the password-reset tool with pexpect. On a timeout the
    child process is force-closed (or killed via sudo if close fails
    to terminate it), and the new password is still recorded on disk.
    """
    self.ip_address = netutils.get_my_ipv4()
    child = pexpect.spawn(system.cmd_reset_pwd % {'IP': self.ip_address})
    try:
        child.expect('.*password.*')
        child.sendline(root_password)
        child.expect('.*(yes/no).*')
        child.sendline('yes')
        child.expect('.*successfully.*')
    except pexpect.TIMEOUT:
        child.delayafterclose = 1
        child.delayafterterminate = 1
        try:
            child.close(force=True)
        except pexpect.ExceptionPexpect:
            # Close fails to terminate a sudo process on some OSes.
            subprocess.call(['sudo', 'kill', str(child.pid)])
    self.write_password_to_file(root_password)
def __init__(self, host, project_name, prog_name, fqdn=None,
             pid=None, config_file_list=None, config_list=None,
             region_name=None, i_am_launcher=False):
    """Record identification details of this service process.

    BUG FIX: fqdn/pid defaults were `socket.gethostname()` /
    `os.getpid()` in the signature, which Python evaluates once at
    import time — wrong after a fork/daemonize. They are now resolved
    at call time.
    """
    self.host = host
    self.project_name = project_name
    # Resolve call-time defaults for values frozen by signature defaults.
    self.fqdn = fqdn if fqdn is not None else socket.gethostname()
    self.prog_name = prog_name
    self.pid = pid if pid is not None else os.getpid()
    self.config_file_dict = self.get_config_files(config_file_list)
    self.config_list = config_list or list()
    self.identification = IDENTIFICATION
    self.region_name = region_name or cfg.CONF.os_namos.region_name
    self.i_am_launcher = i_am_launcher
    self.ips = [netutils.get_my_ipv4()]
def _get_actual_db_status(self):
    """Determine Couchbase status, tolerating a stale stored password.

    Tries the stored root password first; on failure falls back to the
    password parsed from the native Couchbase config file, and caches
    that value if it works. Any unrecoverable failure maps to SHUTDOWN.
    """
    self.ip_address = netutils.get_my_ipv4()
    pwd = None
    try:
        pwd = CouchbaseRootAccess.get_password()
        return self._get_status_from_couchbase(pwd)
    except exception.ProcessExecutionError:
        # log the exception, but continue with native config approach
        LOG.exception(_("Error getting the Couchbase status."))

    try:
        out, err = utils.execute_with_timeout(
            system.cmd_get_password_from_config, shell=True)
    except exception.ProcessExecutionError:
        LOG.exception(_("Error getting the root password from the "
                        "native Couchbase config file."))
        return rd_instance.ServiceStatuses.SHUTDOWN

    config_pwd = out.strip() if out is not None else None
    # If the parsed password is empty or equals the value that already
    # failed, there is nothing new to try.
    if not config_pwd or config_pwd == pwd:
        LOG.debug("The root password from the native Couchbase config "
                  "file is either empty or already matches the "
                  "stored value.")
        return rd_instance.ServiceStatuses.SHUTDOWN

    try:
        status = self._get_status_from_couchbase(config_pwd)
    except exception.ProcessExecutionError:
        LOG.exception(_("Error getting Couchbase status using the "
                        "password parsed from the native Couchbase "
                        "config file."))
        return rd_instance.ServiceStatuses.SHUTDOWN

    # if the parsed root password worked, update the stored value to
    # avoid having to consult/parse the couchbase config file again.
    LOG.debug("Updating the stored value for the Couchbase "
              "root password.")
    CouchbaseRootAccess().write_password_to_file(config_pwd)
    return status
), cfg.StrOpt( "state_path", default="/var/lib/cinder", deprecated_name="pybasedir", help="Top-level directory for maintaining cinder's state", ), ] debug_opts = [] CONF.register_cli_opts(core_opts) CONF.register_cli_opts(debug_opts) global_opts = [ cfg.StrOpt("my_ip", default=netutils.get_my_ipv4(), help="IP address of this host"), cfg.StrOpt("glance_host", default="$my_ip", help="Default glance host name or IP"), cfg.IntOpt("glance_port", default=9292, min=1, max=65535, help="Default glance port"), cfg.ListOpt( "glance_api_servers", default=["$glance_host:$glance_port"], help="A list of the glance API servers available to cinder " "([hostname|ip]:port)", ), cfg.IntOpt("glance_api_version", default=1, help="Version of the glance API to use"), cfg.IntOpt("glance_num_retries", default=0, help="Number retries when downloading an image from glance"), cfg.BoolOpt( "glance_api_insecure", default=False, help="Allow to perform insecure SSL (https) requests to " "glance" ), cfg.BoolOpt( "glance_api_ssl_compression", default=False,
def get_master_ref(self, service, snapshot_info):
    """Return the host/port a replica needs to attach to this master."""
    return {
        'host': netutils.get_my_ipv4(),
        'port': service.get_port()
    }
cfg.StrOpt('state_path', default='/var/lib/venus', deprecated_name='pybasedir', help="Top-level directory for " "maintaining venus's state"), ] debug_opts = [ ] CONF.register_cli_opts(core_opts) CONF.register_cli_opts(debug_opts) global_opts = [ cfg.StrOpt('my_ip', default=netutils.get_my_ipv4(), help='IP address of this host'), cfg.StrOpt('venusmanager_topic', default='venus-venusmanager', help='The topic that venusmanager nodes listen on'), cfg.BoolOpt('enable_v1_api', default=True, help=_("DEPRECATED: Deploy v1 of the Venus API.")), cfg.BoolOpt('api_rate_limit', default=True, help='Enables or disables rate limit of the API.'), cfg.ListOpt('osapi_venus_ext_list', default=[], help='Specify list of extensions to load when using ' 'osapi_venus_extension option with venus.api.' 'contrib.select_extensions'),
def get_master_ref(self, service, snapshot_info):
    """Return host/port connection info for this PostgreSQL master."""
    host = netutils.get_my_ipv4()
    port = cfg.get_configuration_property("postgresql_port")
    return {"host": host, "port": port}
from oslo_utils import netutils

# Demonstrate assorted oslo_utils.netutils helpers.
print(netutils.escape_ipv6('fe80::f493:20ff:fe5b:6cf'))
print(
    netutils.get_ipv6_addr_by_EUI64('fe80::d480:b0ff:fe33:1543/64',
                                    'f2:2c:d8:c3:73:fb'))
print(netutils.get_my_ipv4())
print(netutils.is_ipv6_enabled())
print(netutils.is_valid_cidr('10.10.10.10/24'))

# ICMP-code validity over a small signed range.
print([netutils.is_valid_icmp_code(n) for n in range(-5, 5)])

print(
    netutils.urlsplit(
        'https://foxfox.mybluemix.net.com:8443/index.html?auto=off'))
# SplitResult(scheme='https', netloc='foxfox.mybluemix.net.com:8443', path='/index.html', query='auto=off', fragment='')
def setUp(self):
    super(BaseTest, self).setUp()
    self.uuid = uuidutils.generate_uuid()
    # Inspector endpoint on this host's IPv4 address.
    self.my_ip = 'http://{0}:5050'.format(netutils.get_my_ipv4())
    self.token = "token"
# limitations under the License. """Generic code for inspector client.""" import json import logging from keystoneauth1 import exceptions as ks_exc from keystoneauth1 import session as ks_session from keystoneauth1 import token_endpoint from oslo_utils import netutils import requests import six from ironic_inspector_client.common.i18n import _, _LW _DEFAULT_URL = 'http://' + netutils.get_my_ipv4() + ':5050' _ERROR_ENCODING = 'utf-8' LOG = logging.getLogger('ironic_inspector_client') _MIN_VERSION_HEADER = 'X-OpenStack-Ironic-Inspector-API-Minimum-Version' _MAX_VERSION_HEADER = 'X-OpenStack-Ironic-Inspector-API-Maximum-Version' _VERSION_HEADER = 'X-OpenStack-Ironic-Inspector-API-Version' _AUTH_TOKEN_HEADER = 'X-Auth-Token' def _parse_version(api_version): try: return tuple(int(x) for x in api_version.split('.')) except (ValueError, TypeError): raise ValueError( _("Malformed API version: expect tuple, string "
# under the License. # from oslo_log import log as logging from oslo_utils import netutils from trove.common import cfg from trove.common import utils from trove.guestagent.common import operating_system from trove.guestagent.datastore.experimental.mongodb import ( service as mongo_service) from trove.guestagent.strategies.restore import base CONF = cfg.CONF LOG = logging.getLogger(__name__) IP = netutils.get_my_ipv4() LARGE_TIMEOUT = 1200 MONGODB_DBPATH = CONF.mongodb.mount_point MONGO_DUMP_DIR = MONGODB_DBPATH + "/dump" class MongoDump(base.RestoreRunner): __strategy_name__ = 'mongodump' base_restore_cmd = 'sudo tar xPf -' def __init__(self, *args, **kwargs): super(MongoDump, self).__init__(*args, **kwargs) self.app = mongo_service.MongoDBApp() def post_restore(self): """
def __call__(self, environ, start_response):
    """WSGI entry point: respond with a fixed plain-text greeting.

    BUG FIXES: the header name was 'Content Type' (malformed — HTTP
    header names use a hyphen, per the WSGI/HTTP specs), and the
    Python-2-only print statements are now print() calls.
    """
    print('Hydrant')
    start_response('200 ok', [('Content-Type', 'text/plain')])
    LOG.debug('this is hydrant')
    print(netutils.get_my_ipv4())
    return ['%s, %s!\n' % (self.in_arg, 'Hydrant')]
cfg.StrOpt('api_paste_config', default="api-paste.ini", help='File name for the paste.deploy config for manila-api.'), cfg.StrOpt('state_path', default='/var/lib/manila', help="Top-level directory for maintaining manila's state."), ] debug_opts = [ ] CONF.register_cli_opts(core_opts) CONF.register_cli_opts(debug_opts) global_opts = [ cfg.StrOpt('my_ip', default=netutils.get_my_ipv4(), help='IP address of this host.'), cfg.StrOpt('scheduler_topic', default='manila-scheduler', help='The topic scheduler nodes listen on.'), cfg.StrOpt('share_topic', default='manila-share', help='The topic share nodes listen on.'), cfg.BoolOpt('enable_v1_api', default=False, help=_('Deploy v1 of the Manila API. This option is ' 'deprecated, is not used, and will be removed ' 'in a future release.')), cfg.BoolOpt('enable_v2_api', default=False, help=_('Deploy v2 of the Manila API. This option is '
def test_get_my_ip_socket_error(self, ip, mock_socket):
    """On socket errors, fall back to the hostname-derived address."""
    mock_socket.side_effect = socket.error
    ip.return_value = '1.2.3.4'
    self.assertEqual(netutils.get_my_ipv4(), '1.2.3.4')
def initialize_node(self):
    """Run Couchbase node-init with the data dir on the mount point."""
    node_ip = netutils.get_my_ipv4()
    data_dir = CONF.couchbase.mount_point
    self.build_admin().run_node_init(data_dir, data_dir, node_ip)
def get_master_ref(self, service, snapshot_info):
    """Return host/port info for attaching replicas to this master."""
    return {
        'host': netutils.get_my_ipv4(),
        'port': CONF.postgresql.postgresql_port
    }
def get_master_ref(self, service, snapshot_info):
    """Return host/port connection info for this master node."""
    port = cfg.get_configuration_property('postgresql_port')
    return {'host': netutils.get_my_ipv4(), 'port': port}
def test_get_my_ip(self):
    """The socket's getsockname() address is reported as our IPv4."""
    sock_attrs = {
        'return_value.getsockname.return_value': ['1.2.3.4', '']}
    with mock.patch('socket.socket', **sock_attrs):
        self.assertEqual(netutils.get_my_ipv4(), '1.2.3.4')