def _normalize_coe_cluster(self, coe_cluster):
    """Normalize a raw Magnum COE cluster dict into a Munch.

    Copies the input, drops API navigation noise, promotes the well-known
    fields to top-level keys, and stashes everything left over under
    'properties'.
    """
    remaining = coe_cluster.copy()
    # 'links' is API navigation noise; drop it if present.
    remaining.pop('links', None)
    normalized = munch.Munch(
        id=remaining.pop('uuid'),
        location=self._get_current_location(),
    )
    known_fields = (
        'status', 'cluster_template_id', 'stack_id', 'keypair',
        'master_count', 'create_timeout', 'node_count', 'name',
    )
    for field in known_fields:
        normalized[field] = remaining.pop(field)
    # Whatever was not explicitly promoted is preserved as properties.
    normalized['properties'] = remaining
    return normalized
def test_create_network_existing(self):
    """A network already tagged with the namespace uid is reused, not created."""
    cls = subnet_drv.NamespacePodSubnetDriver
    m_driver = mock.MagicMock(spec=cls)
    ns_uid = '4f7ea026-3ae4-4baa-84df-1942977fe1be'
    namespace = {'metadata': {'name': 'test', 'uid': ns_uid}}
    project_id = mock.sentinel.project_id
    os_net = self.useFixture(k_fix.MockNetworkClient()).client
    existing_net = munch.Munch({
        'id': mock.sentinel.net,
        'description': ns_uid,
        'name': 'test',
    })
    os_net.networks.return_value = iter([existing_net])

    net_id_resp = cls.create_network(m_driver, namespace, project_id)

    self.assertEqual(net_id_resp, existing_net['id'])
    # The lookup found a match, so no creation call must happen.
    os_net.create_network.assert_not_called()
    os_net.networks.assert_called_once()
def apply_types(config):
    """Coerce raw config values into richer Python types.

    - Pulls required environment variables listed in ``_ENV_VARS`` into
      their config sections (lower-cased keys).
    - Builds the database connection string from the 'database' section.
    - Converts entries in the 'directories' and 'files' sections into
      pathlib paths anchored at ``ROOT_DIR``.

    Args:
        config: a munch.Munch-like mapping of configuration sections.

    Returns:
        The same config object, mutated in place.

    Raises:
        ValueError: when a required environment variable is missing.
    """
    # load env variables
    for section, envs in _ENV_VARS.items():
        if section not in config:
            config[section] = munch.Munch()
        for env in envs:
            value = os.environ.get(env)
            if value is None:
                raise ValueError("You need to define '%s' env variable!" % env)
            config[section][env.lower()] = value

    # connection_string
    if "database" in config:
        config.database["conn_str"] = (
            "postgresql://{dbuser}:{dbpass}@{dbhost}:{dbport}/{dbname}"
            .format(**config.database))

    # convert paths to pathlib objects.
    for section in ("directories", "files"):
        if section in config:
            # BUG FIX: previously this always iterated and updated
            # config.directories, so the 'files' section was never converted.
            for key, value in config[section].items():
                config[section][key] = ROOT_DIR / value
    return config
def test_list_servers(self, mock_add_srv_int, mock_serverlist):
    '''This test verifies that calling list_servers results in a call
    to the ServerList task.'''
    server_obj = munch.Munch({
        'name': 'testserver',
        'id': '1',
        'flavor': {},
        'addresses': {},
        'accessIPv4': '',
        'accessIPv6': '',
        'image': ''
    })
    mock_serverlist.return_value = [server_obj]
    mock_add_srv_int.side_effect = [server_obj]

    r = self.cloud.list_servers()

    # FIX: assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(1, len(r))
    self.assertEqual(1, mock_add_srv_int.call_count)
    self.assertEqual('testserver', r[0]['name'])
def process_config(config):
    """Prepare user's config file. Also handles validation.

    Args:
        config: an open file object pointing at the YAML config
            (``config.name`` is used to record its absolute path).

    Returns:
        munch.Munch: configuration values, with ``meta_data.path`` set.
    """
    # safe_load avoids arbitrary Python object construction from the YAML
    # input; yaml.load without an explicit Loader is deprecated and unsafe.
    conf = munch.munchify(yaml.safe_load(config))
    conf.meta_data = munch.Munch()
    conf.meta_data.path = os.path.abspath(config.name)
    #
    # Run all validations that we can do on conf
    #
    # Can add more later
    return conf
def _normalize_image(self, image):
    """Normalize a raw glance image record (v1 or v2 style) into a Munch.

    NOTE: consumed keys are popped off the *image* argument in place;
    leftovers end up in 'properties' and are also mirrored at top level.
    """
    owner = image.get('owner')
    normalized = munch.Munch(
        location=self._get_current_location(project_id=owner))

    properties = image.pop('properties', {})

    # v2 exposes 'visibility'; v1 only has the 'is_public' boolean.
    visibility = image.pop('visibility', None)
    if visibility:
        is_public = (visibility == 'public')
    else:
        is_public = image.pop('is_public', False)
        visibility = 'public' if is_public else 'private'

    for field in _IMAGE_FIELDS:
        normalized[field] = image.pop(field, None)

    # Anything left over becomes both a property and a top-level key.
    for leftover_key, leftover_val in image.items():
        properties[leftover_key] = leftover_val
        normalized[leftover_key] = leftover_val

    normalized['properties'] = properties
    normalized['visibility'] = visibility
    normalized['is_public'] = is_public
    return normalized
def test_without_cleanup(self):
    """main() with an empty command reads the worker config once, patches it
    with values from the CLI args, configures logging with the patched yaml,
    registers utilities, and constructs the auction class.
    """
    resulted_yaml = deepcopy(self.yaml_output)
    args = munch.Munch({
        'cmd': '',
        'auction_worker_config': 'path/to/config',
        'with_api_version': 'another api version',
        'auction_doc_id': '1' * 32,
        'debug': False
    })
    self.mocked_parser_obj.parse_args.return_value = args
    main()
    # The config path from args is stat'ed, opened and yaml-loaded once each.
    self.assertEqual(self.mocked_os.path.isfile.call_count, 1)
    self.mocked_os.path.isfile.assert_called_with(
        args.auction_worker_config)
    self.assertEqual(self.mocked_yaml.load.call_count, 1)
    self.mocked_yaml.load.assert_called_with(self.open_result)
    self.assertEqual(self.mocked_open.call_count, 1)
    self.mocked_open.assert_called_with(args.auction_worker_config)
    self.assertEqual(self.mocked_logging.config.dictConfig.call_count, 1)
    # Mirror the patches main() is expected to apply to the loaded yaml
    # before handing it to logging.config.dictConfig.
    resulted_yaml['resource_api_version'] = args.with_api_version
    resulted_yaml['handlers']['journal'][
        'TENDERS_API_VERSION'] = resulted_yaml['resource_api_version']
    resulted_yaml['handlers']['journal'][
        'TENDERS_API_URL'] = resulted_yaml['resource_api_server']
    resulted_yaml['handlers']['journal']['TENDER_ID'] = args.auction_doc_id
    self.mocked_logging.config.dictConfig.assert_called_with(resulted_yaml)
    self.assertEqual(self.mocked_register_utilities.call_count, 1)
    self.mocked_register_utilities.assert_called_with(resulted_yaml, args)
    self.assertEqual(self.mocked_auction_class.call_count, 1)
    self.mocked_auction_class.assert_called_with(
        args.auction_doc_id,
        worker_defaults=resulted_yaml,
        debug=args.debug)
def __init__(self, chassis_username, chassis_password, chassis_hostname,
             app_hostname):
    """Initializer of SspPatterns.

    Builds the prompt-regex table used to match the various CLI contexts
    (MIO, FXOS, module, FTD, ASA) of an SSP chassis.

    :param chassis_username: login user for the chassis
    :param chassis_password: login password for the chassis
    :param chassis_hostname: hostname embedded in chassis-level prompts
    :param app_hostname: hostname embedded in FTD app-level prompts
    """
    self.prompt = munch.Munch()
    self.chassis_username = chassis_username
    self.chassis_password = chassis_password
    self.chassis_hostname = chassis_hostname
    # the below 2 lines are for backward compatibility
    # should not be used internally in the library
    self.login_username = chassis_username
    self.login_password = chassis_password
    # Prelogin prompts
    self.prompt.password_prompt = r'[\r\n]*[Pp]assword: $'
    self.prompt.prelogin_prompt = \
        r'[\r\n]*({} )?([Ll]ast )?[Ll]ogin: $'.format(self.chassis_hostname)
    # MIO level prompts
    self.prompt.mio_prompt = r'[\r\n]*{}([ /\w\-\*\\]+)?# $'.format(
        self.chassis_hostname)
    self.prompt.local_mgmt_prompt = \
        r'[\r\n]*({})?\(local-mgmt\)# $'.format(self.chassis_hostname)
    self.prompt.fxos_prompt = r'[\r\n]*({})?\(fxos\)# $'.format(
        self.chassis_hostname)
    self.prompt.fpr_module_prompt = r'[\r\n]*Firepower-module.*>$'
    self.prompt.cimc_prompt = r'[\r\n]*\[.*?\]# $'
    # FTD level prompts
    self.prompt.fireos_prompt = r'[\r\n]*(\x1bE\x1b\[J)?> $'
    self.prompt.expert_cli = r'[\r\n]*(\x1b\[18t)?admin@.*?\$ $'
    self.prompt.sudo_prompt = r'[\r\n]*root@.*?# $'
    # FIX: these three patterns were plain strings containing \d, \w and
    # \( — invalid escape sequences in non-raw strings (DeprecationWarning
    # today, SyntaxError in future Pythons). Made raw like the others.
    self.prompt.disable_prompt = r'[\r\n]*({}|ftd\d*|firepower\d*|sensor\d*)> $'.format(
        app_hostname)
    self.prompt.enable_prompt = r'[\r\n]*({}|ftd\d*|firepower\d*|sensor\d*)# $'.format(
        app_hostname)
    self.prompt.config_prompt = r'[\r\n]*({}|ftd\d*|firepower\d*|sensor\d*)[\w]*\([\w\-]+\)# $'.format(
        app_hostname)
    # ASA level prompts
    self.prompt.asa_prompt = r'[\r\n]*asa.*?[>#] $'
def test_create_image_put_v2_bad_delete(self, mock_image_client,
                                        mock_is_client_version):
    """When the v2 image-data PUT fails with an HTTP error, the error
    propagates and the partially-created image record is deleted.
    """
    mock_is_client_version.return_value = True
    self.cloud.image_api_use_tasks = False
    mock_image_client.get.return_value = []
    self.assertEqual([], self.cloud.list_images())
    # Payload expected in the POST that creates the image record.
    args = {
        'name': '42 name',
        'container_format': 'bare',
        'disk_format': 'qcow2',
        'owner_specified.shade.md5': mock.ANY,
        'owner_specified.shade.sha256': mock.ANY,
        'owner_specified.shade.object': 'images/42 name',
        'visibility': 'private',
        'min_disk': 0,
        'min_ram': 0
    }
    ret = munch.Munch(args.copy())
    ret['id'] = '42'
    ret['status'] = 'success'
    # Sequence of GET responses consumed during create/list.
    mock_image_client.get.side_effect = [
        [],
        [ret],
        [ret],
    ]
    mock_image_client.post.return_value = ret
    # The data upload itself fails.
    mock_image_client.put.side_effect = exc.OpenStackCloudHTTPError(
        "Some error", {})
    self.assertRaises(exc.OpenStackCloudHTTPError,
                      self._call_create_image,
                      '42 name',
                      min_disk='0',
                      min_ram=0)
    mock_image_client.post.assert_called_with('/images', json=args)
    mock_image_client.put.assert_called_with(
        '/images/42/file',
        headers={'Content-Type': 'application/octet-stream'},
        data=mock.ANY)
    # Cleanup: the broken image record must be removed.
    mock_image_client.delete.assert_called_with('/images/42')
def _normalize_project(self, project):
    """Normalize a keystone project (v2 tenant or v3 project) into a Munch."""
    # Work on a copy: unittests share dicts between cases.
    source = project.copy()

    # Strip novaclient artifacts before reading real fields.
    self._remove_novaclient_artifacts(source)

    # Fields present in both v2 and v3.
    project_id = source.pop('id')
    name = source.pop('name', '')
    description = source.pop('description', '')
    is_enabled = source.pop('enabled', True)

    # v3-only additions with sensible fallbacks.
    domain_id = source.pop('domain_id', 'default')
    parent_id = source.pop('parent_id', None)
    is_domain = source.pop('is_domain', False)

    # Projects have a special relationship with location.
    location = self._get_identity_location()
    location['project']['domain_id'] = domain_id
    location['project']['id'] = parent_id

    ret = munch.Munch(
        location=location,
        id=project_id,
        name=name,
        description=description,
        is_enabled=is_enabled,
        is_domain=is_domain,
        domain_id=domain_id,
        properties=source.copy(),
    )

    # Backwards compat: outside strict mode, mirror legacy keys and any
    # leftover properties at top level.
    if not self.strict_mode:
        ret['enabled'] = is_enabled
        ret['parent_id'] = parent_id
        for prop_key, prop_val in ret['properties'].items():
            ret.setdefault(prop_key, prop_val)
    return ret
def test_associate_lb_fip_id_not_exist_neutron_exception(self):
    """associate_pub_ip propagates SDK errors raised while updating the FIP."""
    cls = d_lb_public_ip.FloatingIpServicePubIPDriver
    m_driver = mock.Mock(spec=cls)
    m_driver._drv_pub_ip = public_ip.FipPubIpDriver()
    os_net = self.useFixture(k_fix.MockNetworkClient()).client
    # The neutron-side update blows up with a generic SDK exception.
    os_net.update_ip.side_effect = os_exc.SDKException

    fip = munch.Munch({
        'floating_ip_address': '1.2.3.5',
        'id': 'ec29d641-fec4-4f67-928a-124a76b3a888',
    })
    vip_port_id = 'ec29d641-fec4-4f67-928a-124a76b3a777'
    service_pub_ip_info = {
        'alloc_method': 'pool',
        'ip_id': fip.id,
        'ip_addr': fip.floating_ip_address,
    }

    self.assertRaises(os_exc.SDKException, cls.associate_pub_ip,
                      m_driver, service_pub_ip_info, vip_port_id)
def test_create_subnet_existing(self):
    """An already-existing subnet is returned instead of creating a new one."""
    cls = subnet_drv.NamespacePodSubnetDriver
    m_driver = mock.MagicMock(spec=cls)
    namespace = 'test'
    project_id = mock.sentinel.project_id
    net_id = mock.sentinel.net_id
    existing = munch.Munch({
        'id': mock.sentinel.subnet,
        'cidr': mock.sentinel.cidr,
    })
    os_net = self.useFixture(k_fix.MockNetworkClient()).client
    os_net.subnets.return_value = iter([existing])

    subnet_id, subnet_cidr = cls.create_subnet(
        m_driver, namespace, project_id, net_id)

    self.assertEqual(subnet_id, existing['id'])
    self.assertEqual(subnet_cidr, existing['cidr'])
    # Found a match, so nothing should be created.
    os_net.create_subnet.assert_not_called()
    os_net.subnets.assert_called_once()
def create_deal_with_decimal_value(self, **attributes):
    """Create a deal whose value is a decimal string, without hitting the wire.

    Temporarily stubs the HTTP layer so the API "returns" the deal payload
    unchanged, then restores the real request function.

    Args:
        **attributes: overrides merged into the default deal payload.

    Returns:
        The deal object produced by the client's create() call.
    """
    payload = {
        'id': rand(),
        'currency': "EUR",
        'hot': True,
        'name': 'Website Redesign' + rand(),
        'tags': ["important"],
        'value': '11.12',
        'contact_id': self.create_contact().id,
    }
    payload.update(attributes)
    client = self.client
    original_request_func = client.http_client.request
    client.http_client.request = (
        lambda *args, **kwargs: (200, {}, munch.Munch(payload)))
    try:
        return self.client.deals.create(**payload)
    finally:
        # FIX: restore the real transport even if create() raises;
        # previously an exception left the client permanently stubbed.
        client.http_client.request = original_request_func
def test_request_vifs(self, m_to_vif):
    """request_vifs builds one bulk port request and converts every
    returned port into a VIF via the mocked converter.
    """
    cls = neutron_vif.NeutronPodVIFDriver
    cls._tag_on_creation = True
    m_driver = mock.Mock(spec=cls)
    os_net = self.useFixture(k_fix.MockNetworkClient()).client
    pod = mock.sentinel.pod
    project_id = mock.sentinel.project_id
    subnets = mock.sentinel.subnets
    security_groups = mock.sentinel.security_groups
    num_ports = 2
    port_request = mock.sentinel.port_request
    m_driver._get_port_request.return_value = port_request
    port = munch.Munch({'id': '910b1183-1f4a-450a-a298-0e80ad06ec8b'})
    vif_plugin = mock.sentinel.vif_plugin
    port.binding_vif_type = vif_plugin
    vif = mock.sentinel.vif
    # The bulk request repeats the same port request num_ports times.
    bulk_rq = {'ports': [port_request for _ in range(num_ports)]}
    # create_ports yields the same port twice (generator, consumed once).
    os_net.create_ports.return_value = (p for p in [port, port])
    m_to_vif.return_value = vif
    semaphore = mock.MagicMock(spec=eventlet.semaphore.Semaphore(20))
    self.assertEqual([vif, vif],
                     cls.request_vifs(m_driver, pod, project_id, subnets,
                                      security_groups, num_ports,
                                      semaphore))
    m_driver._get_port_request.assert_called_once_with(
        pod, project_id, subnets, security_groups, unbound=True)
    os_net.create_ports.assert_called_once_with(bulk_rq)
    # One conversion call per created port, same plugin and subnets.
    calls = [
        mock.call(vif_plugin, port, subnets),
        mock.call(vif_plugin, port, subnets)
    ]
    m_to_vif.assert_has_calls(calls)
def test_get_storage_node_exists(self):
    """_get_storage_node maps the portal's flat storage-node object back
    into the module's nested dict layout (bucket_info sub-dict).
    """
    expected_storage_node_dict = dict(
        name='Example',
        bucket_info=dict(bucket_type=portal_enum.BucketType.AWS,
                         bucket='ctera_bucket',
                         direct=True,
                         access_key='ACCESS_KEY',
                         secret_key='SECRET',
                         endpoint='s3.example.com',
                         https=True),
        read_only=True,
        dedicated_to='Main',
    )
    # Build the portal-side (flat, camelCase) representation by renaming
    # and flattening keys from the expected module-side dict.
    storage_node_obj_dict = copy.deepcopy(expected_storage_node_dict)
    storage_node_obj_dict['readOnly'] = storage_node_obj_dict.pop(
        'read_only')
    storage_node_obj_dict['dedicatedPortal'] = storage_node_obj_dict.pop(
        'dedicated_to')
    storage_node_obj_dict['storage'] = storage_node_obj_dict[
        'bucket_info']['bucket_type']
    storage_node_obj_dict['bucket'] = storage_node_obj_dict['bucket_info'][
        'bucket']
    storage_node_obj_dict['directUpload'] = storage_node_obj_dict[
        'bucket_info']['direct']
    storage_node_obj_dict['awsAccessKey'] = storage_node_obj_dict[
        'bucket_info']['access_key']
    storage_node_obj_dict['awsSecretKey'] = storage_node_obj_dict[
        'bucket_info']['secret_key']
    storage_node_obj_dict['s3Endpoint'] = storage_node_obj_dict[
        'bucket_info']['endpoint']
    storage_node_obj_dict['httpsOnly'] = storage_node_obj_dict[
        'bucket_info']['https']
    storage_node_obj_dict.pop('bucket_info')
    storage_node = ctera_portal_storage_node.CteraPortalStorageNode()
    storage_node.parameters = dict(name=expected_storage_node_dict['name'])
    # The portal lookup returns the flat object; the module must convert
    # it back into the nested expected form.
    storage_node._ctera_portal.buckets.get = mock.MagicMock(
        return_value=munch.Munch(storage_node_obj_dict))
    self.assertDictEqual(expected_storage_node_dict,
                         storage_node._get_storage_node())
def _normalize_secgroup_rule(self, rule):
    """Normalize a security group rule (neutron or nova style) into a Munch.

    Accepts either neutron keys (port_range_min/max, remote_ip_prefix,
    security_group_id) or nova-style keys (from_port/to_port, ip_range,
    parent_group_id) and produces a uniform representation.
    """
    ret = munch.Munch()
    # Copy incoming rule because of shared dicts in unittests
    rule = rule.copy()
    ret['id'] = rule.pop('id')
    ret['direction'] = rule.pop('direction', 'ingress')
    ret['ethertype'] = rule.pop('ethertype', 'IPv4')
    # NOTE(review): uses .get (not .pop) for port_range_min, so a neutron
    # 'port_range_min' also survives into properties — kept as-is.
    port_range_min = rule.get('port_range_min',
                              rule.pop('from_port', None))
    if port_range_min == -1:
        # -1 is the nova sentinel for "no port restriction".
        port_range_min = None
    if port_range_min is not None:
        port_range_min = int(port_range_min)
    ret['port_range_min'] = port_range_min
    port_range_max = rule.pop('port_range_max', rule.pop('to_port', None))
    if port_range_max == -1:
        port_range_max = None
    # BUG FIX: this block previously re-tested and re-converted
    # port_range_min, leaving port_range_max un-normalized (e.g. a string
    # 'to_port' stayed a string).
    if port_range_max is not None:
        port_range_max = int(port_range_max)
    ret['port_range_max'] = port_range_max
    ret['protocol'] = rule.pop('protocol', rule.pop('ip_protocol', None))
    ret['remote_ip_prefix'] = rule.pop(
        'remote_ip_prefix', rule.pop('ip_range', {}).get('cidr', None))
    ret['security_group_id'] = rule.pop('security_group_id',
                                        rule.pop('parent_group_id', None))
    ret['remote_group_id'] = rule.pop('remote_group_id', None)
    project_id = rule.pop('tenant_id', '')
    project_id = rule.pop('project_id', project_id)
    ret['location'] = self._get_current_location(project_id=project_id)
    ret['properties'] = rule
    # Backwards compat with Neutron
    if not self.strict_mode:
        ret['tenant_id'] = project_id
        ret['project_id'] = project_id
        for key, val in ret['properties'].items():
            ret.setdefault(key, val)
    return ret
def test_update_user_password_v2(self, mock_keystone, mock_api_version):
    """On keystone v2, update_user() updates profile and password via
    two distinct keystone calls."""
    mock_api_version.return_value = '2'
    name = 'Mickey Mouse'
    email = '*****@*****.**'
    password = '******'
    domain_id = '1'
    user_dict = {'id': '1', 'name': name, 'email': email}
    fake_user = fakes.FakeUser(**user_dict)
    munch_fake_user = munch.Munch(user_dict)
    mock_keystone.users.list.return_value = [fake_user]
    mock_keystone.users.get.return_value = fake_user
    mock_keystone.users.update.return_value = fake_user
    mock_keystone.users.update_password.return_value = fake_user

    user = self.cloud.update_user(
        name, name=name, email=email, password=password,
        domain_id=domain_id)

    # Profile fields and the password go through separate endpoints.
    mock_keystone.users.update.assert_called_once_with(
        user=munch_fake_user, name=name, email=email)
    mock_keystone.users.update_password.assert_called_once_with(
        user=munch_fake_user, password=password)
    self.assertEqual(name, user.name)
    self.assertEqual(email, user.email)
def test_list_images(self, mock_image_client):
    """list_images caches its result until explicitly invalidated."""
    mock_image_client.get.return_value = []
    self.assertEqual([], self.cloud.list_images())

    fake_image = munch.Munch(
        id='42',
        status='success',
        name='42 name',
        container_format='bare',
        disk_format='qcow2',
        properties={
            'owner_specified.shade.md5': mock.ANY,
            'owner_specified.shade.sha256': mock.ANY,
            'owner_specified.shade.object': 'images/42 name',
            'is_public': False,
        },
    )
    mock_image_client.get.return_value = [fake_image]
    # Still the cached (empty) result from the first call.
    self.assertEqual([], self.cloud.list_images())

    # After invalidation the new image shows up.
    self.cloud.list_images.invalidate(self.cloud)
    self.assertEqual(self._munch_images(fake_image),
                     self.cloud.list_images())
    mock_image_client.get.assert_called_with('/images')
def test_create_image_put_v1(self, mock_image_client,
                             mock_is_client_version):
    """v1 image creation: POST the record, PUT the data with v1 headers,
    then list via /images/detail.
    """
    # TODO(mordred) Fix this to use requests_mock
    mock_is_client_version.return_value = False
    mock_image_client.get.return_value = []
    self.assertEqual([], self.cloud.list_images())
    # v1 keeps shade metadata nested under 'properties'.
    args = {
        'name': '42 name',
        'container_format': 'bare',
        'disk_format': 'qcow2',
        'properties': {
            'owner_specified.shade.md5': mock.ANY,
            'owner_specified.shade.sha256': mock.ANY,
            'owner_specified.shade.object': 'images/42 name',
            'is_public': False
        }
    }
    ret = munch.Munch(args.copy())
    ret['id'] = '42'
    ret['status'] = 'success'
    # Sequence of GET responses consumed during create/list.
    mock_image_client.get.side_effect = [
        [],
        [ret],
        [ret],
    ]
    mock_image_client.post.return_value = ret
    mock_image_client.put.return_value = ret
    self._call_create_image('42 name')
    mock_image_client.post.assert_called_with('/images', json=args)
    # v1 data upload carries checksum and purge-props headers.
    mock_image_client.put.assert_called_with(
        '/images/42',
        data=mock.ANY,
        headers={
            'x-image-meta-checksum': mock.ANY,
            'x-glance-registry-purge-props': 'false'
        })
    mock_image_client.get.assert_called_with('/images/detail', params={})
    self.assertEqual(self._munch_images(ret), self.cloud.list_images())
def test_acquire_service_pub_ip_info_usr_specified_ip(self):
    """A user-specified LB IP matching a free FIP yields alloc_method 'user'."""
    cls = d_lb_public_ip.FloatingIpServicePubIPDriver
    m_driver = mock.Mock(spec=cls)
    m_driver._drv_pub_ip = public_ip.FipPubIpDriver()
    os_net = self.useFixture(k_fix.MockNetworkClient()).client
    # A free (port-less) FIP whose address matches the requested LB IP.
    fip = munch.Munch({
        'floating_ip_address': '1.2.3.4',
        'port_id': None,
        'id': 'a2a62ea7-e3bf-40df-8c09-aa0c29876a6b',
    })
    os_net.ips.return_value = (ip for ip in [fip])

    project_id = mock.sentinel.project_id
    spec_type = 'LoadBalancer'
    spec_lb_ip = '1.2.3.4'
    expected_resp = obj_lbaas.LBaaSPubIp(
        ip_id=fip.id,
        ip_addr=fip.floating_ip_address,
        alloc_method='user')

    result = cls.acquire_service_pub_ip_info(
        m_driver, spec_type, spec_lb_ip, project_id)

    self.assertEqual(result, expected_resp)
def test_find_loadbalancer_error(self):
    """A load balancer in ERROR state is released and reported as missing."""
    lbaas = self.useFixture(k_fix.MockLBaaSClient()).client
    cls = d_lbaasv2.LBaaSv2Driver
    m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
    loadbalancer = obj_lbaas.LBaaSLoadBalancer(
        name='TEST_NAME', project_id='TEST_PROJECT', ip='1.2.3.4',
        subnet_id='D3FA400A-F543-4B91-9CD3-047AF0CE42D1')
    loadbalancer_id = '00EE9E11-91C2-41CF-8FD4-7970579E5C4C'
    lbaas.load_balancers.return_value = iter([
        o_lb.LoadBalancer(id=loadbalancer_id,
                          provider='haproxy',
                          provisioning_status='ERROR')])
    m_driver._get_vip_port.return_value = munch.Munch(
        {'id': mock.sentinel.port_id})

    ret = cls._find_loadbalancer(m_driver, loadbalancer)

    lbaas.load_balancers.assert_called_once_with(
        name=loadbalancer.name,
        project_id=loadbalancer.project_id,
        vip_address=str(loadbalancer.ip),
        vip_subnet_id=loadbalancer.subnet_id)
    # ERROR status means no usable LB: released, and None returned.
    self.assertIsNone(ret)
    m_driver.release_loadbalancer.assert_called_once()
def test_contract_overdue_predicate(self):
    """Predicate holds only for the matching status with an expired signingPeriod."""
    need_status = 'need_status'
    end_data = get_now()
    item_end_data = get_now() - timedelta(days=1)
    item = munch.Munch({
        'status': need_status,
        'signingPeriod': {'endDate': item_end_data},
    })

    # Expired signing period + matching status -> overdue.
    self.assertEqual(
        contract_overdue_predicate(item, need_status, end_data), True)

    # Wrong status -> not overdue.
    item.status = 'differ_status'
    self.assertEqual(
        contract_overdue_predicate(item, need_status, end_data), False)

    # Matching status but signing period still open -> not overdue.
    item.status = 'need_status'
    item.signingPeriod['endDate'] = end_data + timedelta(days=1)
    self.assertEqual(
        contract_overdue_predicate(item, need_status, end_data), False)
def test_do_list_admin_token(self, conf, rinp):
    """ Test the do_list_admin_token function of pagure-admin. """
    # Create an admin token to use
    conf.return_value = True
    rinp.return_value = '1,2,3'
    args = munch.Munch({'user': '******'})
    pagure.cli.admin.do_create_admin_token(args)

    # Retrieve all tokens
    output = _get_ouput(
        ['python', PAGURE_ADMIN, 'admin-token', 'list'])[0]
    self.assertNotEqual(output, 'No user "pingou" found\n')
    self.assertEqual(len(output.split('\n')), 2)
    self.assertIn(' -- pingou -- ', output)

    # Retrieve pfrields's tokens
    output = _get_ouput([
        'python', PAGURE_ADMIN, 'admin-token', 'list',
        '--user', 'pfrields',
    ])[0]
    self.assertEqual(output, 'No admin tokens found\n')
def test_neutron_to_osvif_subnet(self, m_conv_routes):
    """Neutron subnet fields are carried over into the os-vif Subnet."""
    gateway = '1.1.1.1'
    cidr = '1.1.1.1/8'
    dns = ['2.2.2.2', '3.3.3.3']
    host_routes = mock.sentinel.host_routes
    route_list = osv_route.RouteList(objects=[
        osv_route.Route(cidr='4.4.4.4/8', gateway='5.5.5.5')])
    m_conv_routes.return_value = route_list
    neutron_subnet = munch.Munch({
        'cidr': cidr,
        'dns_nameservers': dns,
        'host_routes': host_routes,
        'gateway_ip': gateway,
    })

    subnet = ovu.neutron_to_osvif_subnet(neutron_subnet)

    self.assertEqual(cidr, str(subnet.cidr))
    self.assertEqual(route_list, subnet.routes)
    # DNS entry order is not significant, so compare as sets.
    self.assertEqual(set(dns), set([str(addr) for addr in subnet.dns]))
    self.assertEqual(gateway, str(subnet.gateway))
    m_conv_routes.assert_called_once_with(host_routes)
def test_cmd_run(self):
    """The 'run' command schedules the auction and drives the scheduler."""
    args = munch.Munch({
        'cmd': 'run',
        'auction_worker_config': 'path/to/config',
        'with_api_version': 'another api version',
        'auction_doc_id': '1' * 32,
        'debug': False,
    })
    self.mocked_parser_obj.parse_args.return_value = args

    main()

    # Scheduler lifecycle: started once, shut down once, no args.
    self.assertEqual(self.mocked_SCHEDULER.start.call_count, 1)
    self.mocked_SCHEDULER.start.assert_called_with()
    self.assertEqual(self.mocked_SCHEDULER.shutdown.call_count, 1)
    self.mocked_SCHEDULER.shutdown.assert_called_with()
    # Auction: scheduled and waited for exactly once each.
    self.assertEqual(self.auction_instance.schedule_auction.call_count, 1)
    self.auction_instance.schedule_auction.assert_called_with()
    self.assertEqual(self.auction_instance.wait_to_end.call_count, 1)
    self.auction_instance.wait_to_end.assert_called_with()
def set_session():  # pragma: no cover
    """Set the flask session as permanent.

    Also mirrors the logged-in OIDC user into ``flask.session.fas_user``
    and ``flask.g.fas_user`` (or clears both when logged out).
    """
    # FIX: coverage.py only honors the exact 'pragma: no cover' spelling;
    # the previous 'no-cover' form was silently ignored.
    flask.session.permanent = True
    if OIDC.user_loggedin:
        if not hasattr(flask.session, 'fas_user') \
                or not flask.session.fas_user:
            flask.session.fas_user = munch.Munch({
                'username': OIDC.user_getfield('nickname'),
                'email': OIDC.user_getfield('email') or '',
                'timezone': OIDC.user_getfield('zoneinfo'),
                'cla_done':
                    'http://admin.fedoraproject.org/accounts/cla/done'
                    in (OIDC.user_getfield('cla') or []),
            })
        flask.g.fas_user = flask.session.fas_user
    else:
        flask.session.fas_user = None
        flask.g.fas_user = None
def test_ui_events_UserInputEvent(data):
    """Two KeyboardEvents built from identical tdl keys compare equal."""
    import tcod as tdl
    from kelte.ui.event import KeyboardEvent

    data = munch.Munch(data)
    tdl_keys = {
        name.replace("KEY_", "").lower(): getattr(tdl, name)
        for name in dir(tdl)
        if name.startswith("KEY_")
    }
    TCODK_CHAR = 0x41  # This should mean a single character enum
    tdl_value = tdl_keys.get(data.key[0], TCODK_CHAR)
    key_args = (tdl_value, *data.key)

    # tdl.Keys are not comparable, so we fudge this by testing
    # equality across two KeyboardEvents
    first_event = KeyboardEvent()
    second_event = KeyboardEvent()
    first_event.tdl_key = tdl.libtcodpy.Key(*key_args)
    second_event.tdl_key = tdl.libtcodpy.Key(*key_args)
    assert first_event == second_event
def test_create_subnet_existing(self):
    """Reuses a subnet that already exists for the namespace network."""
    cls = subnet_drv.NamespacePodSubnetDriver
    m_driver = mock.MagicMock(spec=cls)
    ns_uid = '7f3a59b4-dd81-490d-9904-8294a6c93326'
    namespace = {'metadata': {'name': 'test', 'uid': ns_uid}}
    project_id = mock.sentinel.project_id
    net_id = mock.sentinel.net_id
    os_net = self.useFixture(k_fix.MockNetworkClient()).client
    found_subnet = munch.Munch({
        'id': mock.sentinel.subnet,
        'cidr': mock.sentinel.cidr,
    })
    os_net.subnets.return_value = iter([found_subnet])

    subnet_id, subnet_cidr = cls.create_subnet(
        m_driver, namespace, project_id, net_id)

    self.assertEqual(subnet_id, found_subnet['id'])
    self.assertEqual(subnet_cidr, found_subnet['cidr'])
    # A matching subnet exists, so nothing new is created.
    os_net.create_subnet.assert_not_called()
    os_net.subnets.assert_called_once()
def _test__execute(self, is_first_login, with_email):
    """Shared driver for first-user creation tests.

    Args:
        is_first_login: what the mocked filer reports for ``isfirstlogin``.
        with_email: whether an email is included in the module parameters.
    """
    first_user = ctera_filer_first_user.CteraFilerFirstUser()
    username = '******'
    password = '******'
    email = '*****@*****.**'
    first_user.parameters = dict(ctera_user=username,
                                 ctera_password=password)
    if with_email:
        first_user.parameters['email'] = email
    first_user._ctera_filer.get = mock.MagicMock(return_value=munch.Munch(
        isfirstlogin=is_first_login))
    first_user._execute()
    if is_first_login:
        # First login: the user must actually be created; the email
        # argument falls back to '' when not provided.
        first_user._ctera_filer.users.add_first_user.assert_called_once_with(
            username, password, email=email if with_email else '')
        self.assertEqual(first_user.ansible_return_value.param.msg,
                         'User created')
        self.assertTrue(first_user.ansible_return_value.param.changed)
    else:
        # Already initialized: no creation, only an informational result.
        self.assertEqual(first_user.ansible_return_value.param.msg,
                         'First user was already created')
        self.assertEqual(first_user.ansible_return_value.param.user,
                         username)
def test_create_image_put_v1_bad_delete(self, mock_image_client,
                                        mock_is_client_version):
    """When the v1 image-data PUT fails with an HTTP error, the error
    propagates and the partially-created image record is deleted.
    """
    mock_is_client_version.return_value = False
    mock_image_client.get.return_value = []
    self.assertEqual([], self.cloud.list_images())
    # v1 keeps shade metadata nested under 'properties'.
    args = {
        'name': '42 name',
        'container_format': 'bare',
        'disk_format': 'qcow2',
        'properties': {
            'owner_specified.shade.md5': mock.ANY,
            'owner_specified.shade.sha256': mock.ANY,
            'owner_specified.shade.object': 'images/42 name',
            'is_public': False
        }
    }
    ret = munch.Munch(args.copy())
    ret['id'] = '42'
    ret['status'] = 'success'
    # Sequence of GET responses consumed during the create attempt.
    mock_image_client.get.side_effect = [
        [],
        [ret],
    ]
    mock_image_client.post.return_value = ret
    # The data upload itself fails.
    mock_image_client.put.side_effect = exc.OpenStackCloudHTTPError(
        "Some error", {})
    self.assertRaises(exc.OpenStackCloudHTTPError,
                      self._call_create_image, '42 name')
    mock_image_client.post.assert_called_with('/images', json=args)
    mock_image_client.put.assert_called_with(
        '/images/42',
        data=mock.ANY,
        headers={
            'x-image-meta-checksum': mock.ANY,
            'x-glance-registry-purge-props': 'false'
        })
    # Cleanup: the broken image record must be removed.
    mock_image_client.delete.assert_called_with('/images/42')