def test_responder_call_hashcat(self, mock_os, mock_subprocess):
    """Verify call_hashcat builds and launches the expected hashcat command line.

    Stubs out every handler helper (EULA, configs, out/junk/temp files) so the
    only observable effect is the argv passed to subprocess.Popen.
    """
    self.handler.accept_eula = Mock()
    self.handler.get_configs = Mock()
    # configs: (hashcat binary, ruleset path, wordlist path)
    self.handler.get_configs.return_value = ['hashcat', 'ruleset', 'wordlist']
    outfile = Mock()
    outfile.name = 'outfile'
    self.handler.get_outfile = Mock()
    self.handler.get_outfile.return_value = outfile
    junkfile = Mock()
    junkfile.name = 'junkfile'
    self.handler.get_junkfile = Mock()
    self.handler.get_junkfile.return_value = junkfile
    tempfile = Mock()
    tempfile.name = 'tempfile'
    self.handler.get_tempfile = Mock()
    self.handler.get_tempfile.return_value = tempfile
    self.handler.outpath = 'outpath'
    # Pretend every path exists so no setup branches are taken.
    mock_os.path = Mock()
    mock_os.path.exists.return_value = True
    mock_subprocess.Popen = Mock()
    self.handler.call_hashcat('5600', 'SMB-NTLMv2.txt')
    # First positional arg of Popen is the argv list.
    self.assertEqual(['hashcat', '-m', '5600', '-r', 'ruleset', '-o', 'outfile', 'junkfile', 'wordlist'],
                     mock_subprocess.Popen.call_args[0][0])
def test_print_instance(colorize):
    """print_instance_summary should render name, type, owner tag, groups and zone.

    ``colorize`` is a patched helper; every colorized field comes back as
    'fancy-cloud' in the expected output.
    """
    colorize.return_value = 'fancy-cloud'
    # as name is an argument in the Mock constructor
    # we have to set it on an instance
    group1 = Mock()
    group1.name = 'group1'
    group2 = Mock()
    group2.name = 'group2'
    groups = [group1, group2]
    instance = Mock(
        name='cloud',
        instance_type='fancy',
        status='happy',
        tags={'owner': 'ofs'},
        # BUG FIX: the original used datetime.datetime(2000, 01, 01);
        # leading-zero integer literals are Python 2 octal syntax and a
        # SyntaxError on Python 3. Plain decimals are equivalent.
        launch_time=datetime.datetime(2000, 1, 1),
        groups=groups,
        availability_zone='cloud-zone'
    )
    expected = [
        'fancy-cloud',
        'fancy',
        'fancy-cloud',
        'ofs',
        'fancy-cloud',
        'group1,group2',
        'cloud-zone',
    ]
    result = print_instance_summary(instance)
    assert result == expected
def test__has_access_to_location(self):
    """Exercise staff/instructor access decisions for a course location.

    Access is granted to global staff, or to users whose group membership
    matches the location's staff_/instructor_ group names.
    """
    location = Location("i4x://edX/toy/course/2012_Fall")
    # No user at all: no staff access.
    self.assertFalse(access._has_access_to_location(None, location, "staff", None))
    # Unauthenticated user: no staff access.
    u = Mock()
    u.is_authenticated.return_value = False
    self.assertFalse(access._has_access_to_location(u, location, "staff", None))
    # Global (Django) staff get instructor access everywhere.
    u = Mock(is_staff=True)
    self.assertTrue(access._has_access_to_location(u, location, "instructor", None))
    # A user has staff access if they are in the staff group
    u = Mock(is_staff=False)
    g = Mock()
    g.name = "staff_edX/toy/2012_Fall"
    u.groups.all.return_value = [g]
    self.assertTrue(access._has_access_to_location(u, location, "staff", None))
    # A user has staff access if they are in the instructor group
    g.name = "instructor_edX/toy/2012_Fall"
    self.assertTrue(access._has_access_to_location(u, location, "staff", None))
    # A user has instructor access if they are in the instructor group
    g.name = "instructor_edX/toy/2012_Fall"
    self.assertTrue(access._has_access_to_location(u, location, "instructor", None))
    # A user does not have staff access if they are
    # not in either the staff or the instructor group
    g.name = "student_only"
    self.assertFalse(access._has_access_to_location(u, location, "staff", None))
    # A user does not have instructor access if they are
    # not in the instructor group
    g.name = "student_only"
    self.assertFalse(access._has_access_to_location(u, location, "instructor", None))
def test_server_sends_marshaled_exception_when_callback_raises_exception(self, mock_command_response):
    """When the command callback raises, the server must marshal an error response."""
    request = '{"name": "foobar", "arg": {"arg1": "value"}}'
    expected = '{"error": "raise me!", "value": null}'
    # CommandResponse the server is expected to construct from the exception.
    command_response = Mock(CommandResponse)
    command_response.error = "raise me!"
    command_response.value = None
    mock_command_response.return_value = command_response
    # The registered callback blows up; the server should catch and convert it.
    callback = Mock(side_effect=BusCtlServerError())
    marshaler = Mock(Marshaler)
    server = BusCtlServer()
    server._marshaler = marshaler
    command = Mock()
    command.name = "foobar"
    marshaler.unmarshal_command.return_value = command
    marshaler.marshal_response.return_value = expected
    command_class = Mock()
    command_class.name = "foobar"
    server.add_command(command_class, callback)
    response = server._process_next_command(request)
    self.assertEqual(response, expected)
    # The error response object must be the one that gets marshaled.
    marshaler.marshal_response.assert_called_once_with(command_response)
def make_bigip():
    """Build a Mock BIG-IP preloaded with one virtual server and its address.

    The mock also carries every fixture value as a plain attribute so tests
    can read the expected values straight off the returned object.
    """
    fixture = {
        'vip_port': '1010',
        'vaddr': '192.168.1.1',
        'netmask': '255.255.255.0',
        'protocol': 'HTTP',
        'lb_id': 'TEST_FOO',
        'fwd_name': 'hello',
        'name': 'name',
        'partition': 'Common',
    }
    fixture['dest'] = "{}/{}:{}".format(
        fixture['fwd_name'], fixture['lb_id'], fixture['vip_port'])

    bigip = Mock()
    virtual_server = Mock()
    virtual_address = Mock()

    # One virtual server in the collection, configured from the fixture.
    bigip.tm.ltm.virtuals.get_collection.return_value = [virtual_server]
    virtual_server.destination = fixture['dest']
    virtual_server.mask = fixture['netmask']
    virtual_server.ipProtocol = fixture['protocol']
    virtual_server.name = fixture['name']

    # Loading the virtual address yields its raw address dict.
    bigip.tm.ltm.virtual_address_s.virtual_address.load.return_value = virtual_address
    virtual_address.raw = dict(address=fixture['vaddr'])

    # Mirror every fixture value on the bigip mock itself.
    for attr, value in fixture.items():
        setattr(bigip, attr, value)
    return bigip
def test_resource_offers_creates_nodes(self, mock_get_slave):
    """A resource offer for an unknown host must create a Node record."""
    mock_get_slave.return_value = SlaveInfo(u'localhost', 5151)
    # Build the cpus/disk/mem resources attached to the offer.
    mock_cpu = Mock()
    mock_cpu.name = u'cpus'
    mock_cpu.value = 10
    mock_disk = Mock()
    mock_disk.name = u'disk'
    mock_disk.value = 4000
    mock_mem = Mock()
    mock_mem.name = u'mem'
    mock_mem.value = 100000
    mock_offer = Mock()
    mock_offer.id.value = 1
    mock_offer.slave_id.value = 1
    mock_offer.hostname.value = u'localhost'
    mock_offer.resources = [mock_cpu, mock_disk, mock_mem]
    offers = [mock_offer]
    my_scheduler, driver, master_info = self._get_mocked_scheduler_driver_master()
    # Precondition: the node does not exist yet.
    localhost_exists = Node.objects.filter(hostname=u'localhost').exists()
    self.assertFalse(localhost_exists, 'there should not be a node before the first offer')
    my_scheduler.resourceOffers(driver, offers)
    # The offer should have created the node.
    localhost_exists = Node.objects.all().exists()
    self.assertTrue(localhost_exists, 'there should be a node after the first offer')
def test_with_two_valid_issues(self, mock1, mock2): ''' A commit with two issues with correct labels ''' # Mock the commit message mock1.return_value = ( 'This is a test commit\n\n' 'Fixes: #1234\n' 'Updates: #4567') commit_msg = commit.get_commit_message() c = commit.CommitHandler('glusterfs') issues = c.parse_commit_message(commit_msg) self.assertEqual(issues, [{ 'id': '1234', 'status': 'Fixes' }, { 'id': '4567', 'status': 'Updates' }] ) # Handle a valid issue mock2.side_effect = None ghub = handle_github.GitHubHandler('glusterfs', True) ghub.ghub = Mock(name='mockedgithub') label1 = Mock(name='mockedlabel') label1.name = 'SpecApproved' label2 = Mock(name='mockedlabel') label2.name = 'DocApproved' ghub.ghub.issue.return_value.labels = [label1, label2] for issue in issues: self.assertTrue(ghub.check_issue(issue['id']))
def test_gameplay(self):
    """Play a full game through TOTAL_ROUNDS and check the assembled story."""
    host = Mock(spec=Player)
    player1 = Mock(spec=Player)
    player1.name = "Jeb"
    player2 = Mock(spec=Player)
    player2.name = "Zedd"
    game = GameFactory(Mock(new_id=Mock(return_value="0000")), Mock(spec=NotificationManager)).new_game(host)
    game.register_player(player1)
    game.register_player(player2)
    game = game.start()
    expected_story = ""
    first_player_prompt = "First player prompt"
    second_player_prompt = "Second player prompt"
    for i in range(0, TOTAL_ROUNDS):
        game = game.receive_prompt(Prompt(first_player_prompt, player1.name))
        game = game.receive_prompt(Prompt(second_player_prompt, player2.name))
        # Alternate which player's prompt is chosen each round, and keep a
        # running copy of the story the game should be building.
        choice = first_player_prompt if i % 2 == 0 else second_player_prompt
        expected_story = "{} {}".format(expected_story, choice)
        game = game.choose_prompt(ChoosePrompt(choice))
    # After the final round the game must be complete with the full story.
    self.assertIs(type(game), CompleteGame)
    self.assertEqual(game.story, expected_story)
def test_update_cluster_with_same_discovery(self):
    """Updating a cluster with its current discovery must not re-watch anything."""
    riak_discovery = Mock()
    riak_discovery.name = "riak"
    dns_discovery = Mock()
    dns_discovery.name = "dns"
    cluster = Mock()
    cluster.name = "app"
    cluster.discovery = "riak"
    cluster.config = {"discovery": "riak"}
    writer = Writer("/etc/configs")
    writer.add_configurable(Cluster, "app", cluster)
    writer.add_configurable(Discovery, "riak", riak_discovery)
    writer.add_configurable(Discovery, "dns", dns_discovery)
    # Re-apply the discovery the cluster already uses.
    writer.update_configurable(Cluster, cluster.name, {"discovery": "riak"})
    # Nodes are still marked updated…
    self.assertEqual(writer.nodes_updated.is_set(), True)
    # …but no discovery watch is stopped or started.
    self.assertEqual(riak_discovery.stop_watching.called, False)
    self.assertEqual(dns_discovery.start_watching.called, False)
def test_data(self):
    """DetailedModuleView.data() must echo every module field, including the
    datastore / datastore_version names resolved via get_datastore_version."""
    datastore = Mock()
    datastore.name = self.module.datastore
    ds_version = Mock()
    ds_version.name = self.module.datastore_version
    with patch.object(models, 'get_datastore_version',
                      Mock(return_value=(datastore, ds_version))):
        view = DetailedModuleView(self.module)
        result = view.data()
        self.assertEqual(self.module.name, result['module']['name'])
        self.assertEqual(self.module.type, result['module']['type'])
        self.assertEqual(self.module.md5, result['module']['md5'])
        self.assertEqual(self.module.created, result['module']['created'])
        self.assertEqual(self.module.updated, result['module']['updated'])
        self.assertEqual(self.module.datastore_version, result['module']['datastore_version'])
        self.assertEqual(self.module.datastore, result['module']['datastore'])
        self.assertEqual(self.module.auto_apply, result['module']['auto_apply'])
        self.assertEqual(self.module.tenant_id, result['module']['tenant_id'])
        self.assertEqual(self.module.is_admin, result['module']['is_admin'])
        self.assertEqual(self.module.priority_apply, result['module']['priority_apply'])
        self.assertEqual(self.module.apply_order, result['module']['apply_order'])
def test_peers(self):
    """PeersStanza must list each unique peer once, skipping nodes without one."""
    peer1 = Mock()
    peer1.name = "server1"
    peer1.ip = "192.168.0.13"
    peer1.port = "88"
    peer2 = Mock()
    peer2.name = "server2"
    peer2.ip = "192.168.0.22"
    peer2.port = "88"
    cluster = Mock()
    cluster.name = "a_cluster"
    cluster.nodes = [
        Mock(peer=peer1),
        Mock(peer=None),   # skipped
        Mock(peer=peer2),
        Mock(peer=peer1),  # duplicate, skipped
    ]
    stanza = PeersStanza(cluster)
    self.assertEqual(stanza.header, "peers a_cluster")
    # Compare as sets: line order within the stanza is not significant here.
    self.assertEqual(
        set(stanza.lines),
        set([
            "peer server1 192.168.0.13:88",
            "peer server2 192.168.0.22:88"
        ]))
def test_traverse_bucket(self, *mocks):
    """traverse_bucket must sample keys from a listing and ignore prefixes."""
    def ret_sample(population, offset=1):
        # offset == 1 means "sample keys"; return only the first element.
        if population and offset == 1:  # sample keys
            return [population[0]]
        else:
            return population
    with patch('plugins.S3AclPlugin.sample_population',
               side_effect=ret_sample) as MockClass:
        bucket = Mock()
        bucket.name = 'bucket1'
        prefix = Mock()
        prefix.name = 'prefix'
        key = Mock(Key)
        key.name = 'key1'
        key2 = Mock(Key)
        key2.name = 'key2'
        def ret_list(pref, slash):
            # Only the root listing returns anything; sub-prefixes are empty.
            if pref == '':
                return [key, prefix, key2]
            else:
                return []
        bucket.list.side_effect = ret_list
        self.plugin.init(Mock(), {'user': '******', 'key': 'xxx'}, {})
        # Sampling keeps only the first key; the prefix yields nothing.
        self.assertEqual(self.plugin.traverse_bucket(bucket, ''), [key])
def test_measure(self, model_mock):
    """AggregateParamParser 'measure': defaults to 'amount', accepts a single
    name or a '|'-separated list, and errors on unknown measures."""
    ds = Mock()
    amt = Mock()
    amt.name = 'amount'
    bar = Mock()
    bar.name = 'bar'
    ds.measures = [amt, bar]
    model_mock.by_name.return_value = ds
    # No measure given: defaults to 'amount'.
    out, err = AggregateParamParser({'dataset': 'foo'}).parse()
    assert out['measure'] == ['amount']
    # Explicit single measure.
    out, err = AggregateParamParser(
        {'dataset': 'foo', 'measure': 'bar'}).parse()
    assert out['measure'] == ['bar']
    # Pipe-separated list yields both measures.
    out, err = AggregateParamParser(
        {'dataset': 'foo', 'measure': 'amount|bar'}).parse()
    assert 'amount' in out['measure'], \
        "AggregateParamParser doesn't return amount measure"
    assert 'bar' in out['measure'], \
        "AggregateParamParser doesn't return bar measure"
    # Unknown measure name produces an error message.
    out, err = AggregateParamParser(
        {'dataset': 'foo', 'measure': 'baz'}).parse()
    assert 'no measure with name "baz"' in err[0]
def test_sync_nodes_new_cluster_begets_restart(self, Config, Control):
    """Syncing a cluster that is absent from the active config forces a restart."""
    node1 = Mock()
    node2 = Mock()
    node3 = Mock()
    node4 = Mock()
    cluster1 = Mock(nodes=[node1, node4])
    cluster1.name = "cluster1"
    cluster2 = Mock(nodes=[node3, node2])
    cluster2.name = "cluster2"
    # Only cluster1 is currently known to the running haproxy.
    Control.return_value.get_active_nodes.return_value = {
        "cluster1": [
            {"svname": "app01:8888"},
            {"svname": "app02:8888"},
        ]
    }
    balancer = HAProxy()
    balancer.apply_config(
        {
            "config_file": "/etc/haproxy/haproxy.conf",
            "socket_file": "/var/run/haproxy.sock",
        }
    )
    balancer.restart_required = False
    # cluster2 is new, so the sync must flag a restart.
    balancer.sync_nodes([cluster1, cluster2])
    self.assertEqual(balancer.restart_required, True)
def test_get_installed_classes(self, pkg_resources):
    """Only loadable, complete plugins of the right class should be returned."""
    # Entry point whose load() raises: must be skipped.
    bad_plugin = Mock()
    bad_plugin.name = "bad"
    bad_plugin.load.side_effect = ImportError
    # The one valid plugin.
    fake_plugin = Mock()
    fake_plugin.name = "fakeplugin"
    fake_plugin.load.return_value = FakePlugin
    # Loads, but does not satisfy the plugin interface: skipped.
    incomplete_plugin = Mock()
    incomplete_plugin.name = "incomplete"
    incomplete_plugin.load.return_value = IncompletePlugin
    # A different plugin class: not part of FakePlugin's installed classes.
    other_plugin = Mock()
    other_plugin.name = "other"
    other_plugin.load.return_value = OtherPlugin
    pkg_resources.iter_entry_points.return_value = [
        bad_plugin, fake_plugin, incomplete_plugin, other_plugin
    ]
    self.assertEqual(
        FakePlugin.get_installed_classes(),
        {"fakeplugin": FakePlugin}
    )
def test_node_remove(self, m_client, m_cont_running, m_veth): """ Test the client removes the host when node_remove called, and that endpoints are removed when remove_endpoints flag is set. """ # Call method under test endpoint1 = Mock() endpoint1.name = "vethname1" endpoint1.ipv4_nets = {IPNetwork("1.2.3.4/32")} endpoint1.ipv6_nets = set() endpoint2 = Mock() endpoint2.name = "vethname2" endpoint2.ipv4_nets = set() endpoint2.ipv6_nets = {IPNetwork("aa:bb::cc/128")} m_client.get_endpoints.return_value = [endpoint1, endpoint2] node.node_remove(True, False) # Assert m_client.get_endpoints.assert_called_once_with(hostname=node.hostname) m_client.release_ips.assert_called_once_with({IPAddress("1.2.3.4"), IPAddress("aa:bb::cc")}) m_client.remove_ipam_host.assert_called_once_with(node.hostname) m_veth.assert_has_calls([call("vethname1"), call("vethname2")]) m_cont_running.assert_has_calls([call("calico-node")]) m_client.remove_host.assert_called_once_with(node.hostname)
def testGetSuggestedInstancesTwoDifferentSize(self, getAutoScalingGroupsMock):
    """Suggestions must be a generator yielding groups largest-capacity first."""
    region = "us-west-2"
    # Instance 1
    instanceMock1 = Mock(spec="boto.ec2.auto_scale.group.AutoScalingGroup")
    instanceMock1.name = "testName1"
    instanceMock1.desired_capacity = 64
    # Instance 2
    instanceMock2 = Mock(spec="boto.ec2.auto_scale.group.AutoScalingGroup")
    instanceMock2.name = "testName2"
    instanceMock2.desired_capacity = 65
    getAutoScalingGroupsMock.return_value = [
        instanceMock1,
        instanceMock2,
    ]
    suggestions = asg_utils.getSuggestedInstances(region)
    self.assertIsInstance(suggestions, types.GeneratorType)
    suggestions = list(suggestions)
    # Larger desired_capacity (testName2) must come first.
    self.assertSequenceEqual(suggestions, [
        {"id": "testName2", "name": "testName2",
         "namespace": "AWS/AutoScaling", "region": region},
        {"id": "testName1", "name": "testName1",
         "namespace": "AWS/AutoScaling", "region": region},
    ])
    # BUG FIX: the original called assert_call_once_with (misspelled), which
    # a plain Mock silently accepts as an auto-created attribute and never
    # verifies anything. The correct API is assert_called_once_with.
    getAutoScalingGroupsMock.assert_called_once_with(region)
def test_first_synch_topologies(self, mock_get_all_state_managers,
                                mock_add_new_topology, mock_remove_topology,
                                mock_get_topologies_for_state_location):
    """First synch registers topology watches and adds every discovered topology."""
    mock_state_manager_1 = Mock()
    mock_state_manager_1.name = 'mock_name1'
    mock_state_manager_2 = Mock()
    mock_state_manager_2.name = 'mock_name2'
    # Captured watch callbacks, keyed by state manager.
    watches = {}
    mock_get_all_state_managers.return_value = [mock_state_manager_1, mock_state_manager_2]
    mock_get_topologies_for_state_location.return_value = []
    # get_topologies stashes the watch callback and fires it immediately
    # with each manager's initial topology list.
    def side_effect1(on_topologies_watch):
        watches["1"] = on_topologies_watch
        on_topologies_watch(['top_name1', 'top_name2'])
    mock_state_manager_1.get_topologies = side_effect1
    def side_effect2(on_topologies_watch):
        watches["2"] = on_topologies_watch
        on_topologies_watch(['top_name3', 'top_name4'])
    mock_state_manager_2.get_topologies = side_effect2
    self.tracker.synch_topologies()
    mock_get_topologies_for_state_location.assert_has_calls(
        [call("mock_name2"), call("mock_name1")], any_order=True)
    # All four topologies must be added, attributed to their managers.
    mock_add_new_topology.assert_has_calls(
        [call(mock_state_manager_1, 'top_name1'),
         call(mock_state_manager_1, 'top_name2'),
         call(mock_state_manager_2, 'top_name3'),
         call(mock_state_manager_2, 'top_name4')], any_order=True)
def test_node_remove_specific_host(self, m_client, m_cont_running, m_veth): """ Test the client removes the specific host when node_remove called, and that endpoints are removed when remove_endpoints flag is set. """ # Call method under test endpoint1 = Mock() endpoint1.name = "vethname1" endpoint1.ipv4_nets = {IPNetwork("1.2.3.4/32")} endpoint1.ipv6_nets = set() endpoint2 = Mock() endpoint2.name = "vethname2" endpoint2.ipv4_nets = set() endpoint2.ipv6_nets = set() m_client.get_endpoints.return_value = [endpoint1, endpoint2] # This should not cause a failure with specific host m_cont_running.return_value = True node.node_remove(True, "other-host") # Assert m_client.get_endpoints.assert_called_once_with(hostname="other-host") m_client.release_ips.assert_called_once_with({IPAddress("1.2.3.4")}) m_client.remove_ipam_host.assert_called_once_with("other-host") m_client.remove_host.assert_called_once_with("other-host") m_veth.assert_has_calls([call("vethname1"), call("vethname2")])
def make_bigip():
    """Return a Mock BIG-IP; BigIPResourceHelper lookups are stubbed to yield
    one configured virtual server and its virtual address.

    Every fixture value is also mirrored as a plain attribute on the mock.
    """
    fixture = {
        'vip_port': '1010',
        'vaddr': '192.168.1.1',
        'netmask': '255.255.255.0',
        'protocol': 'HTTP',
        'lb_id': 'TEST_FOO',
        'fwd_name': 'hello',
        'name': 'name',
        'partition': 'Common',
    }
    fixture['dest'] = "{}/{}:{}".format(
        fixture['fwd_name'], fixture['lb_id'], fixture['vip_port'])

    bigip = Mock()
    virtual_server = Mock()
    virtual_address = Mock()

    # Resource-helper class methods are replaced wholesale for the test.
    BigIPResourceHelper.get_resources = Mock(return_value=[virtual_server])
    virtual_server.destination = fixture['dest']
    virtual_server.mask = fixture['netmask']
    virtual_server.ipProtocol = fixture['protocol']
    virtual_server.name = fixture['name']

    BigIPResourceHelper.load = Mock(return_value=virtual_address)
    virtual_address.raw = dict(address=fixture['vaddr'])

    for attr, value in fixture.items():
        setattr(bigip, attr, value)
    bigip.tmos_version = '12.1.2'
    return bigip
def testGetSuggestedInstancesTwoDifferentSize(self, getELBInstancesMock):
    """Suggestions must be a generator yielding ELBs with more instances first."""
    region = "us-west-2"
    # Instance 1
    instanceMock1 = Mock(spec="boto.ec2.elb.load_balancer.LoadBalancer")
    instanceMock1.name = "testName1"
    instanceMock1.instances = ["a", "b"]
    # Instance 2
    instanceMock2 = Mock(spec="boto.ec2.elb.load_balancer.LoadBalancer")
    instanceMock2.name = "testName2"
    instanceMock2.instances = ["c", "d", "e"]
    getELBInstancesMock.return_value = [instanceMock1, instanceMock2]
    suggestions = elb_utils.getSuggestedInstances(region)
    self.assertIsInstance(suggestions, types.GeneratorType)
    suggestions = list(suggestions)
    # testName2 has more attached instances, so it must come first.
    self.assertSequenceEqual(
        suggestions,
        [
            {"id": "testName2", "name": "testName2",
             "namespace": "AWS/ELB", "region": region},
            {"id": "testName1", "name": "testName1",
             "namespace": "AWS/ELB", "region": region},
        ],
    )
    # BUG FIX: the original called assert_call_once_with (misspelled), which
    # a plain Mock silently accepts as an auto-created attribute and never
    # verifies anything. The correct API is assert_called_once_with.
    getELBInstancesMock.assert_called_once_with(region)
def test_create_resource(self):
    """create_resource_type must reject bad names/ids and wire up the
    registry read/create/create_association calls for valid input."""
    # Create ResourceType
    rt = Mock()
    # Names with spaces are rejected.
    rt.name = "bad name"
    with self.assertRaises(BadRequest):
        self.rms.create_resource_type(rt, "123")
    rt.name = "good_name"
    # A missing object id is rejected.
    bad_object_id = None
    with self.assertRaises(BadRequest):
        self.rms.create_resource_type(rt, bad_object_id)
    # Create ObjectType
    ot = Mock()
    ot.definition = self.object_definition
    ot.name = "good_name"
    ot.description = "This is just a test. No need to panic"
    self.rms.clients.resource_registry.read.return_value = ot
    resource_id_return_value = '123'
    version_return_value = 1
    self.rms.clients.resource_registry.create.return_value = [resource_id_return_value, version_return_value]
    self.rms.clients.resource_registry.create_association.return_value = '999'
    object_id = "444"
    resource_id = self.rms.create_resource_type(rt, object_id)
    self.assertEqual(resource_id, '123')
    # The registry must be read, the type created, and the association made.
    self.rms.clients.resource_registry.read.assert_called_once_with(object_id, '')
    self.rms.clients.resource_registry.create.assert_called_once_with(rt)
    self.rms.clients.resource_registry.create_association.assert_called_once_with(
        resource_id_return_value, PRED.hasObjectType, object_id, 'H2H')
def test_restructure_layout_preferences_forwards(self):
    """Migration 0016 forwards(): 'columns' prefs are folded into the layout
    preference; workspaces/tabs with no extra prefs are left alone."""
    # Workspace with no extra layout preferences and no tabs.
    empty_workspace = Mock()
    empty_workspace_layout_pref = Mock()
    empty_workspace_layout_preferences = TestQueryResult([])
    # filter(name="layout") yields the layout pref; anything else yields
    # the (empty) remaining layout-related preferences.
    empty_workspace.workspacepreference_set.filter.side_effect = (
        lambda **kwargs: empty_workspace_layout_pref
        if kwargs.get("name") == "layout"
        else empty_workspace_layout_preferences
    )
    empty_workspace.tab_set.all.return_value = TestQueryResult([])
    # Workspace carrying a 'columns' preference that must be migrated.
    workspace = Mock()
    workspace_layout_pref = Mock()
    workspace_columns_pref = Mock()
    workspace_columns_pref.name = "columns"
    workspace_columns_pref.value = 15
    workspace_layout_preferences = TestQueryResult([workspace_columns_pref])
    workspace.workspacepreference_set.filter.side_effect = (
        lambda **kwargs: workspace_layout_pref
        if kwargs.get("name", "") == "layout"
        else workspace_layout_preferences
    )
    # Tab with a 'columns' preference.
    workspace_tab = Mock()
    workspace_tab_layout_pref = Mock()
    workspace_tab_columns_pref = Mock()
    workspace_tab_columns_pref.name = "columns"
    workspace_tab_columns_pref.value = 15
    workspace_tab_layout_preferences = TestQueryResult([workspace_tab_columns_pref])
    workspace_tab.tabpreference_set.filter.side_effect = (
        lambda **kwargs: workspace_tab_layout_pref
        if kwargs.get("name", "") == "layout"
        else workspace_tab_layout_preferences
    )
    # Tab without extra preferences.
    workspace_empty_tab = Mock()
    workspace_empty_tab_layout_pref = Mock()
    workspace_empty_tab_layout_preferences = TestQueryResult([])
    workspace_empty_tab.tabpreference_set.filter.side_effect = (
        lambda **kwargs: workspace_empty_tab_layout_pref
        if kwargs.get("name", "") == "layout"
        else workspace_empty_tab_layout_preferences
    )
    workspace.tab_set.all.return_value = TestQueryResult([workspace_empty_tab, workspace_tab])
    migration = self._pick_migration("0016_restructure_layout_preferences")
    orm = Mock(autospec=migration.orm())
    orm.Workspace.objects.all.return_value = TestQueryResult([empty_workspace, workspace])
    migration.migration_instance().forwards(orm)
    # Empty workspace: layout pref touched, nothing deleted or created.
    self.assertTrue(empty_workspace_layout_pref.update.called)
    self.assertEqual(empty_workspace_layout_preferences.delete.call_count, 0)
    self.assertFalse(empty_workspace.workspacepreference_set.create.called)
    # Populated workspace: layout updated, old 'columns' prefs deleted,
    # and a new preference created.
    self.assertTrue(workspace_layout_pref.update.called)
    self.assertTrue(workspace_layout_preferences.delete.called)
    self.assertTrue(workspace_tab_layout_preferences.delete.called)
    self.assertFalse(workspace_empty_tab_layout_preferences.delete.called)
    self.assertTrue(workspace.workspacepreference_set.create.called)
def test_repo_groups(self):
    """Per-repo-group rule filtering: both repos here end up with no
    applicable rules, so check() must return no alerts for either."""
    line_evaluator = Mock()
    line_evaluator.key = "line"
    line_evaluator.matches = Mock(return_value=True)
    rule = Mock()
    rule.name = "os_code_exec::python"
    rule.evaluators = [line_evaluator]
    junk_repo = Mock()
    junk_repo.name = 'junk'
    local_repo = Mock()
    local_repo.name = 'tooling'
    repo_groups = {
        'skipped_repos': ['junk'],
        'local_repos': ['tooling']
    }
    # skipped_repos drop every rule; local_repos keep all but os_code_exec::*.
    rules_to_groups = {
        'skipped_repos': [{'except': '.*'}],
        'local_repos': [
            {'match': '.*'},
            {'except': 'os_code_exec::.*'}
        ]
    }
    code_checker = CodeChecker(context_processors=[], rules=[rule],
                               repo_groups=repo_groups,
                               rules_to_groups=rules_to_groups)
    check_context = {"filename": "macbeth.txt"}
    # FIX: assertEquals is a deprecated alias (removed in Python 3.12);
    # use the canonical assertEqual.
    self.assertEqual(code_checker.check(lines=self.code, context=check_context, repo=junk_repo), [])
    self.assertEqual(code_checker.check(lines=self.code, context=check_context, repo=local_repo), [])
def test_topological_order_packages(self):
    """Packages must be ordered so build dependencies precede dependents."""
    plist = []
    # Empty input yields an empty ordering.
    self.assertEqual([], _topological_order_packages(plist))
    p1 = Mock('catkin.topological_order.PackageData')
    p1.name = 'p1'
    # 'px' is not in the package list; unknown deps should be ignored.
    p1.build_depends = set(['p3', 'px'])
    p1.message_generator = Mock()
    p2 = Mock('catkin.topological_order.PackageData')
    p2.name = 'p2'
    p2.build_depends = set(['p1', 'p3'])
    p2.message_generator = Mock()
    p3 = Mock('catkin.topological_order.PackageData')
    p3.name = 'p3'
    p3.build_depends = set()
    p3.message_generator = Mock()
    p4 = Mock('catkin.topological_order.PackageData')
    p4.name = 'catkin'
    # NOTE(review): p4 is constructed but never added to plist — presumably
    # deliberate (catkin itself excluded from ordering); confirm intent.
    plist = [p1, p2, p3]
    # p3 has no deps, p1 needs p3, p2 needs both: p3 < p1 < p2.
    self.assertEqual([['p3', p3], ['p1', p1], ['p2', p2]], _topological_order_packages(plist))
def test_dirty_save(self):
    """PUT on a resource whose model reports is_dirty() must trigger save()."""
    age_field = Mock()
    age_field.name = 'age'
    age_field.value_to_string.return_value = 30
    name_field = Mock()
    name_field.name = 'name'
    name_field.value_to_string.return_value = 'Bob'

    # Minimal fake ORM model with the two mocked fields.
    class DirtyUser(mock_orm.Model):
        _fields = [age_field, name_field]
        pk = 3

    class DirtyUserResource(resources.ModelResource):
        parent_resource_path = 'users'
        model_class = DirtyUser
        fields = [
            fields.AttributeField(attribute='name', type=str),
            fields.AttributeField(attribute='age', type=int)
        ]

    dirty_user = DirtyUser()
    dirty_user.save = Mock()
    # Force the dirty path regardless of what the PUT actually changes.
    dirty_user.is_dirty = lambda: True
    resource = DirtyUserResource(dirty_user)
    resource.put(mock_context(), {
        'name': 'Bob',
        'age': 30,
        'resourceUri': 'uri://users/1'
    })
    self.assertTrue(dirty_user.save.called)
def test_batch_norm_macro():
    """batch_norm() must insert a BatchNormLayer, steal the wrapped layer's
    nonlinearity and bias, forward kwargs, and derive child-layer names."""
    from lasagne.layers import (Layer, BatchNormLayer, batch_norm,
                                NonlinearityLayer)
    from lasagne.nonlinearities import identity
    input_shape = (2, 3)
    obj = object()
    # check if it steals the nonlinearity
    layer = Mock(Layer, output_shape=input_shape, nonlinearity=obj)
    bnstack = batch_norm(layer)
    assert isinstance(bnstack, NonlinearityLayer)
    assert isinstance(bnstack.input_layer, BatchNormLayer)
    assert layer.nonlinearity is identity
    assert bnstack.nonlinearity is obj
    # check if it removes the bias
    layer = Mock(Layer, output_shape=input_shape, b=obj, params={obj: set()})
    bnstack = batch_norm(layer)
    assert isinstance(bnstack, BatchNormLayer)
    assert layer.b is None
    assert obj not in layer.params
    # check if it can handle an unset bias
    layer = Mock(Layer, output_shape=input_shape, b=None, params={obj: set()})
    bnstack = batch_norm(layer)
    assert isinstance(bnstack, BatchNormLayer)
    assert layer.b is None
    # check if it passes on kwargs
    layer = Mock(Layer, output_shape=input_shape)
    bnstack = batch_norm(layer, name='foo')
    assert isinstance(bnstack, BatchNormLayer)
    assert bnstack.name == 'foo'
    # check if created layers are named with kwargs name
    layer = Mock(Layer, output_shape=input_shape, nonlinearity=obj)
    layer.name = 'foo'
    bnstack = batch_norm(layer, name='foo_bnorm')
    assert isinstance(bnstack, NonlinearityLayer)
    assert isinstance(bnstack.input_layer, BatchNormLayer)
    assert bnstack.name == 'foo_bnorm_nonlin'
    assert bnstack.input_layer.name == 'foo_bnorm'
    # check if created layers are named with wrapped layer name
    layer = Mock(Layer, output_shape=input_shape, nonlinearity=obj)
    layer.name = 'foo'
    bnstack = batch_norm(layer)
    assert isinstance(bnstack, NonlinearityLayer)
    assert isinstance(bnstack.input_layer, BatchNormLayer)
    assert bnstack.name == 'foo_bn_nonlin'
    assert bnstack.input_layer.name == 'foo_bn'
    # check if created layers remain unnamed if no names are given
    layer = Mock(Layer, output_shape=input_shape, nonlinearity=obj)
    bnstack = batch_norm(layer)
    assert isinstance(bnstack, NonlinearityLayer)
    assert isinstance(bnstack.input_layer, BatchNormLayer)
    assert bnstack.name is None
    assert bnstack.input_layer.name is None
def setUp(self):
    """Build a fake MongoDB cluster — two members, a query router and a
    config server — plus the ClusterTasks instance under test."""
    super(MongoDbClusterTasksTest, self).setUp()
    self.cluster_id = "1232"
    self.cluster_name = "Cluster-1234"
    self.tenant_id = "6789"
    self.db_cluster = DBCluster(ClusterTaskStatus.NONE,
                                id=self.cluster_id,
                                created=str(datetime.date),
                                updated=str(datetime.date),
                                name=self.cluster_name,
                                task_id=ClusterTaskStatus.NONE._code,
                                tenant_id=self.tenant_id,
                                datastore_version_id="1",
                                deleted=False)
    # Two data-bearing members in shard-1.
    self.dbinst1 = DBInstance(InstanceTasks.NONE, id="1", name="member1",
                              compute_instance_id="compute-1",
                              task_id=InstanceTasks.NONE._code,
                              task_description=InstanceTasks.NONE._db_text,
                              volume_id="volume-1",
                              datastore_version_id="1",
                              cluster_id=self.cluster_id,
                              shard_id="shard-1",
                              type="member")
    self.dbinst2 = DBInstance(InstanceTasks.NONE, id="2", name="member2",
                              compute_instance_id="compute-2",
                              task_id=InstanceTasks.NONE._code,
                              task_description=InstanceTasks.NONE._db_text,
                              volume_id="volume-2",
                              datastore_version_id="1",
                              cluster_id=self.cluster_id,
                              shard_id="shard-1",
                              type="member")
    # mongos query router.
    self.dbinst3 = DBInstance(InstanceTasks.NONE, id="3", name="mongos",
                              compute_instance_id="compute-3",
                              task_id=InstanceTasks.NONE._code,
                              task_description=InstanceTasks.NONE._db_text,
                              volume_id="volume-3",
                              datastore_version_id="1",
                              cluster_id=self.cluster_id,
                              shard_id="shard-1",
                              type="query_router")
    # Config server.
    self.dbinst4 = DBInstance(InstanceTasks.NONE, id="4",
                              name="configserver",
                              compute_instance_id="compute-4",
                              task_id=InstanceTasks.NONE._code,
                              task_description=InstanceTasks.NONE._db_text,
                              volume_id="volume-4",
                              datastore_version_id="1",
                              cluster_id=self.cluster_id,
                              shard_id="shard-1",
                              type="config_server")
    mock_ds1 = Mock()
    mock_ds1.name = 'mongodb'
    mock_dv1 = Mock()
    mock_dv1.name = '2.0.4'
    self.clustertasks = ClusterTasks(Mock(), self.db_cluster,
                                     datastore=mock_ds1,
                                     datastore_version=mock_dv1)
def test_get_sources(self, get_source_map):
    """Systems present in the source map get their sources; others get []."""
    get_source_map.return_value = {'system': ['foo']}
    # A system that exists in the map resolves to its source list.
    known_system = Mock()
    known_system.name = 'system'
    eq_(sources.get_sources(known_system), ['foo'])
    # An unknown system resolves to an empty list.
    unknown_system = Mock()
    unknown_system.name = 'bar'
    eq_(sources.get_sources(unknown_system), [])
def test_should_get_group_from_nova_security_group_manager_when_multiple_groups_present(self):
    """get_group must return the matching group even when several are listed."""
    decoy_group = Mock()
    decoy_group.name = "secGroup"
    wanted_group = Mock()
    wanted_group.name = "otherGroup"
    # The manager lists the decoy first so a naive "first result" lookup
    # would fail; get_group must match by name.
    self.security_group_manager.list.return_value = [decoy_group, wanted_group]
    self.assertEqual(
        self.openstack_group_service.get_group(wanted_group.name),
        wanted_group)
def test_synch_topologies_leading_with_add_and_remove_topologies(
    self, mock_get_all_state_managers, mock_add_new_topology,
    mock_remove_topology, mock_get_topologies_for_state_location):
    """After the initial synch, firing the captured watch callbacks with
    changed topology lists must add the new ones and remove the stale ones."""
    mock_state_manager_1 = Mock()
    mock_state_manager_1.name = 'mock_name1'
    mock_state_manager_2 = Mock()
    mock_state_manager_2.name = 'mock_name2'
    # Captured watch callbacks, keyed by state manager.
    watches = {}
    mock_get_all_state_managers.return_value = [
        mock_state_manager_1, mock_state_manager_2
    ]
    mock_get_topologies_for_state_location.return_value = []
    # get_topologies stashes the watch callback and fires it immediately
    # with each manager's initial topology list.
    def side_effect1(on_topologies_watch):
        watches["1"] = on_topologies_watch
        on_topologies_watch(['top_name1', 'top_name2'])
    mock_state_manager_1.get_topologies = side_effect1
    def side_effect2(on_topologies_watch):
        watches["2"] = on_topologies_watch
        on_topologies_watch(['top_name3', 'top_name4'])
    mock_state_manager_2.get_topologies = side_effect2
    self.tracker.synch_topologies()
    mock_get_topologies_for_state_location.assert_has_calls(
        [call("mock_name2"), call("mock_name1")], any_order=True)
    mock_add_new_topology.assert_has_calls([
        call(mock_state_manager_1, 'top_name1'),
        call(mock_state_manager_1, 'top_name2'),
        call(mock_state_manager_2, 'top_name3'),
        call(mock_state_manager_2, 'top_name4')
    ], any_order=True)
    self.assertEqual(4, mock_add_new_topology.call_count)
    self.assertEqual(0, mock_remove_topology.call_count)
    mock_get_topologies_for_state_location.reset_mock()
    mock_add_new_topology.reset_mock()
    mock_remove_topology.reset_mock()
    # Pretend the tracker already knows the four original topologies.
    def get_topologies_for_state_location_side_effect(name):
        if name == 'mock_name1':
            return [
                Topology('top_name1', 'mock_name1'),
                Topology('top_name2', 'mock_name1')
            ]
        if name == 'mock_name2':
            return [
                Topology('top_name3', 'mock_name2'),
                Topology('top_name4', 'mock_name2')
            ]
        return []
    # pylint: disable=line-too-long
    mock_get_topologies_for_state_location.side_effect = get_topologies_for_state_location_side_effect
    # Fire the watches with new lists: manager1 gains top_name3 and loses
    # top_name2; manager2 replaces both of its topologies.
    watches["1"](['top_name1', 'top_name3'])
    watches["2"](['top_name5', 'top_name6'])
    mock_add_new_topology.assert_has_calls([
        call(mock_state_manager_1, 'top_name3'),
        call(mock_state_manager_2, 'top_name5'),
        call(mock_state_manager_2, 'top_name6')
    ], any_order=True)
    mock_remove_topology.assert_has_calls([
        call('top_name2', 'mock_name1'),
        call('top_name3', 'mock_name2'),
        call('top_name4', 'mock_name2')
    ], any_order=False)
    self.assertEqual(3, mock_add_new_topology.call_count)
    self.assertEqual(3, mock_remove_topology.call_count)
def test_should_not_equal_to_esx_with_other_name(self):
    """ESX equality must be False when the underlying raw names differ."""
    raw_esx = Mock()
    raw_esx.name = "other-esx-name"
    differently_named_esx = ESX(raw_esx)
    self.assertFalse(self.wrapped_esx == differently_named_esx)
def _handler_res(self): m = Mock() m.name = "handler_res" return m
def run_fake_build(self, notifier, info=None):
    """Drive a fake three-builder buildset to completion and fire the
    notifier's buildsetComplete handler.

    If *info* is given, its 'bsid' and 'builds' keys are filled in so the
    caller can make further assertions. Returns the handler's deferred.
    """
    notifier.master = fakemaster.make_master()
    notifier.master_status = notifier.master.status
    builders = []
    builds = []
    # Three successful, finished builds on Builder0..Builder2.
    for i in [0, 1, 2]:
        builder = Mock()
        build = FakeBuildStatus()
        builder.getBuild.return_value = build
        builder.name = "Builder%d" % i
        build.results = SUCCESS
        build.finished = True
        build.reason = "testReason"
        build.getBuilder.return_value = builder
        builders.append(builder)
        builds.append(build)

    def fakeGetBuilder(buildername):
        # Look up one of the three mocked builders by name.
        return {
            "Builder0": builders[0],
            "Builder1": builders[1],
            "Builder2": builders[2]
        }[buildername]
    notifier.master_status.getBuilder = fakeGetBuilder
    # Seed the fake db with buildset 99 and one request/build per builder.
    notifier.master.db.insertTestData([
        fakedb.Master(id=92),
        fakedb.Buildslave(id=13, name='sl'),
        fakedb.Buildset(id=99, results=SUCCESS, reason="testReason"),
        fakedb.Builder(id=80, name='Builder0'),
        fakedb.Builder(id=81, name='Builder1'),
        fakedb.Builder(id=82, name='Builder2'),
        fakedb.BuildRequest(id=10, buildsetid=99, builderid=80),
        fakedb.Build(number=0, buildrequestid=10, masterid=92, buildslaveid=13),
        fakedb.BuildRequest(id=11, buildsetid=99, builderid=81),
        fakedb.Build(number=0, buildrequestid=11, masterid=92, buildslaveid=13),
        fakedb.BuildRequest(id=12, buildsetid=99, builderid=82),
        fakedb.Build(number=0, buildrequestid=12, masterid=92, buildslaveid=13)
    ])
    if info is not None:
        info['bsid'] = 99
        info['builds'] = builds
    d = notifier._buildsetComplete('buildset.99.complete', {
        'bsid': 99,
        'result': SUCCESS
    })
    return d
def the_amazon_provider():
    """Yield a mock provider carrying the Amazon provider name and id."""
    provider = Mock()
    provider.id = PROVIDER_ID
    # 'name' must be set on the instance, not via the Mock constructor.
    provider.name = AMAZON_PROVIDER_NAME
    yield provider
def MockStep(step=None):
    """Dress *step* (or a fresh Mock) up as a step with a blueprint.

    The step's name embeds its ``id()`` so every mock step is unique.
    """
    if step is None:
        step = Mock()
    step.blueprint = Mock()
    step.blueprint.name = 'MockNS'
    step.name = 'MockStep(%s)' % (id(step), )
    return step
def test_get_probe_data(self):
    """Verify ProbeSet.get_probe_data() picks the most specific probe per
    basename and determines each probe's interpreter from its shebang."""
    ps = self.get_obj()
    # build some fairly complex test data for this. in the end,
    # we want the probe data to include only the most specific
    # version of a given probe, and by basename only, not full
    # (specific) name. We don't fully test the specificity stuff,
    # we just check to make sure sort() is called and trust that
    # sort() does the right thing on Specificity objects. (I.e.,
    # trust that Specificity is well-tested. Hah!) We also test
    # to make sure the interpreter is determined correctly.
    ps.get_matching = Mock()
    matching = FakeList()
    matching.sort = Mock()
    p1 = Mock()
    p1.specific = Bcfg2.Server.Plugin.Specificity(group=True, prio=10)
    p1.name = "fooprobe.G10_foogroup"
    # group-specific probe: should win over the generic "fooprobe" below
    p1.data = """#!/bin/bash
group-specific"""
    matching.append(p1)
    p2 = Mock()
    p2.specific = Bcfg2.Server.Plugin.Specificity(all=True)
    p2.name = "fooprobe"
    p2.data = "#!/bin/bash"
    matching.append(p2)
    p3 = Mock()
    p3.specific = Bcfg2.Server.Plugin.Specificity(all=True)
    p3.name = "barprobe"
    # "#! " shebang with a space before the interpreter path
    p3.data = "#! 
/usr/bin/env python"
    matching.append(p3)
    p4 = Mock()
    p4.specific = Bcfg2.Server.Plugin.Specificity(all=True)
    p4.name = "bazprobe"
    p4.data = ""
    matching.append(p4)
    ps.get_matching.return_value = matching
    metadata = Mock()
    metadata.version_info = \
        Bcfg2.version.Bcfg2VersionInfo(Bcfg2.version.__version__)
    pdata = ps.get_probe_data(metadata)
    ps.get_matching.assert_called_with(metadata)
    # we can't create a matching operator.attrgetter object, and I
    # don't feel the need to mock that out -- this is a good
    # enough check
    self.assertTrue(matching.sort.called)
    self.assertEqual(len(pdata), 3,
                     "Found: %s" % [p.get("name") for p in pdata])
    for probe in pdata:
        if probe.get("name") == "fooprobe":
            self.assertIn("group-specific", probe.text)
            self.assertEqual(probe.get("interpreter"), "/bin/bash")
        elif probe.get("name") == "barprobe":
            self.assertEqual(probe.get("interpreter"),
                             "/usr/bin/env python")
        elif probe.get("name") == "bazprobe":
            self.assertIsNotNone(probe.get("interpreter"))
        else:
            assert False, "Strange probe found in get_probe_data() return"
def blob(name, id):
    """Build a mock blob carrying the given name and id."""
    mock_blob = Mock()
    mock_blob.id = id
    # assigned post-construction: Mock() reserves the 'name' kwarg
    mock_blob.name = name
    return mock_blob
def get_mock_blob(name, md5):
    """Fabricate a mock storage blob with the given name and content MD5."""
    blob_mock = Mock()
    blob_mock.properties.content_settings.content_md5 = md5
    blob_mock.name = name
    return blob_mock
def test__apply(self, *_):
    """End-to-end test of provider.apply(): record sets added/deleted via the
    mocked Google Cloud zone, timeout on a never-finishing change, and a
    RuntimeError for an unsupported change type."""

    class DummyDesired:
        # minimal stand-in for a Plan's desired zone (name + changes only)
        def __init__(self, name, changes):
            self.name = name
            self.changes = changes

    apply_z = Zone("unit.tests.", [])
    create_r = Record.new(apply_z, '', {
        'ttl': 0, 'type': 'A', 'values': ['1.2.3.4', '10.10.10.10']})
    delete_r = Record.new(apply_z, 'a', {
        'ttl': 1, 'type': 'A', 'values': ['1.2.3.4', '1.1.1.1']})
    update_existing_r = Record.new(apply_z, 'aa', {
        'ttl': 9001, 'type': 'A', 'values': ['1.2.4.3']})
    update_new_r = Record.new(apply_z, 'aa', {
        'ttl': 666, 'type': 'A', 'values': ['1.4.3.2']})
    gcloud_zone_mock = DummyGoogleCloudZone("unit.tests.", "unit-tests")
    status_mock = Mock()
    # status polls: 11 "pending" answers, then "done" twice (apply succeeds)
    return_values_for_status = iter(
        ["pending"] * 11 + ['done', 'done'])
    type(status_mock).status = PropertyMock(
        side_effect=lambda: next(return_values_for_status))
    gcloud_zone_mock.changes = Mock(return_value=status_mock)
    provider = self._get_provider()
    provider.gcloud_client = Mock()
    provider._gcloud_zones = {"unit.tests.": gcloud_zone_mock}
    desired = Mock()
    # 'name' must be set on the instance (Mock constructor reserves it)
    desired.name = "unit.tests."
    changes = []
    changes.append(Create(create_r))
    changes.append(Delete(delete_r))
    changes.append(Update(existing=update_existing_r, new=update_new_r))
    provider.apply(Plan(
        existing=[update_existing_r, delete_r],
        desired=desired,
        changes=changes,
        exists=True
    ))
    calls_mock = gcloud_zone_mock.changes.return_value
    # Creates plus the Update's "new" record should have been added.
    mocked_calls = []
    for mock_call in calls_mock.add_record_set.mock_calls:
        mocked_calls.append(mock_call[1][0])
    self.assertEqual(mocked_calls, [
        DummyResourceRecordSet(
            'unit.tests.', 'A', 0, ['1.2.3.4', '10.10.10.10']),
        DummyResourceRecordSet(
            'aa.unit.tests.', 'A', 666, ['1.4.3.2'])
    ])
    # Deletes plus the Update's "existing" record should have been removed.
    mocked_calls2 = []
    for mock_call in calls_mock.delete_record_set.mock_calls:
        mocked_calls2.append(mock_call[1][0])
    self.assertEqual(mocked_calls2, [
        DummyResourceRecordSet(
            'a.unit.tests.', 'A', 1, ['1.2.3.4', '1.1.1.1']),
        DummyResourceRecordSet(
            'aa.unit.tests.', 'A', 9001, ['1.2.4.3'])
    ])
    # A change that never leaves "pending" must raise (timeout behavior).
    type(status_mock).status = "pending"
    with self.assertRaises(RuntimeError):
        provider.apply(Plan(
            existing=[update_existing_r, delete_r],
            desired=desired,
            changes=changes,
            exists=True
        ))
    # An unrecognized change class must also raise.
    unsupported_change = Mock()
    unsupported_change.__len__ = Mock(return_value=1)
    type_mock = Mock()
    type_mock._type = "A"
    unsupported_change.record = type_mock
    mock_plan = Mock()
    type(mock_plan).desired = PropertyMock(return_value=DummyDesired(
        "dummy name", []))
    type(mock_plan).changes = [unsupported_change]
    with self.assertRaises(RuntimeError):
        provider.apply(mock_plan)
def test__get_task_name(self):
    """A plain (non-resmoke-generating) task's name is returned unchanged."""
    expected_name = "mytask"
    task = Mock()
    task.is_generate_resmoke_task = False
    task.name = expected_name
    self.assertEqual(expected_name, under_test._get_task_name(task))
def get(self, name):
    """Return the mock default branch; any other name is a test error."""
    assert name == 'defaultbranch'
    branch = Mock(commit=dict(id='defaultbranchsha'))
    branch.name = 'defaultbranch'
    return branch
def get_mock_xiv_host(name, iscsi_ports, fc_ports):
    """Create a mock XIV host exposing the supplied name and port lists."""
    mock_host = Mock()
    mock_host.fc_ports = fc_ports
    mock_host.iscsi_ports = iscsi_ports
    mock_host.name = name
    return mock_host
def _create_mock_package(self, name, arch, repoid):
    """Return a mock installed RPM package with the given name/arch/repoid."""
    package = Mock(spec=yum.rpmsack.RPMInstalledPackage)
    package.name = name
    package.arch = arch
    package.repoid = repoid
    return package
def run_fake_summary_build(self, gsp, buildResults, finalResult, resultText,
                           expWarning=False):
    """Run a fake buildset through the Gerrit status push *gsp*.

    One mock builder/build pair is created per entry in *buildResults*;
    the fake database is seeded to match and the buildset-complete event
    is fired.

    :param gsp: GerritStatusPush-like object under test.
    :param buildResults: per-build result codes.
    :param finalResult: overall buildset result stored in the fake db.
    :param resultText: per-build expected result text (used by the caller).
    :param expWarning: when True, assert the old-style-callback deprecation
        warning was emitted.
    :returns: deferred firing with ``str(info)`` of the expected summary.
    """
    buildpairs = []
    i = 0
    for i in xrange(len(buildResults)):
        buildResult = buildResults[i]
        builder = Mock()
        build = FakeBuildStatus()
        builder.getBuild.return_value = build
        builder.name = "Builder-%d" % i
        builder.getName.return_value = builder.name
        builder._builderid = i
        build.results = buildResult
        build.finished = True
        build.reason = "testReason"
        build.getBuilder.return_value = builder
        build.getResults.return_value = build.results
        build.getText.return_value = ['buildText']
        build.getProperty = self.TEST_PROPS.get
        buildpairs.append((builder, build))

    def fakeGetBuilder(buildername):
        # e.g. Builder-5 will be buildpairs[5][0]
        return buildpairs[int(buildername.split("-")[1])][0]

    gsp.master_status.getBuilder = fakeGetBuilder
    gsp.master_status.getURLForThing = Mock()
    gsp.master_status.getURLForThing.return_value = self.THING_URL
    gsp.master.db = fakedb.FakeDBConnector(gsp.master, self)
    fakedata = [
        fakedb.Master(id=92),
        fakedb.Buildslave(id=13, name='sl'),
        fakedb.Buildset(id=99, results=finalResult, reason="testReason"),
    ]
    # One builder + build request + build row per mock pair, request ids
    # counting up from 1000.
    breqid = 1000
    for (builder, build) in buildpairs:
        fakedata.append(
            fakedb.Builder(id=builder._builderid, name=builder.name))
        fakedata.append(
            fakedb.BuildRequest(id=breqid, buildsetid=99,
                                builderid=builder._builderid))
        fakedata.append(
            fakedb.Build(number=0, buildrequestid=breqid, masterid=92,
                         buildslaveid=13))
        breqid = breqid + 1
    gsp.master.db.insertTestData(fakedata)
    d = gsp._buildsetComplete('buildset.99.complete',
                              dict(bsid=99, result=SUCCESS))

    @d.addCallback
    def check(_):
        # Build the summary the caller expects, and optionally verify the
        # old-style-callback deprecation warning fired.
        info = []
        for i in xrange(len(buildResults)):
            info.append({
                'name': "Builder-%d" % i,
                'result': buildResults[i],
                'resultText': resultText[i],
                'text': 'buildText',
                'url': self.THING_URL
            })
        if expWarning:
            self.assertEqual(
                [w['message'] for w in self.flushWarnings()],
                [
                    'The Gerrit status callback uses the old '
                    'way to communicate results. The outcome '
                    'might be not what is expected.'
                ])
        return str(info)
    return d
def test_glob(self):
    """Test Client.glob() pattern matching against mock VOSpace node trees:
    plain listings, wildcards in file and directory components, and
    filtering of special (dot) files."""
    # test the pattern matches in directories and file names

    # simple test for listing of directory, no wild cards
    # NOTE: Mock class also has a 'name' attribute so we cannot
    # instantiate a mock node with Mock(name='blah'). There are
    # two other ways to do it as seen below
    mock_node = MagicMock(type='vos:ContainerNode')
    mock_node.configure_mock(name='anode')
    client = Client()
    client.get_node = Mock(return_value=mock_node)
    self.assertEquals(['vos:/anode/'], client.glob('vos:/anode/'))

    # simple test for file listing of file
    mock_node = MagicMock(type='vos:DataNode')
    mock_node.configure_mock(name='afile')
    client = Client()
    client.get_node = Mock(return_value=mock_node)
    self.assertEquals(['vos:/afile'], client.glob('vos:/afile'))

    # create a mock directory structure on the form
    # /anode/abc /anode/def - > anode/a* should return
    # /anode/adc
    mock_node = MagicMock(type='vos:ContainerNode')
    mock_node.configure_mock(name='anode')
    mock_child_node1 = Mock(type='vos:DataNode')
    mock_child_node1.name = 'abc'
    mock_child_node2 = Mock(type='vos:DataNode')
    mock_child_node2.name = 'def'
    # because we use wild characters in the root node,
    # we need to create a corresponding node for the base node
    mock_base_node = Mock(type='vos:ContainerNode')
    mock_base_node.name = 'vos:'
    mock_base_node.node_list = [mock_node]
    mock_node.node_list = [
        mock_base_node, mock_child_node1, mock_child_node2
    ]
    client = Client()
    client.get_node = Mock(
        side_effect=[mock_node, mock_base_node, mock_node])
    self.assertEquals(['vos:/anode/abc'], client.glob('vos:/anode/a*'))
    self.assertEquals(['vos:/anode/abc'], client.glob('vos:/*node/abc'))

    # test nodes:
    # /anode/.test1 /bnode/sometests /bnode/blah
    # /[a,c]node/*test* should return /bnode/somtests (.test1 is filtered
    # out as a special file)
    mock_node1 = MagicMock(type='vos:ContainerNode')
    mock_node1.configure_mock(name='anode')
    mock_node1.node_list = [mock_child_node1]
    mock_child_node2 = Mock(type='vos:DataNode')
    mock_child_node2.name = 'sometests'
    mock_child_node3 = Mock(type='vos:DataNode')
    mock_child_node3.name = 'blah'
    mock_node2 = MagicMock(type='vos:ContainerNode')
    mock_node2.configure_mock(name='bnode')
    mock_node2.node_list = [mock_child_node2, mock_child_node3]
    # because we use wild characters in the root node,
    # we need to create a corresponding node for the base node
    mock_base_node = Mock(type='vos:DataNode')
    mock_base_node.name = 'vos:'
    mock_base_node.node_list = [mock_node1, mock_node2]
    client = Client()
    client.get_node = Mock(
        side_effect=[mock_base_node, mock_node1, mock_node2])
    self.assertEquals(['vos:/bnode/sometests'],
                      client.glob('vos:/[a,b]node/*test*'))
def get(self, name):
    """Return a mock branch fixed to ``mybranch`` regardless of *name*."""
    branch = Mock(commit=dict(id='mybranchsha'))
    branch.name = 'mybranch'
    return branch
def test_batch_norm_macro(dnn):
    """Exercise the batch_norm()/batch_norm_dnn() convenience macro:
    nonlinearity stealing, bias removal, kwarg pass-through and the
    naming of the layers it creates."""
    # Select the plain or cuDNN-backed implementation under test.
    if not dnn:
        from lasagne.layers import (BatchNormLayer, batch_norm)
    else:
        try:
            from lasagne.layers.dnn import (
                BatchNormDNNLayer as BatchNormLayer,
                batch_norm_dnn as batch_norm)
        except ImportError:
            pytest.skip("cuDNN batch norm not available")
    from lasagne.layers import (Layer, NonlinearityLayer)
    from lasagne.nonlinearities import identity
    input_shape = (2, 3)
    obj = object()  # unique sentinel for identity checks

    # check if it steals the nonlinearity
    layer = Mock(Layer, output_shape=input_shape, nonlinearity=obj)
    bnstack = batch_norm(layer)
    assert isinstance(bnstack, NonlinearityLayer)
    assert isinstance(bnstack.input_layer, BatchNormLayer)
    assert layer.nonlinearity is identity
    assert bnstack.nonlinearity is obj

    # check if it removes the bias
    layer = Mock(Layer, output_shape=input_shape, b=obj,
                 params={obj: set()})
    bnstack = batch_norm(layer)
    assert isinstance(bnstack, BatchNormLayer)
    assert layer.b is None
    assert obj not in layer.params

    # check if it can handle an unset bias
    layer = Mock(Layer, output_shape=input_shape, b=None,
                 params={obj: set()})
    bnstack = batch_norm(layer)
    assert isinstance(bnstack, BatchNormLayer)
    assert layer.b is None

    # check if it passes on kwargs
    layer = Mock(Layer, output_shape=input_shape)
    bnstack = batch_norm(layer, name='foo')
    assert isinstance(bnstack, BatchNormLayer)
    assert bnstack.name == 'foo'

    # check if created layers are named with kwargs name
    layer = Mock(Layer, output_shape=input_shape, nonlinearity=obj)
    layer.name = 'foo'
    bnstack = batch_norm(layer, name='foo_bnorm')
    assert isinstance(bnstack, NonlinearityLayer)
    assert isinstance(bnstack.input_layer, BatchNormLayer)
    assert bnstack.name == 'foo_bnorm_nonlin'
    assert bnstack.input_layer.name == 'foo_bnorm'

    # check if created layers are named with wrapped layer name
    layer = Mock(Layer, output_shape=input_shape, nonlinearity=obj)
    layer.name = 'foo'
    bnstack = batch_norm(layer)
    assert isinstance(bnstack, NonlinearityLayer)
    assert isinstance(bnstack.input_layer, BatchNormLayer)
    assert bnstack.name == 'foo_bn_nonlin'
    assert bnstack.input_layer.name == 'foo_bn'

    # check if created layers remain unnamed if no names are given
    layer = Mock(Layer, output_shape=input_shape, nonlinearity=obj)
    bnstack = batch_norm(layer)
    assert isinstance(bnstack, NonlinearityLayer)
    assert isinstance(bnstack.input_layer, BatchNormLayer)
    assert bnstack.name is None
    assert bnstack.input_layer.name is None
def the_provider():
    """Yield a mock provider named after the resource under test, id 1."""
    provider = Mock()
    provider.id = 1
    # 'name' must be assigned post-construction (Mock constructor kwarg).
    provider.name = RESOURCE_NAME
    yield provider
def testMetricNotFoundFromSetLastTimestamp(self, engineMock, repoMock,
                                           ec2InstanceMetricGetterMock,
                                           _metricStreamerMock):
    """Test handling of ObjectNotFoundError when calling
    repository.setMetricLastTimestamp in _processAutostackMetricRequests.

    In this case, we expect _processAutostackMetricRequests to skip this
    collection and continue processing the next one(s)
    """
    # Ignore attemting to look for MySQL transient errors.
    # We're not testing those here.
    repoMock.retryOnTransientErrors.side_effect = lambda f: f

    # Define metric to skip over
    errMetric = Mock(spec_set=self.MetricRowSpec)
    errMetric.name = "errMetric"
    errInstanceID = "i-00000"
    errRefID = 0  # index into requests sequence
    errRequest = AutostackMetricRequest(
        refID=errRefID,
        autostack=Mock(spec_set=self.AutostackRowSpec),
        metric=errMetric)
    errMetricRecord = MetricRecord(timestamp=datetime.datetime.utcnow(),
                                   value=2)
    errData = InstanceMetricData(instanceID=errInstanceID,
                                 records=[errMetricRecord])
    errCollection = MetricCollection(refID=errRefID,
                                     slices=[errData],
                                     timeRange=self.timeRange,
                                     nextMetricTime=self.timeRange.end)

    # Define "ok" metric
    okMetric = Mock(spec_set=self.MetricRowSpec)
    okMetric.name = "okMetric"
    okInstanceID = "i-11111"
    okRefID = 1  # index into requests sequence
    okRequest = AutostackMetricRequest(
        refID=okRefID,
        autostack=Mock(spec_set=self.AutostackRowSpec),
        metric=okMetric)
    okDataValue = 111
    okMetricRecord = MetricRecord(timestamp=datetime.datetime.utcnow(),
                                  value=okDataValue)
    okData = InstanceMetricData(instanceID=okInstanceID,
                                records=[okMetricRecord])
    okCollection = MetricCollection(refID=okRefID,
                                    slices=[okData],
                                    timeRange=self.timeRange,
                                    nextMetricTime=self.timeRange.end)

    # Make setMetricLastTimestamp error on first call (error metric) and pass
    # on second call (ok metric)
    repoMock.setMetricLastTimestamp.side_effect = (
        app_exceptions.ObjectNotFoundError("Expected: things happen"),
        None)

    requests = [errRequest, okRequest]
    collections = [errCollection, okCollection]
    metricGetterInstanceMock = ec2InstanceMetricGetterMock.return_value
    metricGetterInstanceMock.collectMetricData.return_value = iter(
        collections)

    # Capture everything streamed so we can assert only the ok metric
    # made it through.
    streamedData = []
    _metricStreamerMock.return_value.streamMetricData.side_effect = (
        lambda data, *args, **kwargs: streamedData.append(data))

    aggSvc = aggregator_service.AggregatorService()
    with patch.object(aggregator_service, "getAggregationFn",
                      autospec=True, return_value=None):
        aggSvc._processAutostackMetricRequests(
            engine=engineMock,
            requests=requests,
            modelSwapper=Mock(spec_set=ModelSwapperInterface))

    # Only the "ok" metric's single data point should have been streamed.
    self.assertEqual(len(streamedData), 1)
    self.assertEqual(len(streamedData[0]), 1)
    self.assertEqual(streamedData[0][0][1], okDataValue)
def make_fake_milestone(name, tasks=None):
    """Create a mock milestone whose ``searchTasks()`` returns *tasks*.

    Args:
        name: value exposed as the milestone's ``name`` attribute.
        tasks: list returned by ``searchTasks()``; defaults to an empty list.

    Returns:
        A ``Mock`` restricted (via ``spec``) to the ``searchTasks`` method,
        with ``name`` set on the instance.
    """
    # Use a None sentinel instead of the original mutable default `tasks=[]`
    # (shared-mutable-default pitfall); behavior for callers is unchanged.
    if tasks is None:
        tasks = []
    milestone = Mock(spec=['searchTasks'])
    # the name attribute cannot be set in __init__ because Mock defines it.
    milestone.name = name
    milestone.searchTasks.return_value = tasks
    return milestone
def get_maintainer(self):
    """Return a mock package maintainer with fixed name and email fields."""
    maintainer = Mock()
    maintainer.email = '*****@*****.**'
    maintainer.name = 'John Foo'
    return maintainer
def the_zone():
    """Yield a mock zone whose name is "default" and id is 1."""
    zone = Mock()
    zone.id = 1
    zone.name = "default"
    yield zone
def mock_field(scope, name):
    """Return a mock field object carrying the supplied scope and name."""
    fake_field = Mock()
    fake_field.name = name
    fake_field.scope = scope
    return fake_field
def the_provider():
    """Yield a mock provider configured with the module's provider constants."""
    provider = Mock()
    provider.id = PROVIDER_ID
    provider.name = PROVIDER_NAME
    yield provider
def create_mock_step(name):
    """Return a mock build step named *name*.

    Calling the step returns the step itself, and its ``startStep()``
    reports SUCCESS.
    """
    mock_step = Mock()
    mock_step.name = name
    mock_step.return_value = mock_step
    mock_step.startStep.return_value = SUCCESS
    return mock_step
def get_mock_xiv_config_param(name, value):
    """Build a mock XIV configuration parameter with a name/value pair."""
    param = Mock()
    param.value = value
    param.name = name
    return param
def _make_ep(self, name, cls): m = Mock() m.name = name m.load.return_value = cls return m
def test_with_sourcepath(self):
    """Exercise find_in_workspaces() against two mock catkin workspaces:
    project-specific lookups, first-match / first-workspace options and
    overlay behavior."""

    def create_mock_workspace(root_dir, ws):
        # Lay out <root>/<ws>/{include,share}/foo plus a catkin marker
        # file naming two extra source locations ('loc1;loc2').
        ws1 = os.path.join(root_dir, ws)
        inc = os.path.join(ws1, "include")
        share = os.path.join(ws1, "share")
        p1inc = os.path.join(inc, "foo")
        p1share = os.path.join(share, "foo")
        os.makedirs(ws1)
        os.makedirs(inc)
        os.makedirs(share)
        os.makedirs(p1inc)
        os.makedirs(p1share)
        with open(os.path.join(ws1, CATKIN_MARKER_FILE), 'w') as fhand:
            fhand.write('loc1;loc2')

    try:
        # Patch find_packages so 'foo' and 'bar' appear to be known
        # packages; restored in the finally block.
        fp_backup = catkin.find_in_workspaces.find_packages
        root_dir = tempfile.mkdtemp()
        catkin.find_in_workspaces.find_packages = Mock()
        foomock = Mock()
        foomock.name = 'foo'
        barmock = Mock()
        barmock.name = 'bar'
        catkin.find_in_workspaces.find_packages.return_value = {
            'bar': barmock,
            'foo': foomock
        }
        create_mock_workspace(root_dir, 'ws1')
        create_mock_workspace(root_dir, 'ws2')
        # Lookup with a sub-path: all candidate paths considered, none exist.
        checked = []
        existing = find_in_workspaces(
            ['share', 'etc'], 'foo', 'foopath',
            _workspaces=[os.path.join(root_dir, 'ws1')],
            considered_paths=checked)
        self.assertEqual([
            os.path.join(root_dir, 'ws1', 'share', 'foo', 'foopath'),
            'loc1/foo/foopath', 'loc2/foo/foopath',
            os.path.join(root_dir, 'ws1', 'etc', 'foo', 'foopath')
        ], checked)
        self.assertEqual([], existing)
        # Same lookup without a sub-path: the share/foo dir exists.
        checked = []
        existing = find_in_workspaces(
            ['share', 'etc'], 'foo', None,
            _workspaces=[os.path.join(root_dir, 'ws1')],
            considered_paths=checked)
        self.assertEqual([
            os.path.join(root_dir, 'ws1', 'share', 'foo'), 'loc1/foo',
            'loc2/foo',
            os.path.join(root_dir, 'ws1', 'etc', 'foo')
        ], checked)
        self.assertEqual([os.path.join(root_dir, 'ws1', 'share', 'foo')],
                         existing)
        # first-only option
        checked = []
        existing = find_in_workspaces(None, None, None,
                                      _workspaces=[
                                          os.path.join(root_dir, 'ws1'),
                                          os.path.join(root_dir, 'ws2')
                                      ],
                                      considered_paths=checked)
        self.assertEqual([
            os.path.join(root_dir, 'ws1', 'include'),
            os.path.join(root_dir, 'ws1', 'share'),
            os.path.join(root_dir, 'ws2', 'include'),
            os.path.join(root_dir, 'ws2', 'share')
        ], existing)
        existing = find_in_workspaces(None, None, None,
                                      _workspaces=[
                                          os.path.join(root_dir, 'ws1'),
                                          os.path.join(root_dir, 'ws2')
                                      ],
                                      considered_paths=checked,
                                      first_matching_workspace_only=True)
        self.assertEqual([
            os.path.join(root_dir, 'ws1', 'include'),
            os.path.join(root_dir, 'ws1', 'share')
        ], existing)
        existing = find_in_workspaces(None, None, None,
                                      _workspaces=[
                                          os.path.join(root_dir, 'ws1'),
                                          os.path.join(root_dir, 'ws2')
                                      ],
                                      considered_paths=checked,
                                      first_match_only=True)
        self.assertEqual([os.path.join(root_dir, 'ws1', 'include')],
                         existing)
        # overlay: first_matching_workspace_only=True
        checked = []
        existing = find_in_workspaces(None, 'foo', None,
                                      _workspaces=[
                                          os.path.join(root_dir, 'ws1'),
                                          os.path.join(root_dir, 'ws2')
                                      ],
                                      considered_paths=checked,
                                      first_matching_workspace_only=True)
        self.assertEqual([
            os.path.join(root_dir, 'ws1', 'include', 'foo'),
            os.path.join(root_dir, 'ws1', 'share', 'foo')
        ], existing)
    finally:
        # Always restore the patched function and remove the temp tree.
        catkin.find_in_workspaces.find_packages = fp_backup
        shutil.rmtree(root_dir)
def setup(self, mock_exists):
    """Build the mock harness around DiskBuilder.

    Replaces the collaborators in the ``kiwi.builder.disk`` module
    (Disk, Kernel, DiskSetup, bootloader, firmware, raid/luks devices,
    fstab, ...) with mocks, then instantiates the DiskBuilder under test
    against the example disk config description.
    """
    Defaults.set_platform_name('x86_64')

    def side_effect(filename):
        # Pretend the user/system kiwi config files are absent but every
        # other path exists.
        if filename.endswith('.config/kiwi/config.yml'):
            return False
        elif filename.endswith('etc/kiwi.yml'):
            return False
        else:
            return True

    mock_exists.side_effect = side_effect
    description = XMLDescription(
        '../data/example_disk_config.xml'
    )
    # Mapped block devices the mocked Disk will report.
    self.device_map = {
        'root': MappedDevice('/dev/root-device', Mock()),
        'swap': MappedDevice('/dev/swap-device', Mock()),
        'readonly': MappedDevice('/dev/readonly-root-device', Mock()),
        'boot': MappedDevice('/dev/boot-device', Mock()),
        'prep': MappedDevice('/dev/prep-device', Mock()),
        'efi': MappedDevice('/dev/efi-device', Mock()),
        'spare': MappedDevice('/dev/spare-device', Mock())
    }
    self.id_map = {
        'kiwi_RootPart': 1,
        'kiwi_BootPart': 1
    }
    self.id_map_sorted = OrderedDict(
        sorted(self.id_map.items())
    )
    self.boot_names_type = namedtuple(
        'boot_names_type', ['kernel_name', 'initrd_name']
    )
    self.block_operation = Mock()
    self.block_operation.get_blkid = Mock(
        return_value='blkid_result'
    )
    self.block_operation.get_filesystem = Mock(
        return_value='blkid_result_fs'
    )
    # Patch the module-level classes used by DiskBuilder with mocks.
    kiwi.builder.disk.BlockID = Mock(
        return_value=self.block_operation
    )
    self.loop_provider = Mock()
    kiwi.builder.disk.LoopDevice = Mock(
        return_value=self.loop_provider
    )
    self.disk = Mock()
    provider = Mock()
    provider.get_device = Mock(
        return_value='/dev/some-loop'
    )
    self.disk.storage_provider = provider
    self.partitioner = Mock()
    self.partitioner.get_id = Mock(
        return_value=1
    )
    self.disk.partitioner = self.partitioner
    self.disk.get_uuid = Mock(
        return_value='0815'
    )
    self.disk.get_public_partition_id_map = Mock(
        return_value=self.id_map_sorted
    )
    self.disk.get_device = Mock(
        return_value=self.device_map
    )
    kernel_info = Mock()
    kernel_info.version = '1.2.3'
    kernel_info.name = 'vmlinuz-1.2.3-default'
    self.kernel = Mock()
    self.kernel.get_kernel = Mock(
        return_value=kernel_info
    )
    self.kernel.get_xen_hypervisor = Mock()
    self.kernel.copy_kernel = Mock()
    self.kernel.copy_xen_hypervisor = Mock()
    kiwi.builder.disk.Kernel = Mock(
        return_value=self.kernel
    )
    kiwi.builder.disk.Disk = Mock(
        return_value=self.disk
    )
    self.disk_setup = Mock()
    self.disk_setup.get_disksize_mbytes.return_value = 1024
    self.disk_setup.boot_partition_size.return_value = 0
    self.disk_setup.get_efi_label = Mock(
        return_value='EFI'
    )
    self.disk_setup.get_root_label = Mock(
        return_value='ROOT'
    )
    self.disk_setup.get_boot_label = Mock(
        return_value='BOOT'
    )
    self.disk_setup.need_boot_partition = Mock(
        return_value=True
    )
    self.bootloader_install = Mock()
    kiwi.builder.disk.BootLoaderInstall.new = MagicMock(
        return_value=self.bootloader_install
    )
    self.bootloader_config = Mock()
    self.bootloader_config.get_boot_cmdline = Mock(
        return_value='boot_cmdline'
    )
    kiwi.builder.disk.BootLoaderConfig.new = MagicMock(
        return_value=self.bootloader_config
    )
    kiwi.builder.disk.DiskSetup = MagicMock(
        return_value=self.disk_setup
    )
    self.boot_image_task = Mock()
    self.boot_image_task.boot_root_directory = 'boot_dir'
    self.boot_image_task.kernel_filename = 'kernel'
    self.boot_image_task.initrd_filename = 'initrd'
    self.boot_image_task.xen_hypervisor_filename = 'xen_hypervisor'
    self.boot_image_task.get_boot_names.return_value = self.boot_names_type(
        kernel_name='linux.vmx',
        initrd_name='initrd.vmx'
    )
    kiwi.builder.disk.BootImage.new = Mock(
        return_value=self.boot_image_task
    )
    self.firmware = Mock()
    self.firmware.get_legacy_bios_partition_size.return_value = 0
    self.firmware.get_efi_partition_size.return_value = 0
    self.firmware.get_prep_partition_size.return_value = 0
    self.firmware.efi_mode = Mock(
        return_value='efi'
    )
    kiwi.builder.disk.FirmWare = Mock(
        return_value=self.firmware
    )
    self.setup = Mock()
    kiwi.builder.disk.SystemSetup = Mock(
        return_value=self.setup
    )
    self.install_image = Mock()
    kiwi.builder.disk.InstallImageBuilder = Mock(
        return_value=self.install_image
    )
    self.raid_root = Mock()
    self.raid_root.get_device.return_value = MappedDevice(
        '/dev/md0', Mock()
    )
    kiwi.builder.disk.RaidDevice = Mock(
        return_value=self.raid_root
    )
    self.luks_root = Mock()
    kiwi.builder.disk.LuksDevice = Mock(
        return_value=self.luks_root
    )
    self.fstab = Mock()
    kiwi.builder.disk.Fstab = Mock(
        return_value=self.fstab
    )
    # Finally create the object under test with its mocked collaborators.
    self.xml_state = XMLState(description.load())
    self.disk_builder = DiskBuilder(
        self.xml_state, 'target_dir', 'root_dir',
        custom_args={'signing_keys': ['key_file_a', 'key_file_b']}
    )
    self.disk_builder.root_filesystem_is_overlay = False
    self.disk_builder.build_type_name = 'oem'
    self.disk_builder.image_format = None