def test_move(self):
    """Verify that MQTT 'move' and 'turn' commands are interpreted correctly
    by the adapter and produce the expected Drivar log output."""
    log = LogCapture(names="drivar.DrivarNoop", install=False,
                     level=logging.DEBUG)
    drivar = DrivarNoop()
    drivar.initialize()
    adapter = PahoMqttAdapter(drivar)
    adapter.start()
    log.install()

    # Publish a 'move' command and give the adapter a moment to process it.
    params = {'speed': Drivar.SPEED_FAST}
    self.client.publish("scene/robot/drivar/command/motor/move",
                        json.dumps(params), 0, True)
    time.sleep(1)
    power = DrivarNoop._getMotorPowerLevel(params['speed'])
    expected_move = ('drivar.DrivarNoop', 'INFO',
                     'Drivar : Moving all wheels with power ' +
                     str(power) + '.')
    expected_stop = ('drivar.DrivarNoop', 'INFO',
                     'Drivar : Stopping the vehicle.')
    log.check(expected_move, expected_stop)
    log.clear()

    # Publish a 'turn' command (empty payload) and verify the log entry.
    self.client.publish("scene/robot/drivar/command/motor/turn",
                        "", 0, True)
    time.sleep(1)
    log.check(('drivar.DrivarNoop', 'INFO',
               'Drivar : Turning the vehicle left by 90 degrees.'))
    log.uninstall()
class TestHandleAnswer(unittest.TestCase):
    """Exercise ControlNodeSerial._handle_answer message dispatching."""

    def setUp(self):
        # Fresh control-node interface on a dummy tty; capture its logger.
        self.cn = cn_interface.ControlNodeSerial('tty')
        self.log = LogCapture('gateway_code', level=logging.DEBUG)

    def tearDown(self):
        self.log.uninstall()

    def test_config_ack(self):
        # 'config_ack set_time <delay>' logs the delay converted to us.
        self.cn._handle_answer('config_ack set_time 0.123456')
        expected = (
            ('gateway_code', 'DEBUG', 'config_ack set_time'),
            ('gateway_code', 'INFO',
             'Control Node set time delay: 123456 us'),
        )
        self.log.check(*expected)
        self.log.clear()

        # Any other config_ack is only logged at DEBUG level.
        self.cn._handle_answer('config_ack anything')
        self.log.check(('gateway_code', 'DEBUG', 'config_ack anything'))

    def test_error(self):
        # 'error' answers are reported with the repr of the payload.
        self.cn._handle_answer('error 42')
        self.log.check(
            ('gateway_code', 'ERROR', 'Control node error: %r' % '42'))

    def test_cn_serial_error(self):
        # Serial-level errors are forwarded to the log verbatim.
        self.cn._handle_answer('cn_serial_error: any error msg')
        self.log.check(
            ('gateway_code', 'ERROR', 'cn_serial_error: any error msg'))

    def test_measures_debug(self):
        msg = ('measures_debug: consumption_measure 1377268768.841070:'
               '1.78250 0.000000 3.230000 0.080003')

        # With a callback registered, the raw message is handed to it.
        debug_cb = mock.Mock()
        self.cn.measures_debug = debug_cb
        self.cn._handle_answer(msg)
        debug_cb.assert_called_with(msg)

        # Without a callback, the message must be dropped silently.
        debug_cb.reset_mock()
        self.cn.measures_debug = None
        self.cn._handle_answer(msg)
        self.assertFalse(debug_cb.called)
class TestHandleAnswer(unittest.TestCase):
    """Tests for ControlNodeSerial._handle_answer.

    NOTE(review): this class is a byte-for-byte duplicate of the
    TestHandleAnswer defined earlier in this file; one definition
    shadows the other at module level. Consider removing one copy.
    """

    def setUp(self):
        # Control-node interface on a dummy tty; capture its logger output.
        self.cn = cn_interface.ControlNodeSerial('tty')
        self.log = LogCapture('gateway_code', level=logging.DEBUG)

    def tearDown(self):
        self.log.uninstall()

    def test_config_ack(self):
        # 'config_ack set_time <delay>' logs the delay converted to us.
        self.cn._handle_answer('config_ack set_time 0.123456')
        self.log.check(
            ('gateway_code', 'DEBUG', 'config_ack set_time'),
            ('gateway_code', 'INFO', 'Control Node set time delay: 123456 us')
        )
        self.log.clear()
        # Other config_ack messages are only logged at DEBUG level.
        self.cn._handle_answer('config_ack anything')
        self.log.check(
            ('gateway_code', 'DEBUG', 'config_ack anything'),
        )

    def test_error(self):
        # 'error' answers are reported with the repr of the payload.
        self.cn._handle_answer('error 42')
        self.log.check(
            ('gateway_code', 'ERROR', 'Control node error: %r' % '42'))

    def test_cn_serial_error(self):
        # Serial-level errors are forwarded to the log verbatim.
        self.cn._handle_answer('cn_serial_error: any error msg')
        self.log.check(
            ('gateway_code', 'ERROR', 'cn_serial_error: any error msg'))

    def test_measures_debug(self):
        msg = ('measures_debug: consumption_measure 1377268768.841070:'
               '1.78250 0.000000 3.230000 0.080003')
        # With measures_debug set, the raw line is passed to the callback.
        m_debug = mock.Mock()
        self.cn.measures_debug = m_debug
        self.cn._handle_answer(msg)
        m_debug.assert_called_with(msg)
        m_debug.reset_mock()
        # With measures_debug unset, the callback must not fire.
        self.cn.measures_debug = None
        self.cn._handle_answer(msg)
        self.assertFalse(m_debug.called)
def testLoadConfig(self):
    """Check that the config loading works properly.

    Verifies: the warning when no config is passed, the IOError/ValueError
    failure modes of setup() when the last-run file is missing or malformed,
    and the connection log output after a successful setup.
    """

    def _remove_if_exists(filename):
        # Plain 'if' instead of the original conditional expression used
        # purely for its side effect ('os.remove(...) if ... else None').
        if os.path.exists(filename):
            os.remove(filename)

    # check if it is complaining about not having a json
    l = LogCapture('sync-cass-elastic')
    _ = PyCassElastic()
    l.check(('sync-cass-elastic', 'WARNING',
             'No config file passed for the sync class'), )
    l.clear()

    # check if it has setup correctly
    sync = PyCassElastic(self.config)

    # assert the last time log file is created properly
    _remove_if_exists(sync.lastRunFileName)
    self.assertRaises(IOError, sync.setup)
    with open(sync.lastRunFileName, 'w') as f:
        f.write(' ')
    self.assertRaises(ValueError, sync.setup)

    # check connection
    l.clear()
    minutes_ago = self._createLastRunFile()
    sync.setup()
    l.check(
        ('sync-cass-elastic', 'DEBUG',
         u"Connected to Cassandra: [u'localhost'] / test"),
        ('sync-cass-elastic', 'DEBUG',
         u"Connected to Elasticsearch: [u'localhost']"),
    )
    self.assertEqual(minutes_ago.strftime('%Y%m%d %H:%M'),
                     sync.time_last_run.strftime('%Y%m%d %H:%M'),
                     'The time should be the same')
    self.assertNotEqual(sync.time_this_run, None,
                        'Time of this run should be filled')
    self.assertNotEqual(sync.time_delta, None,
                        'Time of this run should be filled')

    # get rid of the logger checker
    l.uninstall()
    _remove_if_exists(sync.lastRunFileName)
class TestVpcBotoInteractions(unittest.TestCase):
    """
    We use the moto mock framework for boto in order to test our
    interactions with boto.

    The tests assert on exact log lines produced by the vpc module
    (captured via LogCapture on the root logger), so the expected
    message strings below must match the implementation byte-for-byte.
    """

    def setUp(self):
        # Capture root-logger output; filtered through the project's filter.
        self.lc = LogCapture()
        self.lc.addFilter(test_common.MyLogCaptureFilter())
        self.addCleanup(self.cleanup)
        # Hosts are chosen randomly from a prefix group. Therefore, we need to
        # seed the random number generator with a specific value in order to
        # have reproducible tests.
        random.seed(123)

    def cleanup(self):
        self.lc.uninstall()

    @mock_ec2_deprecated
    def make_mock_vpc(self):
        """
        Use plain (but mocked) boto functions to create a small VPC with two
        subnets and two instances as a basis for our tests.

        (not quite sure why this doesn't run in setUp().
        """
        con = boto.vpc.connect_to_region("ap-southeast-2")
        # Note that moto doesn't seem to honor the subnet and VPC address
        # ranges, it seems all instances always get something random from a
        # 10/8 range.
        self.new_vpc = con.create_vpc('10.0.0.0/16')
        self.new_subnet_a = con.create_subnet(self.new_vpc.id, '10.1.0.0/16')
        self.new_subnet_b = con.create_subnet(self.new_vpc.id, '10.2.0.0/16')
        res1 = con.run_instances('ami-1234abcd',
                                 subnet_id=self.new_subnet_a.id)
        res2 = con.run_instances('ami-1234abcd',
                                 subnet_id=self.new_subnet_b.id)
        self.i1 = res1.instances[0]
        self.i2 = res2.instances[0]
        self.i1ip = self.i1.private_ip_address
        self.i2ip = self.i2.private_ip_address

    @mock_ec2_deprecated
    def test_connect(self):
        """Sanity-check connect_to_region / get_vpc_overview /
        find_instance_and_eni_by_ip against the mocked VPC."""
        self.make_mock_vpc()
        # With a test VPC created, we now test our own functions

        # In the mocked test the meta data won't contain the info we need (vpc
        # and region name), because the emulated EC2 instance isn't in any
        # region or vpc.
        meta = vpc.get_ec2_meta_data()
        self.assertTrue(meta == {})

        self.assertRaises(VpcRouteSetError, vpc.connect_to_region, "blah")
        con = vpc.connect_to_region("ap-southeast-2")

        # Error when specifying non-existent VPC
        self.assertRaises(VpcRouteSetError, vpc.get_vpc_overview,
                          con, "non-existent-vpc", "ap-southeast-2")

        # Get the default: First VPC if no VPC is specified
        d = vpc.get_vpc_overview(con, None, "ap-southeast-2")
        # NOTE(review): "vpc-be745e76" is the deterministic ID moto assigns
        # under the seeded/mocked environment — TODO confirm across moto
        # versions.
        self.assertEqual(d['vpc'].id, "vpc-be745e76")

        # Get specified VPC
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        self.assertEqual(d['vpc'].id, "vpc-be745e76")

        self.assertEqual(
            sorted(['subnets', 'route_tables', 'instance_by_id',
                    'instances', 'subnet_rt_lookup', 'zones', 'vpc']),
            sorted(d.keys()))

        self.assertEqual(self.new_vpc.id, d['vpc'].id)
        self.assertTrue(self.new_subnet_a.id in [s.id for s in d['subnets']])
        self.assertTrue(self.new_subnet_b.id in [s.id for s in d['subnets']])
        self.assertTrue(len(d['zones']) == 3)
        self.assertTrue(len(d['route_tables']) == 1)
        self.assertTrue(len(d['instance_by_id'].keys()) == 2)
        self.assertTrue(d['instance_by_id'][self.i1.id].id == self.i1.id)
        self.assertTrue(d['instance_by_id'][self.i2.id].id == self.i2.id)

        self.assertRaises(VpcRouteSetError, vpc.find_instance_and_eni_by_ip,
                          d, "9.9.9.9")  # Non existent IP
        self.assertTrue(
            vpc.find_instance_and_eni_by_ip(d, self.i1ip)[0].id == self.i1.id)
        self.assertTrue(
            vpc.find_instance_and_eni_by_ip(d, self.i2ip)[0].id == self.i2.id)

    def _prepare_mock_env(self):
        # Build the mock VPC and return the handles most tests need:
        # connection, vpc-overview dict, both instances/ENIs and the RT id.
        self.make_mock_vpc()
        con = vpc.connect_to_region("ap-southeast-2")
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        i1, eni1 = vpc.find_instance_and_eni_by_ip(d, self.i1ip)
        i2, eni2 = vpc.find_instance_and_eni_by_ip(d, self.i2ip)
        rt_id = d['route_tables'][0].id
        return con, d, i1, eni1, i2, eni2, rt_id

    @mock_ec2_deprecated
    def test_process_route_spec_config(self):
        """Drive process_route_spec_config through add / failover /
        delete / protected-route scenarios, asserting the exact logs."""
        con, d, i1, eni1, i2, eni2, rt_id = self._prepare_mock_env()

        route_spec = {u"10.1.0.0/16": [self.i1ip, self.i2ip]}

        # Process a simple route spec, a route should have been added
        self.lc.clear()
        vpc.process_route_spec_config(con, d, route_spec, [], [])
        # One of the hosts is randomly chosen. We seeded the random number
        # generator at in this module, so we know that it will choose the
        # second host in this case.
        self.lc.check(
            ('root', 'DEBUG', 'Route spec processing. No failed IPs.'),
            ('root', 'INFO',
             "--- adding route in RT '%s' "
             "10.1.0.0/16 -> %s (%s, %s)" %
             (rt_id, self.i1ip, i1.id, eni1.id)))

        # One of the two IPs questionable, switch over
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        self.lc.clear()
        vpc.process_route_spec_config(con, d, route_spec, [], [self.i1ip])
        self.lc.check(
            ('root', 'DEBUG', 'Route spec processing. No failed IPs.'),
            ('root', 'INFO',
             "--- eni in route in RT 'rtb-84dc7f2c' can't be found: "
             "10.1.0.0/16 -> (none) (instance '%s')" % i1.id),
            ('root', 'INFO',
             "--- updating existing route in RT '%s' 10.1.0.0/16 -> "
             "%s (%s, %s) (old IP: None, reason: old IP failed/questionable "
             "or not eligible anymore)" %
             (rt_id, self.i2ip, i2.id, eni2.id)))

        # Now switch back
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        self.lc.clear()
        vpc.process_route_spec_config(con, d, route_spec, [], [self.i2ip])
        self.lc.check(
            ('root', 'DEBUG', 'Route spec processing. No failed IPs.'),
            ('root', 'INFO',
             "--- eni in route in RT 'rtb-84dc7f2c' can't be found: "
             "10.1.0.0/16 -> (none) (instance '%s')" % i2.id),
            ('root', 'INFO',
             "--- updating existing route in RT '%s' 10.1.0.0/16 -> "
             "%s (%s, %s) (old IP: None, reason: old IP failed/questionable "
             "or not eligible anymore)" %
             (rt_id, self.i1ip, i1.id, eni1.id)))

        # One of the two IPs failed, switch over
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        self.lc.clear()
        vpc.process_route_spec_config(con, d, route_spec, [self.i1ip], [])
        self.lc.check(
            ('root', 'DEBUG',
             'Route spec processing. Failed IPs: %s' % self.i1ip),
            ('root', 'INFO',
             "--- eni in route in RT 'rtb-84dc7f2c' can't be found: "
             "10.1.0.0/16 -> (none) (instance '%s')" % i1.id),
            ('root', 'INFO',
             "--- updating existing route in RT '%s' 10.1.0.0/16 -> "
             "%s (%s, %s) (old IP: None, reason: old IP failed/questionable "
             "or not eligible anymore)" %
             (rt_id, self.i2ip, i2.id, eni2.id)))

        # Now all IPs for a route have failed
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        self.lc.clear()
        vpc.process_route_spec_config(con, d, route_spec,
                                      [self.i1ip, self.i2ip], [])
        self.lc.check(
            ('root', 'DEBUG',
             'Route spec processing. Failed IPs: %s,%s' %
             (self.i1ip, self.i2ip)),
            ('root', 'INFO',
             "--- eni in route in RT 'rtb-84dc7f2c' can't be found: "
             "10.1.0.0/16 -> (none) (instance '%s')" % i2.id),
            ('root', 'WARNING',
             '--- cannot find available target for route update 10.1.0.0/16! '
             'Nothing I can do...'))

        # Add new route, remove old one
        route_spec = {u"10.2.0.0/16": [self.i1ip]}
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        self.lc.clear()
        vpc.process_route_spec_config(con, d, route_spec, [], [])
        self.lc.check(
            ('root', 'DEBUG', 'Route spec processing. No failed IPs.'),
            ('root', 'INFO',
             "--- eni in route in RT 'rtb-84dc7f2c' can't be found: "
             "10.1.0.0/16 -> (none) (instance '%s')" % i2.id),
            ('root', 'INFO',
             "--- route not in spec, deleting in RT '%s': "
             "10.1.0.0/16 -> ... ((unknown), (unknown))" % rt_id),
            ('root', 'INFO',
             "--- adding route in RT '%s' "
             "10.2.0.0/16 -> %s (%s, %s)" %
             (rt_id, self.i1ip, i1.id, eni1.id)))

        # Protect old route (ignore_routes), add new route, watch the old
        # route NOT disappear.
        CURRENT_STATE.ignore_routes.append("10.2.0.0/16")  # protected route
        route_spec = {u"10.3.0.0/16": [self.i1ip]}
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        self.lc.clear()
        vpc.process_route_spec_config(con, d, route_spec, [], [])
        # See in the logs that 10.2.0.0/16 wasn't deleted, even though it's
        # not in the route spec anymore.
        self.lc.check(
            ('root', 'DEBUG', 'Route spec processing. No failed IPs.'),
            ('root', 'INFO',
             "--- adding route in RT '%s' "
             "10.3.0.0/16 -> %s (%s, %s)" %
             (rt_id, self.i1ip, i1.id, eni1.id)))

    @mock_ec2_deprecated
    def test_add_new_route(self):
        """_add_new_route: success path and unknown-IP error path."""
        con, d, i1, eni1, i2, eni2, rt_id = self._prepare_mock_env()

        self.lc.clear()
        vpc._add_new_route("10.9.0.0/16", self.i1ip, d, con, rt_id)
        self.lc.check(
            ('root', 'INFO',
             "--- adding route in RT '%s' "
             "10.9.0.0/16 -> %s (%s, %s)" %
             (rt_id, self.i1ip, i1.id, eni1.id)))

        self.lc.clear()
        vpc._add_new_route("10.9.0.0/16", "99.99.99.99", d, con, rt_id)
        self.lc.check(
            ('root', 'ERROR',
             "*** failed to add route in RT '%s' "
             "10.9.0.0/16 -> 99.99.99.99 (Could not find instance/eni "
             "for '99.99.99.99' in VPC '%s'.)" % (rt_id, self.new_vpc.id)))

    @mock_ec2_deprecated
    def test_update_route(self):
        """_update_route: success, unknown-IP error, non-existent route."""
        con, d, i1, eni1, i2, eni2, rt_id = self._prepare_mock_env()

        vpc._add_new_route("10.9.0.0/16", self.i1ip, d, con, rt_id)
        self.lc.clear()
        vpc._update_route("10.9.0.0/16", self.i2ip, self.i1ip,
                          d, con, rt_id, "foobar")
        self.lc.check(
            ('root', 'INFO',
             "--- updating existing route in RT '%s' "
             "10.9.0.0/16 -> %s (%s, %s) "
             "(old IP: %s, reason: foobar)" %
             (rt_id, self.i2ip, i2.id, eni2.id, self.i1ip)))

        self.lc.clear()
        vpc._update_route("10.9.0.0/16", "9.9.9.9", self.i2ip,
                          d, con, rt_id, "foobar")
        self.lc.check(
            ('root', 'ERROR',
             "*** failed to update route in RT '%s' "
             "10.9.0.0/16 -> %s (Could not find instance/eni "
             "for '9.9.9.9' in VPC '%s'.)" %
             (rt_id, self.i2ip, self.new_vpc.id)))

        # Trying to update a non-existent route
        self.lc.clear()
        vpc._update_route("10.9.9.9/16", self.i1ip, self.i2ip,
                          d, con, rt_id, "foobar")
        self.lc.check(
            ('root', 'INFO',
             "--- updating existing route in RT '%s' 10.9.9.9/16 -> %s "
             "(%s, %s) (old IP: %s, reason: foobar)" %
             (rt_id, self.i1ip, i1.id, eni1.id, self.i2ip)),
            ('root', 'ERROR',
             "*** failed to update route in RT '%s' 10.9.9.9/16 -> %s "
             "(replace_route failed: u'%s~10.9.9.9/16')" %
             (rt_id, self.i2ip, rt_id)))

    @mock_ec2_deprecated
    def test_get_real_instance_if_mismatched(self):
        """_get_real_instance_if_mismatch returns the real instance only
        when the supplied instance/eni pair doesn't match the IP."""
        con, d, i1, eni1, i2, eni2, rt_id = self._prepare_mock_env()

        # No IP given -> nothing to mismatch against.
        self.assertFalse(vpc._get_real_instance_if_mismatch(d, None, i1,
                                                            eni1))
        # Matching pair -> no correction needed.
        ret = vpc._get_real_instance_if_mismatch(d, self.i1ip, i1, eni1)
        self.assertFalse(ret)

        # Every mismatched or partial pair should resolve back to i1.
        for inst, eni in [(i2, eni2), (i1, eni2), (i2, eni1), (i1, None),
                          (None, eni1), (i2, None), (None, eni2),
                          (None, None)]:
            ret = vpc._get_real_instance_if_mismatch(d, self.i1ip, inst, eni)
            self.assertEqual(ret.id, i1.id)

    @mock_ec2_deprecated
    def test_get_host_for_route(self):
        """_get_host_for_route on a healthy route and two broken variants."""
        con, d, i1, eni1, i2, eni2, rt_id = self._prepare_mock_env()

        vpc._add_new_route("10.9.0.0/16", self.i1ip, d, con, rt_id)

        rt = d['route_tables'][0]
        self.assertEqual(rt.id, rt_id)
        route = rt.routes[0]
        # Moto doesn't maintain intance or interface ID in the routes
        # correctly, so need to set this one manually
        route.instance_id = i1.id
        route.interface_id = eni1.id

        # Find correct host for route (the passed in cidr is only used for
        # logging)
        self.assertEqual((i1.id, self.i1ip, eni1.id),
                         vpc._get_host_for_route(d, route, rt, "cidr-log"))

        # Look for broken route without an instance id
        route.instance_id = None
        self.lc.clear()
        self.assertEqual(('(unknown)', None, '(unknown)'),
                         vpc._get_host_for_route(d, route, rt, "cidr-log"))
        self.lc.check(
            ('root', 'INFO',
             "--- obsoleted route in RT '%s' cidr-log -> "
             "... (doesn't point to instance anymore)" % rt_id))

        # Look for broken route with instance id for non-existent instance
        route.instance_id = "blah"
        self.lc.clear()
        self.assertEqual(('(unknown)', None, '(unknown)'),
                         vpc._get_host_for_route(d, route, rt, "cidr-log"))
        self.lc.check(
            ('root', 'INFO',
             "--- instance in route in RT '%s' can't be found: "
             "cidr-log -> ... (instance 'blah')" % rt_id))

    @mock_ec2_deprecated
    def test_update_existing_routes(self):
        """_update_existing_routes: protected routes, non-instance routes,
        deletions, failed-IP handling and the already-up-to-date case."""
        con, d, i1, eni1, i2, eni2, rt_id = self._prepare_mock_env()

        vpc._add_new_route("10.0.0.0/16", self.i1ip, d, con, rt_id)
        route_spec = {u"10.0.0.0/16": [self.i1ip]}
        routes_in_rts = {}

        # Test that a protected route doesn't get updated
        self.lc.clear()
        CURRENT_STATE.ignore_routes = ["10.0.0.0/8"]
        vpc._update_existing_routes(route_spec, [], [], d, con, routes_in_rts)
        self.assertTrue(rt_id in CURRENT_STATE.vpc_state['route_tables'])
        self.assertTrue(
            "10.0.0.0/16" in CURRENT_STATE.vpc_state['route_tables'][rt_id])
        self.assertTrue("Ignored: Protected CIDR" in CURRENT_STATE.
                        vpc_state['route_tables'][rt_id]["10.0.0.0/16"])
        self.lc.check()

        # Now we un-protect the route and try again. Moto doesn't manage the
        # instance or interface ID in routes, so this will fail, because the
        # route doesn't look like it's pointing to an instance
        CURRENT_STATE.ignore_routes = []
        vpc._update_existing_routes(route_spec, [], [], d, con, routes_in_rts)
        self.assertTrue("Ignored: Not a route to an instance" in CURRENT_STATE.
                        vpc_state['route_tables'][rt_id]["10.0.0.0/16"])
        self.lc.check()

        # Now we manually set the instance and eni id in the route, so that
        # the test can proceed.
        rt = d['route_tables'][0]
        self.assertEqual(rt.id, rt_id)
        route = rt.routes[0]
        # Moto doesn't maintain intance or interface ID in the routes
        # correctly, so need to set this one manually. This time the route
        # spec won't contain eligible hosts.
        route.instance_id = i1.id
        route.interface_id = eni1.id
        self.lc.clear()
        route_spec = {u"10.0.0.0/16": []}
        vpc._update_existing_routes(route_spec, [], [], d, con, routes_in_rts)
        self.lc.check(
            ('root', 'INFO',
             "--- route not in spec, deleting in RT '%s': 10.0.0.0/16 -> "
             "... (%s, %s)" % (rt_id, i1.id, eni1.id)))

        # Get a refresh, since deleting via Boto interface doesn't update the
        # cached vpc-info
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        # There shouldn't be any routes left now
        rt = d['route_tables'][0]
        self.assertFalse(rt.routes)

        # Now try again, but with proper route spec. First we need to create
        # the route again and manually...
        vpc._add_new_route("10.0.0.0/16", self.i1ip, d, con, rt_id)
        # ... and update our cached vpc info
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        rt = d['route_tables'][0]
        route = rt.routes[0]
        route.instance_id = i1.id
        route.interface_id = eni1.id

        route_spec = {u"10.0.0.0/16": [self.i2ip]}

        # Only IP for spec is in failed IPs, can't do anything
        self.lc.clear()
        vpc._update_existing_routes(route_spec, [self.i2ip], [],
                                    d, con, routes_in_rts)
        self.lc.check(
            ('root', 'WARNING',
             '--- cannot find available target for route update '
             '10.0.0.0/16! Nothing I can do...'))

        # Now with available IPs
        self.lc.clear()
        vpc._update_existing_routes(route_spec, [], [], d, con, routes_in_rts)
        self.lc.check(
            ('root', 'INFO',
             "--- updating existing route in RT '%s' 10.0.0.0/16 -> "
             "%s (%s, %s) (old IP: %s, reason: old IP failed/questionable "
             "or not eligible anymore)" %
             (rt_id, self.i2ip, i2.id, eni2.id, self.i1ip)))

        # Now with same route spec again
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        rt = d['route_tables'][0]
        route = rt.routes[0]
        route.instance_id = i2.id
        route.interface_id = eni2.id
        self.lc.clear()
        routes_in_rts = {}
        vpc._update_existing_routes(route_spec, [], [], d, con, routes_in_rts)
        self.lc.check(
            ('root', 'INFO',
             "--- route exists already in RT '%s': 10.0.0.0/16 -> "
             "%s (%s, %s)" % (rt_id, self.i2ip, i2.id, eni2.id)))

    @mock_ec2_deprecated
    def test_add_missing_routes(self):
        """_add_missing_routes: creation, already-present skip, forced
        creation with a previous choice, and all-IPs-failed warning."""
        con, d, i1, eni1, i2, eni2, rt_id = self._prepare_mock_env()

        route_spec = {u"10.0.0.0/16": [self.i1ip]}
        routes_in_rts = {}
        self.lc.clear()
        vpc._update_existing_routes(route_spec, [], [],
                                    d, con, routes_in_rts)
        self.lc.check()

        self.lc.clear()
        vpc._add_missing_routes(route_spec, [], [], {},
                                d, con, routes_in_rts)
        self.lc.check(
            ('root', 'INFO',
             "--- adding route in RT '%s' 10.0.0.0/16 -> "
             "%s (%s, %s)" % (rt_id, self.i1ip, i1.id, eni1.id)))

        # The route exists already (passed in routes_in_rts), so no new route
        # should be created here.
        self.lc.clear()
        vpc._add_missing_routes(route_spec, [], [],
                                {"10.0.0.0/16": self.i1ip},
                                d, con, {rt_id: ["10.0.0.0/16"]})
        self.lc.check()

        # Force a route creation by passing nothing for routes_in_rts and
        # passing in a 'previous' choice for the router
        self.lc.clear()
        vpc._add_missing_routes(route_spec, [], [],
                                {"10.0.0.0/16": self.i1ip},
                                d, con, {rt_id: []})
        self.lc.check(
            ('root', 'INFO',
             "--- adding route in RT '%s' 10.0.0.0/16 -> "
             "%s (%s, %s)" % (rt_id, self.i1ip, i1.id, eni1.id)))

        # Now try the same with the only possible IP in failed IPs.
        self.lc.clear()
        vpc._add_missing_routes(route_spec, [self.i1ip], [], {},
                                d, con, {rt_id: []})
        self.lc.check(
            ('root', 'WARNING',
             '--- cannot find available target for route addition '
             '10.0.0.0/16! Nothing I can do...'))

    @mock_ec2_deprecated
    def test_multi_address(self):
        # Testing that we can find interfaces, which have the specified IP on
        # a second, private IP address
        con, d, i1, eni1, i2, eni2, rt_id = self._prepare_mock_env()

        # NOTE(review): this first assignment is a dead store — it is
        # immediately overwritten by the constructed PrivateIPAddress below.
        priv = eni1.private_ip_addresses[0]
        priv = boto.ec2.networkinterface.PrivateIPAddress(
            private_ip_address="10.9.9.9", primary=False)
        eni1.private_ip_addresses.append(priv)
        self.lc.clear()
        route_spec = {"10.0.0.0/16": ["10.9.9.9"]}
        self.lc.clear()
        vpc._add_missing_routes(route_spec, [], [], {},
                                d, con, {rt_id: []})
        self.lc.check(
            ('root', 'INFO',
             "--- adding route in RT '%s' 10.0.0.0/16 -> 10.9.9.9 "
             "(%s, %s)" % (rt_id, i1.id, eni1.id)))

    @mock_ec2_deprecated
    def test_handle_spec(self):
        """handle_spec end-to-end: route addition, then 'exists already'
        with get_instance_private_ip_from_route monkey-patched."""
        self.make_mock_vpc()

        # Need to take a peek inside the VPC so we can properly evaluate the
        # output later on
        con = vpc.connect_to_region("ap-southeast-2")
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        i, eni = vpc.find_instance_and_eni_by_ip(d, self.i1ip)
        rt_id = d['route_tables'][0].id

        route_spec = {u"10.2.0.0/16": [self.i1ip]}

        # Test handle_spec
        vid = self.new_vpc.id
        self.lc.clear()
        vpc.handle_spec("ap-southeast-2", vid, route_spec, [], [])
        self.lc.check(
            ('root', 'DEBUG', 'Handle route spec'),
            ('root', 'DEBUG', "Connecting to AWS region 'ap-southeast-2'"),
            ('root', 'DEBUG', "Retrieving information for VPC '%s'" % vid),
            ('root', 'DEBUG', 'Route spec processing. No failed IPs.'),
            ('root', 'INFO',
             "--- adding route in RT '%s' 10.2.0.0/16 -> %s (%s, %s)" %
             (rt_id, self.i1ip, self.i1.id, eni.id)))

        # mock the get_instance_private_ip_from_route() function in vpc.
        # Reason being: The boto mocking library (moto) doesn't handle ENIs
        # in routes correctly. Therefore, a match against the information we
        # get from the routes will never work. So, we provide a wrapper,
        # which fills the instance's ENI information into the route. This
        # means that this function now will always match. It's good for
        # testing the 'match' part of the code.
        old_func = vpc.get_instance_private_ip_from_route

        def my_get_instance_private_ip_from_route(instance, route):
            route.interface_id = instance.interfaces[0].id
            return old_func(instance, route)

        vpc.get_instance_private_ip_from_route = \
            my_get_instance_private_ip_from_route
        self.lc.clear()
        vpc.handle_spec("ap-southeast-2", vid, route_spec, [], [])
        # Restore the original function before asserting.
        vpc.get_instance_private_ip_from_route = old_func
        self.lc.check(
            ('root', 'DEBUG', 'Handle route spec'),
            ('root', 'DEBUG', "Connecting to AWS region 'ap-southeast-2'"),
            ('root', 'DEBUG', "Retrieving information for VPC '%s'" % vid),
            ('root', 'DEBUG', 'Route spec processing. No failed IPs.'),
            ('root', 'INFO',
             "--- route exists already in RT '%s': 10.2.0.0/16 -> "
             "%s (%s, %s)" % (rt_id, self.i1ip, self.i1.id, eni.id)))
class TestControlNodeSerial(unittest.TestCase):
    """Tests for ControlNodeSerial process management and command I/O.

    The control_node_serial subprocess is replaced by a mocked Popen; its
    stderr.readline is fed from a Queue so the reader thread can be driven
    deterministically from the tests.
    """

    def setUp(self):
        # Patch Popen so no real subprocess is spawned.
        self.popen_patcher = mock.patch(
            'gateway_code.utils.subprocess_timeout.Popen')
        popen_class = self.popen_patcher.start()
        self.popen = popen_class.return_value

        # terminate() unblocks the reader thread by queueing EOF ('').
        self.popen.terminate.side_effect = self._terminate
        self.popen.poll.return_value = None

        # Queue feeding the mocked stderr.readline; pre-load the ready line.
        self.readline_ret_vals = Queue.Queue(0)
        self.popen.stderr.readline.side_effect = self.readline_ret_vals.get
        self.readline_ret_vals.put('cn_serial_ready\n')

        self.cn = cn_interface.ControlNodeSerial('tty')
        self.log_error = LogCapture('gateway_code', level=logging.WARNING)

    def tearDown(self):
        self.cn.stop()
        mock.patch.stopall()
        self.log_error.uninstall()

    def _terminate(self):
        # Empty string == EOF for the reader thread.
        self.readline_ret_vals.put('')

    def test_normal_start_stop(self):
        ret_start = self.cn.start()
        self.assertEquals(0, ret_start)
        self.assertTrue(self.popen.stderr.readline.called)

        self.cn.stop()
        self.assertTrue(self.popen.terminate.called)
        self.assertTrue(self.readline_ret_vals.empty())

    def test_start_error_in_cn_serial(self):
        # poll should return an error
        self.popen.poll.return_value = 2

        ret_start = self.cn.start()
        self.assertNotEquals(0, ret_start)
        self.log_error.check(
            ('gateway_code', 'ERROR',
             'Control node serial reader thread ended prematurely'))
        self.cn.stop()

    def test_stop_before_start(self):
        # stop() without start() must not raise.
        self.cn.stop()

    def test_stop_with_cn_interface_allready_stopped(self):
        # Simulate cn_interface stopped
        self.readline_ret_vals.put('')
        self.popen.stdin.write.side_effect = IOError()
        self.popen.terminate.side_effect = OSError()

        self.cn.start()

        # try sending command
        ret = self.cn.send_command(['test', 'cmd'])
        self.assertEquals(None, ret)
        self.log_error.check(
            ('gateway_code', 'ERROR',
             'control_node_serial process is terminated'))
        self.log_error.clear()

        self.cn.stop()
        self.log_error.check(
            ('gateway_code', 'ERROR',
             'Control node process already terminated'))

    def test_stop_terminate_failed(self):
        """Stop cn_interface but terminate does not stop it."""
        # terminate does not stop process
        self.popen.terminate.side_effect = None
        timeout_expired = cn_interface.subprocess_timeout.TimeoutExpired
        self.popen.wait.side_effect = timeout_expired('cn_serial_interface',
                                                      3)
        # kill does it
        self.popen.kill.side_effect = self._terminate

        self.cn.start()
        self.cn.stop()
        self.assertTrue(self.popen.kill.called)
        self.log_error.check(
            ('gateway_code', 'WARNING',
             'Control node serial not terminated, kill it'))

    # Test command sending

    def test_send_command(self):
        # Writing the command triggers the matching ACK on the read side.
        self.popen.stdin.write.side_effect = \
            (lambda *x: self.readline_ret_vals.put('start ACK\n'))

        self.cn.start()
        ret = self.cn.send_command(['start', 'DC'])
        self.assertEquals(['start', 'ACK'], ret)
        self.cn.stop()

    def test_send_command_no_answer(self):
        self.cn.start()
        ret = self.cn.send_command(['start', 'DC'])
        self.assertIsNone(ret)
        self.cn.stop()

    def test_send_command_cn_interface_stoped(self):
        ret = self.cn.send_command(['lala'])
        self.assertIsNone(ret)

    def test_answer_and_answer_with_queue_full(self):
        # get two answers without sending command
        self.readline_ret_vals.put('set ACK\n')
        self.readline_ret_vals.put('start ACK\n')

        self.cn.start()
        self.cn.stop()

        self.log_error.check(
            ('gateway_code', 'ERROR',
             'Control node answer queue full: %r' % ['start', 'ACK']))

    # _cn_interface_args

    def test__cn_interface_args(self):
        # Default: tty present, no OML config ('-c'), no debug ('-d').
        args = self.cn._cn_interface_args()
        self.assertIn(self.cn.tty, args)
        self.assertNotIn('-c', args)
        self.assertNotIn('-d', args)

        # OML config
        args = self.cn._cn_interface_args('<omlc></omlc>')
        self.assertIn('-c', args)
        self.assertNotIn('-d', args)
        self.cn._oml_cfg_file.close()

        # Debug mode
        self.cn.measures_debug = (lambda x: None)
        args = self.cn._cn_interface_args()
        self.assertNotIn('-c', args)
        self.assertIn('-d', args)

    # _config_oml coverage tests

    def test_empty_config_oml(self):
        # No experiment description
        ret = self.cn._oml_config_file(None)
        self.assertIsNone(ret)

    @mock.patch(utils.READ_CONFIG, utils.read_config_mock('m3'))
    def test_config_oml(self):
        oml_xml_cfg = '''<omlc id='{node_id}' exp_id='{exp_id}'>\n</omlc>'''
        self.cn.start(oml_xml_cfg)
        self.assertIsNotNone(self.cn._oml_cfg_file)
        self.cn.stop()

    def test_oml_xml_config(self):
        exp_files = {
            'consumption': '/tmp/consumption',
            'radio': '/tmp/radio',
            'event': '/tmp/event',
            'sniffer': '/tmp/sniffer',
            'log': '/tmp/log',
        }
        oml_xml_cfg = self.cn.oml_xml_config('m3-1', '1234', exp_files)
        self.assertIsNotNone(oml_xml_cfg)
        self.assertTrue(oml_xml_cfg.startswith('<omlc'))

        # No output if none or empty
        oml_xml_cfg = self.cn.oml_xml_config('m3-1', '1234', None)
        self.assertIsNone(oml_xml_cfg)
        oml_xml_cfg = self.cn.oml_xml_config('m3-1', '1234', {})
        self.assertIsNone(oml_xml_cfg)
class TestCase(unittest.TestCase):
    """Abstract App Engine test base: sets up webtest, os.environ overrides,
    the GAE testbed stubs and a WARNING-level LogCapture; tearDown fails the
    test on any captured log record not whitelisted in ``expected_logs``.

    ``expected_logs`` entries are (levelname, pathname, funcName, message)
    tuples; a message containing "..." is matched with doctest ellipsis
    semantics.
    """
    __metaclass__ = abc.ABCMeta
    # Project root (two levels above this file); used by the taskqueue stub.
    root_path = os.path.dirname(os.path.dirname(__file__))
    # Extra os.environ overrides; subclasses may extend.
    environ = dict()
    domain = None
    use_cookie = False

    def setUp(self):
        # webtest
        if self.use_cookie:
            cookiejar = cookielib.CookieJar()
        else:
            cookiejar = None
        self.app = TestApp(utils.app, domain=self.domain, cookiejar=cookiejar)
        # os.environ — remember originals so tearDown can restore them.
        self.origin_environ = dict()
        if "HTTP_HOST" not in self.environ.viewkeys():
            self.environ["HTTP_HOST"] = "localhost"
        for key, value in self.environ.viewitems():
            self.origin_environ[key], os.environ[key] = os.environ.get(key), value
        # testbed
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub(consistency_policy=datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=0))
        self.testbed.init_blobstore_stub()
        self.testbed.init_files_stub()
        self.testbed.init_memcache_stub()
        self.testbed.init_taskqueue_stub(root_path=self.root_path)
        # logging
        self.log = LogCapture(level=logging.WARNING)
        self.log.install()

    def tearDown(self):
        try:
            # logging: every captured record must be expected, else fail.
            for record in self.log.records:
                pathname = get_tail(record.pathname).group("tail")
                log = (record.levelname, pathname.replace(os.path.abspath(os.curdir), "").lstrip("/"), record.funcName, record.getMessage())
                if getattr(self, "expected_logs", None):
                    if log in self.expected_logs:
                        continue
                    # Ellipsis matching for expected messages with "...".
                    matched = None
                    for expected_log in self.expected_logs:
                        if "..." in expected_log[3]:
                            if log[:2] == expected_log[:2]:
                                if doctest._ellipsis_match(expected_log[3], log[3]):
                                    matched = True
                                    # NOTE(review): this 'continue' only ends
                                    # the current inner iteration; a 'break'
                                    # would be clearer/cheaper here.
                                    continue
                    if matched:
                        continue
                print(record.levelname, pathname, record.lineno, record.funcName, record.getMessage())
                # A non-empty tuple is truthy, so this always fails for an
                # unexpected record.
                assert not log
        finally:
            self.log.clear()
            self.log.uninstall()
            # testbed
            self.testbed.deactivate()
            # os.environ — restore saved values (None means key was absent).
            for key, value in self.origin_environ.iteritems():
                if value is not None:
                    os.environ[key] = value

    def execute_tasks(self, queue_name):
        # Run and delete all deferred tasks queued on the given task queue.
        taskqueue_stub = apiproxy_stub_map.apiproxy.GetStub("taskqueue")
        tasks = taskqueue_stub.GetTasks(queue_name)
        for task in tasks:
            deferred.run(base64.b64decode(task["body"]))
            taskqueue_stub.DeleteTask(queue_name, task["name"])
class TestCase(unittest.TestCase):
    """Abstract App Engine test base (PEP8-wrapped variant).

    NOTE(review): this class is a behaviorally identical duplicate of the
    TestCase defined earlier in this file (only line wrapping differs);
    the later definition shadows the earlier one at module level. Consider
    removing one copy.
    """
    __metaclass__ = abc.ABCMeta
    # Project root (two levels above this file); used by the taskqueue stub.
    root_path = os.path.dirname(os.path.dirname(__file__))
    # Extra os.environ overrides; subclasses may extend.
    environ = dict()
    domain = None
    use_cookie = False

    def setUp(self):
        # webtest
        if self.use_cookie:
            cookiejar = cookielib.CookieJar()
        else:
            cookiejar = None
        self.app = TestApp(utils.app, domain=self.domain, cookiejar=cookiejar)
        # os.environ — remember originals so tearDown can restore them.
        self.origin_environ = dict()
        if "HTTP_HOST" not in self.environ.viewkeys():
            self.environ["HTTP_HOST"] = "localhost"
        for key, value in self.environ.viewitems():
            self.origin_environ[key], os.environ[key] = os.environ.get(
                key), value
        # testbed
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub(
            consistency_policy=datastore_stub_util.
            PseudoRandomHRConsistencyPolicy(probability=0))
        self.testbed.init_blobstore_stub()
        self.testbed.init_files_stub()
        self.testbed.init_memcache_stub()
        self.testbed.init_taskqueue_stub(root_path=self.root_path)
        # logging
        self.log = LogCapture(level=logging.WARNING)
        self.log.install()

    def tearDown(self):
        try:
            # logging: every captured record must be expected, else fail.
            for record in self.log.records:
                pathname = get_tail(record.pathname).group("tail")
                log = (record.levelname,
                       pathname.replace(os.path.abspath(os.curdir),
                                        "").lstrip("/"),
                       record.funcName, record.getMessage())
                if getattr(self, "expected_logs", None):
                    if log in self.expected_logs:
                        continue
                    # Ellipsis matching for expected messages with "...".
                    matched = None
                    for expected_log in self.expected_logs:
                        if "..." in expected_log[3]:
                            if log[:2] == expected_log[:2]:
                                if doctest._ellipsis_match(
                                        expected_log[3], log[3]):
                                    matched = True
                                    # NOTE(review): 'continue' only ends the
                                    # current inner iteration; 'break' would
                                    # be clearer/cheaper here.
                                    continue
                    if matched:
                        continue
                print(record.levelname, pathname, record.lineno,
                      record.funcName, record.getMessage())
                # A non-empty tuple is truthy, so this always fails for an
                # unexpected record.
                assert not log
        finally:
            self.log.clear()
            self.log.uninstall()
            # testbed
            self.testbed.deactivate()
            # os.environ — restore saved values (None means key was absent).
            for key, value in self.origin_environ.iteritems():
                if value is not None:
                    os.environ[key] = value

    def execute_tasks(self, queue_name):
        # Run and delete all deferred tasks queued on the given task queue.
        taskqueue_stub = apiproxy_stub_map.apiproxy.GetStub("taskqueue")
        tasks = taskqueue_stub.GetTasks(queue_name)
        for task in tasks:
            deferred.run(base64.b64decode(task["body"]))
            taskqueue_stub.DeleteTask(queue_name, task["name"])
class TestVpcBotoInteractions(unittest.TestCase):
    """
    We use the moto mock framework for boto in order to test our interactions
    with boto.
    """

    def setUp(self):
        """Install a filtered LogCapture and make host choice reproducible."""
        self.lc = LogCapture()
        self.lc.addFilter(test_common.MyLogCaptureFilter())
        self.addCleanup(self.cleanup)
        # Hosts are chosen randomly from a prefix group. Therefore, we need to
        # seed the random number generator with a specific value in order to
        # have reproducible tests.
        random.seed(123)

    def cleanup(self):
        # Detach LogCapture from the logging system again.
        self.lc.uninstall()

    @mock_ec2_deprecated
    def make_mock_vpc(self):
        """
        Use plain (but mocked) boto functions to create a small VPC with two
        subnets and two instances as a basis for our tests.

        (not quite sure why this doesn't run in setUp().

        """
        con = boto.vpc.connect_to_region("ap-southeast-2")
        # Note that moto doesn't seem to honor the subnet and VPC address
        # ranges, it seems all instances always get something random from a
        # 10/8 range.
        self.new_vpc = con.create_vpc('10.0.0.0/16')
        self.new_subnet_a = con.create_subnet(self.new_vpc.id, '10.1.0.0/16')
        self.new_subnet_b = con.create_subnet(self.new_vpc.id, '10.2.0.0/16')

        res1 = con.run_instances('ami-1234abcd',
                                 subnet_id=self.new_subnet_a.id)
        res2 = con.run_instances('ami-1234abcd',
                                 subnet_id=self.new_subnet_b.id)
        self.i1 = res1.instances[0]
        self.i2 = res2.instances[0]
        self.i1ip = self.i1.private_ip_address
        self.i2ip = self.i2.private_ip_address

    @mock_ec2_deprecated
    def test_connect(self):
        """get_vpc_overview() returns the expected keys and objects."""
        self.make_mock_vpc()

        # With a test VPC created, we now test our own functions
        con = vpc.connect_to_region("ap-southeast-2")
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        self.assertEqual(
            sorted(['subnets', 'route_tables', 'instance_by_id',
                    'instances', 'zones', 'vpc']),
            sorted(d.keys()))

        self.assertEqual(self.new_vpc.id, d['vpc'].id)
        self.assertTrue(self.new_subnet_a.id in [s.id for s in d['subnets']])
        self.assertTrue(self.new_subnet_b.id in [s.id for s in d['subnets']])
        # ap-southeast-2 has three availability zones in moto's model.
        self.assertTrue(len(d['zones']) == 3)
        self.assertTrue(len(d['route_tables']) == 1)
        self.assertTrue(len(d['instance_by_id'].keys()) == 2)
        self.assertTrue(d['instance_by_id'][self.i1.id].id == self.i1.id)
        self.assertTrue(d['instance_by_id'][self.i2.id].id == self.i2.id)

        self.assertTrue(
            vpc.find_instance_and_eni_by_ip(d, self.i1ip)[0].id ==
            self.i1.id)
        self.assertTrue(
            vpc.find_instance_and_eni_by_ip(d, self.i2ip)[0].id ==
            self.i2.id)

    @mock_ec2_deprecated
    def test_process_route_spec_config(self):
        """Routes are added, switched on failure, and deleted per the spec."""
        self.make_mock_vpc()

        con = vpc.connect_to_region("ap-southeast-2")

        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        i1, eni1 = vpc.find_instance_and_eni_by_ip(d, self.i1ip)
        i2, eni2 = vpc.find_instance_and_eni_by_ip(d, self.i2ip)

        rt_id = d['route_tables'][0].id

        route_spec = {
            u"10.1.0.0/16" : [self.i1ip, self.i2ip]
        }

        # Process a simple route spec, a route should have been added
        self.lc.clear()
        vpc.process_route_spec_config(con, d, route_spec, [])
        # One of the hosts is randomly chosen. We seeded the random number
        # generator at in this module, so we know that it will choose the
        # second host in this case.
        self.lc.check(
            ('root', 'DEBUG', 'Route spec processing. No failed IPs.'),
            ('root', 'INFO',
             "--- adding route in RT '%s' "
             "10.1.0.0/16 -> %s (%s, %s)" %
             (rt_id, self.i2ip, i2.id, eni2.id)))

        # One of the two IPs failed, switch over
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        self.lc.clear()
        vpc.process_route_spec_config(con, d, route_spec, [self.i1ip])
        self.lc.check(
            ('root', 'DEBUG',
             'Route spec processing. Failed IPs: %s' % self.i1ip),
            ('root', 'INFO',
             "--- updating existing route in RT '%s' 10.1.0.0/16 -> "
             "%s (%s, %s) (old IP: None, reason: old IP failed or not "
             "eligible anymore)" %
             (rt_id, self.i2ip, i2.id, eni2.id)))

        # Now all IPs for a route have failed
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        self.lc.clear()
        vpc.process_route_spec_config(con, d, route_spec,
                                      [self.i1ip, self.i2ip])
        self.lc.check(
            ('root', 'DEBUG',
             'Route spec processing. Failed IPs: %s,%s' %
             (self.i1ip, self.i2ip)),
            ('root', 'WARNING',
             '--- cannot find available target for route update 10.1.0.0/16! '
             'Nothing I can do...'))

        # Add new route, remove old one
        route_spec = {
            u"10.2.0.0/16" : [self.i1ip]
        }
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        self.lc.clear()
        vpc.process_route_spec_config(con, d, route_spec, [])
        self.lc.check(
            ('root', 'DEBUG', 'Route spec processing. No failed IPs.'),
            ('root', 'INFO',
             "--- route not in spec, deleting in RT '%s': "
             "10.1.0.0/16 -> ... (%s, (unknown))" % (rt_id, i2.id)),
            ('root', 'INFO',
             "--- adding route in RT '%s' "
             "10.2.0.0/16 -> %s (%s, %s)" %
             (rt_id, self.i1ip, i1.id, eni1.id)))

    @mock_ec2_deprecated
    def test_handle_spec(self):
        """handle_spec() adds a route, and recognizes an existing one."""
        self.make_mock_vpc()

        # Need to take a peek inside the VPC so we can properly evaluate the
        # output later on
        con = vpc.connect_to_region("ap-southeast-2")
        d = vpc.get_vpc_overview(con, self.new_vpc.id, "ap-southeast-2")
        i, eni = vpc.find_instance_and_eni_by_ip(d, self.i1ip)

        rt_id = d['route_tables'][0].id

        route_spec = {
            u"10.2.0.0/16" : [self.i1ip]
        }

        # Test handle_spec
        vid = self.new_vpc.id
        self.lc.clear()
        vpc.handle_spec("ap-southeast-2", vid, route_spec, [])
        self.lc.check(
            ('root', 'DEBUG', 'Handle route spec'),
            ('root', 'DEBUG', "Connecting to AWS region 'ap-southeast-2'"),
            ('root', 'DEBUG', "Retrieving information for VPC '%s'" % vid),
            ('root', 'DEBUG', 'Route spec processing. No failed IPs.'),
            ('root', 'INFO',
             "--- adding route in RT '%s' 10.2.0.0/16 -> %s (%s, %s)" %
             (rt_id, self.i1ip, self.i1.id, eni.id)))

        # mock the get_instance_private_ip_from_route() function in vpc. Reason
        # being: The boto mocking library (moto) doesn't handle ENIs in routes
        # correctly. Therefore, a match against the information we get from the
        # routes will never work. So, we provide a wrapper, which fills the
        # instance's ENI information into the route. This means that this
        # function now will always match. It's good for testing the 'match'
        # part of the code.
        old_func = vpc.get_instance_private_ip_from_route

        def my_get_instance_private_ip_from_route(instance, route):
            route.interface_id = instance.interfaces[0].id
            return old_func(instance, route)

        vpc.get_instance_private_ip_from_route = \
            my_get_instance_private_ip_from_route
        self.lc.clear()
        vpc.handle_spec("ap-southeast-2", vid, route_spec, [])
        # Restore the original function before checking the log output.
        vpc.get_instance_private_ip_from_route = old_func
        self.lc.check(
            ('root', 'DEBUG', 'Handle route spec'),
            ('root', 'DEBUG', "Connecting to AWS region 'ap-southeast-2'"),
            ('root', 'DEBUG', "Retrieving information for VPC '%s'" % vid),
            ('root', 'DEBUG', 'Route spec processing. No failed IPs.'),
            ('root', 'INFO',
             "--- route exists already in RT '%s': 10.2.0.0/16 -> "
             "%s (%s, %s)" % (rt_id, self.i1ip, self.i1.id, eni.id)))
class TestCase(unittest.TestCase):
    """Abstract base class for App Engine tests, with endpoints support.

    Like the plain testbed TestCase, but the ``application`` class attribute
    may be a Cloud Endpoints API decorator, in which case an endpoints API
    server is built around it and endpoints-specific helpers
    (``endpoints_uri``/``endpoints_via_oauth``) become usable.  tearDown
    asserts that no unexpected WARNING+ log records were captured; subclasses
    may declare ``expected_logs`` (4-tuples of levelname, pathname, funcName,
    message; ``"..."`` enables ellipsis matching in the message).
    """
    __metaclass__ = abc.ABCMeta
    # Project root, used for datastore/taskqueue stub lookups.
    root_path = os.path.dirname(os.path.dirname(__file__))
    # Extra os.environ entries to install for the duration of a test.
    environ = dict()
    # Optional domain passed through to webtest's TestApp.
    domain = None
    # When True, requests share a cookie jar across the test.
    use_cookie = False
    # WSGI app under test; may be an endpoints _ApiDecorator (see __init__).
    application = tap.app
    # Set to True by __init__ when `application` is an endpoints API.
    is_endpoints = False

    def __init__(self, *argv, **kwargv):
        super(TestCase, self).__init__(*argv, **kwargv)
        if isinstance(self.application, endpoints.api_config._ApiDecorator):
            # Collect the real (mangled) remote method names so that
            # endpoints_uri() can validate endpoint references.
            api_names = list()
            for api_class in self.application.get_api_classes():
                for name, _method in api_class.all_remote_methods().items():
                    api_names.append(name)
            self.api_names = tuple(api_names)
            # restricted=False lets TestApp call the SPI without the
            # endpoints server's client checks.
            self.application = endpoints.api_server([self.application],
                                                    restricted=False)
            self.is_endpoints = True
        else:
            self.api_names = tuple()

    def setUp(self):
        """Install webtest app, environment overrides, testbed stubs, logging."""
        # webtest
        if self.use_cookie:
            cookiejar = cookielib.CookieJar()
        else:
            cookiejar = None
        self.app = TestApp(self.application,
                           domain=self.domain,
                           cookiejar=cookiejar)

        # os.environ
        # Remember the original values so tearDown can restore them.
        self.origin_environ = dict()
        if "HTTP_HOST" not in self.environ.viewkeys():
            self.environ["HTTP_HOST"] = "localhost"
        for key, value in self.environ.viewitems():
            self.origin_environ[key], os.environ[key] = os.environ.get(key), value

        # testbed
        # probability=0 forces eventual-consistency behavior in the datastore
        # stub, so tests must handle unapplied writes explicitly.
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub(
            consistency_policy=datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=0),
            root_path=self.root_path,
        )
        self.testbed.init_blobstore_stub()
        self.testbed.init_files_stub()
        self.testbed.init_memcache_stub()
        self.testbed.init_taskqueue_stub(root_path=self.root_path)
        self.testbed.init_urlfetch_stub()
        self.testbed.init_user_stub()

        # logging
        self.log = LogCapture(level=logging.WARNING)
        self.log.install()

    def tearDown(self):
        """Fail the test on unexpected captured log records, then clean up."""
        # Undo any mock(...) patches installed during the test
        # (presumably MiniMock's restore() — TODO(review): confirm import).
        restore()
        try:
            # logging
            for record in self.log.records:
                if self.is_endpoints and len(record.args) >= 3:
                    # Endpoints logs the exception object as the third log
                    # arg; make sure its args are stringifiable so
                    # getMessage() below does not blow up.
                    exception = record.args[2]
                    if isinstance(exception, endpoints.ServiceException):
                        try:
                            str(exception)
                        except TypeError:
                            record.args[2].args = [str(arg)
                                                   for arg in exception.args]
                        except UnicodeEncodeError:
                            record.args[2].args = [unicode(arg)
                                                   for arg in exception.args]
                # get_tail() presumably strips a site-specific path prefix —
                # TODO(review): confirm against its definition.
                pathname = get_tail(record.pathname).group("tail")
                curdir_abspath = os.path.abspath(os.curdir)
                if is_mac:
                    # On mac, paths may carry a /Volumes/... prefix that must
                    # be removed before comparing with expected pathnames.
                    if curdir_abspath.startswith(mac_volumes_prefix):
                        curdir_abspath = curdir_abspath[mac_volumes_prefix_length:]
                    if pathname.startswith(mac_volumes_prefix):
                        pathname = pathname[mac_volumes_prefix_length:]
                else:
                    curdir_abspath = curdir_abspath.lstrip('/')
                    pathname = pathname.lstrip('/')
                log = (record.levelname,
                       pathname.replace(curdir_abspath, "").lstrip("/"),
                       record.funcName,
                       record.getMessage())
                if getattr(self, "expected_logs", None):
                    # Exact match against the expected log tuples.
                    if log in self.expected_logs:
                        continue
                    # Otherwise try ellipsis ("...") pattern matching on the
                    # message, for expected logs with variable content.
                    matched = None
                    for expected_log in self.expected_logs:
                        if "..." in expected_log[3]:
                            if log[:2] == expected_log[:2]:
                                if doctest._ellipsis_match(expected_log[3],
                                                           log[3]):
                                    matched = True
                                    continue
                    if matched:
                        continue
                elif self.is_endpoints:
                    # Endpoints tests routinely emit this ndb tasklet warning
                    # when a remote method raises; tolerate it.
                    expected_log = (
                        'WARNING',
                        'google/appengine/ext/ndb/tasklets.py',
                        '_help_tasklet_along',
                        '... generator ...(....py:...) raised ...Exception(...)')
                    if log[:2] == expected_log[:2]:
                        if doctest._ellipsis_match(expected_log[3], log[3]):
                            matched = True
                            continue
                # Unexpected record: show it and fail (log is always truthy).
                print(record.levelname, pathname, record.lineno,
                      record.funcName, record.getMessage())
                assert not log
        finally:
            self.log.clear()
            self.log.uninstall()

            # testbed
            self.testbed.deactivate()

            # os.environ
            # Restore only keys that existed before the test.
            for key, value in self.origin_environ.iteritems():
                if value is not None:
                    os.environ[key] = value

    def execute_tasks(self, queue_name):
        """Run and delete every deferred task currently queued in queue_name."""
        taskqueue_stub = apiproxy_stub_map.apiproxy.GetStub("taskqueue")
        tasks = taskqueue_stub.GetTasks(queue_name)
        for task in tasks:
            # Task bodies are base64-encoded deferred payloads.
            deferred.run(base64.b64decode(task["body"]))
            taskqueue_stub.DeleteTask(queue_name, task["name"])

    def endpoints_uri(self, endpoint):
        """Translate "ApiClass.method" into its /_ah/spi/ SPI request path."""
        api_class_name, api_method_name = endpoint.split(".", 1)
        real_api_method_name = "_{0}_{1}".format(api_class_name,
                                                 api_method_name)
        assert real_api_method_name in self.api_names
        return "/_ah/spi/{0}.{1}".format(api_class_name, real_api_method_name)

    def endpoints_via_oauth(self, email=None, _auth_domain=None,
                            _user_id=None, federated_identity=None,
                            federated_provider=None, _strict_mode=False):
        """Make endpoints.get_current_user() return a fabricated User.

        When email is given, missing _auth_domain/_user_id are derived from
        it before the User object is built.
        """
        if email is not None:
            if _auth_domain is None:
                _auth_domain = email.split("@", 1)[1]
            if _user_id is None:
                # Derive a deterministic user id from the email's characters.
                _user_id = str(tap.base_decoder(sorted(set(email)))(email))
            user = users.User(email, _auth_domain, _user_id,
                              federated_identity, federated_provider,
                              _strict_mode)
        # NOTE(review): `user` is only bound when email is not None; calling
        # this with email=None would raise NameError here — presumably
        # callers always pass an email. Confirm intended behavior.
        mock("endpoints.get_current_user", returns=user, tracker=None)
class TestControlNodeSerial(unittest.TestCase):
    """Tests for ControlNodeSerial with a fully mocked cn_serial subprocess.

    The subprocess Popen is patched out; its stderr.readline is driven by a
    Queue so each test can script the lines the reader thread will see.
    ERROR/WARNING log output is checked via LogCapture.

    Fix over previous revision: use the canonical unittest assertion names
    (assertEqual / assertNotEqual / assertIsNone) instead of the deprecated
    assertEquals / assertNotEquals aliases, consistently with the
    assertIsNone-style assertions already used elsewhere in this class.
    """

    def setUp(self):
        """Patch Popen and wire a queue as the mocked process's stderr."""
        self.popen_patcher = mock.patch(
            'gateway_code.utils.subprocess_timeout.Popen')
        popen_class = self.popen_patcher.start()
        self.popen = popen_class.return_value

        # terminate() feeds '' into the queue, which unblocks the reader
        # thread and makes it see EOF.
        self.popen.terminate.side_effect = self._terminate
        self.popen.poll.return_value = None

        # Unbounded queue scripted by each test; readline blocks on get().
        self.readline_ret_vals = Queue.Queue(0)
        self.popen.stderr.readline.side_effect = self.readline_ret_vals.get
        # First line the reader thread expects on a successful start.
        self.readline_ret_vals.put('cn_serial_ready\n')

        self.cn = cn_interface.ControlNodeSerial('tty')
        self.log_error = LogCapture('gateway_code', level=logging.WARNING)

    def tearDown(self):
        self.cn.stop()
        mock.patch.stopall()
        self.log_error.uninstall()

    def _terminate(self):
        # Simulates process termination: readline() will return '' (EOF).
        self.readline_ret_vals.put('')

    def test_normal_start_stop(self):
        """start() succeeds, stop() terminates and drains the reader."""
        ret_start = self.cn.start()
        self.assertEqual(0, ret_start)
        self.assertTrue(self.popen.stderr.readline.called)

        self.cn.stop()
        self.assertTrue(self.popen.terminate.called)
        self.assertTrue(self.readline_ret_vals.empty())

    def test_start_error_in_cn_serial(self):
        """start() fails when the process dies immediately."""
        # poll should return an error
        self.popen.poll.return_value = 2

        ret_start = self.cn.start()
        self.assertNotEqual(0, ret_start)

        self.log_error.check(
            ('gateway_code', 'ERROR',
             'Control node serial reader thread ended prematurely'))
        self.cn.stop()

    def test_stop_before_start(self):
        # stop() on a never-started instance must be a no-op, not a crash.
        self.cn.stop()

    def test_stop_with_cn_interface_allready_stopped(self):
        """Commands and stop() degrade gracefully after the process died."""
        # Simulate cn_interface stopped
        self.readline_ret_vals.put('')
        self.popen.stdin.write.side_effect = IOError()
        self.popen.terminate.side_effect = OSError()

        self.cn.start()

        # try sending command
        ret = self.cn.send_command(['test', 'cmd'])
        self.assertIsNone(ret)
        self.log_error.check(
            ('gateway_code', 'ERROR',
             'control_node_serial process is terminated'))
        self.log_error.clear()

        self.cn.stop()
        self.log_error.check(
            ('gateway_code', 'ERROR',
             'Control node process already terminated'))

    def test_stop_terminate_failed(self):
        """Stop cn_interface but terminate does not stop it."""
        # terminate does not stop process
        self.popen.terminate.side_effect = None
        timeout_expired = cn_interface.subprocess_timeout.TimeoutExpired
        self.popen.wait.side_effect = timeout_expired('cn_serial_interface', 3)
        # kill does it
        self.popen.kill.side_effect = self._terminate

        self.cn.start()
        self.cn.stop()

        self.assertTrue(self.popen.kill.called)
        self.log_error.check(
            ('gateway_code', 'WARNING',
             'Control node serial not terminated, kill it'))

    # Test command sending
    def test_send_command(self):
        """A command gets its matching ACK answer back."""
        # Writing the command triggers the scripted 'start ACK' answer.
        self.popen.stdin.write.side_effect = \
            (lambda *x: self.readline_ret_vals.put('start ACK\n'))

        self.cn.start()
        ret = self.cn.send_command(['start', 'DC'])
        self.assertEqual(['start', 'ACK'], ret)
        self.cn.stop()

    def test_send_command_no_answer(self):
        # No answer scripted: send_command times out and returns None.
        self.cn.start()
        ret = self.cn.send_command(['start', 'DC'])
        self.assertIsNone(ret)
        self.cn.stop()

    def test_send_command_cn_interface_stoped(self):
        # Sending before start() must return None, not raise.
        ret = self.cn.send_command(['lala'])
        self.assertIsNone(ret)

    def test_answer_and_answer_with_queue_full(self):
        """Unsolicited answers overflow the one-slot answer queue."""
        # get two answers without sending command
        self.readline_ret_vals.put('set ACK\n')
        self.readline_ret_vals.put('start ACK\n')

        self.cn.start()
        self.cn.stop()

        self.log_error.check(
            ('gateway_code', 'ERROR',
             'Control node answer queue full: %r' % ['start', 'ACK']))

    # _cn_interface_args
    def test__cn_interface_args(self):
        """Command line args reflect OML config (-c) and debug mode (-d)."""
        args = self.cn._cn_interface_args()
        self.assertIn(self.cn.tty, args)
        self.assertNotIn('-c', args)
        self.assertNotIn('-d', args)

        # OML config
        args = self.cn._cn_interface_args('<omlc></omlc>')
        self.assertIn('-c', args)
        self.assertNotIn('-d', args)
        self.cn._oml_cfg_file.close()

        # Debug mode
        self.cn.measures_debug = (lambda x: None)
        args = self.cn._cn_interface_args()
        self.assertNotIn('-c', args)
        self.assertIn('-d', args)

    # _config_oml coverage tests
    def test_empty_config_oml(self):
        # No experiment description
        ret = self.cn._oml_config_file(None)
        self.assertIsNone(ret)

    @mock.patch(utils.READ_CONFIG, utils.read_config_mock('m3'))
    def test_config_oml(self):
        """start() with an OML XML config creates the temp config file."""
        oml_xml_cfg = '''<omlc id='{node_id}'
        exp_id='{exp_id}'>\n</omlc>'''
        self.cn.start(oml_xml_cfg)
        self.assertIsNotNone(self.cn._oml_cfg_file)
        self.cn.stop()

    def test_oml_xml_config(self):
        """oml_xml_config() builds XML for full file sets, None otherwise."""
        exp_files = {
            'consumption': '/tmp/consumption',
            'radio': '/tmp/radio',
            'event': '/tmp/event',
            'sniffer': '/tmp/sniffer',
            'log': '/tmp/log',
        }
        oml_xml_cfg = self.cn.oml_xml_config('m3-1', '1234', exp_files)
        self.assertIsNotNone(oml_xml_cfg)
        self.assertTrue(oml_xml_cfg.startswith('<omlc'))

        # No output if none or empty
        oml_xml_cfg = self.cn.oml_xml_config('m3-1', '1234', None)
        self.assertIsNone(oml_xml_cfg)
        oml_xml_cfg = self.cn.oml_xml_config('m3-1', '1234', {})
        self.assertIsNone(oml_xml_cfg)
class TestWatcherConfigfile(TestBase):
    """Tests for the configfile watcher plugin + ICMP health monitor combo.

    Writes route-spec JSON files into a temp directory and verifies, via
    LogCapture, that the watcher picks up file change events and that
    _event_monitor_loop reacts to new specs and failed IPs.  These tests are
    timing sensitive: sleeps give the watcher/health threads time to run.
    """

    def additional_setup(self):
        """Create temp config dir, plugin conf, and load plugin classes."""
        self.temp_dir = tempfile.mkdtemp()
        self.abs_fname = self.temp_dir + "/r.spec"
        self.conf = {
            "file" : self.abs_fname,
            "region_name" : "dummy-region",
            "vpc_id" : "dummy-vpc",
            "mode" : "configfile",
            "health" : "icmpecho",
            "icmp_check_interval" : 2
        }
        self.watcher_plugin_class = \
            main.load_plugin("configfile", DEFAULT_WATCHER_PLUGIN_MOD)
        self.health_plugin_class = \
            main.load_plugin("icmpecho", DEFAULT_HEALTH_PLUGIN_MOD)
        # The watcher thread needs to have a config file available right at the
        # start, even if there's nothing in it
        self.write_config({})

    def setUp(self):
        """Install LogCapture and neutralize handle_spec via monkey patch."""
        self.lc = LogCapture()
        self.lc.setLevel(logging.DEBUG)
        self.lc.addFilter(test_common.MyLogCaptureFilter())
        self.additional_setup()
        self.addCleanup(self.cleanup)

        self.old_handle_spec = vpc.handle_spec

        # Monkey patch the handle_spec function, which is called by the
        # watcher. The handle_spec function is defined in the VPC module.
        # However, it was directly imported by the watcher module, so it's now
        # a copy in the watcher module namespace. Thus, the patch has to be
        # done actually in the watcher module. For safety, we'll do it in both
        # the vpc and watcher module.
        def new_handle_spec(*args, **kwargs):
            pass
        watcher.handle_spec = vpc.handle_spec = new_handle_spec

    def additional_cleanup(self):
        # Remove the temp config directory and everything in it.
        shutil.rmtree(self.temp_dir)

    def cleanup(self):
        """Undo LogCapture and the handle_spec monkey patch."""
        self.lc.uninstall()
        watcher.handle_spec = vpc.handle_spec = self.old_handle_spec
        self.additional_cleanup()

    def write_config(self, data):
        # Serialize `data` as JSON into the watched route-spec file.
        with open(self.abs_fname, "w+") as f:
            f.write(json.dumps(data))

    def start_thread_log_tuple(self):
        # Log lines expected when the configfile watcher thread starts.
        return [
            ('root', 'INFO',
             "Configfile watcher plugin: Starting to watch route spec file "
             "'%s' for changes..." % self.abs_fname)
        ]

    def change_event_log_tuple(self):
        # Log line expected when the watcher sees a file change event.
        return ('root', 'INFO',
                "Detected file change event for %s" % self.abs_fname)

    def test_watcher_thread_no_config(self):
        """A missing config file is logged as an error and ignored."""
        os.remove(self.abs_fname)
        watcher_plugin, health_plugin = \
            watcher.start_plugins(
                self.conf,
                self.watcher_plugin_class, self.health_plugin_class,
                2)
        time.sleep(0.5)

        # Config file doesn't exist yet, so we should get an error.
        # Health monitor is started with a second delay, so no messages from
        # there, yet.
        l = self.start_thread_log_tuple()
        l.extend([
            ('root', 'ERROR',
             "Config ignored: Cannot open file: "
             "[Errno 2] No such file or directory: '%s'" % self.abs_fname),
            ('root', 'INFO',
             'ICMPecho health monitor plugin: Starting to watch instances.')
        ])
        self.lc.check(*l)

        watcher.stop_plugins(watcher_plugin, health_plugin)

    def test_watcher_thread_wrong_config(self):
        """A malformed (non-dict) config is logged as an error and ignored."""
        watcher_plugin, health_plugin = \
            watcher.start_plugins(
                self.conf,
                self.watcher_plugin_class, self.health_plugin_class,
                2)
        time.sleep(1.2)
        self.lc.clear()

        inp = "MALFORMED"
        self.write_config(inp)
        time.sleep(1)

        # Config file malformed
        l = [
            self.change_event_log_tuple(),
            ('root', 'ERROR',
             'Config ignored: Expected dictionary at top level')
        ]
        # lc_compare() presumably does an order-insensitive comparison
        # (defined on TestBase) — TODO(review): confirm.
        self.lc_compare(l)

        watcher.stop_plugins(watcher_plugin, health_plugin)

    def test_watcher_thread(self):
        """Full cycle: new specs update the health monitor and failed IPs."""
        # Monkey patch the healthcheck method of the ICMP health monitor class,
        # since we don't really want to send out ICMP echo requests when we run
        # the tests. Will indicate failure for all IP addresses starting with
        # "3."
        def new_do_health_checks(s, addrs):
            return [a for a in addrs if a.startswith("3.")], []

        # We do this in the class, before the plugin is instantiated
        self.health_plugin_class.do_health_checks = new_do_health_checks

        watcher_plugin, health_plugin = \
            watcher.start_plugins(
                self.conf,
                self.watcher_plugin_class, self.health_plugin_class,
                2)
        time.sleep(2)

        l = self.start_thread_log_tuple()
        l.extend([
            ('root', 'INFO',
             'ICMPecho health monitor plugin: Starting to watch instances.'),
            ('root', 'DEBUG', 'Checking live IPs: (none alive)')])
        self.lc.check(*l)
        self.lc.clear()

        inp = {
            u"10.1.0.0/16" : [u"1.1.1.1", u"2.2.2.2"],
            u"10.2.0.0/16" : [u"3.3.3.3"]
        }
        self.write_config(inp)
        time.sleep(2)

        watcher._event_monitor_loop(
            "dummy-region", "dummy-vpc",
            watcher_plugin, health_plugin,
            iterations=1, sleep_time=0.5)
        time.sleep(2)

        # 3.3.3.3 fails because of the patched do_health_checks above.
        self.lc.check(
            self.change_event_log_tuple(),
            ('root', 'DEBUG', 'Checking live IPs: (none alive)'),
            ('root', 'DEBUG',
             'New route spec detected. Updating health-monitor '
             'with: 1.1.1.1,2.2.2.2,3.3.3.3'),
            ('root', 'DEBUG', 'event_monitor_loop ended: Global stop'),
            ('root', 'DEBUG', u'Checking live IPs: 1.1.1.1,2.2.2.2,3.3.3.3'),
            ('root', 'INFO', u'Currently failed IPs: 3.3.3.3'))
        self.lc.clear()

        inp = {
            u"10.1.0.0/16" : [u"4.4.4.4", u"2.2.2.2"],
            u"10.2.0.0/16" : [u"3.3.3.3"]
        }
        self.write_config(inp)
        time.sleep(1)
        """ Remove this check: The log messages may come through in a
        different order, which isn't a problem.
        self.lc.check(
            ('root', 'INFO',
             'Detected file change event for %s' % self.abs_fname),
            ('root', 'DEBUG', 'Checking live IPs: 1.1.1.1,2.2.2.2'))
        """
        self.lc.clear()

        watcher._event_monitor_loop(
            "dummy-region", "dummy-vpc",
            watcher_plugin, health_plugin,
            iterations=1, sleep_time=0.5)
        time.sleep(2)

        self.lc.check(
            ('root', 'DEBUG',
             'New route spec detected. Updating health-monitor '
             'with: 2.2.2.2,3.3.3.3,4.4.4.4'),
            ('root', 'DEBUG', 'event_monitor_loop ended: Global stop'),
            ('root', 'DEBUG', u'Checking live IPs: 2.2.2.2,4.4.4.4'))
        self.lc.clear()

        # With route_check_time_interval=1 the loop also performs a regular
        # route check on the second iteration.
        watcher._event_monitor_loop(
            "dummy-region", "dummy-vpc",
            watcher_plugin, health_plugin,
            iterations=2, sleep_time=1, route_check_time_interval=1)
        time.sleep(2)

        self.lc.check(
            ('root', 'DEBUG', u'Checking live IPs: 2.2.2.2,4.4.4.4'),
            ('root', 'DEBUG', 'Time for regular route check'),
            ('root', 'DEBUG', 'event_monitor_loop ended: Global stop'),
            ('root', 'DEBUG', u'Checking live IPs: 2.2.2.2,4.4.4.4'))

        watcher.stop_plugins(watcher_plugin, health_plugin)