def logout(self):
    """Run ``snapcraft logout`` and verify the credentials-cleared output."""
    pattern = (r'.*Clearing credentials for Ubuntu One SSO.\n'
               r'Credentials cleared.\n.*')
    logout_output = self.run_snapcraft('logout')
    # DOTALL so the leading/trailing '.*' spans any surrounding log lines.
    self.assertThat(logout_output, MatchesRegex(pattern, flags=re.DOTALL))
def update_instance_group(self, init_template, updt_template,
                          num_updates_expected_on_updt,
                          num_creates_expected_on_updt,
                          num_deletes_expected_on_updt,
                          update_replace):
    """Create a group from init_template, update to updt_template, and
    verify the rolling-update behaviour.

    :param init_template: template text used for the initial stack create
    :param updt_template: template text used for the stack update
    :param num_updates_expected_on_updt: instances expected to keep their
        names across the update
    :param num_creates_expected_on_updt: brand-new instances expected
    :param num_deletes_expected_on_updt: instances expected to be removed
    :param update_replace: if True the update replaces instances (image
        change); otherwise it updates them in place (flavor change)
    """
    # setup stack from the initial template
    tmpl = template_format.parse(init_template)
    stack = utils.parse_stack(tmpl)
    stack.validate()

    # test stack create
    size = int(stack['JobServerGroup'].properties['Size'])
    self._stub_grp_create(size)
    self.m.ReplayAll()
    stack.create()
    self.m.VerifyAll()
    self.assertEqual(stack.state, ('CREATE', 'COMPLETE'))

    # test that update policy is loaded
    current_grp = stack['JobServerGroup']
    self.assertIn('RollingUpdate', current_grp.update_policy)
    current_policy = current_grp.update_policy['RollingUpdate']
    self.assertTrue(current_policy)
    self.assertTrue(len(current_policy) > 0)
    init_grp_tmpl = tmpl['Resources']['JobServerGroup']
    init_roll_updt = init_grp_tmpl['UpdatePolicy']['RollingUpdate']
    init_batch_sz = int(init_roll_updt['MaxBatchSize'])
    self.assertEqual(int(current_policy['MaxBatchSize']), init_batch_sz)

    # test that physical resource name of launch configuration is used
    conf = stack['JobServerConfig']
    conf_name_pattern = '%s-JobServerConfig-[a-zA-Z0-9]+$' % stack.name
    self.assertThat(conf.FnGetRefId(), MatchesRegex(conf_name_pattern))

    # get launch conf name here to compare result after update
    conf_name = self.get_launch_conf_name(stack, 'JobServerGroup')

    # test the number of instances created
    nested = stack['JobServerGroup'].nested()
    self.assertEqual(len(nested.resources), size)

    # clean up for next test
    self.m.UnsetStubs()

    # saves info from initial list of instances for comparison later
    init_instances = current_grp.get_instances()
    init_names = current_grp.get_instance_names()
    init_images = [(i.name, i.t['Properties']['ImageId'])
                   for i in init_instances]
    init_flavors = [(i.name, i.t['Properties']['InstanceType'])
                    for i in init_instances]

    # test stack update
    updated_tmpl = template_format.parse(updt_template)
    updated_stack = utils.parse_stack(updated_tmpl)
    new_grp_tmpl = updated_tmpl['Resources']['JobServerGroup']
    new_roll_updt = new_grp_tmpl['UpdatePolicy']['RollingUpdate']
    new_batch_sz = int(new_roll_updt['MaxBatchSize'])
    self.assertNotEqual(new_batch_sz, init_batch_sz)
    if update_replace:
        self._stub_grp_replace(size, size)
    else:
        self._stub_grp_update(num_creates_expected_on_updt,
                              num_deletes_expected_on_updt)
    self.stub_wallclock()
    self.m.ReplayAll()
    stack.update(updated_stack)
    self.m.VerifyAll()
    self.assertEqual(stack.state, ('UPDATE', 'COMPLETE'))

    # test that the update policy is updated
    updated_grp = stack['JobServerGroup']
    self.assertIn('RollingUpdate', updated_grp.update_policy)
    updated_policy = updated_grp.update_policy['RollingUpdate']
    self.assertTrue(updated_policy)
    self.assertTrue(len(updated_policy) > 0)
    self.assertEqual(int(updated_policy['MaxBatchSize']), new_batch_sz)

    # test that the launch configuration is replaced
    updated_conf_name = self.get_launch_conf_name(stack, 'JobServerGroup')
    self.assertNotEqual(conf_name, updated_conf_name)

    # test that the group size are the same
    updt_instances = updated_grp.get_instances()
    updt_names = updated_grp.get_instance_names()
    self.assertEqual(len(updt_names), len(init_names))

    # test that the appropriate number of instance names are the same
    matched_names = set(updt_names) & set(init_names)
    self.assertEqual(len(matched_names), num_updates_expected_on_updt)

    # test that the appropriate number of new instances are created
    self.assertEqual(len(set(updt_names) - set(init_names)),
                     num_creates_expected_on_updt)

    # test that the appropriate number of instances are deleted
    self.assertEqual(len(set(init_names) - set(updt_names)),
                     num_deletes_expected_on_updt)

    # test that the older instances are the ones being deleted
    if num_deletes_expected_on_updt > 0:
        deletes_expected = init_names[:num_deletes_expected_on_updt]
        # NOTE: the original asserted that the *list* of expected names was
        # not an element of updt_names, which is trivially true for a list
        # of strings; check each expected-deleted name individually instead.
        for deleted_name in deletes_expected:
            self.assertNotIn(deleted_name, updt_names)

    # test if instances are updated
    if update_replace:
        # test that the image id is changed for all instances
        updt_images = [(i.name, i.t['Properties']['ImageId'])
                       for i in updt_instances]
        self.assertEqual(len(set(updt_images) & set(init_images)), 0)
    else:
        # test that instance type is changed for all instances
        updt_flavors = [(i.name, i.t['Properties']['InstanceType'])
                        for i in updt_instances]
        self.assertEqual(len(set(updt_flavors) & set(init_flavors)), 0)
def test_take_screenshot_returns_resulting_filename(self):
    """The mirscreencast helper returns the path of the screenshot file."""
    with patch.object(_ss.subprocess, 'check_call'):
        # Raw string so \d is a regex digit class rather than an invalid
        # string escape; the dot before the extension is escaped so it
        # matches a literal '.' only.
        self.assertThat(
            _ss._take_mirscreencast_screenshot(),
            MatchesRegex(r".*ap-screenshot-data-\d+\.rgba"))
def test_mismatch_and_too_many_values(self):
    """One failing matcher plus one surplus value are both reported."""
    # Raw string: \[ is an invalid escape in a normal string literal
    # (DeprecationWarning today, SyntaxError in future Pythons).
    self.assertMismatchWithDescriptionMatching(
        [2, 3, 4], MatchesSetwise(Equals(1), Equals(2)),
        MatchesRegex(r'.*There was 1 mismatch and 1 extra value: \[[34]\]',
                     re.S))
def test_mismatch_and_two_too_many_matchers(self):
    """One mismatch plus two surplus matchers are all reported."""
    # Raw strings: \( and \) are invalid escapes in normal string
    # literals (DeprecationWarning today, SyntaxError in future Pythons).
    self.assertMismatchWithDescriptionMatching(
        [3, 4], MatchesSetwise(Equals(0), Equals(1), Equals(2), Equals(3)),
        MatchesRegex(
            r'.*There was 1 mismatch and 2 extra matchers: '
            r'Equals\([012]\), Equals\([012]\)', re.S))
def test_two_too_many_values(self):
    """Two surplus values are reported as left over."""
    # Raw string: \[ is an invalid escape in a normal string literal
    # (DeprecationWarning today, SyntaxError in future Pythons).
    self.assertMismatchWithDescriptionMatching(
        [1, 2, 3, 4], MatchesSetwise(Equals(1), Equals(2)),
        MatchesRegex(r'There were 2 values left over: \[[34], [34]\]'))
def test_mismatch_and_too_many_matchers(self):
    """One mismatch plus one surplus matcher are both reported."""
    # Raw string: \( is an invalid escape in a normal string literal
    # (DeprecationWarning today, SyntaxError in future Pythons).
    self.assertMismatchWithDescriptionMatching(
        [2, 3], MatchesSetwise(Equals(0), Equals(1), Equals(2)),
        MatchesRegex(
            r'.*There was 1 mismatch and 1 extra matcher: Equals\([01]\)',
            re.S))
def test_mismatches(self):
    """A single failing value/matcher pair reports exactly one mismatch."""
    values = [2, 3]
    set_matcher = MatchesSetwise(Equals(1), Equals(2))
    description_matcher = MatchesRegex('.*There was 1 mismatch$', re.S)
    self.assertMismatchWithDescriptionMatching(
        values, set_matcher, description_matcher)
def test_two_too_many_matchers(self):
    """Two surplus matchers are reported as left over."""
    # Raw strings: \( and \) are invalid escapes in normal string
    # literals (DeprecationWarning today, SyntaxError in future Pythons).
    self.assertMismatchWithDescriptionMatching(
        [3], MatchesSetwise(Equals(1), Equals(2), Equals(3)),
        MatchesRegex(r'There were 2 matchers left over: Equals\([12]\), '
                     r'Equals\([12]\)'))
def test_guess_server_address_finds_IP_address(self):
    """guess_server_address returns a dotted-quad IPv4 address string."""
    # Raw string: \. is an invalid escape in a normal string literal
    # (DeprecationWarning today, SyntaxError in future Pythons).
    self.assertThat(
        address.guess_server_address(),
        MatchesRegex(r"^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$"))
def __eq__(self, args):
    """Comparator used as a mock call-argument matcher: returns True iff
    *args* is the expected ``colcon build`` command line for the test's
    configured build attributes and colcon arguments.

    Assertions fire (rather than returning False) on mismatch, so a
    failure points at the exact flag that was wrong.
    """
    # Matching like this for order independence (otherwise it would be
    # quite fragile)
    command = " ".join(args)
    # Build type flag is driven by whether "debug" is in the test's
    # build_attributes.
    if "debug" in self.test.build_attributes:
        self.test.assertThat(
            command, MatchesRegex(".*--cmake-args.*-DCMAKE_BUILD_TYPE=Debug")
        )
    else:
        self.test.assertThat(
            command, MatchesRegex(".*--cmake-args.*-DCMAKE_BUILD_TYPE=Release")
        )
    # Any extra --cmake-args configured on the test must appear verbatim
    # (re.escape: the args may contain regex metacharacters).
    if self.test.colcon_cmake_args:
        expected_args = " ".join(self.test.colcon_cmake_args)
        self.test.assertThat(
            command,
            MatchesRegex(".*--cmake-args.*{}".format(re.escape(expected_args))),
        )
    # disable_parallel forces a single worker; otherwise the plugin's
    # computed parallel build count is expected.
    if self.test.properties.disable_parallel:
        self.test.assertThat(command, MatchesRegex(".*--parallel-workers=1"))
    else:
        self.test.assertThat(
            command,
            MatchesRegex(
                ".*--parallel-workers={}".format(self.plugin.parallel_build_count)
            ),
        )
    if self.test.colcon_catkin_cmake_args:
        expected_args = " ".join(self.test.colcon_catkin_cmake_args)
        self.test.assertThat(
            command,
            MatchesRegex(
                ".*--catkin-cmake-args.*{}".format(re.escape(expected_args))
            ),
        )
    if self.test.colcon_ament_cmake_args:
        expected_args = " ".join(self.test.colcon_ament_cmake_args)
        self.test.assertThat(
            command,
            MatchesRegex(
                ".*--ament-cmake-args.*{}".format(re.escape(expected_args))
            ),
        )
    if self.test.colcon_packages_ignore:
        expected_args = " ".join(self.test.colcon_packages_ignore)
        self.test.assertThat(
            command,
            MatchesRegex(
                ".*--packages-ignore.*{}".format(re.escape(expected_args))
            ),
        )
    # Package selection is all-or-nothing: either the configured packages
    # are selected, or no --packages-select flag may appear at all.
    if self.test.colcon_packages:
        self.test.assertThat(
            command,
            Contains(
                "--packages-select {}".format(" ".join(self.test.colcon_packages))
            ),
        )
    else:
        self.test.assertThat(command, Not(Contains("--packages-select")))
    # Invariant parts of every colcon invocation: the executable and
    # subcommand come first, followed by the standard layout flags.
    self.test.assertThat(args[0:2], Equals(["colcon", "build"]))
    self.test.assertThat(command, Contains("--merge-install"))
    self.test.assertThat(
        command, Contains("--build-base {}".format(self.plugin.builddir))
    )
    self.test.assertThat(
        command, Contains("--base-paths {}".format(self.plugin._ros_package_path))
    )
    self.test.assertThat(
        command, Contains("--install-base {}".format(self.plugin._ros_overlay))
    )
    return True
def logout(self):
    """Log out via ``snapcraft logout`` and check the confirmation text."""
    # DOTALL lets the surrounding '.*' match across newlines in the output.
    self.assertThat(
        self.run_snapcraft("logout"),
        MatchesRegex(r".*Credentials cleared.\n.*", flags=re.DOTALL),
    )
def test_only_autoscale_nodes_are_modified(self):
    """
    Autoscale only self-heals the nodes that it added, without
    touching any other nodes.  Assuming 1 CLB:
    1. Create two non-autoscaled servers and add them to the CLB.
    2. Wait for all servers to be on the CLB
    3. Create a scaling group with said CLB and 1 server
    4. Wait for AS server to be active and on the CLB.
    4. Delete autoscaled server and 1 non-autoscaled server from the CLB
    5. Converge
    6. Assert that the autoscaled server is put back on the CLB, the
       non-autoscaled server is left off the CLB, and the untouched
       non-autoscaled server is left on the CLB.
    """
    clb = self.helper.clbs[0]
    nodes = yield clb.list_nodes(self.rcs)
    self.assertEqual(len(nodes['nodes']), 0,
                     "There should be no nodes on the CLB yet.")

    # create the other two non-autoscaled servers - just wait until they
    # have servicenet addresses - don't bother waiting for them to be
    # active, which will take too long
    other_servers = yield self.helper.create_servers(
        self.rcs, 2, wait_for=ContainsDict({
            "addresses": ContainsDict({
                'private': MatchesSetwise(
                    # raw string so \d is a regex digit class, not an
                    # invalid string escape
                    ContainsDict({"addr": MatchesRegex(r"(\d+\.){3}\d+")}))
            })
        }))

    # add non-autoscaled servers to the CLB
    clb_response = yield clb.add_nodes(
        self.rcs,
        [{'address': server['addresses']['private'][0]['addr'],
          'port': 8080,
          'condition': "ENABLED"} for server in other_servers])
    remove_non_as_node, untouch_non_as_node = clb_response['nodes']

    # set up the group and get the group's server's CLB node
    group, _ = self.helper.create_group(min_entities=1)
    yield self.helper.start_group_and_wait(group, self.rcs)

    # Should be 3 nodes now that all servers are added
    nodes = yield clb.wait_for_nodes(self.rcs, HasLength(3),
                                     timeout=timeout_default)
    as_node = [node for node in nodes
               if node not in (remove_non_as_node, untouch_non_as_node)][0]

    # delete 1 autoscale node and 1 non-autoscale node
    yield clb.delete_nodes(self.rcs,
                           [as_node['id'], remove_non_as_node['id']])

    # There should be 1 node left
    yield clb.wait_for_nodes(self.rcs, HasLength(1),
                             timeout=timeout_default)

    yield group.trigger_convergence(self.rcs)

    yield clb.wait_for_nodes(
        self.rcs,
        MatchesSetwise(  # means there are only these two nodes and no more
            # the untouched node should remain exactly the same
            Equals(untouch_non_as_node),
            # the AS node should have the same paramters, but not the same
            # ID since it was re-added
            ContainsDict({
                # NOTE: the original tuple read ('weight' 'type') with a
                # missing comma - implicit concatenation made it
                # 'weighttype', so neither key was ever compared.
                k: Equals(v) for k, v in as_node.items()
                if k in ('address', 'port', 'weight', 'type', 'condition')
            })),
        timeout=timeout_default)
def test_logout_clears_config(self, mock_clear):
    """The logout command exits 0 and reports the cleared credentials."""
    result = self.run_command(['logout'])
    self.assertThat(result.exit_code, Equals(0))
    # Raw string with the dot escaped: the original unescaped '.' matched
    # any character, and \C escapes in non-raw literals are deprecated.
    # (In a regex, \n still matches a newline, so behaviour is intact.)
    self.assertThat(result.output, MatchesRegex(
        r'.*Credentials cleared\.\n', flags=re.DOTALL))
def __eq__(self, args):
    """Comparator used as a mock call-argument matcher: returns True iff
    *args* is the expected ``colcon build`` command line.

    NOTE(review): ``build_attributes``, ``colcon_cmake_args``,
    ``colcon_catkin_cmake_args``, ``colcon_ament_cmake_args`` and
    ``plugin`` are free names here - presumably closure variables from an
    enclosing test function; verify against the surrounding code.
    """
    # Joined so individual flags can be matched order-independently.
    command = " ".join(args)
    # Build type flag is driven by whether "debug" is in build_attributes.
    if "debug" in build_attributes:
        self.test.assertThat(
            command,
            MatchesRegex(".*--cmake-args.*-DCMAKE_BUILD_TYPE=Debug"),
        )
    else:
        self.test.assertThat(
            command,
            MatchesRegex(".*--cmake-args.*-DCMAKE_BUILD_TYPE=Release"),
        )
    # Extra cmake/catkin/ament args must appear verbatim (re.escape:
    # the args may contain regex metacharacters).
    if colcon_cmake_args:
        expected_args = " ".join(colcon_cmake_args)
        self.test.assertThat(
            command,
            MatchesRegex(
                ".*--cmake-args.*{}".format(re.escape(expected_args))
            ),
        )
    if colcon_catkin_cmake_args:
        expected_args = " ".join(colcon_catkin_cmake_args)
        self.test.assertThat(
            command,
            MatchesRegex(
                ".*--catkin-cmake-args.*{}".format(re.escape(expected_args))
            ),
        )
    if colcon_ament_cmake_args:
        expected_args = " ".join(colcon_ament_cmake_args)
        self.test.assertThat(
            command,
            MatchesRegex(
                ".*--ament-cmake-args.*{}".format(re.escape(expected_args))
            ),
        )
    # Package selection is all-or-nothing: either the configured packages
    # are selected, or no --packages-select flag may appear at all.
    if self.test.colcon_packages:
        self.test.assertThat(
            command,
            Contains(
                "--packages-select {}".format(
                    " ".join(self.test.colcon_packages)
                )
            ),
        )
    else:
        self.test.assertThat(command, Not(Contains("--packages-select")))
    # Invariant parts of every colcon invocation: the executable and
    # subcommand come first, followed by the standard layout flags.
    self.test.assertThat(args[0:2], Equals(["colcon", "build"]))
    self.test.assertThat(command, Contains("--merge-install"))
    self.test.assertThat(
        command, Contains("--build-base {}".format(plugin.builddir))
    )
    self.test.assertThat(
        command,
        Contains("--base-paths {}".format(plugin._ros_package_path)),
    )
    self.test.assertThat(
        command, Contains("--install-base {}".format(plugin._ros_overlay))
    )
    return True
def test_reporting(self):
    """End-to-end check of GitHub status/comment reporting.

    Walks one pull request through three reporting phases: the check
    pipeline (status on start and on success, plus a success comment), a
    'reporting' pipeline triggered by comment (start comment, no start
    status), and its success status whose URL ends in a 32-hex-digit
    build UUID.
    """
    project = 'org/project'
    github = self.fake_github.getGithubClient(None)

    # pipeline reports pull status both on start and success
    self.executor_server.hold_jobs_in_build = True
    A = self.fake_github.openFakePullRequest(project, 'master', 'A')
    self.fake_github.emitEvent(A.getPullRequestOpenedEvent())
    self.waitUntilSettled()

    # We should have a status container for the head sha
    self.assertIn(A.head_sha,
                  github.repo_from_project(project)._commits.keys())
    statuses = self.fake_github.getCommitStatuses(project, A.head_sha)

    # We should only have one status for the head sha
    self.assertEqual(1, len(statuses))
    check_status = statuses[0]
    check_url = ('http://zuul.example.com/status/#%s,%s' %
                 (A.number, A.head_sha))
    self.assertEqual('tenant-one/check', check_status['context'])
    self.assertEqual('check status: pending', check_status['description'])
    self.assertEqual('pending', check_status['state'])
    self.assertEqual(check_url, check_status['url'])
    # No comment is posted for the start of the check pipeline.
    self.assertEqual(0, len(A.comments))

    # Release the held builds and let the pipeline finish.
    self.executor_server.hold_jobs_in_build = False
    self.executor_server.release()
    self.waitUntilSettled()

    # We should only have two statuses for the head sha
    statuses = self.fake_github.getCommitStatuses(project, A.head_sha)
    self.assertEqual(2, len(statuses))
    # Newest status is first in the list.
    check_status = statuses[0]
    check_url = ('http://zuul.example.com/status/#%s,%s' %
                 (A.number, A.head_sha))
    self.assertEqual('tenant-one/check', check_status['context'])
    self.assertEqual('check status: success', check_status['description'])
    self.assertEqual('success', check_status['state'])
    self.assertEqual(check_url, check_status['url'])
    self.assertEqual(1, len(A.comments))
    self.assertThat(A.comments[0],
                    MatchesRegex(r'.*Build succeeded.*', re.DOTALL))

    # pipeline does not report any status but does comment
    self.executor_server.hold_jobs_in_build = True
    self.fake_github.emitEvent(A.getCommentAddedEvent('reporting check'))
    self.waitUntilSettled()
    statuses = self.fake_github.getCommitStatuses(project, A.head_sha)
    # Still 2: the reporting pipeline posts no start status.
    self.assertEqual(2, len(statuses))
    # comments increased by one for the start message
    self.assertEqual(2, len(A.comments))
    self.assertThat(
        A.comments[1],
        MatchesRegex(r'.*Starting reporting jobs.*', re.DOTALL))
    self.executor_server.hold_jobs_in_build = False
    self.executor_server.release()
    self.waitUntilSettled()

    # pipeline reports success status
    statuses = self.fake_github.getCommitStatuses(project, A.head_sha)
    self.assertEqual(3, len(statuses))
    report_status = statuses[0]
    self.assertEqual('tenant-one/reporting', report_status['context'])
    self.assertEqual('reporting status: success',
                     report_status['description'])
    self.assertEqual('success', report_status['state'])
    # No success comment from the reporting pipeline: count unchanged.
    self.assertEqual(2, len(A.comments))

    base = 'http://logs.example.com/tenant-one/reporting/%s/%s/' % (
        A.project, A.number)

    # Deconstructing the URL because we don't save the BuildSet UUID
    # anywhere to do a direct comparison and doing regexp matches on a
    # full URL is painful.

    # The first part of the URL matches the easy base string
    self.assertThat(report_status['url'], StartsWith(base))

    # The rest of the URL is a UUID and a trailing slash.
    self.assertThat(report_status['url'][len(base):],
                    MatchesRegex(r'^[a-fA-F0-9]{32}\/$'))