def test_checklist_duplicate(self):
    """Checklist items of the same type should be aggregated across requests.

    Two requests from different users are attached to the same push, each
    with a 'search' and a 'search-cleanup' checklist item; the rendered
    page must list both usernames on the combined checklist line.
    """
    with fake_checklist_request():
        # insert fake data from FakeDataMixin
        fake_pushid = 2
        self.insert_pushes()
        self.insert_requests()
        test1_request = self.get_requests_by_user('testuser1')[0]
        test2_request = self.get_requests_by_user('testuser2')[0]
        self.insert_pushcontent(test1_request['id'], fake_pushid)
        self.insert_pushcontent(test2_request['id'], fake_pushid)

        # insert fake checklist data
        checklist_queries = []
        for req in (test1_request, test2_request):
            checklist_queries.append(db.push_checklist.insert({
                'request': req['id'],
                'type': 'search',
                'target': 'prod'
            }))
            checklist_queries.append(db.push_checklist.insert({
                'request': req['id'],
                'type': 'search-cleanup',
                'target': 'post-verify-prod'
            }))
        db.execute_transaction_cb(checklist_queries, on_db_return)

        uri = "/checklist?id=%d" % fake_pushid
        response = self.fetch(uri)
        T.assert_equal(response.error, None)
        T.assert_not_in("No checklist items for this push", response.body)
        # Raw string so \d is a regex digit class, not a (deprecated)
        # string escape; the pattern bytes are unchanged.
        T.assert_not_equal(
            re.search(r"for testuser\d,testuser\d", response.body), None)
        T.assert_in("Before Certifying - Do In Prod", response.body)
def test_region_nobucket_nomatchexists(self):
    """With aws_region set, no bucket given, and no existing bucket in that
    region, the runner must not reuse bucket1 as its scratch URI."""
    self.bucket1.set_location('PUPPYLAND')
    runner = EMRJobRunner(aws_region='KITTYLAND',
                          s3_endpoint='KITTYLAND',
                          conf_path=False)
    assert_not_equal(runner._opts['s3_scratch_uri'], self.bucket1_uri)
def test_suite_pollution_with_suites_attribute(self):
    """Verify the suite decorator replaces, rather than mutates, _suites.

    Mutating the existing _suites set in place would leak suites between
    TestCases that share the attribute object. Comparing id() before and
    after decoration proves a fresh object was assigned.
    """
    def undecorated():
        pass

    undecorated._suites = set(['fake_suite_1'])
    original_suites_id = id(undecorated._suites)

    decorated = suite('fake_suite_2')(undecorated)

    assert_not_equal(
        original_suites_id,
        id(decorated._suites),
        "suites decorator modifies the object's _suite attribute"
    )
def reducer(self, key, values):
    """Pass every (key, value) pair through unchanged.

    First asserts the job is reading the uploaded copy of the n-file
    rather than the local original.
    """
    # reducer should always be reading from the "uploaded" file
    uploaded_path = os.environ['LOCAL_N_FILE_PATH']
    assert_not_equal(self.options.n_file, uploaded_path)
    # identity reduce: emit values as-is
    for v in values:
        yield key, v
def test_multistarted_gradient_descent_optimizer_crippled_start(self):
    """Check that multistarted GD is finding the best result from GD."""
    # Cripple GD to a single iteration so individual restarts cannot
    # converge on their own; only a restart seeded at the optimum can win.
    crippled_parameters = GradientDescentParameters(
        1,
        1,
        self.gd_parameters.num_steps_averaged,
        self.gd_parameters.gamma,
        self.gd_parameters.pre_mult,
        self.gd_parameters.max_relative_change,
        self.gd_parameters.tolerance,
    )
    crippled_optimizer = GradientDescentOptimizer(
        self.domain, self.polynomial, crippled_parameters)

    num_points = 15
    start_points = self.domain.generate_uniform_random_points_in_domain(
        num_points)
    multistart = MultistartOptimizer(crippled_optimizer, num_points)

    best_point, _ = multistart.optimize(random_starts=start_points)
    # The random start set won't include the optimum, so multistart GD
    # won't find it: every coordinate must differ from the optimum.
    for component in (best_point - self.polynomial.optimum_point):
        T.assert_not_equal(component, 0.0)

    starts_with_optimum = numpy.append(
        start_points,
        self.polynomial.optimum_point.reshape((1, self.polynomial.dim)),
        axis=0)
    best_point, _ = multistart.optimize(random_starts=starts_with_optimum)
    # Now the optimum is among the starts, so multistart GD must return it.
    for component in (best_point - self.polynomial.optimum_point):
        T.assert_equal(component, 0.0)
def test_checklist_duplicate(self):
    """Duplicate checklist types on one push must merge into one line
    naming both requesting users."""
    with fake_checklist_request():
        # insert fake data from FakeDataMixin
        fake_pushid = 2
        self.insert_pushes()
        self.insert_requests()
        test1_request = self.get_requests_by_user('testuser1')[0]
        test2_request = self.get_requests_by_user('testuser2')[0]
        self.insert_pushcontent(test1_request['id'], fake_pushid)
        self.insert_pushcontent(test2_request['id'], fake_pushid)

        # insert fake checklist data
        checklist_queries = []
        for req in (test1_request, test2_request):
            checklist_queries.append(
                db.push_checklist.insert({
                    'request': req['id'],
                    'type': 'search',
                    'target': 'prod'
                }))
            checklist_queries.append(
                db.push_checklist.insert({
                    'request': req['id'],
                    'type': 'search-cleanup',
                    'target': 'post-verify-prod'
                }))
        db.execute_transaction_cb(checklist_queries, on_db_return)

        uri = "/checklist?id=%d" % fake_pushid
        response = self.fetch(uri)
        T.assert_equal(response.error, None)
        T.assert_not_in("No checklist items for this push", response.body)
        # Raw string: \d must reach the regex engine as a digit class,
        # not be treated as a (deprecated) string escape.
        T.assert_not_equal(
            re.search(r"for testuser\d,testuser\d", response.body), None)
        T.assert_in("Before Certifying - Do In Prod", response.body)
def test_failing_child_initialized_hook(self):
    """A child_initialized hook that raises must make the parent exit
    with a nonzero status.

    We fork first so the exiting "parent" is our forked child, which the
    real test process can watch without dying itself.
    """
    def child_initialized_hook(child_pid):
        raise Exception("child_initialized hook raises exception")

    # When child_initialized hook fails parent process will
    # exit. To test a failing initialization hook we fork and watch
    # the new child.
    pid = os.fork()
    if not pid:
        event_hooks = {"child_initialized": child_initialized_hook}
        with testing.no_stderr():
            # This will fail. Redirecting stderr to /dev/null will
            # silence the test output.
            self.run_child_function_in_catbox(event_hooks=event_hooks)
    else:
        status = 0
        wait_pid = 0
        try:
            # Poll non-blocking for up to ~0.5s for the forked child to exit.
            for _ in range(5):
                (wait_pid, status, _) = os.wait4(pid, os.WNOHANG)
                if wait_pid == pid:
                    break
                time.sleep(.1)
        except OSError as e:
            # Fix: membership must be tested against the message text;
            # `"..." in e` checks the exception's args items, not the
            # message substring.
            T.assert_in("No child processes", str(e))
        else:
            T.assert_not_equal(
                status,
                0,
                "Failing child_initialized hook did not make parent exit")
def test_find_python_traceback(self):
    """find_python_traceback() should locate a traceback in stderr,
    keeping the leading "Traceback" line, even amid -v noise."""
    def run(*args):
        return Popen(args, stdout=PIPE, stderr=PIPE).communicate()

    # sanity-check normal operations
    clean_out, clean_err = run('python', '-c', "print sorted('321')")
    assert_equal(clean_out.rstrip(), "['1', '2', '3']")
    assert_equal(find_python_traceback(StringIO(clean_err)), None)

    # Oops, can't sort a number.
    stdout, stderr = run('python', '-c', "print sorted(321)")
    # We expect something like this:
    #
    # Traceback (most recent call last):
    #  File "<string>", line 1, in <module>
    # TypeError: 'int' object is not iterable
    assert_equal(stdout, '')

    # save the traceback for the next step
    tb = find_python_traceback(StringIO(stderr))
    assert_not_equal(tb, None)
    assert isinstance(tb, list)
    assert_equal(len(tb), 3)  # The first line ("Traceback...") is not skipped

    # make sure we can find the same traceback in noise
    noisy_out, noisy_err = run('python', '-v', '-c', "print sorted(321)")
    assert_equal(noisy_out, '')
    assert_not_equal(noisy_err, stderr)
    assert_equal(find_python_traceback(StringIO(noisy_err)), tb)
def test_find_python_traceback(self):
    """find_python_traceback() should pull the traceback (minus the
    leading "Traceback" line) out of stderr, even amid -v noise."""
    def run(*args):
        return Popen(args, stdout=PIPE, stderr=PIPE).communicate()

    # sanity-check normal operations
    clean_out, clean_err = run('python', '-c', "print sorted('321')")
    assert_equal(clean_out.rstrip(), "['1', '2', '3']")
    assert_equal(find_python_traceback(StringIO(clean_err)), None)

    # Oops, can't sort a number.
    stdout, stderr = run('python', '-c', "print sorted(321)")
    # We expect something like this:
    #
    # Traceback (most recent call last):
    #  File "<string>", line 1, in <module>
    # TypeError: 'int' object is not iterable
    assert_equal(stdout, '')

    # save the traceback for the next step
    tb = find_python_traceback(StringIO(stderr))
    assert_not_equal(tb, None)
    assert isinstance(tb, list)
    assert_equal(len(tb), 2)  # The first line ("Traceback...") is skipped

    # make sure we can find the same traceback in noise
    noisy_out, noisy_err = run('python', '-v', '-c', "print sorted(321)")
    assert_equal(noisy_out, '')
    assert_not_equal(noisy_err, stderr)
    assert_equal(find_python_traceback(StringIO(noisy_err)), tb)
def test_simple_merge(self):
    """Test merging a config with an empty 'cartoVars' attribute."""
    with open(self.source) as f:
        original_mss = f.read().decode('utf8')

    merged = process_mss(self.source, self.config)
    merged_lines = merged.splitlines()
    original_lines = original_mss.splitlines()

    # Something must have changed, but the line count must not.
    assert_not_equal(merged, original_mss)
    assert_equal(len(merged_lines), len(original_lines))

    for lineno, (before, after) in enumerate(zip(original_lines,
                                                 merged_lines)):
        if lineno == 29:
            # This line holds a variable our test config substitutes:
            # it must differ, yet remain a valid MSS variable.
            assert_not_equal(before, after)
            assert_match_regex(MSS_VAR_RE, after)
            assert_equal(
                after[-5:-1],
                self.config.get('cartoVars').get('park')
            )
        else:
            assert_equal(before, after)
def test_include_no_request_buttons(self):
    """Neither button span may appear in the default rendering."""
    tree = self.render_module_request_with_users(
        self.basic_request, 'testuser', 'testuser', **self.basic_kwargs)
    for span in tree.iter('span'):
        # .get() avoids a KeyError aborting the scan if a span carries
        # no class attribute at all.
        T.assert_not_equal('push-request-buttons', span.attrib.get('class'))
        T.assert_not_equal('edit-request-buttons', span.attrib.get('class'))
def test_verbose(self):
    """-v should add stderr chatter without changing output or exit code."""
    stdout, stderr, returncode = self.run_job()
    assert_equal(stdout, '2\t"bar"\n1\t"foo"\n3\tnull\n')
    assert_not_equal(stderr, '')
    assert_equal(returncode, 0)
    baseline_stderr = stderr

    stdout, stderr, returncode = self.run_job(['-v'])
    assert_equal(stdout, '2\t"bar"\n1\t"foo"\n3\tnull\n')
    assert_not_equal(stderr, '')
    assert_equal(returncode, 0)
    # verbose mode must be strictly chattier than the baseline run
    assert_gt(len(stderr), len(baseline_stderr))
def test_include_edit_buttons(self):
    """Exactly one edit-buttons span, and no push-buttons span, renders
    when edit_buttons is requested."""
    kwargs = dict(self.basic_kwargs)
    kwargs['edit_buttons'] = True
    tree = self.render_module_request_with_users(
        self.basic_request, 'testuser', 'testuser', **kwargs)
    found_span = []
    for span in tree.iter('span'):
        # .get() avoids a KeyError on spans without a class attribute.
        T.assert_not_equal('push-request-buttons', span.attrib.get('class'))
        if span.attrib.get('class') == 'edit-request-buttons':
            found_span.append(span)
    T.assert_equal(1, len(found_span))
def test_get_unittest_with_dbconfig(self, mock_openfile, mocked_sa_url):
    # When both unittest_db_url and unittest_db_config are set, the
    # resolved URL must differ from unittest_db_url (i.e. the config
    # file presumably takes precedence -- confirm against get_db_url).
    self.options_db.unittest_db_url = 'sqlite:///fake/database'
    self.options_db.unittest_db_config = '/fake/path/to/db/'
    # Fake file object usable as a context manager.
    mocked_open = mock.Mock(spec=file)
    mocked_open.__enter__ = mock.Mock()
    mocked_open.__exit__ = mock.Mock()
    mock_openfile.return_value = mocked_open
    testify.assert_not_equal(get_db_url(self.options_db), self.options_db.unittest_db_url)
    # NOTE(review): the two lines below are bare attribute accesses, not
    # calls -- they assert nothing. They likely were meant to be e.g.
    # `assert mocked_open.read.called`; confirm intent before changing.
    mocked_open.read.assert_called
    mocked_sa_url.URL.assert_called
def test_open(self):
    """Opening the same path twice returns the cached wrapper; a
    different path yields a different wrapper."""
    first = self.manager.open(self.file1.name)
    # the wrapper is registered in the cache under its name
    assert_in(first.name, self.manager.cache)
    second = self.manager.open(self.file1.name)
    # cache hit: the very same wrapper comes back
    assert_equal(first, second)
    # a different file gives a different wrapper
    assert_not_equal(first, self.manager.open(self.file2.name))
def test_get_unittest_with_dbconfig(self, mock_openfile, mocked_sa_url):
    # When both unittest_db_url and unittest_db_config are set, the
    # resolved URL must differ from unittest_db_url (i.e. the config
    # file presumably takes precedence -- confirm against find_db_url).
    self.options_db.unittest_db_url = 'sqlite:///fake/database'
    self.options_db.unittest_db_config = '/fake/path/to/db/'
    # Fake file object usable as a context manager.
    mocked_open = mock.Mock(spec=file)
    mocked_open.__enter__ = mock.Mock()
    mocked_open.__exit__ = mock.Mock()
    mock_openfile.return_value = mocked_open
    testify.assert_not_equal(find_db_url(self.options_db), self.options_db.unittest_db_url)
    # NOTE(review): the two lines below are bare attribute accesses, not
    # calls -- they assert nothing. They likely were meant to be e.g.
    # `assert mocked_open.read.called`; confirm intent before changing.
    mocked_open.read.assert_called
    mocked_sa_url.URL.assert_called
def test_pushcontents_duplicate_key(self):
    """Inserting a pickme with an existing (request, push) pair must fail.

    push_pushcontents should define a multi-column primary key on
    (request id, push id). The ServletTestMixin fixtures already contain
    this (pushid, requestid) binding, so repeating it has to error out.
    """
    with self.fake_pickme_request_ignore_error():
        requestid = 1
        pushid = 1
        response = self.fetch(
            "/pickmerequest?push=%d&request=%d" % (pushid, requestid))
        T.assert_not_equal(response.error, None)
def test_griftcard(self):
    """Constructor should store its arguments and derive the other fields."""
    card = Griftcard(
        card_number=self.card_number,
        phone_number=self.phone_number,
        security_code=self.security_code,
    )
    testify.assert_equal(card.phone_number, self.phone_number)
    testify.assert_equal(card.card_number, self.card_number)
    testify.assert_equal(card.security_code, self.security_code)
    testify.assert_equal(card.phone_number_last_four,
                         self.phone_number[-4:])
    # TODO: replace lame test
    testify.assert_not_equal(card.transactions, None)
    testify.assert_equal(card.balance, 7.37)
def test_duplicate_pickmerequest(self):
    """A request already picked onto one push can't be picked onto another.

    Allowing a pickme on two valid pushes would create confusion when/if
    a pushmaster accepts the request in a push.
    """
    with self.fake_pickme_request_ignore_error():
        first_push, second_push, requestid = 1, 2, 2

        response = self.fetch(
            "/pickmerequest?push=%d&request=%d" % (first_push, requestid))
        T.assert_equal(response.error, None)

        response = self.fetch(
            "/pickmerequest?push=%d&request=%d" % (second_push, requestid))
        T.assert_not_equal(response.error, None)
def test_get_db_url_with_dbconfig(self):
    # When both violation_dburl and violation_dbconfig are set, the
    # resolved URL must differ from violation_dburl (i.e. the config
    # file presumably takes precedence -- confirm against get_db_url).
    options = mock.Mock()
    options.violation_dburl = 'sqlite:///fake/database'
    options.violation_dbconfig = '/fake/path/to/db/'
    # Fake file object usable as a context manager.
    mocked_open = mock.Mock(spec=file)
    mocked_open.__enter__ = mock.Mock()
    mocked_open.__exit__ = mock.Mock()
    with mock.patch('testify.plugins.violation_collector.open', create=True, return_value=mocked_open):
        with mock.patch.object(SA.engine.url, 'URL') as mocked_sa_url:
            T.assert_not_equal(get_db_url(options), options.violation_dburl)
            # NOTE(review): the two lines below are bare attribute
            # accesses, not calls -- they assert nothing. Likely meant
            # e.g. `assert mocked_open.read.called`; confirm intent.
            mocked_open.read.assert_called
            mocked_sa_url.URL.assert_called
def test_get_db_url_with_dbconfig(self):
    # When both violation_dburl and violation_dbconfig are set, the
    # resolved URL must differ from violation_dburl (i.e. the config
    # file presumably takes precedence -- confirm against get_db_url).
    options = mock.Mock()
    options.violation_dburl = 'sqlite:///fake/database'
    options.violation_dbconfig = '/fake/path/to/db/'
    # Fake file object usable as a context manager.
    mocked_open = mock.Mock(spec=file)
    mocked_open.__enter__ = mock.Mock()
    mocked_open.__exit__ = mock.Mock()
    with mock.patch(
        'testify.plugins.violation_collector.open',
        create=True,
        return_value=mocked_open
    ):
        with mock.patch.object(SA.engine.url, 'URL') as mocked_sa_url:
            T.assert_not_equal(get_db_url(options), options.violation_dburl)
            # NOTE(review): the two lines below are bare attribute
            # accesses, not calls -- they assert nothing. Likely meant
            # e.g. `assert mocked_open.read.called`; confirm intent.
            mocked_open.read.assert_called
            mocked_sa_url.URL.assert_called
def test_bind_collection_vanilla(self):
    """_bind_collection should attach a method named after the collection
    that lists the collection by its id."""
    name = "nodes"
    collection_id = "http://%s:%s/api/collections/%s" % (
        self.hostname, self.port, name)

    self.razor_client._bind_collection({"name": name, "id": collection_id})

    bound_method = getattr(self.razor_client, "nodes", None)
    T.assert_not_equal(bound_method, None)
    bound_method()
    self.mock_list_collection.assert_called_once_with(collection_id)
def test_bind_command(self):
    """_bind_command should expose 'unbind-node' as unbind_node() and
    route the call through execute_command."""
    name = "unbind-node"
    command_id = "http://%s:%s/api/collections/%s" % (
        self.hostname, self.port, name)

    self.razor_client._bind_command({"name": name, "id": command_id})

    bound_method = getattr(self.razor_client, "unbind_node", None)
    T.assert_not_equal(bound_method, None)
    bound_method()
    self.mock_execute_command.assert_called_once_with(command_id)
def test_no_output(self):
    """--no-output must keep stdout empty and put results in --output-dir."""
    assert_equal(os.listdir(self.tmp_dir), [])  # sanity check

    stdout, stderr, returncode = self.run_job(
        ['--no-output', '--output-dir', self.tmp_dir])
    assert_equal(stdout, '')
    assert_not_equal(stderr, '')
    assert_equal(returncode, 0)

    # make sure the correct output landed in the temp dir
    assert_not_equal(os.listdir(self.tmp_dir), [])
    collected_lines = []
    for dirpath, _, filenames in os.walk(self.tmp_dir):
        for filename in filenames:
            with open(os.path.join(dirpath, filename)) as output_f:
                collected_lines.extend(output_f)
    assert_equal(sorted(collected_lines),
                 ['1\t"foo"\n', '2\t"bar"\n', '3\tnull\n'])
def test_suite_pollution_with_suites_attribute(self):
    """The suite decorator must assign a new _suites object, not mutate
    the existing one.

    In-place mutation of a shared _suites set pollutes suites across
    TestCases; a changed id() proves a fresh object was assigned.
    """
    def target():
        pass

    target._suites = set(['fake_suite_1'])
    before = target._suites

    target = suite('fake_suite_2')(target)
    after = target._suites

    assert_not_equal(
        id(before),
        id(after),
        "suites decorator modifies the object's _suite attribute")
def test_loading_boostrapped_mrjob_library(self):
    """The job script should import mrjob from the bootstrapped copy in
    the runner's tmp dir, not from our local checkout."""
    # Compare directories rather than full paths to sidestep edge cases
    # where we load the .py file but the script loads the .pyc compiled
    # from it.
    our_mrjob_dir = os.path.dirname(os.path.realpath(mrjob.__file__))

    job = MRJobWhereAreYou(['--no-conf'])
    job.sandbox()

    with job.make_runner() as runner:
        # sanity check
        assert_equal(runner.get_opts()['bootstrap_mrjob'], True)
        local_tmp_dir = os.path.realpath(runner._get_local_tmp_dir())

        runner.run()
        output = list(runner.stream_output())
        assert_equal(len(output), 1)

        # the script reports which dir it imported mrjob from; it must be
        # inside the runner's tmp dir, not ours
        _, script_mrjob_dir = job.parse_output_line(output[0])
        assert_not_equal(our_mrjob_dir, script_mrjob_dir)
        assert script_mrjob_dir.startswith(local_tmp_dir)
def test_multistarted_gradient_descent_optimizer_crippled_start(self):
    """Check that multistarted GD is finding the best result from GD."""
    # Restrict GD to one iteration so no single restart can converge;
    # multistart can then only "find" the optimum if it was a start point.
    one_step_parameters = GradientDescentParameters(
        1,
        1,
        self.gd_parameters.num_steps_averaged,
        self.gd_parameters.gamma,
        self.gd_parameters.pre_mult,
        self.gd_parameters.max_relative_change,
        self.gd_parameters.tolerance,
    )
    one_step_optimizer = GradientDescentOptimizer(
        self.domain, self.polynomial, one_step_parameters)

    num_points = 15
    random_points = self.domain.generate_uniform_random_points_in_domain(
        num_points)
    multistart = MultistartOptimizer(one_step_optimizer, num_points)

    found_point, _ = multistart.optimize(random_starts=random_points)
    # Without the optimum among the starts, every coordinate of the
    # found point must miss the optimum.
    for delta in (found_point - self.polynomial.optimum_point):
        T.assert_not_equal(delta, 0.0)

    augmented_points = numpy.append(
        random_points,
        self.polynomial.optimum_point.reshape((1, self.polynomial.dim)),
        axis=0)
    found_point, _ = multistart.optimize(random_starts=augmented_points)
    # With the optimum among the starts, multistart GD must return it.
    for delta in (found_point - self.polynomial.optimum_point):
        T.assert_equal(delta, 0.0)
def test_create_server(self):
    """create_server should return a non-None location for a new server."""
    location = righteous.create_server(self.env, 'm1.small')
    assert_not_equal(location, None)
    # remember this env so teardown can clean it up
    self.envs.append(self.env)
# When child_initialized hook fails parent process will # exit. To test a failing initilization hook we fork and watch # the new child. pid = os.fork() if not pid: event_hooks = {"child_initialized" : child_initialized_hook} with testing.no_stderr(): # This will fail. redirecting stderr to /dev/null will # silence the test output. self.run_child_function_in_catbox(event_hooks=event_hooks) else: status = 0 wait_pid = 0 try: for _ in range(5): (wait_pid, status, _) = os.wait4(pid, os.WNOHANG) if wait_pid == pid: break time.sleep(.1) except OSError, e: T.assert_in("No child processes", e) else: T.assert_not_equal( status, 0, "Failing child_initialized hook did not make parent exit" ) def test_catbox_run_with_no_event_hooks(self): catbox.run(self.default_child_function)
# NOTE(review): this chunk starts mid-function -- the hook function's
# `def` line is outside this view, so the indentation below is a
# reconstruction; confirm against the full file.
raise Exception, "child_initialized hook raises exception"  # Python 2 raise syntax

# When child_initialized hook fails parent process will
# exit. To test a failing initilization hook we fork and watch
# the new child.
pid = os.fork()
if not pid:
    event_hooks = {"child_initialized": child_initialized_hook}
    with testing.no_stderr():
        # This will fail. redirecting stderr to /dev/null will
        # silence the test output.
        self.run_child_function_in_catbox(event_hooks=event_hooks)
else:
    status = 0
    wait_pid = 0
    try:
        # poll non-blocking for up to ~0.5s for the forked child to exit
        for _ in range(5):
            (wait_pid, status, _) = os.wait4(pid, os.WNOHANG)
            if wait_pid == pid:
                break
            time.sleep(.1)
    except OSError, e:  # Python 2 except syntax
        T.assert_in("No child processes", e)
    else:
        T.assert_not_equal(
            status,
            0,
            "Failing child_initialized hook did not make parent exit")

def test_catbox_run_with_no_event_hooks(self):
    # Smoke test: catbox.run must accept a child function with no hooks.
    catbox.run(self.default_child_function)
def mapper(self, _, value):
    """Emit (None, value ** n) for each input value."""
    # the mapper must be reading from the "uploaded" n-file copy
    uploaded_path = os.environ['LOCAL_N_FILE_PATH']
    assert_not_equal(self.options.n_file, uploaded_path)
    yield None, value ** self.n
def test__ne__(self):
    """A Turtle with a different base/parts must compare unequal."""
    different = turtle.Turtle(base='one/two', parts=['three'])
    assert_not_equal(self.path, different)
def test_region_nobucket_nolocation(self):
    """aws_region set, no bucket given, default bucket has no location:
    the runner must not reuse bucket1 for scratch space."""
    runner = EMRJobRunner(aws_region='PUPPYLAND',
                          s3_endpoint='PUPPYLAND',
                          conf_path=False)
    assert_not_equal(runner._opts['s3_scratch_uri'], self.bucket1_uri)
def test_eq(self):
    """TimePeriod equality must consider both endpoints."""
    # equal endpoints -> equal periods
    T.assert_equal(dates.TimePeriod(None, None),
                   dates.TimePeriod(None, None))
    T.assert_equal(dates.TimePeriod(date.today(), None),
                   dates.TimePeriod(date.today(), None))
    # differing start dates -> unequal periods
    T.assert_not_equal(dates.TimePeriod(None, None),
                       dates.TimePeriod(date.today(), None))
def mapper(self, _, value):
    """Yield value raised to the n-th power under a None key."""
    # mapper should always be reading from the "uploaded" n-file
    assert_not_equal(self.options.n_file,
                     os.environ['LOCAL_N_FILE_PATH'])
    result = value ** self.n
    yield None, result