def test_command_output(self):
    """drf_create_token's output mentions the generated token, the target
    username, and the persisted key."""
    buffer = StringIO()
    call_command('drf_create_token', self.user.username, stdout=buffer)
    token = Token.objects.first()
    output = buffer.getvalue()
    self.assertIn('Generated token', output)
    self.assertIn(self.user.username, output)
    self.assertIn(token.key, output)
def test_nk_on_serialize(self):
    """
    Check that natural key requirements are taken into account
    when serializing models
    """
    # Load a fixture whose objects reference each other by natural key.
    management.call_command(
        'loaddata',
        'forward_ref_lookup.json',
        verbosity=0,
        commit=False
    )
    stdout = StringIO()
    # Dump with natural keys; ordering must respect natural-key dependencies
    # (stores and person before the book that references them).
    management.call_command(
        'dumpdata',
        'fixtures_regress.book',
        'fixtures_regress.person',
        'fixtures_regress.store',
        verbosity=0,
        format='json',
        use_natural_keys=True,
        stdout=stdout,
    )
    self.assertEqual(
        stdout.getvalue(),
        """[{"pk": 2, "model": "fixtures_regress.store", "fields": {"main": null, "name": "Amazon"}}, {"pk": 3, "model": "fixtures_regress.store", "fields": {"main": null, "name": "Borders"}}, {"pk": 4, "model": "fixtures_regress.person", "fields": {"name": "Neal Stephenson"}}, {"pk": 1, "model": "fixtures_regress.book", "fields": {"stores": [["Amazon"], ["Borders"]], "name": "Cryptonomicon", "author": ["Neal Stephenson"]}}]"""
    )
def test_set_history_multiple(self):
    """
    Ensure password history is created for all users without existing history.
    """
    self.UserModel.objects.create_user(username="******")
    self.UserModel.objects.create_user(username="******")
    password_age = 5  # days
    out = StringIO()
    call_command(
        "user_password_history",
        "--days={}".format(password_age),
        stdout=out
    )
    # Each user should end up with exactly one history entry, and all
    # entries should carry the same timestamp.
    user = self.UserModel.objects.get(username="******")
    password_history = user.password_history.all()
    self.assertEqual(password_history.count(), 1)
    first_timestamp = password_history[0].timestamp
    user = self.UserModel.objects.get(username="******")
    password_history = user.password_history.all()
    self.assertEqual(password_history.count(), 1)
    second_timestamp = password_history[0].timestamp
    self.assertEqual(first_timestamp, second_timestamp)
    user = self.UserModel.objects.get(username="******")
    password_history = user.password_history.all()
    self.assertEqual(password_history.count(), 1)
    third_timestamp = password_history[0].timestamp
    self.assertEqual(first_timestamp, third_timestamp)
    # The command reports the timestamp used and the number of users touched.
    self.assertIn("Password history set to ", out.getvalue())
    self.assertIn("for {} users".format(3), out.getvalue())
def test_loaddata_not_existant_fixture_file(self):
    """loaddata with a fixture name that matches no file should report
    "No fixture ... in" on stdout rather than raise."""
    stdout_output = StringIO()
    # Any warnings emitted while searching for the fixture are recorded,
    # not printed.
    with warnings.catch_warnings(record=True):
        management.call_command(
            "loaddata",
            "this_fixture_doesnt_exist",
            verbosity=2,
            commit=False,
            stdout=stdout_output,
        )
    # assertIn produces a far clearer failure message than
    # assertTrue("..." in ...), which only reports "False is not true".
    self.assertIn(
        "No fixture 'this_fixture_doesnt_exist' in",
        force_text(stdout_output.getvalue()),
    )
class ChangepasswordManagementCommandTestCase(TestCase):
    """Tests for the ``changepassword`` management command."""

    def setUp(self):
        # One known user plus buffers to capture command output.
        self.user = models.User.objects.create_user(username='******', password='******')
        self.stdout = StringIO()
        self.stderr = StringIO()

    def tearDown(self):
        self.stdout.close()
        self.stderr.close()

    def test_that_changepassword_command_changes_joes_password(self):
        "Executing the changepassword management command should change joe's password"
        self.assertTrue(self.user.check_password('qwerty'))
        command = changepassword.Command()
        # Bypass the interactive prompt by stubbing the password getter.
        command._get_pass = lambda *args: 'not qwerty'
        command.execute("joe", stdout=self.stdout)
        command_output = self.stdout.getvalue().strip()
        self.assertEqual(
            command_output,
            "Changing password for user 'joe'\nPassword changed successfully for user 'joe'"
        )
        self.assertTrue(models.User.objects.get(username="******").check_password("not qwerty"))

    def test_that_max_tries_exits_1(self):
        """
        A CommandError should be thrown by handle() if the user
        enters in mismatched passwords three times.
        """
        command = changepassword.Command()
        # The stub returns the (truthy, varying) args tuple when called with
        # arguments and 'foo' otherwise, so the two entries never match.
        command._get_pass = lambda *args: args or 'foo'
        with self.assertRaises(CommandError):
            command.execute("joe", stdout=self.stdout, stderr=self.stderr)
def test_attribute_name_not_python_keyword(self): out = StringIO() # Lets limit the introspection to tables created for models of this # application call_command('inspectdb', table_name_filter=lambda tn: tn.startswith('inspectdb_'), stdout=out) output = out.getvalue() error_message = "inspectdb generated an attribute name which is a python keyword" # Recursive foreign keys should be set to 'self' self.assertIn("parent = models.ForeignKey('self', models.DO_NOTHING)", output) self.assertNotIn( "from = models.ForeignKey(InspectdbPeople, models.DO_NOTHING)", output, msg=error_message, ) # As InspectdbPeople model is defined after InspectdbMessage, it should be quoted self.assertIn( "from_field = models.ForeignKey('InspectdbPeople', models.DO_NOTHING, db_column='from_id')", output, ) self.assertIn( "people_pk = models.ForeignKey(InspectdbPeople, models.DO_NOTHING, primary_key=True)", output, ) self.assertIn( "people_unique = models.ForeignKey(InspectdbPeople, models.DO_NOTHING, unique=True)", output, )
def test_set_history_force(self):
    """
    Ensure specific password history is created for all users.
    """
    another_user = self.UserModel.objects.create_user(username="******")
    PasswordHistory.objects.create(user=another_user)
    age_in_days = 5  # days

    stream = StringIO()
    call_command(
        "user_password_history",
        "--days={}".format(age_in_days),
        "--force",
        stdout=stream,
    )

    def history_for(name):
        return self.UserModel.objects.get(username=name).password_history.all()

    self.assertEqual(history_for("******").count(), 1)

    # verify user with existing history DID get another entry
    self.assertEqual(history_for("******").count(), 2)

    self.assertIn("Password history set to ", stream.getvalue())
    self.assertIn("for {} users".format(2), stream.getvalue())
def serialize_db_to_string(self): """ Serializes all data in the database into a JSON string. Designed only for test runner usage; will not handle large amounts of data. """ # Build list of all apps to serialize from django.db.migrations.loader import MigrationLoader loader = MigrationLoader(self.connection) app_list = [] for app_config in apps.get_app_configs(): if ( app_config.models_module is not None and app_config.label in loader.migrated_apps and app_config.name not in settings.TEST_NON_SERIALIZED_APPS ): app_list.append((app_config, None)) # Make a function to iteratively return every object def get_objects(): for model in serializers.sort_dependencies(app_list): if (not model._meta.proxy and model._meta.managed and router.allow_migrate(self.connection.alias, model)): queryset = model._default_manager.using(self.connection.alias).order_by(model._meta.pk.name) for obj in queryset.iterator(): yield obj # Serialize to a string out = StringIO() serializers.serialize("json", get_objects(), indent=None, stream=out) return out.getvalue()
def finalise(self):
    """Flush all collected profiling data to the database models.

    Writes the cProfile text report onto the request, converts the raw
    query dicts into SQLQuery rows, then creates Profile rows linked to
    their queries.
    """
    if hasattr(self, 'pythonprofiler'):
        s = StringIO()
        ps = pstats.Stats(self.pythonprofiler, stream=s).sort_stats('cumulative')
        ps.print_stats()
        profile_text = s.getvalue()
        profile_text = "\n".join(
            profile_text.split("\n")[0:256])  # don't record too much because it can overflow the field storage size
        self.request.pyprofile = profile_text
    # Persist each raw query dict and remember the created model so
    # profiles can reference it below.
    for _, query in self.queries.items():
        query_model = models.SQLQuery.objects.create(**query)
        query['model'] = query_model
    for _, profile in self.profiles.items():
        profile_query_models = []
        if TYP_QUERIES in profile:
            # TYP_QUERIES holds temp ids; it must be removed before the
            # remaining keys are passed to Profile.objects.create(**profile).
            profile_queries = profile[TYP_QUERIES]
            del profile[TYP_QUERIES]
            for query_temp_id in profile_queries:
                try:
                    query = self.queries[query_temp_id]
                    try:
                        profile_query_models.append(query['model'])
                    except KeyError:
                        raise SilkInternalInconsistency('Profile references a query dictionary that has not '
                                                        'been converted into a Django model. This should '
                                                        'never happen, please file a bug report')
                except KeyError:
                    raise SilkInternalInconsistency('Profile references a query temp_id that does not exist. '
                                                    'This should never happen, please file a bug report')
        profile = models.Profile.objects.create(**profile)
        if profile_query_models:
            profile.queries = profile_query_models
            profile.save()
    # NOTE(review): placement reconstructed as running once after all
    # profiles are stored — confirm against the original indentation.
    self._record_meta_profiling()
def test_unique_together_meta(self):
    """inspectdb should emit a Meta.unique_together for composite unique
    constraints on introspected tables."""
    stream = StringIO()
    call_command(
        'inspectdb',
        table_name_filter=lambda name: name.startswith('inspectdb_uniquetogether'),
        stdout=stream,
    )
    self.assertIn(
        " unique_together = (('field1', 'field2'),)",
        stream.getvalue(),
        msg='inspectdb should generate unique_together.',
    )
def test_dump_and_load_m2m_simple(self):
    """
    Test serializing and deserializing back models with simple M2M relations
    """
    a = M2MSimpleA.objects.create(data="a")
    b1 = M2MSimpleB.objects.create(data="b1")
    b2 = M2MSimpleB.objects.create(data="b2")
    a.b_set.add(b1)
    a.b_set.add(b2)

    # Dump both models using natural foreign keys for the M2M links.
    out = StringIO()
    management.call_command(
        'dumpdata',
        'fixtures_regress.M2MSimpleA',
        'fixtures_regress.M2MSimpleB',
        use_natural_foreign_keys=True,
        stdout=out,
    )

    # Wipe the tables so the reload below starts from an empty database.
    for model in [M2MSimpleA, M2MSimpleB]:
        model.objects.all().delete()

    objects = serializers.deserialize("json", out.getvalue())
    for obj in objects:
        obj.save()

    # The M2M relation must survive the round trip.
    new_a = M2MSimpleA.objects.get_by_natural_key("a")
    self.assertQuerysetEqual(new_a.b_set.all(), [
        "<M2MSimpleB: b1>",
        "<M2MSimpleB: b2>"
    ], ordered=False)
def test_urlbrevity_management_command():
    """The urlbrevity command reports invalid input, unresolvable codes, and
    a full description for each value that decodes to a model instance."""
    ct = ContentType.objects.get_for_model(User)
    user = User.objects.create(username='******')
    user2 = User.objects.create(username='******')
    tup1 = (ct.pk, user.pk)
    tup2 = (ct.pk, user2.pk)
    enc = encode(*tup1)
    enc2 = encode(*tup2)
    stdout = StringIO()
    # '1lLi' exercises the ambiguous-character error path and 'yyyy' the
    # no-instance path; enc/enc2 resolve fully (see expected output below).
    call_command('urlbrevity', '1lLi', 'yyyy', enc, enc2,
                 verbosity=2, interactive=False, stdout=stdout)
    msg = ("`1lLi` contains invalid characters: 1, L, l\n"
           "`yyyy` does not resolve to a model instance\n"
           "`{enc}` decodes into {tup1!r}\n"
           "`{enc}` resolves to a <User> instance\n"
           "`{enc}` is a short URL for `/test_user/{pk1}/`\n"
           "`{enc}` is available via the `do_redirect` view, "
           "via `urlbrevity:short`\n"
           "`{enc2}` decodes into {tup2!r}\n"
           "`{enc2}` resolves to a <User> instance\n"
           "`{enc2}` is a short URL for `/test_user/{pk2}/`\n"
           "`{enc2}` is available via the `do_redirect` view, "
           "via `urlbrevity:short`\n")
    assert stdout.getvalue() == msg.format(enc=enc, enc2=enc2, tup1=tup1,
                                           tup2=tup2, pk1=user.pk, pk2=user2.pk)
def test_extraction_warning(self):
    """makemessages should point at the offending file and line when a
    source file triggers an extraction warning."""
    os.chdir(self.test_dir)
    # Copy the problematic sample in, run extraction, then clean it up.
    shutil.copyfile('./code.sample', './code_sample.py')
    stream = StringIO()
    management.call_command('makemessages', locale=LOCALE, stdout=stream)
    os.remove('./code_sample.py')
    self.assertIn("code_sample.py:4", force_text(stream.getvalue()))
def test_command_style(self):
    """Command options can be supplied either as keyword arguments or as
    argv-style positional strings."""
    stream = StringIO()
    expected = "I don't feel like dancing Jive.\n"
    management.call_command('dance', style='Jive', stdout=stream)
    self.assertIn(expected, stream.getvalue())
    # Passing options as arguments also works (thanks argparse)
    management.call_command('dance', '--style', 'Jive', stdout=stream)
    self.assertIn(expected, stream.getvalue())
def test_dump_load_data_content(self):
    """
    Testing the dump / load with full dump of file content data
    """
    with SettingsOverride(filer_settings, FILER_DUMP_PAYLOAD=True):
        # Initialize the test data
        create_folder_structure(1, 1)
        fileobj = self.create_filer_file(Folder.objects.all()[0])
        jdata = StringIO()
        # Dump the current data
        fobj = tempfile.NamedTemporaryFile(suffix=".json", delete=False)
        call_command("dumpdata", "filer", stdout=jdata, indent=3)

        # Delete database and filesystem data and
        complete = os.path.join(fileobj.file.storage.location, fileobj.path)
        os.unlink(complete)
        fileobj.delete()

        # Dump data to json file
        fobj.write(jdata.getvalue().encode('utf-8'))
        # seek() flushes the buffered writes, so loaddata can read the
        # payload back via fobj.name below.
        fobj.seek(0)

        # Load data back
        call_command("loaddata", fobj.name, stdout=jdata)

        # Database data is restored
        self.assertEqual(Folder.objects.all().count(), 1)
        self.assertEqual(File.objects.all().count(), 1)
        self.assertEqual(File.objects.all()[0].original_filename, self.image_name)
        fileobj = File.objects.all()[0]
        complete = os.path.join(fileobj.file.storage.location, fileobj.path)
        # Filesystem data too!
        self.assertTrue(os.path.exists(complete))
def test_write_to_file(self):
    """write_to_file renders the banner followed by one stanza per human,
    omitting the Website line when none is set."""
    first = Human()
    first.name = "joe"
    first.website = "http://example.com"
    second = Human()
    second.name = "john"

    target = StringIO()
    HumansTXT().write_to_file([first, second], target, "Banner Message", "Developer")

    expected = '\n'.join((
        "Banner Message ",
        "Developer: joe ",
        "Website: http://example.com ",
        "",
        "Developer: john ",
        "",
        ""
    ))
    assert target.getvalue() == expected
def test_call_command_option_parsing_non_string_arg(self):
    """
    It should be possible to pass non-string arguments to call_command.
    """
    stream = StringIO()
    management.call_command('dance', 1, verbosity=0, stdout=stream)
    self.assertIn("You passed 1 as a positional argument.", stream.getvalue())
class GenerateTweetHtml(TestCase):
    """Tests for the generate_twitter_tweet_html management command."""

    def setUp(self):
        # Two users with 2 and 3 tweets respectively (5 tweets total).
        user_1 = factories.UserFactory(screen_name='terry')
        user_2 = factories.UserFactory(screen_name='bob')
        # These factory calls exist purely for their database side effects.
        tweets_1 = factories.TweetFactory.create_batch(2, user=user_1)
        tweets_2 = factories.TweetFactory.create_batch(3, user=user_2)
        account_1 = factories.AccountFactory(user=user_1)
        account_2 = factories.AccountFactory(user=user_2)
        self.out = StringIO()

    @patch.object(Tweet, 'save')
    def test_with_all_accounts(self, save_method):
        # With no account filter every tweet gets its HTML regenerated.
        call_command('generate_twitter_tweet_html', stdout=self.out)
        self.assertEqual(save_method.call_count, 5)
        self.assertIn('Generated HTML for 5 Tweets', self.out.getvalue())

    @patch.object(Tweet, 'save')
    def test_with_one_account(self, save_method):
        # Filtering by screen name limits the update to that user's tweets.
        call_command('generate_twitter_tweet_html', account='terry', stdout=self.out)
        self.assertEqual(save_method.call_count, 2)
        self.assertIn('Generated HTML for 2 Tweets', self.out.getvalue())

    def test_with_invalid_account(self):
        # Unknown screen names must raise CommandError.
        with self.assertRaises(CommandError):
            call_command('generate_twitter_tweet_html', account='thelma')
def test_serialization(self):
    "m2m-through models aren't serialized as m2m fields. Refs #8134"
    p = Person.objects.create(name="Bob")
    g = Group.objects.create(name="Roll")
    m = Membership.objects.create(person=p, group=g)

    pks = {"p_pk": p.pk, "g_pk": g.pk, "m_pk": m.pk}

    # JSON: the membership appears as its own object with FK fields,
    # never as an m2m list on person/group.
    out = StringIO()
    management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out)
    self.assertJSONEqual(
        out.getvalue().strip(),
        """[{"pk": %(m_pk)s, "model": "m2m_through_regress.membership", "fields": {"person": %(p_pk)s, "price": 100, "group": %(g_pk)s}}, {"pk": %(p_pk)s, "model": "m2m_through_regress.person", "fields": {"name": "Bob"}}, {"pk": %(g_pk)s, "model": "m2m_through_regress.group", "fields": {"name": "Roll"}}]""" % pks
    )

    # XML: same expectation for the XML serializer.
    out = StringIO()
    management.call_command("dumpdata", "m2m_through_regress", format="xml", indent=2, stdout=out)
    self.assertXMLEqual(out.getvalue().strip(), """
<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
  <object pk="%(m_pk)s" model="m2m_through_regress.membership">
    <field to="m2m_through_regress.person" name="person" rel="ManyToOneRel">%(p_pk)s</field>
    <field to="m2m_through_regress.group" name="group" rel="ManyToOneRel">%(g_pk)s</field>
    <field type="IntegerField" name="price">100</field>
  </object>
  <object pk="%(p_pk)s" model="m2m_through_regress.person">
    <field type="CharField" name="name">Bob</field>
  </object>
  <object pk="%(g_pk)s" model="m2m_through_regress.group">
    <field type="CharField" name="name">Roll</field>
  </object>
</django-objects>
""".strip() % pks)
def test_forum_prune_by_last_reply(self):
    """command prunes forum content based on last reply date"""
    forum = Forum.objects.all_forums().filter(role="forum")[:1][0]
    # Threads whose last reply is older than 20 days become prunable.
    forum.prune_replied_after = 20
    forum.save()

    # post old threads with recent replies
    started_on = timezone.now() - timedelta(days=30)
    for t in xrange(10):
        thread = testutils.post_thread(forum, started_on=started_on)
        testutils.reply_thread(thread)

    # post recent threads that will be preserved
    threads = [testutils.post_thread(forum) for t in xrange(10)]

    forum.synchronize()
    self.assertEqual(forum.threads, 20)
    self.assertEqual(forum.posts, 30)

    # run command
    command = pruneforums.Command()
    out = StringIO()
    command.execute(stdout=out)

    # Only the 10 fresh threads (one post each) should remain.
    forum.synchronize()
    self.assertEqual(forum.threads, 10)
    self.assertEqual(forum.posts, 10)

    # Each preserved thread must still be retrievable.
    for thread in threads:
        forum.thread_set.get(id=thread.id)

    command_output = out.getvalue().strip()
    self.assertEqual(command_output, 'Forums were pruned')
def compress_string(self, s):
    """Gzip a given bytes-like payload and return the compressed bytes.

    The GzipFile must be closed before the buffer is read so the gzip
    trailer (CRC32 + length) is written.
    """
    # gzip writes binary data; a text StringIO buffer fails on Python 3,
    # so use an in-memory bytes buffer instead (io.BytesIO also exists on
    # Python 2, keeping this backwards compatible).
    from io import BytesIO
    zbuf = BytesIO()
    zfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
    try:
        zfile.write(s)
    finally:
        # Always close so the buffer isn't left with a truncated stream.
        zfile.close()
    return zbuf.getvalue()
def test_status_mail(self):
    """
    The ``status_mail`` should return a string that matches:
    (?P<queued>\d+)/(?P<deferred>\d+)/(?P<seconds>\d+)
    """
    re_string = r"(?P<queued>\d+)/(?P<deferred>\d+)/(?P<seconds>\d+)"
    p = re.compile(re_string)
    # One queued message and three that get deferred below.
    self.queue_message(subject="test")
    self.queue_message(subject='deferred')
    self.queue_message(subject='deferred 2')
    self.queue_message(subject='deferred 3')
    models.QueuedMessage.objects.filter(
        message__subject__startswith='deferred').update(deferred=now())
    models.QueuedMessage.objects.non_deferred()
    # Sleep so the "seconds since" component of the status is at least 1.
    time.sleep(1)
    # Deferred messages are returned to the queue (nothing is sent).
    out = StringIO()
    call_command('status_mail', verbosity='0', stdout=out)
    result = out.getvalue()
    m = p.match(result)
    self.assertTrue(m, "Output does not include expected r.e.")
    v = m.groupdict()
    # NOTE(review): assertTrue(v['queued'], "1") only checks truthiness —
    # the "1" is the failure message, not an expected value; an
    # assertEqual comparison was probably intended here.
    self.assertTrue(v['queued'], "1")
    self.assertEqual(v['deferred'], "3")
    self.assertTrue(int(v['seconds']) >= 1)
def test_called_with_multiple_files(self):
    """Validating several journey files in one run reports each file as
    valid with an empty error mapping."""
    stream = StringIO()
    journey_files = [
        "{0}/valid_quit_screen_conf.yml".format(path),
        "{0}/valid_http_screen_conf.yml".format(path),
        "{0}/valid_input_screen_conf.yml".format(path),
        "{0}/sample_using_inheritance.yml".format(path),
    ]
    call_command("validate_ussd_journey", *journey_files, stdout=stream)

    expected_output = {
        name: dict(valid=True, error_message={})
        for name in journey_files
    }
    self.assertDictEqual(expected_output, json.loads(stream.getvalue()))
def test_identifier_and_toggle_from_preset(self):
    """grepdb with -s and a named preset prints the ANSI-colored header
    and one line per matching object."""
    out = StringIO()
    call_command('grepdb', 'brown', '-s', '-p', 'model_one_case_insensitive',
                 stdout=out)
    # \x1b[1m\x1b[36m = bold cyan header; \x1b[1m\x1b[32m = bold green hits.
    expected = "\x1b[1m\x1b[36m\n<class 'django_grepdb.tests.models.TestModel'> text_field\x1b[0m\n" \
               "\x1b[1m\x1b[32mTestModel object (pk=1)\x1b[0m\n" \
               "\x1b[1m\x1b[32mTestModel object (pk=2)\x1b[0m\n"
    self.assertEqual(out.getvalue(), expected)
def test_category_prune_by_last_reply(self):
    """command prunes category content based on last reply date"""
    category = Category.objects.all_categories()[:1][0]
    category.prune_replied_after = 20
    category.save()

    # seed old threads whose most recent reply is still fresh
    month_ago = timezone.now() - timedelta(days=30)
    for _ in range(10):
        stale_thread = testutils.post_thread(category, started_on=month_ago)
        testutils.reply_thread(stale_thread)

    # seed fresh threads that must survive the prune
    kept_threads = [testutils.post_thread(category) for _ in range(10)]

    category.synchronize()
    self.assertEqual(category.threads, 20)
    self.assertEqual(category.posts, 30)

    # run the prune command, capturing its report
    stream = StringIO()
    prunecategories.Command().execute(stdout=stream)

    category.synchronize()
    self.assertEqual(category.threads, 10)
    self.assertEqual(category.posts, 10)

    for kept in kept_threads:
        category.thread_set.get(id=kept.id)

    self.assertEqual(stream.getvalue().strip(), 'Categories were pruned')
def test_media_static_dirs_ignored(self):
    """makemessages skips the media and static root directories."""
    os.chdir(self.test_dir)
    stream = StringIO()
    management.call_command('makemessages', locale=[LOCALE], verbosity=2,
                            stdout=stream)
    captured = stream.getvalue()
    for expected in ("ignoring directory static_root",
                     "ignoring directory media_root"):
        self.assertIn(expected, captured)
def writeString(self, encoding):
    """
    Render the feed into an in-memory buffer using the given encoding
    and return the result as a string.
    """
    buffer = StringIO()
    self.write(buffer, encoding)
    return buffer.getvalue()
def test_field_types(self):
    """Test introspection of various Django field types"""
    out = StringIO()
    call_command(
        'inspectdb',
        table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
        stdout=out,
    )
    output = out.getvalue()

    def assertFieldType(name, definition):
        # Pull the generated "<name> = models.<...>" line from the output
        # and compare it against the expected field definition.
        out_def = re.search(r'^\s*%s = (models.*)$' % name, output, re.MULTILINE).groups()[0]
        self.assertEqual(definition, out_def)

    assertFieldType('id', "models.IntegerField(primary_key=True)")
    assertFieldType('big_int_field', "models.BigIntegerField()")
    if connection.vendor == 'mysql':
        # No native boolean type on MySQL
        assertFieldType('bool_field', "models.IntegerField()")
        assertFieldType('null_bool_field', "models.IntegerField(blank=True, null=True)")
    else:
        assertFieldType('bool_field', "models.BooleanField()")
        assertFieldType('null_bool_field', "models.NullBooleanField()")
    assertFieldType('char_field', "models.CharField(max_length=10)")
    assertFieldType('comma_separated_int_field', "models.CharField(max_length=99)")
    assertFieldType('date_field', "models.DateField()")
    assertFieldType('date_time_field', "models.DateTimeField()")
    if connection.vendor == 'sqlite':
        # Guessed arguments, see #5014
        assertFieldType('decimal_field', "models.DecimalField(max_digits=10, decimal_places=5) "
                        "# max_digits and decimal_places have been guessed, as this database handles decimal fields as float")
    else:
        assertFieldType('decimal_field', "models.DecimalField(max_digits=6, decimal_places=1)")
    assertFieldType('email_field', "models.CharField(max_length=75)")
    assertFieldType('file_field', "models.CharField(max_length=100)")
    assertFieldType('file_path_field', "models.CharField(max_length=100)")
    assertFieldType('float_field', "models.FloatField()")
    assertFieldType('int_field', "models.IntegerField()")
    if connection.vendor == 'postgresql':
        # Only PostgreSQL has a specific type
        assertFieldType('ip_address_field', "models.GenericIPAddressField()")
        assertFieldType('gen_ip_adress_field', "models.GenericIPAddressField()")
    else:
        assertFieldType('ip_address_field', "models.CharField(max_length=15)")
        assertFieldType('gen_ip_adress_field', "models.CharField(max_length=39)")
    if connection.vendor == 'sqlite':
        assertFieldType('pos_int_field', "models.PositiveIntegerField()")
        assertFieldType('pos_small_int_field', "models.PositiveSmallIntegerField()")
    else:
        # 'unsigned' property undetected on other backends
        assertFieldType('pos_int_field', "models.IntegerField()")
        if connection.vendor == 'postgresql':
            assertFieldType('pos_small_int_field', "models.SmallIntegerField()")
        else:
            assertFieldType('pos_small_int_field', "models.IntegerField()")
    assertFieldType('slug_field', "models.CharField(max_length=50)")
    if connection.vendor in ('sqlite', 'postgresql'):
        assertFieldType('small_int_field', "models.SmallIntegerField()")
    else:
        assertFieldType('small_int_field', "models.IntegerField()")
    assertFieldType('text_field', "models.TextField()")
    assertFieldType('time_field', "models.TimeField()")
    assertFieldType('url_field', "models.CharField(max_length=200)")
def test_update_adopters_command(self):
    """update_adopters reports success and creates the expected adopter
    records."""
    out = StringIO()
    call_command('update_adopters', stdout=out)
    self.assertIn("Success", out.getvalue())
    # Removed a dead `Adopter.objects.all()` statement: querysets are lazy,
    # so the bare expression never hit the database and had no effect.
    self.assertTrue(
        Adopter.objects.filter(name='Rice University').exists())
def test_delete_expired_thread(self):
    """test deletes one expired thread tracker, but spares the other"""
    thread = testutils.post_thread(self.category)

    # Read recently enough to stay inside the cutoff window.
    existing = ThreadRead.objects.create(
        user=self.user_a,
        category=self.category,
        thread=thread,
        last_read_on=timezone.now() - timedelta(days=settings.MISAGO_READTRACKER_CUTOFF / 4)
    )
    # Read long past the cutoff; the command should purge this one.
    deleted = ThreadRead.objects.create(
        user=self.user_b,
        category=self.category,
        thread=thread,
        last_read_on=timezone.now() - timedelta(days=settings.MISAGO_READTRACKER_CUTOFF * 2)
    )

    command = clearreadtracker.Command()
    out = StringIO()
    call_command(command, stdout=out)

    command_output = out.getvalue().strip()
    self.assertEqual(command_output, "Deleted 1 expired entries")

    # The recent tracker survives; the expired one is gone.
    ThreadRead.objects.get(pk=existing.pk)
    with self.assertRaises(ThreadRead.DoesNotExist):
        ThreadRead.objects.get(pk=deleted.pk)
def setUp(self):
    # Remember the pre-test value so tearDown can restore or remove it.
    self.old_DJANGO_AUTO_COMPLETE = os.environ.get('DJANGO_AUTO_COMPLETE')
    # Pretend bash invoked us for completion.
    os.environ['DJANGO_AUTO_COMPLETE'] = '1'
    # Redirect stdout into a buffer so the completion output can be read back.
    self.output = StringIO()
    self.old_stdout = sys.stdout
    sys.stdout = self.output
def test_process_nrsc(self):
    """process_sar_nrcs should report success for a known ASAR scene."""
    stream = StringIO()
    scene = '/vagrant/shared/test_data/asar/ASA_WSM_1PNPDK20081110_205618_000000922073_00401_35024_0844.N1'
    call_command('process_sar_nrcs', scene, stdout=stream)
    self.assertIn('Successfully', stream.getvalue())
def setUp(self):
    # Capture everything the command prints.
    self.out = StringIO()
    # NOTE(review): sys.stdout is replaced but never restored in this
    # method; confirm a tearDown elsewhere puts the real stdout back.
    sys.stdout = self.out
    call_command("load_skills", stdout=self.out)
def startTest(self, test):
    # Attach a fresh in-memory stream handler per test so SQL debug
    # logging emitted during the test can be inspected afterwards.
    self.debug_sql_stream = StringIO()
    self.handler = logging.StreamHandler(self.debug_sql_stream)
    self.logger.addHandler(self.handler)
    super(DebugSQLTextTestResult, self).startTest(test)
def download(modeladmin, request, selected):
    # Admin action: stream a fixed in-memory "file" back to the client.
    # NOTE(review): StringIO holds text while FileWrapper usually wraps a
    # binary file-like — confirm the response encoding is acceptable here.
    buf = StringIO('This is the content of the file')
    return StreamingHttpResponse(FileWrapper(buf))
def test_no_color(self):
    """--no-color must strip every ANSI escape sequence from show_urls
    output, even when color support is forced on."""
    with force_color_support:
        stream = StringIO()
        call_command('show_urls', '--no-color', stdout=stream)
        self.output = stream.getvalue()
        self.assertNotIn('\x1b', self.output)
def call_cmd(**opts):
    """Run the make_diagram command with the given options and return
    whatever it wrote to stdout."""
    captured = StringIO()
    call_command('make_diagram', stdout=captured, **opts)
    return captured.getvalue()
class TestSAMLCommand(CacheIsolationTestCase):
    """
    Test django management command for fetching saml metadata.
    """

    def setUp(self):
        """
        Setup operations for saml configurations. these operations contain
        creation of SAMLConfiguration and SAMLProviderConfig records in database.
        """
        super(TestSAMLCommand, self).setUp()

        self.stdout = StringIO()

        # We are creating SAMLConfiguration instance here so that there is always at-least one
        # disabled saml configuration instance, this is done to verify that disabled configurations are
        # not processed.
        SAMLConfigurationFactory.create(enabled=False, site__domain='testserver.fake', site__name='testserver.fake')
        SAMLProviderConfigFactory.create(site__domain='testserver.fake', site__name='testserver.fake')

    def __create_saml_configurations__(self, saml_config=None, saml_provider_config=None):
        """
        Helper method to create SAMLConfiguration and AMLProviderConfig.
        """
        SAMLConfigurationFactory.create(enabled=True, **(
            saml_config or {
                'site__domain': 'testserver.fake',
                'site__name': 'testserver.fake'
            }
        ))
        SAMLProviderConfigFactory.create(enabled=True, **(
            saml_provider_config or {
                'site__domain': 'testserver.fake',
                'site__name': 'testserver.fake'
            }
        ))

    def test_raises_command_error_for_invalid_arguments(self):
        """
        Test that management command raises `CommandError` with a proper message in case of
        invalid command arguments.

        This test would fail with an error if ValueError is raised.
        """
        # Call `saml` command without any argument so that it raises a CommandError
        with self.assertRaisesMessage(CommandError, "Command can only be used with '--pull' option."):
            call_command("saml")

        # Call `saml` command without any argument so that it raises a CommandError
        with self.assertRaisesMessage(CommandError, "Command can only be used with '--pull' option."):
            call_command("saml", pull=False)

    def test_no_saml_configuration(self):
        """
        Test that management command completes without errors and logs correct information when no
        saml configurations are enabled/present.
        """
        expected = "\nDone.\n1 provider(s) found in database.\n1 skipped and 0 attempted.\n0 updated and 0 failed.\n"
        call_command("saml", pull=True, stdout=self.stdout)
        self.assertIn(expected, self.stdout.getvalue())

    @mock.patch("requests.get", mock_get())
    def test_fetch_saml_metadata(self):
        """
        Test that management command completes without errors and logs correct information when one or more
        saml configurations are enabled.
        """
        # Create enabled configurations
        self.__create_saml_configurations__()

        expected = "\nDone.\n1 provider(s) found in database.\n0 skipped and 1 attempted.\n1 updated and 0 failed.\n"
        call_command("saml", pull=True, stdout=self.stdout)
        self.assertIn(expected, self.stdout.getvalue())

    @mock.patch("requests.get", mock_get(status_code=404))
    def test_fetch_saml_metadata_failure(self):
        """
        Test that management command completes with proper message for errors and logs correct information.
        """
        # Create enabled configurations
        self.__create_saml_configurations__()

        expected = "\nDone.\n1 provider(s) found in database.\n0 skipped and 1 attempted.\n0 updated and 1 failed.\n"
        with self.assertRaisesRegex(CommandError, r"HTTPError: 404 Client Error"):
            call_command("saml", pull=True, stdout=self.stdout)
        self.assertIn(expected, self.stdout.getvalue())

    @mock.patch("requests.get", mock_get(status_code=200))
    def test_fetch_multiple_providers_data(self):
        """
        Test that management command completes with proper message for error or success and logs correct
        information when there are multiple providers with their data.
        """
        # Create enabled configurations
        self.__create_saml_configurations__()

        # Add another set of configurations
        self.__create_saml_configurations__(
            saml_config={
                "site__domain": "second.testserver.fake",
                "site__name": "testserver.fake",
            },
            saml_provider_config={
                "site__domain": "second.testserver.fake",
                "site__name": "testserver.fake",
                "slug": "second-test-shib",
                "entity_id": "https://idp.testshib.org/idp/another-shibboleth",
                "metadata_source": "https://www.testshib.org/metadata/another-testshib-providers.xml",
            }
        )

        # Add another set of configurations
        self.__create_saml_configurations__(
            saml_config={
                "site__domain": "third.testserver.fake",
                "site__name": "testserver.fake",
            },
            saml_provider_config={
                "site__domain": "third.testserver.fake",
                "site__name": "testserver.fake",
                "slug": "third-test-shib",
                # Note: This entity id will not be present in returned response and will cause failed update.
                "entity_id": "https://idp.testshib.org/idp/non-existent-shibboleth",
                "metadata_source": "https://www.testshib.org/metadata/third/testshib-providers.xml",
            }
        )

        expected = '\n3 provider(s) found in database.\n0 skipped and 3 attempted.\n2 updated and 1 failed.\n'
        with self.assertRaisesRegex(CommandError, r"MetadataParseError: Can't find EntityDescriptor for entityID"):
            call_command("saml", pull=True, stdout=self.stdout)
        self.assertIn(expected, self.stdout.getvalue())

        # Now add a fourth configuration, and indicate that it should not be included in the update
        self.__create_saml_configurations__(
            saml_config={
                "site__domain": "fourth.testserver.fake",
                "site__name": "testserver.fake",
            },
            saml_provider_config={
                "site__domain": "fourth.testserver.fake",
                "site__name": "testserver.fake",
                "slug": "fourth-test-shib",
                "automatic_refresh_enabled": False,
                # Note: This invalid entity id will not be present in the refresh set
                "entity_id": "https://idp.testshib.org/idp/fourth-shibboleth",
                "metadata_source": "https://www.testshib.org/metadata/fourth/testshib-providers.xml",
            }
        )

        # Four configurations -- one will be skipped and three attempted, with similar results.
        expected = '\nDone.\n4 provider(s) found in database.\n1 skipped and 3 attempted.\n0 updated and 1 failed.\n'
        with self.assertRaisesRegex(CommandError, r"MetadataParseError: Can't find EntityDescriptor for entityID"):
            call_command("saml", pull=True, stdout=self.stdout)
        self.assertIn(expected, self.stdout.getvalue())

    @mock.patch("requests.get")
    def test_saml_request_exceptions(self, mocked_get):
        """
        Test that management command errors out in case of fatal exceptions instead of failing silently.
        """
        # Create enabled configurations
        self.__create_saml_configurations__()

        mocked_get.side_effect = exceptions.SSLError

        expected = "\nDone.\n1 provider(s) found in database.\n0 skipped and 1 attempted.\n0 updated and 1 failed.\n"
        with self.assertRaisesRegex(CommandError, "SSLError:"):
            call_command("saml", pull=True, stdout=self.stdout)
        self.assertIn(expected, self.stdout.getvalue())

        mocked_get.side_effect = exceptions.ConnectionError

        with self.assertRaisesRegex(CommandError, "ConnectionError:"):
            call_command("saml", pull=True, stdout=self.stdout)
        self.assertIn(expected, self.stdout.getvalue())

        mocked_get.side_effect = exceptions.HTTPError

        with self.assertRaisesRegex(CommandError, "HTTPError:"):
            call_command("saml", pull=True, stdout=self.stdout)
        self.assertIn(expected, self.stdout.getvalue())

    @mock.patch("requests.get", mock_get(status_code=200))
    def test_saml_parse_exceptions(self):
        """
        Test that management command errors out in case of fatal exceptions instead of failing silently.
        """
        # Create enabled configurations, this configuration will raise MetadataParseError.
        self.__create_saml_configurations__(
            saml_config={
                "site__domain": "third.testserver.fake",
            },
            saml_provider_config={
                "site__domain": "third.testserver.fake",
                "slug": "third-test-shib",
                # Note: This entity id will not be present in returned response and will cause failed update.
                "entity_id": "https://idp.testshib.org/idp/non-existent-shibboleth",
                "metadata_source": "https://www.testshib.org/metadata/third/testshib-providers.xml",
            }
        )

        expected = "\nDone.\n2 provider(s) found in database.\n1 skipped and 1 attempted.\n0 updated and 1 failed.\n"
        with self.assertRaisesRegex(CommandError, "MetadataParseError: Can't find EntityDescriptor for entityID"):
            call_command("saml", pull=True, stdout=self.stdout)
        self.assertIn(expected, self.stdout.getvalue())

    @mock.patch("requests.get")
    def test_xml_parse_exceptions(self, mocked_get):
        """
        Test that management command errors out in case of fatal exceptions instead of failing silently.
        """
        # An empty body with HTTP 200 forces the XML parser to fail.
        response = Response()
        response._content = ""  # pylint: disable=protected-access
        response.status_code = 200
        mocked_get.return_value = response

        # create enabled configuration
        self.__create_saml_configurations__()

        expected = "\nDone.\n1 provider(s) found in database.\n0 skipped and 1 attempted.\n0 updated and 1 failed.\n"
        with self.assertRaisesRegex(CommandError, "XMLSyntaxError:"):
            call_command("saml", pull=True, stdout=self.stdout)
        self.assertIn(expected, self.stdout.getvalue())
def setUp(self): simple_system_check.kwargs = None tagged_system_check.kwargs = None self.old_stdout, self.old_stderr = sys.stdout, sys.stderr sys.stdout, sys.stderr = StringIO(), StringIO()
class BashCompletionTests(unittest.TestCase): """ Testing the Python level bash completion code. This requires setting up the environment as if we got passed data from bash. """ def setUp(self): self.old_DJANGO_AUTO_COMPLETE = os.environ.get('DJANGO_AUTO_COMPLETE') os.environ['DJANGO_AUTO_COMPLETE'] = '1' self.output = StringIO() self.old_stdout = sys.stdout sys.stdout = self.output def tearDown(self): sys.stdout = self.old_stdout if self.old_DJANGO_AUTO_COMPLETE: os.environ['DJANGO_AUTO_COMPLETE'] = self.old_DJANGO_AUTO_COMPLETE else: del os.environ['DJANGO_AUTO_COMPLETE'] def _user_input(self, input_str): os.environ['COMP_WORDS'] = input_str os.environ['COMP_CWORD'] = str(len(input_str.split()) - 1) sys.argv = input_str.split(' ') def _run_autocomplete(self): util = ManagementUtility(argv=sys.argv) try: util.autocomplete() except SystemExit: pass return self.output.getvalue().strip().split('\n') def test_django_admin_py(self): "django_admin.py will autocomplete option flags" self._user_input('django-admin.py sqlall --v') output = self._run_autocomplete() self.assertEqual(output, ['--verbosity=']) def test_manage_py(self): "manage.py will autocomplete option flags" self._user_input('manage.py sqlall --v') output = self._run_autocomplete() self.assertEqual(output, ['--verbosity=']) def test_custom_command(self): "A custom command can autocomplete option flags" self._user_input('django-admin.py test_command --l') output = self._run_autocomplete() self.assertEqual(output, ['--list']) def test_subcommands(self): "Subcommands can be autocompleted" self._user_input('django-admin.py sql') output = self._run_autocomplete() self.assertEqual(output, [ 'sql sqlall sqlclear sqlcustom sqldropindexes sqlflush sqlindexes sqlmigrate sqlsequencereset' ]) def test_help(self): "No errors, just an empty list if there are no autocomplete options" self._user_input('django-admin.py help --') output = self._run_autocomplete() self.assertEqual(output, ['']) def test_runfcgi(self): 
"Command arguments will be autocompleted" self._user_input('django-admin.py runfcgi h') output = self._run_autocomplete() self.assertEqual(output, ['host=']) def test_app_completion(self): "Application names will be autocompleted for an AppCommand" self._user_input('django-admin.py sqlall a') output = self._run_autocomplete() a_labels = sorted(app_config.label for app_config in apps.get_app_configs() if app_config.label.startswith('a')) self.assertEqual(output, a_labels)
def setUp(self): self.out = StringIO() self.handler = logging.StreamHandler(self.out) logging.root.addHandler(self.handler) Student.objects.create(id=1, first_name='Test', last_name='Student')
def setUp(self): self.old_stdout, self.old_stderr = sys.stdout, sys.stderr self.stdout, self.stderr = StringIO(), StringIO() sys.stdout, sys.stderr = self.stdout, self.stderr
def test_entire_command(self): out = StringIO() call_command('json_to_xml', valid_json_path, stdout=out)
def test_superuser_creation_cancelled_prompt(self): stdout = StringIO() call_command('createtenant', 'tenant', stdout=stdout, interactive=True) stdout.seek(0) self.assertNotIn('Superuser created successfully.', stdout.read()) Tenant.objects.get(name='tenant').delete()
def test_entire_command(self): out = StringIO() call_command('process_xslt', html_path, xslt_path, '--save', stdout=out)
def templatize(src, origin=None):
    """
    Turns a Django template into something that is understood by xgettext. It
    does so by translating the Django translation tags into standard gettext
    function invocations.
    """
    from django.conf import settings
    from django.template import (Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK,
        TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK)
    src = force_text(src, settings.FILE_CHARSET)
    out = StringIO('')
    # State machine flags: inside a {% trans %}/{% blocktrans %} block,
    # inside the plural branch, and inside a {% comment %} block.
    message_context = None
    intrans = False
    inplural = False
    trimmed = False
    singular = []
    plural = []
    incomment = False
    comment = []
    # Maps a source line number to the translator comments found on it.
    lineno_comment_map = {}
    comment_lineno_cache = None

    def join_tokens(tokens, trim=False):
        # Concatenate collected token strings, optionally trimming
        # whitespace (the {% blocktrans trimmed %} behavior).
        message = ''.join(tokens)
        if trim:
            message = trim_whitespace(message)
        return message

    for t in Lexer(src, origin).tokenize():
        if incomment:
            # Inside {% comment %} ... {% endcomment %}: buffer content and
            # emit translator comments when the block closes.
            if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
                content = ''.join(comment)
                translators_comment_start = None
                for lineno, line in enumerate(content.splitlines(True)):
                    if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
                        translators_comment_start = lineno
                for lineno, line in enumerate(content.splitlines(True)):
                    if translators_comment_start is not None and lineno >= translators_comment_start:
                        out.write(' # %s' % line)
                    else:
                        out.write(' #\n')
                incomment = False
                comment = []
            else:
                comment.append(t.contents)
        elif intrans:
            # Inside a translation block: collect singular/plural message
            # parts until the matching end tag.
            if t.token_type == TOKEN_BLOCK:
                endbmatch = endblock_re.match(t.contents)
                pluralmatch = plural_re.match(t.contents)
                if endbmatch:
                    if inplural:
                        # Emit an ngettext/npgettext invocation for xgettext.
                        if message_context:
                            out.write(' npgettext(%r, %r, %r,count) ' % (message_context, join_tokens(singular, trimmed), join_tokens(plural, trimmed)))
                        else:
                            out.write(' ngettext(%r, %r, count) ' % (join_tokens(singular, trimmed), join_tokens(plural, trimmed)))
                        # Blank out the raw parts so line numbers stay aligned.
                        for part in singular:
                            out.write(blankout(part, 'S'))
                        for part in plural:
                            out.write(blankout(part, 'P'))
                    else:
                        if message_context:
                            out.write(' pgettext(%r, %r) ' % (message_context, join_tokens(singular, trimmed)))
                        else:
                            out.write(' gettext(%r) ' % join_tokens(singular, trimmed))
                        for part in singular:
                            out.write(blankout(part, 'S'))
                    # Reset the translation-block state.
                    message_context = None
                    intrans = False
                    inplural = False
                    singular = []
                    plural = []
                elif pluralmatch:
                    inplural = True
                else:
                    # Nested block tags are illegal inside a translation block.
                    filemsg = ''
                    if origin:
                        filemsg = 'file %s, ' % origin
                    raise SyntaxError(
                        "Translation blocks must not include other block tags: %s (%sline %d)" % (t.contents, filemsg, t.lineno))
            elif t.token_type == TOKEN_VAR:
                # Template variables become %(name)s placeholders.
                if inplural:
                    plural.append('%%(%s)s' % t.contents)
                else:
                    singular.append('%%(%s)s' % t.contents)
            elif t.token_type == TOKEN_TEXT:
                # Literal text: escape lone percent signs for gettext.
                contents = one_percent_re.sub('%%', t.contents)
                if inplural:
                    plural.append(contents)
                else:
                    singular.append(contents)
        else:
            # Handle comment tokens (`{# ... #}`) plus other constructs on
            # the same line:
            if comment_lineno_cache is not None:
                cur_lineno = t.lineno + t.contents.count('\n')
                if comment_lineno_cache == cur_lineno:
                    if t.token_type != TOKEN_COMMENT:
                        # A translator comment that isn't the last item on its
                        # line gets dropped with a warning.
                        for c in lineno_comment_map[comment_lineno_cache]:
                            filemsg = ''
                            if origin:
                                filemsg = 'file %s, ' % origin
                            warn_msg = (
                                "The translator-targeted comment '%s' "
                                "(%sline %d) was ignored, because it wasn't the last item "
                                "on the line.") % (c, filemsg, comment_lineno_cache)
                            warnings.warn(warn_msg, TranslatorCommentWarning)
                        lineno_comment_map[comment_lineno_cache] = []
                else:
                    out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
                comment_lineno_cache = None
            if t.token_type == TOKEN_BLOCK:
                imatch = inline_re.match(t.contents)
                bmatch = block_re.match(t.contents)
                cmatches = constant_re.findall(t.contents)
                if imatch:
                    # {% trans "literal" %} (optionally with a context).
                    g = imatch.group(1)
                    if g[0] == '"':
                        g = g.strip('"')
                    elif g[0] == "'":
                        g = g.strip("'")
                    g = one_percent_re.sub('%%', g)
                    if imatch.group(2):
                        # A context is provided
                        context_match = context_re.match(imatch.group(2))
                        message_context = context_match.group(1)
                        if message_context[0] == '"':
                            message_context = message_context.strip('"')
                        elif message_context[0] == "'":
                            message_context = message_context.strip("'")
                        out.write(' pgettext(%r, %r) ' % (message_context, g))
                        message_context = None
                    else:
                        out.write(' gettext(%r) ' % g)
                elif bmatch:
                    # Opening {% blocktrans %}: enter the trans state.
                    for fmatch in constant_re.findall(t.contents):
                        out.write(' _(%s) ' % fmatch)
                    if bmatch.group(1):
                        # A context is provided
                        context_match = context_re.match(bmatch.group(1))
                        message_context = context_match.group(1)
                        if message_context[0] == '"':
                            message_context = message_context.strip('"')
                        elif message_context[0] == "'":
                            message_context = message_context.strip("'")
                    intrans = True
                    inplural = False
                    trimmed = 'trimmed' in t.split_contents()
                    singular = []
                    plural = []
                elif cmatches:
                    for cmatch in cmatches:
                        out.write(' _(%s) ' % cmatch)
                elif t.contents == 'comment':
                    incomment = True
                else:
                    out.write(blankout(t.contents, 'B'))
            elif t.token_type == TOKEN_VAR:
                # {{ var|filter }}: extract _() constants from filter args.
                parts = t.contents.split('|')
                cmatch = constant_re.match(parts[0])
                if cmatch:
                    out.write(' _(%s) ' % cmatch.group(1))
                for p in parts[1:]:
                    if p.find(':_(') >= 0:
                        out.write(' %s ' % p.split(':', 1)[1])
                    else:
                        out.write(blankout(p, 'F'))
            elif t.token_type == TOKEN_COMMENT:
                # Remember translator comments; they are emitted when the
                # next token confirms they ended their line.
                if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
                    lineno_comment_map.setdefault(t.lineno, []).append(t.contents)
                    comment_lineno_cache = t.lineno
            else:
                out.write(blankout(t.contents, 'X'))
    return force_str(out.getvalue())
def test_command_output(self): out = StringIO() call_command('populate_figures_metrics', '--no-delay', stdout=out) self.assertEqual('', out.getvalue())
def test_entire_command(self): out = StringIO() call_command('validate_xml', valid_xml_path, rng_schema_path, stdout=out)
def test_remove_expired_management_command(self): out = StringIO() call_command('remove_expired', stdout=out) self.assertIn("", out.getvalue())
def test_dumpdata_uses_default_manager(self): """ Regression for #11286 Dumpdata honors the default manager. Dump the current contents of the database as a JSON fixture """ management.call_command( 'loaddata', 'animal.xml', verbosity=0, ) management.call_command( 'loaddata', 'sequence.json', verbosity=0, ) animal = Animal( name='Platypus', latin_name='Ornithorhynchus anatinus', count=2, weight=2.2, ) animal.save() out = StringIO() management.call_command( 'dumpdata', 'fixtures_regress.animal', format='json', stdout=out, ) # Output order isn't guaranteed, so check for parts data = out.getvalue() # Get rid of artifacts like '000000002' to eliminate the differences # between different Python versions. data = re.sub('0{6,}[0-9]', '', data) animals_data = sorted([ { "pk": 1, "model": "fixtures_regress.animal", "fields": { "count": 3, "weight": 1.2, "name": "Lion", "latin_name": "Panthera leo" } }, { "pk": 10, "model": "fixtures_regress.animal", "fields": { "count": 42, "weight": 1.2, "name": "Emu", "latin_name": "Dromaius novaehollandiae" } }, { "pk": animal.pk, "model": "fixtures_regress.animal", "fields": { "count": 2, "weight": 2.2, "name": "Platypus", "latin_name": "Ornithorhynchus anatinus" } }, ], key=lambda x: x["pk"]) data = sorted(json.loads(data), key=lambda x: x["pk"]) self.maxDiff = 1024 self.assertEqual(data, animals_data)
def test_change_content_language(self):
    """The change_content_language command moves all French content to Spanish."""
    # Register three site languages; content will be translated into French
    # and then relabelled as Spanish by the command under test.
    self.english = SiteLanguageRelation.objects.create(
        language_setting=Languages.for_site(self.main.get_site()),
        locale='en',
        is_active=True)
    self.french = SiteLanguageRelation.objects.create(
        language_setting=Languages.for_site(self.main.get_site()),
        locale='fr',
        is_active=True)
    self.spanish = SiteLanguageRelation.objects.create(
        language_setting=Languages.for_site(self.main.get_site()),
        locale='es',
        is_active=True)
    self.yourmind2 = self.mk_section(
        SectionIndexPage.objects.child_of(self.main).first(),
        title='Your Mind')
    self.yourmind3 = self.mk_section(
        SectionIndexPage.objects.child_of(
            self.main).first(),
        title='Your mind 2')
    self.tag = self.mk_tag(
        SectionIndexPage.objects.child_of(self.main).first())
    self.tag2 = self.mk_tag(
        SectionIndexPage.objects.child_of(self.main).first())
    # make articles of different sections
    self.mk_articles(self.yourmind2, count=5)
    self.mk_articles(self.yourmind3, count=5)
    # translate the article into those languages
    self.mk_section_translation(self.yourmind2, self.french)
    self.mk_tag_translation(self.tag, self.french)
    # Only every second article gets a French translation.
    articles = ArticlePage.objects.all()[1::2]
    for article in articles:
        self.mk_article_translation(article, self.french)
    fr_pk = self.french.pk
    sp_pk = self.spanish.pk
    # Snapshot the French titles before the command runs.
    fr_articles = [article.title for article
                   in ArticlePage.objects.filter(language=self.french)]
    fr_tags = [tag.title for tag
               in Tag.objects.filter(language=self.french)]
    fr_sections = [section.title for section
                   in SectionPage.objects.filter(language=self.french)]
    out = StringIO()
    call_command(
        'change_content_language',
        fr_pk,
        sp_pk,
        stdout=out
    )
    # The command is expected to run silently.
    self.assertEqual('', out.getvalue())
    # test that only the correct articles are translated
    sp_articles = [article.title for article
                   in ArticlePage.objects.filter(language=self.spanish)]
    sp_tags = [tag.title for tag
               in Tag.objects.filter(language=self.spanish)]
    sp_sections = [section.title for section
                   in SectionPage.objects.filter(language=self.spanish)]
    self.assertEqual(sp_articles, fr_articles)
    self.assertEqual(sp_tags, fr_tags)
    self.assertEqual(sp_sections, fr_sections)
def test_spider_management_command(self): out = StringIO() call_command('spider', stdout=out) self.assertIn("", out.getvalue())
def setUp(self): super(ConsoleBackendTests, self).setUp() self.__stdout = sys.stdout self.stream = sys.stdout = StringIO()
def test__doesnt_remove_data_from_other_sites(self):
    """Deleting one site's data must leave other sites' users and content intact."""
    # Create a user that belongs to the second site.
    user2 = User.objects.create_user(
        'test2', '*****@*****.**', 'test2')
    user2.profile.site = self.main2.get_site()
    user2.profile.save()
    # Create content
    section_index_2 = SectionIndexPage.objects.child_of(self.main2).first()
    section = self.mk_section(section_index_2, title='Section 2')
    article = self.mk_article(
        parent=section, title='Article 2')
    # Note: this 'site' attr is the django site, not the wagtail one
    MoloComment.objects.create(
        content_type=self.content_type,
        site=Site.objects.get_current(),
        object_pk=article.pk,
        user=user2,
        comment="Here's a 2nd comment",
        submit_date=timezone.now())
    form_2 = MoloFormPage(
        title='Form 2',
        slug='form-2',
    )
    section_index_2.add_child(instance=form_2)
    form_2.save_revision().publish()
    MoloFormSubmission.objects.create(
        form_data='{"checkbox-question": ["option 1", "option 2"]}',
        user=user2,
        page_id=form_2.pk
    )
    profile2 = user2.profile
    sq_index_2 = SecurityQuestionIndexPage.objects.child_of(
        self.main2).first()
    sec_q = SecurityQuestion(title='Sec Question 2')
    sq_index_2.add_child(instance=sec_q)
    sec_q.save_revision().publish()
    SecurityAnswer.objects.create(
        user=profile2, question=sec_q, answer="Sec Answer 2")
    out = StringIO()
    # Run the cleanup against site 2's profiles with --commit.
    call_command(
        'remove_deprecated_site_data',
        profile2.site.pk,
        '--commit',
        stdout=out
    )
    # Site-2 data (comments, submissions, answers, users, profiles) must be
    # gone, while site-1 data survives.
    comments = MoloComment.objects.all()
    self.assertEqual(comments.count(), 1)
    self.assertNotEqual(comments.first().user, user2)
    submissions = MoloFormSubmission.objects.all()
    self.assertEqual(submissions.count(), 1)
    self.assertNotEqual(submissions.first().user, user2)
    answers = SecurityAnswer.objects.all()
    self.assertEqual(answers.count(), 1)
    self.assertNotEqual(answers.first().user, user2)
    users = User.objects.filter(username__contains='test')
    self.assertEqual(users.count(), 1)
    self.assertNotEqual(users.first(), user2)
    self.assertEqual(UserProfile.objects.filter(
        site=profile2.site.pk).count(), 0)
    self.assertIn('Found 1 profiles for site 5', out.getvalue())
    self.assertIn('Found 0 staff profiles', out.getvalue())
def test_bom_rejection(self): with self.assertRaises(CommandError) as cm: call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO()) self.assertIn("file has a BOM (Byte Order Mark)", cm.exception.args[0]) self.assertFalse(os.path.exists(self.MO_FILE))
def flush_mailbox(self): self.stream = sys.stdout = StringIO()
def test_fuzzy_compiling(self): with override_settings(LOCALE_PATHS=[os.path.join(self.test_dir, 'locale')]): call_command('compilemessages', locale=[self.LOCALE], fuzzy=True, stdout=StringIO()) with translation.override(self.LOCALE): self.assertEqual(ugettext('Lenin'), force_text('Ленин')) self.assertEqual(ugettext('Vodka'), force_text('Водка'))
def test_percent_symbol_in_po_file(self): call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO()) self.assertTrue(os.path.exists(self.MO_FILE))
def test_multiple_locales_excluded_with_locale(self): call_command('compilemessages', locale=['en', 'fr', 'it'], exclude=['fr', 'it'], stdout=StringIO()) self.assertTrue(os.path.exists(self.MO_FILE % 'en')) self.assertFalse(os.path.exists(self.MO_FILE % 'fr')) self.assertFalse(os.path.exists(self.MO_FILE % 'it'))
def test_app_locale_compiled(self): call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO()) self.assertTrue(os.path.exists(self.PROJECT_MO_FILE)) self.assertTrue(os.path.exists(self.APP_MO_FILE))