def setUp(self):
    super(TestSearchView, self).setUp()
    # Set up some sample data
    # 4 happy, 3 sad.
    # 1 Windows XP, 3 Linux, 2 Windows 7, 1 unspecified platform
    now = datetime.now()
    # The dashboard by default shows the last week of data, so
    # these need to be relative to today. The alternative is that
    # every test gives an explicit date range, and that is
    # annoying and verbose.
    items = [
        # happy, platform, locale, description, created
        (True, '', 'en-US', 'apple', now - timedelta(days=6)),
        (True, 'Windows 7', 'es', 'banana', now - timedelta(days=5)),
        (True, 'Linux', 'en-US', 'orange', now - timedelta(days=4)),
        (True, 'Linux', 'en-US', 'apple', now - timedelta(days=3)),
        (False, 'Windows XP', 'en-US', 'banana', now - timedelta(days=2)),
        (False, 'Windows 7', 'en-US', 'orange', now - timedelta(days=1)),
        (False, 'Linux', 'es', u'\u2713 apple', now - timedelta(days=0)),
    ]
    for happy, platform, locale, description, created in items:
        # We don't need to keep this around, just need to create it.
        response(happy=happy, platform=platform, locale=locale,
                 description=description, created=created, save=True)

    self.refresh()

    # Create analyzer and log analyzer in
    jane = user(email='*****@*****.**', save=True)
    profile(user=jane, save=True)
    jane.groups.add(Group.objects.get(name='analyzers'))

    self.client_login_user(jane)

def setUp(self):
    super(TestOccurrencesView, self).setUp()
    # Set up some sample data
    items = [
        # happy, locale, description
        (True, 'en-US', 'apple banana orange pear'),
        (True, 'en-US', 'orange pear kiwi'),
        (True, 'en-US', 'chocolate chocolate yum'),
        (False, 'en-US', 'apple banana grapefruit'),
        # This one doesn't create bigrams because there aren't enough words
        (False, 'en-US', 'orange'),
        # This one shouldn't show up
        (False, 'es', 'apple banana'),
    ]
    for happy, locale, description in items:
        response(
            happy=happy, locale=locale, description=description, save=True)

    self.refresh()

    # Create analyzer and log analyzer in
    jane = user(email='*****@*****.**', save=True)
    profile(user=jane, save=True)
    jane.groups.add(Group.objects.get(name='analyzers'))

    self.client_login_user(jane)

def setUp(self):
    super(TestDashboardView, self).setUp()
    # Set up some sample data
    # 4 happy, 3 sad.
    # 1 Windows XP, 3 Linux, 2 Windows 7, 1 unspecified platform
    now = datetime.now()
    # The dashboard by default shows the last week of data, so
    # these need to be relative to today. The alternative is that
    # every test gives an explicit date range, and that is
    # annoying and verbose.
    items = [
        # happy, platform, locale, description, created
        (True, '', 'en-US', 'apple', now - timedelta(days=6)),
        (True, 'Windows 7', 'es', 'banana', now - timedelta(days=5)),
        (True, 'Linux', 'en-US', 'orange', now - timedelta(days=4)),
        (True, 'Linux', 'en-US', 'apple', now - timedelta(days=3)),
        (False, 'Windows XP', 'en-US', 'banana', now - timedelta(days=2)),
        (False, 'Windows 7', 'en-US', 'orange', now - timedelta(days=1)),
        (False, 'Linux', 'es', u'\u2713 apple', now - timedelta(days=0)),
    ]
    for happy, platform, locale, description, created in items:
        # We don't need to keep this around, just need to create it.
        response(happy=happy, platform=platform, locale=locale,
                 description=description, created=created, save=True)

    self.refresh()

def setUp(self):
    super(TestOccurrencesReportView, self).setUp()
    # Set up some sample data
    items = [
        # happy, locale, description
        (True, 'en-US', 'apple banana orange pear'),
        (True, 'en-US', 'orange pear kiwi'),
        (True, 'en-US', 'chocolate chocolate yum'),
        (False, 'en-US', 'apple banana grapefruit'),
        # This one doesn't create bigrams because there aren't enough words
        (False, 'en-US', 'orange'),
        # This one shouldn't show up
        (False, 'es', 'apple banana'),
    ]
    for happy, locale, description in items:
        response(
            happy=happy, locale=locale, description=description, save=True)

    self.refresh()

    # Create analyzer and log analyzer in
    jane = user(email='*****@*****.**', save=True)
    profile(user=jane, save=True)
    jane.groups.add(Group.objects.get(name='analyzers'))

    self.client_login_user(jane)

def test_spanish_with_dennis_and_existing_translations(self):
    """Response should pick up existing translation"""
    existing_resp = response(
        locale=u'es',
        product=u'firefox',
        description=u'hola',
        translated_description=u'DUDE!',
        save=True
    )
    resp = response(
        locale=u'es',
        product=u'firefox',
        description=u'hola',
        translated_description=u'',
        save=True
    )

    # Set the product up for translation *after* creating the response
    # so that it doesn't get auto-translated because Response is set up
    # for auto-translation.
    prod = Product.uncached.get(db_name='firefox')
    prod.translation_system = u'dennis'
    prod.save()

    # No jobs should be generated
    eq_(len(resp.generate_translation_jobs()), 0)

    eq_(resp.translated_description, existing_resp.translated_description)

def test_basic(self):
    testdata = [
        (True, 'en-US', 'Linux', 'Firefox', 'desc'),
        (True, 'en-US', 'Mac OSX', 'Firefox for Android', 'desc'),
        (False, 'de', 'Windows', 'Firefox', 'banana'),
    ]

    for happy, locale, platform, product, desc in testdata:
        response(
            happy=happy, locale=locale, platform=platform,
            product=product, description=desc, save=True)

    self.refresh()

    resp = self.client.get(reverse('feedback-api'))
    # FIXME: test headers
    json_data = json.loads(resp.content)
    eq_(json_data['count'], 3)
    eq_(len(json_data['results']), 3)

    resp = self.client.get(reverse('feedback-api'), {'happy': '1'})
    json_data = json.loads(resp.content)
    eq_(json_data['count'], 2)
    eq_(len(json_data['results']), 2)

    resp = self.client.get(reverse('feedback-api'), {'platforms': 'Linux'})
    json_data = json.loads(resp.content)
    eq_(json_data['count'], 1)
    eq_(len(json_data['results']), 1)

    resp = self.client.get(reverse('feedback-api'),
                           {'products': 'Firefox'})
    json_data = json.loads(resp.content)
    eq_(json_data['count'], 2)
    eq_(len(json_data['results']), 2)

    resp = self.client.get(reverse('feedback-api'), {'locales': 'en-US'})
    json_data = json.loads(resp.content)
    eq_(json_data['count'], 2)
    eq_(len(json_data['results']), 2)

    resp = self.client.get(reverse('feedback-api'),
                           {'locales': 'en-US,de'})
    json_data = json.loads(resp.content)
    eq_(json_data['count'], 3)
    eq_(len(json_data['results']), 3)

    resp = self.client.get(reverse('feedback-api'), {
        'locales': 'de', 'happy': 1
    })
    json_data = json.loads(resp.content)
    eq_(json_data['count'], 0)
    eq_(len(json_data['results']), 0)

    resp = self.client.get(reverse('feedback-api'), {'q': 'desc'})
    json_data = json.loads(resp.content)
    eq_(json_data['count'], 2)
    eq_(len(json_data['results']), 2)

def test_truncated_description_on_dashboard(self):
    # Create a description that's 500 characters long (which is
    # the truncation length) plus a string that's easy to assert
    # non-existence of.
    desc = ('0' * 500) + 'OMGou812'
    response(description=desc, save=True)
    self.refresh()

    url = reverse('dashboard')
    r = self.client.get(url)
    assert 'OMGou812' not in r.content

def test_url_domain(self):
    # Test a "normal domain"
    resp = response(url=u'http://foo.example.com.br/blah')
    eq_(resp.url_domain, u'example.com.br')
    assert isinstance(resp.url_domain, unicode)

    # Test a unicode domain
    resp = response(
        url=u'http://\u30c9\u30e9\u30af\u30a810.jp/dq10_skillpoint.html',
        save=True)
    eq_(resp.url_domain, u'\u30c9\u30e9\u30af\u30a810.jp')
    assert isinstance(resp.url_domain, unicode)

def test_public_fields(self):
    """The results should only contain publicly-visible fields"""
    # Note: This test might fail when we add new fields to
    # ES. What happens is that if a field doesn't have data when
    # the document is indexed, then there won't be a key/val in
    # the json results. Easy way to fix that is to make sure it
    # has a value when creating the response.
    response(api=True, save=True)
    self.refresh()

    resp = self.client.get(reverse('feedback-api'))
    json_data = json.loads(resp.content)
    eq_(json_data['count'], 1)
    eq_(sorted(json_data['results'][0].keys()),
        sorted(models.ResponseMappingType.public_fields()))

def test_has_email(self):
    # Test before we create a responseemail
    r = self.client.get(self.url, {'has_email': '0'})
    eq_(r.status_code, 200)
    pq = PyQuery(r.content)
    eq_(len(pq('li.opinion')), 7)

    r = self.client.get(self.url, {'has_email': '1'})
    eq_(r.status_code, 200)
    pq = PyQuery(r.content)
    eq_(len(pq('li.opinion')), 0)

    resp = response(
        happy=True, product=u'Firefox', description=u'ou812',
        created=datetime.now(), save=True)
    responseemail(opinion=resp, save=True)

    # Have to reindex everything because unlike in a request
    # context, what happens here is we index the Response, but
    # without the ResponseEmail.
    self.setup_indexes()

    r = self.client.get(self.url, {'has_email': '0'})
    eq_(r.status_code, 200)
    pq = PyQuery(r.content)
    ok_('ou812' not in r.content)
    eq_(len(pq('li.opinion')), 7)

    r = self.client.get(self.url, {'has_email': '1'})
    eq_(r.status_code, 200)
    pq = PyQuery(r.content)
    ok_('ou812' in r.content)
    eq_(len(pq('li.opinion')), 1)

def test_index_chunk_task(self):
    responses = [response(save=True) for i in range(10)]

    # With live indexing, that'll create items in the index. Since
    # we want to test index_chunk_task, we need a clean index to
    # start with so we delete and recreate it.
    self.setup_indexes(empty=True)

    # Verify there's nothing in the index.
    eq_(len(ResponseMappingType.search()), 0)

    # Create the record and the chunk and then run it through
    # celery.
    batch_id = 'ou812'
    rec = record(batch_id=batch_id, save=True)

    chunk = (ResponseMappingType, [item.id for item in responses])
    index_chunk_task.delay(get_index(), batch_id, rec.id, chunk)

    ResponseMappingType.refresh_index()

    # Verify everything is in the index now.
    eq_(len(ResponseMappingType.search()), 10)

    # Verify the record was marked succeeded.
    rec = Record.objects.get(pk=rec.id)
    eq_(rec.status, Record.STATUS_SUCCESS)

def test_spanish_with_dennis(self):
    """Spanish should get translated"""
    resp = response(
        locale=u'es',
        product=u'firefox',
        description=u'hola',
        translated_description=u'',
        save=True
    )

    # Set the product up for translation *after* creating the response
    # so that it doesn't get auto-translated because Response is set up
    # for auto-translation.
    prod = Product.uncached.get(db_name='firefox')
    prod.translation_system = u'dennis'
    prod.save()

    # One job should be generated
    jobs = resp.generate_translation_jobs()
    eq_(len(jobs), 1)
    job = jobs[0]
    eq_(job[1:], (u'dennis', u'es', u'description',
                  u'en', 'translated_description'))

    eq_(resp.translated_description, u'')

def create_basic_sampledata():
    happy_feedback = sentence_generator(HAPPY_FEEDBACK)
    sad_feedback = sentence_generator(SAD_FEEDBACK)
    products = sentence_generator(PRODUCTS)
    platforms = sentence_generator(PLATFORMS)
    locales = sentence_generator(settings.DEV_LANGUAGES)
    urls = sentence_generator(URLS)

    now = time.time()

    # Create 100 happy responses.
    for i in range(100):
        product = products.next()
        now = now - random.randint(500, 2000)
        response(
            happy=True,
            description=happy_feedback.next(),
            product=product[0],
            version=product[1],
            platform=platforms.next(),
            locale=locales.next(),
            created=datetime.datetime.fromtimestamp(now),
            save=True
        )

    now = time.time()

    # Create 100 sad responses.
    for i in range(100):
        product = products.next()
        now = now - random.randint(500, 2000)
        response(
            happy=False,
            description=sad_feedback.next(),
            product=product[0],
            version=product[1],
            platform=platforms.next(),
            locale=locales.next(),
            url=urls.next(),
            created=datetime.datetime.fromtimestamp(now),
            save=True
        )

def test_response_view_mobile(self):
    """Test response mobile view doesn't die"""
    resp = response(happy=True, description=u'the best!', save=True)
    self.refresh()

    r = self.client.get(reverse('response_view', args=(resp.id,)),
                        {'mobile': 1})
    eq_(200, r.status_code)
    self.assertTemplateUsed(r, 'analytics/mobile/response.html')
    assert str(resp.description) in r.content

def test_live_indexing(self):
    S = ResponseMappingType.search
    count_pre = S().count()

    s = response(happy=True, description='Test live indexing.', save=True)
    self.refresh()
    eq_(count_pre + 1, S().count())

    s.delete()
    self.refresh()
    eq_(count_pre, S().count())

def test_response_view_mobile(self):
    """Test response mobile view doesn't die"""
    resp = response(happy=True, description=u'the best!', save=True)
    self.refresh()

    r = self.client.get(reverse('response_view', args=(resp.id,)),
                        {'mobile': 1})
    eq_(200, r.status_code)
    self.assertTemplateUsed(r, 'analytics/mobile/response.html')
    assert str(resp.description) in r.content

def test_english_no_translation(self):
    """English descriptions should get copied over"""
    resp = response(locale=u'en-US', description=u'hello',
                    translated_description=u'', save=True)

    # No new jobs should be generated
    eq_(len(resp.generate_translation_jobs()), 0)

    # Re-fetch from the db and make sure the description was copied over
    resp = Response.uncached.get(id=resp.id)
    eq_(resp.description, resp.translated_description)

def test_spanish_no_translation(self):
    """Spanish should not get translated"""
    resp = response(locale=u'es', product=u'firefox', description=u'hola',
                    translated_description=u'', save=True)

    # No jobs should be generated
    eq_(len(resp.generate_translation_jobs()), 0)

    # Nothing should be translated
    eq_(resp.translated_description, u'')

def test_auto_translation(self):
    prod = Product.uncached.get(db_name='firefox')
    prod.translation_system = u'dennis'
    prod.save()

    resp = response(locale=u'es', product=u'firefox', description=u'hola',
                    save=True)

    # Fetch it from the db again
    resp = Response.uncached.get(id=resp.id)
    eq_(resp.translated_description, u'\xabHOLA\xbb')

def create_additional_sampledata(samplesize):
    samplesize = int(samplesize)

    print 'Working on generating {0} feedback responses....'.format(
        samplesize)

    happy_feedback = sentence_generator(HAPPY_FEEDBACK)
    sad_feedback = sentence_generator(SAD_FEEDBACK)
    products = sentence_generator(PRODUCTS)
    urls = sentence_generator(URLS)
    user_agents = sentence_generator(USER_AGENTS)
    locales = sentence_generator(settings.DEV_LANGUAGES)

    objs = []

    now = time.time()
    for i in range(samplesize):
        now = now - random.randint(500, 2000)
        happy = random.choice([True, False])
        if happy:
            description = happy_feedback.next()
            url = u''
        else:
            description = sad_feedback.next()
            url = urls.next()

        product = products.next()

        objs.append(
            response(
                happy=happy,
                description=description,
                product=product[0],
                version=product[1],
                url=url,
                ua=user_agents.next(),
                locale=locales.next(),
                created=datetime.datetime.fromtimestamp(now))
        )

        # Bulk-save the objects to the db 500 at a time and
        # print something to stdout about it.
        if i % 500 == 0:
            Response.objects.bulk_create(objs)
            objs = []
            print ' {0}...'.format(i)

    if objs:
        print ' {0}...'.format(samplesize)
        Response.objects.bulk_create(objs)
        objs = []

def test_english_gb_no_translation(self):
    """en-GB descriptions should get copied over"""
    resp = response(
        locale=u'en-GB',
        description=u'hello',
        translated_description=u'',
        save=True
    )

    # No new jobs should be generated
    eq_(len(resp.generate_translation_jobs()), 0)

    # Re-fetch from the db and make sure the description was copied over
    resp = Response.uncached.get(id=resp.id)
    eq_(resp.description, resp.translated_description)

def test_auto_translation(self):
    prod = Product.uncached.get(db_name='firefox')
    prod.translation_system = u'dennis'
    prod.save()

    resp = response(
        locale=u'es',
        product=u'firefox',
        description=u'hola',
        save=True
    )

    # Fetch it from the db again
    resp = Response.uncached.get(id=resp.id)
    eq_(resp.translated_description, u'\xabHOLA\xbb')

def test_spanish_with_dennis_and_existing_translations(self):
    """Response should pick up existing translation"""
    existing_resp = response(locale=u'es', product=u'firefox',
                             description=u'hola',
                             translated_description=u'DUDE!', save=True)
    resp = response(locale=u'es', product=u'firefox', description=u'hola',
                    translated_description=u'', save=True)

    # Set the product up for translation *after* creating the response
    # so that it doesn't get auto-translated because Response is set up
    # for auto-translation.
    prod = Product.uncached.get(db_name='firefox')
    prod.translation_system = u'dennis'
    prod.save()

    # No jobs should be generated
    eq_(len(resp.generate_translation_jobs()), 0)

    eq_(resp.translated_description, existing_resp.translated_description)

def test_spanish_no_translation(self):
    """Spanish should not get translated"""
    resp = response(
        locale=u'es',
        product=u'firefox',
        description=u'hola',
        translated_description=u'',
        save=True
    )

    # No jobs should be generated
    eq_(len(resp.generate_translation_jobs()), 0)

    # Nothing should be translated
    eq_(resp.translated_description, u'')

def generate_sampledata(options):
    """Generates response data.

    Usage: ``./manage.py generatedata [--with=samplesize=n]``

    If you specify a samplesize, then it randomly generates that many
    responses. Otherwise it generates 5 happy and 5 sad responses.

    """
    samplesize = options.get('samplesize')

    if samplesize not in (None, True):
        samplesize = int(samplesize)

        happy_feedback = sentence_generator(HAPPY_FEEDBACK)
        sad_feedback = sentence_generator(SAD_FEEDBACK)
        urls = sentence_generator(URLS)
        user_agents = sentence_generator(USER_AGENTS)
        locales = sentence_generator(settings.DEV_LANGUAGES)

        now = time.time()
        for i in range(samplesize):
            now = now - random.randint(500, 2000)
            happy = random.choice([True, False])
            if happy:
                description = happy_feedback.next()
                url = u''
            else:
                description = sad_feedback.next()
                url = urls.next()

            response(happy=happy, description=description, url=url,
                     ua=user_agents.next(), locale=locales.next(),
                     created=datetime.datetime.fromtimestamp(now),
                     save=True)

        return

    # Create 5 happy responses.
    for i in range(5):
        response(happy=True, description=HAPPY_FEEDBACK[i], save=True)

    # Create 5 sad responses.
    for i in range(5):
        response(happy=False, description=SAD_FEEDBACK[i], url=URLS[i],
                 save=True)

def generate_sampledata(options):
    """Generates response data.

    Usage: ``./manage.py generatedata [--with=samplesize=n]``

    If you specify a samplesize, then it randomly generates that many
    responses. Otherwise it generates 5 happy and 5 sad responses.

    """
    samplesize = options.get('samplesize')

    if samplesize not in (None, True):
        samplesize = int(samplesize)

        happy_feedback = sentence_generator(HAPPY_FEEDBACK)
        sad_feedback = sentence_generator(SAD_FEEDBACK)
        urls = sentence_generator(URLS)
        user_agents = sentence_generator(USER_AGENTS)
        locales = sentence_generator(settings.DEV_LANGUAGES)

        now = time.time()
        for i in range(samplesize):
            now = now - random.randint(500, 2000)
            happy = random.choice([True, False])
            if happy:
                description = happy_feedback.next()
                url = u''
            else:
                description = sad_feedback.next()
                url = urls.next()

            response(
                happy=happy, description=description, url=url,
                ua=user_agents.next(), locale=locales.next(),
                created=datetime.datetime.fromtimestamp(now),
                save=True)

        return

    # Create 5 happy responses.
    for i in range(5):
        response(happy=True, description=HAPPY_FEEDBACK[i], save=True)

    # Create 5 sad responses.
    for i in range(5):
        response(happy=False, description=SAD_FEEDBACK[i], url=URLS[i],
                 save=True)

def test_response_view(self):
    """Test dashboard link goes to response view"""
    resp = response(happy=True, description=u'the best!', save=True)
    self.refresh()

    url = reverse('dashboard')
    r = self.client.get(url)
    eq_(200, r.status_code)
    self.assertTemplateUsed(r, 'analytics/dashboard.html')

    pq = PyQuery(r.content)
    # Get the permalink
    permalink = pq('li.opinion a[href*="response"]').attr('href')

    r = self.client.get(permalink)
    eq_(200, r.status_code)
    self.assertTemplateUsed(r, 'analytics/response.html')
    assert str(resp.description) in r.content

def test_english_with_dennis(self):
    """English descriptions should get copied over"""
    resp = response(locale=u'en-US', product=u'firefox',
                    description=u'hello', translated_description=u'',
                    save=True)

    # Set the product up for translation *after* creating the response
    # so that it doesn't get auto-translated because Response is set up
    # for auto-translation.
    prod = Product.uncached.get(db_name='firefox')
    prod.translation_system = u'dennis'
    prod.save()

    # No new jobs should be generated
    eq_(len(resp.generate_translation_jobs()), 0)

    # Re-fetch from the db and make sure the description was copied over
    resp = Response.uncached.get(id=resp.id)
    eq_(resp.description, resp.translated_description)

def test_response_view_analyzer(self):
    """Test secret section only shows up for analyzers"""
    resp = response(happy=True, description=u'the bestest best!',
                    save=True)
    self.refresh()

    r = self.client.get(reverse('response_view', args=(resp.id,)))
    eq_(200, r.status_code)
    self.assertTemplateUsed(r, 'analytics/response.html')
    assert str(resp.description) in r.content

    # Verify there is no secret area visible for non-analyzers.
    pq = PyQuery(r.content)
    secretarea = pq('dl.secret')
    eq_(len(secretarea), 0)

    # Create an analyzer and log her in
    jane = user(email='*****@*****.**', save=True)
    profile(user=jane, save=True)
    jane.groups.add(Group.objects.get(name='analyzers'))
    self.client_login_user(jane)

    r = self.client.get(reverse('response_view', args=(resp.id,)))
    eq_(200, r.status_code)
    self.assertTemplateUsed(r, 'analytics/response.html')
    assert str(resp.description) in r.content

    # Verify the secret area is there.
    pq = PyQuery(r.content)
    secretarea = pq('dl.secret')
    eq_(len(secretarea), 1)

    # Verify there is an mlt section in the secret area.
    mlt = pq('dd#mlt')
    eq_(len(mlt), 1)

def test_spanish_with_dennis(self):
    """Spanish should get translated"""
    resp = response(locale=u'es', product=u'firefox', description=u'hola',
                    translated_description=u'', save=True)

    # Set the product up for translation *after* creating the response
    # so that it doesn't get auto-translated because Response is set up
    # for auto-translation.
    prod = Product.uncached.get(db_name='firefox')
    prod.translation_system = u'dennis'
    prod.save()

    # One job should be generated
    jobs = resp.generate_translation_jobs()
    eq_(len(jobs), 1)
    job = jobs[0]
    eq_(job[1:], (u'dennis', u'es', u'description',
                  u'en-US', 'translated_description'))

    eq_(resp.translated_description, u'')

def test_english_with_dennis(self):
    """English descriptions should get copied over"""
    resp = response(
        locale=u'en-US',
        product=u'firefox',
        description=u'hello',
        translated_description=u'',
        save=True
    )

    # Set the product up for translation *after* creating the response
    # so that it doesn't get auto-translated because Response is set up
    # for auto-translation.
    prod = Product.uncached.get(db_name='firefox')
    prod.translation_system = u'dennis'
    prod.save()

    # No new jobs should be generated
    eq_(len(resp.generate_translation_jobs()), 0)

    # Re-fetch from the db and make sure the description was copied over
    resp = Response.uncached.get(id=resp.id)
    eq_(resp.description, resp.translated_description)

def test_response_view_analyzer(self):
    """Test secret section only shows up for analyzers"""
    resp = response(happy=True, description=u'the bestest best!',
                    save=True)
    self.refresh()

    r = self.client.get(reverse('response_view', args=(resp.id,)))
    eq_(200, r.status_code)
    self.assertTemplateUsed(r, 'analytics/response.html')
    assert str(resp.description) in r.content

    # Verify there is no secret area visible for non-analyzers.
    pq = PyQuery(r.content)
    secretarea = pq('dl.secret')
    eq_(len(secretarea), 0)

    # Create an analyzer and log her in
    jane = user(email='*****@*****.**', save=True)
    profile(user=jane, save=True)
    jane.groups.add(Group.objects.get(name='analyzers'))
    self.client_login_user(jane)

    r = self.client.get(reverse('response_view', args=(resp.id,)))
    eq_(200, r.status_code)
    self.assertTemplateUsed(r, 'analytics/response.html')
    assert str(resp.description) in r.content

    # Verify the secret area is there.
    pq = PyQuery(r.content)
    secretarea = pq('dl.secret')
    eq_(len(secretarea), 1)

    # Verify there is an mlt section in the secret area.
    mlt = pq('dd#mlt')
    eq_(len(mlt), 1)

def generate_sampledata(options):
    """Generates response data.

    Usage: ``./manage.py generatedata [--with=samplesize=n]``

    If you specify a samplesize, then it randomly generates that many
    responses. Otherwise it generates 5 happy and 5 sad responses.

    """
    samplesize = options.get('samplesize')

    if samplesize not in (None, True):
        samplesize = int(samplesize)

        print 'Working on generating {0} feedback responses....'.format(
            samplesize)

        happy_feedback = sentence_generator(HAPPY_FEEDBACK)
        sad_feedback = sentence_generator(SAD_FEEDBACK)
        urls = sentence_generator(URLS)
        user_agents = sentence_generator(USER_AGENTS)
        locales = sentence_generator(settings.DEV_LANGUAGES)

        objs = []

        now = time.time()
        for i in range(samplesize):
            now = now - random.randint(500, 2000)
            happy = random.choice([True, False])
            if happy:
                description = happy_feedback.next()
                url = u''
            else:
                description = sad_feedback.next()
                url = urls.next()

            objs.append(
                response(
                    happy=happy, description=description, url=url,
                    ua=user_agents.next(), locale=locales.next(),
                    created=datetime.datetime.fromtimestamp(now))
            )

            # Bulk-save the objects to the db 500 at a time and
            # print something to stdout about it.
            if i % 500 == 0:
                Response.objects.bulk_create(objs)
                objs = []
                print ' {0}...'.format(i)

        if objs:
            print ' {0}...'.format(samplesize)
            Response.objects.bulk_create(objs)
            objs = []

        print 'Done! Please reindex to pick up db changes.'
        return

    # Create 5 happy responses.
    for i in range(5):
        response(happy=True, description=HAPPY_FEEDBACK[i], save=True)

    # Create 5 sad responses.
    for i in range(5):
        response(happy=False, description=SAD_FEEDBACK[i], url=URLS[i],
                 save=True)

def test_description_truncate_on_save(self):
    # Extra 10 characters get lopped off on save.
    resp = response(description=('a' * 10010), save=True)
    eq_(resp.description, 'a' * 10000)

def test_description_strip_on_save(self):
    # Nix leading and trailing whitespace.
    resp = response(description=u' \n\tou812\t\n ', save=True)
    eq_(resp.description, u'ou812')

def test_description_truncate_on_save(self):
    # Extra 10 characters get lopped off on save.
    resp = response(description=('a' * 10010))
    resp.save()
    eq_(resp.description, 'a' * 10000)

def test_description_strip_on_save(self):
    # Nix leading and trailing whitespace.
    resp = response(description=u' \n\tou812\t\n ')
    resp.save()
    eq_(resp.description, u'ou812')