def test_spanish_with_dennis_and_existing_translations(self):
    """Response should pick up existing translation"""
    # A previously-translated response with the same locale/product/
    # description -- its translation should be reused.
    existing_resp = ResponseFactory(
        locale=u'es',
        product=u'firefox',
        description=u'hola',
        translated_description=u'DUDE!'
    )

    resp = ResponseFactory(
        locale=u'es',
        product=u'firefox',
        description=u'hola',
        translated_description=u''
    )

    # Set the product up for translation *after* creating the response
    # so that it doesn't get auto-translated because Response is set up
    # for auto-translation.
    prod = Product.objects.get(db_name='firefox')
    prod.translation_system = u'dennis'
    prod.save()

    # No jobs should be translated
    assert len(resp.generate_translation_jobs()) == 0

    # The existing translation was picked up instead of creating a job.
    assert resp.translated_description == existing_resp.translated_description
def test_match(self):
    """Matcher built via get_matcher() should honor all rule criteria."""
    # Note: This isn't an exhaustive test. Just a rough cursory check.

    # TriggerRule that matches everything matches everything.
    tr = TriggerRuleFactory(
        versions=[],
        locales=[],
        keywords=[],
        products=[],
    )
    trm = tr.get_matcher()

    resp = ResponseFactory()
    assert trm.match(resp) is True

    # A rule with version prefix, locales, keyword and url criteria.
    tr = TriggerRuleFactory(
        versions=[u'38*'],
        locales=[u'en-US', u'fr'],
        keywords=[u'rc4'],
        url_exists=True
    )
    prod = ProductFactory()
    tr.products.add(prod)
    trm = tr.get_matcher()

    resp = ResponseFactory(
        version=u'38.0.5',
        locale=u'en-US',
        product=prod.db_name,
        description=u'rc4 is awesome',
        url=u'https://example.com/'
    )
    assert trm.match(resp) is True

    # A locale that isn't in the rule should break the match.
    resp.locale = 'es'
    assert trm.match(resp) is False
def test_match(self):
    """Matcher built via get_matcher() should honor all rule criteria."""
    # Note: This isn't an exhaustive test. Just a rough cursory check.

    # TriggerRule that matches everything matches everything.
    tr = TriggerRuleFactory(versions=[], locales=[], keywords=[], products=[])
    trm = tr.get_matcher()

    resp = ResponseFactory()
    assert trm.match(resp) is True

    # A rule with version prefix, locales, keyword and url criteria.
    tr = TriggerRuleFactory(
        versions=[u"38*"],
        locales=[u"en-US", u"fr"],
        keywords=[u"rc4"],
        url_exists=True
    )
    prod = ProductFactory()
    tr.products.add(prod)
    trm = tr.get_matcher()

    resp = ResponseFactory(
        version=u"38.0.5",
        locale=u"en-US",
        product=prod.db_name,
        description=u"rc4 is awesome",
        url=u"https://example.com/",
    )
    assert trm.match(resp) is True

    # A locale that isn't in the rule should break the match.
    resp.locale = "es"
    assert trm.match(resp) is False
def test_match(self):
    """TriggerRule.match should honor all rule criteria."""
    # Note: This isn't an exhaustive test. Just a rough cursory check.

    # TriggerRule that matches everything matches everything.
    tr = TriggerRuleFactory(
        versions=[],
        locales=[],
        keywords=[],
        products=[],
    )

    resp = ResponseFactory()
    assert tr.match(resp) is True

    # A rule with version prefix, locales, keyword and url criteria.
    tr = TriggerRuleFactory(
        versions=[u'38*'],
        locales=[u'en-US', u'fr'],
        keywords=[u'rc4'],
        url_exists=True
    )
    prod = ProductFactory()
    tr.products.add(prod)

    resp = ResponseFactory(
        version=u'38.0.5',
        locale=u'en-US',
        product=prod.db_name,
        description=u'rc4 is awesome',
        url=u'https://example.com/'
    )
    assert tr.match(resp) is True

    # A locale that isn't in the rule should break the match.
    resp.locale = 'es'
    assert tr.match(resp) is False
def test_spanish_with_dennis_and_existing_translations(self):
    """Response should pick up existing translation"""
    # A previously-translated response with the same locale/product/
    # description -- its translation should be reused.
    existing_resp = ResponseFactory(
        locale=u'es',
        product=u'firefox',
        description=u'hola',
        translated_description=u'DUDE!'
    )

    resp = ResponseFactory(
        locale=u'es',
        product=u'firefox',
        description=u'hola',
        translated_description=u''
    )

    # Set the product up for translation *after* creating the response
    # so that it doesn't get auto-translated because Response is set up
    # for auto-translation.
    prod = Product.objects.get(db_name='firefox')
    prod.translation_system = u'dennis'
    prod.save()

    # No jobs should be translated
    eq_(len(resp.generate_translation_jobs()), 0)

    # The existing translation was picked up instead of creating a job.
    eq_(resp.translated_description, existing_resp.translated_description)
def create_basic_sampledata():
    """Create 100 happy and 100 sad sample Response rows.

    Field values come from the sentence/locale generators and all rows
    are inserted with a single ``bulk_create()``.
    """
    # Parenthesized print works the same on Python 2 and 3.
    print('Generating 100 happy and 100 sad responses...')

    happy_feedback = sentence_generator(HAPPY_FEEDBACK)
    sad_feedback = sentence_generator(SAD_FEEDBACK)

    # Note: We're abusing sentence_generator to just return random
    # choice from a tuple of things.
    products = sentence_generator(PRODUCT_TUPLES)
    locales = locale_generator()
    urls = sentence_generator(URLS)

    # Create 100 happy responses.
    now = time.time()
    objs = []
    for i in range(100):
        # next() builtin (Python 2.6+/3) instead of the py2-only
        # .next() method.
        product = next(products)
        # Walk the created timestamp backwards so responses spread
        # out over time.
        now = now - random.randint(500, 2000)

        if product[0] in ALWAYS_API:
            api = '1'
        elif product[0] in NEVER_API:
            api = None
        else:
            api = random.choice(('1', None))

        objs.append(
            ResponseFactory.build(
                happy=True,
                description=next(happy_feedback),
                product=product[0],
                version=product[1],
                platform=product[2],
                user_agent=product[3],
                locale=next(locales),
                created=datetime.datetime.fromtimestamp(now),
                api=api
            )
        )

    # Create 100 sad responses.
    now = time.time()
    for i in range(100):
        product = next(products)
        now = now - random.randint(500, 2000)

        if product[0] in ALWAYS_API:
            api = '1'
        elif product[0] in NEVER_API:
            api = None
        else:
            api = random.choice(('1', None))

        objs.append(
            ResponseFactory.build(
                happy=False,
                description=next(sad_feedback),
                product=product[0],
                version=product[1],
                platform=product[2],
                locale=next(locales),
                user_agent=product[3],
                url=next(urls),
                created=datetime.datetime.fromtimestamp(now),
                api=api
            )
        )

    Response.objects.bulk_create(objs)
def test_happy_to_rating(self):
    """Saving a response must never derive ``rating`` from ``happy``."""
    for sentiment in (True, False):
        feedback = ResponseFactory.build(happy=sentiment, rating=None)
        feedback.save()
        # rating stays None regardless of the happy value.
        eq_(feedback.rating, None)
def test_live_indexing(self):
    """Creating and deleting a response should update the index live."""
    search = ResponseMappingType.search
    initial_count = search().count()

    response = ResponseFactory(happy=True, description='Test live indexing.')
    self.refresh()
    # One new document after creation.
    eq_(initial_count + 1, search().count())

    response.delete()
    self.refresh()
    # Back to the starting count after deletion.
    eq_(initial_count, search().count())
def test_url_domain(self):
    """url_domain should be the registered domain, always as unicode."""
    # Test a "normal domain"
    resp = ResponseFactory(url=u'http://foo.example.com.br/blah')
    assert resp.url_domain == u'example.com.br'
    # Python 2 ``unicode`` type check.
    assert isinstance(resp.url_domain, unicode)

    # Test a unicode domain
    resp = ResponseFactory(
        url=u'http://\u30c9\u30e9\u30af\u30a810.jp/dq10_skillpoint.html')
    assert resp.url_domain == u'\u30c9\u30e9\u30af\u30a810.jp'
    assert isinstance(resp.url_domain, unicode)
def test_live_indexing(self):
    """Index count should track response creation and deletion."""
    search = ResponseDocType.docs.search()
    before = search.count()

    doc = ResponseFactory(happy=True, description="Test live indexing.")
    self.refresh()
    # One more document after the create.
    assert search.count() == before + 1

    doc.delete()
    self.refresh()
    # Back to where we started after the delete.
    assert search.count() == before
def test_live_indexing(self):
    """Index count should track response creation and deletion."""
    search = ResponseDocType.docs.search()
    before = search.count()

    doc = ResponseFactory(happy=True, description='Test live indexing.')
    self.refresh()
    # One more document after the create.
    assert search.count() == before + 1

    doc.delete()
    self.refresh()
    # Back to where we started after the delete.
    assert search.count() == before
def test_spanish_no_translation(self):
    """Spanish should not get translated"""
    feedback = ResponseFactory(
        locale=u'es',
        product=u'firefox',
        description=u'hola',
        translated_description=u'',
    )

    # No translation jobs get generated...
    assert len(feedback.generate_translation_jobs()) == 0
    # ...and the translated description stays empty.
    assert feedback.translated_description == u''
def test_english_gb_no_translation(self):
    """en-GB descriptions should get copied over"""
    feedback = ResponseFactory(
        locale=u'en-GB',
        description=u'hello',
        translated_description=u'',
    )

    # en-GB needs no translation work.
    assert len(feedback.generate_translation_jobs()) == 0

    # Reload from the db; the description should have been copied
    # straight into translated_description.
    feedback = Response.objects.get(id=feedback.id)
    assert feedback.translated_description == feedback.description
def test_english_no_translation(self):
    """English descriptions should get copied over"""
    resp = ResponseFactory(locale=u'en-US', description=u'hello',
                           translated_description=u'')

    # No new jobs should be generated
    eq_(len(resp.generate_translation_jobs()), 0)

    # Re-fetch from the db and make sure the description was copied over
    resp = Response.objects.get(id=resp.id)
    eq_(resp.description, resp.translated_description)
def test_url_exists(self):
    """url_exists: None = don't care, True = has url, False = no url."""
    fb1 = ResponseFactory(url=u'')
    fb2 = ResponseFactory(url=u'http://example.com')
    fb3 = ResponseFactory(url=u'http://example.com')

    jane = AnalyzerProfileFactory().user
    self.client_login_user(jane)

    # Test don't care
    data = {
        'locales': [],
        'products': [],
        'versions': [],
        'keywords': [],
        'url_exists': None
    }
    resp = self.client.post(
        reverse('triggerrule-match'),
        content_type='application/json',
        data=json.dumps(data)
    )
    assert resp.status_code == 200
    # Results come back newest-first.
    assert (
        [item['id'] for item in json.loads(resp.content)['results']]
        == [fb3.id, fb2.id, fb1.id]
    )

    # Test has a url
    data['url_exists'] = True
    resp = self.client.post(
        reverse('triggerrule-match'),
        content_type='application/json',
        data=json.dumps(data)
    )
    assert resp.status_code == 200
    assert (
        [item['id'] for item in json.loads(resp.content)['results']]
        == [fb3.id, fb2.id]
    )

    # Test does not have a url
    data['url_exists'] = False
    resp = self.client.post(
        reverse('triggerrule-match'),
        content_type='application/json',
        data=json.dumps(data)
    )
    assert resp.status_code == 200
    assert (
        [item['id'] for item in json.loads(resp.content)['results']]
        == [fb1.id]
    )
def test_versions(self):
    """Version filter: exact values and trailing-* prefix matching."""
    te_resp = ResponseFactory(version=u'38.0')
    teof_resp = ResponseFactory(version=u'38.0.5')
    ResponseFactory(version=u'39.0')

    jane = AnalyzerProfileFactory().user
    self.client_login_user(jane)

    # Test one version
    data = {
        'locales': [],
        'products': [],
        'versions': [u'38.0'],
        'keywords': [],
        'url_exists': None
    }
    resp = self.client.post(
        reverse('triggerrule-match'),
        content_type='application/json',
        data=json.dumps(data)
    )
    assert resp.status_code == 200
    assert (
        [item['id'] for item in json.loads(resp.content)['results']]
        == [te_resp.id]
    )

    # Test two
    data['versions'] = [u'38.0', u'38.0.5']
    resp = self.client.post(
        reverse('triggerrule-match'),
        content_type='application/json',
        data=json.dumps(data)
    )
    assert resp.status_code == 200
    # Results come back newest-first.
    assert (
        [item['id'] for item in json.loads(resp.content)['results']]
        == [teof_resp.id, te_resp.id]
    )

    # Test prefix
    data['versions'] = [u'38*']
    resp = self.client.post(
        reverse('triggerrule-match'),
        content_type='application/json',
        data=json.dumps(data)
    )
    assert resp.status_code == 200
    assert (
        [item['id'] for item in json.loads(resp.content)['results']]
        == [teof_resp.id, te_resp.id]
    )
def test_keywords(self):
    """Keyword filter: single word, multiple words, and phrases."""
    rte_resp = ResponseFactory(description=u'Ride the lightning')
    fwtbt_resp = ResponseFactory(description=u'For whom the bell tolls')
    ResponseFactory(description=u'The thing that should not be')

    jane = AnalyzerProfileFactory().user
    self.client_login_user(jane)

    # Test one keyword
    data = {
        'locales': [],
        'products': [],
        'versions': [],
        'keywords': [u'lightning'],
        'url_exists': None
    }
    resp = self.client.post(
        reverse('triggerrule-match'),
        content_type='application/json',
        data=json.dumps(data)
    )
    assert resp.status_code == 200
    assert (
        [item['id'] for item in json.loads(resp.content)['results']]
        == [rte_resp.id]
    )

    # Test two
    data['keywords'] = [u'lightning', u'tolls']
    resp = self.client.post(
        reverse('triggerrule-match'),
        content_type='application/json',
        data=json.dumps(data)
    )
    assert resp.status_code == 200
    # Results come back newest-first.
    assert (
        [item['id'] for item in json.loads(resp.content)['results']]
        == [fwtbt_resp.id, rte_resp.id]
    )

    # Test phrase
    data['keywords'] = [u'bell tolls']
    resp = self.client.post(
        reverse('triggerrule-match'),
        content_type='application/json',
        data=json.dumps(data)
    )
    assert resp.status_code == 200
    assert (
        [item['id'] for item in json.loads(resp.content)['results']]
        == [fwtbt_resp.id]
    )
def test_english_gb_no_translation(self):
    """en-GB descriptions should get copied over"""
    resp = ResponseFactory(
        locale=u'en-GB',
        description=u'hello',
        translated_description=u''
    )

    # No new jobs should be generated
    eq_(len(resp.generate_translation_jobs()), 0)

    # Re-fetch from the db and make sure the description was copied over
    resp = Response.objects.get(id=resp.id)
    eq_(resp.description, resp.translated_description)
def test_cant_see_old_responses(self):
    """Dashboard should not show responses older than 180 days."""
    # Make sure we can't see responses from > 180 days ago
    cutoff = datetime.today() - timedelta(days=180)
    ResponseFactory(description='Young enough--Party!',
                    created=cutoff + timedelta(days=1))
    ResponseFactory(description='Too old--Get off my lawn!',
                    created=cutoff - timedelta(days=1))

    self.refresh()

    url = reverse('dashboard')
    # Ask for everything from the cutoff date forward.
    resp = self.client.get(url, {'date_start': cutoff.strftime('%Y-%m-%d')})
    assert 'Young enough--Party!' in resp.content
    assert 'Too old--Get off my lawn!' not in resp.content
def test_spanish_no_translation(self):
    """Spanish should not get translated"""
    resp = ResponseFactory(
        locale=u'es',
        product=u'firefox',
        description=u'hola',
        translated_description=u''
    )

    # No jobs should be translated
    eq_(len(resp.generate_translation_jobs()), 0)

    # Nothing should be translated
    eq_(resp.translated_description, u'')
def test_english_no_translation(self):
    """English descriptions should get copied over"""
    feedback = ResponseFactory(
        locale=u'en-US',
        description=u'hello',
        translated_description=u'',
    )

    # en-US needs no translation work.
    assert len(feedback.generate_translation_jobs()) == 0

    # Reload from the db; the description should have been copied
    # straight into translated_description.
    feedback = Response.objects.get(id=feedback.id)
    assert feedback.translated_description == feedback.description
def timezone_view(request): """Admin view showing times and timezones in data.""" # Note: This is an admin page that gets used once in a blue moon. # As such, I'm taking some liberties (hand-indexing the response, # time.sleep, etc) that I would never take if it was used more # often or was viewable by users. If these two assumptions ever # change, then this should be rewritten. from fjord.feedback.models import ( Response, ResponseDocType, ResponseDocTypeManager ) from fjord.feedback.tests import ResponseFactory from fjord.search.index import get_es, get_index_name server_time = datetime.now() # Create a new response. resp = ResponseFactory() resp_time = resp.created # Index the response by hand so we know it gets to # Elasticsearch. Otherwise it gets done by celery and we don't # know how long that'll take. doc = ResponseDocType.extract_doc(resp) ResponseDocTypeManager.bulk_index(docs=[doc]) # Fetch the response from the db. resp = Response.objects.get(id=resp.id) resp2_time = resp.created # Refresh and sleep 5 seconds as a hand-wavey way to make sure # that Elasticsearch has had time to refresh the index. get_es().indices.refresh(get_index_name()) time.sleep(5) s = ResponseDocTypeManager.search().filter('term', id=resp.id).execute() es_time = s[0].created # Delete the test response which also deletes it in the index. resp.delete() return render(request, 'admin/timezone_view.html', { 'server_time': server_time, 'resp_time': resp_time, 'resp2_time': resp2_time, 'es_time': es_time })
def test_index_chunk_task(self): responses = ResponseFactory.create_batch(10) # With live indexing, that'll create items in the index. Since # we want to test index_chunk_test, we need a clean index to # start with so we delete and recreate it. self.setup_indexes(empty=True) # Verify there's nothing in the index. assert ResponseDocType.docs.search().count() == 0 # Create the record and the chunk and then run it through # celery. batch_id = 'ou812' rec = RecordFactory(batch_id=batch_id) chunk = ( to_class_path(ResponseDocType), [item.id for item in responses] ) index_chunk_task.delay(get_index_name(), batch_id, rec.id, chunk) self.refresh() # Verify everything is in the index now. assert ResponseDocType.docs.search().count() == 10 # Verify the record was marked succeeded. rec = Record.objects.get(pk=rec.id) assert rec.status == Record.STATUS_SUCCESS
def setUp(self): super(TestDashboardView, self).setUp() # Set up some sample data # 4 happy, 3 sad. # 2 Windows XP, 2 Linux, 1 OS X, 2 Windows 7 now = datetime.now() # The dashboard by default shows the last week of data, so # these need to be relative to today. The alternative is that # every test gives an explicit date range, and that is # annoying and verbose. items = [ # happy, platform, locale, description, created (True, '', 'en-US', 'apple', now - timedelta(days=6)), (True, 'Windows 7', 'es', 'banana', now - timedelta(days=5)), (True, 'Linux', 'en-US', 'orange', now - timedelta(days=4)), (True, 'Linux', 'en-US', 'apple', now - timedelta(days=3)), (False, 'Windows XP', 'en-US', 'banana', now - timedelta(days=2)), (False, 'Windows 7', 'en-US', 'orange', now - timedelta(days=1)), (False, 'Linux', 'es', u'\u2713 apple', now - timedelta(days=0)), ] for happy, platform, locale, description, created in items: # We don't need to keep this around, just need to create it. ResponseFactory(happy=happy, platform=platform, locale=locale, description=description, created=created) self.refresh()
def test_response_view_analyzer(self):
    """Test secret section only shows up for analyzers"""
    resp = ResponseFactory(happy=True, description=u'the bestest best!')

    self.refresh()

    # Fetch the page anonymously first.
    r = self.client.get(reverse('response_view', args=(resp.id, )))
    eq_(200, r.status_code)
    self.assertTemplateUsed(r, 'analytics/response.html')
    assert str(resp.description) in r.content

    # Verify there is no secret area visible for non-analyzers.
    pq = PyQuery(r.content)
    secretarea = pq('dl.secret')
    eq_(len(secretarea), 0)

    jane = ProfileFactory(user__email='*****@*****.**').user
    jane.groups.add(Group.objects.get(name='analyzers'))
    self.client_login_user(jane)

    # Same page as an analyzer.
    r = self.client.get(reverse('response_view', args=(resp.id, )))
    eq_(200, r.status_code)
    self.assertTemplateUsed(r, 'analytics/response.html')
    assert str(resp.description) in r.content

    # Verify the secret area is there.
    pq = PyQuery(r.content)
    secretarea = pq('dl.secret')
    eq_(len(secretarea), 1)

    # Verify there is an mlt section in the secret area.
    mlt = pq('dd#mlt')
    eq_(len(mlt), 1)
def test_index_chunk_task(self): responses = ResponseFactory.create_batch(10) # With live indexing, that'll create items in the index. Since # we want to test index_chunk_test, we need a clean index to # start with so we delete and recreate it. self.setup_indexes(empty=True) # Verify there's nothing in the index. eq_(len(ResponseMappingType.search()), 0) # Create the record and the chunk and then run it through # celery. batch_id = 'ou812' rec = RecordFactory(batch_id=batch_id) chunk = (to_class_path(ResponseMappingType), [item.id for item in responses]) index_chunk_task.delay(get_index(), batch_id, rec.id, chunk) ResponseMappingType.refresh_index() # Verify everything is in the index now. eq_(len(ResponseMappingType.search()), 10) # Verify the record was marked succeeded. rec = Record.objects.get(pk=rec.id) eq_(rec.status, Record.STATUS_SUCCESS)
def setUp(self): super(TestSearchView, self).setUp() # Set up some sample data # 4 happy, 3 sad. # 2 Windows XP, 2 Linux, 1 OS X, 2 Windows 7 now = datetime.now() # The dashboard by default shows the last week of data, so # these need to be relative to today. The alternative is that # every test gives an explicit date range, and that is # annoying and verbose. items = [ # happy, platform, locale, description, created (True, '', 'en-US', 'apple', now - timedelta(days=6)), (True, 'Windows 7', 'es', 'banana', now - timedelta(days=5)), (True, 'Linux', 'en-US', 'orange', now - timedelta(days=4)), (True, 'Linux', 'en-US', 'apple', now - timedelta(days=3)), (False, 'Windows XP', 'en-US', 'banana', now - timedelta(days=2)), (False, 'Windows 7', 'en-US', 'orange', now - timedelta(days=1)), (False, 'Linux', 'es', u'\u2713 apple', now - timedelta(days=0)), ] for happy, platform, locale, description, created in items: # We don't need to keep this around, just need to create it. ResponseFactory(happy=happy, platform=platform, locale=locale, description=description, created=created) self.refresh() # Create analyzer and log analyzer in jane = ProfileFactory(user__email='*****@*****.**').user jane.groups.add(Group.objects.get(name='analyzers')) self.client_login_user(jane)
def test_response_id_in_qs_authenticated(self): """Verify response_id in querystring overrides session id""" # Create analyzer and log in. jane = AnalyzerProfileFactory().user self.client_login_user(jane) # Create some feedback which sets the response_id in the # session. url = reverse('feedback', args=(u'firefox', ), locale='en-US') r = self.client.post( url, { 'happy': 0, 'description': u'Why Firefox not make me sandwiches!', }, follow=True) # Create another piece of feedback which is not the one we # just did. feedback = ResponseFactory(description=u'purple horseshoes') # Fetch the thank you page with the response_id in the # querystring. url = reverse('thanks') + '?response_id={0}'.format(feedback.id) r = self.client.get(url) assert r.status_code == 200 assert r.jinja_context['feedback'].id == feedback.id assert r.jinja_context['suggestions'] == []
def test_get_suggestions(self):
    """get_suggestions should work both with and without a request."""
    # Timestamp the description/url so each test run is unique.
    now = u'ts_{0}'.format(datetime.datetime.now())

    req = self.get_feedback_post_request({
        'happy': 1,
        'description': now,
        'url': u'http://example.com/{0}'.format(now)
    })
    feedback = ResponseFactory(happy=True, description=now,
                               url=u'http://example.com/{0}'.format(now))

    # Try with just the feedback
    links = get_suggestions(feedback)
    assert len(links) == 1
    assert links[0].provider == 'dummy'
    assert links[0].provider_version == 1
    assert links[0].cssclass == u'document'
    assert links[0].summary == u'summary {0}'.format(now)
    assert links[0].description == u'description {0}'.format(now)
    assert links[0].url == feedback.url

    # Now with the feedback and request
    links = get_suggestions(feedback, req)
    assert len(links) == 1
    assert links[0].provider == 'dummy'
    assert links[0].provider_version == 1
    assert links[0].cssclass == u'document'
    assert links[0].summary == u'summary {0}'.format(now)
    assert links[0].description == u'description {0}'.format(now)
    assert links[0].url == feedback.url
def test_not_json(self):
    """Non-JSON API responses should yield no links and one log call."""
    # If we get back text and not JSON, then the sumo search
    # suggest provider should return no links. This tests the "if
    # any exception happens, return nothing" handling.
    with requests_mock.Mocker() as m:
        m.get(SUMO_SUGGEST_API_URL, text='Gah! Something bad happened')

        patch_point = 'fjord.suggest.providers.sumo.provider.logger'
        with patch(patch_point) as logger_patch:
            # Create a mock that we can call .exception() on and
            # it makes sure it got called.
            logger_patch.exception = MagicMock()

            feedback = ResponseFactory(
                happy=False,
                locale=u'en-US',
                product=u'Firefox',
                description=(
                    u'slow browser please speed improve i am wait '
                    u'speed improv'))

            links = self.suggester.get_suggestions(feedback)
            # Make sure we get back no links.
            assert len(links) == 0

            # Make sure logger.exception() got called once.
            assert logger_patch.exception.call_count == 1
def setUp(self): super(TestOccurrencesView, self).setUp() # Set up some sample data items = [ # happy, locale, description (True, 'en-US', 'apple banana orange pear'), (True, 'en-US', 'orange pear kiwi'), (True, 'en-US', 'chocolate chocolate yum'), (False, 'en-US', 'apple banana grapefruit'), # This one doesn't create bigrams because there isn't enough words (False, 'en-US', 'orange'), # This one shouldn't show up (False, 'es', 'apple banana'), ] for happy, locale, description in items: ResponseFactory(happy=happy, locale=locale, description=description) self.refresh() # Create analyzer and log analyzer in jane = ProfileFactory(user__email='*****@*****.**').user jane.groups.add(Group.objects.get(name='analyzers')) self.client_login_user(jane)
def test_rating_to_happy(self):
    """Test that we do populate happy from rating"""
    # rating 1-3 -> sad, 4-5 -> happy.
    data = {1: False, 2: False, 3: False, 4: True, 5: True}
    for rat, expected in data.items():
        # build() creates the response without saving it; the explicit
        # save() below is what should populate ``happy`` from ``rating``.
        resp = ResponseFactory.build(happy=None, rating=rat)
        resp.save()
        eq_(resp.happy, expected)
def create_basic_sampledata():
    """Create 100 happy and 100 sad sample Response rows.

    Field values come from the sentence generators and all rows are
    inserted with a single ``bulk_create()``.
    """
    happy_feedback = sentence_generator(HAPPY_FEEDBACK)
    sad_feedback = sentence_generator(SAD_FEEDBACK)
    products = sentence_generator(PRODUCTS)
    platforms = sentence_generator(PLATFORMS)
    locales = sentence_generator(settings.DEV_LANGUAGES)
    urls = sentence_generator(URLS)

    # Create 100 happy responses.
    now = time.time()
    objs = []
    for i in range(100):
        # next() builtin (Python 2.6+/3) instead of the py2-only
        # .next() method.
        product = next(products)
        # Walk the created timestamp backwards so responses spread
        # out over time.
        now = now - random.randint(500, 2000)
        objs.append(
            ResponseFactory.build(
                happy=True,
                description=next(happy_feedback),
                product=product[0],
                version=product[1],
                platform=next(platforms),
                locale=next(locales),
                created=datetime.datetime.fromtimestamp(now)
            )
        )

    # Create 100 sad responses.
    now = time.time()
    for i in range(100):
        product = next(products)
        now = now - random.randint(500, 2000)
        objs.append(
            ResponseFactory.build(
                happy=False,
                description=next(sad_feedback),
                product=product[0],
                version=product[1],
                platform=next(platforms),
                locale=next(locales),
                url=next(urls),
                created=datetime.datetime.fromtimestamp(now)
            )
        )

    Response.objects.bulk_create(objs)
def test_too_short(self):
    """Descriptions that are too short should produce no suggestions."""
    with requests_mock.Mocker():
        feedback = ResponseFactory(
            happy=False,
            locale=u'en-US',
            product=u'Firefox',
            description=u'Firefox is bad.',
        )
        suggestions = self.suggester.get_suggestions(feedback)
        assert len(suggestions) == 0
def create_additional_sampledata(samplesize="1000"):
    """Generate ``samplesize`` random feedback responses.

    :arg samplesize: number of responses to create; accepts an int or a
        numeric string (management commands pass strings).
    """
    samplesize = int(samplesize)

    # Parenthesized print works the same on Python 2 and 3.
    print("Generating {0} feedback responses...".format(samplesize))

    happy_feedback = sentence_generator(HAPPY_FEEDBACK)
    sad_feedback = sentence_generator(SAD_FEEDBACK)
    products = sentence_generator(PRODUCT_TUPLES)
    urls = sentence_generator(URLS)
    locales = locale_generator()

    objs = []

    now = time.time()
    for i in range(samplesize):
        # Walk the created timestamp backwards so responses spread out.
        now = now - random.randint(500, 2000)
        happy = random.choice([True, False])
        if happy:
            # next() builtin (Python 2.6+/3) instead of the py2-only
            # .next() method.
            description = next(happy_feedback)
            url = u""
        else:
            description = next(sad_feedback)
            url = next(urls)
        product = next(products)

        if product[0] in ALWAYS_API:
            api = "1"
        elif product[0] in NEVER_API:
            api = None
        else:
            api = random.choice(("1", None))

        objs.append(
            ResponseFactory.build(
                happy=happy,
                description=description,
                product=product[0],
                version=product[1],
                platform=product[2],
                url=url,
                user_agent=product[3],
                locale=next(locales),
                created=datetime.datetime.fromtimestamp(now),
                api=api,
            )
        )

        # Bulk-save the objects to the db 500 at a time and
        # print something to stdout about it.
        if i % 500 == 0:
            Response.objects.bulk_create(objs)
            objs = []
            print(" {0}...".format(i))

    if objs:
        print(" {0}...".format(samplesize))
        Response.objects.bulk_create(objs)
        objs = []
def test_not_firefox(self):
    """Non-Firefox-desktop products should get no suggestions."""
    with requests_mock.Mocker():
        feedback = ResponseFactory(
            happy=False,
            locale=u'en-US',
            product=u'Firefox for Android',
            description=u'Firefox does not make good sandwiches. Srsly.',
        )
        suggestions = self.suggester.get_suggestions(feedback)
        assert len(suggestions) == 0
def test_id(self):
    """The API should return exactly one response for a single id."""
    feedback = ResponseFactory()
    self.refresh()

    resp = self.client.get(reverse('feedback-api'), {'id': feedback.id})
    payload = json.loads(resp.content)

    assert payload['count'] == 1
    results = payload['results']
    assert len(results) == 1
    assert results[0]['id'] == feedback.id
def test_multiple_ids(self):
    """The id parameter should accept a comma-separated list of ids."""
    # Create some responses that we won't ask for.
    for _ in range(5):
        ResponseFactory()

    # These are the ones we will ask for.
    wanted = [ResponseFactory() for _ in range(5)]
    self.refresh()

    id_param = ','.join(str(int(fb.id)) for fb in wanted)
    resp = self.client.get(reverse('feedback-api'), {'id': id_param})
    payload = json.loads(resp.content)

    assert payload['count'] == 5
    assert len(payload['results']) == 5
    returned_ids = sorted(item['id'] for item in payload['results'])
    assert returned_ids == sorted(fb.id for fb in wanted)
def create_additional_sampledata(samplesize='1000'):
    """Generate ``samplesize`` random feedback responses.

    :arg samplesize: number of responses to create; accepts an int or a
        numeric string (management commands pass strings).
    """
    samplesize = int(samplesize)

    # Parenthesized print works the same on Python 2 and 3.
    print('Generating {0} feedback responses...'.format(samplesize))

    happy_feedback = sentence_generator(HAPPY_FEEDBACK)
    sad_feedback = sentence_generator(SAD_FEEDBACK)
    products = sentence_generator(PRODUCT_TUPLES)
    urls = sentence_generator(URLS)
    locales = locale_generator()

    objs = []

    now = time.time()
    for i in range(samplesize):
        # Walk the created timestamp backwards so responses spread out.
        now = now - random.randint(500, 2000)
        happy = random.choice([True, False])
        if happy:
            # next() builtin (Python 2.6+/3) instead of the py2-only
            # .next() method.
            description = next(happy_feedback)
            url = u''
        else:
            description = next(sad_feedback)
            url = next(urls)
        product = next(products)

        if product[0] in ALWAYS_API:
            api = '1'
        elif product[0] in NEVER_API:
            api = None
        else:
            api = random.choice(('1', None))

        objs.append(
            ResponseFactory.build(
                happy=happy,
                description=description,
                product=product[0],
                version=product[1],
                platform=product[2],
                url=url,
                user_agent=product[3],
                locale=next(locales),
                created=datetime.datetime.fromtimestamp(now),
                api=api
            )
        )

        # Bulk-save the objects to the db 500 at a time and
        # print something to stdout about it.
        if i % 500 == 0:
            Response.objects.bulk_create(objs)
            objs = []
            print(' {0}...'.format(i))

    if objs:
        print(' {0}...'.format(samplesize))
        Response.objects.bulk_create(objs)
        objs = []
def create_basic_sampledata():
    """Create 100 happy and 100 sad sample Response rows.

    Field values come from the sentence generators and all rows are
    inserted with a single ``bulk_create()``.
    """
    happy_feedback = sentence_generator(HAPPY_FEEDBACK)
    sad_feedback = sentence_generator(SAD_FEEDBACK)
    products = sentence_generator(PRODUCTS)
    platforms = sentence_generator(PLATFORMS)
    locales = sentence_generator(settings.DEV_LANGUAGES)
    urls = sentence_generator(URLS)

    # Create 100 happy responses.
    now = time.time()
    objs = []
    for i in range(100):
        # next() builtin (Python 2.6+/3) instead of the py2-only
        # .next() method.
        product = next(products)
        # Walk the created timestamp backwards so responses spread out.
        now = now - random.randint(500, 2000)
        objs.append(
            ResponseFactory.build(
                happy=True,
                description=next(happy_feedback),
                product=product[0],
                version=product[1],
                platform=next(platforms),
                locale=next(locales),
                created=datetime.datetime.fromtimestamp(now)))

    # Create 100 sad responses.
    now = time.time()
    for i in range(100):
        product = next(products)
        now = now - random.randint(500, 2000)
        objs.append(
            ResponseFactory.build(
                happy=False,
                description=next(sad_feedback),
                product=product[0],
                version=product[1],
                platform=next(platforms),
                locale=next(locales),
                url=next(urls),
                created=datetime.datetime.fromtimestamp(now)))

    Response.objects.bulk_create(objs)
def create_additional_sampledata(samplesize):
    """Generate ``samplesize`` random feedback responses.

    :arg samplesize: number of responses to create; accepts an int or a
        numeric string (management commands pass strings).
    """
    samplesize = int(samplesize)

    # Parenthesized print works the same on Python 2 and 3.
    print('Working on generating {0} feedback responses....'.format(
        samplesize))

    happy_feedback = sentence_generator(HAPPY_FEEDBACK)
    sad_feedback = sentence_generator(SAD_FEEDBACK)
    products = sentence_generator(PRODUCTS)
    urls = sentence_generator(URLS)
    user_agents = sentence_generator(USER_AGENTS)
    locales = sentence_generator(settings.DEV_LANGUAGES)

    objs = []
    now = time.time()
    for i in range(samplesize):
        # Walk the created timestamp backwards so responses spread out.
        now = now - random.randint(500, 2000)
        happy = random.choice([True, False])
        if happy:
            # next() builtin (Python 2.6+/3) instead of the py2-only
            # .next() method.
            description = next(happy_feedback)
            url = u''
        else:
            description = next(sad_feedback)
            url = next(urls)
        product = next(products)

        objs.append(
            ResponseFactory.build(
                happy=happy,
                description=description,
                product=product[0],
                version=product[1],
                url=url,
                user_agent=next(user_agents),
                locale=next(locales),
                created=datetime.datetime.fromtimestamp(now)
            )
        )

        # Bulk-save the objects to the db 500 at a time and
        # print something to stdout about it.
        if i % 500 == 0:
            Response.objects.bulk_create(objs)
            objs = []
            print(' {0}...'.format(i))

    if objs:
        print(' {0}...'.format(samplesize))
        Response.objects.bulk_create(objs)
        objs = []
def test_english_with_dennis(self):
    """English descriptions should get copied over"""
    resp = ResponseFactory(
        locale=u'en-US',
        product=u'firefox',
        description=u'hello',
        translated_description=u''
    )

    # Set the product up for translation *after* creating the response
    # so that it doesn't get auto-translated because Response is set up
    # for auto-translation.
    prod = Product.objects.get(db_name='firefox')
    prod.translation_system = u'dennis'
    prod.save()

    # No new jobs should be generated
    eq_(len(resp.generate_translation_jobs()), 0)

    # Re-fetch from the db and make sure the description was copied over
    resp = Response.objects.get(id=resp.id)
    eq_(resp.description, resp.translated_description)
def test_rating_to_happy(self):
    """Test that we do populate happy from rating"""
    # rating 1-3 -> sad, 4-5 -> happy.
    data = {
        1: False,
        2: False,
        3: False,
        4: True,
        5: True
    }
    for rat, expected in data.items():
        # build() creates the response without saving it; the explicit
        # save() below is what should populate ``happy`` from ``rating``.
        resp = ResponseFactory.build(happy=None, rating=rat)
        resp.save()
        eq_(resp.happy, expected)
def test_spanish_with_dennis(self):
    """Spanish should get translated"""
    resp = ResponseFactory(
        locale=u'es',
        product=u'firefox',
        description=u'hola',
        translated_description=u''
    )

    # Set the product up for translation *after* creating the response
    # so that it doesn't get auto-translated because Response is set up
    # for auto-translation.
    prod = Product.objects.get(db_name='firefox')
    prod.translation_system = u'dennis'
    prod.save()

    # One job should be generated
    jobs = resp.generate_translation_jobs()
    eq_(len(jobs), 1)
    job = jobs[0]
    # (system, src locale, src field, dst locale, dst field)
    eq_(job[1:], (u'dennis', u'es', u'description',
                  u'en', 'translated_description'))

    # Nothing translated yet -- the job hasn't run.
    eq_(resp.translated_description, u'')
def test_empty_tr(self):
    """An empty trigger rule should match every response."""
    feedback_responses = ResponseFactory.create_batch(5)

    jane = AnalyzerProfileFactory().user
    self.client_login_user(jane)

    data = {
        'locales': [],
        'products': [],
        'versions': [],
        'keywords': [],
        'url_exists': None
    }
    resp = self.client.post(
        reverse('triggerrule-match'),
        content_type='application/json',
        data=json.dumps(data)
    )
    assert resp.status_code == 200
    # Note: This matches everything because it's an empty rule.
    # Results come back newest-first, hence the reversed().
    assert (
        [item['id'] for item in json.loads(resp.content)['results']]
        == [fr.id for fr in reversed(feedback_responses)]
    )
def create_basic_sampledata():
    """Create 100 happy and 100 sad sample Response rows.

    Field values come from the sentence/locale generators and all rows
    are inserted with a single ``bulk_create()``.
    """
    # Parenthesized print works the same on Python 2 and 3.
    print("Generating 100 happy and 100 sad responses...")

    happy_feedback = sentence_generator(HAPPY_FEEDBACK)
    sad_feedback = sentence_generator(SAD_FEEDBACK)

    # Note: We're abusing sentence_generator to just return random
    # choice from a tuple of things.
    products = sentence_generator(PRODUCT_TUPLES)
    locales = locale_generator()
    urls = sentence_generator(URLS)

    # Create 100 happy responses.
    now = time.time()
    objs = []
    for i in range(100):
        # next() builtin (Python 2.6+/3) instead of the py2-only
        # .next() method.
        product = next(products)
        # Walk the created timestamp backwards so responses spread out.
        now = now - random.randint(500, 2000)

        if product[0] in ALWAYS_API:
            api = "1"
        elif product[0] in NEVER_API:
            api = None
        else:
            api = random.choice(("1", None))

        objs.append(
            ResponseFactory.build(
                happy=True,
                description=next(happy_feedback),
                product=product[0],
                version=product[1],
                platform=product[2],
                user_agent=product[3],
                locale=next(locales),
                created=datetime.datetime.fromtimestamp(now),
                api=api,
            )
        )

    # Create 100 sad responses.
    now = time.time()
    for i in range(100):
        product = next(products)
        now = now - random.randint(500, 2000)

        if product[0] in ALWAYS_API:
            api = "1"
        elif product[0] in NEVER_API:
            api = None
        else:
            api = random.choice(("1", None))

        objs.append(
            ResponseFactory.build(
                happy=False,
                description=next(sad_feedback),
                product=product[0],
                version=product[1],
                platform=product[2],
                locale=next(locales),
                user_agent=product[3],
                url=next(urls),
                created=datetime.datetime.fromtimestamp(now),
                api=api,
            )
        )

    Response.objects.bulk_create(objs)