Example #1
    def test_v6_monthly(self):
        fixture_path = join(self.fixture_dir, 'v6--views--2021-11-01_2021-11-30.json')
        with open(fixture_path, 'r') as fh:
            fixture = json.load(fh)

        # it's a 2021-11 fixture but we'll use 2021-12 dates so that the v6 module is picked.
        from_dt, to_dt = datetime(2021, 12, 1), datetime(2021, 12, 31) # monthly
        with patch('article_metrics.ga_metrics.core.query_ga_write_results', return_value=(fixture, fixture_path)):
            with patch('article_metrics.ga_metrics.core.output_path', return_value=fixture_path):
                ga_table_id = '0xdeadbeef'
                results = core.article_views(ga_table_id, from_dt, to_dt, cached=False, only_cached=False)
                expected_num_results = 11738
                expected_total = Counter(full=712582, abstract=0, digest=0)

                # a representative sample across `/article`, `/article/executable` and `/article?foo=...` paths
                expected_sample = [
                    (61268, Counter(full=209, abstract=0, digest=0)),
                    (60066, Counter(full=814, abstract=0, digest=0)),
                    (61523, Counter(full=127, abstract=0, digest=0)),
                    (64909, Counter(full=422, abstract=0, digest=0)),
                    (60095, Counter(full=64, abstract=0, digest=0)),
                    (30274, Counter(full=82, abstract=0, digest=0)),
                    (48, Counter(full=2, abstract=0, digest=0)),
                    (78, Counter(full=3, abstract=0, digest=0)),
                ]

                self.assertEqual(expected_num_results, len(results))
                self.assertEqual(expected_total, elife_v1.count_counter_list(results.values()))
                for msid, expected_count in expected_sample:
                    self.assertEqual(expected_count, results[utils.msid2doi(msid)])
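The fixture-loading and patching boilerplate above (stubbing `query_ga_write_results` and `output_path` so `core.article_views` reads a local JSON fixture rather than querying GA) is repeated by the daily tests below. A minimal sketch of how it could be pulled into a reusable context manager; the `local_ga_fixture` name and its placement are hypothetical, not part of the repository:

import json
from contextlib import contextmanager
from unittest.mock import patch

@contextmanager
def local_ga_fixture(fixture_path):
    "hypothetical helper: serve the JSON at `fixture_path` in place of a live GA query."
    with open(fixture_path, 'r') as fh:
        fixture = json.load(fh)
    with patch('article_metrics.ga_metrics.core.query_ga_write_results', return_value=(fixture, fixture_path)):
        with patch('article_metrics.ga_metrics.core.output_path', return_value=fixture_path):
            yield fixture

# usage, mirroring test_v6_monthly above:
#   with local_ga_fixture(fixture_path):
#       results = core.article_views(ga_table_id, from_dt, to_dt, cached=False, only_cached=False)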
Example #2
    def test_v5_daily(self):
        "the daily `/article/123` and `/article/123/executable` sums add up"
        fixture_path = join(self.fixture_dir, 'v5--views--2020-02-22.json')
        with open(fixture_path, 'r') as fh:
            fixture = json.load(fh)

        from_dt = to_dt = datetime(2020, 2, 22) # daily
        with patch('article_metrics.ga_metrics.core.query_ga_write_results', return_value=(fixture, fixture_path)):
            with patch('article_metrics.ga_metrics.core.output_path', return_value=fixture_path):
                ga_table_id = '0xdeadbeef'
                results = core.article_views(ga_table_id, from_dt, to_dt, cached=False, only_cached=False)
                expected_total_results = 4491 # total results after counting (not rows in fixture)
                expected_total = Counter(full=379200, abstract=0, digest=0) # total of all results

                # mix of `/article` and `/article/executable`
                expected_sample = {
                    48: Counter(full=48, abstract=0, digest=0),
                    68: Counter(full=2, abstract=0, digest=0),
                    78: Counter(full=30, abstract=0, digest=0),
                    90: Counter(full=38, abstract=0, digest=0)
                }

                self.assertEqual(expected_total_results, len(results))
                self.assertEqual(expected_total, elife_v1.count_counter_list(results.values()))
                for msid, expected_count in expected_sample.items():
                    self.assertEqual(expected_count, results[utils.msid2doi(msid)])
Example #3
    def test_one_bad_apple(self):
        "bad article objects are removed from results"
        cases = {
            '1111': ([1, 1, 1], 1, 1),
            '2222': ([2, 2, 2], 2, 2),
        }
        base.insert_metrics(cases)

        # skitch doi
        # this is the particular bad doi I'm dealing with right now
        bad_doi = '10.7554/eLife.e30552'  # preceding 'e'
        models.Article.objects.filter(doi=utils.msid2doi('1111')).update(
            doi=bad_doi)

        url = reverse('v2:summary')
        resp = self.c.get(url)
        self.assertEqual(resp.status_code, 200)

        expected_response = {
            'total': 1,
            'items': [{
                'id': 2222,
                'views': 2,
                'downloads': 2,
                models.CROSSREF: 2,
                models.PUBMED: 2,
                models.SCOPUS: 2
            }]
        }
        self.assertEqual(resp.json(), expected_response)
Example #4
    def test_v6_daily(self):
        fixture_path = join(self.fixture_dir, 'v6--views--2021-11-30.json')
        with open(fixture_path, 'r') as fh:
            fixture = json.load(fh)

        from_dt = to_dt = datetime(2021, 12, 1) # daily
        with patch('article_metrics.ga_metrics.core.query_ga_write_results', return_value=(fixture, fixture_path)):
            with patch('article_metrics.ga_metrics.core.output_path', return_value=fixture_path):
                ga_table_id = '0xdeadbeef'
                results = core.article_views(ga_table_id, from_dt, to_dt, cached=False, only_cached=False)
                expected_num_results = 7265
                expected_total = Counter(full=31275, abstract=0, digest=0)

                # a representative sample across `/article`, `/article/executable` and `/article?foo=...` paths
                expected_sample = [
                    (61268, Counter(full=7, abstract=0, digest=0)),
                    (60066, Counter(full=4, abstract=0, digest=0)),
                    (61523, Counter(full=5, abstract=0, digest=0)),
                    (64909, Counter(full=17, abstract=0, digest=0)),
                    (60095, Counter(full=1, abstract=0, digest=0)),
                    (30274, Counter(full=8, abstract=0, digest=0)),
                    (48, Counter(full=1, abstract=0, digest=0)),
                    (78, Counter(full=1, abstract=0, digest=0)),
                ]

                self.assertEqual(expected_num_results, len(results))
                self.assertEqual(expected_total, elife_v1.count_counter_list(results.values()))
                for msid, expected_count in expected_sample:
                    self.assertEqual(expected_count, results[utils.msid2doi(msid)])
Example #5
    def test_one_bad_apple(self):
        "articles with bad dois don't prevent an entire summary from being returned"
        cases = {
            '1111': ([1, 1, 1], 1, 1),
            '2222': ([2, 2, 2], 2, 2),
        }
        base.insert_metrics(cases)

        # skitch doi
        # this is the particular bad doi I'm dealing with right now
        bad_doi = '10.7554/eLife.00000'
        models.Article.objects.filter(doi=utils.msid2doi('1111')).update(
            doi=bad_doi)

        # expect just one result
        resp = self.c.get(reverse('v2:summary'))
        expected_response = {
            'total': 1,
            'items': [
                {
                    'id': 2222,
                    'views': 2,
                    'downloads': 2,
                    models.CROSSREF: 2,
                    models.PUBMED: 2,
                    models.SCOPUS: 2
                },
            ]
        }
        self.assertEqual(resp.json(), expected_response)
Example #6
def enplumpen(artid):
    "takes an article id like e01234 and returns a DOI like 10.7554/eLife.01234"
    if isint(artid):
        return msid2doi(artid)
    ensure(artid[0] == 'e', 'cannot convert article id %s to doi' % artid)
    # replace only the leading 'e'; the remainder of the article id is digits.
    return artid.replace('e', '10.7554/eLife.', 1)
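A usage sketch for `enplumpen`, based on the docstring above and the `msid2doi` cases in the next example; the import path is an assumption and the values are illustrative only:

# assumption: enplumpen lives alongside msid2doi, e.g. `from article_metrics.utils import enplumpen`
assert enplumpen('e01234') == '10.7554/eLife.01234'  # 'e' prefix swapped for the DOI prefix
assert enplumpen(1234) == '10.7554/eLife.01234'      # integer ids are routed through msid2doi (zero-padded to five digits)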
Example #7
    def test_msid_to_doi(self):
        cases = [(3, '10.7554/eLife.00003'), (10627, '10.7554/eLife.10627')]
        for given, expected in cases:
            self.assertEqual(utils.msid2doi(given), expected)
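The cases above pin down the mapping `msid2doi` performs. A minimal sketch of an implementation consistent with them; the real function in `utils` may differ:

def msid2doi_sketch(msid):
    # sketch only: zero-pad the manuscript id to five digits under the eLife DOI prefix.
    return '10.7554/eLife.%05d' % int(msid)

assert msid2doi_sketch(3) == '10.7554/eLife.00003'
assert msid2doi_sketch(10627) == '10.7554/eLife.10627'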
Example #8
def count_for_msid(msid):
    return count_for_doi(utils.msid2doi(msid))
Example #9
def count_for_msid(msid):
    return count_for_obj(models.Article.objects.get(doi=utils.msid2doi(msid)))