    def test_dataset_ttl(self):

        dataset = factories.Dataset(
            notes='Test dataset'
        )

        url = url_for('dcat_dataset', _id=dataset['id'], _format='ttl')

        app = self._get_test_app()

        response = app.get(url)

        eq_(response.headers['Content-Type'], 'text/turtle')

        content = response.body

        # Parse the contents to check it's an actual serialization
        p = RDFParser()

        p.parse(content, _format='turtle')

        dcat_datasets = [d for d in p.datasets()]

        eq_(len(dcat_datasets), 1)

        dcat_dataset = dcat_datasets[0]

        eq_(dcat_dataset['title'], dataset['title'])
        eq_(dcat_dataset['notes'], dataset['notes'])
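
The snippets on this page are shown without their module-level imports. A rough sketch of what the legacy nose-style variants (the ones using eq_ and self._get_test_app()) assume is given below; the module paths follow the usual CKAN / ckanext-dcat layout but are a best guess, not copied from the original files.

# Assumed header for the nose-style tests (an approximation, not the original file):
import time

from nose.tools import eq_                      # eq_(a, b) asserts a == b
from ckan.lib.helpers import url_for            # builds the URL for a named route
from ckan.tests import factories, helpers       # factories.Dataset() creates a test dataset
from ckanext.dcat.processors import RDFParser   # parses the returned Turtle


class TestLegacyEndpoints(helpers.FunctionalTestBase):
    # _get_test_app() is provided by FunctionalTestBase and returns a test client
    # whose .get() drives the routes exercised in these tests.
    ...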
Example #2
    def test_catalog_fq_filter(self, app):
        dataset1 = factories.Dataset(
            title='First dataset',
            tags=[
                {'name': 'economy'},
                {'name': 'statistics'}
            ]
        )
        dataset2 = factories.Dataset(
            title='Second dataset',
            tags=[{'name': 'economy'}]
        )
        dataset3 = factories.Dataset(
            title='Third dataset',
            tags=[{'name': 'statistics'}]
        )

        url = url_for('dcat.read_catalog',
                      _format='ttl',
                      fq='tags:economy')

        response = app.get(url)
        content = response.body
        p = RDFParser()
        p.parse(content, _format='turtle')

        dcat_datasets = [d for d in p.datasets()]
        assert len(dcat_datasets) == 2
        assert dcat_datasets[0]['title'] in [dataset1['title'], dataset2['title']]
        assert dcat_datasets[1]['title'] in [dataset1['title'], dataset2['title']]
    def test_catalog_modified_date(self):

        dataset1 = factories.Dataset(title='First dataset')
        time.sleep(1)
        dataset2 = factories.Dataset(title='Second dataset')

        url = url_for('dcat_catalog',
                      _format='ttl',
                      modified_since=dataset2['metadata_modified'])

        app = self._get_test_app()

        response = app.get(url)

        content = response.body

        p = RDFParser()

        p.parse(content, _format='turtle')

        dcat_datasets = [d for d in p.datasets()]

        eq_(len(dcat_datasets), 1)

        eq_(dcat_datasets[0]['title'], dataset2['title'])
Example #4
    def test_dataset_ttl(self, app):

        dataset = factories.Dataset(
            notes='Test dataset'
        )

        url = url_for('dcat.read_dataset', _id=dataset['name'], _format='ttl')

        response = app.get(url)

        assert response.headers['Content-Type'] == 'text/turtle'

        content = response.body

        # Parse the contents to check it's an actual serialization
        p = RDFParser()

        p.parse(content, _format='turtle')

        dcat_datasets = [d for d in p.datasets()]

        assert len(dcat_datasets) == 1

        dcat_dataset = dcat_datasets[0]

        assert dcat_dataset['title'] == dataset['title']
        assert dcat_dataset['notes'] == dataset['notes']
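
The variants that take an app argument are written for CKAN's pytest-based test setup, where app is the test-client fixture provided by CKAN's pytest plugin. A plausible class header for them is sketched below; the fixture combination and config marker are typical for ckanext-dcat tests but are an assumption, not the original file's code.

# Hypothetical wrapper for the pytest-style tests; the fixture names are CKAN's
# standard ones, but the exact set used upstream is assumed here.
import pytest

from ckan.lib.helpers import url_for
from ckan.tests import factories
from ckanext.dcat.processors import RDFParser


@pytest.mark.ckan_config("ckan.plugins", "dcat")
@pytest.mark.usefixtures("with_plugins", "clean_db", "clean_index")
class TestDcatEndpoints:

    def test_dataset_ttl(self, app):
        # app.get() issues a request against the Flask test client;
        # clean_db / clean_index reset the database and search index between tests.
        ...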
Example #7
    def test_catalog_q_search(self, app):

        dataset1 = factories.Dataset(title='First dataset')
        factories.Dataset(title='Second dataset')

        url = url_for('dcat.read_catalog', _format='ttl', q='First')

        response = app.get(url)
        content = response.body
        p = RDFParser()
        p.parse(content, _format='turtle')

        dcat_datasets = [d for d in p.datasets()]
        assert len(dcat_datasets) == 1
        assert dcat_datasets[0]['title'] == dataset1['title']
Example #8
    def test_catalog_q_search(self):

        dataset1 = factories.Dataset(title='First dataset')
        dataset2 = factories.Dataset(title='Second dataset')

        url = url_for('dcat_catalog', _format='ttl', q='First')

        app = self._get_test_app()
        response = app.get(url)
        content = response.body
        p = RDFParser()
        p.parse(content, _format='turtle')

        dcat_datasets = [d for d in p.datasets()]
        eq_(len(dcat_datasets), 1)
        eq_(dcat_datasets[0]['title'], dataset1['title'])
Example #9
    def test_catalog_ttl(self, app):

        for i in range(4):
            factories.Dataset()

        url = url_for('dcat.read_catalog', _format='ttl')

        response = app.get(url)

        assert response.headers['Content-Type'] == 'text/turtle'

        content = response.body

        # Parse the contents to check it's an actual serialization
        p = RDFParser()

        p.parse(content, _format='turtle')

        dcat_datasets = [d for d in p.datasets()]

        assert len(dcat_datasets) == 4
    def test_catalog_ttl(self):

        for i in xrange(4):
            factories.Dataset()

        url = url_for('dcat_catalog', _format='ttl')

        app = self._get_test_app()

        response = app.get(url)

        eq_(response.headers['Content-Type'], 'text/turtle')

        content = response.body

        # Parse the contents to check it's an actual serialization
        p = RDFParser()

        p.parse(content, _format='turtle')

        dcat_datasets = [d for d in p.datasets()]

        eq_(len(dcat_datasets), 4)
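
Outside the test suite the same serializations can be checked against a running instance: ckanext-dcat exposes the catalog and dataset output under /catalog.{format} and /dataset/{id}.{format}. A minimal manual check (the base URL is a placeholder for your deployment):

# Manual spot check of the Turtle catalog endpoint on a running CKAN instance
# with the dcat plugin enabled. The host and query are illustrative only.
import requests

resp = requests.get("http://localhost:5000/catalog.ttl", params={"q": "First"})
assert resp.headers["Content-Type"].startswith("text/turtle")
print(resp.text[:500])   # first part of the Turtle serialization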
Example #11
    def test_catalog_modified_date(self, app):

        dataset1 = factories.Dataset(title='First dataset')
        time.sleep(1)
        dataset2 = factories.Dataset(title='Second dataset')

        url = url_for('dcat.read_catalog',
                      _format='ttl',
                      modified_since=dataset2['metadata_modified'])

        response = app.get(url)

        content = response.body

        p = RDFParser()

        p.parse(content, _format='turtle')

        dcat_datasets = [d for d in p.datasets()]

        assert len(dcat_datasets) == 1

        assert dcat_datasets[0]['title'] == dataset2['title']