Code Example #1
    def test_missing_url(self):
        dataset, url = import_fixture("file:///dev/null")
        source = tasks.extract_url(dataset, url)
        assert source is None, source

        for run in db.session.query(Run).all():
            assert run.status == Run.STATUS_FAILED, run
Code Example #2
    def test_extract_missing_url(self):
        url = csvimport_fixture_path('../data', 'xcra.csv')
        source = tasks.extract_url(self.ds, url)
        assert source is None, source

        run = db.session.query(Run).first()
        package = data_manager.package(self.ds.name)
        messages = list(logger.load(package, run.id))
        assert len(messages) > 2, messages
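
A note on the failure contract visible in the two tests above: when the URL cannot be retrieved, extract_url returns None, the Run is marked failed, and the error details are recorded in the package log rather than raised. A minimal sketch of reading those messages back, reusing the names from the snippet above (not a separately documented API):

package = data_manager.package(self.ds.name)
run = db.session.query(Run).first()
for message in logger.load(package, run.id):
    print(message)  # each entry describes one logged event for this run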
Code Example #3
File: tasks.py  Project: leowmjw/spendb
def load_from_url(dataset_name, url):
    with flask_app.app_context():
        dataset = Dataset.by_name(dataset_name)
        if dataset is None:
            log.error("Dataset not found: %s", dataset_name)
            return
        source = tasks.extract_url(dataset, url)
        if source is not None:
            load_from_source.delay(dataset_name, source.name)
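
This wrapper chains the two stages: extract_url runs inline under the Flask application context, and loading is deferred to a worker via load_from_source.delay. Assuming load_from_url is itself registered as a Celery task in tasks.py (the decorator is not shown in this excerpt), a caller would enqueue it roughly like this, with placeholder arguments:

load_from_url.delay('my-dataset', 'http://example.com/data.csv')  # hypothetical dataset name and URL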
Code Example #4
File: tasks.py  Project: annafralberts/spendb
def load_from_url(dataset_name, url):
    with flask_app.app_context():
        dataset = Dataset.by_name(dataset_name)
        if dataset is None:
            log.error("Dataset not found: %s", dataset_name)
            return
        source = tasks.extract_url(dataset, url)
        if source is not None:
            load_from_source.delay(dataset_name, source.name)
Code Example #5
    def _test_import(self, name, lines=None):
        dataset, url = import_fixture(name)
        data = urllib.urlopen(url)
        if lines is None:
            lines = self.count_lines_in_stream(data) - 1  # -1 for header row

        source = tasks.extract_url(dataset, url)
        tasks.transform_source(dataset, source.name)
        tasks.load(dataset, source_name=source.name)

        for run in db.session.query(Run).all():
            assert run.status == Run.STATUS_COMPLETE, run

        # check correct number of entries
        dataset = db.session.query(Dataset).first()
        entries = list(dataset.fact_table.entries())
        assert len(entries) == lines, len(entries)
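
The helper above exercises the full import pipeline in order: extract_url fetches the file and registers a source, transform_source processes that source, and load writes the rows into the dataset's fact table; the final assertion compares the loaded entry count against the fixture's line count minus its header row. A commented sketch of the same sequence outside the test harness, assuming dataset is a Dataset instance and url points at a reachable CSV:

source = tasks.extract_url(dataset, url)          # returns None if the fetch fails
if source is not None:
    tasks.transform_source(dataset, source.name)  # process the registered source
    tasks.load(dataset, source_name=source.name)  # populate dataset.fact_table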
Code Example #6
    def test_extract_url(self):
        source = tasks.extract_url(self.ds, self.cra_url)
        assert 'cra.csv' == source.name, source.name
Code Example #7
File: tasks.py  Project: gitter-badger/spendb
def load_from_url(dataset_name, url):
    with flask_app.app_context():
        dataset = Dataset.by_name(dataset_name)
        source = tasks.extract_url(dataset, url)
        if source is not None:
            load_from_source.delay(dataset_name, source.name)
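
Unlike the leowmjw and annafralberts variants above, this fork omits the guard for an unknown dataset name, so a None result from Dataset.by_name would be passed straight into extract_url. A defensive version would reinstate the check shown in the earlier examples (sketch only):

dataset = Dataset.by_name(dataset_name)
if dataset is None:                                 # unknown dataset name
    log.error("Dataset not found: %s", dataset_name)
    return
source = tasks.extract_url(dataset, url)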