Example #1
    def test_add_index(self):
        # build the table without an index
        config = get_sample_data_source()
        config.save()
        self.addCleanup(config.delete)
        pillow = get_kafka_ucr_pillow()
        pillow.bootstrap([config])

        adapter = get_indicator_adapter(config)
        engine = adapter.engine
        insp = reflection.Inspector.from_engine(engine)
        table_name = get_table_name(config.domain, config.table_id)
        self.assertEqual(len(insp.get_indexes(table_name)), 0)

        # add the index to the config
        config = get_sample_data_source()
        self.addCleanup(config.delete)
        config.configured_indicators[0]['create_index'] = True
        config.save()
        adapter = get_indicator_adapter(config)

        # mock rebuild table to ensure the table isn't rebuilt when adding index
        pillow = get_kafka_ucr_pillow()
        pillow.processors[0].rebuild_table = MagicMock()
        pillow.bootstrap([config])
        self.assertFalse(pillow.processors[0].rebuild_table.called)
        engine = adapter.engine
        insp = reflection.Inspector.from_engine(engine)
        self.assertEqual(len(insp.get_indexes(table_name)), 1)
Example #2
    def test_add_non_nullable_column(self):
        self._setup_data_source('add_non_nullable_col')

        # assert new date isn't in the config
        insp = reflection.Inspector.from_engine(self.engine)
        table_name = get_table_name(self.config.domain, self.config.table_id)
        self.assertEqual(
            len([
                c for c in insp.get_columns(table_name)
                if c['name'] == 'new_date'
            ]), 0)

        # add the column to the config
        config = self._get_config('add_non_nullable_col')
        self.addCleanup(config.delete)
        config.configured_indicators.append({
            "column_id": "new_date",
            "type": "raw",
            "display_name": "new_date opened",
            "datatype": "datetime",
            "property_name": "other_opened_on",
            "is_nullable": False
        })
        config.save()
        adapter = get_indicator_adapter(config)
        engine = adapter.engine

        # mock rebuild table to ensure the table is rebuilt
        pillow = get_kafka_ucr_pillow()
        pillow.processors[0].rebuild_table = MagicMock()
        pillow.bootstrap([config])
        self.assertTrue(pillow.processors[0].rebuild_table.called)
        # column doesn't exist because rebuild table was mocked
        insp = reflection.Inspector.from_engine(engine)
        self.assertEqual(
            len([
                c for c in insp.get_columns(table_name)
                if c['name'] == 'new_date'
            ]), 0)

        # Another time without the mock to ensure the column is there
        pillow = get_kafka_ucr_pillow()
        pillow.bootstrap([config])
        insp = reflection.Inspector.from_engine(engine)
        self.assertEqual(
            len([
                c for c in insp.get_columns(table_name)
                if c['name'] == 'new_date'
            ]), 1)
Example #3
    def setUp(self):
        config1 = get_data_source_with_related_doc_type()
        config1.save()
        config2 = get_data_source_with_related_doc_type()
        config2.table_id = 'other-config'
        config2.save()
        self.configs = [config1, config2]
        self.adapters = [get_indicator_adapter(c) for c in self.configs]

        # one pillow that has one config, the other has both configs
        self.pillow1 = get_kafka_ucr_pillow(topics=['case-sql'])
        self.pillow2 = get_kafka_ucr_pillow(topics=['case-sql'])
        self.pillow1.bootstrap(configs=[config1])
        self.pillow2.bootstrap(configs=self.configs)
        with trap_extra_setup(KafkaUnavailableError):
            self.pillow1.get_change_feed().get_latest_offsets()
Example #4
    def test_pillow_save_to_one_database_at_a_time(self):
        pillow = get_kafka_ucr_pillow()
        pillow.bootstrap(configs=[self.ds_1])

        sample_doc, _ = get_sample_doc_and_indicators()
        pillow.process_change(doc_to_change(sample_doc))

        self.assertEqual(1, self.ds1_adapter.get_query_object().count())
        self.assertEqual(0, self.ds2_adapter.get_query_object().count())

        # save to the other
        pillow.bootstrap(configs=[self.ds_2])
        orig_id = sample_doc['_id']
        sample_doc['_id'] = uuid.uuid4().hex
        pillow.process_change(doc_to_change(sample_doc))
        self.assertEqual(1, self.ds1_adapter.get_query_object().count())
        self.assertEqual(1, self.ds2_adapter.get_query_object().count())
        self.assertEqual(
            1,
            self.ds1_adapter.get_query_object().filter_by(
                doc_id=orig_id).count())
        self.assertEqual(
            1,
            self.ds2_adapter.get_query_object().filter_by(
                doc_id=sample_doc['_id']).count())
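Several of these examples feed documents into the pillow through `doc_to_change`. As a minimal sketch of what that helper plausibly does, assuming it simply wraps a raw document dict in pillowtop's `Change` object (the real helper lives in corehq's test utilities and may attach extra metadata):

    # Hypothetical sketch: assumes doc_to_change wraps the document in a
    # pillowtop Change; the real corehq helper may differ.
    from pillowtop.feed.interface import Change

    def doc_to_change(doc):
        return Change(
            id=doc['_id'],    # the change id mirrors the document id
            sequence_id='0',  # placeholder sequence for tests
            document=doc,     # full document payload for the processor
        )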
Example #5
    def setUp(self):
        delete_all_locations()
        self.domain_obj = create_domain(self.domain)

        self.region = LocationType.objects.create(domain=self.domain, name="region")
        self.town = LocationType.objects.create(domain=self.domain, name="town", parent_type=self.region)

        self.data_source_config = DataSourceConfiguration(
            domain=self.domain,
            display_name='Locations in Westworld',
            referenced_doc_type='Location',
            table_id=_clean_table_name(self.domain, str(uuid.uuid4().hex)),
            configured_filter={},
            configured_indicators=[{
                "type": "expression",
                "expression": {
                    "type": "property_name",
                    "property_name": "name"
                },
                "column_id": "location_name",
                "display_name": "location_name",
                "datatype": "string"
            }],
        )
        self.data_source_config.validate()
        self.data_source_config.save()

        self.pillow = get_kafka_ucr_pillow()
        self.pillow.bootstrap(configs=[self.data_source_config])
        with trap_extra_setup(KafkaUnavailableError):
            self.pillow.get_change_feed().get_current_offsets()
Example #6
    def _setup_data_source(self, extra_id):
        # save the config and bootstrap a pillow so the UCR table gets built
        self.config = self._get_config(extra_id)
        self.config.save()
        pillow = get_kafka_ucr_pillow()
        pillow.bootstrap([self.config])
        self.adapter = get_indicator_adapter(self.config)
        self.engine = self.adapter.engine
Example #7
    def setUp(self):
        self.domain_obj = create_domain(self.domain)

        self.region = LocationType.objects.create(domain=self.domain,
                                                  name="region")
        self.town = LocationType.objects.create(domain=self.domain,
                                                name="town",
                                                parent_type=self.region)

        self.data_source_config = DataSourceConfiguration(
            domain=self.domain,
            display_name='Locations in Westworld',
            referenced_doc_type='Location',
            table_id=clean_table_name(self.domain, str(uuid.uuid4().hex)),
            configured_filter={},
            configured_indicators=[{
                "type": "expression",
                "expression": {
                    "type": "property_name",
                    "property_name": "name"
                },
                "column_id": "location_name",
                "display_name": "location_name",
                "datatype": "string"
            }],
        )
        self.data_source_config.validate()
        self.data_source_config.save()

        self.pillow = get_kafka_ucr_pillow()
        self.pillow.bootstrap(configs=[self.data_source_config])
        with trap_extra_setup(KafkaUnavailableError):
            self.pillow.get_change_feed().get_latest_offsets()
Example #8
    @classmethod
    def setUpClass(cls):
        super(IndicatorPillowTest, cls).setUpClass()
        cls.config = get_sample_data_source()
        cls.config.save()
        cls.adapter = get_indicator_adapter(cls.config)
        # build the UCR table once for the whole test class
        cls.adapter.build_table()
        cls.fake_time_now = datetime(2015, 4, 24, 12, 30, 8, 24886)
        cls.pillow = get_kafka_ucr_pillow()
Example #9
    def setUp(self):
        self.pillow = get_kafka_ucr_pillow(topics=['case-sql'])
        self.config = get_data_source_with_related_doc_type()
        self.config.save()
        self.adapter = get_indicator_adapter(self.config)

        self.pillow.bootstrap(configs=[self.config])
        with trap_extra_setup(KafkaUnavailableError):
            self.pillow.get_change_feed().get_latest_offsets()
Example #10
    def test_pillow_save_to_multiple_databases(self):
        self.assertNotEqual(self.ds1_adapter.engine.url, self.ds2_adapter.engine.url)
        pillow = get_kafka_ucr_pillow()
        pillow.bootstrap(configs=[self.ds_1, self.ds_2])
        self.assertNotEqual(self.ds1_adapter.engine.url, self.ds2_adapter.engine.url)
        sample_doc, _ = get_sample_doc_and_indicators()
        # older pillowtop entry point; Example #11 is the same test using process_change
        pillow.processor(doc_to_change(sample_doc))
        self.assertNotEqual(self.ds1_adapter.engine.url, self.ds2_adapter.engine.url)
        self.assertEqual(1, self.ds1_adapter.get_query_object().count())
        self.assertEqual(1, self.ds2_adapter.get_query_object().count())
Example #11
    def test_pillow_save_to_multiple_databases(self):
        self.assertNotEqual(self.ds1_adapter.engine.url, self.ds2_adapter.engine.url)
        pillow = get_kafka_ucr_pillow()
        pillow.bootstrap(configs=[self.ds_1, self.ds_2])
        self.assertNotEqual(self.ds1_adapter.engine.url, self.ds2_adapter.engine.url)
        sample_doc, _ = get_sample_doc_and_indicators()
        pillow.process_change(doc_to_change(sample_doc))
        self.assertNotEqual(self.ds1_adapter.engine.url, self.ds2_adapter.engine.url)
        self.assertEqual(1, self.ds1_adapter.get_query_object().count())
        self.assertEqual(1, self.ds2_adapter.get_query_object().count())
Example #12
    @classmethod
    def setUpClass(cls):
        super(AsyncIndicatorTest, cls).setUpClass()
        cls.pillow = get_kafka_ucr_pillow()
        cls.config = get_data_source_with_related_doc_type()
        cls.config.asynchronous = True
        cls.config.save()
        cls.adapter = get_indicator_adapter(cls.config)

        cls.pillow.bootstrap(configs=[cls.config])
        with trap_extra_setup(KafkaUnavailableError):
            cls.pillow.get_change_feed().get_latest_offsets()
Example #13
    def _add_rows(self, rows):
        pillow = get_kafka_ucr_pillow()
        pillow.bootstrap(configs=[self.data_source])

        def _get_case(row):
            return {
                '_id': uuid.uuid4().hex,
                'domain': self.domain,
                'doc_type': 'CommCareCase',
                'type': 'city',
                'name': row.name,
                'number': row.number,
            }
        for row in rows:
            pillow.process_change(doc_to_change(_get_case(row)))
Example #14
    def _add_rows(self, rows):
        pillow = get_kafka_ucr_pillow()
        pillow.bootstrap(configs=[self.data_source])

        def _get_case(row):
            return {
                "_id": uuid.uuid4().hex,
                "domain": self.domain,
                "doc_type": "CommCareCase",
                "type": "city",
                "name": row.name,
                "number": row.number,
            }

        for row in rows:
            pillow.process_change(doc_to_change(_get_case(row)))
Example #15
    def _add_rows(self, rows):
        pillow = get_kafka_ucr_pillow()
        pillow.bootstrap(configs=[self.data_source])

        def _get_case(row):
            return {
                '_id': uuid.uuid4().hex,
                'domain': self.domain,
                'doc_type': 'CommCareCase',
                'type': 'city',
                'name': row.name,
                'number': row.number,
            }

        for row in rows:
            pillow.process_change(doc_to_change(_get_case(row)))
Example #16
    def test_pillow_save_to_one_database_at_a_time(self):
        pillow = get_kafka_ucr_pillow()
        pillow.bootstrap(configs=[self.ds_1])

        sample_doc, _ = get_sample_doc_and_indicators()
        # older pillowtop entry point; Example #4 is the same test using process_change
        pillow.processor(doc_to_change(sample_doc))

        self.assertEqual(1, self.ds1_adapter.get_query_object().count())
        self.assertEqual(0, self.ds2_adapter.get_query_object().count())

        # save to the other
        pillow.bootstrap(configs=[self.ds_2])
        orig_id = sample_doc['_id']
        sample_doc['_id'] = uuid.uuid4().hex
        pillow.processor(doc_to_change(sample_doc))
        self.assertEqual(1, self.ds1_adapter.get_query_object().count())
        self.assertEqual(1, self.ds2_adapter.get_query_object().count())
        self.assertEqual(1, self.ds1_adapter.get_query_object().filter_by(doc_id=orig_id).count())
        self.assertEqual(1, self.ds2_adapter.get_query_object().filter_by(doc_id=sample_doc['_id']).count())
Example #17
    def setUp(self):
        super(IndicatorPillowTest, self).setUp()
        self.pillow = get_kafka_ucr_pillow()
        self.pillow.bootstrap(configs=[self.config])
        with trap_extra_setup(KafkaUnavailableError):
            self.pillow.get_change_feed().get_current_offsets()
Example #18
    def setUp(self):
        super(IndicatorPillowTest, self).setUp()
        self.pillow = get_kafka_ucr_pillow()
        self.pillow.bootstrap(configs=[self.config])
        with trap_extra_setup(KafkaUnavailableError):
            self.pillow.get_change_feed().get_current_offsets()
Example #19
    def _process_docs(self, docs):
        pillow = get_kafka_ucr_pillow()
        pillow.bootstrap(configs=[self.data_source])

        for doc in docs:
            pillow.process_change(doc_to_change(doc))
Example #20
    def setUp(self):
        super(IndicatorPillowTest, self).setUp()
        self.pillow = get_kafka_ucr_pillow()
        self.pillow.bootstrap(configs=[self.config])
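Taken together, the examples above repeat one pattern: save a `DataSourceConfiguration`, bootstrap a pillow with it, then push changes through the pillow and query the table via the indicator adapter. A condensed sketch of that flow, using the same helpers the tests use (`doc_to_change` as sketched earlier; not runnable outside corehq's test environment):

    # Condensed sketch of the recurring pattern in the examples above.
    config = get_sample_data_source()
    config.save()                            # persist before bootstrapping
    adapter = get_indicator_adapter(config)

    pillow = get_kafka_ucr_pillow()
    pillow.bootstrap(configs=[config])       # builds/migrates the UCR table

    sample_doc, _ = get_sample_doc_and_indicators()
    pillow.process_change(doc_to_change(sample_doc))
    assert adapter.get_query_object().count() == 1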