Example 1
    def handle(self, **options):
        data_sources = list(DataSourceConfiguration.all())
        data_sources.extend(list(StaticDataSourceConfiguration.all()))

        engine_ids = self._get_engine_ids(data_sources, options.get('engine_id'))

        tables_to_remove_by_engine = defaultdict(list)
        for engine_id in engine_ids:
            engine = connection_manager.get_engine(engine_id)
            with engine.begin() as connection:
                migration_context = get_migration_context(connection, include_object=_include_object)
                raw_diffs = compare_metadata(migration_context, metadata)

            diffs = reformat_alembic_diffs(raw_diffs)
            tables_to_remove_by_engine[engine_id] = [
                diff.table_name for diff in diffs
                if diff.type == 'remove_table'
            ]

        for engine_id, tablenames in tables_to_remove_by_engine.items():
            engine = connection_manager.get_engine(engine_id)
            for tablename in tablenames:
                with engine.begin() as connection:
                    try:
                        result = connection.execute(
                            'SELECT COUNT(*), MAX(inserted_at) FROM "{tablename}"'.format(tablename=tablename)
                        )
                    except Exception:
                        print(tablename, "no inserted_at column, probably not UCR")
                    else:
                        print(tablename, result.fetchone())
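
A note on this example (and Example 2 below): both pass an _include_object callable to get_migration_context without showing its definition. Below is a minimal sketch of such a filter, assuming Alembic's documented include_object hook signature; the table-name prefix check is illustrative, not the project's actual rule.

def _include_object(object_, name, type_, reflected, compare_to):
    # Alembic invokes this hook for every table, column and index it
    # compares; returning False excludes the object from the diff.
    if type_ == 'table':
        return name.startswith('ucr_')  # illustrative prefix filter
    return True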
Example 2
    def handle(self, **options):
        data_sources = list(DataSourceConfiguration.all())
        data_sources.extend(list(StaticDataSourceConfiguration.all()))

        engine_ids = self._get_engine_ids(data_sources,
                                          options.get('engine_id'))

        tables_to_remove_by_engine = defaultdict(list)
        for engine_id in engine_ids:
            engine = connection_manager.get_engine(engine_id)
            with engine.begin() as connection:
                migration_context = get_migration_context(
                    connection, include_object=_include_object)
                raw_diffs = compare_metadata(migration_context, metadata)

            diffs = reformat_alembic_diffs(raw_diffs)
            tables_to_remove_by_engine[engine_id] = [
                diff.table_name for diff in diffs
                if diff.type == 'remove_table'
            ]

        for engine_id, tablenames in tables_to_remove_by_engine.items():
            engine = connection_manager.get_engine(engine_id)
            for tablename in tablenames:
                with engine.begin() as connection:
                    try:
                        result = connection.execute(
                            'SELECT COUNT(*), MAX(inserted_at) FROM "{tablename}"'
                            .format(tablename=tablename))
                    except Exception:
                        print(tablename,
                              "no inserted_at column, probably not UCR")
                    else:
                        print(tablename, result.fetchone())
Example 3
    def handle(self, **options):
        data_sources = list(DataSourceConfiguration.all())
        data_sources.extend(list(StaticDataSourceConfiguration.all()))

        tables_by_engine_id = self._get_tables_by_engine_id(
            data_sources, options.get('engine_id'))

        tables_to_remove_by_engine = defaultdict(list)
        for engine_id, expected_tables in tables_by_engine_id.items():
            engine = connection_manager.get_engine(engine_id)
            with engine.begin() as connection:
                # Using string formatting rather than execute with %s syntax
                # is acceptable here because the strings we're inserting are static
                # and only templated for DRYness
                results = connection.execute(f"""
                SELECT table_name
                  FROM information_schema.tables
                WHERE table_schema='public'
                  AND table_type='BASE TABLE'
                  AND (
                    table_name LIKE '{UCR_TABLE_PREFIX}%%'
                    OR
                    table_name LIKE '{LEGACY_UCR_TABLE_PREFIX}%%'
                );
                """).fetchall()
                tables_in_db = {r[0] for r in results}

            tables_to_remove_by_engine[
                engine_id] = tables_in_db - expected_tables

        for engine_id, tablenames in tables_to_remove_by_engine.items():
            print("\nTables no longer referenced in database: {}:\n".format(
                engine_id))
            engine = connection_manager.get_engine(engine_id)
            if not tablenames:
                print("\t No tables to prune")
                continue

            for tablename in tablenames:
                with engine.begin() as connection:
                    try:
                        result = connection.execute(
                            f'SELECT COUNT(*), MAX(inserted_at) FROM "{tablename}"'
                        )
                    except Exception:
                        print(
                            f"\t{tablename}: no inserted_at column, probably not UCR"
                        )
                    else:
                        row_count, idle_since = result.fetchone()
                        if row_count == 0:
                            print(f"\t{tablename}: {row_count} rows")
                            if options['drop_empty_tables']:
                                connection.execute(f'DROP TABLE "{tablename}"')
                                print(f'\t^-- deleted {tablename}')
                        else:
                            print(
                                f"\t{tablename}: {row_count} rows, idle since {idle_since}"
                            )
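
Example 3 reads options['drop_empty_tables'], which in a Django management command would be declared in add_arguments. A minimal sketch under that assumption, using the standard argparse-based BaseCommand API; the help texts are illustrative and the option names mirror the ones read above.

    def add_arguments(self, parser):
        parser.add_argument('--engine_id', help='limit the check to a single engine')
        parser.add_argument(
            '--drop_empty_tables',
            action='store_true',
            default=False,
            help='drop orphaned UCR tables that contain zero rows',
        )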
Example 4
    def handle(self, source_db_alias, target_db_alias, **options):
        self.dry_run = options['dry_run']

        with connection_manager.get_engine(source_db_alias).begin() as conn:
            self.parent_child_mapping = get_parent_child_mapping(conn)

        source_engine = connection_manager.get_engine(source_db_alias)
        target_engine = connection_manager.get_engine(target_db_alias)
        with source_engine.begin() as source_conn:
            for table in keep_child_tables:
                self.create_child_tables(source_conn, target_engine, table)
Example 5
    def handle(self, **options):
        data_sources = list(DataSourceConfiguration.all())
        data_sources.extend(list(StaticDataSourceConfiguration.all()))

        tables_by_engine_id = self._get_tables_by_engine_id(
            data_sources, options.get('engine_id'))

        tables_to_remove_by_engine = defaultdict(list)
        for engine_id, expected_tables in tables_by_engine_id.items():
            engine = connection_manager.get_engine(engine_id)
            with engine.begin() as connection:
                results = connection.execute("""
                SELECT table_name
                  FROM information_schema.tables
                WHERE table_schema='public'
                  AND table_type='BASE TABLE'
                  AND (
                    table_name LIKE '{}%%'
                    OR
                    table_name LIKE '{}%%'
                );
                """.format(UCR_TABLE_PREFIX,
                           LEGACY_UCR_TABLE_PREFIX)).fetchall()
                tables_in_db = {r[0] for r in results}

            tables_to_remove_by_engine[
                engine_id] = tables_in_db - expected_tables

        for engine_id, tablenames in tables_to_remove_by_engine.items():
            print("\nTables no longer referenced in database: {}:\n".format(
                engine_id))
            engine = connection_manager.get_engine(engine_id)
            if not tablenames:
                print("\t No tables to prune")
                continue

            for tablename in tablenames:
                if options['show_counts']:
                    with engine.begin() as connection:
                        try:
                            result = connection.execute(
                                'SELECT COUNT(*), MAX(inserted_at) FROM "{tablename}"'
                                .format(tablename=tablename))
                        except Exception:
                            print(
                                "\t{}: no inserted_at column, probably not UCR"
                                .format(tablename))
                        else:
                            print("\t{}: {}".format(tablename,
                                                    result.fetchone()))
                else:
                    print("\t{}".format(tablename))
Example 6
def _setup_ucr_tables():
    with mock.patch(
            'corehq.apps.callcenter.data_source.call_center_data_source_configuration_provider'
    ):
        with override_settings(SERVER_ENVIRONMENT=TEST_ENVIRONMENT):
            configs = StaticDataSourceConfiguration.by_domain(TEST_DOMAIN)
            adapters = [get_indicator_adapter(config) for config in configs]

            for adapter in adapters:
                try:
                    adapter.drop_table()
                except Exception:
                    pass
                adapter.build_table()

    engine = connection_manager.get_engine('aaa-data')
    metadata = sqlalchemy.MetaData(bind=engine)
    metadata.reflect(bind=engine, extend_existing=True)

    for file_name in os.listdir(INPUT_PATH):
        with open(os.path.join(INPUT_PATH, file_name), encoding='utf-8') as f:
            table_name = FILE_NAME_TO_TABLE_MAPPING[file_name[:-4]]
            table = metadata.tables[table_name]
            columns = [
                '"{}"'.format(c.strip())  # quote to preserve case
                for c in f.readline().split(',')
            ]
            postgres_copy.copy_from(f,
                                    table,
                                    engine,
                                    format='csv',
                                    null='',
                                    columns=columns)
Example 7
def setUpModule():
    _call_center_domain_mock = mock.patch(
        'corehq.apps.callcenter.data_source.call_center_data_source_configuration_provider'
    )
    _call_center_domain_mock.start()

    domain = create_domain('champ-cameroon')

    try:
        configs = StaticDataSourceConfiguration.by_domain(domain.name)
        adapters = [get_indicator_adapter(config) for config in configs]

        for adapter in adapters:
            adapter.build_table()

        engine = connection_manager.get_engine(UCR_ENGINE_ID)
        metadata = sqlalchemy.MetaData(bind=engine)
        metadata.reflect(bind=engine, extend_existing=True)
        path = os.path.join(os.path.dirname(__file__), 'fixtures')
        for file_name in os.listdir(path):
            with open(os.path.join(path, file_name), encoding='utf-8') as f:
                table_name = get_table_name(domain.name, file_name[:-4])
                table = metadata.tables[table_name]
                postgres_copy.copy_from(f,
                                        table,
                                        engine,
                                        format='csv',
                                        null='',
                                        header=True)
    except Exception:
        tearDownModule()
        raise

    _call_center_domain_mock.stop()
Example 8
def _setup_ucr_tables():
    with mock.patch('corehq.apps.callcenter.data_source.call_center_data_source_configuration_provider'):
        with override_settings(SERVER_ENVIRONMENT=TEST_ENVIRONMENT):
            configs = StaticDataSourceConfiguration.by_domain(TEST_DOMAIN)
            adapters = [get_indicator_adapter(config) for config in configs]

            for adapter in adapters:
                try:
                    adapter.drop_table()
                except Exception:
                    pass
                adapter.build_table()

    engine = connection_manager.get_engine('aaa-data')
    metadata = sqlalchemy.MetaData(bind=engine)
    metadata.reflect(bind=engine, extend_existing=True)

    for file_name in os.listdir(INPUT_PATH):
        with open(os.path.join(INPUT_PATH, file_name), encoding='utf-8') as f:
            table_name = FILE_NAME_TO_TABLE_MAPPING[file_name[:-4]]
            table = metadata.tables[table_name]
            columns = [
                '"{}"'.format(c.strip())  # quote to preserve case
                for c in f.readline().split(',')
            ]
            postgres_copy.copy_from(
                f, table, engine, format='csv' if six.PY3 else b'csv',
                null='' if six.PY3 else b'', columns=columns
            )
Example 9
    def _rebuild_sql_tables(self, adapters):
        tables_by_engine = defaultdict(dict)
        for adapter in adapters:
            try:
                tables_by_engine[adapter.engine_id][adapter.get_table().name] = adapter
            except BadSpecError:
                _soft_assert = soft_assert(to='{}@{}'.format('jemord', 'dimagi.com'))
                _soft_assert(False, "Broken data source {}".format(adapter.config.get_id))

        _assert = soft_assert(notify_admins=True)
        _notify_rebuild = lambda msg, obj: _assert(False, msg, obj)

        for engine_id, table_map in tables_by_engine.items():
            table_names = list(table_map)
            engine = connection_manager.get_engine(engine_id)

            # Temporary measure necessary during the process of renaming tables
            # - Configs point to new tables which may just be views and not real tables
            # - The global metadata contains references to the new table names
            legacy_tables = {}
            table_names_for_diff = []
            diff_metadata = sqlalchemy.MetaData()
            with engine.begin() as connection:
                for table_name in table_names:
                    sql_adapter = table_map[table_name]
                    legacy_table_name = get_legacy_table_name(
                        sql_adapter.config.domain, sql_adapter.config.table_id
                    )
                    if not table_exists(connection, table_name) and table_exists(connection, legacy_table_name):
                        legacy_tables[legacy_table_name] = table_name
                        pillow_logging.debug("[rebuild] Using legacy table: %s", legacy_table_name)
                        # populate metadata with the table schema
                        get_indicator_table(
                            sql_adapter.config,
                            metadata=diff_metadata,
                            override_table_name=legacy_table_name
                        )
                        table_names_for_diff.append(legacy_table_name)
                    else:
                        # populate metadata with the table schema
                        get_indicator_table(sql_adapter.config, metadata=diff_metadata)
                        table_names_for_diff.append(table_name)

            diffs = get_table_diffs(engine, table_names_for_diff, diff_metadata)

            tables_to_act_on = get_tables_rebuild_migrate(diffs, table_names_for_diff)
            for real_table_name in tables_to_act_on.rebuild:
                table_name = legacy_tables.get(real_table_name, real_table_name)
                pillow_logging.debug("[rebuild] Rebuilding table: %s (%s)", real_table_name, table_name)
                sql_adapter = table_map[table_name]
                if not sql_adapter.config.is_static:
                    try:
                        self.rebuild_table(sql_adapter)
                    except TableRebuildError as e:
                        _notify_rebuild(six.text_type(e), sql_adapter.config.to_json())
                else:
                    self.rebuild_table(sql_adapter)

            pillow_logging.debug("[rebuild] Application migrations to tables: %s", tables_to_act_on.migrate)
            migrate_tables(engine, diffs.raw, tables_to_act_on.migrate)
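
Example 9 branches on a table_exists(connection, name) helper that is not shown here. A minimal sketch of one way to implement it, assuming PostgreSQL and legacy-style SQLAlchemy execution with bound parameters; the project's actual helper may differ.

def table_exists(connection, table_name):
    # Bound parameters, so the table name is never interpolated into SQL.
    result = connection.execute(
        "SELECT 1 FROM information_schema.tables "
        "WHERE table_schema = 'public' AND table_name = %(name)s",
        {'name': table_name},
    )
    return result.first() is not None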
Example 10
def setup_tables_and_fixtures(domain_name):
    configs = StaticDataSourceConfiguration.by_domain(domain_name)
    adapters = [get_indicator_adapter(config) for config in configs]

    for adapter in adapters:
        try:
            adapter.drop_table()
        except Exception:
            pass
        adapter.build_table()

    cleanup_misc_agg_tables()
    engine = connection_manager.get_engine(ICDS_UCR_CITUS_ENGINE_ID)
    metadata = sqlalchemy.MetaData(bind=engine)
    metadata.reflect(bind=engine, extend_existing=True)
    path = os.path.join(os.path.dirname(__file__), 'fixtures')
    for file_name in os.listdir(path):
        with open(os.path.join(path, file_name), encoding='utf-8') as f:
            table_name = FILE_NAME_TO_TABLE_MAPPING[file_name[:-4]]
            table = metadata.tables[table_name]
            if not table_name.startswith('icds_dashboard_'):
                columns = [
                    '"{}"'.format(c.strip())  # quote to preserve case
                    for c in f.readline().split(',')
                ]
                postgres_copy.copy_from(f,
                                        table,
                                        engine,
                                        format='csv',
                                        null='',
                                        columns=columns)

    _distribute_tables_for_citus(engine)
Example 11
    def _rebuild_sql_tables(self, adapters):
        # todo move this code to sql adapter rebuild_if_necessary
        tables_by_engine = defaultdict(dict)
        for adapter in adapters:
            sql_adapter = get_indicator_adapter(adapter.config)
            tables_by_engine[sql_adapter.engine_id][
                sql_adapter.get_table().name] = sql_adapter

        _assert = soft_assert(notify_admins=True)
        _notify_rebuild = lambda msg, obj: _assert(False, msg, obj)

        for engine_id, table_map in tables_by_engine.items():
            engine = connection_manager.get_engine(engine_id)
            table_names = list(table_map)
            with engine.begin() as connection:
                migration_context = get_migration_context(
                    connection, table_names)
                raw_diffs = compare_metadata(migration_context, metadata)
                diffs = reformat_alembic_diffs(raw_diffs)

            tables_to_rebuild = get_tables_to_rebuild(diffs, table_names)
            for table_name in tables_to_rebuild:
                sql_adapter = table_map[table_name]
                if not sql_adapter.config.is_static:
                    try:
                        self.rebuild_table(sql_adapter)
                    except TableRebuildError as e:
                        _notify_rebuild(six.text_type(e),
                                        sql_adapter.config.to_json())
                else:
                    self.rebuild_table(sql_adapter)

            tables_to_migrate = get_tables_to_migrate(diffs, table_names)
            tables_to_migrate -= tables_to_rebuild
            migrate_tables(engine, raw_diffs, tables_to_migrate)
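
Examples 11 and 13 derive tables_to_rebuild from the reformatted diffs via a helper that is not shown. A rough sketch of the idea, assuming each formatted diff exposes the type and table_name attributes seen in Examples 1 and 2; the exact rebuild condition is an assumption, not the project's real rule.

def get_tables_to_rebuild(diffs, table_names):
    # Assumed rule: whole-table diffs cannot be migrated in place, so
    # those tables are flagged for a full rebuild.
    return {
        diff.table_name for diff in diffs
        if diff.type in ('add_table', 'remove_table')
        and diff.table_name in table_names
    }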
Example 12
def tearDownModule():
    if settings.USE_PARTITIONED_DATABASE:
        return

    _call_center_domain_mock = mock.patch(
        'corehq.apps.callcenter.data_source.call_center_data_source_configuration_provider'
    )
    _call_center_domain_mock.start()
    with override_settings(SERVER_ENVIRONMENT='icds'):
        configs = StaticDataSourceConfiguration.by_domain('icds-cas')
        adapters = [get_indicator_adapter(config) for config in configs]
        for adapter in adapters:
            if adapter.config.table_id == 'static-child_health_cases':
                # hack because this is in a migration
                adapter.clear_table()
                continue
            adapter.drop_table()

        engine = connection_manager.get_engine(ICDS_UCR_ENGINE_ID)
        with engine.begin() as connection:
            metadata = sqlalchemy.MetaData(bind=engine)
            metadata.reflect(bind=engine, extend_existing=True)
            table = metadata.tables['ucr_table_name_mapping']
            delete = table.delete()
            connection.execute(delete)
    LocationType.objects.filter(domain='icds-cas').delete()
    SQLLocation.objects.filter(domain='icds-cas').delete()

    Domain.get_by_name('icds-cas').delete()
    _call_center_domain_mock.stop()
Example 13
    def _rebuild_sql_tables(self, adapters):
        tables_by_engine = defaultdict(dict)
        for adapter in adapters:
            sql_adapter = get_indicator_adapter(adapter.config)
            try:
                tables_by_engine[sql_adapter.engine_id][sql_adapter.get_table().name] = sql_adapter
            except BadSpecError:
                _soft_assert = soft_assert(to='{}@{}'.format('jemord', 'dimagi.com'))
                _soft_assert(False, "Broken data source {}".format(adapter.config.get_id))

        _assert = soft_assert(notify_admins=True)
        _notify_rebuild = lambda msg, obj: _assert(False, msg, obj)

        for engine_id, table_map in tables_by_engine.items():
            engine = connection_manager.get_engine(engine_id)
            table_names = list(table_map)
            with engine.begin() as connection:
                migration_context = get_migration_context(connection, table_names)
                raw_diffs = compare_metadata(migration_context, metadata)
                diffs = reformat_alembic_diffs(raw_diffs)

            tables_to_rebuild = get_tables_to_rebuild(diffs, table_names)
            for table_name in tables_to_rebuild:
                sql_adapter = table_map[table_name]
                if not sql_adapter.config.is_static:
                    try:
                        self.rebuild_table(sql_adapter)
                    except TableRebuildError as e:
                        _notify_rebuild(six.text_type(e), sql_adapter.config.to_json())
                else:
                    self.rebuild_table(sql_adapter)

            tables_to_migrate = get_tables_to_migrate(diffs, table_names)
            tables_to_migrate -= tables_to_rebuild
            migrate_tables(engine, raw_diffs, tables_to_migrate)
Example 15
def setUpModule():
    if isinstance(Domain.get_db(), Mock):
        # needed to skip setUp for javascript tests thread on Travis
        return

    _call_center_domain_mock = mock.patch(
        'corehq.apps.callcenter.data_source.call_center_data_source_configuration_provider'
    )
    _call_center_domain_mock.start()

    domain = create_domain('champ-cameroon')
    with override_settings(SERVER_ENVIRONMENT='production'):

        configs = StaticDataSourceConfiguration.by_domain(domain.name)
        adapters = [get_indicator_adapter(config) for config in configs]

        for adapter in adapters:
            adapter.build_table()

        engine = connection_manager.get_engine(UCR_ENGINE_ID)
        metadata = sqlalchemy.MetaData(bind=engine)
        metadata.reflect(bind=engine, extend_existing=True)
        path = os.path.join(os.path.dirname(__file__), 'fixtures')
        for file_name in os.listdir(path):
            with open(os.path.join(path, file_name), encoding='utf-8') as f:
                table_name = get_table_name(domain.name, file_name[:-4])
                table = metadata.tables[table_name]
                postgres_copy.copy_from(
                    f, table, engine, format='csv' if six.PY3 else b'csv',
                    null='' if six.PY3 else b'', header=True
                )
    _call_center_domain_mock.stop()
Example 16
def catch_signal(sender, **kwargs):
    from fluff.pillow import get_fluff_pillow_configs
    if settings.UNIT_TESTING or kwargs['using'] != DEFAULT_DB_ALIAS:
        return

    table_pillow_map = {}
    for config in get_fluff_pillow_configs():
        pillow = config.get_instance()
        for processor in pillow.processors:
            doc = processor.indicator_class()
            if doc.save_direct_to_sql:
                table_pillow_map[doc._table.name] = {
                    'doc': doc,
                    'pillow': pillow
                }

    print('\tchecking fluff SQL tables for schema changes')
    engine = connection_manager.get_engine('default')

    with engine.begin() as connection:
        migration_context = get_migration_context(connection, list(table_pillow_map))
        raw_diffs = compare_metadata(migration_context, fluff_metadata)

    diffs = reformat_alembic_diffs(raw_diffs)
    tables_to_rebuild = get_tables_to_rebuild(diffs, list(table_pillow_map))

    for table in tables_to_rebuild:
        info = table_pillow_map[table]
        rebuild_table(engine, info['pillow'], info['doc'])

    engine.dispose()
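
The catch_signal(sender, **kwargs) handler above inspects kwargs['using'], which matches the arguments Django's post_migrate signal sends. A minimal registration sketch under that assumption; the AppConfig class name and app label are illustrative.

from django.apps import AppConfig
from django.db.models.signals import post_migrate


class FluffAppConfig(AppConfig):
    name = 'fluff'  # illustrative app label

    def ready(self):
        # post_migrate passes sender, using and other kwargs to the handler
        post_migrate.connect(catch_signal, sender=self)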
Example 17
    def _rebuild_sql_tables(self, adapters):
        tables_by_engine = defaultdict(dict)
        for adapter in adapters:
            sql_adapter = get_indicator_adapter(adapter.config)
            try:
                tables_by_engine[sql_adapter.engine_id][sql_adapter.get_table().name] = sql_adapter
            except BadSpecError:
                _soft_assert = soft_assert(to='{}@{}'.format('jemord', 'dimagi.com'))
                _soft_assert(False, "Broken data source {}".format(adapter.config.get_id))

        _assert = soft_assert(notify_admins=True)
        _notify_rebuild = lambda msg, obj: _assert(False, msg, obj)

        for engine_id, table_map in tables_by_engine.items():
            table_names = list(table_map)
            engine = connection_manager.get_engine(engine_id)
            diffs = get_table_diffs(engine, table_names, metadata)

            tables_to_act_on = get_tables_rebuild_migrate(diffs, table_names)
            for table_name in tables_to_act_on.rebuild:
                sql_adapter = table_map[table_name]
                if not sql_adapter.config.is_static:
                    try:
                        self.rebuild_table(sql_adapter)
                    except TableRebuildError as e:
                        _notify_rebuild(six.text_type(e), sql_adapter.config.to_json())
                else:
                    self.rebuild_table(sql_adapter)

            migrate_tables(engine, diffs.raw, tables_to_act_on.migrate)
Example 18
def catch_signal(sender, **kwargs):
    from fluff.pillow import get_fluff_pillow_configs
    if settings.UNIT_TESTING or kwargs['using'] != DEFAULT_DB_ALIAS:
        return

    table_pillow_map = {}
    for config in get_fluff_pillow_configs():
        pillow = config.get_instance()
        for processor in pillow.processors:
            doc = processor.indicator_class()
            if doc.save_direct_to_sql:
                table_pillow_map[doc._table.name] = {
                    'doc': doc,
                    'pillow': pillow
                }

    print('\tchecking fluff SQL tables for schema changes')
    engine = connection_manager.get_engine('default')

    with engine.begin() as connection:
        migration_context = get_migration_context(connection,
                                                  list(table_pillow_map))
        raw_diffs = compare_metadata(migration_context, fluff_metadata)

    diffs = reformat_alembic_diffs(raw_diffs)
    tables_to_rebuild = get_tables_to_rebuild(diffs, list(table_pillow_map))

    for table in tables_to_rebuild:
        info = table_pillow_map[table]
        rebuild_table(engine, info['pillow'], info['doc'])

    engine.dispose()
Example 19
    def _rebuild_sql_tables(self, adapters):
        # todo move this code to sql adapter rebuild_if_necessary
        tables_by_engine = defaultdict(dict)
        for adapter in adapters:
            sql_adapter = get_indicator_adapter(adapter.config)
            tables_by_engine[sql_adapter.engine_id][sql_adapter.get_table().name] = sql_adapter

        _assert = soft_assert(to='@'.join(['czue', 'dimagi.com']))
        _notify_cory = lambda msg, obj: _assert(False, msg, obj)

        for engine_id, table_map in tables_by_engine.items():
            engine = connection_manager.get_engine(engine_id)
            with engine.begin() as connection:
                migration_context = get_migration_context(connection, table_map.keys())
                raw_diffs = compare_metadata(migration_context, metadata)
                diffs = reformat_alembic_diffs(raw_diffs)

            tables_to_rebuild = get_tables_to_rebuild(diffs, table_map.keys())
            for table_name in tables_to_rebuild:
                sql_adapter = table_map[table_name]
                if not sql_adapter.config.is_static:
                    try:
                        rev_before_rebuild = sql_adapter.config.get_db().get_rev(sql_adapter.config._id)
                        self.rebuild_table(sql_adapter)
                    except TableRebuildError as e:
                        _notify_cory(six.text_type(e), sql_adapter.config.to_json())
                else:
                    self.rebuild_table(sql_adapter)
Example 20
    def _rebuild_sql_tables(self, adapters):
        tables_by_engine = defaultdict(dict)
        all_adapters = []
        for adapter in adapters:
            if getattr(adapter, 'all_adapters', None):
                all_adapters.extend(adapter.all_adapters)
            else:
                all_adapters.append(adapter)
        for adapter in all_adapters:
            tables_by_engine[adapter.engine_id][adapter.get_table().name] = adapter

        _assert = soft_assert(notify_admins=True)
        _notify_rebuild = lambda msg, obj: _assert(False, msg, obj)

        for engine_id, table_map in tables_by_engine.items():
            table_names = list(table_map)
            engine = connection_manager.get_engine(engine_id)

            diffs = get_table_diffs(engine, table_names, get_metadata(engine_id))

            tables_to_act_on = get_tables_rebuild_migrate(diffs)
            for table_name in tables_to_act_on.rebuild:
                pillow_logging.debug("[rebuild] Rebuilding table: %s", table_name)
                sql_adapter = table_map[table_name]
                table_diffs = [diff for diff in diffs if diff.table_name == table_name]
                if not sql_adapter.config.is_static:
                    try:
                        self.rebuild_table(sql_adapter, table_diffs)
                    except TableRebuildError as e:
                        _notify_rebuild(str(e), sql_adapter.config.to_json())
                else:
                    self.rebuild_table(sql_adapter, table_diffs)

            self.migrate_tables(engine, diffs, tables_to_act_on.migrate, table_map)
Example 21
    def _rebuild_sql_tables(self, adapters):
        tables_by_engine = defaultdict(dict)
        for adapter in adapters:
            tables_by_engine[adapter.engine_id][adapter.get_table().name] = adapter

        _assert = soft_assert(to='@'.join(['czue', 'dimagi.com']))
        _notify_cory = lambda msg, obj: _assert(False, msg, obj)

        for engine_id, table_map in tables_by_engine.items():
            engine = connection_manager.get_engine(engine_id)
            with engine.begin() as connection:
                migration_context = get_migration_context(connection, table_map.keys())
                raw_diffs = compare_metadata(migration_context, metadata)
                diffs = reformat_alembic_diffs(raw_diffs)

            tables_to_rebuild = get_tables_to_rebuild(diffs, table_map.keys())
            for table_name in tables_to_rebuild:
                sql_adapter = table_map[table_name]
                if not sql_adapter.config.is_static:
                    try:
                        rev_before_rebuild = sql_adapter.config.get_db().get_rev(sql_adapter.config._id)
                        self.rebuild_table(sql_adapter)
                    except TableRebuildError as e:
                        _notify_cory(six.text_type(e), sql_adapter.config.to_json())
                else:
                    self.rebuild_table(sql_adapter)
Example 22
    def handle(self, **options):
        data_sources = list(DataSourceConfiguration.all())
        data_sources.extend(list(StaticDataSourceConfiguration.all()))

        tables_by_engine_id = self._get_tables_by_engine_id(data_sources, options.get('engine_id'))

        tables_to_remove_by_engine = defaultdict(list)
        for engine_id, expected_tables in tables_by_engine_id.items():
            engine = connection_manager.get_engine(engine_id)
            with engine.begin() as connection:
                results = connection.execute("""
                SELECT table_name
                  FROM information_schema.tables
                WHERE table_schema='public'
                  AND table_type='BASE TABLE'
                  AND (
                    table_name LIKE '{}%%'
                    OR
                    table_name LIKE '{}%%'
                );
                """.format(UCR_TABLE_PREFIX, LEGACY_UCR_TABLE_PREFIX)).fetchall()
                tables_in_db = {r[0] for r in results}

            tables_to_remove_by_engine[engine_id] = tables_in_db - expected_tables

        for engine_id, tablenames in tables_to_remove_by_engine.items():
            print("\nTables no longer referenced in database: {}:\n".format(engine_id))
            engine = connection_manager.get_engine(engine_id)
            if not tablenames:
                print("\t No tables to prune")
                continue

            for tablename in tablenames:
                if options['show_counts']:
                    with engine.begin() as connection:
                        try:
                            result = connection.execute(
                                'SELECT COUNT(*), MAX(inserted_at) FROM "{tablename}"'.format(tablename=tablename)
                            )
                        except Exception:
                            print("\t{}: no inserted_at column, probably not UCR".format(tablename))
                        else:
                            print("\t{}: {}".format(tablename, result.fetchone()))
                else:
                    print("\t{}".format(tablename))
Example 23
def cleanup_misc_agg_tables():
    engine = connection_manager.get_engine(ICDS_UCR_CITUS_ENGINE_ID)
    with engine.begin() as connection:
        metadata = sqlalchemy.MetaData(bind=engine)
        metadata.reflect(bind=engine, extend_existing=True)
        for name in ('ucr_table_name_mapping', 'awc_location', 'awc_location_local'):
            table = metadata.tables[name]
            delete = table.delete()
            connection.execute(delete)
Example 24
    def setUpClass(cls):
        super(IntraHealthTestCase, cls).setUpClass()
        cls.engine = connection_manager.get_engine(DEFAULT_ENGINE_ID)

        cls.domain = create_domain(TEST_DOMAIN)
        cls.region_type = LocationType.objects.create(domain=TEST_DOMAIN,
                                                      name='Région')
        cls.district_type = LocationType.objects.create(domain=TEST_DOMAIN,
                                                        name='District')
        cls.pps_type = LocationType.objects.create(domain=TEST_DOMAIN,
                                                   name='PPS')

        cls.region = make_location(domain=TEST_DOMAIN,
                                   name='Test region',
                                   location_type='Région')
        cls.region.save()
        cls.district = make_location(domain=TEST_DOMAIN,
                                     name='Test district',
                                     location_type='District',
                                     parent=cls.region)
        cls.district.save()
        cls.pps = make_location(domain=TEST_DOMAIN,
                                name='Test PPS',
                                location_type='PPS',
                                parent=cls.district)
        cls.pps.save()

        cls.mobile_worker = CommCareUser.create(domain=TEST_DOMAIN,
                                                username='******',
                                                password='******',
                                                phone_number='777777')
        cls.mobile_worker.location_id = cls.pps.get_id
        cls.mobile_worker.save()

        cls.product = Product(_id='81457658bdedd663f8b0bdadb19d8f22',
                              name='ASAQ Nourisson',
                              domain=TEST_DOMAIN)
        cls.product2 = Product(_id='81457658bdedd663f8b0bdadb19d83d8',
                               name='ASAQ Petit Enfant',
                               domain=TEST_DOMAIN)

        cls.product.save()
        cls.product2.save()

        cls.recap_table = RecapPassageFluff._table
        cls.intra_table = IntraHealthFluff._table
        cls.taux_rupt_table = TauxDeRuptureFluff._table
        cls.livraison_table = LivraisonFluff._table
        cls.taux_sat_table = TauxDeSatisfactionFluff._table
        cls.couverture_table = CouvertureFluff._table
        with cls.engine.begin() as connection:
            cls.recap_table.create(connection, checkfirst=True)
            cls.intra_table.create(connection, checkfirst=True)
            cls.taux_rupt_table.create(connection, checkfirst=True)
            cls.livraison_table.create(connection, checkfirst=True)
            cls.taux_sat_table.create(connection, checkfirst=True)
            cls.couverture_table.create(connection, checkfirst=True)
Example 25
    def generate_dump_script(self, source_engine_id):
        self.seen_tables = set()

        source_engine = connection_manager.get_engine(source_engine_id)
        # direct dump and load from parent + child tables
        with source_engine.begin() as source_conn:
            insp = sqlinspect(source_conn)
            for table in keep_child_tables + plain_tables:
                if table in self.all_tables:
                    for line in self.get_table_date_target(insp, table):
                        self.insert_row(line)

            # direct dump and load from parent
            # dump from all child tables into parent table
            for table in drop_child_tables:
                if table in self.all_tables:
                    for line in self.get_table_date_target(insp,
                                                           table,
                                                           all_in_parent=True):
                        self.insert_row(line)

            for datasource in StaticDataSourceConfiguration.by_domain(
                    DASHBOARD_DOMAIN):
                if source_engine_id == datasource.engine_id or source_engine_id in datasource.mirrored_engine_ids:
                    adapter = get_indicator_adapter(datasource)
                    table_name = adapter.get_table().name

                    # direct dump and load from parent
                    # dump from all child tables into parent table
                    #  - if table is distributed, citus will distribute the data
                    #  - if table is partitioned the triggers on the parent will distribute the data
                    for line in self.get_table_date_target(insp,
                                                           table_name,
                                                           all_in_parent=True):
                        self.insert_row(line)

            remaining_tables = self.all_tables - self.seen_tables - IGNORE_TABLES
            icds_ucr_prefix = '{}{}_'.format(UCR_TABLE_PREFIX,
                                             DASHBOARD_DOMAIN)

            def keep_table(table):
                root_table = self.child_parent_mapping.get(table, table)
                return not root_table.startswith(
                    UCR_TABLE_PREFIX) or root_table.startswith(icds_ucr_prefix)

            remaining_tables = list(filter(keep_table, remaining_tables))

            if remaining_tables:
                self.stderr.write("Some tables not seen:")
                for t in remaining_tables:
                    parent = self.child_parent_mapping.get(t)
                    if parent:
                        self.stderr.write("\t{} (parent: {})".format(
                            t, parent))
                    else:
                        self.stderr.write("\t{}".format(t))
Example 26
def drop_tables(apps, schema_editor):
    metadata = MetaData(bind=connection_manager.get_engine())

    for table_name in [
            'fluff_OPMHierarchyFluff',
            'fluff_OpmCaseFluff',
            'fluff_OpmFormFluff',
            'fluff_OpmHealthStatusAllInfoFluff',
            'fluff_VhndAvailabilityFluff',
    ]:
        Table(table_name, metadata).drop(checkfirst=True)
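
drop_tables(apps, schema_editor) has the signature Django expects of a data-migration callable. A minimal sketch of how such a function is typically wired into a migration, assuming the standard RunPython operation; the dependency tuple is illustrative.

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [('fluff', '0001_initial')]  # illustrative

    operations = [
        # noop reverse so the migration can be unapplied cleanly
        migrations.RunPython(drop_tables, migrations.RunPython.noop),
    ]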
Example 28
    def setUpClass(cls):
        cls.engine = connection_manager.get_engine()
        cls.metadata = sqlalchemy.MetaData()
        cls.table_name = 'diff_table_' + uuid.uuid4().hex
        sqlalchemy.Table(
            cls.table_name, cls.metadata,
            sqlalchemy.Column('user_id', sqlalchemy.Integer, primary_key=True),
            sqlalchemy.Column('user_name', sqlalchemy.String(16), nullable=False),
            sqlalchemy.Column('email_address', sqlalchemy.String(60), key='email'),
            sqlalchemy.Column('password', sqlalchemy.String(20), nullable=False),
        )
        cls.metadata.create_all(cls.engine)
Example 29
def _assert_migrated(apps, schema_editor):
    for engine_id, data_sources in _data_sources_by_engine_id().items():
        with connection_manager.get_engine(engine_id).begin() as conn:
            for data_source in data_sources:
                legacy_table_name = get_legacy_table_name(data_source)
                new_table_name = get_table_name(data_source.domain,
                                                data_source.table_id)
                if (table_exists(conn, legacy_table_name)
                        and not table_exists(conn, new_table_name)):
                    print("")
                    print(AUTO_MIGRATE_FAILED_MESSAGE)
                    sys.exit(1)
Example 30
    def setUpClass(cls):
        super(TestAlembicDiffs, cls).setUpClass()
        cls.engine = connection_manager.get_engine()
        cls.metadata = sqlalchemy.MetaData()
        cls.table_name = 'diff_table_' + uuid.uuid4().hex
        sqlalchemy.Table(
            cls.table_name, cls.metadata,
            sqlalchemy.Column('user_id', sqlalchemy.Integer, primary_key=True),
            sqlalchemy.Column('user_name', sqlalchemy.String(16), nullable=False),
            sqlalchemy.Column('email_address', sqlalchemy.String(60), key='email'),
            sqlalchemy.Column('password', sqlalchemy.String(20), nullable=False),
        )
        cls.metadata.create_all(cls.engine)
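
The table created in Examples 28 and 30 looks designed to exercise the metadata-diffing helpers. A minimal sketch of producing a raw diff against it from inside such a classmethod, using Alembic's documented autogenerate API directly.

from alembic.autogenerate import compare_metadata
from alembic.migration import MigrationContext

with cls.engine.connect() as connection:
    migration_context = MigrationContext.configure(connection)
    # Compares everything visible in the database against cls.metadata;
    # unrelated tables would also appear in the diff unless excluded
    # with an include_object filter like the sketch after Example 1.
    raw_diffs = compare_metadata(migration_context, cls.metadata)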
Example 31
    def _rebuild_sql_tables(self, adapters):
        tables_by_engine = defaultdict(dict)
        all_adapters = []
        for adapter in adapters:
            if getattr(adapter, 'all_adapters', None):
                all_adapters.extend(adapter.all_adapters)
            else:
                all_adapters.append(adapter)
        for adapter in all_adapters:
            try:
                tables_by_engine[adapter.engine_id][
                    adapter.get_table().name] = adapter
            except BadSpecError:
                _soft_assert = soft_assert(
                    to='{}@{}'.format('jemord', 'dimagi.com'))
                _soft_assert(
                    False,
                    "Broken data source {}".format(adapter.config.get_id))

        _assert = soft_assert(notify_admins=True)
        _notify_rebuild = lambda msg, obj: _assert(False, msg, obj)

        for engine_id, table_map in tables_by_engine.items():
            table_names = list(table_map)
            engine = connection_manager.get_engine(engine_id)

            diffs = get_table_diffs(engine, table_names,
                                    get_metadata(engine_id))

            tables_to_act_on = get_tables_rebuild_migrate(diffs, table_names)
            for table_name in tables_to_act_on.rebuild:
                pillow_logging.debug("[rebuild] Rebuilding table: %s",
                                     table_name)
                sql_adapter = table_map[table_name]
                table_diffs = [
                    diff for diff in diffs.formatted
                    if diff.table_name == table_name
                ]
                if not sql_adapter.config.is_static:
                    try:
                        self.rebuild_table(sql_adapter, table_diffs)
                    except TableRebuildError as e:
                        _notify_rebuild(six.text_type(e),
                                        sql_adapter.config.to_json())
                else:
                    self.rebuild_table(sql_adapter, table_diffs)

            migration_diffs = diffs.filter(tables_to_act_on.migrate)
            self.migrate_tables(engine, migration_diffs.raw,
                                tables_to_act_on.migrate, table_map)
Example 32
def tearDownModule():
    _call_center_domain_mock = mock.patch(
        'corehq.apps.callcenter.data_source.call_center_data_source_configuration_provider'
    )
    domain = Domain.get_by_name('test-pna')
    engine = connection_manager.get_engine(UCR_ENGINE_ID)
    metadata = sqlalchemy.MetaData(bind=engine)
    metadata.reflect(bind=engine, extend_existing=True)
    path = os.path.join(os.path.dirname(__file__), 'fixtures')
    for file_name in os.listdir(path):
        table_name = get_table_name(domain.name, file_name[:-4])
        table = metadata.tables[table_name]
        table.drop()
    _call_center_domain_mock.start()
    domain.delete()
    _call_center_domain_mock.stop()
Example 33
def _assert_migrated(apps, schema_editor):
    if settings.UNIT_TESTING:
        return

    for engine_id, data_sources in _data_sources_by_engine_id().items():
        with connection_manager.get_engine(engine_id).begin() as conn:
            for data_source in data_sources:
                legacy_table_name = get_legacy_table_name(data_source)
                new_table_name = get_table_name(data_source.domain, data_source.table_id)
                if (
                    table_exists(conn, legacy_table_name)
                    and not table_exists(conn, new_table_name)
                ):
                    print("")
                    print(AUTO_MIGRATE_FAILED_MESSAGE)
                    sys.exit(1)
Example 34
    def setUpClass(cls):
        super(IntraHealthTestCase, cls).setUpClass()
        cls.engine = connection_manager.get_engine('default')

        cls.domain = create_domain(TEST_DOMAIN)
        cls.region_type = LocationType.objects.create(domain=TEST_DOMAIN, name='Région')
        cls.district_type = LocationType.objects.create(domain=TEST_DOMAIN, name='District')
        cls.pps_type = LocationType.objects.create(domain=TEST_DOMAIN, name='PPS')

        cls.region = make_location(domain=TEST_DOMAIN, name='Test region', location_type='Région')
        cls.region.save()
        cls.district = make_location(
            domain=TEST_DOMAIN, name='Test district', location_type='District', parent=cls.region
        )
        cls.district.save()
        cls.pps = make_location(domain=TEST_DOMAIN, name='Test PPS', location_type='PPS', parent=cls.district)
        cls.pps.save()

        cls.mobile_worker = create_mobile_worker(
            domain=TEST_DOMAIN, username='******', password='******', phone_number='777777'
        )
        cls.mobile_worker.location_id = cls.pps.get_id
        cls.mobile_worker.save()

        cls.product = Product(_id='81457658bdedd663f8b0bdadb19d8f22', name='ASAQ Nourisson', domain=TEST_DOMAIN)
        cls.product2 = Product(
            _id='81457658bdedd663f8b0bdadb19d83d8', name='ASAQ Petit Enfant', domain=TEST_DOMAIN
        )

        cls.product.save()
        cls.product2.save()

        cls.recap_table = RecapPassageFluff._table
        cls.intra_table = IntraHealthFluff._table
        cls.taux_rupt_table = TauxDeRuptureFluff._table
        cls.livraison_table = LivraisonFluff._table
        cls.taux_sat_table = TauxDeSatisfactionFluff._table
        cls.couverture_table = CouvertureFluff._table
        with cls.engine.begin() as connection:
            cls.recap_table.create(connection, checkfirst=True)
            cls.intra_table.create(connection, checkfirst=True)
            cls.taux_rupt_table.create(connection, checkfirst=True)
            cls.livraison_table.create(connection, checkfirst=True)
            cls.taux_sat_table.create(connection, checkfirst=True)
            cls.couverture_table.create(connection, checkfirst=True)
Example 35
def _rename_tables(dry_run=False,
                   tables_by_engine=None,
                   confirm=False,
                   engine_id=None):
    tables_by_engine = tables_by_engine or _get_old_new_tablenames(engine_id)
    for engine_id, dsconfs in tables_by_engine.items():
        engine = connection_manager.get_engine(engine_id)
        print('\tChecking {} tables in engine "{}"'.format(
            len(dsconfs), engine_id))
        for dsconf in dsconfs:
            with engine.begin() as conn:
                executed = False
                rename = table_exists(conn,
                                      dsconf.old_table) and not table_exists(
                                          conn, dsconf.new_table)
                if rename:
                    drop_partition = rename_children = None
                    if dsconf.config.sql_settings.partition_config:
                        drop_partition = _get_drop_partitioning_features_sql(
                            conn, dsconf, dry_run)
                        rename_children = _get_rename_child_tables_sql(
                            conn, dsconf, dry_run)

                    drop_view_rename_table = """
                    {drop_partition}
                    {rename_children}
                    DROP VIEW IF EXISTS "{new_table}";
                    ALTER TABLE "{old_table}" RENAME TO "{new_table}";
                    """.format(drop_partition=drop_partition or '',
                               rename_children=rename_children or '',
                               old_table=dsconf.old_table,
                               new_table=dsconf.new_table)

                    print('\t\tRenaming table "{}" to "{}"'.format(
                        dsconf.old_table, dsconf.new_table))
                    executed = _run_sql_with_logging(conn,
                                                     drop_view_rename_table,
                                                     dry_run, confirm)

            # do this outside the previous transaction to avoid deadlock
            if rename and executed and dsconf.config.sql_settings.partition_config:
                print('\t\tReinstalling partitioning on "{}"'.format(
                    dsconf.new_table))
                if not dry_run:
                    dsconf.adapter._install_partition()
Example 36
    def handle(self, output_database, source_engine_id, **options):
        with connection_manager.get_engine(source_engine_id).begin() as conn:
            self.all_tables = get_all_tables(conn)
            self.parent_child_mapping = get_parent_child_mapping(conn)
            self.child_parent_mapping = {
                child: parent
                for parent, children in self.parent_child_mapping.items()
                for child in children
            }

        self.table_count = 0
        self.db = sqlite3.connect(output_database)
        try:
            self.setup_sqlite_db()
            self.generate_dump_script(source_engine_id)
            self.stdout.write("\n{} tables processed\n".format(self.table_count))
        finally:
            self.db.close()
Example 37
def partition_child_health():
    engine = connection_manager.get_engine(ICDS_UCR_CITUS_ENGINE_ID)
    queries = [
        "ALTER TABLE child_health_monthly RENAME TO child_health_old_partition",
        "CREATE TABLE child_health_monthly (LIKE child_health_old_partition) PARTITION BY LIST (month)",
        "SELECT create_distributed_table('child_health_monthly', 'supervisor_id')",
        "ALTER TABLE child_health_monthly ATTACH PARTITION child_health_old_partition DEFAULT"
        ]
    with engine.begin() as connection:
        # check if we have already partitioned this table (necessary for reusedb)
        q = connection.execute("select exists (select * from pg_tables where tablename='child_health_old_partition')")
        if q.first()[0]:
            return
        for query in queries:
            connection.execute(query)
        for view in get_view_migrations():
            with open(view.sql, "r", encoding='utf-8') as sql_file:
                sql_to_execute = sql_file.read()
                connection.execute(sql_to_execute)
Example 38
def load_data():
    engine = connection_manager.get_engine('default')
    metadata.bind = engine
    user_table.drop(engine, checkfirst=True)
    region_table.drop(engine, checkfirst=True)
    metadata.create_all()

    user_data = [
        {"user": "******", "date": date(2013, 1, 1), "indicator_a": 1, "indicator_b": 0, "indicator_c": 1, "indicator_d": 1},
        {"user": "******", "date": date(2013, 2, 1), "indicator_a": 0, "indicator_b": 1, "indicator_c": 1, "indicator_d": 1},
        {"user": "******", "date": date(2013, 1, 1), "indicator_a": 0, "indicator_b": 1, "indicator_c": 1, "indicator_d": 2},
        {"user": "******", "date": date(2013, 2, 1), "indicator_a": 1, "indicator_b": 0, "indicator_c": 1, "indicator_d": 2},
    ]

    region_data = [
        {"region": "region1", "sub_region": "region1_a", "date": date(2013, 1, 1), "indicator_a": 1, "indicator_b": 0},
        {"region": "region1", "sub_region": "region1_a", "date": date(2013, 2, 1), "indicator_a": 1, "indicator_b": 1},

        {"region": "region1", "sub_region": "region1_b", "date": date(2013, 1, 1), "indicator_a": 0, "indicator_b": 1},
        {"region": "region1", "sub_region": "region1_b", "date": date(2013, 2, 1), "indicator_a": 0, "indicator_b": 0},

        {"region": "region2", "sub_region": "region2_a", "date": date(2013, 1, 1), "indicator_a": 0, "indicator_b": 1},
        {"region": "region2", "sub_region": "region2_a", "date": date(2013, 2, 1), "indicator_a": 1, "indicator_b": 1},

        {"region": "region2", "sub_region": "region2_b", "date": date(2013, 1, 1), "indicator_a": 1, "indicator_b": 0},
        {"region": "region2", "sub_region": "region2_b", "date": date(2013, 2, 1), "indicator_a": 0, "indicator_b": 0},
        ]

    connection = engine.connect()
    try:
        connection.execute(user_table.delete())
        connection.execute(region_table.delete())
        for d in user_data:
            insert = user_table.insert().values(**d)
            connection.execute(insert)

        for d in region_data:
            insert = region_table.insert().values(**d)
            connection.execute(insert)
    finally:
        connection.close()
        engine.dispose()
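
load_data relies on module-level metadata, user_table and region_table objects defined elsewhere. A minimal sketch of compatible definitions, assuming plain SQLAlchemy Core; the table names are hypothetical and the column types are inferred from the row dicts above.

import sqlalchemy
from sqlalchemy import Column, Date, Integer, String

metadata = sqlalchemy.MetaData()

user_table = sqlalchemy.Table(
    'user_report_data', metadata,  # hypothetical name
    Column('user', String(50)),
    Column('date', Date),
    Column('indicator_a', Integer),
    Column('indicator_b', Integer),
    Column('indicator_c', Integer),
    Column('indicator_d', Integer),
)

region_table = sqlalchemy.Table(
    'region_report_data', metadata,  # hypothetical name
    Column('region', String(50)),
    Column('sub_region', String(50)),
    Column('date', Date),
    Column('indicator_a', Integer),
    Column('indicator_b', Integer),
)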
Example 40
def tearDownModule():
    if isinstance(Domain.get_db(), Mock):
        # needed to skip setUp for the javascript tests thread on Travis
        return

    _call_center_domain_mock = mock.patch(
        'corehq.apps.callcenter.data_source.call_center_data_source_configuration_provider'
    )
    domain = Domain.get_by_name('test-pna')
    engine = connection_manager.get_engine(UCR_ENGINE_ID)
    metadata = sqlalchemy.MetaData(bind=engine)
    metadata.reflect(bind=engine, extend_existing=True)
    path = os.path.join(os.path.dirname(__file__), 'fixtures')
    for file_name in os.listdir(path):
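        # file_name[:-4] drops the 4-character extension (presumably '.csv')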
        table_name = get_table_name(domain.name, file_name[:-4])
        table = metadata.tables[table_name]
        table.drop()
    _call_center_domain_mock.start()
    domain.delete()
    _call_center_domain_mock.stop()
Example 41
def tearDownModule():
    if isinstance(Domain.get_db(), Mock):
        # needed to skip setUp for the javascript tests thread on Travis
        return

    _call_center_domain_mock = mock.patch(
        'corehq.apps.callcenter.data_source.call_center_data_source_configuration_provider'
    )
    domain = Domain.get_by_name('up-nrhm')
    engine = connection_manager.get_engine(UCR_ENGINE_ID)
    metadata = sqlalchemy.MetaData(bind=engine)
    metadata.reflect(bind=engine, extend_existing=True)
    path = os.path.join(os.path.dirname(__file__), 'fixtures')
    for file_name in os.listdir(path):
        table_name = get_table_name(domain.name, file_name[:-4])
        table = metadata.tables[table_name]
        table.drop()
    _call_center_domain_mock.start()
    domain.delete()
    _call_center_domain_mock.stop()
Example 42
def create_ucr_views(dry_run=False, tables_by_engine=None):
    tables_by_engine = tables_by_engine or _get_old_new_tablenames()
    for engine_id, dsconfs in tables_by_engine.items():
        print('\tChecking {} tables in engine "{}"'.format(
            len(dsconfs), engine_id))
        engine = connection_manager.get_engine(engine_id)
        view_creates = []
        with engine.begin() as conn:
            for dsconf in dsconfs:
                if _should_add_view(conn, dsconf.old_table, dsconf.new_table):
                    view_creates.append(
                        'CREATE VIEW "{}" AS SELECT * FROM "{}";'.format(
                            dsconf.new_table, dsconf.old_table))

        if view_creates:
            print('\tCreating {} views in engine "{}"'.format(
                len(view_creates), engine_id))
            with engine.begin() as conn:
                _run_sql_with_logging(conn, '\n'.join(view_creates), dry_run)
        else:
            print('\tNo views to create in engine "{}"'.format(engine_id))
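
_should_add_view and _run_sql_with_logging are not reproduced on this page. A plausible sketch of the former (an assumption, not the project's actual code): add a view only when the old table exists and nothing already occupies the new name.

from sqlalchemy import text

def _should_add_view(conn, old_table, new_table):
    def _relation_exists(name):
        return conn.execute(
            text("SELECT EXISTS (SELECT 1 FROM pg_class WHERE relname = :name)"),
            name=name,
        ).scalar()
    return _relation_exists(old_table) and not _relation_exists(new_table)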
Example 43
    def rebuild_tables_if_necessary(self):
        tables_by_engine = defaultdict(dict)
        for adapter in self.table_adapters:
            tables_by_engine[adapter.engine_id][adapter.get_table().name] = adapter

        _assert = soft_assert(to='@'.join(['czue', 'dimagi.com']))
        _notify_cory = lambda msg, obj: _assert(False, msg, obj)

        for engine_id, table_map in tables_by_engine.items():
            engine = connection_manager.get_engine(engine_id)
            with engine.begin() as connection:
                migration_context = get_migration_context(connection, table_map.keys())
                diffs = compare_metadata(migration_context, metadata)

            tables_to_rebuild = get_tables_to_rebuild(diffs, table_map.keys())
            for table_name in tables_to_rebuild:
                sql_adapter = table_map[table_name]
                if not is_static(sql_adapter.config._id):
                    try:
                        rev_before_rebuild = sql_adapter.config.get_db().get_rev(sql_adapter.config._id)
                        self.rebuild_table(sql_adapter)
                    except TableRebuildError as e:
                        _notify_cory(six.text_type(e), sql_adapter.config.to_json())
                    else:
                        # note: this fancy logging can be removed as soon as we get to the
                        # bottom of http://manage.dimagi.com/default.asp?211297
                        # if no signs of it popping back up by april 2016, should remove this
                        rev_after_rebuild = sql_adapter.config.get_db().get_rev(sql_adapter.config._id)
                        _notify_cory(
                            u'rebuilt table {} ({}) because {}. rev before: {}, rev after: {}'.format(
                                table_name,
                                u'{} [{}]'.format(sql_adapter.config.display_name, sql_adapter.config._id),
                                diffs,
                                rev_before_rebuild,
                                rev_after_rebuild,
                            ),
                            sql_adapter.config.to_json(),
                        )
                else:
                    self.rebuild_table(sql_adapter)
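
get_tables_to_rebuild is likewise not shown. A rough sketch of the idea (hypothetical, inferred from how the result is used above): keep only the table-level alembic diffs that touch one of the tracked tables.

def get_tables_to_rebuild(diffs, table_names):
    # compare_metadata yields entries like ('add_table', Table) or
    # ('remove_table', Table); column-level diffs arrive as nested lists.
    return {
        diff[1].name
        for diff in diffs
        if isinstance(diff, tuple)
        and diff[0] in ('add_table', 'remove_table')
        and diff[1].name in table_names
    }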
Example 44
def get_growth_monitoring_details(domain, case_id):
    engine = connection_manager.get_engine(ICDS_UCR_ENGINE_ID)
    metadata = sqlalchemy.MetaData(bind=engine)
    metadata.reflect(bind=engine, extend_existing=True)
    ucr_table = metadata.tables[get_table_name(domain,
                                               'static-child_health_cases')]
    chm_table = metadata.tables['child_health_monthly']

    select_query = select([
        ucr_table.c.case_id,
        ucr_table.c.name,
        ucr_table.c.mother_name,
        ucr_table.c.dob,
        ucr_table.c.sex,
        chm_table.c.age_in_months,
        chm_table.c.recorded_weight,
        chm_table.c.recorded_height,
    ]).select_from(
        ucr_table.join(
            chm_table, chm_table.c.case_id == ucr_table.c.doc_id)).where(
                chm_table.c.case_id == case_id).order_by('month asc')

    return select_query.execute()
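
Hypothetical usage of the query above; the domain and case id are placeholder values, and rows come back sorted by the order_by('month asc') clause.

rows = get_growth_monitoring_details('icds-cas', 'a-case-id')
for row in rows:
    print(row.case_id, row.age_in_months, row.recorded_weight, row.recorded_height)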
Example 45
    def _rebuild_sql_tables(self, adapters):
        tables_by_engine = defaultdict(dict)
        all_adapters = []
        for adapter in adapters:
            if getattr(adapter, 'all_adapters', None):
                all_adapters.extend(adapter.all_adapters)
            else:
                all_adapters.append(adapter)
        for adapter in all_adapters:
            try:
                tables_by_engine[adapter.engine_id][adapter.get_table().name] = adapter
            except BadSpecError:
                _soft_assert = soft_assert(to='{}@{}'.format('jemord', 'dimagi.com'))
                _soft_assert(False, "Broken data source {}".format(adapter.config.get_id))

        _assert = soft_assert(notify_admins=True)
        _notify_rebuild = lambda msg, obj: _assert(False, msg, obj)

        for engine_id, table_map in tables_by_engine.items():
            table_names = list(table_map)
            engine = connection_manager.get_engine(engine_id)

            diffs = get_table_diffs(engine, table_names, get_metadata(engine_id))

            tables_to_act_on = get_tables_rebuild_migrate(diffs, table_names)
            for table_name in tables_to_act_on.rebuild:
                pillow_logging.debug("[rebuild] Rebuilding table: %s", table_name)
                sql_adapter = table_map[table_name]
                if not sql_adapter.config.is_static:
                    try:
                        self.rebuild_table(sql_adapter)
                    except TableRebuildError as e:
                        _notify_rebuild(six.text_type(e), sql_adapter.config.to_json())
                else:
                    self.rebuild_table(sql_adapter)

            self.migrate_tables(engine, diffs.raw, tables_to_act_on.migrate, table_map)
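
The container shapes implied by the calls above, as a sketch (assumptions inferred from usage, not the actual definitions): get_table_diffs returns an object exposing .raw, and get_tables_rebuild_migrate splits table names into .rebuild and .migrate buckets.

from collections import namedtuple

TableDiffs = namedtuple('TableDiffs', ['raw', 'formatted'])
TablesToActOn = namedtuple('TablesToActOn', ['rebuild', 'migrate'])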
Example 46
    def get_sql_engine(cls):
        engine = getattr(cls, '_engine', None)
        if not engine:
            cls._engine = connection_manager.get_engine('default')
        return cls._engine
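
Because the engine is cached on the class, repeated calls return the same object; a tiny illustrative check (the class name here is hypothetical).

assert SomeTestCase.get_sql_engine() is SomeTestCase.get_sql_engine()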
Example 47
    def __init__(self, database_name):
        self.database_name = database_name
        # use db1 engine to create db2 http://stackoverflow.com/a/8977109/8207
        self.root_engine = connection_manager.get_engine('default')
Example 48
    def setUpClass(cls):
        super(ReportTestCase, cls).setUpClass()
        cls.domain = create_domain('test-domain')
        cls.web_user = WebUser.get_by_username('test')
        if not cls.web_user:
            cls.web_user = WebUser.create('test-domain', 'test', 'test')

        SQLProduct.objects.create(
            domain=cls.domain.name,
            name='Product 2',
            code='p2',
            product_id='p2'
        )

        SQLProduct.objects.create(
            domain=cls.domain.name,
            name='Product 3',
            code='p3',
            product_id='p3'
        )

        SQLProduct.objects.create(
            domain=cls.domain.name,
            name='Product 1',
            code='p1',
            product_id='p1'
        )

        region_location_type = LocationType.objects.create(
            domain=cls.domain.name,
            name='Region',
        )

        SQLLocation.objects.create(
            domain=cls.domain.name,
            name='Region 1',
            location_id='r1',
            location_type=region_location_type
        )

        district_location_type = LocationType.objects.create(
            domain=cls.domain.name,
            name='District',
        )

        SQLLocation.objects.create(
            domain=cls.domain.name,
            name='District 1',
            location_id='d1',
            location_type=district_location_type
        )

        cls.engine = connection_manager.get_engine('default')
        cls.intra_table = IntraHealthFluff._table
        cls.couverture_table = CouvertureFluff._table
        cls.taux_table = TauxDeRuptureFluff._table
        cls.taux_de_satisfaction_table = TauxDeSatisfactionFluff._table
        cls.livraison_table = LivraisonFluff._table
        cls.recouvrement_table = RecouvrementFluff._table

        with cls.engine.begin() as connection:
            cls.couverture_table.create(connection, checkfirst=True)
            cls.intra_table.create(connection, checkfirst=True)
            cls.taux_table.create(connection, checkfirst=True)
            cls.taux_de_satisfaction_table.create(connection, checkfirst=True)
            cls.recouvrement_table.create(connection, checkfirst=True)
            cls.livraison_table.create(connection, checkfirst=True)

            insert = cls.intra_table.insert().values([
                dict(
                    doc_id='1',
                    date=datetime(2017, 11, 17),
                    product_name='Product 1',
                    product_id='p1',
                    district_id='d1',
                    district_name='District 1',
                    location_id='pps1',
                    region_id='r1',
                    PPS_name='PPS 1',
                    actual_consumption_total=10,
                    billed_consumption_total=5,
                    stock_total=33,
                    total_stock_total=70,
                    quantity_total=13,
                    cmm_total=16
                ),
                dict(
                    doc_id='1',
                    date=datetime(2017, 11, 17),
                    product_name='Product 2',
                    product_id='p2',
                    district_id='d1',
                    district_name='District 1',
                    location_id='pps1',
                    region_id='r1',
                    PPS_name='PPS 1',
                    actual_consumption_total=2,
                    billed_consumption_total=2,
                    stock_total=1,
                    total_stock_total=2,
                    quantity_total=3,
                    cmm_total=4
                ),
                dict(
                    doc_id='1',
                    date=datetime(2017, 11, 17),
                    product_name='Product 3',
                    product_id='p3',
                    district_id='d1',
                    district_name='District 1',
                    location_id='pps1',
                    region_id='r1',
                    PPS_name='PPS 1',
                    actual_consumption_total=6,
                    billed_consumption_total=4,
                    stock_total=14,
                    total_stock_total=0,
                    quantity_total=88,
                    cmm_total=99
                ),
                dict(
                    doc_id='2',
                    date=datetime(2017, 11, 17),
                    product_name='Product 1',
                    product_id='p1',
                    district_id='d1',
                    district_name='District 1',
                    location_id='pps2',
                    region_id='r1',
                    PPS_name='PPS 2',
                    actual_consumption_total=13,
                    billed_consumption_total=11,
                    stock_total=50,
                    total_stock_total=100,
                    quantity_total=1,
                    cmm_total=8
                ),
                dict(
                    doc_id='2',
                    date=datetime(2017, 11, 17),
                    product_name='Product 2',
                    product_id='p2',
                    district_id='d1',
                    district_name='District 1',
                    location_id='pps2',
                    region_id='r1',
                    PPS_name='PPS 2',
                    actual_consumption_total=0,
                    billed_consumption_total=0,
                    stock_total=2,
                    total_stock_total=17,
                    quantity_total=3,
                    cmm_total=15
                ),
                dict(
                    doc_id='2',
                    date=datetime(2017, 11, 17),
                    product_name='Product 3',
                    product_id='p3',
                    district_id='d1',
                    district_name='District 1',
                    location_id='pps2',
                    region_id='r1',
                    PPS_name='PPS 2',
                    actual_consumption_total=150,
                    billed_consumption_total=11,
                    stock_total=4,
                    total_stock_total=0,
                    quantity_total=11,
                    cmm_total=12
                )
            ])
            connection.execute(insert)
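
A matching tearDownClass sketch (not shown on the original page; an assumption built from the tables created in setUpClass above):

    @classmethod
    def tearDownClass(cls):
        with cls.engine.begin() as connection:
            for table in (cls.livraison_table, cls.recouvrement_table,
                          cls.taux_de_satisfaction_table, cls.taux_table,
                          cls.intra_table, cls.couverture_table):
                table.drop(connection, checkfirst=True)
        cls.engine.dispose()
        super(ReportTestCase, cls).tearDownClass()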
Example 49
def setUpModule():
    if settings.USE_PARTITIONED_DATABASE:
        print('============= WARNING: not running test setup because settings.USE_PARTITIONED_DATABASE is True.')
        return

    _call_center_domain_mock = mock.patch(
        'corehq.apps.callcenter.data_source.call_center_data_source_configuration_provider'
    )
    _call_center_domain_mock.start()

    domain = create_domain('icds-cas')
    location_type = LocationType.objects.create(
        domain=domain.name,
        name='block',
    )
    SQLLocation.objects.create(
        domain=domain.name,
        name='b1',
        location_id='b1',
        location_type=location_type
    )

    state_location_type = LocationType.objects.create(
        domain=domain.name,
        name='state',
    )
    SQLLocation.objects.create(
        domain=domain.name,
        name='st1',
        location_id='st1',
        location_type=state_location_type
    )
    SQLLocation.objects.create(
        domain=domain.name,
        name='st2',
        location_id='st2',
        location_type=state_location_type
    )
    SQLLocation.objects.create(
        domain=domain.name,
        name='st3',
        location_id='st3',
        location_type=state_location_type
    )
    SQLLocation.objects.create(
        domain=domain.name,
        name='st4',
        location_id='st4',
        location_type=state_location_type
    )
    SQLLocation.objects.create(
        domain=domain.name,
        name='st5',
        location_id='st5',
        location_type=state_location_type
    )
    SQLLocation.objects.create(
        domain=domain.name,
        name='st6',
        location_id='st6',
        location_type=state_location_type
    )
    SQLLocation.objects.create(
        domain=domain.name,
        name='st7',
        location_id='st7',
        location_type=state_location_type
    )

    awc_location_type = LocationType.objects.create(
        domain=domain.name,
        name='awc',
    )
    SQLLocation.objects.create(
        domain=domain.name,
        name='a7',
        location_id='a7',
        location_type=awc_location_type
    )

    with override_settings(SERVER_ENVIRONMENT='icds'):
        configs = StaticDataSourceConfiguration.by_domain('icds-cas')
        adapters = [get_indicator_adapter(config) for config in configs]

        for adapter in adapters:
            try:
                adapter.drop_table()
            except Exception:
                pass
            adapter.build_table()

        engine = connection_manager.get_engine(ICDS_UCR_ENGINE_ID)
        metadata = sqlalchemy.MetaData(bind=engine)
        metadata.reflect(bind=engine, extend_existing=True)
        path = os.path.join(os.path.dirname(__file__), 'fixtures')
        for file_name in os.listdir(path):
            with open(os.path.join(path, file_name), encoding='utf-8') as f:
                table_name = FILE_NAME_TO_TABLE_MAPPING[file_name[:-4]]
                table = metadata.tables[table_name]
                if not table_name.startswith('icds_dashboard_'):
                    columns = [
                        '"{}"'.format(c.strip())  # quote to preserve case
                        for c in f.readline().split(',')
                    ]
                    postgres_copy.copy_from(
                        f, table, engine, format='csv' if six.PY3 else b'csv',
                        null='' if six.PY3 else b'', columns=columns
                    )

        for state_id in ('st1', 'st2'):
            _aggregate_child_health_pnc_forms(state_id, datetime(2017, 3, 31))
            _aggregate_gm_forms(state_id, datetime(2017, 3, 31))
            _aggregate_bp_forms(state_id, datetime(2017, 3, 31))

        try:
            move_ucr_data_into_aggregation_tables(datetime(2017, 5, 28), intervals=2)
            build_incentive_report(agg_date=datetime(2017, 5, 28))
        except Exception as e:
            print(e)
            tearDownModule()
            raise
        finally:
            _call_center_domain_mock.stop()
Example 50
def setUpModule():
    if isinstance(Domain.get_db(), Mock):
        # needed to skip setUp for the javascript tests thread on Travis
        return

    _call_center_domain_mock = mock.patch(
        'corehq.apps.callcenter.data_source.call_center_data_source_configuration_provider'
    )
    _call_center_domain_mock.start()

    domain = create_domain('test-pna')
    region_location_type = LocationType.objects.create(
        domain='test-pna',
        name='R\u00e9gion',
    )

    SQLLocation.objects.create(
        domain='test-pna',
        name='Region Test',
        location_id='8cde73411ddc4488a7f913c99499ead4',
        location_type=region_location_type
    )

    SQLLocation.objects.create(
        domain='test-pna',
        name='PASSY',
        location_id='1991b4dfe166335e342f28134b85fcac',
        location_type=region_location_type
    )

    SQLLocation.objects.create(
        domain='test-pna',
        name='r1',
        location_id='0682630532ff25717176320482ff1028',
        location_type=region_location_type
    )

    SQLLocation.objects.create(
        domain='test-pna',
        name='r2',
        location_id='582c5d65a307baa7a38e7b5e651fd5fc',
        location_type=region_location_type
    )

    SQLLocation.objects.create(
        domain='test-pna',
        name='r3',
        location_id='bd0395ba4a4fbd38c90765bd04208a8f',
        location_type=region_location_type
    )

    SQLLocation.objects.create(
        domain='test-pna',
        name='r4',
        location_id='6ed1f958fccd1b8202e8e30851a2b326',
        location_type=region_location_type
    )

    SQLLocation.objects.create(
        domain='test-pna',
        name='r5',
        location_id='1991b4dfe166335e342f28134b85f516',
        location_type=region_location_type
    )

    district_location_type = LocationType.objects.create(
        domain='test-pna',
        name='District',
    )

    SQLLocation.objects.create(
        domain='test-pna',
        name='District Test',
        location_id='3db74fac2bad4e708e2b03800cc5ab73',
        location_type=district_location_type
    )

    pps_location_type = LocationType.objects.create(
        domain='test-pna',
        name='PPS',
    )

    SQLLocation.objects.create(
        domain='test-pna',
        name='P2',
        location_id='ccf4430f5c3f493797486d6ce1c39682',
        location_type=pps_location_type
    )

    SQLProduct.objects.create(
        domain='test-pna',
        name='Collier',
        code='product1',
        product_id='product1'
    )

    SQLProduct.objects.create(
        domain='test-pna',
        name='CU',
        code='product2',
        product_id='product2'
    )

    SQLProduct.objects.create(
        domain='test-pna',
        name='Depo-Provera',
        code='product3',
        product_id='product3'
    )

    SQLProduct.objects.create(
        domain='test-pna',
        name='DIU',
        code='product4',
        product_id='product4'
    )

    SQLProduct.objects.create(
        domain='test-pna',
        name='Jadelle',
        code='product5',
        product_id='product5'
    )

    SQLProduct.objects.create(
        domain='test-pna',
        name='Microgynon/Lof.',
        code='product6',
        product_id='product6'
    )

    SQLProduct.objects.create(
        domain='test-pna',
        name='Microlut/Ovrette',
        code='product7',
        product_id='product7'
    )

    SQLProduct.objects.create(
        domain='test-pna',
        name='Preservatif Feminin',
        code='product8',
        product_id='product8'
    )

    SQLProduct.objects.create(
        domain='test-pna',
        name='Preservatif Masculin',
        code='product9',
        product_id='product9'
    )

    SQLProduct.objects.create(
        domain='test-pna',
        name='Sayana Press',
        code='product10',
        product_id='product10'
    )

    SQLProduct.objects.create(
        domain='test-pna',
        name='IMPLANON',
        code='product11',
        product_id='product11'
    )

    SQLProduct.objects.create(
        domain='test-pna',
        name='Product 7',
        code='p7',
        product_id='p7'
    )

    with override_settings(SERVER_ENVIRONMENT='production'):

        configs = StaticDataSourceConfiguration.by_domain(domain.name)
        adapters = [get_indicator_adapter(config) for config in configs]

        for adapter in adapters:
            adapter.build_table()

        engine = connection_manager.get_engine(UCR_ENGINE_ID)
        metadata = sqlalchemy.MetaData(bind=engine)
        metadata.reflect(bind=engine, extend_existing=True)
        path = os.path.join(os.path.dirname(__file__), 'fixtures')
        for file_name in os.listdir(path):
            with open(os.path.join(path, file_name), encoding='utf-8') as f:
                table_name = get_table_name(domain.name, file_name[:-4])
                table = metadata.tables[table_name]
                postgres_copy.copy_from(
                    f, table, engine, format='csv' if six.PY3 else b'csv', null='' if six.PY3 else b'', header=True
                )
    _call_center_domain_mock.stop()