Example #1
async def test_client(aiohttp_client):
    app = web.Application()
    config.get_config(app, DEFAULT_CONFIG_PATH)
    app.add_routes(routes.routes)

    app.redis = redis.Redis(host='redis_test')

    app.on_startup.extend([init_db.init_database])
    app.on_cleanup.extend([init_db.close_database])

    return await aiohttp_client(app)
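A usage sketch for the fixture above, assuming it is registered with @pytest.fixture in the original module and that the application exposes a GET route (the /health path below is hypothetical):

async def test_health_endpoint(test_client):
    # the fixture resolves to the aiohttp test client created above
    resp = await test_client.get('/health')  # hypothetical route
    assert resp.status == 200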
Example #2
File: app.py Project: AREXP/pure_cv
def init_app(argv=None) -> web.Application:
    args = parse_args()
    app = web.Application()
    app.add_routes(routes.routes)

    config.get_config(app, args.config_path)

    app.redis = redis.Redis(host='redis')

    app.on_startup.extend([init_db.init_database])
    app.on_cleanup.extend([init_db.close_database])

    return app
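A possible entry point for this factory, run with aiohttp's built-in server; the port is an assumption for illustration, not taken from the project:

if __name__ == '__main__':
    # start the application returned by the factory above
    web.run_app(init_app(), port=8080)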
Example #3
    def run(self):

        city_stats_location = 'Phase 2 - Districts'
        config = get_config()
        repository = AWSFileRepository(config['aws']['bucket production'])

        if repository.exists(city_stats_location):
            city_stats = repository.get_dataframe(city_stats_location)
            # -- sort by date
            city_stats = city_stats.sort_values(by=['date'])

            # critical cities
            critical_city_stats = critical_districts(data=city_stats)
            # download to local fs
            critical_city_stats.to_csv(self.output().path, index=False)

            # debug output: inspect the critical cities that were written
            print("critical cities")
            print(critical_city_stats)
            print(critical_city_stats.columns)
        else:
            log.error("Missing City Stats Data")

        return None
Example #4
    def run(self):
        # TODO: this should fail partially if one of its dependencies fails,
        # i.e. it should keep processing all the other wards even if a few failed.

        ward_storage_location = 'raw_ward_data'
        config = get_config()
        repository = AWSFileRepository(config['aws']['bucket production'])

        if repository.exists(ward_storage_location):
            all_wards = repository.get_dataframe(ward_storage_location)
            all_wards = all_wards.set_index(
                ['state', 'district', 'ward', 'date'])
        else:
            all_wards = None

        for district, ward_task in self.input().items():
            log.info(f'Processing: {district}')

            # the task's target actually contains CSV data; read it into a DataFrame
            with ward_task.open('r') as csv_file:
                ward_df = pd.read_csv(csv_file, parse_dates=['date'])
                ward_df = ward_df.set_index(
                    ['state', 'district', 'ward', 'date'])

            # This needs to support overwriting existing data as well as adding new data
            # TODO make a test for it
            if all_wards is None:
                all_wards = ward_df
            else:
                all_wards = all_wards.combine_first(
                    ward_df)  # keep existing values, add new rows

        # cleanup
        for task in self.input().values():
            task.remove()

        # store the raw data, no imputation done yet
        repository.store_dataframe(all_wards,
                                   ward_storage_location,
                                   allow_create=True,
                                   store_index=True)

        # impute deltas; at least for Mumbai this is needed since it only provides totals
        delta_needed_for = [
            'tested', 'confirmed', 'recovered', 'deceased', 'active', 'other'
        ]
        group_by_cols = ['state', 'district', 'ward']
        all_wards = interpolate_values(all_wards, group_by_cols,
                                       delta_needed_for)
        all_wards = create_delta_cols(all_wards, group_by_cols,
                                      delta_needed_for)

        # add population
        static_ward_data = get_static_ward_data()
        all_wards = all_wards.join(static_ward_data,
                                   on=group_by_cols,
                                   how='left')

        all_wards.to_csv(self.output().path, index=True)
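A side note on the combine_first step above: pandas keeps the caller's non-null values and only fills gaps (missing cells or entirely new rows) from the argument. A minimal sketch with made-up numbers:

import pandas as pd

existing = pd.DataFrame({'confirmed': [10, 20]},
                        index=pd.Index(['2021-05-01', '2021-05-02'], name='date'))
fresh = pd.DataFrame({'confirmed': [25, 30]},
                     index=pd.Index(['2021-05-02', '2021-05-03'], name='date'))

# existing values win, new dates are appended:
# 2021-05-01 -> 10, 2021-05-02 -> 20 (not 25), 2021-05-03 -> 30
print(existing.combine_first(fresh))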
Example #5
    def setUp(self) -> None:
        config = get_config()
        self._url = config['google sheets']['url development']
        self._sheets_created = []
        self._test_storage_location = 'for_integration_tests'

        # make sure there is a basic worksheet for the other tests to run on.
        # We are not deleting this one; we just want to make sure it's always there.
        self.test_create_storage_location()
Example #6
    def run(self):
        
        ward_stats_location = 'Phase 2 - Wards'
        config = get_config()
        repository = AWSFileRepository(config['aws']['bucket production'])

        if repository.exists(ward_stats_location):
            ward_stats = repository.get_dataframe(ward_stats_location)
            # -- sort by date
            ward_stats = ward_stats.sort_values(by=['date'])

            # download to local fs
            ward_stats.to_csv(self.output().path, index=False)
        else:
            log.error("Missing Ward Stats Data")

        return None
Example #7
    def run(self):
        if 'stopcoronavirus' in self.file_url:
            dashboard_storage = 'mumbai_dashboard.pdf'
            config = get_config()
            repository = AWSFileRepository(config['aws']['bucket production'])

            if repository.exists(dashboard_storage):
                repository._download_file(self._temp_file.path,
                                          config['aws']['bucket production'],
                                          dashboard_storage)
            else:
                print("Mumbai file not found in S3 bucket")

        else:
            response = requests.get(
                self.file_url,
                verify='tasks/districts/mumbaiwards_consolidated.pem')
            with open(self.output().path, mode='wb') as output_file:
                output_file.write(response.content)
Example #8
    def run(self):
        config = get_config()
        repository = AWSFileRepository(config['aws']['bucket production'])

        # import master districts data
        all_districts = repository.get_dataframe(self.s3_districts_path)

        # delete existing RT/DT columns

        avl_cols = [
            x for x in all_districts.columns
            if x in ['mean.RT', 'upper.RT', 'lower.RT', 'dt']
        ]
        for avl_col in avl_cols:
            del all_districts[avl_col]

        # read RT results
        rt_results0 = pd.read_csv(self.local_rt_path, parse_dates=["date"])
        # specify the column names containing the Rt values
        rt_colname = ['mean', 'upper', 'lower']
        rt_results = rt_results0[['city', 'date'] + rt_colname]
        rt_results.columns = [
            'district', 'date', 'mean.RT', 'upper.RT', 'lower.RT'
        ]
        # join rt data with all districts data
        all_districts = all_districts.merge(rt_results,
                                            on=['district', 'date'],
                                            how='left')

        # read DT results
        dt_results0 = pd.read_csv(self.local_dt_path, parse_dates=["date"])
        # pick only the relevant columns
        dt_results = dt_results0[["district", "date", "dt"]]
        all_districts = all_districts.merge(dt_results,
                                            on=['district', 'date'],
                                            how='left')

        # push RT/DT Critical Cities Updates to Repo
        repository.store_dataframe(all_districts,
                                   self.s3_districts_update_path,
                                   allow_create=True)
Example #9
def create_app(test_config=None):
    """ Flask application factory

        Loads the application configuration, creates the Flask instance
        and attaches some hooks to it.
        The function is called via the 'flask' command line with FLASK_APP=backend as
        environment variable.
    """
    app = Flask(__name__)
    app.config.from_mapping(
        mongodb={},
    )

    if test_config is None:
        app.config.update(get_config())
    else:
        app.config.update(test_config)

    @app.route('/')
    def index():
        return "Welcome Paranuara Citizen API"

    @app.errorhandler(BusinessException)
    def handle_not_found(error):
        response = jsonify(error.to_dict())
        response.status_code = error.status_code
        return response

    # Register application commands
    db.init_app(app)
    doc.init_app(app)

    app.register_blueprint(people_v1, url_prefix='/v1/people')
    app.register_blueprint(companies_v1, url_prefix='/v1/companies')
    
    app.after_request(add_cors_headers)

    return app
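A minimal usage sketch for this factory in a test, assuming the placeholder mongodb settings are enough for db.init_app to succeed:

app = create_app(test_config={'mongodb': {}})

with app.test_client() as client:
    # the index route defined inside the factory returns the welcome banner
    response = client.get('/')
    assert response.status_code == 200
    assert b"Welcome Paranuara Citizen API" in response.data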
Example #10
    def run(self):
        config = get_config()

        # we are skipping older data since we only have low case numbers there.
        start_date = datetime.strptime(config['dashboard']['start date'],
                                       '%Y-%m-%d')
        # repository = GSheetRepository(config['google sheets']['url production'])
        repository = AWSFileRepository(config['aws']['bucket production'])

        fetch_covid19_india_task = self.input()['state_district_data']
        fetch_wards_task = self.input()['ward_data']

        with fetch_covid19_india_task.open('r') as json_file:
            all_covid19india_data = json.load(json_file)

        all_ward_data = pd.read_csv(fetch_wards_task.path,
                                    parse_dates=['date'])

        # cleanup
        fetch_covid19_india_task.remove()
        fetch_wards_task.remove()

        state_data, district_data = ExtractCovid19IndiaData().process(
            all_covid19india_data)

        # not the best location to create this, but it's ok for now
        if not repository.exists(self.storage_hospitalizations):
            df = pd.DataFrame({'date': [], 'percentages': []})
            repository.store_dataframe(df,
                                       self.storage_hospitalizations,
                                       allow_create=True)

        hospitalization_df = repository.get_dataframe(
            self.storage_hospitalizations)
        hospitalizations_updated = impute_hospitalization_percentages(
            hospitalization_df, state_data['date'])

        state_data = state_data[state_data['date'] >= start_date]
        state_data = extend_and_impute_metrics(
            raw_metrics=state_data,
            hospitalizations=hospitalizations_updated,
            grouping_columns=['state'])

        district_data = district_data[district_data['date'] >= start_date]
        district_data = extend_and_impute_metrics(
            raw_metrics=district_data,
            hospitalizations=hospitalizations_updated,
            grouping_columns=['state', 'district'])

        ward_data = all_ward_data[all_ward_data['date'] >= start_date]

        ward_data = extend_and_impute_metrics(
            raw_metrics=ward_data,
            hospitalizations=hospitalizations_updated,
            grouping_columns=['state', 'district', 'ward'])

        self.update_population_sheets(state_data, district_data, repository)

        # Idea placeholder
        # Calculate today's top 20ish cities and add that top 20 as a tab in the google sheet so the dashboard can
        # get access to it.

        # validate and filter
        self.states_is_valid = self._has_all_columns(
            state_data, self.state_columns_needed_by_dashboard)
        self.districts_is_valid = self._has_all_columns(
            district_data, self.district_columns_needed_by_dashboard)
        self.wards_is_valid = self._has_all_columns(
            ward_data, self.ward_columns_needed_by_dashboard)

        states_filtered = state_data[self.state_columns_needed_by_dashboard]
        districts_filtered = district_data[
            self.district_columns_needed_by_dashboard]
        wards_filtered = ward_data[self.ward_columns_needed_by_dashboard]

        repository.store_dataframe(hospitalizations_updated,
                                   self.storage_hospitalizations,
                                   allow_create=True)
        repository.store_dataframe(states_filtered,
                                   self.storage_states,
                                   allow_create=True)
        repository.store_dataframe(districts_filtered,
                                   self.storage_districts,
                                   allow_create=True)
        repository.store_dataframe(wards_filtered,
                                   self.storage_wards,
                                   allow_create=True)
Example #11
    def run(self):
        config = get_config()

        # we are skipping older data since we only have low case numbers there.
        start_date = datetime.strptime(config['dashboard']['start date'],
                                       '%Y-%m-%d')
        # repository = GSheetRepository(config['google sheets']['url production'])
        repository = AWSFileRepository(config['aws']['bucket production'])

        #fetch_covid19_india_task = self.input()['state_district_data']
        fetch_wards_task = self.input()['ward_data']
        fetch_districtoverview_task = self.input()['district_overview_data']

        #with fetch_covid19_india_task.open('r') as json_file:
        #    all_covid19india_data = json.load(json_file)

        states_covid19india_data = pd.read_csv(
            "https://api.covid19tracker.in/data/csv/latest/states.csv",
            parse_dates=["Date"])
        states_covid19india_data['other'] = None
        states_covid19india_data['tested'] = None
        districts_covid19india_data = pd.read_csv(
            "https://api.covid19tracker.in/data/csv/latest/districts.csv",
            parse_dates=["Date"])
        districts_covid19india_data['tested'] = None

        # read population data
        states_pop = pd.read_csv("backend/data/static_states_data.csv")[[
            'state', 'population'
        ]]
        districts_pop = pd.read_csv("backend/data/static_districts_data.csv")[[
            'state', 'district', 'population'
        ]]

        # obtain latest time series at state/district levels
        states_covid19india_data.columns = [
            'date', 'state', 'total.confirmed', 'total.recovered',
            'total.deceased', 'total.other', 'total.tested'
        ]
        states_covid19india_data['tested'] = states_covid19india_data[
            'total.tested']
        states_covid19india_data = states_covid19india_data.merge(states_pop,
                                                                  on=["state"],
                                                                  how="left")

        districts_covid19india_data.columns = [
            'date', 'state', 'district', 'total.confirmed', 'total.recovered',
            'total.deceased', 'total.other', 'total.tested'
        ]
        districts_covid19india_data['tested'] = districts_covid19india_data[
            'total.tested']
        districts_covid19india_data = districts_covid19india_data.merge(
            districts_pop, on=["state", "district"], how="left")

        all_ward_data = pd.read_csv(fetch_wards_task.path,
                                    parse_dates=['date'])
        district_overview_data = pd.read_csv(fetch_districtoverview_task.path,
                                             parse_dates=['date'])

        # cleanup
        #fetch_covid19_india_task.remove()
        fetch_wards_task.remove()

        state_data, district_data = ExtractCovid19IndiaData().process(
            states_covid19india_data, districts_covid19india_data)

        # not the best location to create this, but it's ok for now
        if not repository.exists(self.storage_hospitalizations):
            df = pd.DataFrame({'date': [], 'percentages': []})
            repository.store_dataframe(df,
                                       self.storage_hospitalizations,
                                       allow_create=True)

        hospitalization_df = repository.get_dataframe(
            self.storage_hospitalizations)
        hospitalizations_updated = impute_hospitalization_percentages(
            hospitalization_df, state_data['date'])

        state_data = state_data[state_data['date'] >= start_date]
        state_data = extend_and_impute_metrics(
            raw_metrics=state_data,
            hospitalizations=hospitalizations_updated,
            grouping_columns=['state'])

        district_data = district_data[district_data['date'] >= start_date]
        district_data = extend_and_impute_metrics(
            raw_metrics=district_data,
            hospitalizations=hospitalizations_updated,
            grouping_columns=['state', 'district'])

        ward_data = all_ward_data[all_ward_data['date'] >= start_date]

        ward_data = extend_and_impute_metrics(
            raw_metrics=ward_data,
            hospitalizations=hospitalizations_updated,
            grouping_columns=['state', 'district', 'ward'])

        #district_overview_data = district_overview_data.set_index(['date'])

        self.update_population_sheets(state_data, district_data, repository)

        # Idea placeholder
        # Calculate today's top 20ish cities and add that top 20 as a tab in the google sheet so the dashboard can
        # get access to it.

        # validate and filter
        self.states_is_valid = self._has_all_columns(
            state_data, self.state_columns_needed_by_dashboard)
        self.districts_is_valid = self._has_all_columns(
            district_data, self.district_columns_needed_by_dashboard)
        self.wards_is_valid = self._has_all_columns(
            ward_data, self.ward_columns_needed_by_dashboard)
        self.districtoverview_is_valid = self._has_all_columns(
            district_overview_data,
            self.districtoverview_columns_needed_by_dashboard)

        states_filtered = state_data[self.state_columns_needed_by_dashboard]
        districts_filtered = district_data[
            self.district_columns_needed_by_dashboard]
        wards_filtered = ward_data[self.ward_columns_needed_by_dashboard]
        districtoverview_filtered = district_overview_data[
            ['date'] + self.districtoverview_columns_needed_by_dashboard]

        repository.store_dataframe(hospitalizations_updated,
                                   self.storage_hospitalizations,
                                   allow_create=True)
        repository.store_dataframe(states_filtered,
                                   self.storage_states,
                                   allow_create=True)
        repository.store_dataframe(districts_filtered,
                                   self.storage_districts,
                                   allow_create=True)
        repository.store_dataframe(wards_filtered,
                                   self.storage_wards,
                                   allow_create=True)
        repository.store_dataframe(districtoverview_filtered,
                                   self.storage_districtoverview,
                                   allow_create=True)
Example #12
    def test_config(self):
        result = get_config()
        self.assertIsNotNone(result)
Example #13
    def test_config_gsheet(self):
        result = get_config()
        self.assertIn('google sheets', result)
        self.assertIn('url production', result['google sheets'])
        self.assertIn('url development', result['google sheets'])
Example #14
    def run(self):
        # TODO: this should fail partially if one of its dependencies fails,
        # i.e. it should keep processing all the other wards even if a few failed.

        overall_storage_location = 'raw_district_overview_stats'
        config = get_config()
        repository = AWSFileRepository(config['aws']['bucket production'])

        if repository.exists(overall_storage_location):
            overall_df = repository.get_dataframe(overall_storage_location)
            overall_df = overall_df.set_index(['date', 'metric', 'metric_type'])
        else:
            overall_df = None

        for district, overall_task in self.input().items():
            log.info(f'Processing: {district}')

            # the task's target actually contains CSV data; read it into a DataFrame
            with overall_task.open('r') as csv_file:
                overall_new = pd.read_csv(csv_file, parse_dates=['date'])
                overall_new = overall_new.set_index(['date', 'metric', 'metric_type'])

            # This needs to support overwriting existing data as well as adding new data
            # TODO make a test for it
            if overall_df is None:
                overall_df = overall_new
            else:
                overall_df = overall_df.combine_first(overall_new)  # keep existing values, add new rows

        # cleanup
        for task in self.input().values():
            task.remove()

        # store the raw data, no imputation done yet
        repository.store_dataframe(overall_df, overall_storage_location, allow_create=True, store_index=True)
        
        data_mini = overall_df.reset_index(drop=False)
        
        metrics_needed = [
            'active.ccc1.facilities', 'active.ccc2.facilities',
            'contact.traced.high.risk', 'contact.traced.low.risk',
            'total.contact.traced', 'containment.zones.active.slums.chawls',
            'containment.zones.active.micro.sealed.buildings', 'floors.sealed',
            'bed.available.dchc.dch.ccc2', 'bed.occupied.dchc.dch.ccc2',
            'bed.available.dchc.dch', 'bed.occupied.dchc.dch',
            'bed.available.icu', 'bed.occupied.icu',
            'bed.available.o2', 'bed.occupied.o2',
            'bed.available.ventilator', 'bed.occupied.ventilator',
            'active.critical', 'total.deaths', 'total.discharged',
            'active.symptomatic', 'active.asymptomatic',
            'total.tests', 'total.active', 'total.positive',
            'currently.quarantined.home'
        ]
        data_mini = data_mini.loc[data_mini['metric'].isin(metrics_needed)]

        # contact-tracing metrics report their daily figure in 'past.24hrs',
        # facility metrics in 'num.facilities', everything else in 'count'
        data_mini['value'] = data_mini.apply(
            lambda x: x['past.24hrs']
            if x['metric'] in ['contact.traced.high.risk', 'contact.traced.low.risk', 'total.contact.traced']
            else (x['num.facilities']
                  if x['metric'] in ['active.ccc1.facilities', 'active.ccc2.facilities']
                  else x['count']),
            axis=1)


        data_mini2 = data_mini.pivot(index='date', columns='metric', values='value')

        # impute deltas; at least for Mumbai this is needed since it only provides totals
        delta_needed_for = ['deaths', 'discharged', 'tests', 'positive']
        group_by_cols = []
        try:
            data_mini2 = interpolate_values_generic(data_mini2, group_by_cols, list(data_mini2))
            data_mini2 = create_delta_cols(data_mini2, group_by_cols, delta_needed_for)
        except Exception:
            log.error("error in delta/interpolate", exc_info=True)
        
        # store the processed overview data locally (population not joined in yet)
        data_mini2.to_csv(self.output().path, index=True)
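The pivot step above turns the long metric/value table into one column per metric, indexed by date. A tiny sketch with made-up numbers:

import pandas as pd

long_df = pd.DataFrame({
    'date': ['2021-05-01', '2021-05-01', '2021-05-02', '2021-05-02'],
    'metric': ['total.deaths', 'total.tests', 'total.deaths', 'total.tests'],
    'value': [5, 100, 7, 150],
})

# one row per date, one column per metric
wide_df = long_df.pivot(index='date', columns='metric', values='value')
print(wide_df)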