Example #1
    def upstream_takes(self):
        """
        Function to determine the upstream water abstraction sites from the catchment delineation.

        Returns
        -------
        DataFrame
            allocation data
        """
        if not hasattr(self, 'catch_gdf'):
            catch_gdf = self.catch_del()

        ### WAP selection
        wap1 = mssql.rd_sql(self.permit_server, param['input']['permit_database'], param['input']['crc_wap_table'], ['ExtSiteID'], where_in={'ConsentStatus': param['input']['crc_status']}).ExtSiteID.unique()

        sites3 = self.sites[self.sites.ExtSiteID.isin(wap1)].copy()
        sites3.rename(columns={'ExtSiteID': 'Wap'}, inplace=True)

        sites4 = vector.xy_to_gpd('Wap', 'NZTMX', 'NZTMY', sites3)
        sites4 = sites4.merge(sites3.drop(['NZTMX', 'NZTMY'], axis=1), on='Wap')

        waps_gdf, poly1 = vector.pts_poly_join(sites4, catch_gdf, 'ExtSiteID')
        waps_gdf.dropna(subset=['SwazName', 'SwazGroupName'], inplace=True)

        ### Get crc data
        if waps_gdf.empty:
            print('No WAPs were found in the polygon')
            allo_wap = pd.DataFrame()
        else:
            allo1 = AlloUsage(crc_filter={'ExtSiteID': waps_gdf.Wap.unique().tolist(), 'ConsentStatus': param['input']['crc_status']}, from_date=self.from_date, to_date=self.to_date)

            allo_wap1 = allo1.allo.copy()
            allo_wap = pd.merge(allo_wap1.reset_index(), waps_gdf[['Wap', 'ExtSiteID']], on='Wap')

            ## Save if required
            if hasattr(self, 'output_path'):
                run_time = pd.Timestamp.today().strftime('%Y-%m-%dT%H%M')

                waps_shp = param['output']['waps_shp'].format(run_date=run_time)
                waps_gdf.to_file(os.path.join(self.output_path, waps_shp))

                allo_data_csv = param['output']['allo_data_csv'].format(run_date=run_time)
                allo_wap.to_csv(os.path.join(self.output_path, allo_data_csv), index=False)

        ## Return
        setattr(self, 'waps_gdf', waps_gdf)
        setattr(self, 'allo_wap', allo_wap)
        return allo_wap
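
# A minimal usage sketch (the class name `Catch` and its constructor arguments
# are assumptions; the method only needs an object exposing `sites`,
# `from_date`, `to_date`, `catch_del()` and, optionally, `output_path`):
# c1 = Catch(from_date='2015-07-01', to_date='2020-06-30', output_path='results')
# allo_wap = c1.upstream_takes()  # also sets c1.waps_gdf and c1.allo_wap

Example #2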
pe2 = pe1.to_dataframe().dropna()
rain2 = rain1.to_dataframe().dropna()

## Combine datasets

both4 = pd.concat([rain2, pe2], axis=1).reset_index()

## Aggregate by catchment
both5 = vector.xy_to_gpd(['time', 'rain', 'pe'], 'x', 'y', both4)
#pts0 = both4[both4.time == '1982-07-31'].copy()
#pts0.index.name = 'index'
#pts1 = vector.xy_to_gpd(pts0.index, 'x', 'y', pts0)
catch_del = gpd.read_file(catch_del_shp_path)
catch_del.rename(columns={'SITENUMBER': 'site'}, inplace=True)

pts2, poly1 = vector.pts_poly_join(both5, catch_del, 'site')

catch_agg1 = pts2.groupby(['site', 'time'])[['rain', 'pe']].mean()
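# catch_agg1 now holds the catchment-mean rain and PE series (one value per
# site and time step).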

## Adjust the vcsn according to the precip gauge 404810
ts1 = mssql.rd_sql_ts(server,
                      database,
                      ts_table,
                      'ExtSiteID',
                      'DateTime',
                      'Value',
                      where_in={
                          'ExtSiteID': ['404810'],
                          'DatasetTypeID': [15]
                      },
                      from_date='2005-07-01',
Example #3
    ### WAP selection

    wap1 = mssql.rd_sql(permit_server,
                        permit_database,
                        crc_wap_table, ['ExtSiteID'],
                        where_in={
                            'ConsentStatus': param['crc_status']
                        }).ExtSiteID.unique()

    sites3 = sites1[sites1.ExtSiteID.isin(wap1)].copy()
    sites3.rename(columns={'ExtSiteID': 'Wap'}, inplace=True)

    sites4 = vector.xy_to_gpd('Wap', 'NZTMX', 'NZTMY', sites3)
    sites4 = sites4.merge(sites3.drop(['NZTMX', 'NZTMY'], axis=1), on='Wap')

    waps_gdf, poly1 = vector.pts_poly_join(sites4, catch_gdf, 'FlowSite')
    waps_gdf.to_file(os.path.join(results_path, waps_shp))

    ##################################
    ### Get crc data

    allo1 = AlloUsage(crc_filter={
        'ExtSiteID': waps_gdf.Wap.unique().tolist(),
        'ConsentStatus': param['crc_status']
    },
                      from_date=param['from_date'],
                      to_date=param['to_date'])

    allo_wap1 = allo1.allo.copy()
    allo_wap = pd.merge(allo_wap1.reset_index(),
                        waps_gdf[['Wap', 'FlowSite']],
Example #4
catch_shp_path = get_path(catch_shp)

sites_col_name = 'SITENUMBER'
poly_col_name = 'Catchmen_1'
line_site_col = 'NZREACH'

#######################################
### Examples

pts = util.load_geo_data(sites_shp_path)
pts['geometry'] = pts.geometry.simplify(1)

## Selecting points from within a polygon
pts1 = vector.sel_sites_poly(sites_shp_path, rec_catch_shp_path, buffer_dis=10)

## Spatial join attributes of polygons to points
pts2, poly2 = vector.pts_poly_join(sites_shp_path, catch_shp_path, poly_col_name)

## Create a GeoDataFrame from x and y data
pts_df = pts[[sites_col_name, 'geometry']].copy()
pts_df['x'] = pts_df.geometry.x
pts_df['y'] = pts_df.geometry.y
pts_df.drop('geometry', axis=1, inplace=True)

pts3 = vector.xy_to_gpd(sites_col_name, 'x', 'y', pts_df)

## Find the closest line to points
line1 = vector.closest_line_to_pts(sites_shp_path, rec_streams_shp_path, line_site_col, buffer_dis=100)


def osm_delineation(param):
    """

    """
    osm.op_endpoint = param['osm']['op_endpoint']

    ########################################
    ### Load data

    # run_time_start = pd.Timestamp.today().strftime('%Y-%m-%d %H:%M:%S')
    # print(run_time_start)

    ## Read in source data
    print('--Reading in source data...')

    json_lst = get_json_from_api(param['plan_limits']['api_url'], param['plan_limits']['api_headers'])
    json_lst1 = json_filters(json_lst, only_operative=True, only_reach_points=True)
    gjson1, hydro_units, pts_alt, sg1 = geojson_convert(json_lst1)

    combined_zones1 = [j for j in json_lst if j['id'] == param['other']['combined_zones_id']][0]
    combined_zones2 = [s['id'] for s in combined_zones1['spatialUnit']]

    no_limit1 = [j for j in json_lst if j['id'] == param['other']['no_limit_id']][0]
    no_limit2 = [s['id'] for s in no_limit1['spatialUnit']][0]

    # pts = mssql.rd_sql(param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['pts']['table'], [param['gis_waterdata']['pts']['id']], where_in={param['gis_waterdata']['pts']['id']: pts_alt.id.unique().tolist()}, geo_col=True, username=param['gis_waterdata']['username'], password=param['gis_waterdata']['password'], rename_cols=[id_col])
    pts = mssql.rd_sql(param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['pts']['table'], [param['gis_waterdata']['pts']['id']], where_in={param['gis_waterdata']['pts']['id']: pts_alt.id.unique().tolist()}, geo_col=True, rename_cols=[id_col])

    ## Point checks
    excluded_points = pts_alt[~pts_alt.id.isin(pts.SpatialUnitId)].copy()
    if not excluded_points.empty:
        print('These points are in the Plan Limits db, but have no GIS data:')
        print(excluded_points)

    bad_geo = pts[pts.geom_type != 'Point']
    if not bad_geo.empty:
        print('These points do not have a "Point" geometry (likely "MultiPoint"):')
        print(bad_geo)
        pts = pts[~pts.SpatialUnitId.isin(bad_geo.SpatialUnitId)].copy()

    cwms1 = mssql.rd_sql(param['gis_prod']['server'], param['gis_prod']['database'], param['gis_prod']['cwms']['table'], param['gis_prod']['cwms']['col_names'], rename_cols=param['gis_prod']['cwms']['rename_cols'], geo_col=True, username=param['gis_prod']['username'], password=param['gis_prod']['password'])

    # zones3 = mssql.rd_sql(param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['allo_zones']['table'], [param['gis_waterdata']['allo_zones']['id']], where_in={param['gis_waterdata']['allo_zones']['id']: combined_zones2}, username=param['gis_waterdata']['username'], password=param['gis_waterdata']['password'], geo_col=True, rename_cols=[id_col])
    zones3 = mssql.rd_sql(param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['allo_zones']['table'], [param['gis_waterdata']['allo_zones']['id']], where_in={param['gis_waterdata']['allo_zones']['id']: combined_zones2}, geo_col=True, rename_cols=[id_col])

    pts['geometry'] = pts.geometry.simplify(1)

    #######################################
    ### Run query
    print('--Pull out the waterways from OSM')

    pts1, bad_points = osm.get_nearest_waterways(pts, id_col, param['other']['search_distance'], 'all')

    waterways, nodes = osm.get_waterways(pts1, 'all')

    print('--Delineating Reaches from OSM')

    site_delin = osm.waterway_delineation(waterways, True)
    osm_delin = osm.to_osm(site_delin, nodes)
    gdf1 = osm.to_gdf(osm_delin)

    gdf2 = gdf1.to_crs(pts.crs)

    gdf3 = gdf2.merge(pts1.rename(columns={'id': 'start_node'})[['start_node', id_col]], on='start_node')

    print('--Pulling out all of Canterbury...')

    cant2 = osm.get_waterways_within_boundary(cwms1, buffer=0, waterway_type='all')
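
    # Partition Canterbury's waterways: reaches that fall inside the combined
    # allocation zones (combined1), reaches delineated from the plan-limit
    # points (gdf3), and everything else, which is tagged with the no-limit
    # id below.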

    combined1, poly1 = vector.pts_poly_join(cant2, zones3, id_col, op='intersects')
    gdf3 = gdf3[~gdf3.way_id.isin(combined1.way_id.unique())].copy()

    all_others1 = cant2[~cant2.way_id.isin(combined1.way_id)]
    all_others2 = all_others1[~all_others1.way_id.isin(gdf3.way_id.unique().tolist())].copy()
    all_others2[id_col] = no_limit2

    print('--Combine all reach data')

    gdf4 = pd.concat([gdf3, combined1, all_others2]).reset_index(drop=True)

    gdf4.rename(columns={'way_id': 'OSMWaterwayId', 'waterway': 'OSMWaterwayType', 'name': 'RiverName', 'start_node': 'StartNode'}, inplace=True)
    gdf4['OSMWaterwayId'] = gdf4['OSMWaterwayId'].astype('int64')

    print('--Compare existing reaches in the database')

    cols = gdf4.columns.drop('geometry').tolist()
    cols.extend(['OBJECTID'])

    # old1 = mssql.rd_sql(param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['reaches']['table'], cols, username=param['gis_waterdata']['username'], password=param['gis_waterdata']['password'], geo_col=True)
    old1 = mssql.rd_sql(param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['reaches']['table'], cols, geo_col=True)

    comp_dict = util.compare_dfs(old1.drop('OBJECTID', axis=1), gdf4, on=['SpatialUnitId', 'OSMWaterwayId'])
    new1 = comp_dict['new'].copy()
    diff1 = comp_dict['diff'].copy()
    rem1 = comp_dict['remove'][['SpatialUnitId', 'OSMWaterwayId']].copy()

    print('--Save to database')

    sql_dtypes = {'StartNode': types.BIGINT(), 'OSMWaterwayId': types.BIGINT(), 'RiverName': types.NVARCHAR(200), 'OSMWaterwayType': types.NVARCHAR(30), 'SpatialUnitId': types.NVARCHAR(8), 'SHAPE_': types.VARCHAR(), 'OBJECTID': types.INT(), 'ModifiedDate': types.DATETIME()}

    if not new1.empty:
        max_id = old1['OBJECTID'].max() + 1

        new1['ModifiedDate'] = today_str
        new1['OBJECTID'] = list(range(max_id, max_id + len(new1)))
        new1.rename(columns={'geometry': 'SHAPE'}, inplace=True)

        # mssql.update_table_rows(new1, param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['reaches']['table'], on=['SpatialUnitId', 'OSMWaterwayId'], index=False, append=True, username=param['gis_waterdata']['username'], password=param['gis_waterdata']['password'], geo_col='SHAPE', clear_table=False, dtype=sql_dtypes)
        mssql.update_table_rows(new1, param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['reaches']['table'], on=['SpatialUnitId', 'OSMWaterwayId'], index=False, append=True, geo_col='SHAPE', clear_table=False, dtype=sql_dtypes)

    if not diff1.empty:
        diff2 = pd.merge(diff1, old1[['SpatialUnitId', 'OSMWaterwayId', 'OBJECTID']], on=['SpatialUnitId', 'OSMWaterwayId'])
        diff2['ModifiedDate'] = today_str
        diff2.rename(columns={'geometry': 'SHAPE'}, inplace=True)

        # mssql.update_table_rows(diff2, param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['reaches']['table'], on=['SpatialUnitId', 'OSMWaterwayId'], index=False, append=True, username=param['gis_waterdata']['username'], password=param['gis_waterdata']['password'], geo_col='SHAPE', clear_table=False, dtype=sql_dtypes)
        mssql.update_table_rows(diff2, param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['reaches']['table'], on=['SpatialUnitId', 'OSMWaterwayId'], index=False, append=True, geo_col='SHAPE', clear_table=False, dtype=sql_dtypes)

    if not rem1.empty:
        # mssql.del_table_rows(param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['reaches']['table'], pk_df=rem1, username=param['gis_waterdata']['username'], password=param['gis_waterdata']['password'])
        mssql.del_table_rows(param['gis_waterdata']['server'], param['gis_waterdata']['database'], param['gis_waterdata']['reaches']['table'], pk_df=rem1)

    return gdf4, excluded_points, bad_geo, bad_points
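
# A usage sketch (assumes the parameters are kept in a YAML file with the keys
# referenced above; the file name is illustrative):
# import yaml
# with open('parameters.yml') as f:
#     param = yaml.safe_load(f)
# gdf4, excluded_points, bad_geo, bad_points = osm_delineation(param)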
Example #6
def process_waps(param):
    """

    """
    run_time_start = pd.Timestamp.today().strftime('%Y-%m-%d %H:%M:%S')
    print(run_time_start)

    ### Read in source data and update accela tables in ConsentsReporting db
    print('--Reading in source data...')

    ## Make object to contain the source data
    db = types.SimpleNamespace()

    for t in param['misc']['WapProcessing']['tables']:
        p = param['source data'][t]
        stmt = 'select * from "{table}"'.format(table=p['table'])
        setattr(
            db, t,
            sf.read_table(p['username'], p['password'], p['account'],
                          p['database'], p['schema'], stmt))

    # Spatial data
    gw_dict = param['source data']['gw_zones']

    setattr(
        db, 'gw_zones',
        mssql.rd_sql(gw_dict['server'],
                     gw_dict['database'],
                     gw_dict['table'],
                     gw_dict['col_names'],
                     username=gw_dict['username'],
                     password=gw_dict['password'],
                     geo_col=True,
                     rename_cols=gw_dict['rename_cols']))

    sw_dict = param['source data']['sw_reaches']

    setattr(
        db, 'sw_reaches',
        mssql.rd_sql(sw_dict['server'],
                     sw_dict['database'],
                     sw_dict['table'],
                     sw_dict['col_names'],
                     username=sw_dict['username'],
                     password=sw_dict['password'],
                     geo_col=True))

    ##################################################
    ### Waps
    print('--Process Waps')

    sites1 = vector.xy_to_gpd('Wap', 'NzTmX', 'NzTmY',
                              db.sites.drop('EffectiveFromDate', axis=1))

    waps1 = sites1.merge(db.wap_sd.drop('EffectiveFromDate', axis=1), on='Wap')
    waps1.loc[waps1['SD1_7Day'].isnull(), 'SD1_7Day'] = 0
    waps1.loc[waps1['SD1_30Day'].isnull(), 'SD1_30Day'] = 0
    waps1.loc[waps1['SD1_150Day'].isnull(), 'SD1_150Day'] = 0
    waps1[['SD1_7Day', 'SD1_30Day',
           'SD1_150Day']] = waps1[['SD1_7Day', 'SD1_30Day',
                                   'SD1_150Day']].round().astype(int)

    ## Aquifer tests
    aq1 = db.wap_aquifer_test.dropna(subset=['Storativity']).drop(
        'EffectiveFromDate', axis=1).copy()
    aq2 = aq1.groupby('Wap')['Storativity'].mean().dropna().reset_index()
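    # The mean is computed only to collapse duplicate tests per Wap; it is
    # immediately replaced by a True flag used in the merge below.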
    aq2.Storativity = True

    waps2 = waps1.merge(aq2, on='Wap', how='left')
    waps2.loc[waps2.Storativity.isnull(), 'Storativity'] = False

    ## Add spatial info
    # GW
    gw_zones = db.gw_zones.copy()
    gw_zones.rename(columns={'SpatialUnitID': 'GwSpatialUnitId'}, inplace=True)

    waps3, poly1 = vector.pts_poly_join(waps2, gw_zones, 'GwSpatialUnitId')
    waps3.drop_duplicates('Wap', inplace=True)
    waps3['Combined'] = waps3.apply(lambda x: 'CWAZ' in x['GwSpatialUnitId'],
                                    axis=1)

    # SW
    sw1 = db.sw_reaches.copy()
    sw1.rename(columns={'SpatialUnitID': 'SwSpatialUnitId'}, inplace=True)

    lst1 = []
    for index, row in sw1.iterrows():
        for j in list(row['geometry'].coords):
            lst1.append([row['SwSpatialUnitId'], Point(j)])
    df1 = pd.DataFrame(lst1, columns=['SwSpatialUnitId', 'geometry'])
    sw2 = gpd.GeoDataFrame(df1, geometry='geometry')

    waps3b = vector.kd_nearest(waps3, sw2, 'SwSpatialUnitId')

    ## Prepare output
    waps3b['NzTmX'] = waps3b.geometry.x
    waps3b['NzTmY'] = waps3b.geometry.y

    waps4 = pd.DataFrame(waps3b.drop(['geometry'], axis=1))
    waps4[['NzTmX', 'NzTmY']] = waps4[['NzTmX', 'NzTmY']].round().astype(int)
    waps4.rename(columns={
        'Name': 'SpatialUnitName',
        'distance': 'DistanceToSw'
    },
                 inplace=True)

    ## Check for differences
    print('Save results')
    wap_dict = param['source data']['waps']

    #    old_stmt = 'select * from "{table}"'.format(table=wap_dict['table'])
    #    old1 = sf.read_table(wap_dict['username'], wap_dict['password'], wap_dict['account'], wap_dict['database'], wap_dict['schema'], old_stmt).drop('EffectiveFromDate', axis=1)
    #
    #    change1 = compare_dfs(old1, waps4, ['Wap'])
    #    new1 = change1['new']
    #    diff1 = change1['diff']

    ## Save data
    waps4['EffectiveFromDate'] = run_time_start

    sf.to_table(waps4, wap_dict['table'], wap_dict['username'],
                wap_dict['password'], wap_dict['account'],
                wap_dict['database'], wap_dict['schema'], True)

    return waps4
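
# Standalone sketch of the nearest-reach lookup used above (synthetic data;
# vector.kd_nearest is assumed to attach the nearest point's id and a
# 'distance' column, as implied by the rename to 'DistanceToSw'):
import geopandas as gpd
import pandas as pd
from shapely.geometry import LineString, Point

sw = gpd.GeoDataFrame({'SwSpatialUnitId': ['R1']},
                      geometry=[LineString([(0, 0), (5000, 0)])])

# Explode each reach LineString into its vertices, one row per vertex
lst = [[row['SwSpatialUnitId'], Point(xy)]
       for _, row in sw.iterrows()
       for xy in row['geometry'].coords]
sw_pts = gpd.GeoDataFrame(pd.DataFrame(lst, columns=['SwSpatialUnitId', 'geometry']),
                          geometry='geometry')
# waps_nearest = vector.kd_nearest(waps_gdf, sw_pts, 'SwSpatialUnitId')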
Example #7
def test_pts_poly_join():
    pts2, poly2 = vector.pts_poly_join(sites_shp_path, catch_shp_path,
                                       poly_col_name)

    assert (len(pts2) == 2) & (len(poly2) == 1) & isinstance(
        pts2, gpd.GeoDataFrame)
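
# pts_poly_join returns both the points (with the polygons' attributes joined
# on) and the subset of polygons that actually contain points, which is what
# the two length checks above assert.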
Example #8
    def _usage_estimation(self, freq, buffer_dis=40000, min_months=36):
        """

        """
        ### Get the necessary data

        # a1 = AlloUsage()
        # a1.permits = self.permits.copy()
        # a1.waps = self.waps.copy()
        # a1.from_date = self.from_date
        # a1.to_date = self.to_date

        # if hasattr(self, 'total_allo_ts'):
        #     delattr(self, 'total_allo_ts')

        allo_use1 = self.get_ts(['allo', 'metered_allo', 'usage'], 'M',
                                ['permit_id', 'wap'])

        permits = self.permits.copy()

        ### Create Wap locations
        waps1 = vector.xy_to_gpd(
            'wap', 'lon', 'lat',
            self.waps.drop('permit_id', axis=1).drop_duplicates('wap'), 4326)
        waps2 = waps1.to_crs(2193)

        ### Determine which Waps need to be estimated
        allo_use_mis1 = allo_use1[allo_use1['total_metered_allo'] ==
                                  0].copy().reset_index()
        allo_use_with1 = allo_use1[
            allo_use1['total_metered_allo'] > 0].copy().reset_index()

        mis_waps1 = allo_use_mis1.groupby(['permit_id', 'wap'
                                           ])['total_allo'].count().copy()
        with_waps1 = allo_use_with1.groupby(['permit_id',
                                             'wap'])['total_allo'].count()
        with_waps2 = with_waps1[with_waps1 >= min_months]

        with_waps3 = pd.merge(with_waps2.reset_index()[['permit_id', 'wap']],
                              permits[['permit_id', 'use_type']],
                              on='permit_id')

        with_waps4 = pd.merge(waps2, with_waps3['wap'], on='wap')

        mis_waps2 = pd.merge(mis_waps1.reset_index(),
                             permits[['permit_id', 'use_type']],
                             on='permit_id')
        mis_waps3 = pd.merge(waps2, mis_waps2['wap'], on='wap')
        mis_waps3['geometry'] = mis_waps3['geometry'].buffer(buffer_dis)
        # mis_waps3.rename(columns={'wap': 'mis_wap'}, inplace=True)

        mis_waps4, poly1 = vector.pts_poly_join(
            with_waps4.rename(columns={'wap': 'good_wap'}), mis_waps3, 'wap')

        ## Calc ratios
        allo_use_with2 = pd.merge(allo_use_with1,
                                  permits[['permit_id', 'use_type']],
                                  on='permit_id')

        allo_use_with2['month'] = allo_use_with2['date'].dt.month
        allo_use_with2['usage_allo'] = allo_use_with2[
            'total_usage'] / allo_use_with2['total_allo']

        allo_use_ratio1 = allo_use_with2.groupby(
            ['permit_id', 'wap', 'use_type',
             'month'])['usage_allo'].mean().reset_index()

        allo_use_ratio2 = pd.merge(
            allo_use_ratio1.rename(columns={'wap': 'good_wap'}),
            mis_waps4[['good_wap', 'wap']],
            on='good_wap')

        ## Combine with the missing ones
        allo_use_mis2 = pd.merge(allo_use_mis1[['permit_id', 'wap', 'date']],
                                 permits[['permit_id', 'use_type']],
                                 on='permit_id')
        allo_use_mis2['month'] = allo_use_mis2['date'].dt.month

        allo_use_mis3 = pd.merge(
            allo_use_mis2,
            allo_use_ratio2[['use_type', 'month', 'usage_allo', 'wap']],
            on=['use_type', 'wap', 'month'])
        allo_use_mis4 = allo_use_mis3.groupby(
            ['permit_id', 'wap', 'date'])['usage_allo'].mean().reset_index()

        allo_use_mis5 = pd.merge(allo_use_mis4,
                                 allo_use_mis1[[
                                     'permit_id', 'wap', 'date', 'total_allo',
                                     'sw_allo', 'gw_allo'
                                 ]],
                                 on=['permit_id', 'wap', 'date'])

        allo_use_mis5['total_usage_est'] = (
            allo_use_mis5['usage_allo'] * allo_use_mis5['total_allo']).round()
        allo_use_mis5['sw_allo_usage_est'] = (
            allo_use_mis5['usage_allo'] * allo_use_mis5['sw_allo']).round()
        allo_use_mis5['gw_allo_usage_est'] = (
            allo_use_mis5['usage_allo'] * allo_use_mis5['gw_allo']).round()

        allo_use_mis6 = allo_use_mis5[[
            'permit_id', 'wap', 'date', 'total_usage_est', 'sw_allo_usage_est',
            'gw_allo_usage_est'
        ]].copy()

        ### Convert to daily if required
        if freq == 'D':
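            # Spread each monthly total over the days in its month to get a
            # mean daily rate, stamp it at mid-month, then interpolate to a
            # daily series with pchip below.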
            days1 = allo_use_mis6.date.dt.daysinmonth
            days2 = pd.to_timedelta((days1 / 2).round().astype('int32'),
                                    unit='D')

            allo_use_mis6[
                'total_usage_est'] = allo_use_mis6['total_usage_est'] / days1
            allo_use_mis6['sw_allo_usage_est'] = allo_use_mis6[
                'sw_allo_usage_est'] / days1
            allo_use_mis6['gw_allo_usage_est'] = allo_use_mis6[
                'gw_allo_usage_est'] / days1

            usage_rate0 = allo_use_mis6.copy()

            usage_rate0['date'] = usage_rate0['date'] - days2

            grp1 = allo_use_mis6.groupby(['permit_id', 'wap'])
            first1 = grp1.first()
            last1 = grp1.last()

            first1.loc[:, 'date'] = pd.to_datetime(
                first1.loc[:, 'date'].dt.strftime('%Y-%m') + '-01')

            usage_rate1 = pd.concat(
                [first1,
                 usage_rate0.set_index(['permit_id', 'wap']), last1],
                sort=True).reset_index().sort_values(
                    ['permit_id', 'wap', 'date'])

            usage_rate1.set_index('date', inplace=True)

            usage_daily_rate1 = usage_rate1.groupby([
                'permit_id', 'wap'
            ]).apply(lambda x: x.resample('D').interpolate(method='pchip')[[
                'total_usage_est', 'sw_allo_usage_est', 'gw_allo_usage_est'
            ]]).round(2)
        else:
            usage_daily_rate1 = allo_use_mis6.set_index(
                ['permit_id', 'wap', 'date'])

        ## Put the actual usage back into the estimate
        act_use1 = self.get_ts(['usage'], freq, ['permit_id', 'wap'])

        combo1 = pd.concat([usage_daily_rate1, act_use1], axis=1).sort_index()
        combo1.loc[combo1['total_usage'].notnull(),
                   'total_usage_est'] = combo1.loc[
                       combo1['total_usage'].notnull(), 'total_usage']
        combo1.loc[combo1['sw_allo_usage'].notnull(),
                   'sw_allo_usage_est'] = combo1.loc[
                       combo1['sw_allo_usage'].notnull(), 'sw_allo_usage']
        combo1.loc[combo1['gw_allo_usage'].notnull(),
                   'gw_allo_usage_est'] = combo1.loc[
                       combo1['gw_allo_usage'].notnull(), 'gw_allo_usage']
        combo1.drop(['total_usage', 'sw_allo_usage', 'gw_allo_usage'],
                    axis=1,
                    inplace=True)

        # combo1 = combo1.loc[slice(None), slice(None), self.from_date:self.to_date]
        setattr(self, 'usage_est', combo1)

        return combo1
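
# A usage sketch (private method; assumes an AlloUsage-like instance whose
# get_ts() provides the monthly 'allo', 'metered_allo' and 'usage' series
# used above):
# usage_est = allo_obj._usage_estimation('D', buffer_dis=40000, min_months=36)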