# Scale each segment's Degree of Regulation (DOR) by its mean annual flow
# (QC_MA) so the upstream aggregation below is flow-weighted.
segments_up['DOR_scaler'] = segments_up.DOR * segments_up.QC_MA
t0 = datetime.datetime.now()
# Aggregate the flow-weighted DOR over each segment's upstream network,
# following the downstream-ID links in 'DnHydroseq'.
RRI_temp = bfc.upstream_ag(data=segments_up,
                           downIDs='DnHydroseq',
                           agg_value=['DOR_scaler'])
t1 = datetime.datetime.now()
print('RRI upstream agg', (t1 - t0))

# River Regulation Index: upstream flow-weighted DOR normalized by upstream
# mean annual flow.
# NOTE(review): assumes 'QC_MA_up' was computed in an earlier cell not shown
# here — confirm before running this cell standalone.
RRI = RRI_temp.DOR_scaler_up / segments_up.QC_MA_up

# %%
# 3. divide into fragments and get average fragment properties
# Create fragments
t1 = datetime.datetime.now()
# Split the segment network into dam-delimited fragments.
# NOTE(review): exit_id presumably seeds fragment IDs for flowlines that
# exit the domain (see the 'fexit' comment in a later cell) — confirm
# against bfc.make_fragments.
segments = bfc.make_fragments(segments,
                              exit_id=52000,
                              verbose=False,
                              subwatershed=True)
t2 = datetime.datetime.now()
print("Make Fragments:", (t2 - t1))

# Summarize basic fragment properties
fragments = bfc.agg_by_frag(segments)

# %%
# 4. Calculate the RFI - actually I think this is DOF
# because I'm doing it with Length for now?

# Squared fragment length, used as the weight in the length-based
# RFI/DOF upstream aggregation.
fragments['LENGTHKM_sq'] = fragments.LENGTHKM**2

# aggregate l2 by upstream
                               'LENGTHKM', 'StartFlag', 'Coordinates', 'DamID',
                               'DamCount'
                           ])
else:
    print('does not exist')
    # Join the dam points onto the NHD flowlines for this run.
    nabd_nhd = ex.join_dams_flowlines(run_name)
    # Filter the joined data once and reuse the result; the original code
    # called ex.filter_join twice with identical arguments, repeating the
    # (potentially expensive) filtering for no benefit.
    filtered_join = ex.filter_join(nabd_nhd, run_name)  #filter the joined data
    segments = filtered_join
    # segments = pd.read_csv(run_name+'.csv', usecols=['Hydroseq', 'UpHydroseq',
    #                                                  'DnHydroseq', 'LENGTHKM',
    #                                                  'StartFlag','Coordinates',
    #                                                  'DamID', 'DamCount'])

#%%
# STEP 1: Make fragments.
# exit_id seeds fragment IDs for flowlines leaving the domain (rather than
# ending at a dam); 99900000 keeps them clearly separate from dam fragments.
segments = bfc.make_fragments(segments, exit_id=99900000)

# STEP 2: Build a fragment data frame with basic properties aggregated
# over each fragment's segments.
fragments = bfc.agg_by_frag(segments)

# STEP 3: Map each fragment to its set of upstream fragments.
UpDict = bfc.map_up_frag(fragments)

# STEP 4: Aggregate fragment properties over each upstream area.
fragments = bfc.agg_by_frag_up(fragments, UpDict)

# %%
# Some plotting
print(segments.columns)
# Segments with no assigned fragment get Frag = 0 so downstream
# plotting/grouping does not drop them as NaN.
segments['Frag'] = segments['Frag'].fillna(0)
## Commented-out cleanup from an earlier iteration (operated on 'test'):
## test['UpHydroseq'] = test['UpHydroseq'].round(decimals=0)
## test['DnHydroseq'] = test['DnHydroseq'].round(decimals=0)
## test['Hydroseq'] = test['Hydroseq'].round(decimals=0)
## test.set_index('Hydroseq')
##
##
# %%
# Convert raw WKT coordinate strings into shapely geometries and wrap the
# table as a GeoDataFrame.
# NOTE(review): 'test' is not defined in this chunk — presumably loaded in
# an earlier cell; verify before running this cell standalone.
# test.rename(columns={'WKT': 'Coordinates'}) #rename column
test2 = test.rename(columns={'WKT': 'Coordinates'})
test2.Coordinates = test2.Coordinates.astype(str)
test2['Coordinates'] = test2['Coordinates'].apply(wkt.loads)  # WKT string -> geometry
test2Geo = gp.GeoDataFrame(test2, geometry='Coordinates')

# Work on a copy so later mutations don't touch test2Geo.
segments = test2Geo.copy()

# %%
# NOTE(review): every other cell in this file calls bfc.make_fragments;
# confirm a bare make_fragments is imported/defined somewhere above,
# otherwise this cell raises NameError.
segments2 = make_fragments(segments)

# %%
# STEP 1: Making fragments
# looping to make fragments
# To do - calculate fragment totals -- total number of dams upstream
#  Total storage upstream

# Seed the traversal with headwater segments (no upstream segment,
# UpHydroseq == 0).
queue = segments.loc[segments.UpHydroseq == 0]
# Initial number to use for fragments that are exiting the domain
# rather than hitting a dam. Exiting fragments will start counting from
# this number.
fexit = 11

snum = 0  # Counter for the segment starting points -- just for print purposes
while len(queue) > 0: