setting = "classic"  # classic, steep, or floodplain

# name of the folder of the desired DCEs for the analysis
DCE1_name = "DCE_01"
DCE2_name = 'DCE_02'

# DCE 1 Parameters
DCE1_date = "201108"
DCE1_image_source = 'drone'
DCE1_date_name = "August 2019"
DCE1_flow_stage = 'moderate'
DCE1_active = 'Yes'
DCE1_maintained = 'No'
DCE1_res = '.02'

# DCE 2 Parameters
DCE2_date = '201108'
DCE2_image_source = 'google earth'
DCE2_date_name = "Undammed"
DCE2_flow_stage = 'moderate'
DCE2_active = 'Yes'
DCE2_maintained = 'Yes'
DCE2_res = '.46'

calculate_metrics(project_path, RS_folder_name, DEM, mapper, project_name, site_name,
                  DCE1_name, DCE1_date, DCE1_image_source, DCE2_image_source,
                  DCE1_date_name, DCE2_date_name, DCE1_flow_stage, DCE1_active,
                  DCE1_maintained, DCE2_name, DCE2_date, DCE2_flow_stage, DCE2_active,
                  DCE2_maintained, DCE1_res, DCE2_res, setting, huc8)

################################
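# Optional guard (a sketch, not part of the original script): since the comment
# above limits `setting` to "classic", "steep", or "floodplain", failing early
# on any other value avoids a confusing error deeper inside calculate_metrics.
VALID_SETTINGS = {"classic", "steep", "floodplain"}
if setting not in VALID_SETTINGS:
    raise ValueError("setting must be one of {}, got {!r}".format(VALID_SETTINGS, setting))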
            # per-sample check inside the batch loop: count correct predictions
            if p == t:
                batch_corrects += 1

        # periodic progress logging inside the batch loop (batch index i)
        if i % print_every == 0 and i > 0:
            print('[%d, %5d] loss: %.7f' % (epoch + 1, i + 1, running_loss / print_every))
            print('real: ', str(tmp_target), '----- predicted: ', str(tmp_predicted))
            running_loss = 0.0
            print()

    # end of epoch: record the mean loss, accumulate correct counts, and compute metrics
    total_loss.append(epoch_loss / len(train_data_loader))
    total_corrects += batch_corrects
    auc, pr_auc, average_precision, average_recall = calculate_metrics(
        targets=targets, predictions=predictions, bin_predictions=bin_predictions)

    print('\nEpoch %d/%d, Accuracy: %.3f' % (epoch + 1, num_epochs, batch_corrects / len(train_data_loader)))
    print('\nEpoch %d/%d, Loss : %.7f' % (epoch + 1, num_epochs, epoch_loss / len(train_data_loader)))
    print('\nEpoch %d/%d, AUC: %.3f' % (epoch + 1, num_epochs, auc))
    print('\nEpoch %d/%d, PR-AUC: %.3f' % (epoch + 1, num_epochs, pr_auc))
    print('\nEpoch %d/%d, Precision: %.3f' % (epoch + 1, num_epochs, average_precision))
    print('\nEpoch %d/%d, Recall: %.3f' % (epoch + 1, num_epochs, average_recall), '\n')

print('Finished Training')
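# The `calculate_metrics` helper used in the epoch summary above is not shown
# in this fragment. A minimal sketch, assuming scikit-learn is available, that
# `targets` and `predictions` are flat arrays of labels and scores, and that
# `bin_predictions` holds thresholded 0/1 labels (all assumptions, not the
# original implementation):
from sklearn.metrics import (average_precision_score, precision_score,
                             recall_score, roc_auc_score)


def calculate_metrics(targets, predictions, bin_predictions):
    auc = roc_auc_score(targets, predictions)                       # ROC AUC from raw scores
    pr_auc = average_precision_score(targets, predictions)          # area under the PR curve
    average_precision = precision_score(targets, bin_predictions)   # from thresholded labels
    average_recall = recall_score(targets, bin_predictions)
    return auc, pr_auc, average_precision, average_recall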
        # (inside the per-district try block)
        # 2.5 set index for easy concat
        dist_df.index = df.index
        dist_df.index.set_names(["date"], inplace=True)

        # 2.6 add generic column names
        new_col = [
            col.replace("{}.".format(district), "") for col in list(dist_df.columns)
        ]
        dist_df.rename(dict(zip(list(dist_df.columns), new_col)), axis=1, inplace=True)

        # 2.7 Output to CSV
        logging.info("Writing data to output file")
        dist_df.to_csv(output_file, mode="w" if header else "a", header=header)

        # Calculate metrics
        logging.info("calculating metrics for {}".format(district))
        calculate_metrics.calculate_metrics(
            dist_df, header=header, hospitalizations=hospitalizations, output=metrics_file)
        header = False
    except Exception as e:
        logging.exception("Error Occurred")
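# The `calculate_metrics` module called above is not part of this fragment.
# A minimal sketch of the interface it is invoked with; the metric definitions
# below (7-day rolling means, optional normalization by hospitalizations) are
# placeholders, not the original calculations:
import pandas as pd


def calculate_metrics(dist_df, header=True, hospitalizations=None, output="metrics.csv"):
    # placeholder metric: 7-day rolling mean of every numeric column
    metrics = dist_df.select_dtypes("number").rolling(7, min_periods=1).mean()
    if hospitalizations is not None:
        # placeholder metric: normalize by the hospitalization figures
        metrics = metrics.div(hospitalizations)
    # append to the shared metrics file, writing the header only for the first district
    metrics.to_csv(output, mode="w" if header else "a", header=header)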