示例#1
0
def run_single(path, min_flow_area, max_gradient):
    """Analyze one scenario directory and classify its 2D cells.

    Opens the 3Di result files found in *path*, filters the flowlines,
    groups connected 2D nodes, and labels each 2D cell with a case.

    :param path: directory containing the gridadmin and results files.
    :param min_flow_area: threshold forwarded to ``filter_lines``.
    :param max_gradient: threshold forwarded to ``filter_lines``.
    :returns: tuple of (cell data dict with an added ``'case'`` field,
        EPSG code of the result grid).
    """
    # Lazy %-style args: the message is only formatted if INFO is enabled.
    logger.info("Analyzing scenario at %s", path)

    gr = GridH5ResultAdmin(os.path.join(path, GRIDADMIN_NAME),
                           os.path.join(path, RESULTS_NAME))

    lines2d2d_valid, lines1d2d_active, lines1d2d_valid = filter_lines(
        gr,
        min_flow_area=min_flow_area,
        max_gradient=max_gradient,
    )

    groups = group_nodes(lines2d2d_valid.line)

    cell_data = gr.cells.subset('2D_ALL').only("id", "cell_coords").data

    overlast_ids, plas_ids, modelfout_ids = classify_nodes(
        node_id_2d=cell_data['id'],
        groups=groups,
        lines1d2d_active=lines1d2d_active,
        lines1d2d_valid=lines1d2d_valid,
    )

    # 'S10' is wide enough for the longest label, 'modelfout' (9 bytes).
    cell_data['case'] = np.full(cell_data['id'].size, '', dtype='S10')
    # Assignment order matters: a cell in several id sets ends up with the
    # last label written, so 'modelfout' wins over 'overlast' over 'plas'.
    cell_data['case'][np.isin(cell_data['id'], plas_ids)] = 'plas'
    cell_data['case'][np.isin(cell_data['id'], overlast_ids)] = 'overlast'
    cell_data['case'][np.isin(cell_data['id'], modelfout_ids)] = 'modelfout'
    return cell_data, gr.epsg_code
示例#2
0
    def _process_results(self):
        """Convert the raw 3Di results into the configured output files.

        Exports simulated pump discharges to ``discharges.csv`` (only when
        the model has pump stations) and writes the 2D open water levels
        into a time-indexed copy of the open water netCDF template.

        :raises utils.MissingFileException: when one of the required input
            files is absent.
        """
        # Input files

        gridadmin_file = self.settings.base_dir / "model" / "gridadmin.h5"
        if not gridadmin_file.exists():
            raise utils.MissingFileException(
                f"Gridadmin file {gridadmin_file} not found")
        results_file = self.output_dir / "results_3di.nc"
        if not results_file.exists():
            raise utils.MissingFileException(
                f"Results file {results_file} not found")
        open_water_input_file = self.settings.base_dir / "input" / "ow.nc"
        if not open_water_input_file.exists():
            raise utils.MissingFileException(
                f"Open water input file {open_water_input_file} not found")

        results = GridH5ResultAdmin(str(gridadmin_file), str(results_file))
        # Timestamps are seconds relative to the simulation start; shift to
        # absolute time and round to the 10-minute output interval.
        times = results.nodes.timestamps[()] + self.settings.start.timestamp()
        times = times.astype("datetime64[s]")
        times = pd.Series(times).dt.round("10 min")
        endtime = results.nodes.timestamps[-1]

        # to be expanded
        if results.has_pumpstations:
            pump_id = results.pumps.display_name.astype("U13")
            discharges = results.pumps.timeseries(
                start_time=0, end_time=endtime).data["q_pump"]
            discharges_dataframe = pd.DataFrame(discharges,
                                                index=times,
                                                columns=pump_id)
            # Every column gets the same parameter label.
            params = ["Q.sim"] * len(discharges_dataframe.columns)

            discharges_dataframe.columns = pd.MultiIndex.from_arrays(
                [pump_id, pump_id, params])
            discharges_csv_output = self.output_dir / "discharges.csv"
            discharges_dataframe.to_csv(discharges_csv_output,
                                        index=True,
                                        header=True,
                                        sep=",")
            logger.info(
                "Simulated pump discharges have been exported to %s",
                discharges_csv_output,
            )

        # Reuse open_water_input_file, computed and existence-checked above.
        open_water_output_file = self.settings.base_dir / "output" / "ow.nc"
        converted_netcdf = utils.write_netcdf_with_time_indexes(
            open_water_input_file, self.settings)
        # converted_netcdf is a temp file, so move it to the correct spot.
        converted_netcdf.replace(open_water_output_file)
        logger.debug("Started open water output file %s",
                     open_water_output_file)
        s1 = (results.nodes.subset("2D_OPEN_WATER").timeseries(
            start_time=0, end_time=endtime).s1)
        # Context manager guarantees the dataset is closed even when the
        # assignment raises (e.g. on a shape mismatch).
        with netCDF4.Dataset(open_water_output_file, "a") as dset:
            dset["Mesh2D_s1"][:, :] = s1
        logger.info("Wrote open water output file %s", open_water_output_file)
示例#3
0
def list_result_options(request):
    """
    responses:
      200:
        description: A list of result options per model type.
    """
    # The model to inspect comes straight from the URL path.
    model_name = request.path_params['model_name']
    gr = GridH5ResultAdmin(gridadmin_f, results_f)
    model = getattr(gr, model_name)
    field_names = model._meta.get_fields(only_names=True)
    return JSONResponse(field_names)
示例#4
0
    def __init__(self, gridadmin_path, results_3di_path):
        """Wrap a 3Di results file, raw or aggregated.

        Reads the ``result_type`` attribute from the results file to decide
        which result admin class, water level variable, and step count to
        use.

        :param gridadmin_path: path to the gridadmin HDF5 file.
        :param results_3di_path: path to the (aggregate) results netCDF file.
        """
        # Open read-only: without an explicit mode, older h5py versions
        # default to append mode, which is deprecated and locks the file.
        with h5py.File(results_3di_path, "r") as h5:
            self.result_type = h5.attrs['result_type'].decode('ascii')

        result_admin_args = gridadmin_path, results_3di_path
        if self.result_type == "raw":
            self._result_admin = GridH5ResultAdmin(*result_admin_args)
            self.variable = "s1"
            # Raw results expose a single timestamps array.
            self.calculation_steps = self.nodes.timestamps.size
        else:
            self._result_admin = GridH5AggregateResultAdmin(*result_admin_args)
            self.variable = "s1_max"
            # Aggregated results store timestamps per aggregation variable.
            self.calculation_steps = self.nodes.timestamps[self.variable].size
示例#5
0
from threedi_result_aggregation import *
from threedigrid.admin.gridresultadmin import GridH5ResultAdmin
from threedigrid.admin.gridresultadmin import GridH5AggregateResultAdmin

# Scratch script: inspect the node fields of raw vs. aggregated results.
ga = r'C:\Users\leendert.vanwolfswin\Documents\purmerend\overhwere_opp\70mm\gridadmin.h5'
res = r'C:\Users\leendert.vanwolfswin\Documents\purmerend\overhwere_opp\70mm\results_3di.nc'
agg_res = r'C:\Users\leendert.vanwolfswin\Documents\purmerend\overhwere_opp\70mm\aggregate_results_3di.nc'

gr = GridH5ResultAdmin(ga, res)
# Bug fix: the aggregate netCDF must be opened with the aggregate admin
# (GridH5AggregateResultAdmin was imported but never used); the meta
# inspected below belongs to aggregated results.
gr_agg = GridH5AggregateResultAdmin(ga, agg_res)
gr_agg.nodes._field_names
gr_agg.nodes.Meta.composite_fields.keys()
gr_agg.nodes.Meta.subset_fields.keys()
# nodes = list(gr.nodes.id)
# flow_per_node(gr=gr, node_ids=nodes, start_time=0, end_time=3600, out=True, aggregation_method=AGGREGATION_METHODS.get_by_short_name('sum'))
#
# # from threedigrid.admin.nodes.exporters import NodesOgrExporter
# # a = NodesOgrExporter(gr.nodes)
# # a.set_driver(driver_name='MEMORY', extension='')
# # a.save('', node_data=gr.nodes.data, target_epsg_code=28992)
#
# das = []
#
# # da4 = dict()
# # da4['variable'] = 'q_pos'
# # da4['method']='median'
# # da4['threshold'] = 0.0
# # das.append(da4)
# #
# # da4 = dict()
# # da4['variable'] = 'q_in_x'
示例#6
0
def gr():
    """Yield a ``GridH5ResultAdmin`` for the test files, closing it afterwards."""
    gr = GridH5ResultAdmin(grid_file, result_file)
    try:
        yield gr
    finally:
        # Guarantee the underlying file handles are released even when the
        # consumer raises through the yield.
        gr.close()
示例#7
0
def gr(request):
    """Yield a ``GridH5ResultAdmin`` for the parametrized gridadmin file, closing it afterwards."""
    gr = GridH5ResultAdmin(
        os.path.join(test_file_dir, request.param), result_file)
    try:
        yield gr
    finally:
        # Guarantee the underlying file handles are released even when the
        # consumer raises through the yield.
        gr.close()
示例#8
0
async def _fetch(indexes, var_name, model_name):
    """Return the timeseries values of *var_name* for the given flow indexes as a list."""
    print('received flow indexes %s ' % indexes)
    gr = GridH5ResultAdmin(gridadmin_f, results_f)
    model = getattr(gr, model_name)
    series = model.timeseries(indexes=indexes)
    values = series.only(var_name).data[var_name]
    return values.tolist()