Example #1
    def reconstruct_event(self, event):
        """
        Perform full event reconstruction, including Hillas and ImPACT analysis.

        Parameters
        ----------
        event: ctapipe event container

        Returns
        -------
            None
        """
        # store MC pointing direction for the array
        array_pointing = HorizonFrame(
            alt=event.mcheader.run_array_direction[1] * u.rad,
            az=event.mcheader.run_array_direction[0] * u.rad)
        tilted_system = TiltedGroundFrame(pointing_direction=array_pointing)

        image = {}
        pixel_x = {}
        pixel_y = {}
        pixel_area = {}
        tel_type = {}
        tel_x = {}
        tel_y = {}

        hillas = {}
        hillas_nom = {}
        image_pred = {}
        mask_dict = {}
        print("Event energy", event.mc.energy)

        for tel_id in event.dl0.tels_with_data:

            # Get calibrated image (switch to the second gain channel where the first saturates)
            pmt_signal = event.dl1.tel[tel_id].image[0]
            if len(event.dl1.tel[tel_id].image) > 1:
                print(event.dl1.tel[tel_id].image[1][pmt_signal > 100])
                pmt_signal[pmt_signal > 100] = \
                    event.dl1.tel[tel_id].image[1][pmt_signal > 100]
            # Create nominal system for the telescope (this should later use
            # the telescope pointing)
            nom_system = NominalFrame(array_direction=array_pointing,
                                      pointing_direction=array_pointing)

            # Create camera system of all pixels
            pix_x, pix_y = event.inst.pixel_pos[tel_id]
            fl = event.inst.optical_foclen[tel_id]
            if tel_id not in self.geoms:
                self.geoms[tel_id] = CameraGeometry.guess(
                    pix_x,
                    pix_y,
                    event.inst.optical_foclen[tel_id],
                    apply_derotation=False)
            # Transform the pixels positions into nominal coordinates
            camera_coord = CameraFrame(x=pix_x,
                                       y=pix_y,
                                       z=np.zeros(pix_x.shape) * u.m,
                                       focal_length=fl,
                                       rotation=-1 *
                                       self.geoms[tel_id].cam_rotation)

            nom_coord = camera_coord.transform_to(nom_system)
            tx, ty, tz = event.inst.tel_pos[tel_id]

            # ImPACT reconstruction is performed in the tilted system,
            # so we need to transform tel positions
            grd_tel = GroundFrame(x=tx, y=ty, z=tz)
            tilt_tel = grd_tel.transform_to(tilted_system)

            # Clean image using split level cleaning
            mask = tailcuts_clean(
                self.geoms[tel_id],
                pmt_signal,
                picture_thresh=self.tail_cut[self.geoms[tel_id].cam_id][1],
                boundary_thresh=self.tail_cut[self.geoms[tel_id].cam_id][0])

            # Perform Hillas parameterisation
            moments = None
            try:
                moments_cam = hillas_parameters(
                    event.inst.pixel_pos[tel_id][0],
                    event.inst.pixel_pos[tel_id][1], pmt_signal * mask)

                moments = hillas_parameters(nom_coord.x, nom_coord.y,
                                            pmt_signal * mask)

            except HillasParameterizationError as e:
                print(e)
                continue

            # Make cut based on Hillas parameters
            if self.preselect(moments, np.sum(mask), tel_id):

                # Dilate around the edges of the image
                for i in range(4):
                    mask = dilate(self.geoms[tel_id], mask)

                # Save everything in dicts for reconstruction later
                pixel_area[tel_id] = self.geoms[tel_id].pix_area / (fl * fl)
                pixel_area[tel_id] *= u.rad * u.rad
                pixel_x[tel_id] = nom_coord.x
                pixel_y[tel_id] = nom_coord.y

                tel_x[tel_id] = tilt_tel.x
                tel_y[tel_id] = tilt_tel.y

                tel_type[tel_id] = self.geoms[tel_id].cam_id
                image[tel_id] = pmt_signal
                image_pred[tel_id] = np.zeros(pmt_signal.shape)

                hillas[tel_id] = moments_cam
                hillas_nom[tel_id] = moments
                mask_dict[tel_id] = mask

        # Cut on number of telescopes remaining
        if len(image) > 1:
            fit_result = self.fit.predict(hillas_nom, tel_x, tel_y,
                                          array_pointing)

            for tel_id in hillas_nom:

                core_grd = GroundFrame(x=fit_result.core_x,
                                       y=fit_result.core_y,
                                       z=0. * u.m)
                core_tilt = core_grd.transform_to(tilted_system)
                impact = np.sqrt(
                    np.power(tel_x[tel_id] - core_tilt.x, 2) +
                    np.power(tel_y[tel_id] - core_tilt.y, 2))

                pix = np.array([
                    pixel_x[tel_id].to(u.deg).value,
                    pixel_y[tel_id].to(u.deg).value
                ])
                img = image[tel_id]
                #img[img < 0.0] = 0.0
                #img /= np.max(img)

                lin = LinearNDInterpolator(pix.T, img, fill_value=0.0)
                x_img = np.arange(-4, 4, 0.05) * u.deg
                y_img = np.arange(-4, 4, 0.05) * u.deg
                xv, yv = np.meshgrid(x_img, y_img)
                pos = np.array([xv.ravel(), yv.ravel()])
                npix = xv.shape[0]
                img_int = np.reshape(lin(pos.T), xv.shape)
                print(img_int)

                hdu = fits.ImageHDU(img_int.astype(np.float32))
                hdu.header["IMPACT"] = impact.to(u.m).value
                hdu.header["TELTYPE"] = tel_type[tel_id]
                hdu.header["TELNUM"] = tel_id

                hdu.header["MCENERGY"] = event.mc.energy.to(u.TeV).value
                hdu.header["RECENERGY"] = 0
                hdu.header["AMP"] = hillas_nom[tel_id].size
                hdu.header["WIDTH"] = hillas_nom[tel_id].width.to(u.deg).value
                hdu.header["LENGTH"] = hillas_nom[tel_id].length.to(
                    u.deg).value
                self.output.append(hdu)
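
A minimal, self-contained sketch of the resampling step above (synthetic pixel data; the names only mirror the code and nothing here comes from ctapipe):

import numpy as np
from scipy.interpolate import LinearNDInterpolator

rng = np.random.default_rng(0)
pix_xy = rng.uniform(-4, 4, size=(500, 2))     # scattered pixel centres [deg]
signal = np.exp(-(pix_xy ** 2).sum(axis=1))    # fake camera image

lin = LinearNDInterpolator(pix_xy, signal, fill_value=0.0)
x_img = y_img = np.arange(-4, 4, 0.05)
xv, yv = np.meshgrid(x_img, y_img)
img_int = lin(np.column_stack([xv.ravel(), yv.ravel()])).reshape(xv.shape)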
Example #2
def quads_from_corner_lookup(lon,
                             lat,
                             corner_points,
                             pixel_lon,
                             pixel_lat,
                             nadir_lon=0.0,
                             inflate=1.0,
                             extrapolate=True):
    """
    Given corner offset data in corner_points located at lon, lat,
    return interpolated corner offsets.

    Arguments
    lon, lat: arrays, shape (N,M), of longitude and latitude giving the
        locations of the corresponding offsets in corner_points
    corner_points: array, shape (N,M,4,2)
        Corners of the pixel quadrilateral are given in order along the
        third dimension. Longitude and latitudes are indexes 0 and 1 in the
        trailing dimension, respectively.
    pixel_lon, pixel_lat: arrays, shape (P,), of longitudes and latitudes
    nadir_lon: geostationary satellite longitude. Added to lon and
        lat (or subtracted from pixel locations) so as to shift the
        lookup table to the correct earth-relative position.
    inflate: multiply the corner point delta by this amount.
    extrapolate: if True (default) and pixel is outside the domain of lon and
        lat, use the nearest neighbor in corner_points instead

    Returns
    quads: array, shape (P,4,2) of corner locations for each pixel.
    """
    did_extrap = False

    n_corners = corner_points.shape[-2]
    n_coords = corner_points.shape[-1]

    lon_shift = lon + nadir_lon

    pixel_loc = np.vstack((pixel_lon, pixel_lat)).T
    grid_loc = (lon_shift.flatten(), lat.flatten())

    quads = np.empty((pixel_lon.shape[0], n_corners, n_coords))
    for ci in range(n_corners):
        corner_interp_lon = LinearNDInterpolator(
            grid_loc, corner_points[:, :, ci, 0].flatten())
        #, bounds_error=True)
        corner_interp_lat = LinearNDInterpolator(
            grid_loc, corner_points[:, :, ci, 1].flatten())
        #, bounds_error=True)
        dlon = corner_interp_lon(pixel_loc)
        dlat = corner_interp_lat(pixel_loc)
        if extrapolate:
            out_lon = np.isnan(dlon)
            out_lat = np.isnan(dlat)
            if out_lon.sum() > 0:
                did_extrap = True
                corner_extrap_lon = NearestNDInterpolator(
                    grid_loc, corner_points[:, :, ci, 0].flatten())
                dlon[out_lon] = corner_extrap_lon(pixel_loc[out_lon])
            if out_lat.sum() > 0:
                did_extrap = True
                corner_extrap_lat = NearestNDInterpolator(
                    grid_loc, corner_points[:, :, ci, 1].flatten())
                dlat[out_lat] = corner_extrap_lat(pixel_loc[out_lat])
        quads[:, ci, 0] = pixel_lon + dlon * inflate
        quads[:, ci, 1] = pixel_lat + dlat * inflate
    if did_extrap:
        log.warning(extrap_warning)
    return quads
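
The extrapolate branch above is a general pattern: LinearNDInterpolator returns NaN outside the convex hull of its input points, and NearestNDInterpolator can fill those gaps. A self-contained sketch with toy data:

import numpy as np
from scipy.interpolate import LinearNDInterpolator, NearestNDInterpolator

pts = np.random.default_rng(1).uniform(0, 1, size=(50, 2))
vals = pts[:, 0] + pts[:, 1]

query = np.array([[0.5, 0.5], [2.0, 2.0]])    # second point lies outside the hull
out = LinearNDInterpolator(pts, vals)(query)  # -> [~1.0, nan]
bad = np.isnan(out)
if bad.any():
    out[bad] = NearestNDInterpolator(pts, vals)(query[bad])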
Example #3
def scalar2geo(ifile, opath, variable,
               mesh, ind_noempty_all,
               ind_empty_all,ind_depth_all, cmore_table,
               lonreg2, latreg2, distances, inds, radius_of_influence,
               topo, points, interp, qh, timestep, dind, realdepth):
    print(ifile)
    ext = variable
    #ifile = ipath
    ofile = os.path.join(opath, '{}_{}.nc'.format(os.path.basename(ifile)[:-3], ext))

    fl = Dataset(ifile)
    if fl.variables[variable].shape[1] == mesh.n2d:
        dim3d = False
        dind = [dind[0]]
        realdepth = [realdepth[0]]
    elif fl.variables[variable].shape[1] == mesh.n3d:
        dim3d = True
    else:
        raise ValueError('Variable size {} is not equal to number of 2d ({}) or 3d ({}) nodes'.format(fl.variables[variable].shape[1], mesh.n2d, mesh.n3d))

    fw = Dataset(ofile, mode='w', format='NETCDF4_CLASSIC')

    fw.createDimension('latitude', latreg2.shape[0])
    fw.createDimension('longitude', lonreg2.shape[1])
    fw.createDimension('time', None)
    fw.createDimension('depth_coord', len(realdepth))

    lat = fw.createVariable('latitude', 'd', ('latitude'))
    lat.setncatts(noempty_dict(cmore_table['axis_entry']['latitude']))
    lat[:] = latreg2[:,0].flatten()

    lon = fw.createVariable('longitude', 'd', ('longitude'))
    lon.setncatts(noempty_dict(cmore_table['axis_entry']['longitude']))
    lon[:] = lonreg2[0,:].flatten()

    depth = fw.createVariable('depth_coord','d',('depth_coord'))
    depth.setncatts(noempty_dict(cmore_table['axis_entry']['depth_coord']))
    depth[:] = realdepth[:]

    time = fw.createVariable('time','d',('time'))
    time.setncatts(noempty_dict(cmore_table['axis_entry']['time']))
    time.units = fl.variables['time'].units
    if timestep == -1:
        time[:] = fl.variables['time'][:]
    else:
        time[:] = fl.variables['time'][timestep]

    # ii = LinearNDInterpolator(qh, mesh.topo)
    if timestep == -1:
        timesteps = range(fl.variables[variable].shape[0])
    else:
        timesteps = range(timestep, timestep+1)
    if True:
        temp = fw.createVariable(variable,'d',\
                                ('time','depth_coord','latitude','longitude'), \
                                fill_value=-9999, zlib=False, complevel=1)

        for ttime in timesteps:

            all_layers = fl.variables[variable][ttime,:]
            level_data=np.zeros(shape=(mesh.n2d))
            inter_data=np.zeros(shape=(len(mesh.zlevs),lonreg2.shape[0], lonreg2.shape[1]))
            # inter_data=np.ma.masked_where(topo[0,:,:].mask, inter_data)
            #for i in range(len(mesh.zlevs)):
            for n, i in enumerate(dind):
                #level_data=np.zeros(shape=(mesh.n2d))
                
                level_data[ind_noempty_all[i]]=all_layers[ind_depth_all[i][ind_noempty_all[i]]]
                level_data[ind_empty_all[i]] = np.nan
                if interp == 'nn':
                    air_nearest = pf.fesom2regular(level_data, mesh, lonreg2, latreg2, distances=distances,
                                            inds=inds, radius_of_influence=radius_of_influence, n_jobs=1)
                elif interp == 'idist':
                    air_nearest = pf.fesom2regular(level_data, mesh, lonreg2, latreg2, distances=distances,
                                            inds=inds, radius_of_influence=radius_of_influence, n_jobs=1, how='idist')
                elif interp == 'linear':
                    #level_data = level_data.copy()
                    #lonreg2 = lonreg2.tolist()
                    #latreg2 = latreg2.tolist()
                    air_nearest = LinearNDInterpolator(qh, level_data)((lonreg2, latreg2))
                elif interp == 'cubic':
                    air_nearest = CloughTocher2DInterpolator(qh, level_data)((lonreg2, latreg2))
                    print('cubic')
                    print(air_nearest.min())

                air_nearest = np.ma.masked_where(topo.mask, air_nearest)
                if timestep == -1:
                    temp[ttime,n,:,:] = air_nearest[:,:].filled(-9999)
                else:
                    temp[0,n,:,:] = air_nearest[:,:].filled(-9999)

    fl.close()
    fw.close()
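
Assuming `qh` is a precomputed scipy.spatial.Delaunay triangulation of the mesh nodes, passing it in directly lets every per-level interpolator reuse the triangulation instead of rebuilding it. A sketch of that pattern with synthetic data:

import numpy as np
from scipy.spatial import Delaunay
from scipy.interpolate import LinearNDInterpolator

nodes = np.random.default_rng(2).uniform(-1, 1, size=(200, 2))
qh = Delaunay(nodes)                          # triangulate once
for level_data in (nodes[:, 0], nodes[:, 1] ** 2):
    f = LinearNDInterpolator(qh, level_data)  # cheap: reuses the triangulation
    values_at_origin = f([[0.0, 0.0]])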
Example #4
def calc_lidar_bulk(instrument, model, is_conv, p_values, z_values, OD_from_sfc=True,
                    hyd_types=None, mie_for_ice=False, **kwargs):
    """
    Calculates the lidar stratiform or convective backscatter, extinction, and
    optical depth in sub-columns using bulk scattering LUTs assuming geometric
    scatterers (radiation scheme logic).
    Effective radii for each hydrometeor class must be provided (in model.ds).

    Parameters
    ----------
    instrument: Instrument
        The instrument to simulate. The instrument must be a lidar.
    model: Model
        The model to generate the parameters for.
    is_conv: bool
        True if the cell is convective
    p_values: ndarray
        model output pressure array in Pa.
    z_values: ndarray
        model output height array in m.
    OD_from_sfc: bool
        If True, then calculate optical depth from the surface.
    hyd_types: list or None
        list of hydrometeor names to include in the calculation. Defaults to the
        Model subclass types if None.
    mie_for_ice: bool
        If True, use bulk Mie calculation LUTs. Otherwise, use the bulk C6
        scattering LUTs for the 8-column severely roughened aggregate.
    Additional keyword arguments are passed into
    :py:func:`emc2.simulator.lidar_moments.accumulate_OD`.

    Returns
    -------
    model: :func:`emc2.core.Model`
        The model with the added simulated lidar parameters.
    """
    hyd_types = model.set_hyd_types(hyd_types)

    if is_conv:
        cloud_str = "conv"
        re_fields = model.conv_re_fields
    else:
        cloud_str = "strat"
        re_fields = model.strat_re_fields

    n_subcolumns = model.num_subcolumns

    if model.model_name in ["E3SM", "CESM2"]:
        bulk_ice_lut = "CESM_ice"
        bulk_mie_ice_lut = "mie_ice_CESM_PSD"
        bulk_liq_lut = "CESM_liq"
    else:
        bulk_ice_lut = "E3_ice"
        bulk_mie_ice_lut = "mie_ice_E3_PSD"
        bulk_liq_lut = "E3_liq"

    Dims = model.ds["%s_q_subcolumns_cl" % cloud_str].shape
    model.ds['sub_col_beta_p_tot_%s' % cloud_str] = xr.DataArray(
        np.zeros(Dims), dims=model.ds["%s_q_subcolumns_cl" % cloud_str].dims)
    model.ds['sub_col_alpha_p_tot_%s' % cloud_str] = xr.DataArray(
        np.zeros(Dims), dims=model.ds["%s_q_subcolumns_cl" % cloud_str].dims)
    model.ds['sub_col_OD_tot_%s' % cloud_str] = xr.DataArray(
        np.zeros(Dims), dims=model.ds["%s_q_subcolumns_cl" % cloud_str].dims)

    rhoa_dz = np.tile(np.abs(np.diff(p_values, axis=1, append=0.)) / instrument.g,
                      (model.num_subcolumns, 1, 1))
    dz = np.tile(np.diff(z_values, axis=1, append=0.), (model.num_subcolumns, 1, 1))
    for hyd_type in hyd_types:
        if hyd_type[-1] == 'l':
            rho_b = model.Rho_hyd[hyd_type]  # bulk water
            re_array = np.tile(model.ds[re_fields[hyd_type]], (model.num_subcolumns, 1, 1))
            if model.lambda_field is not None:  # assuming mu and lambda can be provided only for liq hydrometeors
                if model.lambda_field[hyd_type] is not None:
                    lambda_array = model.ds[model.lambda_field[hyd_type]].values
                    mu_array = model.ds[model.mu_field[hyd_type]].values
        else:
            rho_b = instrument.rho_i  # bulk ice
            rho_hyd = model.Rho_hyd[hyd_type]
            if rho_hyd == 'variable':
                rho_hyd = model.ds[model.variable_density[hyd_type]].values
            fi_factor = model.fluffy[hyd_type].magnitude * rho_hyd / rho_b + \
                (1 - model.fluffy[hyd_type].magnitude) * (rho_hyd / rho_b) ** (1 / 3)
            re_array = np.tile(model.ds[re_fields[hyd_type]] * fi_factor,
                               (model.num_subcolumns, 1, 1))

        tau_hyd = np.where(model.ds["%s_q_subcolumns_%s" % (cloud_str, hyd_type)] > 0,
                           3 * model.ds["%s_q_subcolumns_%s" % (cloud_str, hyd_type)] * rhoa_dz /
                           (2 * rho_b * re_array * 1e-6), 0)
        A_hyd = tau_hyd / (2 * dz)  # model assumes geometric scatterers

        if np.isin(hyd_type, ["ci", "pi"]):
            if mie_for_ice:
                r_eff_bulk = instrument.bulk_table[bulk_mie_ice_lut]["r_e"].values.copy()
                Qback_bulk = instrument.bulk_table[bulk_mie_ice_lut]["Q_back"].values
                Qext_bulk = instrument.bulk_table[bulk_mie_ice_lut]["Q_ext"].values
            else:
                r_eff_bulk = instrument.bulk_table[bulk_ice_lut]["r_e"].values.copy()
                Qback_bulk = instrument.bulk_table[bulk_ice_lut]["Q_back"].values
                Qext_bulk = instrument.bulk_table[bulk_ice_lut]["Q_ext"].values
        else:
            if model.model_name in ["E3SM", "CESM2"]:
                mu_b = np.tile(instrument.bulk_table[bulk_liq_lut]["mu"].values,
                               (instrument.bulk_table[bulk_liq_lut]["lambdas"].size)).flatten()
                lambda_b = instrument.bulk_table[bulk_liq_lut]["lambda"].values.flatten()
            else:
                r_eff_bulk = instrument.bulk_table[bulk_liq_lut]["r_e"].values
            Qback_bulk = instrument.bulk_table[bulk_liq_lut]["Q_back"].values
            Qext_bulk = instrument.bulk_table[bulk_liq_lut]["Q_ext"].values

        if np.logical_and(np.isin(hyd_type, ["cl", "pl"]), model.model_name in ["E3SM", "CESM2"]):
            print("2-D interpolation of bulk liq lidar backscattering using mu-lambda values")
            rel_locs = model.ds[model.q_names_stratiform[hyd_type]].values > 0.
            back_tmp = np.ones_like(model.ds[model.q_names_stratiform[hyd_type]].values, dtype=float) * np.nan
            ext_tmp = np.copy(back_tmp)
            interpolator = LinearNDInterpolator(np.stack((mu_b, lambda_b), axis=1), Qback_bulk.flatten())
            interp_vals = interpolator(mu_array[rel_locs], lambda_array[rel_locs])
            np.place(back_tmp, rel_locs, interp_vals)
            print("2-D interpolation of bulk liq lidar extinction using mu-lambda values")
            interpolator = LinearNDInterpolator(np.stack((mu_b, lambda_b), axis=1), Qext_bulk.flatten())
            interp_vals = interpolator(mu_array[rel_locs], lambda_array[rel_locs])
            np.place(ext_tmp, rel_locs, interp_vals)
            model.ds["sub_col_beta_p_%s_%s" % (hyd_type, cloud_str)] = xr.DataArray(
                np.tile(back_tmp, (n_subcolumns, 1, 1)) * A_hyd,
                dims=model.ds["%s_q_subcolumns_cl" % cloud_str].dims).fillna(0)
            model.ds["sub_col_alpha_p_%s_%s" % (hyd_type, cloud_str)] = xr.DataArray(
                np.tile(ext_tmp, (n_subcolumns, 1, 1)) * A_hyd,
                dims=model.ds["%s_q_subcolumns_cl" % cloud_str].dims).fillna(0)
        else:
            model.ds["sub_col_alpha_p_%s_%s" % (hyd_type, cloud_str)] = xr.DataArray(
                np.interp(re_array, r_eff_bulk, Qext_bulk) * A_hyd,
                dims=model.ds["%s_q_subcolumns_cl" % cloud_str].dims).fillna(0)
            model.ds["sub_col_beta_p_%s_%s" % (hyd_type, cloud_str)] = xr.DataArray(
                np.interp(re_array, r_eff_bulk, Qback_bulk) * A_hyd,
                dims=model.ds["%s_q_subcolumns_cl" % cloud_str].dims).fillna(0)

        model = accumulate_OD(model, is_conv, z_values, hyd_type, OD_from_sfc, **kwargs)

        model.ds["sub_col_beta_p_tot_%s" % cloud_str] += \
            model.ds["sub_col_beta_p_%s_%s" % (hyd_type, cloud_str)].fillna(0)
        model.ds["sub_col_alpha_p_tot_%s" % cloud_str] += \
            model.ds["sub_col_alpha_p_%s_%s" % (hyd_type, cloud_str)].fillna(0)
        model.ds["sub_col_OD_tot_%s" % cloud_str] += \
            model.ds["sub_col_OD_%s_%s" % (hyd_type, cloud_str)].fillna(0)

    return model
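
The mu-lambda branch above reduces to a 2-D scattered-data lookup over paired LUT coordinates. A self-contained sketch with a fabricated LUT:

import numpy as np
from scipy.interpolate import LinearNDInterpolator

mu_b = np.repeat(np.linspace(0.0, 10.0, 11), 20)        # paired LUT coordinates
lambda_b = np.tile(np.linspace(100.0, 5000.0, 20), 11)
Qback_bulk = mu_b * 1e-3 + lambda_b * 1e-6              # fake backscatter LUT

interpolator = LinearNDInterpolator(np.stack((mu_b, lambda_b), axis=1),
                                    Qback_bulk)
interp_vals = interpolator(np.array([2.0, 5.0]), np.array([300.0, 1200.0]))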
Example #5
    def compute_geolayer(self) -> (np.ndarray, np.ndarray):
        """Compute pixel-wise lon/lat arrays based on RPC coefficients, corner coordinates and image dimensions.

        :return: (2D longitude array, 2D latitude array)
        """
        # transform corner coordinates of EnMAP image to UTM
        grid_utm_epsg = get_UTMEPSG_from_LonLat(
            *get_center_coord(self.enmapIm_cornerCoords))
        cornerCoordsUTM = np.array([
            transform_any_prj(4326, grid_utm_epsg, lon, lat)
            for lon, lat in self.enmapIm_cornerCoords
        ])
        xmin, xmax = cornerCoordsUTM[:, 0].min(), cornerCoordsUTM[:, 0].max()
        ymin, ymax = cornerCoordsUTM[:, 1].min(), cornerCoordsUTM[:, 1].max()

        # get UTM bounding box and move it to the EnMAP grid
        xmin, ymin, xmax, ymax = \
            move_extent_to_coord_grid((xmin, ymin, xmax, ymax),
                                      enmap_coordinate_grid_utm['x'], enmap_coordinate_grid_utm['y'])

        # set up a regular grid of UTM points with a specific mesh width
        meshwidth = 300  # 10 EnMAP pixels
        y_grid_utm, x_grid_utm = np.meshgrid(np.arange(ymax, ymin - meshwidth,
                                                       -meshwidth),
                                             np.arange(xmin, xmax + meshwidth,
                                                       meshwidth),
                                             indexing='ij')

        if not isinstance(self.elevation, (int, float)):
            # transform UTM grid to DEM projection
            x_grid_demPrj, y_grid_demPrj = \
                (x_grid_utm, y_grid_utm) if prj_equal(grid_utm_epsg, self.elevation.epsg) else \
                transform_coordArray(CRS(grid_utm_epsg).to_wkt(),
                                     CRS(self.elevation.epsg).to_wkt(),
                                     x_grid_utm, y_grid_utm)

            # retrieve corresponding heights from DEM
            # -> resample DEM to EnMAP grid?
            xy_pairs_demPrj = np.vstack(
                [x_grid_demPrj.flatten(),
                 y_grid_demPrj.flatten()]).T
            heights = self.elevation.read_pointData(xy_pairs_demPrj).flatten()
        else:
            heights = np.full_like(x_grid_utm.flatten(), self.elevation)

        # transform UTM points to lon/lat
        lon_grid, lat_grid = \
            transform_coordArray(CRS(grid_utm_epsg).to_wkt(), CRS(4326).to_wkt(), x_grid_utm, y_grid_utm)
        lons = lon_grid.flatten()
        lats = lat_grid.flatten()

        # compute floating point EnMAP image coordinates for the selected UTM points
        rows, cols = self.transform_LonLatHeight_to_RowCol(lon=lons,
                                                           lat=lats,
                                                           height=heights)

        # interpolate lon/lats to get lon/lat coordinates at the integer image coordinates of the EnMAP image
        rows_im, cols_im = self.enmapIm_dims_sensorgeo
        out_rows_grid, out_cols_grid = np.meshgrid(range(rows_im),
                                                   range(cols_im),
                                                   indexing='ij')
        out_xy_pairs = np.vstack(
            [out_cols_grid.flatten(),
             out_rows_grid.flatten()]).T
        in_xy_pairs = np.array((cols, rows)).T

        # Compute the triangulation (that takes time and can be computed for all values to be interpolated at once),
        # then run the interpolation
        triangulation = Delaunay(in_xy_pairs)
        lons_interp = LinearNDInterpolator(
            triangulation, lons)(out_xy_pairs).reshape(*out_rows_grid.shape)
        lats_interp = LinearNDInterpolator(
            triangulation, lats)(out_xy_pairs).reshape(*out_rows_grid.shape)

        # lons_interp / lats_interp may contain NaN values in case xmin, ymin, xmax, ymax has been set too small
        # to account for RPC transformation errors
        # => fix that by extrapolation at NaN value positions
        # FIXME: can this be avoided by modified xmin/ymin/xmax/ymax coords?

        lons_interp = self._fill_nans_at_corners(
            lons_interp, along_axis=0)  # extrapolate in left/right direction
        lats_interp = self._fill_nans_at_corners(lats_interp, along_axis=1)

        # return a geolayer in the exact dimensions like the EnMAP detector array
        return lons_interp, lats_interp
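
Both interpolators above share one Delaunay triangulation, so the expensive step runs only once for lon and lat together; the same idea in isolation (toy coordinates):

import numpy as np
from scipy.spatial import Delaunay
from scipy.interpolate import LinearNDInterpolator

in_xy = np.vstack([np.random.default_rng(3).uniform(0, 100, size=(500, 2)),
                   [[0, 0], [100, 0], [0, 100], [100, 100]]])
lons = 10.0 + 0.01 * in_xy[:, 0]
lats = 48.0 + 0.01 * in_xy[:, 1]

triangulation = Delaunay(in_xy)               # computed once
out_xy = np.array([[50.0, 50.0], [10.0, 90.0]])
lons_interp = LinearNDInterpolator(triangulation, lons)(out_xy)
lats_interp = LinearNDInterpolator(triangulation, lats)(out_xy)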
Example #6
d[groups['interior']] = -1.0
d[groups['ghosts:free']] = -1.0
d[groups['boundary:fixed']] = 0.0
d[groups['boundary:free']] = 0.0

# find the solution at the nodes
u_soln = spsolve(A, d)
error = np.abs(u_soln - series_solution(nodes))

## PLOT THE NUMERICAL SOLUTION AND ITS ERROR
fig, axs = plt.subplots(2, figsize=(6, 8))
# interpolate the solution on a grid
xg, yg = np.meshgrid(np.linspace(-0.05, 2.05, 400),
                     np.linspace(-0.05, 2.05, 400))
points = np.array([xg.flatten(), yg.flatten()]).T
u_itp = LinearNDInterpolator(nodes, u_soln)(points)
# mask points outside of the domain
u_itp[~contains(points, vert, smp)] = np.nan
ug = u_itp.reshape((400, 400))  # fold back into a grid
# make a contour plot of the solution
p = axs[0].contourf(xg, yg, ug, np.linspace(-1e-6, 0.3, 9), cmap='viridis')
fig.colorbar(p, ax=axs[0])
# plot the domain
for s in smp:
    axs[0].plot(vert[s, 0], vert[s, 1], 'k-', lw=2)

# show the locations of the nodes
for i, (k, v) in enumerate(groups.items()):
    axs[0].plot(nodes[v, 0], nodes[v, 1], 'C%so' % i, markersize=4, label=k)

axs[0].set_title('RBF-FD solution')
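
Because LinearNDInterpolator fills the whole convex hull, the masking step above is what restores the non-convex domain. A self-contained sketch using a matplotlib Path test as a stand-in for rbf's `contains`:

import numpy as np
from matplotlib.path import Path
from scipy.interpolate import LinearNDInterpolator

nodes = np.random.default_rng(4).uniform(0, 2, size=(300, 2))
u_soln = np.sin(nodes[:, 0]) * np.cos(nodes[:, 1])

xg, yg = np.meshgrid(np.linspace(0, 2, 50), np.linspace(0, 2, 50))
points = np.column_stack([xg.ravel(), yg.ravel()])
u_itp = LinearNDInterpolator(nodes, u_soln)(points)

domain = Path([(0, 0), (2, 0), (2, 1), (1, 1), (1, 2), (0, 2)])  # L-shaped stand-in
u_itp[~domain.contains_points(points)] = np.nan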
Example #7
    def __init__(self, coords, values):
        Interpolator.__init__(self, coords, values)
        self._interpolator = LinearNDInterpolator(self._prepare(coords),
                                                  values,
                                                  rescale=False)
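
`rescale=False` above is the scipy default; `rescale=True` normalizes each axis to the unit cube before triangulating, which helps when the coordinates differ by orders of magnitude. A toy illustration:

import numpy as np
from scipy.interpolate import LinearNDInterpolator

pts = np.random.default_rng(5).uniform(0, 1, size=(100, 2))
pts[:, 1] *= 1e6                    # badly scaled second axis
vals = pts[:, 0]
f = LinearNDInterpolator(pts, vals, rescale=True)
print(f([[0.5, 5e5]]))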
#############################################
# Defining the Survey
# -------------------
#
# Here, we define survey that will be used for the simulation. Magnetic
# surveys are simple to create. The user only needs an (N, 3) array to define
# the xyz locations of the observation locations, the list of field components
# which are to be modeled and the properties of the Earth's field.
#

# Define the observation locations as an (N, 3) numpy array or load them.
x = np.linspace(-80.0, 80.0, 17)
y = np.linspace(-80.0, 80.0, 17)
x, y = np.meshgrid(x, y)
x, y = mkvc(x.T), mkvc(y.T)
fun_interp = LinearNDInterpolator(np.c_[x_topo, y_topo], z_topo)
z = fun_interp(np.c_[x, y]) + 10  # Flight height 10 m above surface.
receiver_locations = np.c_[x, y, z]

# Define the component(s) of the field we want to simulate as a list of strings.
# Here we simulate total magnetic intensity data.
components = ["tmi"]

# Use the observation locations and components to define the receivers. To
# simulate data, the receivers must be defined as a list.
receiver_list = magnetics.receivers.Point(receiver_locations,
                                          components=components)

receiver_list = [receiver_list]

# Define the inducing field H0 = (intensity [nT], inclination [deg], declination [deg])
Example #9
import dipy
from dipy.io.image import load_nifti
import coordinates
import nibabel as nib
from scipy.interpolate import griddata, LinearNDInterpolator
import numpy as np

difff1=nib.load('/home/uzair/PycharmProjects/Unfolding/data/diffusionSimulations_res-6mm_drt-17+w-99'
                         '/Unfolded/data.nii.gz')
difff2=nib.load('/home/uzair/PycharmProjects/Unfolding/data/diffusionSimulations_res-6250mm_drt-17+w-99'
                         '/Unfolded/data.nii.gz')


diff1=difff1.get_fdata()
diff2=difff2.get_fdata()

S1=[]
S2=[]
for b in range(0, diff2.shape[3]):
    print(b)
    points,S=coordinates.getPointsData(difff1,b)
    interp=LinearNDInterpolator(points, S)
    for i in range(0,diff2.shape[0]):
        for j in range(0, diff2.shape[1]):
            for k in range(0, diff2.shape[2]):
                S2.append(diff2[i,j,k,b])
                ind=np.asarray([i,j,k])
                point=coordinates.toWorld(difff1,[ind])
                S1.append(interp(point[0]))
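
The voxel-by-voxel probing above can be collapsed into one vectorized call: build the full (i, j, k) index grid, map it to world coordinates in one shot, and query the interpolator with an (N, 3) array. A self-contained sketch, with an identity affine standing in for coordinates.toWorld and synthetic data throughout:

import numpy as np
from scipy.interpolate import LinearNDInterpolator

rng = np.random.default_rng(9)
points = rng.uniform(0, 10, size=(500, 3))   # stand-in for getPointsData output
S = points.sum(axis=1)
interp = LinearNDInterpolator(points, S)

shape = (8, 8, 8)
affine = np.eye(4)                           # stand-in for the NIfTI affine
ii, jj, kk = np.meshgrid(*map(np.arange, shape), indexing='ij')
ijk1 = np.column_stack([ii.ravel(), jj.ravel(), kk.ravel(), np.ones(ii.size)])
world = (affine @ ijk1.T).T[:, :3]
S1 = interp(world)                           # all voxels at once; NaN outside the hull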

Example #10
def estimateDepth(grid, method="tiltAngleDerivative"):
    """
        Function to estimate the depth of anomalies
        from the tilt angle, as the distance between
        the 0 and pi/4 contours.

        INPUT
        :object: grid Grid object from

        OUTPUT
        :xyDepth:
    """

    assert method.lower() in [
        "tiltAngle".lower(), "tiltAngleDerivative".lower()
    ], "'method' should be 'tiltAngle' or 'tiltAngleDerivative'"

    upward_height = 0
    print(grid.heightUC)
    if getattr(grid, 'heightUC', None) is not None:
        upward_height += grid.heightUC

    tilt = grid.tiltAngle
    X, Y = np.meshgrid(grid.hx, grid.hy)
    C_0 = plt.contour(X, Y, tilt, levels=[0], colors='k')
    plt.close()
    xy = []
    depth = []
    if method == 'tiltAngle':

        C_45 = plt.contour(X, Y, tilt, levels=[np.pi / 4.], colors='r')
        plt.close()

        # Get zero contour nodes
        # xy0 = np.vstack(C_0.allsegs[0])

        # Get 45 contour nodes
        xy45 = np.vstack(C_45.allsegs[0])

        # Create cKDTree for shortest-distance queries
        tree = cKDTree(xy45)

        # Compute shortest distance between pair of points
        for contour in C_0.allsegs[0]:

            if contour.shape[0] == 1:
                continue

            # Query the two closest points to each node of the zero contour
            d, indx = tree.query(contour, k=2)

            length = ((xy45[indx[:, 1], 0] - xy45[indx[:, 0], 0])**2. +
                      (xy45[indx[:, 1], 1] - xy45[indx[:, 0], 1])**2.)

            indL = length > 0

            depth += [
                upward_height +
                np.abs((xy45[indx[indL, 1], 1] - xy45[indx[indL, 0], 1]) *
                       contour[indL, 0] -
                       (xy45[indx[indL, 1], 0] - xy45[indx[indL, 0], 0]) *
                       contour[indL, 1] +
                       xy45[indx[indL, 1], 0] * xy45[indx[indL, 0], 1] -
                       xy45[indx[indL, 1], 1] * xy45[indx[indL, 0], 0]) /
                length[indL]**0.5
            ]

            xy += [contour[indL, :]]
    else:

        # Compute the total horizontal derivative (THD) of the tilt angle
        grad_tilt = np.gradient(grid.tiltAngle, grid.dx, grid.dy)

        THD_tilt = (grad_tilt[0]**2. + grad_tilt[1]**2. + 1e-8)**0.5

        # Interpolate value on 0 contour
        tri2D = Delaunay(grid.gridCC[:, :2])
        F = LinearNDInterpolator(tri2D, THD_tilt.flatten(order='F'))

        for contour in C_0.allsegs[0]:

            estimate_z = 1. / F(contour[:, 0], contour[:, 1])
            depth += [upward_height + estimate_z[~np.isnan(estimate_z)]]
            xy += [contour[~np.isnan(estimate_z), :]]

        # if getattr(self, 'heightUC', None) is None
    return xy, depth
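
The tiltAngleDerivative branch boils down to pulling contour vertices from matplotlib and interpolating a derived field onto them; a self-contained sketch on a synthetic grid:

import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import LinearNDInterpolator

X, Y = np.meshgrid(np.linspace(-2, 2, 80), np.linspace(-2, 2, 80))
tilt = np.arctan2(Y, np.hypot(X, 0.5))
THD_tilt = np.hypot(*np.gradient(tilt)) + 1e-8

C_0 = plt.contour(X, Y, tilt, levels=[0], colors='k')
plt.close()
F = LinearNDInterpolator(np.c_[X.ravel(), Y.ravel()], THD_tilt.ravel())
for contour in C_0.allsegs[0]:
    estimate_z = 1. / F(contour[:, 0], contour[:, 1])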
Example #11
#pts_shell = pts[cutoff+1:,:]
#print pts_shell
#Uabss_core = Uabss[:cutoff]
#Uabss_shell = Uabss[cutoff+1:]
#print Uabss_core
#print Uabss_shell

# make an interpolation of the data so it can be called like a function
# for a quadrature routine
#testout  = griddata(pts,Uabss,(0.02,0,0),method='linear')
#print testout
#testout  = griddata(pts_core,Uabss_core,(0.02,0,0),method='linear')
#print testout

##THIS IS NOT EVEN CLOSE!!!:
interp = LinearNDInterpolator(pts, Uabss)
print('interpolation in phi')
print(interp(0.27273E-01, 0.18480E+01, 0.36960E+01))
print(interp(0.27273E-01, 0.18480E+01, 0.375E+01))
print(interp(0.27273E-01, 0.18480E+01, 0.38808E+01))
print('interpolation in theta')
print(interp(0.27273E-01, 0.18480E+01, 0.36960E+01))
print(interp(0.27273E-01, 0.19E+01, 0.36960E+01))
print(interp(0.27273E-01, 0.20328E+01, 0.36960E+01))
print('interpolation in r')
print(interp(0.27273E-01, 0.18480E+01, 0.36960E+01))
print(interp(0.29E-01, 0.18480E+01, 0.36960E+01))
print(interp(0.32727E-01, 0.18480E+01, 0.36960E+01))

#print griddata(pts,Uabss,(0.27273E-01,  0.18480E+01,  0.36960E+01),method='linear')
#print griddata(pts,Uabss,(0.27273E-01,  0.18480E+01,  0.377E+01),method='linear')
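
Each probe above is a separate scalar call; LinearNDInterpolator also accepts an (N, 3) array, which evaluates all query points at once (toy stand-ins for pts and Uabss):

import numpy as np
from scipy.interpolate import LinearNDInterpolator

pts = np.random.default_rng(8).uniform(0, 1, size=(400, 3))
Uabss = pts.sum(axis=1)
interp = LinearNDInterpolator(pts, Uabss)

queries = np.array([[0.3, 0.5, 0.6],
                    [0.4, 0.5, 0.6],
                    [0.5, 0.5, 0.6]])
print(interp(queries))              # one call, three values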
Example #12
triangles = []
for line in open(datadir + fn_triangle):
    triNo = map(lambda no: int(no) - 1, line.split()[:3])
    tri = frozenset(map(allPoints.__getitem__, triNo))
    assert len(tri) == 3
    triangles.append(tri)

On_same_side = lambda x, y, x3, y3, x1, y1, x2, y2: (x - x1) * (y1 - y2) == (
    y - y1) * (x1 - x2) or ((x - x1) * (y1 - y2) < (y - y1) * (x1 - x2)) == (
        (x3 - x1) * (y1 - y2) < (y3 - y1) * (x1 - x2))


def Is_in_tri(x, y, tri):
    x1, x2, x3 = map(attrgetter('x'), tri)
    y1, y2, y3 = map(attrgetter('y'), tri)
    return (On_same_side(x, y, x3, y3, x1, y1, x2, y2)
            and On_same_side(x, y, x1, y1, x2, y2, x3, y3)
            and On_same_side(x, y, x2, y2, x3, y3, x1, y1))


for tri in triangles:
    if Is_in_tri(x0, y0, tri):
        x = tuple(map(attrgetter('x'), tri))
        y = tuple(map(attrgetter('y'), tri))
        phi = tuple(map(attrgetter('phi'), tri))
        m = array((x, y))
        m = m.transpose()
        print(LinearNDInterpolator(m, phi)((x0, y0)))
        break
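
A note on the manual triangle search above: given all vertices at once, LinearNDInterpolator triangulates internally and returns the same barycentric result, so the Is_in_tri scan can be replaced by a single call (toy data):

import numpy as np
from scipy.interpolate import LinearNDInterpolator

xy = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
phi = np.array([0.0, 1.0, 1.0, 2.0])       # phi = x + y at the vertices
print(LinearNDInterpolator(xy, phi)((0.25, 0.25)))   # -> 0.5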
Example #13
    indTop = meshInput.gridCC[:, 2] == meshInput.vectorCCz[-1]
    topo = meshInput.gridCC[indTop, :]
    topo[:, 2] += meshInput.hz.min()/2. + 1e-8
    topo_interp_function = NearestNDInterpolator(topo[:, :2], topo[:, 2])
    
if "drape_data" in list(input_dict.keys()):
    drape_data = input_dict["drape_data"]
    
    # In case topo is very large, only use interpolant points next to observations
    max_pad_distance = 4 * drape_data
    
    # Create new data locations draped at drapeAltitude above topo
    ix = (topo[:, 0] >= (rxLoc[:, 0].min() - max_pad_distance)) & (topo[:, 0] <= (rxLoc[:, 0].max() + max_pad_distance)) & \
         (topo[:, 1] >= (rxLoc[:, 1].min() - max_pad_distance)) & (topo[:, 1] <= (rxLoc[:, 1].max() + max_pad_distance))

    F = LinearNDInterpolator(topo[ix, :2], topo[ix, 2])
    
    z = F(rxLoc[:, 0], rxLoc[:, 1]) + input_dict["drape_data"]
    survey.srcField.rxList[0].locs = np.c_[rxLoc[:, :2], z]
else:
    drape_data = None

if "target_chi" in list(input_dict.keys()):
    target_chi = input_dict["target_chi"]
else:
    target_chi = 1

if "model_norms" in list(input_dict.keys()):
    model_norms = input_dict["model_norms"]
else:
    model_norms = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
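
The bounding-box subset above is the key trick: triangulating only the topography near the observations keeps LinearNDInterpolator construction cheap. Reduced to a self-contained sketch with synthetic topography (the 30 m drape height is arbitrary):

import numpy as np
from scipy.interpolate import LinearNDInterpolator

rng = np.random.default_rng(6)
topo = np.column_stack([rng.uniform(0, 10000, size=(50000, 2)),
                        rng.uniform(0, 100, size=50000)])   # x, y, elevation
rxLoc = rng.uniform(4000, 6000, size=(50, 2))               # observation xy
max_pad_distance = 500.0

ix = ((topo[:, 0] >= rxLoc[:, 0].min() - max_pad_distance) &
      (topo[:, 0] <= rxLoc[:, 0].max() + max_pad_distance) &
      (topo[:, 1] >= rxLoc[:, 1].min() - max_pad_distance) &
      (topo[:, 1] <= rxLoc[:, 1].max() + max_pad_distance))
F = LinearNDInterpolator(topo[ix, :2], topo[ix, 2])         # triangulate subset only
z = F(rxLoc[:, 0], rxLoc[:, 1]) + 30.0                      # drape 30 m above topo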
Example #14
def refineTree(mesh,
               xyz,
               finalize=False,
               dtype="radial",
               octreeLevels=[1, 1, 1],
               octreeLevels_XY=None,
               maxDist=np.inf,
               maxLevel=None):

    if octreeLevels_XY is not None:

        assert len(octreeLevels_XY) == len(
            octreeLevels
        ), "Arguments 'octreeLevels' and 'octreeLevels_XY' must be the same length"

    else:

        octreeLevels_XY = np.zeros_like(octreeLevels)

    if maxLevel is None:
        maxLevel = int(np.log2(mesh.hx.shape[0]))

    tree = cKDTree(xyz)

    if dtype == "point":

        mesh.insert_cells(xyz,
                          np.ones(xyz.shape[0]) * maxLevel,
                          finalize=False)

        stencil = np.r_[np.ones(octreeLevels[0]),
                        np.ones(octreeLevels[1]) * 2,
                        np.ones(octreeLevels[2]) * 3]

        # Reflect in the opposite direction
        vec = np.r_[stencil[::-1], 1, stencil]
        vecX, vecY, vecZ = np.meshgrid(vec, vec, vec)
        gridLevel = np.maximum(np.maximum(np.abs(vecX), np.abs(vecY)),
                               np.abs(vecZ))
        gridLevel = np.kron(np.ones(xyz.shape[0]), mkvc(gridLevel))

        # Grid the coordinates
        vec = np.r_[-np.cumsum(stencil)[::-1], 0, np.cumsum(stencil)]
        vecX, vecY, vecZ = np.meshgrid(vec, vec, vec)
        offset = np.c_[mkvc(np.sign(vecX) * np.abs(vecX) * mesh.hx.min()),
                       mkvc(np.sign(vecY) * np.abs(vecY) * mesh.hy.min()),
                       mkvc(np.sign(vecZ) * np.abs(vecZ) * mesh.hz.min())]

        # Replicate the point locations at each offset grid point
        newLoc = (np.kron(xyz, np.ones(
            (offset.shape[0], 1))) + np.kron(np.ones(
                (xyz.shape[0], 1)), offset))

        # Apply max distance
        r, ind = tree.query(newLoc)

        mesh.insert_cells(newLoc[r < maxDist],
                          maxLevel - mkvc(gridLevel)[r < maxDist] + 1,
                          finalize=False)

        if finalize:
            mesh.finalize()

    elif dtype == "radial":

        # Compute the outer limits of each octree level
        rMax = np.cumsum(mesh.hx.min() * np.asarray(octreeLevels) *
                         2**np.arange(len(octreeLevels)))

        def inBall(cell):
            xyz = cell.center
            r, ind = tree.query(xyz)

            for ii, nC in enumerate(octreeLevels):

                if r < rMax[ii]:

                    return maxLevel - ii

            return 0

        mesh.refine(inBall, finalize=finalize)

    elif dtype == 'surface':

        # Compute centroid and
        centroid = np.mean(xyz, axis=0)

        # Largest outer point distance
        rOut = np.linalg.norm(np.r_[np.abs(centroid[0] - xyz[:, 0]).max(),
                                    np.abs(centroid[1] - xyz[:, 1]).max()])

        # Compute maximum depth of refinement
        zmax = np.cumsum(mesh.hz.min() * np.asarray(octreeLevels) *
                         2**np.arange(len(octreeLevels)))
        padWidth = np.cumsum(mesh.hx.min() * np.asarray(octreeLevels_XY) *
                             2**np.arange(len(octreeLevels_XY)))

        depth = zmax[-1]

        # Increment the vertical offset
        zOffset = 0
        xyPad = -1
        # Cycle through the Tree levels backward
        for ii in range(len(octreeLevels) - 1, -1, -1):

            dx = mesh.hx.min() * 2**ii
            dy = mesh.hy.min() * 2**ii
            dz = mesh.hz.min() * 2**ii

            # Increase the horizontal extent of the surface
            if xyPad != padWidth[ii]:
                xyPad = padWidth[ii]

                # Calculate expansion for padding XY cells
                expFactor = (rOut + xyPad) / rOut
                xLoc = (xyz - centroid) * expFactor + centroid

                # Create a new triangulated surface
                tri2D = Delaunay(xLoc[:, :2])
                F = LinearNDInterpolator(tri2D, xLoc[:, 2])

            limx = np.r_[xLoc[:, 0].max(), xLoc[:, 0].min()]
            limy = np.r_[xLoc[:, 1].max(), xLoc[:, 1].min()]

            nCx = int(np.ceil((limx[0] - limx[1]) / dx))
            nCy = int(np.ceil((limy[0] - limy[1]) / dy))

            # Create a grid at the octree level in xy
            CCx, CCy = np.meshgrid(np.linspace(limx[1], limx[0], nCx),
                                   np.linspace(limy[1], limy[0], nCy))

            xy = np.c_[CCx.reshape(-1), CCy.reshape(-1)]

            # Only keep points within triangulation
            indexTri = tri2D.find_simplex(xy)

            z = F(xy[indexTri != -1])

            newLoc = np.c_[xy[indexTri != -1], z]

            # Only keep points within maxDist
            r, ind = tree.query(newLoc)

            # Apply vertical padding for current octree level
            zOffset = 0
            while zOffset < depth:
                indIn = r < (maxDist + padWidth[ii])
                nnz = int(np.sum(indIn))
                if nnz > 0:
                    mesh.insert_cells(np.c_[newLoc[indIn, :2],
                                            newLoc[indIn, 2] - zOffset],
                                      np.ones(nnz) * maxLevel - ii,
                                      finalize=False)

                zOffset += dz

            depth -= dz * octreeLevels[ii]

        if finalize:
            mesh.finalize()

    elif dtype == 'box':

        # Define the data extent [bottom SW, top NE]
        bsw = np.min(xyz, axis=0)
        tne = np.max(xyz, axis=0)

        hx = mesh.hx.min()

        if mesh.dim == 2:
            hz = mesh.hy.min()
        else:
            hz = mesh.hz.min()

        # Pre-calculate max depth of each level
        zmax = np.cumsum(hz * np.asarray(octreeLevels) *
                         2**np.arange(len(octreeLevels)))

        if mesh.dim == 2:
            # Pre-calculate outer extent of each level
            padWidth = np.cumsum(mesh.hx.min() * np.asarray(octreeLevels_XY) *
                                 2**np.arange(len(octreeLevels_XY)))

            # Make a list of outer limits
            BSW = [
                bsw - np.r_[padWidth[ii], zmax[ii]] for ii, (
                    octZ,
                    octXY) in enumerate(zip(octreeLevels, octreeLevels_XY))
            ]

            TNE = [
                tne + np.r_[padWidth[ii], zmax[ii]] for ii, (
                    octZ,
                    octXY) in enumerate(zip(octreeLevels, octreeLevels_XY))
            ]

        else:
            hy = mesh.hy.min()

            # Pre-calculate outer X extent of each level
            padWidth_x = np.cumsum(hx * np.asarray(octreeLevels_XY) *
                                   2**np.arange(len(octreeLevels_XY)))

            # Pre-calculate outer Y extent of each level
            padWidth_y = np.cumsum(hy * np.asarray(octreeLevels_XY) *
                                   2**np.arange(len(octreeLevels_XY)))

            # Make a list of outer limits
            BSW = [
                bsw - np.r_[padWidth_x[ii], padWidth_y[ii], zmax[ii]]
                for ii, (
                    octZ,
                    octXY) in enumerate(zip(octreeLevels, octreeLevels_XY))
            ]

            TNE = [
                tne + np.r_[padWidth_x[ii], padWidth_y[ii], zmax[ii]]
                for ii, (
                    octZ,
                    octXY) in enumerate(zip(octreeLevels, octreeLevels_XY))
            ]

        def inBox(cell):

            xyz = cell.center

            for ii, (nC, bsw, tne) in enumerate(zip(octreeLevels, BSW, TNE)):

                if np.all([xyz > bsw, xyz < tne]):
                    return maxLevel - ii

            return cell._level

        mesh.refine(inBox, finalize=finalize)

    else:
        raise NotImplementedError("Only dtype= 'surface' | 'point' "
                                  "| 'radial' | 'box' has been implemented")

    return mesh
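
The 'surface' branch above pairs Delaunay.find_simplex with LinearNDInterpolator so that only grid points inside the triangulated surface receive a z value; that pattern in isolation, with synthetic surface points:

import numpy as np
from scipy.spatial import Delaunay
from scipy.interpolate import LinearNDInterpolator

xLoc = np.random.default_rng(7).uniform(0, 100, size=(300, 2))
z_surf = 10.0 * np.sin(xLoc[:, 0] / 20.0)
tri2D = Delaunay(xLoc)
F = LinearNDInterpolator(tri2D, z_surf)

CCx, CCy = np.meshgrid(np.linspace(-10, 110, 60), np.linspace(-10, 110, 60))
xy = np.c_[CCx.ravel(), CCy.ravel()]
indexTri = tri2D.find_simplex(xy)
newLoc = np.c_[xy[indexTri != -1], F(xy[indexTri != -1])]   # keep in-hull points only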
Example #15
    def interp_mass_feh_eep(self,
                            interp_colname="_lgage",
                            mfe=(1.01, 0.01, 503.2),
                            lndi=True,
                            debug=False,
                            raise_error=False):
        test_minit, test_feh, test_eep = np.array(mfe, dtype=float)

        # 1. assert Minit [Fe/H] in range
        try:
            assert self.min_minit < test_minit <= self.max_minit
            assert self.min_feh < test_feh <= self.max_feh
        except AssertionError as ae:
            if not raise_error:
                return np.nan
            else:
                raise ae("The test values are not in bounds!")

        # 2. locate 4 tracks
        # ind_minit = find_rank_1d(self.u_minit, test_minit)
        # ind_feh = find_rank_1d(self.u_feh, test_feh)
        # val_minit = self.u_minit[ind_minit]
        # val_feh = self.u_feh[ind_feh]
        #
        # ind_track = np.where(np.logical_and(
        #     (self.grid_minit == val_minit[0]) | (self.grid_minit == val_minit[1]),
        #     (self.grid_feh == val_feh[0]) | (self.grid_feh == val_feh[1])))[0]
        # track4 = self.data[ind_track]
        track4 = self.get_track4_unstructured((test_minit, test_feh))
        if track4 is None:
            if raise_error:
                raise (ValueError("Bad test values!"))
            else:
                return np.nan
        eep_maxmin = np.max([_["_eep"][0] for _ in track4])
        eep_minmax = np.min([_["_eep"][-1] for _ in track4])

        # 3. assert EEP in range
        try:
            assert eep_maxmin < test_eep <= eep_minmax
        except AssertionError as ae:
            if not raise_error:
                return np.nan
            else:
                raise ae("EEP value is not in bounds!")

        # 4. locate EEP
        eep_arr = np.arange(eep_maxmin, eep_minmax + 1)
        ind_eep = find_rank_1d(eep_arr, test_eep)
        val_eep = eep_arr[ind_eep]

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            track_box = table.vstack([
                get_track_item_given_eeps(track, val_eep) for track in track4
            ])

        # 5. interpolate
        if not lndi:
            # points = np.array(track_box["_lgmass", "_feh", "_eep"].to_pandas())
            # values = track_box[interp_colname].data
            # lndi = LinearNDInterpolator(points, values)
            # test_points = np.array((np.log10(test_minit), test_feh, test_eep))
            w_mfe = (1 - calc_weight(track_box["_lgmass"], np.log10(test_minit), 4)) * \
                    (1 - calc_weight(track_box["_feh"], test_feh, 4)) * \
                    (1 - calc_weight(track_box["_eep"], test_eep, 4))
            if debug:
                return w_mfe
            star_result = table_linear_combination(track_box, w_mfe)
            return star_result

        elif type(interp_colname) is not list:
            points = np.array(track_box["_lgmass", "_feh", "_eep"].to_pandas())
            # for linear mass
            # points[:, 0] = np.power(10, points[:, 0])
            values = track_box[interp_colname].data
            lndi = LinearNDInterpolator(points, values)
            test_points = np.array((np.log10(test_minit), test_feh, test_eep))
            # for linear mass
            # test_points = np.array((np.log10(test_minit), test_feh, test_eep))
            return lndi(test_points)[0]
        elif type(interp_colname) is list:
            points = np.array(track_box["_lgmass", "_feh", "_eep"].to_pandas())
            test_points = np.array((np.log10(test_minit), test_feh, test_eep))
            results = []
            for _interp_colname in interp_colname:
                if type(_interp_colname) is int:
                    # directly return the input value if int
                    results.append(test_points[_interp_colname])
                else:
                    values = track_box[_interp_colname].data
                    results.append(
                        LinearNDInterpolator(points, values)(test_points)[0])
            return np.array(results)
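
The per-column loop in the list branch rebuilds the interpolator for every column; LinearNDInterpolator also accepts values of shape (npoints, k) and interpolates all k columns from one triangulation. A sketch with toy stand-ins for track_box:

import numpy as np
from scipy.interpolate import LinearNDInterpolator

points = np.random.default_rng(10).uniform(0, 1, size=(200, 3))  # (_lgmass, _feh, _eep)
values = np.column_stack([2 * points[:, 0], points[:, 1] + points[:, 2]])
lndi = LinearNDInterpolator(points, values)
test_points = np.array([[0.5, 0.5, 0.5]])
print(lndi(test_points)[0])         # both columns from a single evaluation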
Example #16
def cost_emission_calculation(m, n):

    if m.shape[0] == 0:
        # if the input dataframe is empty, there is nothing to compute
        pass

    else:
        # read age and capacity information from ultrasubcritical PC database
        x_1 = m.loc[:, ('age')].values
        y_1 = m.loc[:, ('capacity_MW')].values
        y_11 = m.loc[:, ('capacity_MW')].values

        # use linear interpolation to construct a surrogate model
        x1 = n.loc[:, ('age')].values.reshape(
            (int(n.shape[0] / n.capacity_MW.nunique()),
             n.capacity_MW.nunique()))[:, 0]
        y1 = n.loc[:, ('capacity_MW')].values.reshape(
            (int(n.shape[0] / n.capacity_MW.nunique()),
             n.capacity_MW.nunique()))[0, :]
        x1x1, y1y1 = np.meshgrid(x1, y1, indexing='ij')
        x1y1 = np.column_stack([x1x1.ravel(), y1y1.ravel()])
        # x1y1.shape
        y11 = n.loc[:, ('capacity_MW')].values.reshape(
            (int(n.shape[0] / n.capacity_MW.nunique()),
             n.capacity_MW.nunique()))[0, :]
        # y11.shape
        z1 = n.loc[:, ('marginal_cost_MWh')].values
        # z1.shape
        z11 = n.loc[:, ('annual_emission_ton')].values.reshape(
            (int(n.shape[0] / n.capacity_MW.nunique()),
             n.capacity_MW.nunique()))[0, :]
        # z11.shape

        # f1 is the functional relationship between electricity cost and age, capacity
        f1 = LinearNDInterpolator(x1y1, z1)
        # f1(30,1865)

        # f11 is the functional relationship between annual emission and capacity
        f11 = interpolate.interp1d(y11, z11, kind='slinear')
        # f11(1865)
        # vectorized function
        f_11 = np.vectorize(f11)

        # maximum x_1 and y_1 are 30 and 2500 respectively
        x_new_1 = np.clip(x_1, 1, 30)
        y_new_1 = np.clip(y_1, 100, 2500)
        xnew1ynew1 = np.column_stack((x_new_1, y_new_1))

        # cost estimation by age and capacity, clip at 30 and 2500
        cost = f1(xnew1ynew1)

        # emission estimation by capacity
        emission = f_11(np.clip(y_11 % 2500, 100,
                                2500)) + f_11(2500) * (y_11 // 2500)

        m = m.assign(cost=cost)
        m = m.assign(emission=emission)

        m.rename(columns={
            'cost': 'electricity_cost_MWh',
            'emission': 'annual_emission_ton'
        },
                 inplace=True)
        k = m.loc[:, :]

        text_file = open("log_file.txt", "a")
        text_file.write("###################### \n")
        text_file.write(
            "generation technology: %s, primary_fuel: %s \n" %
            (n.generation_technology.unique(), n.primary_fuel.unique()))
        text_file.write("cost range: max %s, min %s \n" %
                        (np.max(cost), np.min(cost)))
        text_file.write("emission range: max %s, min %s \n" %
                        (np.max(emission), np.min(emission)))
        text_file.write("###################### \n")
        text_file.write('\n')
        text_file.close()

        return k
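
Since the (age, capacity) LUT above lies on a regular grid, the same lookup can also be done without any triangulation via scipy's RegularGridInterpolator; a toy version of the clip-then-evaluate pattern with a fabricated cost surface:

import numpy as np
from scipy.interpolate import RegularGridInterpolator

age = np.arange(1, 31, dtype=float)
cap = np.linspace(100, 2500, 25)
cost_grid = 40 + 0.3 * age[:, None] - 0.004 * cap[None, :]   # fake cost surface

f1 = RegularGridInterpolator((age, cap), cost_grid)
plants = np.column_stack([np.clip([35.0, 12.0], 1, 30),
                          np.clip([3000.0, 900.0], 100, 2500)])
print(f1(plants))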
Example #17
def extract_column_from_wrfdata(
    fpath,
    coords,
    Ztop=2000.,
    Vres=5.0,
    T0=300.,
    spatial_filter='interpolate',
    L_filter=0.0,
    additional_fields=[],
    verbose=False,
):
    """
    Extract a column of time-height data for a specific site from a
    4-dimensional WRF output file

    The site specific data is obtained by applying a spatial filter to
    the WRF data and subsequently interpolating to an equidistant set 
    of vertical levels representing a microscale vertical grid.
    The following spatial filtering types are supported:
    - 'interpolate': interpolate to site coordinates
    - 'nearest':     use nearest WRF grid point
    - 'average':     average over an area with size L_filter x L_filter,
                     centered around the site

    Usage
    ====
    coords : list or tuple of length 2
        Latitude and longitude of the site for which to extract data
    Ztop : float
        Top of the microscale grid [m]
    Vres : float
        Vertical grid resolution of the microscale grid [m]
    T0 : float
        Reference temperature for WRF perturbation temperature [K]
    spatial_filter : 'interpolate', 'nearest' or 'average'
        Type of spatial filtering
    L_filter : float
        Length scale for spatial averaging [m]
    additional_fields : list
        Additional fields to be processed
    """
    import utm
    assert(spatial_filter in ['nearest','interpolate','average']),\
            'Spatial filtering type "'+spatial_filter+'" not recognised'

    # Load WRF data
    ds = xr.open_dataset(fpath)
    tdim, zdim, ydim, xdim = get_wrf_dims(ds)

    #---------------------------
    # Preprocessing
    #---------------------------

    # Extract WRF grid resolution
    dx_meso = ds.attrs['DX']
    assert (dx_meso == ds.attrs['DY'])

    # Number of additional points besides nearest grid point to perform spatial filtering
    if spatial_filter == 'interpolate':
        Nadd = 1
    elif spatial_filter == 'average':
        Nadd = int(
            np.ceil(0.5 * L_filter / dx_meso +
                    1.0e-6))  # +eps to make sure Nadd*dxmeso > L_filter/2
    else:
        Nadd = 0

    # Setup microscale grid data
    site_X, site_Y, site_zonenumber, _ = utm.from_latlon(coords[0], coords[1])

    if spatial_filter == 'average':
        Navg = 6  #Number of interpolation points to compute average
        xmicro = np.linspace(-L_filter / 2, L_filter / 2, Navg,
                             endpoint=True) + site_X
        ymicro = np.linspace(-L_filter / 2, L_filter / 2, Navg,
                             endpoint=True) + site_Y
    else:  # 'interpolate' or 'nearest'
        xmicro = site_X
        ymicro = site_Y

    zmicro = np.linspace(0, Ztop, 1 + int(Ztop / Vres))
    Zmicro, Ymicro, Xmicro = np.meshgrid(zmicro, ymicro, xmicro, indexing='ij')

    #2D and 3D list of points
    XYmicro = np.array((Ymicro[0, :, :].ravel(), Xmicro[0, :, :].ravel())).T
    XYZmicro = np.array((Zmicro.ravel(), Ymicro.ravel(), Xmicro.ravel())).T

    # Check whether additional fields are 3D or 4D and append to the corresponding list of fields
    fieldnames_3D = default_3D_fields
    fieldnames_4D = default_4D_fields
    for field in additional_fields:
        try:
            ndim = len(ds[field].dims)
        except KeyError:
            print('The additional field "' + field +
                  '" is not available and will be ignored.')
        else:
            if len(ds[field].dims) == 3:
                if field not in fieldnames_3D:
                    fieldnames_3D.append(field)
            elif len(ds[field].dims) == 4:
                if field not in fieldnames_4D:
                    fieldnames_4D.append(field)
            else:
                raise Exception(
                    'Field "' + field +
                    '" is not 3D or 4D, not sure how to process this field.')

    #---------------------------
    # Load data
    #---------------------------

    WRFdata = {}

    # Cell-centered coordinates
    XLAT = ds.variables['XLAT'].values  # WRF indexing XLAT[time,lat,lon]
    XLONG = ds.variables['XLONG'].values

    # Height above ground level
    WRFdata['Zagl'], _ = get_height(ds, timevarying=True)
    WRFdata['Zagl'] = add_surface_plane(WRFdata['Zagl'])

    # 3D fields. WRF indexing Z[tdim,ydim,xdim]
    for field in fieldnames_3D:
        WRFdata[field] = get_unstaggered_var(ds, field)

    # 4D fields. WRF indexing Z[tdim,zdim,ydim,xdim]
    for field in fieldnames_4D:
        WRFdata[field] = get_unstaggered_var(ds, field)
        if WRFdata[field] is None:
            continue

        # 4D field specific processing
        if field == 'T':
            # Add T0, set surface plane to TSK
            WRFdata[field] += T0
            WRFdata[field] = add_surface_plane(WRFdata[field],
                                               plane=WRFdata['TSK'])
        elif field in ['U', 'V', 'W']:
            # Set surface plane to zero (no slip)
            WRFdata[field] = add_surface_plane(WRFdata[field])
        else:
            WRFdata[field] = add_surface_plane(WRFdata[field],
                                               plane=WRFdata[field][:,
                                                                    0, :, :])

    # clean up empty fields
    for name in list(WRFdata.keys()):
        if WRFdata[name] is None:
            del WRFdata[name]
            try:
                fieldnames_3D.remove(name)
            except ValueError:
                pass
            try:
                fieldnames_4D.remove(name)
            except ValueError:
                pass

    #---------------------------
    # Extract site data
    #---------------------------
    sitedata = {}
    sitedata['Zagl'] = zmicro

    # Nearest grid points to the site
    points = np.array((XLAT[0, :, :].ravel(), XLONG[0, :, :].ravel())).T
    tree = KDTree(points)
    dist, index = tree.query(np.array(coords), 1)  # nearest grid point
    inear = int(index % xdim)  # index to XLONG
    jnear = int((index - inear) / xdim)  # index to XLAT

    # Extract data and apply spatial filter if necessary
    if spatial_filter == 'nearest':
        # - 3D fields
        for field in fieldnames_3D:
            sitedata[field] = WRFdata[field][:, jnear, inear]

        # - 4D fields
        for field in fieldnames_4D:
            sitedata[field] = np.zeros((tdim, zmicro.size))

        # Interpolate to microscale z grid at every time
        for t in range(tdim):
            Zmeso = WRFdata['Zagl'][t, :, jnear, inear].squeeze()
            wrf_data_combined = np.array([
                WRFdata[field][t, :, jnear, inear].squeeze()
                for field in fieldnames_4D
            ]).T
            site_data_combined = interp1d(Zmeso, wrf_data_combined,
                                          axis=0)(zmicro)
            for l, field in enumerate(fieldnames_4D):
                sitedata[field][t, :] = site_data_combined[:, l]

    else:  # 'interpolate' or 'average'
        # Coordinates of a subset of the WRF grid in UTM-projected cartesian system
        NN = 1 + 2 * Nadd
        Xmeso = np.zeros((NN, NN))
        Ymeso = np.zeros((NN, NN))
        for i, ii in enumerate(range(inear - Nadd, inear + Nadd + 1)):
            for j, jj in enumerate(range(jnear - Nadd, jnear + Nadd + 1)):
                Xmeso[j, i], Ymeso[j, i], _, _ = utm.from_latlon(
                    XLAT[0, jj, ii],
                    XLONG[0, jj, ii],
                    force_zone_number=site_zonenumber)

        Xmeso = np.repeat(Xmeso[np.newaxis, :, :], zdim + 1, axis=0)
        Ymeso = np.repeat(Ymeso[np.newaxis, :, :], zdim + 1, axis=0)
        XYmeso = np.array(
            (np.ravel(Ymeso[0, :, :]), np.ravel(Xmeso[0, :, :]))).T

        #Initialize fields
        for field in fieldnames_3D:
            sitedata[field] = np.zeros((tdim))
        for field in fieldnames_4D:
            sitedata[field] = np.zeros((tdim, zmicro.size))

        #Perform 3D interpolation to microscale grid for every time
        for t in range(tdim):
            # 3D fields
            slice3d = (t, slice(jnear - Nadd, jnear + Nadd + 1),
                       slice(inear - Nadd, inear + Nadd + 1))
            wrf_data_combined = np.array(
                [WRFdata[field][slice3d].ravel() for field in fieldnames_3D]).T
            site_data_combined = LinearNDInterpolator(
                XYmeso, wrf_data_combined)(XYmicro)
            for l, field in enumerate(fieldnames_3D):
                if spatial_filter == 'interpolate':
                    sitedata[field][t] = site_data_combined[0, l]
                elif spatial_filter == 'average':
                    sitedata[field][t] = np.mean(site_data_combined[:, l])

            # 4D fields
            slice4d = (t, range(zdim + 1), slice(jnear - Nadd,
                                                 jnear + Nadd + 1),
                       slice(inear - Nadd, inear + Nadd + 1))
            Zmeso = WRFdata['Zagl'][slice4d]
            XYZmeso = np.array((Zmeso.ravel(), Ymeso.ravel(), Xmeso.ravel())).T

            wrf_data_combined = np.array(
                [WRFdata[field][slice4d].ravel() for field in fieldnames_4D]).T
            site_data_combined = LinearNDInterpolator(
                XYZmeso, wrf_data_combined)(XYZmicro)
            for l, field in enumerate(fieldnames_4D):
                if spatial_filter == 'interpolate':
                    sitedata[field][t, :] = site_data_combined[:, l]
                elif spatial_filter == 'average':
                    sitedata[field][t, :] = np.mean(
                        site_data_combined[:, l].reshape(Zmicro.shape),
                        axis=(1, 2))

    #---------------------------
    # Store data in xarray
    #---------------------------

    coords = {'Time': ds['XTIME'].values, 'height': zmicro}

    data_vars = {}
    for field in fieldnames_3D:
        data_vars[field] = ('Time', sitedata[field], {
            'description':
            ds[field].attrs['description'].lower(),
            'units':
            ds[field].attrs['units']
        })
    for field in fieldnames_4D:
        data_vars[field] = (['Time', 'height'], sitedata[field], {
            'description':
            ds[field].attrs['description'].lower(),
            'units':
            ds[field].attrs['units']
        })

    xn = xr.Dataset(data_vars=data_vars, coords=coords)

    # Rename T to theta and adjust description
    xn = xn.rename({'T': 'theta'})
    xn['theta'].attrs['description'] = 'potential temperature'

    return xn
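The interpolation machinery above boils down to one scipy pattern: triangulate the scattered (z, y, x) cloud once, stack every field as a column of the values array, and evaluate all fields in a single LinearNDInterpolator call. A minimal standalone sketch of that pattern with synthetic data (all names here are illustrative, not from the original):

import numpy as np
from scipy.interpolate import LinearNDInterpolator

# synthetic "mesoscale" cloud of (z, y, x) points
zz, yy, xx = np.meshgrid(np.linspace(0, 100, 5),
                         np.linspace(0, 1000, 4),
                         np.linspace(0, 1000, 4), indexing='ij')
pts = np.column_stack((zz.ravel(), yy.ravel(), xx.ravel()))

# two fields stacked as columns -> one triangulation, one evaluation
u = np.random.rand(len(pts))
v = np.random.rand(len(pts))
f = LinearNDInterpolator(pts, np.column_stack((u, v)))

u_i, v_i = f([[50.0, 500.0, 500.0]])[0]  # both fields at one target point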
Example #18
0
    def __init__(self, points, values):

        if points.ndim == 1 or points.shape[1] == 1:
            self._interpolator = interp1d(points.flatten(), values.T)
        else:
            self._interpolator = LinearNDInterpolator(points, values)
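Example #18 shows only the constructor of a dimension-dispatching wrapper; a self-contained sketch of the same idea (the class name and the __call__ method are assumptions, not part of the original):

import numpy as np
from scipy.interpolate import interp1d, LinearNDInterpolator

class AnyDimInterpolator:
    # 1-D points fall back to interp1d; anything higher uses LinearNDInterpolator
    def __init__(self, points, values):
        if points.ndim == 1 or points.shape[1] == 1:
            self._interpolator = interp1d(points.flatten(), values.T)
        else:
            self._interpolator = LinearNDInterpolator(points, values)

    def __call__(self, xi):
        return self._interpolator(xi)

f = AnyDimInterpolator(np.linspace(0, 1, 10), np.random.rand(10))
f(0.5)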
Example #19
0
def point_fcst_uv_tmp_according_to_3D_field_vs_sounding(
        output_dir=None,
        obs_ID='55664',
        initTime=None,
        fhour=6,
        day_back=0,
        extra_info={
            'output_head_name': ' ',
            'output_tail_name': ' ',
            'point_name': ' ',
            'drw_thr': True,
            'levels_for_interp': [
                1000, 950, 925, 900, 850, 800, 700, 600, 500, 400, 300, 250,
                200, 150
            ]
        },
        **kwargs):

    model = 'GRAPES_GFS'
    try:
        dir_rqd = [
            utl.Cassandra_dir(data_type='high',
                              data_source='OBS',
                              var_name='TLOGP',
                              lvl=''),
            utl.Cassandra_dir(data_type='high',
                              data_source=model,
                              var_name='HGT',
                              lvl=''),
            utl.Cassandra_dir(data_type='high',
                              data_source=model,
                              var_name='UGRD',
                              lvl=''),
            utl.Cassandra_dir(data_type='high',
                              data_source=model,
                              var_name='VGRD',
                              lvl=''),
            utl.Cassandra_dir(data_type='high',
                              data_source=model,
                              var_name='TMP',
                              lvl='')
        ]
    except KeyError:
        raise ValueError('Cannot find all of the required data directories')

    if initTime is None:
        initTime = get_latest_initTime(dir_rqd[1][0:-1] + '/850')

    filename_obs = (datetime.strptime('20' + initTime, '%Y%m%d%H') +
                    timedelta(hours=fhour)).strftime('%Y%m%d%H%M%S') + '.000'
    obs_pfl_all = MICAPS_IO.get_tlogp(dir_rqd[0][0:-1],
                                      filename=filename_obs,
                                      cache=False)
    if (obs_pfl_all is None):
        return
    obs_pfl_raw = obs_pfl_all[obs_pfl_all.ID == obs_ID]
    obs_pfl = obs_pfl_raw.replace(9999.0, np.nan).dropna(how='any')
    obs_pfl = obs_pfl[obs_pfl.p >= 200.]

    directory = dir_rqd[1][0:-1]
    filename = initTime + '.' + str(fhour).zfill(3)
    HGT_4D = get_model_3D_grid(directory=directory,
                               filename=filename,
                               levels=extra_info['levels_for_interp'],
                               allExists=False)
    directory = dir_rqd[2][0:-1]
    U_4D = get_model_3D_grid(directory=directory,
                             filename=filename,
                             levels=extra_info['levels_for_interp'],
                             allExists=False)
    directory = dir_rqd[3][0:-1]
    V_4D = get_model_3D_grid(directory=directory,
                             filename=filename,
                             levels=extra_info['levels_for_interp'],
                             allExists=False)

    directory = dir_rqd[4][0:-1]
    TMP_4D = get_model_3D_grid(directory=directory,
                               filename=filename,
                               levels=extra_info['levels_for_interp'],
                               allExists=False)

    points = {
        'lon': obs_pfl.lon.to_numpy(),
        'lat': obs_pfl.lat.to_numpy(),
        'altitude': obs_pfl.h.to_numpy() * 10
    }

    delt_xy = HGT_4D['lon'].values[1] - HGT_4D['lon'].values[0]
    mask = (HGT_4D['lon'] < (points['lon'][0] + 2 * delt_xy)) & (
        HGT_4D['lon'] > (points['lon'][0] - 2 * delt_xy)
    ) & (HGT_4D['lat'] <
         (points['lat'][0] + 2 * delt_xy)) & (HGT_4D['lat'] >
                                              (points['lat'][0] - 2 * delt_xy))

    HGT_4D_sm = HGT_4D['data'].where(mask, drop=True)
    U_4D_sm = U_4D['data'].where(mask, drop=True)
    V_4D_sm = V_4D['data'].where(mask, drop=True)
    TMP_4D_sm = TMP_4D['data'].where(mask, drop=True)

    lon_md = np.squeeze(HGT_4D_sm['lon'].values)
    lat_md = np.squeeze(HGT_4D_sm['lat'].values)
    alt_md = np.squeeze(HGT_4D_sm.values * 10).flatten()
    time_md = HGT_4D_sm['forecast_period'].values

    coords = np.zeros((HGT_4D_sm.level.size, len(lat_md), len(lon_md), 3))
    coords[..., 1] = lat_md.reshape((1, len(lat_md), 1))
    coords[..., 2] = lon_md.reshape((1, 1, len(lon_md)))
    coords = coords.reshape((alt_md.size, 3))
    coords[:, 0] = alt_md

    interpolator_U = LinearNDInterpolator(coords,
                                          U_4D_sm.values.reshape(
                                              (U_4D_sm.values.size)),
                                          rescale=True)
    interpolator_V = LinearNDInterpolator(coords,
                                          V_4D_sm.values.reshape(
                                              (V_4D_sm.values.size)),
                                          rescale=True)
    interpolator_TMP = LinearNDInterpolator(coords,
                                            TMP_4D_sm.values.reshape(
                                                (TMP_4D_sm.values.size)),
                                            rescale=True)

    coords2 = np.zeros((np.size(points['lon']), 3))
    coords2[:, 0] = points['altitude']
    coords2[:, 1] = points['lat']
    coords2[:, 2] = points['lon']

    U_interped = np.squeeze(interpolator_U(coords2))
    V_interped = np.squeeze(interpolator_V(coords2))
    windsp_interped = (U_interped**2 + V_interped**2)**0.5
    winddir10m_interped = mpcalc.wind_direction(U_interped * units('m/s'),
                                                V_interped * units('m/s'))
    TMP_interped = np.squeeze(interpolator_TMP(coords2))

    fcst_pfl = obs_pfl.copy()
    fcst_pfl.wind_angle = np.array(winddir10m_interped)
    fcst_pfl.wind_speed = np.array(windsp_interped)
    fcst_pfl.t = TMP_interped

    fcst_info = xr.DataArray(np.array(U_4D_sm.values),
                             coords=U_4D_sm.coords,
                             dims=U_4D_sm.dims,
                             attrs={
                                 'points': points,
                                 'model': model
                             })

    sta_graphics.draw_sta_skewT_model_VS_obs(fcst_pfl=fcst_pfl,
                                             obs_pfl=obs_pfl,
                                             fcst_info=fcst_info,
                                             output_dir=output_dir)
Example #20
0
    def r( self, c, gpos, mpos=0.0):
        """
        Calculates the virtual distances between grid point locations and
        microphone locations or the origin. These virtual distances correspond
        to travel times of the sound along a ray that is traced through the
        medium.

        Parameters
        ----------
        c : float
            The speed of sound to use for the calculation.
        gpos : array of floats of shape (3, N)
            The locations of points in the beamforming map grid in 3D cartesian
            co-ordinates.
        mpos : array of floats of shape (3, M), optional
            The locations of microphones in 3D cartesian co-ordinates. If not
            given, then only one microphone at the origin (0, 0, 0) is
            considered.

        Returns
        -------
        array of floats
            The distances in a two-dimensional (N, M) array of floats. If M==1,
            then only a one-dimensional array is returned.
        """
        if isscalar(mpos):
            mpos = array((0, 0, 0), dtype = float32)[:, newaxis]

        # the DE system
        def f1(t, y, v):
            x = y[0:3]
            s = y[3:6]
            vv, dv = v(x)
            sa = sqrt(s[0]*s[0]+s[1]*s[1]+s[2]*s[2])
            x = empty(6)
            x[0:3] = c*s/sa - vv # time reversal
            x[3:6] = dot(s, -dv.T) # time reversal
            return x

        # integration along a single ray
        def fr(x0, n0, rmax, dt, v, xyz, t):
            s0 = n0 / (c+dot(v(x0)[0], n0))
            y0 = hstack((x0, s0))
            oo = ode(f1)
            oo.set_f_params(v)
            oo.set_integrator('vode', 
                              rtol=1e-4, # accuracy !
                              max_step=1e-4*rmax) # for thin shear layer
            oo.set_initial_value(y0, 0)
            while oo.successful():
                xyz.append(oo.y[0:3])
                t.append(oo.t)
                if norm(oo.y[0:3]-x0)>rmax:
                    break
                oo.integrate(oo.t+dt)

        gs2 = gpos.shape[-1]
        gt = empty((gs2, mpos.shape[-1]))
        vv = self.ff.v
        NN = int(sqrt(self.N))
        for micnum, x0 in enumerate(mpos.T):
            xe = gpos.mean(1) # center of grid
            r = x0[:, newaxis]-gpos
            rmax = sqrt((r*r).sum(0).max()) # maximum distance
            nv = spiral_sphere(self.N, self.Om, b=xe-x0)
            rstep = rmax/sqrt(self.N)
            rmax += rstep
            tstep = rstep/c
            xyz = []
            t = []
            lastind = 0
            for i, n0 in enumerate(nv.T):
                fr(x0, n0, rmax, tstep, vv, xyz, t)
                if i and i % NN == 0:
                    if not lastind:
                        dd = ConvexHull(vstack((gpos.T, xyz)), incremental=True)
                    else:
                        dd.add_points(xyz[lastind:], restart=True)
                    lastind = len(xyz)
                    # ConvexHull includes grid if no grid points on hull
                    if dd.simplices.min()>=gs2:
                        break
            xyz = array(xyz)
            t = array(t)
            li = LinearNDInterpolator(xyz, t)
            gt[:, micnum] = li(gpos.T)
        if gt.shape[1] == 1:
            gt = gt[:, 0]
        return c*gt #return distance along ray
Example #21
0
except:
    feq = open(fileDir + 'units.m')
    for line in feq:
        if 'psi_x' in line:
            psi_x = float(line.split()[1])

psin = fm['psi'][:] / psi_x
tri = fm['cell_set[0]/node_connect_list'][...]
triObj = Triangulation(RZ[:, 0], RZ[:, 1], tri)
file3d = fileDir + 'xgc.3d.00001.h5'
f3d = h5py.File(file3d, 'r')
sml_nphi = f3d['nphi'][0]
sml_iphi = f3d['iphi'][0]

#setup Bfield interpolator (could use higher order interpolation scheme)
Binterp = LinearNDInterpolator(RZ, Bgrid, fill_value=np.inf)

blob_generator = syntheticBlobs(RZ, psin, tri, Bgrid, sml_nphi)

#now generate some blobs
#xcenter = np.array([0.95,0,0]) #(psin,theta,phi)
#xcenter = np.array([2.26,0,0]) #(R,Z,phi)

for timestep in range(0, 5):
    xcenter = np.array([2.26, timestep * 0.01, 0])  #(R,Z,phi)
    ntor = 5
    Lpol = 0.01
    Lrad = Lpol / 1.5
    dnOvernMag = 0.1
    dnOvernXGC = blob_generator.generate(xcenter, ntor, Lpol, Lrad, dnOvernMag)
Example #22
0
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib import cm
from scipy.interpolate import LinearNDInterpolator
import numpy as np

from quantecon import SearchProblem, compute_fixed_point


sp = SearchProblem(w_grid_size=100, pi_grid_size=100)
v_init = np.zeros(len(sp.grid_points)) + sp.c / (1 - sp.beta)
v = compute_fixed_point(sp.bellman_operator, v_init)
policy = sp.get_greedy(v)

# Make functions from these arrays by interpolation
vf = LinearNDInterpolator(sp.grid_points, v)
pf = LinearNDInterpolator(sp.grid_points, policy)

pi_plot_grid_size, w_plot_grid_size = 100, 100
pi_plot_grid = np.linspace(0.001, 0.99, pi_plot_grid_size)
w_plot_grid = np.linspace(0, sp.w_max, w_plot_grid_size)

#plot_choice = 'value_function'
plot_choice = 'policy_function'

if plot_choice == 'value_function':
    Z = np.empty((w_plot_grid_size, pi_plot_grid_size))
    for i in range(w_plot_grid_size):
        for j in range(pi_plot_grid_size):
            Z[i, j] = vf(w_plot_grid[i], pi_plot_grid[j])
    fig, ax = plt.subplots()
Example #23
0
def plot2Ddata(xyz,
               data,
               vec=False,
               nx=100,
               ny=100,
               ax=None,
               mask=None,
               level=False,
               figname=None,
               ncontour=10,
               dataloc=False,
               contourOpts={},
               levelOpts={},
               scale="linear",
               clim=None,
               method='linear'):
    """

        Take unstructured xy points, interpolate, then plot in 2D

        :param numpy.ndarray xyz: data locations
        :param numpy.ndarray data: data values
        :param bool vec: plot streamplot?
        :param int nx: number of x grid locations
        :param int ny: number of y grid locations
        :param matplotlib.axes ax: axes
        :param boolean numpy.ndarray mask: mask for the array
        :param boolean level: boolean to plot (or not)
                                :meth:`matplotlib.pyplot.contour`
        :param string figname: figure name
        :param float ncontour: number of :meth:`matplotlib.pyplot.contourf`
                                contours
        :param bool dataloc: plot the data locations
        :param dict contourOpts: :meth:`matplotlib.pyplot.contourf` options
        :param dict levelOpts: :meth:`matplotlib.pyplot.contour` options
        :param numpy.ndarray clim: colorbar limits
        :param str method: interpolation method, either 'linear' or 'nearest'

    """

    # Error checking and set vmin, vmax
    contourOpts = dict(contourOpts)  # copy: don't mutate the caller's dict or the shared default
    vlimits = [None, None]

    if clim is not None:
        vlimits = [np.min(clim), np.max(clim)]

    for i, key in enumerate(["vmin", "vmax"]):
        if key in contourOpts.keys():
            if vlimits[i] is None:
                vlimits[i] = contourOpts.pop(key)
            else:
                if not np.isclose(contourOpts[key], vlimits[i]):
                    raise Exception(
                        "The values provided in the colorbar limit, clim {} "
                        "does not match the value of {} provided in the "
                        "contourOpts: {}. Only one value should be provided or "
                        "the two values must be equal.".format(
                            vlimits[i], key, contourOpts[key]))
                contourOpts.pop(key)
    vmin, vmax = vlimits[0], vlimits[1]

    # create a figure if it doesn't exist
    if ax is None:
        fig = plt.figure()
        ax = plt.subplot(111)

    # interpolate data to grid locations
    xmin, xmax = xyz[:, 0].min(), xyz[:, 0].max()
    ymin, ymax = xyz[:, 1].min(), xyz[:, 1].max()
    x = np.linspace(xmin, xmax, nx)
    y = np.linspace(ymin, ymax, ny)
    X, Y = np.meshgrid(x, y)
    xy = np.c_[X.flatten(), Y.flatten()]

    if vec is False:
        if method == 'nearest':
            F = NearestNDInterpolator(xyz[:, :2], data)
        else:
            F = LinearNDInterpolator(xyz[:, :2], data)
        DATA = F(xy)
        DATA = DATA.reshape(X.shape)

        # Levels definitions
        dataselection = np.logical_and(~np.isnan(DATA), np.abs(DATA) != np.inf)

        if scale == "log":
            DATA = np.log10(abs(DATA))

            vmin = np.log10(vmin) if vmin is not None else vmin
            vmax = np.log10(vmax) if vmax is not None else vmax

        vmin = DATA[dataselection].min() if vmin is None else vmin
        vmax = DATA[dataselection].max() if vmax is None else vmax

        vstep = np.abs((vmin - vmax) / (ncontour + 1))
        levels = np.arange(vmin, vmax + vstep, vstep)
        if DATA[dataselection].min() < levels.min():
            levels = np.r_[DATA[dataselection].min(), levels]
        if DATA[dataselection].max() > levels.max():
            levels = np.r_[levels, DATA[dataselection].max()]

        if mask is not None:
            Fmask = NearestNDInterpolator(xyz[:, :2], mask)
            MASK = Fmask(xy)
            MASK = MASK.reshape(X.shape)
            DATA = np.ma.masked_array(DATA, mask=MASK)

        cont = ax.contourf(X,
                           Y,
                           DATA,
                           levels=levels,
                           vmin=vmin,
                           vmax=vmax,
                           **contourOpts)
        if level:
            CS = ax.contour(X, Y, DATA, levels=levels, **levelOpts)

    else:
        # Assume size of data is (N,2)
        datax = data[:, 0]
        datay = data[:, 1]
        if method == 'nearest':
            Fx = NearestNDInterpolator(xyz[:, :2], datax)
            Fy = NearestNDInterpolator(xyz[:, :2], datay)
        else:
            Fx = LinearNDInterpolator(xyz[:, :2], datax)
            Fy = LinearNDInterpolator(xyz[:, :2], datay)
        DATAx = Fx(xy)
        DATAy = Fy(xy)
        DATA = np.sqrt(DATAx**2 + DATAy**2).reshape(X.shape)
        DATAx = DATAx.reshape(X.shape)
        DATAy = DATAy.reshape(X.shape)
        if scale == "log":
            DATA = np.log10(abs(DATA))

        # Levels definitions
        dataselection = np.logical_and(~np.isnan(DATA), np.abs(DATA) != np.inf)

        # set vmin, vmax
        vmin = DATA[dataselection].min() if vmin is None else vmin
        vmax = DATA[dataselection].max() if vmax is None else vmax
        if scale == "log":
            if vmin <= 0 or vmax <= 0:
                raise Exception(
                    "All values must be strictly positive in order to use the log-scale"
                )
            vmin = np.log10(vmin)
            vmax = np.log10(vmax)

        vstep = np.abs((vmin - vmax) / (ncontour + 1))
        levels = np.arange(vmin, vmax + vstep, vstep)
        if DATA[dataselection].min() < levels.min():
            levels = np.r_[DATA[dataselection].min(), levels]
        if DATA[dataselection].max() > levels.max():
            levels = np.r_[levels, DATA[dataselection].max()]

        if mask is not None:
            Fmask = NearestNDInterpolator(xyz[:, :2], mask)
            MASK = Fmask(xy)
            MASK = MASK.reshape(X.shape)
            DATA = np.ma.masked_array(DATA, mask=MASK)

        cont = ax.contourf(X,
                           Y,
                           DATA,
                           levels=levels,
                           vmin=vmin,
                           vmax=vmax,
                           **contourOpts)
        ax.streamplot(X, Y, DATAx, DATAy, color="w")
        if level:
            CS = ax.contour(X, Y, DATA, levels=levels, **levelOpts)

    if dataloc:
        ax.plot(xyz[:, 0], xyz[:, 1], 'k.', ms=2)
    plt.gca().set_aspect('equal', adjustable='box')
    if figname:
        plt.axis("off")
        ax.figure.savefig(figname, dpi=200)
    if level:
        return cont, ax, CS
    else:
        return cont, ax
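plot2Ddata is self-contained, so a quick usage sketch on random scattered data (purely illustrative):

import numpy as np
xyz = np.random.uniform(-1.0, 1.0, (300, 2))   # scattered (x, y) locations
data = np.exp(-(xyz ** 2).sum(axis=1))         # smooth positive scalar field
cont, ax = plot2Ddata(xyz, data, ncontour=20, dataloc=True)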
Example #24
0
def find_closest_qmc(U=8, T=0.67, mu=4.0, **kwargs):
    """
    This function finds the closest values of U and T in the QMC data 
    that straddle the values U and T given as arguments.
    
    """

    nUs = 4 
    nTs = 3
    ALLPTS = kwargs.get('ALLPTS', False) 

    # select which quantity will be returned, options are
    # spi and entropy
    QTY = kwargs.get('QTY', 'spi' ) 
    
    if QTY == 'spi':
        datadir = basedir + 'COMB_Final_Spi/'
    elif QTY == 'entropy':
        datadir = basedir + 'COMB_Final_Entr/'
    elif QTY == 'density':
        datadir = basedir + 'COMB_Final_Spi/'
    elif QTY == 'kappa':
        datadir = basedir + 'COMB_Final_Spi/'
    else:
        raise ValueError('Quantity not defined:' + str(QTY) ) 
         
      
    
    fname = datadir + 'U*'
    us = [ float(u.split('/U')[-1]) for u in glob.glob(fname) ] 
    du = [ np.abs(U-u) for u in us ]
    index = np.argsort(du)

    if ALLPTS: 
        Ulist0 = range(len(index)) 
    else:
        Ulist0 = range( nUs ) 

    us = [ us[index[i]] for i in Ulist0] 
    #print us
    #print du
    #print index
    #print "Closest Us = ", us
    
    datfiles = []
    for u in us:    
    
        # For the Spi and Stheta data
        if QTY == 'spi' or QTY == 'density' or QTY == 'kappa':
            fname = datadir + 'U{U:02d}/T*dat'.format(U=int(u))
            fs = sorted(glob.glob(fname))
            Ts = [ float(f.split('T')[1].split('.dat')[0]) for f in fs ]
        elif QTY=='entropy':
            fname = datadir + 'U{U:02d}/S*dat'.format(U=int(u))
            fs = sorted(glob.glob(fname))
            Ts = [ float(f.split('S')[1].split('.dat')[0]) for f in fs ]


        Ts_g = [] ; Ts_l = []; 
        for t in Ts:
            if t > T:
                Ts_g.append(t) 
            else:
                Ts_l.append(t) 
        
        order_g = np.argsort( [ np.abs( T -t ) for t in Ts_g ] )
        order_l = np.argsort( [ np.abs( T -t ) for t in Ts_l ] )

        try:
            Tpts = [Ts_g[order_g[0]], Ts_l[order_l[0]]]
        except IndexError:
            # available T data does not straddle the point;
            # fall back to the nearest Ts below
            Tpts = []
            #raise ValueError("QMC data not available.")


        dT = [ np.abs( T - t) for t in Ts ] 
        index = np.argsort(dT)
 
        if ALLPTS: 
            Tlist0 = range(len(Ts)) 
        else:
            Tlist0 = range( min(nTs , len(Ts)))
   
        for i in Tlist0:
            Tnew = Ts[index[i]] 
            if Tnew not in Tpts:
                Tpts.append(Tnew) 
        for Tpt in Tpts: 
            index = Ts.index( Tpt )  
            try:
                datfiles.append( [ fs[ index ], u, Ts[index] ] ) 
            except Exception:
                print("problem adding U =", u, "T =", Ts)
                raise
          

        # Need to make sure that the selected T values straddle both
        # sides of the point
        
    MUCOL   = 0 
    DENSCOL = 1
    ENTRCOL = 2  
    SPICOL  = 3 
    CMPRCOL = 4
  
    if QTY == 'spi':
        COL = SPICOL 
    elif QTY == 'entropy':
        COL = ENTRCOL 
    elif QTY == 'density':
        COL = DENSCOL 
    elif QTY == 'kappa':
        COL = CMPRCOL
       
    msg0 = 'U={:0.2f}, T={:0.2f}'.format(U,T) 
 
    logger.debug("number of nearby points  = " +  str(len(datfiles))) 
    basedat = []
    basedaterr = []
    datserr = [] 
    for mm, f in enumerate(datfiles):
        # f[0] is the datafile name
        # f[1] is U
        # f[2] is T 

        radius = kwargs.get('radius', np.nan ) 
        msg = 'U={:0.2f}, T={:0.2f}'.format(U,T) + \
               ' mu={:0.2f}, r={:0.2f}, Upt={:0.3f}, Tpt={:0.3f}'.\
               format(mu, radius, f[1], f[2])
 
        try:
            dat = np.loadtxt(f[0])
            spival = get_qty_mu( dat, mu, MUCOL, COL, msg=msg )

            # Toggle the false here to plot all of the out of bounds
            if spival == 'out-of-bounds':
                #spival_symmetry = 
                logger.info('qty is out of bounds')
                basedaterr.append( [f[1], f[2], np.nan] )
                datserr.append( dat ) 

                if False:
                    fig = plt.figure( figsize=(3.5,3.5))
                    gs = matplotlib.gridspec.GridSpec( 1,1 ,\
                            left=0.15, right=0.96, bottom=0.12, top=0.88)
                    ax = fig.add_subplot( gs[0] )
                    ax.grid(alpha=0.5) 
                    ax.plot( dat[:,MUCOL], dat[:,COL], '.-') 
                    ax.axvline( mu )
                    ax.text( 0.5, 1.05, msg, ha='center', va='bottom', \
                        transform=ax.transAxes, fontsize=6.) 
                    if matplotlib.get_backend() == 'agg':
                        fig.savefig('err_mu_%02d.png'%mm, dpi=200)
                        plt.close(fig)
                    else:         
                        plt.show()
                        plt.close(fig)
                continue 
            else: 
                basedat.append( [f[1], f[2], spival] )
        except Exception as e :
            print "Failed to get data from file = ", f  

            # toggle plotting, not implemented yet: 
            if True:
                fig = plt.figure( figsize=(3.5,3.5))
                gs = matplotlib.gridspec.GridSpec( 1,1 ,\
                        left=0.15, right=0.96, bottom=0.12, top=0.88)
                ax = fig.add_subplot( gs[0] )
                ax.grid(alpha=0.5) 
                ax.plot( dat[:,MUCOL], dat[:,COL], '.-') 
                ax.axvline( mu )
                ax.text( 0.5, 1.05, msg, ha='center', va='bottom', \
                    transform=ax.transAxes) 
                if matplotlib.get_backend() == 'agg':
                    fig.savefig('err_mu_%02d.png'%mm, dpi=200)
                else:         
                    plt.show()
            raise e
    logger.debug("number of nearby valid points  = " +  str(len(basedat))) 
 
        
    error = False
    points = None

    # MAKE THE TRIANGULATION 
    basedat =   np.array(basedat)

    Us = np.unique(basedat[:,0] )
    Ts = np.unique(basedat[:,1] )

    validTriang = not (len(Us) == 1 or len(Ts) == 1)
    #print "#Us={:d}, #Ts={:d}".format( len(Us), len(Ts) )  
    #print msg 

    if validTriang: 
        points = _ndim_coords_from_arrays(( basedat[:,0] , basedat[:,1]))
        #print "Closest dat = ", basedat
        #finterp = CloughTocher2DInterpolator(points, basedat[:,2])
        finterp = LinearNDInterpolator( points, basedat[:,2] )
    else:
       
        logerr = 'not enough finterp points, QTY=%s'%QTY + '\n' + msg + '\n' \
                  + "number of basedat pts = " + str(len(basedat))
        print(basedat)
        print("len Us = ", len(Us))
        print("len Ts = ", len(Ts))
        print("len 'out-of-bounds' = ", len(basedaterr))
        if len( basedaterr ) > 0:
            for bb, bdaterr in enumerate(basedaterr):
                msgbb = 'U={:0.2f}, T={:0.2f}'.format(U,T) +\
                   ' mu={:0.2f}, r={:0.2f}, Upt={:0.3f}, Tpt={:0.3f}'.\
               format(mu, radius, basedaterr[bb][0], basedaterr[bb][1] )
                daterr = datserr[bb]
                fig = plt.figure( figsize=(3.5,3.5))
                gs = matplotlib.gridspec.GridSpec( 1,1 ,\
                        left=0.15, right=0.96, bottom=0.12, top=0.88)
                ax = fig.add_subplot( gs[0] )
                ax.grid(alpha=0.5) 
                ax.plot( daterr[:,MUCOL], daterr[:,COL], '.-') 
                ax.axvline( mu )
                ax.text( 0.5, 1.05, msgbb, ha='center', va='bottom', \
                    transform=ax.transAxes, fontsize=6.) 
                if matplotlib.get_backend() == 'agg':
                    fig.savefig('err_mu_%02d.png'%bb, dpi=200)
                    plt.close(fig)
                else:         
                    plt.show()
                    plt.close(fig)
        logger.exception(logerr)
        raise ValueError('finterp')


    if points is None:
        logger.warning( "points object is None"  )  

    if not error:
        try:
            result = finterp( U,T )
            if np.isnan(result):
                if U >= 30.0 and U <=32.5:
                    result = finterp( 29.99, T ) 
                    logger.warning(" qmc: U={:0.1f} replaced to U=29.99 ".\
                                     format(U) )
            if np.isnan(result):
                raise Exception("\n!!!! qmc: Invalid result, QTY:%s!!!!\n"%QTY \
                        + msg0)
        except Exception as e:
            if kwargs.get('error_nan', False):
                return np.nan 
            else:
                error = True
                logger.exception("Invalid QTY result!")

    if not error:
        if result >= 8. and QTY == 'spi':
            print(" Obtained Spi > 8. : U={:0.2f}, T={:0.2f}, mu={:0.2f}".format(U, T, mu) +
                  "  ==> Spi={:0.2f}".format(float(result)))
            error = True
        elif result >= 4. and QTY == 'entropy':
            print(" Obtained Ent > 4. : U={:0.2f}, T={:0.2f}, mu={:0.2f}".format(U, T, mu) +
                  "  ==> Result={:0.2f}".format(float(result)))
            error = True

    logger.debug("error status = " + str(error)) 
    if error or kwargs.get('showinterp',False):
        logger.debug("Inside error if statement...") 

        if kwargs.get('error_nan', False):
            pass
            #return np.nan 

        #print "Interp points:"
        #print basedat
        if len(basedat) == 0 and len(basedaterr) > 0 :

            basedaterr =   np.array(basedaterr)
            Userr = np.unique(basedaterr[:,0] )
            Tserr = np.unique(basedaterr[:,1] ) 
            validTriangerr = not ( len(Userr) ==1  or len(Tserr) ==  1 ) 

            points = _ndim_coords_from_arrays(( basedaterr[:,0] , basedaterr[:,1]))
            tri = Delaunay(points)

        else:
            tri = Delaunay(points)
        fig = plt.figure( figsize=(3.5,3.5))
        gs = matplotlib.gridspec.GridSpec( 1,1 ,\
                left=0.15, right=0.96, bottom=0.12, top=0.88)
        ax = fig.add_subplot( gs[0] )
        ax.grid(alpha=0.5)
        ax.triplot(points[:,0], points[:,1], tri.simplices.copy())
        ax.plot(points[:,0], points[:,1], 'o')
        ax.plot( U, T, 'o', ms=6., color='red')
        xlim = ax.get_xlim()
        dx = (xlim[1]-xlim[0])/10.
        ax.set_xlim( xlim[0]-dx, xlim[1]+dx )
        ylim = ax.get_ylim()
        dy = (ylim[1]-ylim[0])/10.
        ax.set_ylim( ylim[0]-dy, ylim[1]+dy )
        ax.set_xlabel('$U/t$')
        ax.set_ylabel('$T/t$',rotation=0,labelpad=8)
        
        tt = kwargs.get('title_text','')
        ax.set_title( tt + '$U/t={:.2f}$'.format(U) + \
                      ',\ \ ' + '$T/t={:.2f}$'.format(T), \
                ha='center', va='bottom', fontsize=10)

        save_err = kwargs.get('save_err',None) 
        if save_err is not None:
            print "Saving png." 
            fig.savefig( save_err, dpi=300)
        if matplotlib.get_backend() == 'agg':
            fig.savefig('err.png', dpi=200)
            print "Saved error to err.png"
        else:         
            plt.show()
        if not kwargs.get('single', False):
            raise ValueError("Could not interpolate using QMC data.")
        if ALLPTS:
            if 'savepath' in kwargs.keys():
                fig.savefig( kwargs.get('savepath',None) , dpi=300) 
        if error:
            raise ValueError("Could not interpolate using QMC data.")

    return result
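A hedged usage sketch for find_closest_qmc (it relies on module-level names such as basedir and logger existing as in the original module):

# interpolate the QMC spin structure factor at the requested (U, T, mu)
spi = find_closest_qmc(U=12.0, T=0.50, mu=3.5, QTY='spi')
# entropy lookup that returns NaN instead of raising on failure
entr = find_closest_qmc(U=12.0, T=0.50, mu=3.5, QTY='entropy', error_nan=True)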

        
        
Example #25
0
maxY = float(tokens[1])

line = dim_file.readline()
tokens = line.split(' ')
dim = len(tokens)

dim_file.close()

density = dim * 1j

X, Y = np.mgrid[minX:maxX:density, minY:maxY:density]
outPoints = np.array([X, Y]).transpose()

start = time.time()

interpolator = LinearNDInterpolator(points, values, fill_value=0.0)
Z = interpolator(outPoints)

print(repr(time.time() - start))

out_file = open(sys.argv[2], 'w')

out_file.write("2\n")

token_min = "%f" % minX
token_max = "%f" % maxX
out_file.write(token_min + " " + token_max + "\n")

token_min = "%f" % minY
token_max = "%f" % maxY
out_file.write(token_min + " " + token_max + "\n")
Example #26
0
    def build_weights(self,
                      xc,
                      yc,
                      ifux,
                      ifuy,
                      psf,
                      I=None,
                      fac=None,
                      return_I_fac=False):
        """
        Build weight matrix for spectral extraction.
        
        Parameters
        ----------
        xc: float
            The ifu x-coordinate for the center of the collapse frame
        yc: float 
            The ifu y-coordinate for the center of the collapse frame
        ifux: numpy array
            The ifu x-coordinate for each fiber
        ifuy: numpy array
            The ifu y-coordinate for each fiber
        psf: numpy 3d array
            zeroth dimension: psf image, xgrid, ygrid
        I : callable (Optional)
            a function that returns the PSF function
            for a given x, y. If this is passed the
            psf image is ignored, otherwise it is 
            computed (default: None).
        fac : float (Optional)
            scale factor to account for area
            differences between the PSF image and
            the fibers. If None this is computed 
            from the PSF grid (default: None)
        return_I_fac : bool (Optional)
            return the computed (or input) I and fac 
            values (see above). This saves compute time 
            if you call this function many times with 
            the same PSF, as you can pass these values 
            back in the next time you run. 

        Returns
        -------
        weights: numpy 2d array (len of fibers by wavelength dimension)
            Weights for each fiber as a function of wavelength for extraction
        I, fac: (Optional)
            The PSF interpolator and scale factor, returned only when
            return_I_fac is True
        """
        SX = np.zeros(len(ifux))
        SY = np.zeros(len(ifuy))
        weights = np.zeros((len(ifux), len(self.wave)))

        if not I:
            T = np.array([psf[1].ravel(), psf[2].ravel()]).swapaxes(0, 1)
            I = LinearNDInterpolator(T, psf[0].ravel(), fill_value=0.0)

        if not fac:
            scale = np.abs(psf[1][0, 1] - psf[1][0, 0])
            #area = 0.75 ** 2 * np.pi
            #area = 1.7671458676442586
            #fac = area / scale ** 2
            # scale to fiber size already included in moffat_psf_integration?
            fac = 1.0

        # Avoid using a loop to speed things up, uses more memory though
        SX = np.tile(ifux, len(self.wave)).reshape(len(self.wave), len(ifux)).T
        SY = np.tile(ifuy, len(self.wave)).reshape(len(self.wave), len(ifuy)).T
        ADRx3D = np.repeat(self.ADRx,
                           len(ifux)).reshape(len(self.wave), len(ifux)).T
        ADRy3D = np.repeat(self.ADRy,
                           len(ifuy)).reshape(len(self.wave), len(ifuy)).T

        SX = SX - ADRx3D - xc
        SY = SY - ADRy3D - yc
        weights = fac * I(SX, SY)

        if not return_I_fac:
            return weights
        else:
            return weights, I, fac
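A hedged usage sketch of the return_I_fac shortcut described in the docstring (extractor, the coordinate arrays, and more_centroids are assumed names, not from the original):

# first call builds the PSF interpolator; later calls reuse it
weights, I, fac = extractor.build_weights(xc, yc, ifux, ifuy, psf,
                                          return_I_fac=True)
for xc2, yc2 in more_centroids:
    w = extractor.build_weights(xc2, yc2, ifux, ifuy, psf, I=I, fac=fac)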
Example #27
0
    def get_variables(self,
                      requested_variables,
                      time=None,
                      x=None,
                      y=None,
                      z=None,
                      block=False):

        requested_variables, time, x, y, z, outside = \
            self.check_arguments(requested_variables, time, x, y, z)

        nearestTime, dummy1, dummy2, indxTime, dummy3, dummy4 = \
            self.nearest_time(time)

        x = np.atleast_1d(x)
        y = np.atleast_1d(y)

        # Finding a subset around the particles, so that
        # we do not interpolate more points than is needed.
        # Performance is quite dependent on the given buffer,
        # but it should not be made too small to make sure
        # particles are inside box
        buffer = .1  # degrees around given positions

        lonmin = x.min() - buffer
        lonmax = x.max() + buffer
        latmin = y.min() - buffer
        latmax = y.max() + buffer
        c = np.where((self.lon > lonmin) & (self.lon < lonmax)
                     & (self.lat > latmin) & (self.lat < latmax))[0]
        # Making a lon-lat grid onto which data is interpolated
        lonstep = .0004  # hardcoded for now
        latstep = .0002  # hardcoded for now
        lons = np.arange(lonmin, lonmax, lonstep)
        lats = np.arange(latmin, latmax, latstep)
        lonsm, latsm = np.meshgrid(lons, lats)

        # Initialising dictionary to contain data
        variables = {'x': lons, 'y': lats, 'z': z, 'time': nearestTime}

        # Reader coordinates of subset
        for par in requested_variables:
            var = self.Dataset.variables[self.variable_mapping[par]]
            print(var)
            if var.ndim == 1:
                data = var[c]
            elif var.ndim == 2:
                data = var[indxTime, c]
            elif var.ndim == 3:
                data = var[indxTime, -1, c]
            else:
                raise ValueError('Wrong dimension of %s: %i' %
                                 (self.variable_mapping[par], var.ndim))

            print(data)

            if 'interpolator' not in locals():
                self.logger.debug('Making interpolator...')
                interpolator = LinearNDInterpolator((self.lat[c], self.lon[c]),
                                                    data)
            else:
                # Re-use interpolator for other variables
                interpolator.values[:, 0] = data

            variables[par] = interpolator(latsm, lonsm)

        return variables
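The re-use branch above works because LinearNDInterpolator exposes its values array, so new data can be swapped in without re-triangulating. A minimal standalone sketch:

import numpy as np
from scipy.interpolate import LinearNDInterpolator

pts = np.random.rand(100, 2)
f = LinearNDInterpolator(pts, np.random.rand(100))  # Delaunay built here

f.values[:, 0] = np.random.rand(100)  # new data, same triangulation
f(0.5, 0.5)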
Example #28
0
    def interp_other(self, aps, values, **kwargs):
        """ Interpolate on other values """
        f = LinearNDInterpolator(self.func.tri, values)
        return f(aps)
Example #29
0
def convert(meshpath, ipath, opath, variable, depths, box,
            res, influence, timestep, abg, interp, ncore, k):
    '''
    meshpath - Path to the folder with FESOM1.4 mesh files.

    ipath    - Path to FESOM1.4 netCDF file or files (with wildcard).

    opath    - path where the output will be stored.

    variable - The netCDF variable to be converted.
    '''
    print(ipath)
    mesh = pf.load_mesh(meshpath, abg=abg, usepickle=False, usejoblib=True)

    sstep = timestep
    radius_of_influence = influence

    left, right, down, up = box
    lonNumber, latNumber = res

    lonreg = np.linspace(left, right, lonNumber)
    latreg = np.linspace(down, up, latNumber)
    lonreg2, latreg2 = np.meshgrid(lonreg, latreg)

    localdir = os.path.dirname(os.path.abspath(__file__))
    # print(os.path.abspath(__file__))
    print('localdir='+localdir)
    with open(localdir+'/CMIP6_Omon.json') as data_file:
        cmore_table = json.load(data_file, object_pairs_hook=OrderedDict)

    with open(localdir+'/CMIP6_SIday.json') as data_file:
        cmore_table_ice = json.load(data_file, object_pairs_hook=OrderedDict)

    depths = np.array(depths.split(' '),dtype='float32')
    if depths[0] == -1:
        dind = range(mesh.zlevs.shape[0])
        realdepth = mesh.zlevs
    else:
        dind = []
        realdepth = []
        for depth in depths:
            ddepth = abs(mesh.zlevs-depth).argmin()
            dind.append(ddepth)
            realdepth.append(mesh.zlevs[ddepth])
    print(dind)
    print(realdepth)
    #realdepth = mesh.zlevs[dind]
    
    distances, inds = pf.create_indexes_and_distances(mesh, lonreg2, latreg2,\
                                                      k=k, n_jobs=4)

    ind_depth_all = []
    ind_noempty_all = []
    ind_empty_all = []

    for i in range(len(mesh.zlevs)):
        ind_depth, ind_noempty, ind_empty = pf.ind_for_depth(mesh.zlevs[i], mesh)
        ind_depth_all.append(ind_depth)
        ind_noempty_all.append(ind_noempty)
        ind_empty_all.append(ind_empty)
    if interp == 'nn':
        topo_interp = pf.fesom2regular(mesh.topo, mesh, lonreg2, latreg2, distances=distances,
                                inds=inds, radius_of_influence=radius_of_influence, n_jobs=1)
        k = 1
        distances, inds = pf.create_indexes_and_distances(mesh, lonreg2, latreg2,\
                                                          k=k, n_jobs=4)
        points, qh = None, None
    elif interp == 'idist':
        topo_interp = pf.fesom2regular(mesh.topo, mesh, lonreg2, latreg2, distances=distances,
                                inds=inds, radius_of_influence=radius_of_influence, n_jobs=1, how='idist')
        k = k
        distances, inds = pf.create_indexes_and_distances(mesh, lonreg2, latreg2,\
                                                          k=k, n_jobs=4)
        points, qh = None, None
    elif interp == 'linear':
        points = np.vstack((mesh.x2, mesh.y2)).T
        qh = qhull.Delaunay(points)
        topo_interp = LinearNDInterpolator(qh, mesh.topo)((lonreg2, latreg2))
        distances, inds = None, None
    elif interp == 'cubic':
        points = np.vstack((mesh.x2, mesh.y2)).T
        qh = qhull.Delaunay(points)
        topo_interp = CloughTocher2DInterpolator(qh, mesh.topo)((lonreg2, latreg2))
        distances, inds = None, None

    mdata = maskoceans(lonreg2, latreg2, topo_interp, resolution = 'h', inlands=False)
    topo = np.ma.masked_where(~mdata.mask, topo_interp)
    
    # Backend is switched to threading for linear and cubic interpolations
    # due to problems with memory mapping.
    # One has to test threading vs. multiprocessing.
    if (interp == 'linear') or (interp=='cubic'):
        backend = 'threading'
    else:
        backend = 'multiprocessing'

    Parallel(n_jobs=ncore, backend=backend, verbose=50)(delayed(scalar2geo)(ifile, opath, variable,
                                       mesh, ind_noempty_all,
                                       ind_empty_all,ind_depth_all, cmore_table, lonreg2, latreg2,
                                       distances, inds, radius_of_influence, topo, points, interp, qh, timestep, dind, realdepth) for ifile in ipath)
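The 'linear' and 'cubic' branches above triangulate once and hand the same qhull.Delaunay object to the interpolator; a minimal sketch of that reuse pattern:

import numpy as np
from scipy.spatial import Delaunay
from scipy.interpolate import LinearNDInterpolator, CloughTocher2DInterpolator

pts = np.random.rand(500, 2)
tri = Delaunay(pts)  # triangulate once, reuse for both interpolators

lin = LinearNDInterpolator(tri, np.random.rand(500))
cub = CloughTocher2DInterpolator(tri, np.random.rand(500))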
Example #30
0
    def __init__(self, osl, *args, **kwargs):
        BaseInterpolator.__init__(self, osl, *args, **kwargs)
        data = osl.get_interpolation_data()
        values = osl.spectra
        self.func = LinearNDInterpolator(data, values, **kwargs)
Example #31
0
    def get_variables(self, requested_variables, time=None,
                      x=None, y=None, z=None, block=False):

        requested_variables, time, x, y, z, outside = \
            self.check_arguments(requested_variables, time, x, y, z)

        nearestTime, dummy1, dummy2, indxTime, dummy3, dummy4 = \
            self.nearest_time(time)

        x = np.atleast_1d(x)
        y = np.atleast_1d(y)


        # Finding a subset around the particles, so that
        # we do not interpolate more points than is needed.
        # Performance is quite dependent on the given buffer,
        # but it should not be made too small to make sure
        # particles are inside box
        buffer = .1  # degrees around given positions

        lonmin = x.min() - buffer
        lonmax = x.max() + buffer
        latmin = y.min() - buffer
        latmax = y.max() + buffer
        c = np.where((self.lon > lonmin) &
                     (self.lon < lonmax) &
                     (self.lat > latmin) &
                     (self.lat < latmax))[0]

        # Making a lon-lat grid onto which data is interpolated
        lonstep = .01  # hardcoded for now
        latstep = .01  # hardcoded for now
        lons = np.arange(lonmin, lonmax, lonstep)
        lats = np.arange(latmin, latmax, latstep)
        lonsm, latsm = np.meshgrid(lons, lats)

        # Initialising dictionary to contain data
        variables = {'x': lons, 'y': lats, 'z': z,
                     'time': nearestTime}

        # Reader coordinates of subset
        for par in requested_variables:
            var = self.Dataset.variables[self.variable_mapping[par]]
            if var.ndim == 1:
                data = var[c]
            elif var.ndim == 2:
                data = var[indxTime,c]
            elif var.ndim == 3:
                data = var[indxTime,0,c]
            else:
                raise ValueError('Wrong dimension of %s: %i' %
                                 (self.variable_mapping[par], var.ndim))

            if 'interpolator' not in locals():
                logging.debug('Making interpolator...')
                interpolator = LinearNDInterpolator((self.lat[c],
                                                     self.lon[c]),
                                                    data)
            else:
                # Re-use interpolator for other variables
                interpolator.values[:,0] = data

            variables[par] = interpolator(latsm, lonsm)

        return variables
Example #32
0
    def _initSpline(self):
        LinearNDInterpolator.__init__(self, self.xs, self.P.T, rescale=True)
        self.spl = self
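Example #32 subclasses LinearNDInterpolator directly; a self-contained sketch of the same pattern (the class name and data shapes are illustrative assumptions):

import numpy as np
from scipy.interpolate import LinearNDInterpolator

class RescaledSpline(LinearNDInterpolator):
    # thin subclass that always rescales its input points
    def __init__(self, xs, P):
        LinearNDInterpolator.__init__(self, xs, P.T, rescale=True)
        self.spl = self  # keep the _initSpline-style alias

spl = RescaledSpline(np.random.rand(50, 2), np.random.rand(3, 50))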