def get_variables(self,
                      requested_variables,
                      time=None,
                      x=None,
                      y=None,
                      z=None,
                      block=False):

        start_time = datetime.now()
        requested_variables, time, x, y, z, outside = self.check_arguments(
            requested_variables, time, x, y, z)

        # If one vector component is requested but not the other,
        # we must also read the other, for correct vector rotation
        for vector_pair in vector_pairs_xy:
            if (vector_pair[0] in requested_variables
                    and vector_pair[1] not in requested_variables):
                requested_variables.extend([vector_pair[1]])
            if (vector_pair[1] in requested_variables
                    and vector_pair[0] not in requested_variables):
                requested_variables.extend([vector_pair[0]])

        nearestTime, dummy1, dummy2, indxTime, dummy3, dummy4 = \
            self.nearest_time(time)

        variables = {}

        if z is None:
            z = np.atleast_1d(0)

        # Find horizontal indices corresponding to requested x and y
        if hasattr(self, 'clipped'):
            clipped = self.clipped
        else:
            clipped = 0
        indx = np.floor((x - self.xmin) / self.delta_x).astype(int) + clipped
        indy = np.floor((y - self.ymin) / self.delta_y).astype(int) + clipped
        indx[outside] = 0  # To be masked later
        indy[outside] = 0
        indx_el = indx
        indy_el = indy
        if block is True:
            # Add a buffer to also cover future positions of the elements
            buffer = self.buffer
            # Avoid the last pixel in each dimension, since the ROMS grids
            # are staggered relative to each other (rho, u, v, psi)
            indx = np.arange(
                np.max([0, indx.min() - buffer]),
                np.min([indx.max() + buffer, self.lon.shape[1] - 1]))
            indy = np.arange(
                np.max([0, indy.min() - buffer]),
                np.min([indy.max() + buffer, self.lon.shape[0] - 1]))

        # Find depth levels covering all elements
        if z.min() == 0 or not hasattr(self, 'hc'):
            indz = self.num_layers - 1  # surface layer
            variables['z'] = 0

        else:
            # Find the range of indices covering given z-values
            if not hasattr(self, 'sea_floor_depth_below_sea_level'):
                logging.debug('Reading sea floor depth...')
                self.sea_floor_depth_below_sea_level = \
                    self.Dataset.variables['h'][:]

                Htot = self.sea_floor_depth_below_sea_level
                self.z_rho_tot = depth.sdepth(Htot, self.hc, self.Cs_r)

            if has_xarray is False:
                indxgrid, indygrid = np.meshgrid(indx, indy)
                H = self.sea_floor_depth_below_sea_level[indygrid, indxgrid]
            else:
                H = self.sea_floor_depth_below_sea_level[indy, indx]
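            # depth.sdepth is assumed here to return the depth of each sigma
            # layer at the rho-points, computed from the bathymetry H, the
            # critical depth hc and the stretching coefficients Cs_r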
            z_rho = depth.sdepth(H, self.hc, self.Cs_r)
            # Element indices must be relative to extracted subset
            indx_el = indx_el - indx.min()
            indy_el = indy_el - indy.min()

            # Loop to find the layers covering the requested z-values
            indz_min = 0
            indz_max = self.num_layers
            for i in range(self.num_layers):
                if np.min(z - z_rho[i, indy_el, indx_el]) > 0:
                    indz_min = i
                if np.max(z - z_rho[i, indy_el, indx_el]) > 0:
                    indz_max = i
            indz = range(
                np.maximum(0, indz_min - self.verticalbuffer),
                np.minimum(self.num_layers,
                           indz_max + 1 + self.verticalbuffer))
            z_rho = z_rho[indz, :, :]
            # Determine the z-levels to which to interpolate
            zi1 = np.maximum(
                0,
                bisect_left(-np.array(self.zlevels), -z.max()) -
                self.verticalbuffer)
            zi2 = np.minimum(
                len(self.zlevels),
                bisect_right(-np.array(self.zlevels), -z.min()) +
                self.verticalbuffer)
            variables['z'] = np.array(self.zlevels[zi1:zi2])

        #read_masks = {}  # To store masks for the various grids
        mask_values = {}
        for par in requested_variables:
            varname = [
                name for name, cf in self.ROMS_variable_mapping.items()
                if cf == par
            ]
            var = self.Dataset.variables[varname[0]]

            if par == 'land_binary_mask':
                if not hasattr(self, 'land_binary_mask'):
                    # Read landmask for whole domain, for later re-use
                    self.land_binary_mask = \
                        1 - self.Dataset.variables['mask_rho'][:]
                if has_xarray is False:
                    indxgrid, indygrid = np.meshgrid(indx, indy)
                    variables[par] = self.land_binary_mask[indygrid, indxgrid]
                else:
                    variables[par] = self.land_binary_mask[indy, indx]
            elif var.ndim == 2:
                variables[par] = var[indy, indx]
            elif var.ndim == 3:
                variables[par] = var[indxTime, indy, indx]
            elif var.ndim == 4:
                variables[par] = var[indxTime, indz, indy, indx]
            else:
                raise Exception('Wrong dimension of variable: ' + varname[0])

            variables[par] = np.asarray(variables[par])  # If Xarray
            start = datetime.now()

            if par not in mask_values:
                if has_xarray is False:
                    indxgrid, indygrid = np.meshgrid(indx, indy)
                else:
                    indxgrid = indx
                    indygrid = indy
                if par == 'x_sea_water_velocity':
                    if not hasattr(self, 'mask_u'):
                        self.mask_u = self.Dataset.variables['mask_u'][:]
                    mask = self.mask_u[indygrid, indxgrid]
                elif par == 'y_sea_water_velocity':
                    if not hasattr(self, 'mask_v'):
                        self.mask_v = self.Dataset.variables['mask_v'][:]
                    mask = self.mask_v[indygrid, indxgrid]
                else:
                    if not hasattr(self, 'mask_rho'):
                        # For ROMS-Agrif this must perhaps be mask_psi?
                        self.mask_rho = self.Dataset.variables['mask_rho'][:]
                    mask = self.mask_rho[indygrid, indxgrid]
                if has_xarray is True:
                    mask = np.asarray(mask)
                if mask.min() == 0 and par != 'land_binary_mask':
                    first_mask_point = np.where(mask.ravel() == 0)[0][0]
                    if variables[par].ndim == 3:
                        upper = variables[par][0, :, :]
                    else:
                        upper = variables[par]
                    mask_values[par] = upper.ravel()[first_mask_point]
                    variables[par][variables[par] == mask_values[par]] = np.nan

            if var.ndim == 4:
                # Regrid from sigma to z levels
                if len(np.atleast_1d(indz)) > 1:
                    logging.debug('sigma to z for ' + varname[0])
                    if self.precalculate_s2z_coefficients is True:
                        M = self.sea_floor_depth_below_sea_level.shape[0]
                        N = self.sea_floor_depth_below_sea_level.shape[1]
                        O = len(self.z_rho_tot)
                        if not hasattr(self, 's2z_A'):
                            logging.info(
                                'Calculating sigma2z-coefficients for whole domain'
                            )
                            starttime = datetime.now()
                            dummyvar = np.ones((O, M, N))
                            dummy, self.s2z_total = depth.multi_zslice(
                                dummyvar, self.z_rho_tot, self.zlevels)
                            # Store arrays/coefficients
                            self.s2z_A = self.s2z_total[0].reshape(
                                len(self.zlevels), M, N)
                            self.s2z_C = self.s2z_total[1].reshape(
                                len(self.zlevels), M, N)
                            #self.s2z_I = self.s2z_total[2].reshape(M, N)
                            self.s2z_kmax = self.s2z_total[3]
                            del self.s2z_total  # Free memory
                            logging.info('Time: ' +
                                         str(datetime.now() - starttime))
                        if 'A' not in locals():
                            logging.info('Re-using sigma2z-coefficients')
                            # Select relevant subset of full arrays
                            # The relevant depth levels
                            zle = np.arange(zi1, zi2)
                            # Awkward subsetting to prevent losing one dimension
                            A = self.s2z_A.copy()
                            A = A[:, :, indx]
                            A = A[:, indy, :]
                            A = A[zle, :, :]
                            C = self.s2z_C.copy()
                            C = C[:, :, indx]
                            C = C[:, indy, :]
                            C = C[zle, :, :]
                            C = C - C.max() + variables[par].shape[0] - 1
                            C[C < 1] = 1
                            A = A.reshape(len(zle), len(indx) * len(indy))
                            C = C.reshape(len(zle), len(indx) * len(indy))
                            I = np.arange(len(indx) * len(indy))
                            ## Check
                            #dummyvar2, s2z = depth.multi_zslice(
                            #    variables[par].copy(), z_rho.copy(), variables['z'])
                            #print len(zle), variables[par].shape, 'zle, varshape'
                            #Ac,Cc,Ic,kmaxc = s2z
                            #print C, 'C'
                            #print Cc, 'Cc'
                            #print C.shape, Cc.shape
                            #if C.max() != Cc.max():
                            #    print 'WARNING!!'
                            #    import sys; sys.exit('stop')
                            # Must be checked. Or number of sigma-layers?
                            kmax = len(zle)
                    if 'A' not in locals():
                        logging.info('Calculating new sigma2z-coefficients')
                        variables[par], s2z = depth.multi_zslice(
                            variables[par], z_rho, variables['z'])
                        A, C, I, kmax = s2z
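                        # Assumed meaning of the coefficients: A are the
                        # vertical interpolation weights, C the index of the
                        # upper sigma layer for each target z-level, I the
                        # flattened horizontal index, kmax the number of levels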
                        # Reshaping to compare with subset of full array
                        #zle = np.arange(zi1, zi2)
                        #A = A.reshape(len(zle), len(indx), len(indy))
                        #C = C.reshape(len(zle), len(indx), len(indy))
                        #I = I.reshape(len(indx), len(indy))
                    else:
                        logging.info('Applying sigma2z-coefficients')
                        # Re-using sigma2z coefficients:
                        F = np.asarray(variables[par])
                        Fshape = F.shape
                        N = F.shape[0]
                        M = F.size // N
                        F = F.reshape((N, M))
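                        # Linear interpolation in the vertical: blend sigma
                        # layers C-1 and C with weight A at each flattened
                        # horizontal position I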
                        R = (1 - A) * F[(C - 1, I)] + A * F[(C, I)]
                        variables[par] = R.reshape((kmax, ) + Fshape[1:])

                    # Nan in input to multi_zslice gives extreme values in output
                    variables[par][variables[par] > 1e+9] = np.nan

            # If 2D array is returned due to the fancy slicing methods
            # of netcdf-python, we need to take the diagonal
            if variables[par].ndim > 1 and block is False:
                variables[par] = variables[par].diagonal()

            # Mask values outside domain
            variables[par] = np.ma.array(variables[par], ndmin=2, mask=False)
            if block is False:
                variables[par].mask[outside] = True

            # Skipping de-staggering, as it leads to invalid values at later interpolation
            #if block is True:
            #    # Unstagger grid for vectors
            #    logging.debug('Unstaggering ' + par)
            #    if 'eta_v' in var.dimensions:
            #        variables[par] = np.ma.array(variables[par],
            #                            mask=variables[par].mask)
            #        variables[par][variables[par].mask] = 0
            #        if variables[par].ndim == 2:
            #            variables[par] = \
            #                (variables[par][0:-1,0:-1] +
            #                variables[par][0:-1,1::])/2
            #        elif variables[par].ndim == 3:
            #            variables[par] = \
            #                (variables[par][:,0:-1,0:-1] +
            #                variables[par][:,0:-1,1::])/2
            #        variables[par] = np.ma.masked_where(variables[par]==0,
            #                                            variables[par])
            #    elif 'eta_u' in var.dimensions:
            #        variables[par] = np.ma.array(variables[par],
            #                            mask=variables[par].mask)
            #        variables[par][variables[par].mask] = 0
            #        if variables[par].ndim == 2:
            #            variables[par] = \
            #                (variables[par][0:-1,0:-1] +
            #                 variables[par][1::,0:-1])/2
            #        elif variables[par].ndim == 3:
            #            variables[par] = \
            #                (variables[par][:,0:-1,0:-1] +
            #                 variables[par][:,1::,0:-1])/2
            #        variables[par] = np.ma.masked_where(variables[par]==0,
            #                                            variables[par])
            #    else:
            #        if variables[par].ndim == 2:
            #            variables[par] = variables[par][1::, 1::]
            #        elif variables[par].ndim == 3:
            #            variables[par] = variables[par][:,1::, 1::]

        if block is True:
            # TODO: should be midpoints, but angle array below needs integer
            #indx = indx[0:-1]  # Only if de-staggering has been performed
            #indy = indy[1::]
            variables['x'] = indx
            variables['y'] = indy
        else:
            variables['x'] = self.xmin + (indx - 1) * self.delta_x
            variables['y'] = self.ymin + (indy - 1) * self.delta_y

        variables['x'] = variables['x'].astype(float)
        variables['y'] = variables['y'].astype(float)
        variables['time'] = nearestTime

        if 'x_sea_water_velocity' in variables.keys() \
                or 'sea_ice_x_velocity' in variables.keys() \
                or 'x_wind' in variables.keys():
            # We must rotate current vectors
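            # ROMS stores vector components along the curvilinear xi/eta grid
            # axes; the 'angle' variable (xi axis relative to east) is used
            # below to rotate them to eastward/northward components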
            if not hasattr(self, 'angle_xi_east'):
                logging.debug('Reading angle between xi and east...')
                self.angle_xi_east = self.Dataset.variables['angle'][:]
            if has_xarray is False:
                rad = self.angle_xi_east[tuple(np.meshgrid(indy, indx))].T
            else:
                rad = self.angle_xi_east[indy, indx]
            if 'x_sea_water_velocity' in variables.keys():
                variables['x_sea_water_velocity'], \
                    variables['y_sea_water_velocity'] = rotate_vectors_angle(
                        variables['x_sea_water_velocity'],
                        variables['y_sea_water_velocity'], rad)
            if 'sea_ice_x_velocity' in variables.keys():
                variables['sea_ice_x_velocity'], \
                    variables['sea_ice_y_velocity'] = rotate_vectors_angle(
                        variables['sea_ice_x_velocity'],
                        variables['sea_ice_y_velocity'], rad)
            if 'x_wind' in variables.keys():
                variables['x_wind'], \
                    variables['y_wind'] = rotate_vectors_angle(
                        variables['x_wind'],
                        variables['y_wind'], rad)

        # Masking NaN
        for var in requested_variables:
            variables[var] = np.ma.masked_invalid(variables[var])

        logging.debug('Time for ROMS native reader: ' +
                      str(datetime.now() - start_time))

        return variables
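A minimal usage sketch, not part of the source: the reader class name, the NetCDF file path and all coordinate values below are assumptions for illustration only.

# Hypothetical call of the method above; 'Reader' and the file path are
# placeholders, and x/y must already be in the reader's projected coordinates.
import numpy as np
from datetime import datetime

reader = Reader('roms_his.nc')              # assumed ROMS output file
x = np.array([500000., 510000.])            # assumed projected x-coordinates
y = np.array([7200000., 7210000.])          # assumed projected y-coordinates
data = reader.get_variables(
    ['x_sea_water_velocity', 'y_sea_water_velocity'],
    time=datetime(2020, 1, 1), x=x, y=y,
    z=np.array([0.]), block=True)
# With block=True a 2D subset covering (and buffering) the points is returned
print(data['x_sea_water_velocity'].shape)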
Example #2
    def get_variables(self,
                      requested_variables,
                      time=None,
                      x=None,
                      y=None,
                      z=None,
                      block=False):

        requested_variables, time, x, y, z, outside = self.check_arguments(
            requested_variables, time, x, y, z)

        # If one vector component is requested but not the other,
        # we must also read the other, for correct vector rotation
        for vector_pair in vector_pairs_xy:
            if (vector_pair[0] in requested_variables
                    and vector_pair[1] not in requested_variables):
                requested_variables.extend([vector_pair[1]])
            if (vector_pair[1] in requested_variables
                    and vector_pair[0] not in requested_variables):
                requested_variables.extend([vector_pair[0]])

        nearestTime, dummy1, dummy2, indxTime, dummy3, dummy4 = \
            self.nearest_time(time)

        variables = {}

        if z is None:
            z = np.atleast_1d(0)

        # Find horizontal indices corresponding to requested x and y
        if hasattr(self, 'clipped'):
            clipped = self.clipped
        else:
            clipped = 0
        indx = np.floor((x - self.xmin) / self.delta_x).astype(int) + clipped
        indy = np.floor((y - self.ymin) / self.delta_y).astype(int) + clipped
        indx[outside] = 0  # To be masked later
        indy[outside] = 0
        indx_el = indx
        indy_el = indy
        if block is True:
            # Add a buffer to also cover future positions of the elements
            buffer = self.buffer
            # Avoid the last pixel in each dimension, since the ROMS grids
            # are staggered relative to each other (rho, u, v, psi)
            indx = np.arange(
                np.max([0, indx.min() - buffer]),
                np.min([indx.max() + buffer, self.lon.shape[1] - 1]))
            indy = np.arange(
                np.max([0, indy.min() - buffer]),
                np.min([indy.max() + buffer, self.lon.shape[0] - 1]))

        # Find depth levels covering all elements
        if z.min() == 0 or not hasattr(self, 'hc'):
            indz = self.num_layers - 1  # surface layer
            variables['z'] = 0

        else:
            # Find the range of indices covering given z-values
            if not hasattr(self, 'sea_floor_depth_below_sea_level'):
                logging.debug('Reading sea floor depth...')
                self.sea_floor_depth_below_sea_level = \
                    self.Dataset.variables['h'][:]
            indxgrid, indygrid = np.meshgrid(indx, indy)
            H = self.sea_floor_depth_below_sea_level[indygrid, indxgrid]
            z_rho = depth.sdepth(H, self.hc, self.Cs_r)
            # Element indices must be relative to extracted subset
            indx_el = indx_el - indx.min()
            indy_el = indy_el - indy.min()

            # Loop to find the layers covering the requested z-values
            indz_min = 0
            indz_max = self.num_layers
            for i in range(self.num_layers):
                if np.min(z - z_rho[i, indy_el, indx_el]) > 0:
                    indz_min = i
                if np.max(z - z_rho[i, indy_el, indx_el]) > 0:
                    indz_max = i
            indz = range(
                np.maximum(0, indz_min - self.verticalbuffer),
                np.minimum(self.num_layers,
                           indz_max + 1 + self.verticalbuffer))
            z_rho = z_rho[indz, :, :]
            # Determine the z-levels to which to interpolate
            zi1 = np.maximum(
                0,
                bisect_left(-np.array(self.zlevels), -z.max()) -
                self.verticalbuffer)
            zi2 = np.minimum(
                len(self.zlevels),
                bisect_right(-np.array(self.zlevels), -z.min()) +
                self.verticalbuffer)
            variables['z'] = np.array(self.zlevels[zi1:zi2])

        #read_masks = {}  # To store masks for the various grids
        for par in requested_variables:
            varname = [
                name for name, cf in self.ROMS_variable_mapping.items()
                if cf == par
            ]
            var = self.Dataset.variables[varname[0]]

            # Automatic masking may lead to trouble for ROMS files
            # with valid_min/max, _Fill_value or missing_value
            # https://github.com/Unidata/netcdf4-python/issues/703
            var.set_auto_maskandscale(False)

            FillValue = getattr(var, '_FillValue', None)
            scale = getattr(var, 'scale_factor', 1)
            offset = getattr(var, 'add_offset', 0)

            if var.ndim == 2:
                variables[par] = var[indy, indx]
            elif var.ndim == 3:
                variables[par] = var[indxTime, indy, indx]
            elif var.ndim == 4:
                variables[par] = var[indxTime, indz, indy, indx]
            else:
                raise Exception('Wrong dimension of variable: ' + varname[0])

            # Manual scaling, offsetting and masking due to issue with ROMS files
            logging.debug(
                'Manually masking %s, FillValue %s, scale %s, offset %s' %
                (par, FillValue, scale, offset))
            if FillValue is not None:
                if var.dtype != FillValue.dtype:
                    mask = variables[par] == 0
                    if 'already_warned' not in locals():
                        logging.warning(
                            'Data type of variable (%s) and _FillValue (%s) is not the same. Masking 0-values instead'
                            % (var.dtype, FillValue.dtype))
                        already_warned = True
                else:
                    logging.warning('Masking ' + str(FillValue))
                    mask = variables[par] == FillValue
            variables[par] = variables[par] * scale + offset
            if FillValue is not None:
                variables[par][mask] = np.nan

            if var.ndim == 4:
                # Regrid from sigma to z levels
                if len(np.atleast_1d(indz)) > 1:
                    logging.debug('sigma to z for ' + varname[0])
                    variables[par] = depth.multi_zslice(
                        variables[par], z_rho, variables['z'])
                    # Nan in input to multi_zslice gives extreme values in output
                    variables[par][variables[par] > 1e+9] = np.nan

            # If 2D array is returned due to the fancy slicing methods
            # of netcdf-python, we need to take the diagonal
            if variables[par].ndim > 1 and block is False:
                variables[par] = variables[par].diagonal()

            # Mask values outside domain
            variables[par] = np.ma.array(variables[par], ndmin=2, mask=False)
            if block is False:
                variables[par].mask[outside] = True

            # Skipping de-staggering, as it leads to invalid values at later interpolation
            #if block is True:
            #    # Unstagger grid for vectors
            #    logging.debug('Unstaggering ' + par)
            #    if 'eta_v' in var.dimensions:
            #        variables[par] = np.ma.array(variables[par],
            #                            mask=variables[par].mask)
            #        variables[par][variables[par].mask] = 0
            #        if variables[par].ndim == 2:
            #            variables[par] = \
            #                (variables[par][0:-1,0:-1] +
            #                variables[par][0:-1,1::])/2
            #        elif variables[par].ndim == 3:
            #            variables[par] = \
            #                (variables[par][:,0:-1,0:-1] +
            #                variables[par][:,0:-1,1::])/2
            #        variables[par] = np.ma.masked_where(variables[par]==0,
            #                                            variables[par])
            #    elif 'eta_u' in var.dimensions:
            #        variables[par] = np.ma.array(variables[par],
            #                            mask=variables[par].mask)
            #        variables[par][variables[par].mask] = 0
            #        if variables[par].ndim == 2:
            #            variables[par] = \
            #                (variables[par][0:-1,0:-1] +
            #                 variables[par][1::,0:-1])/2
            #        elif variables[par].ndim == 3:
            #            variables[par] = \
            #                (variables[par][:,0:-1,0:-1] +
            #                 variables[par][:,1::,0:-1])/2
            #        variables[par] = np.ma.masked_where(variables[par]==0,
            #                                            variables[par])
            #    else:
            #        if variables[par].ndim == 2:
            #            variables[par] = variables[par][1::, 1::]
            #        elif variables[par].ndim == 3:
            #            variables[par] = variables[par][:,1::, 1::]

        if block is True:
            # TODO: should be midpoints, but angle array below needs integer
            #indx = indx[0:-1]  # Only if de-staggering has been performed
            #indy = indy[1::]
            variables['x'] = indx
            variables['y'] = indy
        else:
            variables['x'] = self.xmin + (indx - 1) * self.delta_x
            variables['y'] = self.ymin + (indy - 1) * self.delta_y

        variables['x'] = variables['x'].astype(float)
        variables['y'] = variables['y'].astype(float)
        variables['time'] = nearestTime

        if 'x_sea_water_velocity' in variables.keys() \
                or 'sea_ice_x_velocity' in variables.keys() \
                or 'x_wind' in variables.keys():
            # We must rotate current vectors
            if not hasattr(self, 'angle_xi_east'):
                logging.debug('Reading angle between xi and east...')
                self.angle_xi_east = self.Dataset.variables['angle'][:]
            rad = self.angle_xi_east[tuple(np.meshgrid(indy, indx))].T
            if 'x_sea_water_velocity' in variables.keys():
                variables['x_sea_water_velocity'], \
                    variables['y_sea_water_velocity'] = rotate_vectors_angle(
                        variables['x_sea_water_velocity'],
                        variables['y_sea_water_velocity'], rad)
            if 'sea_ice_x_velocity' in variables.keys():
                variables['sea_ice_x_velocity'], \
                    variables['sea_ice_y_velocity'] = rotate_vectors_angle(
                        variables['sea_ice_x_velocity'],
                        variables['sea_ice_y_velocity'], rad)
            if 'x_wind' in variables.keys():
                variables['x_wind'], \
                    variables['y_wind'] = rotate_vectors_angle(
                        variables['x_wind'],
                        variables['y_wind'], rad)

        if 'land_binary_mask' in requested_variables:
            variables['land_binary_mask'] = \
                1 - variables['land_binary_mask']

        # Masking NaN
        for var in requested_variables:
            variables[var] = np.ma.masked_invalid(variables[var])

        return variables