def ns_kriging_mm(data, stations, bp, x_opt_st, mod_opt_st, targets,
                  candidate_var=[variogram_fit.spherical_sv, ],
                  candidate_tag=['Spherical', ],
                  verbose=False, ncs=3, krig_type='Ord'):
    '''
    Non-stationary Kriging using a multi-precipitation (regime-switching)
    variogram model. For each target the ``ncs`` closest stations are used,
    and for each time step the variogram parameters of each station are
    picked according to the precipitation regime (interval) its record
    falls into.

    Parameters
    ----------
    data : nd_array
        Measurements ``[n, m]``: n time steps, m stations.
    stations : nd_array
        Station locations ``[m, 2]``.
    bp : list
        Breaking points separating the regimes. Do not include values equal
        to the minimum or maximum of the data.
    x_opt_st : nd_array
        Variogram parameters per station and regime (see
        ``multi_variogram``).
    mod_opt_st : nd_array
        Optimal model output per station and regime (currently unused; kept
        for interface symmetry with ``multi_variogram``).
    targets : sequence
        Points of interest to interpolate ``[t, 2]``.
    candidate_var : list
        Pointers to variogram functions; only the first entry is used.
    candidate_tag : list
        Tags matching ``candidate_var`` (currently unused).
    verbose : bool
        If True, print progress after each target.
    ncs : int
        Number of closest stations in each local Kriging system.
    krig_type : str
        'Ord' for Ordinary Kriging; any other value uses Simple Kriging.

    Returns
    -------
    zz : nd_array
        Interpolated field ``[n, t]``.
    ssp : nd_array
        Interpolation variance field ``[n, t]``.
    '''
    # BUG FIX: the original tested ``krig_type is 'Ord'`` (identity test on
    # a string literal) and left ``ord_krig`` undefined for any other
    # value, which raised NameError further down.
    ord_krig = (krig_type == 'Ord')

    # Set the intervals (work on a copy so the caller's list is untouched)
    bp = bp[:]
    bp.insert(0, np.min(data))
    bp.append(np.max(data))
    intervals = [[bp[i], bp[i + 1]] for i in range(len(bp) - 1)]
    # Enlarge the last interval slightly so the maximum value is included
    intervals[-1][1] = intervals[-1][1] + 0.0001

    # Map every record at every station to its regime (interval) index
    cond_map = np.zeros_like(data)
    for st_i in range(len(stations)):
        for n in range(len(data)):
            for i in range(len(intervals)):
                if data[n, st_i] >= min(intervals[i]) and \
                   data[n, st_i] < max(intervals[i]):
                    cond_map[n, st_i] = i

    # Distance matrix between stations
    dist_mat = np.array(dist.between(stations))

    # Result containers: one column per target
    zz = np.zeros([len(data), len(targets)])
    ssp = np.zeros([len(data), len(targets)])
    for tarcoun in range(len(targets)):
        # Select individual target for interpolation
        tar = targets[tarcoun]
        # Distance from the target to all stations
        das = np.array(dist.target([tar, ], stations)).flatten()
        # Indices of the ncs closest stations
        cs = list(das.argsort()[:ncs])
        # Reduce data, distance, regime and parameter matrices to those
        red_data = data[:, cs]
        red_dist = dist_mat[cs][:, cs]
        red_cond_map = cond_map[:, cs]
        red_xopt = x_opt_st[cs]

        # Pre-cooked all-zeros case (regime 0), solved once per target
        if ord_krig:
            cov_mat_zeros = np.ones([ncs + 1, ncs + 1])
            cov_mat_zeros[-1, -1] = 0
        else:
            cov_mat_zeros = np.ones([ncs, ncs])
        for ii in range(ncs):
            for jj in range(ncs):
                # TODO include different variogram type where 0 is
                cov_mat_zeros[ii, jj] = candidate_var[0](red_dist[ii, jj],
                                                         red_xopt[ii, 0])
        if ord_krig:
            cov_tar_zeros = np.ones([ncs + 1, 1])
        else:
            cov_tar_zeros = np.ones([ncs, 1])
        for ii in range(ncs):
            cov_tar_zeros[ii, 0] = candidate_var[0](das[cs[ii]],
                                                    red_xopt[ii, 0])
        # Inverse of extended covariance matrix and Kriging weights
        inv_cov_mat_zeros = np.linalg.inv(cov_mat_zeros)
        w_vec = np.dot(inv_cov_mat_zeros, cov_tar_zeros)
        # Interpolation variance of the all-zeros Kriging system
        if ord_krig:
            # BUG FIX: the Lagrange multiplier must be added outside the
            # sum; the original summed ``w_vec*cov_tar_zeros + w_vec[-1]``
            # element-wise, contradicting the corrected ``sp_temp`` below.
            sp_zeros = (np.max(cov_mat_zeros)
                        - (np.sum(w_vec * cov_tar_zeros) + w_vec[-1][0]))
        else:
            sp_zeros = (np.max(cov_mat_zeros)
                        - (np.dot(np.transpose(w_vec),
                                  cov_tar_zeros)[0][0]))

        for t in range(len(data)):
            # If there is some precipitation data
            if np.max(red_data[t, :]) != 0:
                # Covariance matrix between neighbouring stations (K),
                # built with the regime-dependent variogram parameters
                if ord_krig:
                    cov_mat = np.ones([ncs + 1, ncs + 1])
                    cov_mat[-1, -1] = 0
                else:
                    cov_mat = np.ones([ncs, ncs])
                for ii in range(ncs):
                    for jj in range(ncs):
                        # TODO include different variogram type where 0 is
                        cov_mat[ii, jj] = candidate_var[0](
                            red_dist[ii, jj],
                            red_xopt[ii, int(red_cond_map[t, ii])])
                # Fix the diagonal to the variogram maximum to push the
                # matrix towards positive eigenvalues
                for ii in range(ncs):
                    cov_mat[ii, ii] = np.max(cov_mat)
                # Symmetrise using the average bi-directional covariance
                cov_mat = 0.5 * (cov_mat + np.transpose(cov_mat))

                # Covariance towards the target
                if ord_krig:
                    cov_tar = np.ones([ncs + 1, 1])
                else:
                    cov_tar = np.ones([ncs, 1])
                for ii in range(ncs):
                    cov_tar[ii, 0] = candidate_var[0](
                        das[cs[ii]],
                        red_xopt[ii, int(red_cond_map[t, ii])])

                # Solve the Kriging system
                inv_cov_mat = np.linalg.inv(cov_mat)
                w_vec = np.dot(inv_cov_mat, cov_tar)
                # Interpolation value and variance
                if ord_krig:
                    z_temp = np.dot(np.transpose(w_vec[:-1]),
                                    np.reshape(red_data[t, :],
                                               [-1, 1]))[0][0]
                    sp_temp = (np.max(cov_mat)
                               - (np.sum(w_vec * cov_tar) + w_vec[-1][0]))
                else:
                    z_temp = np.dot(np.transpose(w_vec),
                                    np.reshape(red_data[t, :],
                                               [-1, 1]))[0][0]
                    sp_temp = np.max(cov_mat) - (np.sum(w_vec * cov_tar))
                ssp[t, tarcoun] = sp_temp
                zz[t, tarcoun] = z_temp
            else:
                # No rain in the whole neighbourhood: interpolate 0 with
                # the pre-computed zero-case variance
                ssp[t, tarcoun] = sp_zeros
                zz[t, tarcoun] = 0
        if verbose:
            # BUG FIX: the original reported the stale loop variable ``n``
            # (always len(data)-1); report the target counter instead.
            print('Finished target {0}/{1} at {2}'.format(
                tarcoun + 1, len(targets), ctime()))
    return zz, ssp
def krig(MaxDist, targets, stations, records, record_covariance_matrix,
         ModOpt, xopt, candidate_sv, tmin=0, tmax='def', MinNumSt='def',
         krig_type='Sim', normalisation=False, m_error=None):
    '''
    Parsing of data for Kriging interpolation. This function contains\
    execution of interpolation as well. \n
    Parameters
    ----------
        **MaxDist** -- Initial search radius for nearby stations \n
        **targets** -- Points of interest to interpolate (targets)
        ``[t,2]`` \n
        **stations** -- Gauge location for interpolation ``[x,2]`` \n
        **records** -- Precipitation register for gauges ``[n,x]`` \n
        **record_covariance_matrix** -- Gauge records covariance matrix
        ``[x,x]`` \n
        **ModOpt** -- pointer to optimal semivariogram model \n
        **xopt** -- vector with optimal semivariogram parameters ``[5]`` \n
        **candidate_sv** -- Array with pointer to functions in
        variogram_fit module \n
        **tmin** -- Initial time step to be interpolated \n
        **tmax** -- Final time step to be interpolated \n
        **MinNumSt** -- Minimum number of stations within the search
        radius \n
        **krig_type** -- Type of Kriging to be used. 'Sim' for Simple and\
        'Ord' for Ordinary Kriging \n
        **normalisation** -- Boolean. If true, then the variable is\
        normalised in the neighbourhood of the variable \n
        **m_error** -- contains the variance of the measurement error \n
    Returns
    -------
        **Z** -- Interpolation for each target and time step ``[n,t]`` \n
        **SP** -- Interpolation variance field ``[t,1]``\n
        **ZAvg** -- Average of interpolated field ``[n,1]``
    '''
    # BUG FIX: ``MinNumSt is 'def'`` relied on CPython string interning;
    # use equality instead (same fix as the ``tmax == 'def'`` test below).
    if MinNumSt == 'def':
        MinNumSt = len(stations)
    if MinNumSt >= len(stations):
        print('Number of stations should be larger than number of '
              'minimum stations. Set to all stations')
        MinNumSt = len(stations)
    if tmax == 'def':
        tmax = len(records)
    tmin = int(tmin)
    tmax = int(tmax)
    # Time window of records to interpolate
    PrecSec = records[tmin:tmax]
    # Measurement-error variance: zero matrix when not provided
    if m_error is None:
        m_error = np.zeros([len(PrecSec), len(stations)])
    else:
        m_error = m_error[tmin:tmax]

    # Reduce measurements to relevant locations for the targets
    Z = None
    SP = []
    for kk in range(len(targets)):
        # Distances from the target to every station
        das = np.array(dist.target([targets[kk], ], stations)).flatten()
        # Indices of the MinNumSt closest stations (kept) and the rest
        # (dropped from the local Kriging system)
        cs = list(das.argsort()[:MinNumSt])
        fs = list(das.argsort()[MinNumSt:])
        # Reduction of relevant stations (reduced data and cov matrices)
        RedLoc = np.delete(stations, fs, 0)
        reduced_records = np.delete(PrecSec, fs, 1)
        # reduction of the measurement error matrix
        meas_var = m_error[:, cs]
        if normalisation:
            # Detrend every time step by its local neighbourhood average
            local_average = np.average(reduced_records, 1).reshape(
                [len(reduced_records), 1])
            reduced_records = reduced_records - local_average
        reduced_cov_matrix = record_covariance_matrix[:]
        reduced_cov_matrix = np.delete(reduced_cov_matrix, fs, 0)
        reduced_cov_matrix = np.delete(reduced_cov_matrix, fs, 1)

        # Kriging interpolation for this target
        TempRes = _kriging_core(ModOpt, targets[kk], RedLoc, candidate_sv,
                                xopt, reduced_cov_matrix, reduced_records,
                                krig_type, meas_var)
        temp = np.vstack(TempRes[0])
        if normalisation:
            # Add back the local trend removed above
            temp = temp + local_average
        # BUG FIX: the original used ``if Z == []``, which is an ambiguous
        # element-wise comparison once Z becomes an ndarray; track the
        # first iteration with ``Z is None`` instead.
        if Z is None:
            Z = temp
        else:
            Z = np.hstack((Z, temp))
        SP.append(TempRes[1])
    ZAvg = np.average(Z, 1)
    SP = np.array(SP)
    SP[SP < 0] = 0  # clamp negative interpolation variances
    return Z, SP, ZAvg
def multi_variogram(data, stations, bp,
                    candidate_var=[variogram_fit.spherical_sv, ],
                    candidate_tag=['Spherical', ]):
    '''
    Calculates the object with multiple semivariograms of the data at
    different breaking points.

    Parameters:
    -----------
    data : nd_array
        Array containing all the measurements of the variable, of size
        '[n, m]'. n is the number of measurements, m the number of
        stations.
    stations : nd_array
        Array containing the location of stations. The size of the array
        is '[m, 2]'.
    bp : list
        List with the sections of the data to create the pools. Do not
        include values equal to the maximum or minimum of the data.

    returns:
    --------
    x_opt_st : nd_array
        3 dimensional array consisting of station index, condition index
        and variogram parameters.
    mod_opt_st : nd_array
        3 dimensional array consisting of station index, condition index
        and optimal model output.
    '''
    # Make n variograms as breaking points at the location of the sensors.
    # bp has to be in ascending order; work on a copy and extend it with
    # the data minimum/maximum so the pools cover the whole range.
    bp = bp[:]
    bp.insert(0, np.min(data))
    bp.append(np.max(data))
    intervals = [[bp[i], bp[i + 1]] for i in range(len(bp) - 1)]
    # Enlarge the last interval slightly so the maximum value is included
    intervals[-1][1] = intervals[-1][1] + 0.0001

    x_opt_st = []
    mod_opt_st = []
    # iterate over the stations
    for s_idx in range(len(stations)):
        # get distance of this station towards the other stations
        dist_to_stations = dist.target(stations, [stations[s_idx], ])
        # Iterate over the intervals of precipitation
        x_opt_db = []
        mod_opt_db = []
        for interval in intervals:
            # Pool all records whose value at this station falls inside
            # the interval (one pool per condition per station)
            temp_pool = []
            for i in range(len(data)):
                if data[i, s_idx] >= min(interval) and \
                   data[i, s_idx] < max(interval):
                    temp_pool.append(data[i, :])
            # Raise error if pool is empty
            if temp_pool == []:
                raise NameError('Pool is empty: no data at station {0} '
                                'on interval {1}. Pick new breaking '
                                'points'.format(s_idx, interval))
            temp_pool = np.array(temp_pool)
            # get experimental variogram for the pool
            temp_cov_matrix = np.cov(np.transpose(temp_pool))
            reg_interval, reg_cov = _regul(np.transpose(dist_to_stations),
                                           temp_cov_matrix[:, s_idx],
                                           15, 1)
            temp_exp_sv = np.transpose(np.vstack((reg_interval, reg_cov)))
            # fit to get theoretical SV
            x_opt, mod_opt, _ = theor_variogram(
                temp_exp_sv,
                candidate_sv=candidate_var,
                candidate_sv_tag=candidate_tag,
                Sb=(0.01, 6.0),
                Rb=(5.00, 45.0),
                Nb=(0.001, 0.05))
            # save results for this condition
            x_opt_db.append(x_opt)
            mod_opt_db.append(mod_opt)
            print('Variogram fitted for interval {0} at {1}'.format(
                interval, ctime()))
        x_opt_st.append(x_opt_db)
        mod_opt_st.append(mod_opt_db)
    x_opt_st = np.array(x_opt_st)
    mod_opt_st = np.array(mod_opt_st)
    return x_opt_st, mod_opt_st
def _kriging_core(ModOpt, single_target, stations, candidate_sv, xopt,
                  record_covariance_matrix, records, krig_type, meas_var):
    '''
    Kriging core where the interpolating algorithm takes place.

    Parameters
    ----------
        **ModOpt** -- Pointer to optimal semivariogram model \n
        **single_target** -- Single point of interest to interpolate
        (targets) ``[t,2]`` \n
        **stations** -- Gauge location for interpolation ``[x,2]`` \n
        **candidate_sv** -- Array with pointer to functions in
        variogram_fit module \n
        **xopt** -- vector with optimal semivariogram parameters ``[5]`` \n
        **record_covariance_matrix** -- Gauge records covariance matrix
        ``[x,x]`` \n
        **records** -- Precipitation register for gauges ``[n,x]`` \n
        **krig_type** -- Type of Kriging to be used. 'Sim' for Simple and\
        'Ord' for Ordinary Kriging \n
        **meas_var** -- Measurement-error variance per time step and
        station ``[n,x]`` \n
    Returns
    -------
        **Z** -- Interpolation for each target and time step ``[n,1]`` \n
        **SP** -- Interpolation variance field ``[1]``\n
    '''
    n_stations = len(stations)
    # Semivariogram between the target and every station
    targetsD = dist.target(stations, [single_target])[0]
    SVm = []
    for j in range(len(stations)):
        SVm.append(candidate_sv[ModOpt](targetsD[j], xopt))
    # fix covariance matrix so it does not have negative eigenvalues
    record_covariance_matrix = near_psd(record_covariance_matrix)

    # BUG FIX: the original compared strings with ``is`` (identity);
    # use ``==`` throughout.
    if krig_type == 'Ord':
        # Ordinary Kriging: extend the covariance matrix with the
        # unbiasedness constraint (row/column of ones, zero corner).
        # np.row_stack was removed in NumPy 2.0; np.vstack is equivalent.
        record_covariance_matrix = np.vstack(
            (record_covariance_matrix,
             np.ones(len(record_covariance_matrix))))
        record_covariance_matrix = np.column_stack(
            (record_covariance_matrix,
             np.ones(len(record_covariance_matrix))))
        record_covariance_matrix[-1, -1] = 0.0
        SVm.append(1.0)
        SVr = np.array(record_covariance_matrix)
        if linalg.det(record_covariance_matrix) == 0:
            # BUG FIX: the original message said "Non-singular"; a zero
            # determinant means the matrix IS singular.
            print('Singular covariance matrix - Sorry, cannot invert \n')
            err_out = ERROR_CODE * np.ones(len(records))
            return err_out, err_out
        if np.max(meas_var) == 0:
            # No measurement error: one weight vector serves all steps
            Z = []
            InvSVr = linalg.inv(SVr)
            WM = np.dot(InvSVr, SVm)
            for i in range(len(records)):
                # Last weight is the Lagrange multiplier -> exclude it
                Ztemp = np.dot(WM[:-1], records[i])
                Z.append(Ztemp)
            S = SVm[:-1]
            SP = xopt[0] - (np.dot(WM[:-1], np.transpose(S))) - WM[-1]
        else:
            Z = []
            for i in range(len(records)):
                records_i = records[i]
                add_var_mat = np.zeros([n_stations + 1, n_stations + 1])
                # generate added measurement covariance matrix
                add_var_mat[:-1, :-1] = np.array(
                    [[np.sqrt(meas_var[i, j] * meas_var[i, k])
                      for j in range(n_stations)]
                     for k in range(n_stations)])
                add_var_vec = np.zeros([n_stations + 1])
                add_var_vec[:-1] = np.array(
                    [np.sqrt(meas_var[i, j]) for j in range(n_stations)])
                # augmented variance matrix with measurement noise
                # (trimmed at zero)
                SVr_mod = np.clip(SVr - add_var_mat, 0, np.inf)
                # Augmented variance towards target (trimmed)
                SVm_mod = np.clip(SVm - add_var_vec, 0, np.inf)
                # Check for collinearity of the solutions and remove the
                # non-necessary (all-zero) columns
                coll_vars = [j for j in range(len(SVr_mod) - 1)
                             if np.max(SVr_mod[:, j]) == 0]
                if coll_vars != []:
                    # Remove from covariance matrix
                    SVr_mod = np.delete(SVr_mod, coll_vars, 0)
                    SVr_mod = np.delete(SVr_mod, coll_vars, 1)
                    # remove from variance-to-target vector
                    SVm_mod = np.delete(SVm_mod, coll_vars, 0)
                    # remove from records used
                    records_i = np.delete(records_i, coll_vars, 0)
                # Get the weights
                InvSVr = linalg.inv(SVr_mod)
                WM = np.dot(InvSVr, SVm_mod)
                # BUG FIX: the original used ``records[i]`` here, whose
                # length no longer matches once collinear stations were
                # removed; use the reduced ``records_i`` (as the 'Sim'
                # branch below already does).
                Ztemp = np.dot(WM[:-1], records_i)
                Z.append(Ztemp)
                S = SVm_mod[:-1]
                SP = xopt[0] - (np.dot(WM[:-1], np.transpose(S))) - WM[-1]
    elif krig_type == 'Sim':
        # Simple Kriging (no unbiasedness constraint)
        SVr = np.array(record_covariance_matrix)
        if linalg.det(record_covariance_matrix) == 0:
            print('Singular covariance matrix - Sorry, cannot invert \n')
            err_out = ERROR_CODE * np.ones(len(records))
            return err_out, err_out
        if np.max(meas_var) == 0:
            InvSVr = linalg.inv(SVr)
            WM = np.dot(InvSVr, SVm)
            Z = []
            for i in range(len(records)):
                Ztemp = np.dot(WM, records[i])
                Z.append(Ztemp)
            S = SVm
            SP = xopt[0] - (np.dot(WM, np.transpose(SVm)))
        else:
            Z = []
            for i in range(len(records)):
                records_i = records[i]
                # generate added measurement covariance matrix
                add_var_mat = np.array(
                    [[np.sqrt(meas_var[i, j] * meas_var[i, k])
                      for j in range(n_stations)]
                     for k in range(n_stations)])
                add_var_vec = np.array(
                    [np.sqrt(meas_var[i, j]) for j in range(n_stations)])
                # augmented variance matrix with measurement noise
                # (trimmed at zero)
                SVr_mod = np.clip(SVr - add_var_mat, 0, np.inf)
                # Augmented variance towards target (trimmed)
                SVm_mod = np.clip(SVm - add_var_vec, 0, np.inf)
                # Check for collinearity of the solutions and remove the
                # non-necessary (all-zero) columns
                coll_vars = [j for j in range(len(SVr_mod))
                             if np.max(SVr_mod[:, j]) == 0]
                if coll_vars != []:
                    # Remove from covariance matrix
                    SVr_mod = np.delete(SVr_mod, coll_vars, 0)
                    SVr_mod = np.delete(SVr_mod, coll_vars, 1)
                    # remove from variance-to-target vector
                    SVm_mod = np.delete(SVm_mod, coll_vars, 0)
                    # remove from records used
                    records_i = np.delete(records_i, coll_vars, 0)
                # Get the weights
                InvSVr = linalg.inv(SVr_mod)
                WM = np.dot(InvSVr, SVm_mod)
                Ztemp = np.dot(WM, records_i)
                Z.append(Ztemp)
                S = SVm
                SP = xopt[0] - (np.dot(WM, np.transpose(SVm_mod)))
    else:
        # BUG FIX: Python 2 print statements; use the print function
        # (single-argument form, valid on both Python 2 and 3).
        print('I pity the fool for no chosing Kriging type')
        print('only available Ord and Sim \n')
        Z = ERROR_CODE * np.ones(len(records))
        SP = ERROR_CODE * np.ones(len(records))
    return Z, SP
def ns_kriging_mm(data, stations, bp, x_opt_st, mod_opt_st, targets,
                  candidate_var=[variogram_fit.spherical_sv, ],
                  candidate_tag=['Spherical', ],
                  verbose=False, ncs=3, krig_type='Ord'):
    '''
    Non-stationary Kriging using a multi-precipitation (regime-switching)
    variogram model. For each target the ``ncs`` closest stations are used,
    and for each time step the variogram parameters of each station are
    picked according to the precipitation regime (interval) its record
    falls into.

    Parameters
    ----------
    data : nd_array
        Measurements ``[n, m]``: n time steps, m stations.
    stations : nd_array
        Station locations ``[m, 2]``.
    bp : list
        Breaking points separating the regimes. Do not include values equal
        to the minimum or maximum of the data.
    x_opt_st : nd_array
        Variogram parameters per station and regime (see
        ``multi_variogram``).
    mod_opt_st : nd_array
        Optimal model output per station and regime (currently unused; kept
        for interface symmetry with ``multi_variogram``).
    targets : sequence
        Points of interest to interpolate ``[t, 2]``.
    candidate_var : list
        Pointers to variogram functions; only the first entry is used.
    candidate_tag : list
        Tags matching ``candidate_var`` (currently unused).
    verbose : bool
        If True, print progress after each target.
    ncs : int
        Number of closest stations in each local Kriging system.
    krig_type : str
        'Ord' for Ordinary Kriging; any other value uses Simple Kriging.

    Returns
    -------
    zz : nd_array
        Interpolated field ``[n, t]``.
    ssp : nd_array
        Interpolation variance field ``[n, t]``.
    '''
    # BUG FIX: the original tested ``krig_type is 'Ord'`` (identity test on
    # a string literal) and left ``ord_krig`` undefined for any other
    # value, which raised NameError further down.
    ord_krig = (krig_type == 'Ord')

    # Set the intervals (work on a copy so the caller's list is untouched)
    bp = bp[:]
    bp.insert(0, np.min(data))
    bp.append(np.max(data))
    intervals = [[bp[i], bp[i + 1]] for i in range(len(bp) - 1)]
    # Enlarge the last interval slightly so the maximum value is included
    intervals[-1][1] = intervals[-1][1] + 0.0001

    # Map every record at every station to its regime (interval) index
    cond_map = np.zeros_like(data)
    for st_i in range(len(stations)):
        for n in range(len(data)):
            for i in range(len(intervals)):
                if data[n, st_i] >= min(intervals[i]) and \
                   data[n, st_i] < max(intervals[i]):
                    cond_map[n, st_i] = i

    # Distance matrix between stations
    dist_mat = np.array(dist.between(stations))

    # Result containers: one column per target
    zz = np.zeros([len(data), len(targets)])
    ssp = np.zeros([len(data), len(targets)])
    for tarcoun in range(len(targets)):
        # Select individual target for interpolation
        tar = targets[tarcoun]
        # Distance from the target to all stations
        das = np.array(dist.target([tar, ], stations)).flatten()
        # Indices of the ncs closest stations
        cs = list(das.argsort()[:ncs])
        # Reduce data, distance, regime and parameter matrices to those
        red_data = data[:, cs]
        red_dist = dist_mat[cs][:, cs]
        red_cond_map = cond_map[:, cs]
        red_xopt = x_opt_st[cs]

        # Pre-cooked all-zeros case (regime 0), solved once per target
        if ord_krig:
            cov_mat_zeros = np.ones([ncs + 1, ncs + 1])
            cov_mat_zeros[-1, -1] = 0
        else:
            cov_mat_zeros = np.ones([ncs, ncs])
        for ii in range(ncs):
            for jj in range(ncs):
                # TODO include different variogram type where 0 is
                cov_mat_zeros[ii, jj] = candidate_var[0](red_dist[ii, jj],
                                                         red_xopt[ii, 0])
        if ord_krig:
            cov_tar_zeros = np.ones([ncs + 1, 1])
        else:
            cov_tar_zeros = np.ones([ncs, 1])
        for ii in range(ncs):
            cov_tar_zeros[ii, 0] = candidate_var[0](das[cs[ii]],
                                                    red_xopt[ii, 0])
        # Inverse of extended covariance matrix and Kriging weights
        inv_cov_mat_zeros = np.linalg.inv(cov_mat_zeros)
        w_vec = np.dot(inv_cov_mat_zeros, cov_tar_zeros)
        # Interpolation variance of the all-zeros Kriging system
        if ord_krig:
            # BUG FIX: the Lagrange multiplier must be added outside the
            # sum; the original summed ``w_vec*cov_tar_zeros + w_vec[-1]``
            # element-wise, contradicting the corrected ``sp_temp`` below.
            sp_zeros = (np.max(cov_mat_zeros)
                        - (np.sum(w_vec * cov_tar_zeros) + w_vec[-1][0]))
        else:
            sp_zeros = (np.max(cov_mat_zeros)
                        - (np.dot(np.transpose(w_vec),
                                  cov_tar_zeros)[0][0]))

        for t in range(len(data)):
            # If there is some precipitation data
            if np.max(red_data[t, :]) != 0:
                # Covariance matrix between neighbouring stations (K),
                # built with the regime-dependent variogram parameters
                if ord_krig:
                    cov_mat = np.ones([ncs + 1, ncs + 1])
                    cov_mat[-1, -1] = 0
                else:
                    cov_mat = np.ones([ncs, ncs])
                for ii in range(ncs):
                    for jj in range(ncs):
                        # TODO include different variogram type where 0 is
                        cov_mat[ii, jj] = candidate_var[0](
                            red_dist[ii, jj],
                            red_xopt[ii, int(red_cond_map[t, ii])])
                # Fix the diagonal to the variogram maximum to push the
                # matrix towards positive eigenvalues
                for ii in range(ncs):
                    cov_mat[ii, ii] = np.max(cov_mat)
                # Symmetrise using the average bi-directional covariance
                cov_mat = 0.5 * (cov_mat + np.transpose(cov_mat))

                # Covariance towards the target
                if ord_krig:
                    cov_tar = np.ones([ncs + 1, 1])
                else:
                    cov_tar = np.ones([ncs, 1])
                for ii in range(ncs):
                    cov_tar[ii, 0] = candidate_var[0](
                        das[cs[ii]],
                        red_xopt[ii, int(red_cond_map[t, ii])])

                # Solve the Kriging system
                inv_cov_mat = np.linalg.inv(cov_mat)
                w_vec = np.dot(inv_cov_mat, cov_tar)
                # Interpolation value and variance
                if ord_krig:
                    z_temp = np.dot(np.transpose(w_vec[:-1]),
                                    np.reshape(red_data[t, :],
                                               [-1, 1]))[0][0]
                    sp_temp = (np.max(cov_mat)
                               - (np.sum(w_vec * cov_tar) + w_vec[-1][0]))
                else:
                    z_temp = np.dot(np.transpose(w_vec),
                                    np.reshape(red_data[t, :],
                                               [-1, 1]))[0][0]
                    sp_temp = np.max(cov_mat) - (np.sum(w_vec * cov_tar))
                ssp[t, tarcoun] = sp_temp
                zz[t, tarcoun] = z_temp
            else:
                # No rain in the whole neighbourhood: interpolate 0 with
                # the pre-computed zero-case variance
                ssp[t, tarcoun] = sp_zeros
                zz[t, tarcoun] = 0
        if verbose:
            # BUG FIX: the original reported the stale loop variable ``n``
            # (always len(data)-1); report the target counter instead.
            print('Finished target {0}/{1} at {2}'.format(
                tarcoun + 1, len(targets), ctime()))
    return zz, ssp
def multi_variogram(data, stations, bp,
                    candidate_var=[variogram_fit.spherical_sv, ],
                    candidate_tag=['Spherical', ]):
    '''
    Calculates the object with multiple semivariograms of the data at
    different breaking points.

    Parameters:
    -----------
    data : nd_array
        Array containing all the measurements of the variable, of size
        '[n, m]'. n is the number of measurements, m the number of
        stations.
    stations : nd_array
        Array containing the location of stations. The size of the array
        is '[m, 2]'.
    bp : list
        List with the sections of the data to create the pools. Do not
        include values equal to the maximum or minimum of the data.

    returns:
    --------
    x_opt_st : nd_array
        3 dimensional array consisting of station index, condition index
        and variogram parameters.
    mod_opt_st : nd_array
        3 dimensional array consisting of station index, condition index
        and optimal model output.
    '''
    # Make n variograms as breaking points at the location of the sensors.
    # bp has to be in ascending order; work on a copy and extend it with
    # the data minimum/maximum so the pools cover the whole range.
    bp = bp[:]
    bp.insert(0, np.min(data))
    bp.append(np.max(data))
    intervals = [[bp[i], bp[i + 1]] for i in range(len(bp) - 1)]
    # Enlarge the last interval slightly so the maximum value is included
    intervals[-1][1] = intervals[-1][1] + 0.0001

    x_opt_st = []
    mod_opt_st = []
    # iterate over the stations
    for s_idx in range(len(stations)):
        # get distance of this station towards the other stations
        dist_to_stations = dist.target(stations, [stations[s_idx], ])
        # Iterate over the intervals of precipitation
        x_opt_db = []
        mod_opt_db = []
        for interval in intervals:
            # Pool all records whose value at this station falls inside
            # the interval (one pool per condition per station)
            temp_pool = []
            for i in range(len(data)):
                if data[i, s_idx] >= min(interval) and \
                   data[i, s_idx] < max(interval):
                    temp_pool.append(data[i, :])
            # Raise error if pool is empty
            if temp_pool == []:
                raise NameError('Pool is empty: no data at station {0} '
                                'on interval {1}. Pick new breaking '
                                'points'.format(s_idx, interval))
            temp_pool = np.array(temp_pool)
            # get experimental variogram for the pool
            temp_cov_matrix = np.cov(np.transpose(temp_pool))
            reg_interval, reg_cov = _regul(np.transpose(dist_to_stations),
                                           temp_cov_matrix[:, s_idx],
                                           15, 1)
            temp_exp_sv = np.transpose(np.vstack((reg_interval, reg_cov)))
            # fit to get theoretical SV
            x_opt, mod_opt, _ = theor_variogram(
                temp_exp_sv,
                candidate_sv=candidate_var,
                candidate_sv_tag=candidate_tag,
                Sb=(0.01, 6.0),
                Rb=(5.00, 45.0),
                Nb=(0.001, 0.05))
            # save results for this condition
            x_opt_db.append(x_opt)
            mod_opt_db.append(mod_opt)
            print('Variogram fitted for interval {0} at {1}'.format(
                interval, ctime()))
        x_opt_st.append(x_opt_db)
        mod_opt_st.append(mod_opt_db)
    x_opt_st = np.array(x_opt_st)
    mod_opt_st = np.array(mod_opt_st)
    return x_opt_st, mod_opt_st
def krig(MaxDist, targets, stations, records, record_covariance_matrix,
         ModOpt, xopt, candidate_sv, tmin=0, tmax='def', MinNumSt='def',
         krig_type='Sim', normalisation=False, m_error=None):
    '''
    Parsing of data for Kriging interpolation. This function contains\
    execution of interpolation as well. \n
    Parameters
    ----------
        **MaxDist** -- Initial search radius for nearby stations \n
        **targets** -- Points of interest to interpolate (targets)
        ``[t,2]`` \n
        **stations** -- Gauge location for interpolation ``[x,2]`` \n
        **records** -- Precipitation register for gauges ``[n,x]`` \n
        **record_covariance_matrix** -- Gauge records covariance matrix
        ``[x,x]`` \n
        **ModOpt** -- pointer to optimal semivariogram model \n
        **xopt** -- vector with optimal semivariogram parameters ``[5]`` \n
        **candidate_sv** -- Array with pointer to functions in
        variogram_fit module \n
        **tmin** -- Initial time step to be interpolated \n
        **tmax** -- Final time step to be interpolated \n
        **MinNumSt** -- Minimum number of stations within the search
        radius \n
        **krig_type** -- Type of Kriging to be used. 'Sim' for Simple and\
        'Ord' for Ordinary Kriging \n
        **normalisation** -- Boolean. If true, then the variable is\
        normalised in the neighbourhood of the variable \n
        **m_error** -- contains the variance of the measurement error \n
    Returns
    -------
        **Z** -- Interpolation for each target and time step ``[n,t]`` \n
        **SP** -- Interpolation variance field ``[t,1]``\n
        **ZAvg** -- Average of interpolated field ``[n,1]``
    '''
    # BUG FIX: ``MinNumSt is 'def'`` relied on CPython string interning;
    # use equality instead (same fix as the ``tmax == 'def'`` test below).
    if MinNumSt == 'def':
        MinNumSt = len(stations)
    if MinNumSt >= len(stations):
        print('Number of stations should be larger than number of '
              'minimum stations. Set to all stations')
        MinNumSt = len(stations)
    if tmax == 'def':
        tmax = len(records)
    tmin = int(tmin)
    tmax = int(tmax)
    # Time window of records to interpolate
    PrecSec = records[tmin:tmax]
    # Measurement-error variance: zero matrix when not provided
    if m_error is None:
        m_error = np.zeros([len(PrecSec), len(stations)])
    else:
        m_error = m_error[tmin:tmax]

    # Reduce measurements to relevant locations for the targets
    Z = None
    SP = []
    for kk in range(len(targets)):
        # Distances from the target to every station
        das = np.array(dist.target([targets[kk], ], stations)).flatten()
        # Indices of the MinNumSt closest stations (kept) and the rest
        # (dropped from the local Kriging system)
        cs = list(das.argsort()[:MinNumSt])
        fs = list(das.argsort()[MinNumSt:])
        # Reduction of relevant stations (reduced data and cov matrices)
        RedLoc = np.delete(stations, fs, 0)
        reduced_records = np.delete(PrecSec, fs, 1)
        # reduction of the measurement error matrix
        meas_var = m_error[:, cs]
        if normalisation:
            # Detrend every time step by its local neighbourhood average
            local_average = np.average(reduced_records, 1).reshape(
                [len(reduced_records), 1])
            reduced_records = reduced_records - local_average
        reduced_cov_matrix = record_covariance_matrix[:]
        reduced_cov_matrix = np.delete(reduced_cov_matrix, fs, 0)
        reduced_cov_matrix = np.delete(reduced_cov_matrix, fs, 1)

        # Kriging interpolation for this target
        TempRes = _kriging_core(ModOpt, targets[kk], RedLoc, candidate_sv,
                                xopt, reduced_cov_matrix, reduced_records,
                                krig_type, meas_var)
        temp = np.vstack(TempRes[0])
        if normalisation:
            # Add back the local trend removed above
            temp = temp + local_average
        # BUG FIX: the original used ``if Z == []``, which is an ambiguous
        # element-wise comparison once Z becomes an ndarray; track the
        # first iteration with ``Z is None`` instead.
        if Z is None:
            Z = temp
        else:
            Z = np.hstack((Z, temp))
        SP.append(TempRes[1])
    ZAvg = np.average(Z, 1)
    SP = np.array(SP)
    SP[SP < 0] = 0  # clamp negative interpolation variances
    return Z, SP, ZAvg
def _kriging_core(ModOpt, single_target, stations, candidate_sv, xopt,
                  record_covariance_matrix, records, krig_type, meas_var):
    '''
    Kriging core where the interpolating algorithm takes place.

    Parameters
    ----------
        **ModOpt** -- Pointer to optimal semivariogram model \n
        **single_target** -- Single point of interest to interpolate
        (targets) ``[t,2]`` \n
        **stations** -- Gauge location for interpolation ``[x,2]`` \n
        **candidate_sv** -- Array with pointer to functions in
        variogram_fit module \n
        **xopt** -- vector with optimal semivariogram parameters ``[5]`` \n
        **record_covariance_matrix** -- Gauge records covariance matrix
        ``[x,x]`` \n
        **records** -- Precipitation register for gauges ``[n,x]`` \n
        **krig_type** -- Type of Kriging to be used. 'Sim' for Simple and\
        'Ord' for Ordinary Kriging \n
        **meas_var** -- Measurement-error variance per time step and
        station ``[n,x]`` \n
    Returns
    -------
        **Z** -- Interpolation for each target and time step ``[n,1]`` \n
        **SP** -- Interpolation variance field ``[1]``\n
    '''
    n_stations = len(stations)
    # Semivariogram between the target and every station
    targetsD = dist.target(stations, [single_target])[0]
    SVm = []
    for j in range(len(stations)):
        SVm.append(candidate_sv[ModOpt](targetsD[j], xopt))
    # fix covariance matrix so it does not have negative eigenvalues
    record_covariance_matrix = near_psd(record_covariance_matrix)

    # BUG FIX: the original compared strings with ``is`` (identity);
    # use ``==`` throughout.
    if krig_type == 'Ord':
        # Ordinary Kriging: extend the covariance matrix with the
        # unbiasedness constraint (row/column of ones, zero corner).
        # np.row_stack was removed in NumPy 2.0; np.vstack is equivalent.
        record_covariance_matrix = np.vstack(
            (record_covariance_matrix,
             np.ones(len(record_covariance_matrix))))
        record_covariance_matrix = np.column_stack(
            (record_covariance_matrix,
             np.ones(len(record_covariance_matrix))))
        record_covariance_matrix[-1, -1] = 0.0
        SVm.append(1.0)
        SVr = np.array(record_covariance_matrix)
        if linalg.det(record_covariance_matrix) == 0:
            # BUG FIX: the original message said "Non-singular"; a zero
            # determinant means the matrix IS singular.
            print('Singular covariance matrix - Sorry, cannot invert \n')
            err_out = ERROR_CODE * np.ones(len(records))
            return err_out, err_out
        if np.max(meas_var) == 0:
            # No measurement error: one weight vector serves all steps
            Z = []
            InvSVr = linalg.inv(SVr)
            WM = np.dot(InvSVr, SVm)
            for i in range(len(records)):
                # Last weight is the Lagrange multiplier -> exclude it
                Ztemp = np.dot(WM[:-1], records[i])
                Z.append(Ztemp)
            S = SVm[:-1]
            SP = xopt[0] - (np.dot(WM[:-1], np.transpose(S))) - WM[-1]
        else:
            Z = []
            for i in range(len(records)):
                records_i = records[i]
                add_var_mat = np.zeros([n_stations + 1, n_stations + 1])
                # generate added measurement covariance matrix
                add_var_mat[:-1, :-1] = np.array(
                    [[np.sqrt(meas_var[i, j] * meas_var[i, k])
                      for j in range(n_stations)]
                     for k in range(n_stations)])
                add_var_vec = np.zeros([n_stations + 1])
                add_var_vec[:-1] = np.array(
                    [np.sqrt(meas_var[i, j]) for j in range(n_stations)])
                # augmented variance matrix with measurement noise
                # (trimmed at zero)
                SVr_mod = np.clip(SVr - add_var_mat, 0, np.inf)
                # Augmented variance towards target (trimmed)
                SVm_mod = np.clip(SVm - add_var_vec, 0, np.inf)
                # Check for collinearity of the solutions and remove the
                # non-necessary (all-zero) columns
                coll_vars = [j for j in range(len(SVr_mod) - 1)
                             if np.max(SVr_mod[:, j]) == 0]
                if coll_vars != []:
                    # Remove from covariance matrix
                    SVr_mod = np.delete(SVr_mod, coll_vars, 0)
                    SVr_mod = np.delete(SVr_mod, coll_vars, 1)
                    # remove from variance-to-target vector
                    SVm_mod = np.delete(SVm_mod, coll_vars, 0)
                    # remove from records used
                    records_i = np.delete(records_i, coll_vars, 0)
                # Get the weights
                InvSVr = linalg.inv(SVr_mod)
                WM = np.dot(InvSVr, SVm_mod)
                # BUG FIX: the original used ``records[i]`` here, whose
                # length no longer matches once collinear stations were
                # removed; use the reduced ``records_i`` (as the 'Sim'
                # branch below already does).
                Ztemp = np.dot(WM[:-1], records_i)
                Z.append(Ztemp)
                S = SVm_mod[:-1]
                SP = xopt[0] - (np.dot(WM[:-1], np.transpose(S))) - WM[-1]
    elif krig_type == 'Sim':
        # Simple Kriging (no unbiasedness constraint)
        SVr = np.array(record_covariance_matrix)
        if linalg.det(record_covariance_matrix) == 0:
            print('Singular covariance matrix - Sorry, cannot invert \n')
            err_out = ERROR_CODE * np.ones(len(records))
            return err_out, err_out
        if np.max(meas_var) == 0:
            InvSVr = linalg.inv(SVr)
            WM = np.dot(InvSVr, SVm)
            Z = []
            for i in range(len(records)):
                Ztemp = np.dot(WM, records[i])
                Z.append(Ztemp)
            S = SVm
            SP = xopt[0] - (np.dot(WM, np.transpose(SVm)))
        else:
            Z = []
            for i in range(len(records)):
                records_i = records[i]
                # generate added measurement covariance matrix
                add_var_mat = np.array(
                    [[np.sqrt(meas_var[i, j] * meas_var[i, k])
                      for j in range(n_stations)]
                     for k in range(n_stations)])
                add_var_vec = np.array(
                    [np.sqrt(meas_var[i, j]) for j in range(n_stations)])
                # augmented variance matrix with measurement noise
                # (trimmed at zero)
                SVr_mod = np.clip(SVr - add_var_mat, 0, np.inf)
                # Augmented variance towards target (trimmed)
                SVm_mod = np.clip(SVm - add_var_vec, 0, np.inf)
                # Check for collinearity of the solutions and remove the
                # non-necessary (all-zero) columns
                coll_vars = [j for j in range(len(SVr_mod))
                             if np.max(SVr_mod[:, j]) == 0]
                if coll_vars != []:
                    # Remove from covariance matrix
                    SVr_mod = np.delete(SVr_mod, coll_vars, 0)
                    SVr_mod = np.delete(SVr_mod, coll_vars, 1)
                    # remove from variance-to-target vector
                    SVm_mod = np.delete(SVm_mod, coll_vars, 0)
                    # remove from records used
                    records_i = np.delete(records_i, coll_vars, 0)
                # Get the weights
                InvSVr = linalg.inv(SVr_mod)
                WM = np.dot(InvSVr, SVm_mod)
                Ztemp = np.dot(WM, records_i)
                Z.append(Ztemp)
                S = SVm
                SP = xopt[0] - (np.dot(WM, np.transpose(SVm_mod)))
    else:
        # BUG FIX: Python 2 print statements; use the print function
        # (single-argument form, valid on both Python 2 and 3).
        print('I pity the fool for no chosing Kriging type')
        print('only available Ord and Sim \n')
        Z = ERROR_CODE * np.ones(len(records))
        SP = ERROR_CODE * np.ones(len(records))
    return Z, SP