from itertools import cycle

import numpy as np
from scipy.stats import zscore
from six import string_types

# ensure_list, isequal_string, raise_value_error, demean, logger and
# NORMALIZATION_METHODS are helpers/constants assumed to come from the
# surrounding project.


def normalize_signals(signals, normalization=None, axis=None, percent=None):

    # Following pylab demean:

    def matrix_subtract_along_axis(x, y, axis=0):
        "Return x minus y, where y corresponds to some statistic of x along the specified axis"
        if axis == 0 or axis is None or x.ndim <= 1:
            return x - y
        ind = [slice(None)] * x.ndim
        ind[axis] = np.newaxis
        return x - y[tuple(ind)]

    def matrix_divide_along_axis(x, y, axis=0):
        "Return x divided by y, where y corresponds to some statistic of x along the specified axis"
        if axis == 0 or axis is None or x.ndim <= 1:
            return x / y
        ind = [slice(None)] * x.ndim
        ind[axis] = np.newaxis
        return x / y[tuple(ind)]

    for norm, ax, prcnd in zip(ensure_list(normalization),
                               cycle(ensure_list(axis)), cycle(ensure_list(percent))):
        if isinstance(norm, string_types):
            if isequal_string(norm, "zscore"):
                signals = zscore(signals, axis=ax)  # / 3.0
            elif isequal_string(norm, "baseline-std"):
                signals = normalize_signals(signals, ["baseline", "std"], axis=axis)
            elif norm.find("baseline") == 0 and norm.find("amplitude") >= 0:
                signals = normalize_signals(signals, ["baseline", norm.split("-")[1]],
                                            axis=axis, percent=percent)
            elif isequal_string(norm, "minmax"):
                signals = normalize_signals(signals, ["min", "max"], axis=axis)
            elif isequal_string(norm, "mean"):
                signals = demean(signals, axis=ax)
            elif isequal_string(norm, "baseline"):
                if prcnd is None:
                    prcnd = 1
                signals = matrix_subtract_along_axis(signals, np.percentile(signals, prcnd, axis=ax), axis=ax)
            elif isequal_string(norm, "min"):
                signals = matrix_subtract_along_axis(signals, np.min(signals, axis=ax), axis=ax)
            elif isequal_string(norm, "max"):
                signals = matrix_divide_along_axis(signals, np.max(signals, axis=ax), axis=ax)
            elif isequal_string(norm, "std"):
                signals = matrix_divide_along_axis(signals, signals.std(axis=ax), axis=ax)
            elif norm.find("amplitude") >= 0:
                if prcnd is None:
                    prcnd = [1, 99]
                amplitude = np.percentile(signals, prcnd[1], axis=ax) - np.percentile(signals, prcnd[0], axis=ax)
                this_ax = ax
                if isequal_string(norm.split("amplitude")[0], "max"):
                    amplitude = amplitude.max()
                    this_ax = None
                elif isequal_string(norm.split("amplitude")[0], "mean"):
                    amplitude = amplitude.mean()
                    this_ax = None
                signals = matrix_divide_along_axis(signals, amplitude, axis=this_ax)
            else:
                raise_value_error("Ignoring signals' normalization " + str(normalization) +
                                  ",\nwhich is not one of the currently available " +
                                  str(NORMALIZATION_METHODS) + "!", logger)
    return signals
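# The "zscore" and "minmax" branches above reduce to plain NumPy/SciPy
# operations. A minimal, self-contained sketch of what they compute, on
# synthetic data and independent of the project-specific helpers:

import numpy as np
from scipy.stats import zscore

signals = np.random.randn(1000, 4)  # 1000 samples x 4 channels (synthetic)

# "zscore": zero mean and unit standard deviation per channel.
z = zscore(signals, axis=0)

# "minmax" (= ["min", "max"]): subtract the per-channel minimum, then divide
# by the maximum of the shifted signal, mapping each channel onto [0, 1].
shifted = signals - signals.min(axis=0)
minmax = shifted / shifted.max(axis=0)

print(z.mean(axis=0), z.std(axis=0))            # ~0 and ~1 per channel
print(minmax.min(axis=0), minmax.max(axis=0))   # 0 and 1 per channel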
import numpy as np
from numpy.fft import ifft
from matplotlib import mlab
from obspy import Trace, UTCDateTime

# movingaverage() is a smoothing helper assumed to come from the surrounding project.


def noise_segmenting(poisson_times, st_event_2, st_t, noise_level, samp_rate, delta):
    """
    Creates a noise array big enough to host all of the events.

    The noise is created by multiplying white noise by the (averaged, smoothed)
    seismic noise spectrum in the frequency domain. The product is then
    inverse-FFTed and scaled to whatever noise level is defined, to output the
    full noise array.
    """
    # End time for the noise so that it covers all events; the factor of 2
    # gives some time after the last event (poisson_times assumed in samples).
    noise_lim = (poisson_times[-1] + len(st_event_2)) * 2

    st_noise_start_t = UTCDateTime("2015-01-22T01:00:00")
    st_noise_end_t = UTCDateTime("2015-01-22T01:02:00")
    test_trace = st_t[0].slice(st_noise_start_t, st_noise_end_t)
    test_trace_length = int(len(test_trace) / test_trace.stats.sampling_rate)  # seconds

    minutes_long = noise_lim / st_event_2.stats.sampling_rate / 60.0
    noise_loops = int(np.ceil(minutes_long / 2.0))  # how many 2-minute loops we need

    noise_array = np.zeros([noise_loops, len(test_trace)])
    for j in range(noise_loops):
        # Average the Tungurahua noise amplitude spectrum over twenty
        # demeaned 2-minute samples.
        tung_n_fft = np.zeros([20, len(test_trace) // 2 + 1])  # rfft output length
        for i in range(20):
            st_noise = st_t[0].slice(st_noise_start_t + (i * test_trace_length),
                                     st_noise_end_t + (i * test_trace_length))
            noise_detrended = st_noise.detrend()
            noise_demeaned = mlab.demean(noise_detrended.data)
            noise_averaging = Trace(noise_demeaned).normalize()
            tung_n_fft[i] = np.abs(np.fft.rfft(noise_averaging.data))
        # Work out the average spectrum and smooth it.
        ave = np.average(tung_n_fft, axis=0)
        aves = movingaverage(ave, 20)
        # Colour white noise with the smoothed average spectrum, then go back
        # to the time domain (only the real part is kept).
        whitenoise = np.random.normal(0, 1, len(noise_averaging))
        whitenoise_n = Trace(whitenoise).normalize()
        wn_n_fft = np.fft.rfft(whitenoise_n.data)
        newnoise_fft = wn_n_fft * aves
        newnoise = ifft(newnoise_fft, n=len(st_noise))
        noise_array[j] = np.real(newnoise)

    full_noise_array = np.ravel(noise_array)
    full_noise_array_n = Trace(np.float32(full_noise_array)).normalize()
    full_noise_array_n_scaled = Trace(full_noise_array_n.data * noise_level)
    full_noise_array_n_scaled.stats.sampling_rate = samp_rate
    full_noise_array_n_scaled.stats.delta = delta
    return full_noise_array_n_scaled
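# The core of noise_segmenting() is the spectral-colouring step. A minimal,
# self-contained sketch of that step on synthetic data: np.convolve stands in
# for the project's movingaverage() helper, and np.fft.irfft is used here as
# the exact inverse of np.fft.rfft.

import numpy as np

rng = np.random.default_rng(0)

# Synthetic stand-in for the recorded reference noise (20 demeaned windows).
windows = rng.normal(size=(20, 12001))
windows -= windows.mean(axis=1, keepdims=True)

# Average amplitude spectrum over the windows, smoothed with a moving average.
ave = np.abs(np.fft.rfft(windows, axis=1)).mean(axis=0)
aves = np.convolve(ave, np.ones(20) / 20.0, mode="same")

# Colour unit white noise by the smoothed spectrum; back to the time domain.
white = rng.normal(size=windows.shape[1])
coloured = np.fft.irfft(np.fft.rfft(white) * aves, n=windows.shape[1])

print(coloured.shape)  # same length as one reference window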
import numpy as np
from numpy.fft import ifft
from matplotlib import mlab
from obspy import Trace, UTCDateTime
from scipy.signal import periodogram

# movingaverage() and snr_eventtonoise() are helpers assumed to come from the
# surrounding project.


def create_noise_snr_data(st_noise_n_t, st_event_2, noise_lim, noise_steps):
    """
    Creates noise for SNR data based on the size and characteristics of the
    noise variable. Averages the noise from Tungurahua over 20 demeaned
    samples and smooths it.

    st_noise_n_t = Trace on which the noise is based
    st_event_2   = Trace containing the signal (used in the SNR calculation)
    noise_lim    = upper limit of the noise multiplications
    noise_steps  = step size between noise levels

    Returns snr_r, the SNR array, which can be stored and reloaded instead of
    being recomputed each time; this allows the SNR, rather than the
    noise_level, to be the input when creating noise with noise_segmenting().
    Also returns noise_levels, the noise levels used to calculate the SNR.
    """
    st_noise_start_t = UTCDateTime("2015-01-22T01:00:00")
    st_noise_end_t = UTCDateTime("2015-01-22T01:02:00")
    test_trace = st_noise_n_t.slice(st_noise_start_t, st_noise_end_t)
    test_trace_length = int(len(test_trace) / test_trace.stats.sampling_rate)  # seconds

    noise_levels = np.arange(0, noise_lim, noise_steps)
    snr_r = np.zeros(len(noise_levels))
    n_bins = len(test_trace) // 2 + 1  # rfft / one-sided periodogram length
    tung_n_fft = np.zeros([20, n_bins])
    pp = np.zeros([20, n_bins])

    for j in range(len(noise_levels)):
        # Average the noise amplitude spectrum and periodogram over twenty
        # demeaned samples (this average is the same for every noise level
        # and could be hoisted out of the outer loop).
        for i in range(20):
            st_noise = st_noise_n_t.slice(st_noise_start_t + (i * test_trace_length),
                                          st_noise_end_t + (i * test_trace_length))
            noise_detrended = st_noise.detrend()
            noise_demeaned = mlab.demean(noise_detrended.data)
            noise_averaging = Trace(noise_demeaned).normalize()
            tung_n_fft[i] = np.abs(np.fft.rfft(noise_averaging.data))
            ff, pp[i] = periodogram(noise_averaging.data, 100)
        # Work out the average spectrum and periodogram, then smooth them.
        ave = np.average(tung_n_fft, axis=0)
        ave_p = np.average(pp, axis=0)
        aves = movingaverage(ave, 20)
        aves_p = movingaverage(ave_p, 20)
        # Colour white noise with the smoothed average spectrum, scale it to
        # the current noise level and compute the resulting SNR.
        whitenoise = np.random.normal(0, 1, len(noise_averaging))
        whitenoise_n = Trace(whitenoise).normalize()
        wn_n_fft = np.fft.rfft(whitenoise_n.data)
        newnoise_fft = wn_n_fft * aves
        newnoise = ifft(newnoise_fft, n=len(st_noise))
        newnoise_n = Trace(np.real(newnoise)).normalize()
        newnoise_n_scaling = newnoise_n.data * noise_levels[j]
        snr_r[j] = snr_eventtonoise(st_event_2, newnoise_n_scaling)
    return snr_r, noise_levels
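# As the docstring notes, the stored (snr_r, noise_levels) pair lets the SNR
# be the input when creating noise. A sketch of that lookup with np.interp on
# a hypothetical, made-up SNR curve (real curves come from the function above):

import numpy as np

noise_levels = np.arange(0.1, 5.0, 0.1)
snr_r = 20.0 / (1.0 + noise_levels)  # hypothetical: SNR falls as noise rises

# np.interp needs ascending x values, so sort the curve by SNR first.
order = np.argsort(snr_r)
target_snr = 5.0
noise_level = np.interp(target_snr, snr_r[order], noise_levels[order])

print(noise_level)  # noise level to pass to noise_segmenting()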
""" 为了兼容matlab创造的一个库 """ from matplotlib import mlab import numpy as np # 实现去中心化 a = np.random.random((10, 3)) b = mlab.demean(a, axis=0) print(a) print('=' * 10) print(b) print('=' * 10) print(a - np.tile(np.mean(a, axis=1), (3, 1)).reshape(a.shape))