def plot_spectrogram(spectrogram, filename=None, freq_range=None, clim=None,
                     figsize=(8, 6), dpi=72):
    """ Plot a spectrogram.

    A simple plot of the spectrogram matrix is generated and saved under
    `filename` + '_spectrogram.png'. If `filename` is None (default) the
    figure is opened interactively instead.

    :type spectrogram: dictionary of the type spectrogram
    :param spectrogram: spectrogram to be plotted
    :type filename: string
    :param filename: complete path of the file under which the figure is to
        be saved ('_spectrogram.png' is appended). If None the figure is
        shown interactively.
    :type freq_range: list like
    :param freq_range: limits of the frequency range to plot. None or an
        empty list plots the full range.
    :type clim: list
    :param clim: lower and upper limit of the colorscale for plotting.
        None or an empty list uses the default color range.
    """
    # NOTE: mutable default arguments ([]) replaced by None to avoid the
    # shared-default pitfall; the truthiness checks below are unaffected.
    check = spectrogram_check(spectrogram)
    if not check['valid']:
        print('spectrogram is not a valid spectrogram dictionary')
        return

    frequency = spectrogram['frequency'].flatten()
    if freq_range:
        # index of the largest frequency below each limit
        # (assumes `frequency` is sorted in ascending order — TODO confirm)
        start = np.argmax(frequency[frequency <= freq_range[0]])
        stop = np.argmax(frequency[frequency <= freq_range[1]])
        X = np.array(spectrogram['spec_mat'])[:, start:stop]
        frequency = frequency[start:stop]
    else:
        X = spectrogram['spec_mat']

    if 'time' in spectrogram:
        time = convert_time(spectrogram['time'])
    else:
        time = np.linspace(0, X.shape[0], X.shape[0])

    plt.figure(figsize=figsize, dpi=dpi)
    plt.gcf().subplots_adjust(left=0.35)
    # rows (time windows) on the y axis, frequency on the x axis
    ax = plt.imshow(np.log(X), aspect='auto', origin='upper',
                    interpolation='none',
                    extent=[frequency[0], frequency[-1], X.shape[0], 0])
    if clim:
        plt.clim(clim)
    plt.colorbar(format='%1.2g')
    ax.axes.xaxis.set_major_locator(
        ticker.MaxNLocator(nbins=7, integer=True, symmetric=True))
    try:
        row, _ = X.shape
        # number of y labels between 2 and 15
        ynbins = max(2, min(15, row // 15))
    except Exception:
        ynbins = 1
    ax.axes.yaxis.set_major_locator(
        ticker.MaxNLocator(nbins=ynbins, integer=True, prune='upper'))
    # replace the numeric y ticks with the corresponding dates
    ytickpos = np.array(ax.axes.yaxis.get_ticklocs()).astype('i')
    _ = ax.axes.yaxis.set_ticklabels(time[ytickpos])

    plt.ylabel('Date')
    plt.title('Spectrogram')
    plt.xlabel('Frequency [Hz]')

    if filename is None:
        plt.show()
    else:
        plt.savefig(filename + '_spectrogram.png', dpi=dpi)
        plt.close()
def plot_dv(dv,
            save_dir='.',
            figure_file_name=None,
            mark_time=None,
            normalize_simmat=False,
            sim_mat_Clim=None,
            figsize=(9, 11), dpi=72):
    """ Plot the "extended" dv dictionary

    This function is thought to plot the result of the velocity change
    estimate as output by
    :class:`~miic.core.stretch_mod.multi_ref_vchange_and_align` and
    successively "extended" to contain also the timing in the form
    {'time': time_vect} where `time_vect` is a :class:`~numpy.ndarray` of
    :class:`~datetime.datetime` objects. This addition can be done, for
    example, using the function :class:`~miic.core.miic_utils.add_var_to_dict`.
    The produced figure is saved in `save_dir` that, if necessary, is created.
    It is also possible to pass a "special" time value `mark_time` that will
    be represented in the `dv/v` and `corr` plot as a vertical line; it can
    be a string (i.e. YYYY-MM-DD) or directly a :class:`~datetime.datetime`
    object.
    If the `dv` dictionary also contains a 'stats' keyword the combined
    seed id is reported in the title.
    In case the chosen filename exists in the `save_dir` directory, a suffix
    _<n> with n:0..+Inf is added.
    The aspect of the plot may change depending on the matplotlib version.
    The recommended one is matplotlib 1.1.1

    :type dv: dict
    :param dv: velocity change estimate dictionary as output by
        :class:`~miic.core.stretch_mod.multi_ref_vchange_and_align` and
        successively "extended" as described above.
    :type save_dir: string
    :param save_dir: Directory where to save the produced figure. It is
        created if it doesn't exist.
    :type figure_file_name: string
    :param figure_file_name: filename to use for saving the figure. If None
        the figure is displayed in interactive mode.
    :type mark_time: string or :class:`~datetime.datetime` object
    :param mark_time: It is a "special" time location that will be
        represented in the `dv/v` and `corr` plot as a vertical line.
    :type normalize_simmat: Bool
    :param normalize_simmat: if True the simmat will be normalized to a
        maximum of 1. Defaults to False
    :type sim_mat_Clim: 2 element array_like
    :param sim_mat_Clim: if non-empty it sets the color scale limits of the
        similarity matrix image

    :raises ValueError: if the `dv` dictionary is incomplete
    """
    # avoid a mutable default argument; [] behaves identically below
    if sim_mat_Clim is None:
        sim_mat_Clim = []

    check_state = dv_check(dv)

    # Check if the dv dictionary is "correct"
    if check_state['is_incomplete']:
        print("Error: Incomplete dv")
        print("Possible errors:")
        for key in check_state:
            # BUG FIX: original used `key is not 'is_incomplete'` which
            # compares string identity, not equality
            if key != 'is_incomplete':
                print("%s: %s" % (key, check_state[key]))
        raise ValueError

    # For compatibility with TraitsUI
    if mark_time == '':
        mark_time = None

    if mark_time and isinstance(mark_time, str):
        mark_time = from_str_to_datetime(mark_time, datetimefmt=True)
    elif mark_time and isinstance(mark_time, datetime.datetime):
        pass
    elif mark_time:
        print("Error: wrong mark_time format!")
        mark_time = None

    if not os.path.isdir(save_dir):
        print("Warning: `save_dir` doesn't exist. Creating ...")
        # makedirs (instead of mkdir) also handles nested paths
        os.makedirs(save_dir)
        print("Directory %s created" % save_dir)

    # Create a unique filename if TraitsUI-default is given.
    # BUG FIX: the original pre-appended '_change.png' to `fname` before the
    # existence loop, so when no file existed the figure was saved as
    # 'plot_default_change.png_change.png'.
    if figure_file_name == 'plot_default':
        fname = figure_file_name
        exist = os.path.isfile(os.path.join(save_dir, fname + '_change.png'))
        i = 0
        while exist:
            fname = "%s_%i" % (figure_file_name, i)
            exist = os.path.isfile(
                os.path.join(save_dir, fname + '_change.png'))
            i += 1
        figure_file_name = fname

    # Extract the data from the dictionary
    value_type = dv['value_type'][0]
    method = dv['method'][0]
    corr = dv['corr']
    dt = dv['value']
    sim_mat = dv['sim_mat']
    stretch_vect = dv['second_axis']
    rtime = convert_time(dv['time'])

    # normalize simmat if requested (each row scaled to a maximum of 1)
    if normalize_simmat:
        sim_mat = sim_mat / np.tile(np.max(sim_mat, axis=1),
                                    (sim_mat.shape[1], 1)).T

    n_stretching = stretch_vect.shape[0]
    stretching_amount = np.max(stretch_vect)

    # Adapt plot details in agreement with the type of dictionary that
    # has been passed
    if (value_type == 'stretch') and (method == 'single_ref'):
        tit = "Single reference dv/v"
        dv_tick_delta = 0.01
        dv_y_label = "dv/v"
    elif (value_type == 'stretch') and (method == 'multi_ref'):
        tit = "Multi reference dv/v"
        dv_tick_delta = 0.01
        dv_y_label = "dv/v"
    elif (value_type == 'shift') and (method == 'time_shift'):
        tit = "Time shift"
        dv_tick_delta = 5
        dv_y_label = "time shift (sample)"
    else:
        print("Error: Wrong dv type!")
        return

    f = plt.figure(figsize=figsize, dpi=dpi)

    # GridSpec needs matplotlib >= 1.1.1; older versions fall back to
    # plain subplot codes
    if matplotlib.__version__ >= '1.1.1':
        gs = gridspec.GridSpec(3, 1, height_ratios=[3, 1, 1])
    else:
        gs = [311, 312, 313]

    # --- similarity matrix panel ---
    ax1 = f.add_subplot(gs[0])
    imh = plt.imshow(sim_mat.T, interpolation='none', aspect='auto')
    if sim_mat_Clim:
        assert len(sim_mat_Clim) == 2, \
            "sim_mat_Clim must be a two element list"
        imh.set_clim(sim_mat_Clim[0], sim_mat_Clim[1])
    plt.gca().get_xaxis().set_visible(False)
    ax1.set_yticks(
        np.floor(np.linspace(0, n_stretching - 1, 7)).astype('int'))
    ax1.set_yticklabels(
        ["%4.3f" % x for x in
         stretch_vect[np.floor(np.linspace(0, n_stretching - 1,
                                           7)).astype('int')][:-1]])
    if 'stats' in dv:
        stats = flatten_recarray(dv['stats'])
        comb_mseedid = \
            stats['network'] + '.' + stats['station'] + \
            '.' + stats['location'] + '.' + stats['channel']
        tit = "%s estimate (%s)" % (tit, comb_mseedid)
    else:
        tit = "%s estimate" % tit
    ax1.set_title(tit)
    ax1.yaxis.set_label_position('right')
    ax1.yaxis.label.set_rotation(270)
    ax1.set_xticklabels([])
    ax1.set_ylabel(dv_y_label)

    # --- dv/v (or shift) panel ---
    ax2 = f.add_subplot(gs[1])
    plt.plot(rtime, -dt, '.')
    plt.xlim([rtime[0], rtime[-1]])
    plt.ylim((-stretching_amount, stretching_amount))
    # draw the marker line only if it lies inside the plotted time span
    if mark_time and not np.all(rtime < mark_time) \
            and not np.all(rtime > mark_time):
        plt.axvline(mark_time, lw=1, color='r')
    ax2.yaxis.set_ticks_position('left')
    ax2.yaxis.set_label_position('right')
    ax2.yaxis.label.set_rotation(270)
    ax2.set_ylabel(dv_y_label)
    ax2.yaxis.set_major_locator(plt.MultipleLocator(dv_tick_delta))
    ax2.yaxis.grid(True, 'major', linewidth=1)
    ax2.xaxis.grid(True, 'major', linewidth=1)
    ax2.set_xticklabels([])

    # --- correlation panel ---
    ax3 = f.add_subplot(gs[2])
    plt.plot(rtime, corr, '.')
    plt.xlim([rtime[0], rtime[-1]])
    ax3.yaxis.set_ticks_position('right')
    ax3.set_ylabel("Correlation")
    plt.ylim((0, 1))
    if mark_time and not np.all(rtime < mark_time) \
            and not np.all(rtime > mark_time):
        plt.axvline(mark_time, lw=1, color='r')
    plt.setp(ax3.get_xticklabels(), rotation=45, ha='right')
    ax3.yaxis.set_major_locator(plt.MultipleLocator(0.2))
    ax3.yaxis.grid(True, 'major', linewidth=1)
    ax3.xaxis.grid(True, 'major', linewidth=1)

    plt.subplots_adjust(hspace=0, wspace=0)

    if figure_file_name is None:
        plt.show()
    else:
        print('saving to %s' % figure_file_name)
        f.savefig(os.path.join(save_dir, figure_file_name + '_change.png'),
                  dpi=dpi)
        plt.close()
def plot_single_corr_matrix(corr_mat, seconds=0, filename=None,
                            normalize=True, normtype='absmax',
                            norm_time_win=None, clim=None,
                            figsize=(8, 6), dpi=72):
    """ Plot a single correlation matrix.

    A simple plot of the correlation matrix `corr_mat` is generated and
    saved under `filename` + '_corrMatrix.png'. If `filename` is None
    (default) the figure is opened interactively instead.

    :type corr_mat: dictionary of the type correlation matrix
    :param corr_mat: correlation matrix to be plotted
    :type seconds: int
    :param seconds: How many seconds will be taken from the central part
        of the correlation function (in a symmetric way with respect to
        zero time lag). 0 plots the whole traces.
    :type filename: string
    :param filename: complete path of the file under which the figure is
        to be saved ('_corrMatrix.png' is appended)
    :type normalize: bool
    :param normalize: If True the matrix will be normalized before plotting
    :type normtype: string
    :param normtype: one of the following 'energy', 'max', 'absmax',
        'abssum' to decide about the way to calculate the normalization.
    :type norm_time_win: list
    :param norm_time_win: list containing start- and endtime in seconds of
        the time window used for normalization. Defaults to the whole trace.
    :type clim: list
    :param clim: lower and upper limit of the colorscale for plotting.
        None or an empty list uses the default color range.
    """
    # NOTE: mutable default arguments replaced by None to avoid the
    # shared-default pitfall; behavior is unchanged.
    if norm_time_win is None:
        norm_time_win = [None, None]

    # reference epoch used to express lag times in seconds
    zerotime = datetime.datetime(1971, 1, 1)

    if normalize:
        corr_mat = corr_mat_normalize(corr_mat, starttime=norm_time_win[0],
                                      endtime=norm_time_win[1],
                                      normtype=normtype)
    if seconds != 0:
        corr_mat = corr_mat_trim(corr_mat, -seconds, +seconds)

    X = corr_mat['corr_data']
    if 'time' in corr_mat:
        time = convert_time(corr_mat['time'])
    else:
        time = np.linspace(0, X.shape[0], X.shape[0])

    # lag-time axis in seconds relative to `zerotime`
    tlag = np.linspace(
        (convert_time([corr_mat['stats']['starttime']])[0] -
         zerotime).total_seconds(),
        (convert_time([corr_mat['stats']['endtime']])[0] -
         zerotime).total_seconds(),
        corr_mat['stats']['npts'])

    plt.figure(figsize=figsize, dpi=dpi)
    plt.gcf().subplots_adjust(left=0.35)
    ax = plt.imshow(X, aspect='auto', origin='upper', interpolation='none',
                    extent=(tlag[0], tlag[-1], X.shape[0], 0))
    if clim:
        plt.clim(clim)
    plt.colorbar(format='%1.2g')
    ax.axes.xaxis.set_major_locator(
        ticker.MaxNLocator(nbins=7, integer=True, symmetric=True))
    try:
        row, _ = X.shape
        # number of y labels between 2 and 15
        ynbins = max(2, min(15, row // 15))
    except Exception:
        ynbins = 1
    ax.axes.yaxis.set_major_locator(
        ticker.MaxNLocator(nbins=ynbins, integer=True, prune='upper'))
    # replace the numeric y ticks with the corresponding dates
    ytickpos = np.array(ax.axes.yaxis.get_ticklocs()).astype('i')
    _ = ax.axes.yaxis.set_ticklabels(time[ytickpos])

    plt.ylabel('Days')
    plt.title('Correlation Matrix')
    plt.xlabel('Correlation time [s]')

    if filename is None:
        plt.show()
    else:
        plt.savefig(filename + '_corrMatrix.png', dpi=dpi)
        plt.close()
def _single_corr_trace_to_obspy_trace(trace):
    """ Convert a correlation trace dictionary to an obspy trace.

    Convert a single correlation trace into an
    :class:`~obspy.core.trace.Trace` object.

    :type trace: dictionary of type correlation trace
    :param trace: input data to be converted

    :rtype: :class:`~obspy.core.trace.Trace`
    :return: **tr**: the obspy object containing the data

    :raises Exception: re-raises whatever occurs while copying one of the
        mandatory stats keys (after printing the offending key)
    """
    tr = Trace(data=np.squeeze(trace['corr_trace']))
    # mandatory seed-style keys; a failure to copy one of these re-raises
    stats_keys = ['network', 'station', 'location',
                  'channel', 'npts', 'sampling_rate']
    # optional SAC geo-information keys
    sac_keys = ['baz', 'az', 'stla', 'stlo', 'stel',
                'evla', 'evlo', 'evel', 'dist']

    def _copy_substats(src, dest):
        # Copy starttime plus mandatory and optional keys into `dest`
        # (an obspy Stats object). Mandatory keys re-raise on failure,
        # optional geo keys are silently skipped when missing.
        dest['starttime'] = UTCDateTime(
            convert_time([src['starttime']])[0])
        for key in stats_keys:
            try:
                dest[key] = src[key]
            except Exception:
                print('Error copying key: %s' % key)
                raise
        for key in sac_keys:
            try:
                dest[key] = src[key]
            except Exception:
                pass

    # copy stats of the correlation itself
    for key in stats_keys:
        try:
            tr.stats[key] = trace['stats'][key]
        except Exception:
            print('Error copying key: %s' % key)
            raise

    # special keys
    tr.stats['starttime'] = UTCDateTime(
        convert_time([trace['stats']['starttime']])[0])

    # geo information is copied only when ALL sac keys are present
    if all(key in trace['stats'] for key in sac_keys):
        tr.stats['sac'] = {}
        for key in sac_keys:
            tr.stats['sac'][key] = trace['stats'][key]

    # copy the stats of the two original traces, when present
    if 'stats_tr1' in trace:
        tr.stats_tr1 = Stats()
        _copy_substats(trace['stats_tr1'], tr.stats_tr1)
    if 'stats_tr2' in trace:
        tr.stats_tr2 = Stats()
        _copy_substats(trace['stats_tr2'], tr.stats_tr2)

    return tr
def plot_spectrogram(spectrogram, filename=None, freq_range=None, clim=None,
                     figsize=(8, 6), dpi=72):
    """ Plot a spectrogram.

    A simple plot of the spectrogram matrix is generated and saved under
    `filename` + '_spectrogram.png'. If `filename` is None (default) the
    figure is opened interactively instead.

    NOTE(review): this function is defined twice in this module; this later
    definition shadows the earlier, identical one.

    :type spectrogram: dictionary of the type spectrogram
    :param spectrogram: spectrogram to be plotted
    :type filename: string
    :param filename: complete path of the file under which the figure is to
        be saved ('_spectrogram.png' is appended). If None the figure is
        shown interactively.
    :type freq_range: list like
    :param freq_range: limits of the frequency range to plot. None or an
        empty list plots the full range.
    :type clim: list
    :param clim: lower and upper limit of the colorscale for plotting.
        None or an empty list uses the default color range.
    """
    # NOTE: mutable default arguments ([]) replaced by None to avoid the
    # shared-default pitfall; the truthiness checks below are unaffected.
    check = spectrogram_check(spectrogram)
    if not check['valid']:
        print('spectrogram is not a valid spectrogram dictionary')
        return

    frequency = spectrogram['frequency'].flatten()
    if freq_range:
        # index of the largest frequency below each limit
        # (assumes `frequency` is sorted in ascending order — TODO confirm)
        start = np.argmax(frequency[frequency <= freq_range[0]])
        stop = np.argmax(frequency[frequency <= freq_range[1]])
        X = np.array(spectrogram['spec_mat'])[:, start:stop]
        frequency = frequency[start:stop]
    else:
        X = spectrogram['spec_mat']

    if 'time' in spectrogram:
        time = convert_time(spectrogram['time'])
    else:
        time = np.linspace(0, X.shape[0], X.shape[0])

    plt.figure(figsize=figsize, dpi=dpi)
    plt.gcf().subplots_adjust(left=0.35)
    # rows (time windows) on the y axis, frequency on the x axis
    ax = plt.imshow(np.log(X), aspect='auto', origin='upper',
                    interpolation='none',
                    extent=[frequency[0], frequency[-1], X.shape[0], 0])
    if clim:
        plt.clim(clim)
    plt.colorbar(format='%1.2g')
    ax.axes.xaxis.set_major_locator(
        ticker.MaxNLocator(nbins=7, integer=True, symmetric=True))
    try:
        row, _ = X.shape
        # number of y labels between 2 and 15
        ynbins = max(2, min(15, row // 15))
    except Exception:
        ynbins = 1
    ax.axes.yaxis.set_major_locator(
        ticker.MaxNLocator(nbins=ynbins, integer=True, prune='upper'))
    # replace the numeric y ticks with the corresponding dates
    ytickpos = np.array(ax.axes.yaxis.get_ticklocs()).astype('i')
    _ = ax.axes.yaxis.set_ticklabels(time[ytickpos])

    plt.ylabel('Date')
    plt.title('Spectrogram')
    plt.xlabel('Frequency [Hz]')

    if filename is None:
        plt.show()
    else:
        plt.savefig(filename + '_spectrogram.png', dpi=dpi)
        plt.close()
def plot_single_corr_matrix(corr_mat, seconds=0, filename=None,
                            normalize=True, normtype='absmax',
                            norm_time_win=None, clim=None,
                            figsize=(8, 6), dpi=72):
    """ Plot a single correlation matrix.

    A simple plot of the correlation matrix `corr_mat` is generated and
    saved under `filename` + '_corrMatrix.png'. If `filename` is None
    (default) the figure is opened interactively instead.

    NOTE(review): this function is defined twice in this module; this later
    definition shadows the earlier, identical one.

    :type corr_mat: dictionary of the type correlation matrix
    :param corr_mat: correlation matrix to be plotted
    :type seconds: int
    :param seconds: How many seconds will be taken from the central part
        of the correlation function (in a symmetric way with respect to
        zero time lag). 0 plots the whole traces.
    :type filename: string
    :param filename: complete path of the file under which the figure is
        to be saved ('_corrMatrix.png' is appended)
    :type normalize: bool
    :param normalize: If True the matrix will be normalized before plotting
    :type normtype: string
    :param normtype: one of the following 'energy', 'max', 'absmax',
        'abssum' to decide about the way to calculate the normalization.
    :type norm_time_win: list
    :param norm_time_win: list containing start- and endtime in seconds of
        the time window used for normalization. Defaults to the whole trace.
    :type clim: list
    :param clim: lower and upper limit of the colorscale for plotting.
        None or an empty list uses the default color range.
    """
    # NOTE: mutable default arguments replaced by None to avoid the
    # shared-default pitfall; behavior is unchanged.
    if norm_time_win is None:
        norm_time_win = [None, None]

    # reference epoch used to express lag times in seconds
    zerotime = datetime.datetime(1971, 1, 1)

    if normalize:
        corr_mat = corr_mat_normalize(corr_mat, starttime=norm_time_win[0],
                                      endtime=norm_time_win[1],
                                      normtype=normtype)
    if seconds != 0:
        corr_mat = corr_mat_trim(corr_mat, -seconds, +seconds)

    X = corr_mat['corr_data']
    if 'time' in corr_mat:
        time = convert_time(corr_mat['time'])
    else:
        time = np.linspace(0, X.shape[0], X.shape[0])

    # lag-time axis in seconds relative to `zerotime`
    tlag = np.linspace(
        (convert_time([corr_mat['stats']['starttime']])[0] -
         zerotime).total_seconds(),
        (convert_time([corr_mat['stats']['endtime']])[0] -
         zerotime).total_seconds(),
        corr_mat['stats']['npts'])

    plt.figure(figsize=figsize, dpi=dpi)
    plt.gcf().subplots_adjust(left=0.35)
    ax = plt.imshow(X, aspect='auto', origin='upper', interpolation='none',
                    extent=(tlag[0], tlag[-1], X.shape[0], 0))
    if clim:
        plt.clim(clim)
    plt.colorbar(format='%1.2g')
    ax.axes.xaxis.set_major_locator(
        ticker.MaxNLocator(nbins=7, integer=True, symmetric=True))
    try:
        row, _ = X.shape
        # number of y labels between 2 and 15
        ynbins = max(2, min(15, row // 15))
    except Exception:
        ynbins = 1
    ax.axes.yaxis.set_major_locator(
        ticker.MaxNLocator(nbins=ynbins, integer=True, prune='upper'))
    # replace the numeric y ticks with the corresponding dates
    ytickpos = np.array(ax.axes.yaxis.get_ticklocs()).astype('i')
    _ = ax.axes.yaxis.set_ticklabels(time[ytickpos])

    plt.ylabel('Days')
    plt.title('Correlation Matrix')
    plt.xlabel('Correlation time [s]')

    if filename is None:
        plt.show()
    else:
        plt.savefig(filename + '_corrMatrix.png', dpi=dpi)
        plt.close()
def plot_dv(dv,
            save_dir='.',
            figure_file_name=None,
            mark_time=None,
            normalize_simmat=False,
            sim_mat_Clim=None,
            figsize=(9, 11), dpi=72):
    """ Plot the "extended" dv dictionary

    This function is thought to plot the result of the velocity change
    estimate as output by
    :class:`~miic.core.stretch_mod.multi_ref_vchange_and_align` and
    successively "extended" to contain also the timing in the form
    {'time': time_vect} where `time_vect` is a :class:`~numpy.ndarray` of
    :class:`~datetime.datetime` objects. This addition can be done, for
    example, using the function :class:`~miic.core.miic_utils.add_var_to_dict`.
    The produced figure is saved in `save_dir` that, if necessary, is created.
    It is also possible to pass a "special" time value `mark_time` that will
    be represented in the `dv/v` and `corr` plot as a vertical line; it can
    be a string (i.e. YYYY-MM-DD) or directly a :class:`~datetime.datetime`
    object.
    If the `dv` dictionary also contains a 'stats' keyword the combined
    seed id is reported in the title.
    In case the chosen filename exists in the `save_dir` directory, a suffix
    _<n> with n:0..+Inf is added.
    The aspect of the plot may change depending on the matplotlib version.
    The recommended one is matplotlib 1.1.1

    NOTE(review): this function is defined twice in this module; this later
    definition shadows the earlier, identical one.

    :type dv: dict
    :param dv: velocity change estimate dictionary as output by
        :class:`~miic.core.stretch_mod.multi_ref_vchange_and_align` and
        successively "extended" as described above.
    :type save_dir: string
    :param save_dir: Directory where to save the produced figure. It is
        created if it doesn't exist.
    :type figure_file_name: string
    :param figure_file_name: filename to use for saving the figure. If None
        the figure is displayed in interactive mode.
    :type mark_time: string or :class:`~datetime.datetime` object
    :param mark_time: It is a "special" time location that will be
        represented in the `dv/v` and `corr` plot as a vertical line.
    :type normalize_simmat: Bool
    :param normalize_simmat: if True the simmat will be normalized to a
        maximum of 1. Defaults to False
    :type sim_mat_Clim: 2 element array_like
    :param sim_mat_Clim: if non-empty it sets the color scale limits of the
        similarity matrix image

    :raises ValueError: if the `dv` dictionary is incomplete
    """
    # avoid a mutable default argument; [] behaves identically below
    if sim_mat_Clim is None:
        sim_mat_Clim = []

    check_state = dv_check(dv)

    # Check if the dv dictionary is "correct"
    if check_state['is_incomplete']:
        print("Error: Incomplete dv")
        print("Possible errors:")
        for key in check_state:
            # BUG FIX: original used `key is not 'is_incomplete'` which
            # compares string identity, not equality
            if key != 'is_incomplete':
                print("%s: %s" % (key, check_state[key]))
        raise ValueError

    # For compatibility with TraitsUI
    if mark_time == '':
        mark_time = None

    if mark_time and isinstance(mark_time, str):
        mark_time = from_str_to_datetime(mark_time, datetimefmt=True)
    elif mark_time and isinstance(mark_time, datetime.datetime):
        pass
    elif mark_time:
        print("Error: wrong mark_time format!")
        mark_time = None

    if not os.path.isdir(save_dir):
        print("Warning: `save_dir` doesn't exist. Creating ...")
        # makedirs (instead of mkdir) also handles nested paths
        os.makedirs(save_dir)
        print("Directory %s created" % save_dir)

    # Create a unique filename if TraitsUI-default is given.
    # BUG FIX: the original pre-appended '_change.png' to `fname` before the
    # existence loop, so when no file existed the figure was saved as
    # 'plot_default_change.png_change.png'.
    if figure_file_name == 'plot_default':
        fname = figure_file_name
        exist = os.path.isfile(os.path.join(save_dir, fname + '_change.png'))
        i = 0
        while exist:
            fname = "%s_%i" % (figure_file_name, i)
            exist = os.path.isfile(
                os.path.join(save_dir, fname + '_change.png'))
            i += 1
        figure_file_name = fname

    # Extract the data from the dictionary
    value_type = dv['value_type'][0]
    method = dv['method'][0]
    corr = dv['corr']
    dt = dv['value']
    sim_mat = dv['sim_mat']
    stretch_vect = dv['second_axis']
    rtime = convert_time(dv['time'])

    # normalize simmat if requested (each row scaled to a maximum of 1)
    if normalize_simmat:
        sim_mat = sim_mat / np.tile(np.max(sim_mat, axis=1),
                                    (sim_mat.shape[1], 1)).T

    n_stretching = stretch_vect.shape[0]
    stretching_amount = np.max(stretch_vect)

    # Adapt plot details in agreement with the type of dictionary that
    # has been passed
    if (value_type == 'stretch') and (method == 'single_ref'):
        tit = "Single reference dv/v"
        dv_tick_delta = 0.01
        dv_y_label = "dv/v"
    elif (value_type == 'stretch') and (method == 'multi_ref'):
        tit = "Multi reference dv/v"
        dv_tick_delta = 0.01
        dv_y_label = "dv/v"
    elif (value_type == 'shift') and (method == 'time_shift'):
        tit = "Time shift"
        dv_tick_delta = 5
        dv_y_label = "time shift (sample)"
    else:
        print("Error: Wrong dv type!")
        return

    f = plt.figure(figsize=figsize, dpi=dpi)

    # GridSpec needs matplotlib >= 1.1.1; older versions fall back to
    # plain subplot codes
    if matplotlib.__version__ >= '1.1.1':
        gs = gridspec.GridSpec(3, 1, height_ratios=[3, 1, 1])
    else:
        gs = [311, 312, 313]

    # --- similarity matrix panel ---
    ax1 = f.add_subplot(gs[0])
    imh = plt.imshow(sim_mat.T, interpolation='none', aspect='auto')
    if sim_mat_Clim:
        assert len(sim_mat_Clim) == 2, \
            "sim_mat_Clim must be a two element list"
        imh.set_clim(sim_mat_Clim[0], sim_mat_Clim[1])
    plt.gca().get_xaxis().set_visible(False)
    ax1.set_yticks(
        np.floor(np.linspace(0, n_stretching - 1, 7)).astype('int'))
    ax1.set_yticklabels(
        ["%4.3f" % x for x in
         stretch_vect[np.floor(np.linspace(0, n_stretching - 1,
                                           7)).astype('int')][:-1]])
    if 'stats' in dv:
        stats = flatten_recarray(dv['stats'])
        comb_mseedid = \
            stats['network'] + '.' + stats['station'] + \
            '.' + stats['location'] + '.' + stats['channel']
        tit = "%s estimate (%s)" % (tit, comb_mseedid)
    else:
        tit = "%s estimate" % tit
    ax1.set_title(tit)
    ax1.yaxis.set_label_position('right')
    ax1.yaxis.label.set_rotation(270)
    ax1.set_xticklabels([])
    ax1.set_ylabel(dv_y_label)

    # --- dv/v (or shift) panel ---
    ax2 = f.add_subplot(gs[1])
    plt.plot(rtime, -dt, '.')
    plt.xlim([rtime[0], rtime[-1]])
    plt.ylim((-stretching_amount, stretching_amount))
    # draw the marker line only if it lies inside the plotted time span
    if mark_time and not np.all(rtime < mark_time) \
            and not np.all(rtime > mark_time):
        plt.axvline(mark_time, lw=1, color='r')
    ax2.yaxis.set_ticks_position('left')
    ax2.yaxis.set_label_position('right')
    ax2.yaxis.label.set_rotation(270)
    ax2.set_ylabel(dv_y_label)
    ax2.yaxis.set_major_locator(plt.MultipleLocator(dv_tick_delta))
    ax2.yaxis.grid(True, 'major', linewidth=1)
    ax2.xaxis.grid(True, 'major', linewidth=1)
    ax2.set_xticklabels([])

    # --- correlation panel ---
    ax3 = f.add_subplot(gs[2])
    plt.plot(rtime, corr, '.')
    plt.xlim([rtime[0], rtime[-1]])
    ax3.yaxis.set_ticks_position('right')
    ax3.set_ylabel("Correlation")
    plt.ylim((0, 1))
    if mark_time and not np.all(rtime < mark_time) \
            and not np.all(rtime > mark_time):
        plt.axvline(mark_time, lw=1, color='r')
    plt.setp(ax3.get_xticklabels(), rotation=45, ha='right')
    ax3.yaxis.set_major_locator(plt.MultipleLocator(0.2))
    ax3.yaxis.grid(True, 'major', linewidth=1)
    ax3.xaxis.grid(True, 'major', linewidth=1)

    plt.subplots_adjust(hspace=0, wspace=0)

    if figure_file_name is None:
        plt.show()
    else:
        print('saving to %s' % figure_file_name)
        f.savefig(os.path.join(save_dir, figure_file_name + '_change.png'),
                  dpi=dpi)
        plt.close()
def clock_offset_inversion(par):
    """Invert pairwise time differences for individual clock errors

    A number of pairwise time difference measurements (shifts of noise
    correlations) are used to estimate the individual clock errors that best
    explain the time differences.

    Results (clock delays, figure, pickle/matlab/text files) are written to
    the directories given in ``par['ce']``.

    :type par: dict
    :param par: project parameters

    :rtype: :class:`~numpy.ndarray`
    :return: matrix of estimated clock delays
        (rows: measurement times, columns: stations)
    """
    logging.basicConfig(
        filename=os.path.join(par['log_dir'], 'clock_offset_inversion.log'),
        level=logging.DEBUG, format='%(asctime)s %(message)s')
    logger = logging.getLogger('clock_offset_inversion')
    logger.info('Clock_offset_inversion.')

    create_path(par['ce']['res_dir'])
    create_path(par['ce']['fig_dir'])

    # list of time windows where changes should have been analyzed
    # according to the given parameters
    start_time_list = datetime_list(par['dt']['start_date'],
                                    par['dt']['end_date'],
                                    par['dt']['date_inc'])

    DIFFS = {}
    # loop over station combinations
    for comb in par['net']['comb']:
        station1 = par['net']['stations'][comb['sta'][0]]
        station2 = par['net']['stations'][comb['sta'][1]]
        print("%s %s" % (station1, station2))
        comb_key = station1 + '-' + station2
        # loop over channel combinations
        for cha in comb['cha']:
            comp = par['net']['channels'][cha[0]] + \
                par['net']['channels'][cha[1]]
            print(comp)
            file_pattern = '*%s%s.%s%s.*.%s.mat' % (
                station1.split('.')[0], station2.split('.')[0],
                station1.split('.')[1], station2.split('.')[1], comp)
            filenames = dir_read(par['dt']['res_dir'], file_pattern)
            if len(filenames) != 1:
                logging.info('%d files found for correlation matrix '
                             'matching %s. No processing done.'
                             % (len(filenames), file_pattern))
                continue
            filename = filenames[0]
            dt = mat_to_ndarray(filename)
            dt_bl = cpr.dt_baseline(dt)
            # correct baseline
            dt['value'] -= dt_bl
            # check if the time period matches the expected window list
            flag = 0
            if len(dt['time']) != len(start_time_list):
                flag += 1
            else:
                for idx, tt in enumerate(convert_time(dt['time'])):
                    if tt != start_time_list[idx]:
                        flag += 1
            # only if dt measurements span the same time
            if flag == 0:
                if comb_key not in DIFFS:
                    DIFFS[comb_key] = {'diff': [], 'comp': [], 'corr': []}
                DIFFS[comb_key]['diff'].append(
                    dt['value'] / dt['stats']['sampling_rate'])
                DIFFS[comb_key]['comp'].append(comp)
                DIFFS[comb_key]['corr'].append(dt['corr'])
        # calculate averages over components for same station combinations
        if comb_key in DIFFS:
            DIFFS[comb_key]['diff'] = np.array(DIFFS[comb_key]['diff'])
            DIFFS[comb_key]['corr'] = np.array(DIFFS[comb_key]['corr'])
            DIFFS[comb_key]['mean_diff'] = np.mean(DIFFS[comb_key]['diff'],
                                                   axis=0)
            DIFFS[comb_key]['std'] = np.std(DIFFS[comb_key]['diff'], axis=0)
        else:
            # no usable measurement for this combination: fill with NaNs
            DIFFS[comb_key] = {
                'diff': np.zeros(len(start_time_list)) * np.nan,
                'comp': [],
                'corr': np.zeros(len(start_time_list)) * np.nan,
                'mean_diff': np.zeros(len(start_time_list)) * np.nan,
                'std': np.zeros(len(start_time_list)) * np.nan}

    # create the matrix to invert: one row per combination with +1/-1 at
    # the two involved stations
    G = np.zeros([len(par['net']['comb']), len(par['net']['stations'])])
    for cnt, comb in enumerate(par['net']['comb']):
        G[cnt, comb['sta'][0]] = 1
        G[cnt, comb['sta'][1]] = -1
    # NOTE(review): `aG`/`atG` feed only the commented-out lstsq variant
    # below; kept for reference
    aG = G[:, :-1]

    # do the inversion for every measurement
    co = np.zeros((len(start_time_list), len(par['net']['stations'])))
    co[:] = np.nan
    coe = np.zeros((len(start_time_list), len(par['net']['stations'])))
    coe[:] = np.nan
    R = np.zeros((len(start_time_list), len(par['net']['comb'])))
    R[:] = np.nan
    for nd in range(len(start_time_list)):
        # data vector: mean time differences of this window
        d = np.zeros([len(par['net']['comb']), 1])
        for cnt, comb in enumerate(par['net']['comb']):
            station1 = par['net']['stations'][comb['sta'][0]]
            station2 = par['net']['stations'][comb['sta'][1]]
            d[cnt, 0] = DIFFS[station1 + '-' + station2]['mean_diff'][nd]
        # delete rows in case some measurements are missing
        atG = deepcopy(aG)
        tG = deepcopy(G)
        nanind = np.where(np.isnan(d))[0]
        tG = np.delete(tG, nanind, axis=0)
        atG = np.delete(atG, nanind, axis=0)
        td = np.delete(d, nanind, 0)
        # continue if no station combination is present at this time
        if len(td) < 1:
            logging.info('No data for %s.' % start_time_list[nd])
            continue
        # delete columns that only contain zeros (unconstrained stations)
        idy = np.where(np.sum(np.abs(tG), axis=0) == 0)[0]
        tG = np.delete(tG, idy, axis=1)
        atG = np.delete(atG, idy, axis=1)
        # tm = np.linalg.lstsq(atG, td, rcond=1e-5)[0]
        # calc inverse of G in the least squares sense
        itG = np.linalg.pinv(tG)
        m = np.dot(itG, td)
        # calculate residuals (dead duplicate np.squeeze version removed)
        tR = np.dot(tG, m) - td
        # take residuals as proxy of standard deviation and estimate
        # the model variance
        mvar = np.dot(itG ** 2, tR ** 2)
        # scatter residuals back into the full combination set
        cnt = 0
        for idx in range(len(par['net']['comb'])):
            if idx not in nanind:
                R[nd, idx] = tR[cnt]
                cnt += 1
        # scatter model and its error back into the full station set
        cnt = 0
        for idx in range(len(par['net']['stations'])):
            if idx not in idy:
                co[nd, idx] = m[cnt]
                coe[nd, idx] = np.sqrt(mvar[cnt])
                cnt += 1
    std_err = np.nanstd(R, axis=1)

    # adjust for reference stations: remove their mean delay per window
    for nd in range(len(start_time_list)):
        mr = []
        for rs in par['ce']['ref_stations']:
            try:
                mr.append(co[nd, par['net']['stations'].index(rs)])
            except ValueError:
                # BUG FIX: format string used '% missing' (invalid
                # conversion) instead of '%s missing', which raised
                # ValueError when this handler actually ran
                logging.info('Reference station %s missing on %s.'
                             % (rs, start_time_list[nd]))
        co[nd, :] -= np.nanmean(np.array(mr))

    # create a data structure
    ce = {'time': convert_time_to_string(start_time_list),
          'clock_errors': {},
          'std_err': std_err}
    for ind, sta in enumerate(par['net']['stations']):
        ce['clock_errors'][sta] = {'clock_delay': co[:, ind],
                                   'standard_error': coe[:, ind]}

    if par['ce']['plot_clock_error']:
        tt = convert_time(ce['time'])
        nsta = len(ce['clock_errors'].keys())
        plt.figure()
        for ind, sta in enumerate(ce['clock_errors'].keys()):
            plt.subplot(nsta, 1, ind + 1)
            # shade +- 3 standard errors around the delay curve
            lower = (ce['clock_errors'][sta]['clock_delay'] -
                     ce['clock_errors'][sta]['standard_error'] * 3)
            upper = (ce['clock_errors'][sta]['clock_delay'] +
                     ce['clock_errors'][sta]['standard_error'] * 3)
            plt.fill_between(tt, lower, upper,
                             facecolor=np.array([1, 1, 1]) * 0.5,
                             edgecolor=np.array([1, 1, 1]) * 0.5)
            plt.plot(tt, ce['clock_errors'][sta]['clock_delay'], 'r', lw=2,
                     label=sta)
            plt.ylabel('clock delay [s]')
            plt.legend(loc=0)
        plt.savefig(os.path.join(par['ce']['fig_dir'], 'clock_delays.eps'))

    # persist the results (pickle, matlab and plain text)
    with open(os.path.join(par['ce']['res_dir'],
                           'clock_delays.pkl'), 'wb') as f:
        pickle.dump(ce, f)
    save_dict_to_matlab_file(
        os.path.join(par['ce']['res_dir'], 'clock_delays.mat'), ce)
    with open(os.path.join(par['ce']['res_dir'],
                           'clock_delays.txt'), 'wb') as f:
        f.write('relative (up to an additive constant) errors '
                'of the station clocks\n')
        f.write('time\tstd_err\t')
        for sta in par['net']['stations']:
            f.write(sta + '\t')
        f.write('\n')
        for ii, t in enumerate(ce['time']):
            f.write('%s\t%e\t' % (t, ce['std_err'][ii]))
            for sta in par['net']['stations']:
                f.write('%e\t' % ce['clock_errors'][sta]['clock_delay'][ii])
            f.write('\n')
    return co