Example 1
0
def axis_degree(ax, axis="x"):
    """Replace the numeric tick labels of one axis with degree labels.

    Each tick value on the selected axis is converted to its latitude or
    longitude degree-string representation via ``common.num2latlon``.

    PARAMETERS
        ax : matplotlib axes whose tick labels are rewritten in place.
        axis (string, optional) : Either "x" (default) or "y".

    RAISES
        ValueError : If *axis* is neither "x" nor "y".
    """
    if axis == "x":
        degree_labels = []
        for tick in ax.get_xticks():
            degree_labels.append(
                common.num2latlon(tick, 0, mode="each", x180=True, dtype="label")[1])
        ax.set_xticklabels(degree_labels)
    elif axis == "y":
        degree_labels = []
        for tick in ax.get_yticks():
            degree_labels.append(
                common.num2latlon(0, tick, mode="each", x180=True, dtype="label")[0])
        ax.set_yticklabels(degree_labels)
    else:
        raise ValueError("Invalid '%s' axis." % (axis))
Example 2
0
def axis_degree(ax, axis='x'):
    """Replace the numeric tick labels of one axis with degree labels.

    Each tick value on the selected axis is converted to its latitude or
    longitude degree-string representation via ``common.num2latlon``.

    PARAMETERS
        ax : matplotlib axes whose tick labels are rewritten in place.
        axis (string, optional) : Either 'x' (default) or 'y'.

    RAISES
        ValueError : If *axis* is neither 'x' nor 'y'.
    """
    # Reject anything other than the two supported axes up front.
    if axis not in ('x', 'y'):
        raise ValueError('Invalid \'%s\' axis.' % (axis))
    if axis == 'x':
        ax.set_xticklabels(
            [common.num2latlon(tick, 0, mode='each', x180=True,
                               dtype='label')[1]
             for tick in ax.get_xticks()])
    else:
        ax.set_yticklabels(
            [common.num2latlon(0, tick, mode='each', x180=True,
                               dtype='label')[0]
             for tick in ax.get_yticks()])
Example 3
0
def wavelet_analysis(z,
                     tm,
                     lon=None,
                     lat=None,
                     mother='Morlet',
                     alpha=0.0,
                     siglvl=0.95,
                     loc=None,
                     onlyloc=False,
                     periods=None,
                     sel_periods=None,
                     show=False,
                     save='',
                     dsave='',
                     prefix='',
                     labels=None,
                     title=None,
                     name=None,
                     fpath='',
                     fpattern='',
                     std=None,
                     crange=None,
                     levels=None,
                     cmap=cm.GMT_no_green,
                     debug=False):
    """Continuous wavelet transform and significance analysis.

    The analysis is made using the methodology and statistical approach
    suggested by Torrence and Compo (1998).

    Depending on the dimensions of the input array, three different
    kinds of approaches are taken. If the input array is one-dimensional
    then only a simple analysis is performed. If the array is
    bi- or three-dimensional then spectral Hovmoller diagrams are drawn
    for each Fourier period given within a range of +/-25%.

    PARAMETERS
        z (array like) :
            Input data. The data array should have one of these forms,
            z[tm], z[tm, lat] or z[tm, lat, lon].
        tm (array like) :
            Time axis. It should contain values in matplotlib date
            format (i.e. number of days since 0001-01-01 UTC).
        lon (array like, optional) :
            Longitude.
        lat (array like, optional) :
            Latitude.
        mother (string, optional) :
            Gives the name of the mother wavelet to be used. Possible
            values are 'Morlet' (default), 'Paul' or 'Mexican hat'.
        alpha (float or dictionary, optional) :
            Lag-1 autocorrelation for background noise.  Default value
            is 0.0 (white noise). If different autocorrelation
            coefficients should be used for different locations, then
            the input should contain a dictionary with 'lon', 'lat',
            'map' keys as for the std parameter.
        siglvl (float, optional) :
            Significance level. Default value is 0.95.
        loc (array like, optional) :
            Special locations of interest. If the input array is of
            higher dimenstions, the output of the simple wavelet
            analysis of each of the locations is output. The list
            should contain the pairs of (lon, lat) for each locations
            of interest.
        onlyloc (boolean, optional) :
            If set to true then only the specified locations are
            analysed. The default is false.
        periods (array like, optional) :
            Special Fourier periods of interest in case of analysis of
            higher dimensions (in years).
        sel_periods (array like, optional) :
            Select which Fourier periods spectral power are averaged.
        show (boolean, optional) :
            If set to true the the resulting maps are shown on screen.
        save (string, optional) :
            The path in which the resulting plots are to be saved. If
            not set, then no images will be saved.
        dsave (string, optional) :
            If set, saves the scale averaged power spectrum series to
            this path. This is especially useful if memory is an issue.
        prefix (string, optional) :
            Prefix to retain naming conventions such as basin.
        labels (dictionary, optional) :
            Sets the labels for the plot axis.
        title (string, array like, optional) :
            Title of each of the selected periods.
        name (string, array like, optional) :
            Name of each of the selected periods. Used when saving the
            results to files.
        fpath (string, optional) :
            Path for the source files to be loaded when memory issues
            are a concern.
        fpattern (string, optional) :
            Regular expression pattern to match file names.
        std (dictionary, optional) :
            A dictionary containing a map of the standard deviation of
            the analysed time series. To set the longitude and latitude
            coordinates of the map, they should be included as
            separate 'lon' and 'lat' key items. If they are omitted,
            then the regular input parameters are assumed. Accepted
            standard deviation error is set in key 'err' (default value
            is 1e-2).
        crange (array like, optional) :
            Array of power levels to be used in average Hovmoler colour bar.
        levels (array like, optional) :
            Array of power levels to be used in spectrogram colour bar.
        cmap (colormap, optional) :
            Sets the colour map to be used in the plots. The default is
            the Generic Mapping Tools (GMT) no green.
        debug (boolean, optional) :
            If set to True then warnings are shown.

    OUTPUT
        If show or save are set, plots either on screen and or on file
        according to the specified parameters.

        If dsave parameter is set, also saves the scale averaged power
        series to files.

    RETURNS
        wave (dictionary) :
            Dictionary containing the resulting calculations from the
            wavelet analysis according to the input parameters. The
            output items might be:
                scale --
                    Wavelet scales.
                period --
                    Equivalent Fourier periods (in days).
                power_spectrum --
                    Wavelet power spectrum (in units**2).
                power_significance --
                    Relative significance of the power spectrum.
                global_power --
                    Global wavelet power spectrum (in units**2).
                scale_spectrum  --
                    Scale averaged wavelet spectra (in units**2)
                    according to selected periods.
                scale_significance --
                    Relative significance of the scale averaged wavelet
                    spectra.
                fft --
                    Fourier spectrum.
                fft_first --
                    Fourier spectrum of the first half of the
                    time-series.
                fft_second --
                    Fourier spectrum of the second half of the
                    time-series.
                fft_period --
                    Fourier periods (in days).
                trend --
                    Signal trend (in units/yr).
                wavelet_trend --
                    Wavelet spectrum trends (in units**2/yr).

    """
    t1 = time()
    result = {}

    # Guards against the mutable-default-argument pitfall: the previous
    # defaults (sel_periods=[], labels=dict(), std=dict()) were shared
    # between calls, and 'std' in particular is mutated below (the 'map',
    # 'lon180', 'err' keys), which leaked state from one call to the next.
    if sel_periods is None:
        sel_periods = []
    if labels is None:
        labels = dict()
    if std is None:
        std = dict()

    # Resseting unit labels for hovmoller plots
    hlabels = dict(labels)
    hlabels['units'] = ''

    # Setting some titles and paths
    if name is None:
        name = title

    # Working with the std parameter and setting its properties:
    if 'val' in std.keys():
        if 'lon' not in std.keys():
            std['lon'] = lon
        std['lon180'] = common.lon180(std['lon'])
        if 'lat' not in std.keys():
            std['lat'] = lat
        if 'err' not in std.keys():
            std['err'] = 1e-2
        std['map'] = True
    else:
        std['map'] = False

    # Lag-1 autocorrelation parameter
    if type(alpha).__name__ == 'dict':
        if 'lon' not in alpha.keys():
            alpha['lon'] = lon
        alpha['lon180'] = common.lon180(alpha['lon'])
        if 'lat' not in alpha.keys():
            alpha['lat'] = lat
        alpha['mean'] = alpha['val'].mean()
        alpha['map'] = True
        alpha['calc'] = False
    else:
        if alpha == -1:
            alpha = {'mean': -1, 'calc': True}
        else:
            alpha = {'val': alpha, 'mean': alpha, 'map': False, 'calc': False}

    # Shows some of the options on screen. The print calls use the
    # function form so the statements are valid in both Python 2 and 3.
    print('Average Lag-1 autocorrelation for background noise: %.2f' %
          (alpha['mean']))
    if save:
        print('Saving result figures in \'%s\'.' % (save))
    if dsave:
        print('Saving result data in \'%s\'.' % (dsave))

    if fpath:
        # Gets the list of files to be loaded individually extracts all the
        # latitudes and loads the first file to get the main parameters.
        flist = os.listdir(fpath)
        flist, match = common.reglist(flist, fpattern)
        if len(flist) == 0:
            raise Warning('No files matched search pattern.')
        flist = numpy.asarray(flist)
        lst_lat = []
        for item in match:
            y = string.atof(item[-2])
            if item[-1].upper() == 'S': y *= -1
            lst_lat.append(y)
        # Detect file type from file name
        ftype = fm.detect_ftype(flist[0])
        x, y, tm, z = fm.load_map('%s/%s' % (fpath, flist[0]),
                                  ftype=ftype,
                                  masked=True)
        if lon is None:
            lon = x
        lat = numpy.unique(lst_lat)
        dim = 2
    else:
        # Transforms input arrays in numpy arrays and numpy masked arrays.
        tm = numpy.asarray(tm)
        z = numpy.ma.asarray(z)
        z.mask = numpy.isnan(z)

        # Determines the number of dimensions of the variable to be plotted and
        # the sizes of each dimension.
        a = b = c = None
        dim = len(z.shape)
        if dim == 3:
            c, b, a = z.shape
        elif dim == 2:
            c, a = z.shape
            b = 1
            z = z.reshape(c, b, a)
        else:
            c = z.shape[0]
            a = b = 1
            z = z.reshape(c, b, a)
        if tm.size != c:
            raise Warning('Time and data lengths do not match.')

    # Transforms coordinate arrays into numpy arrays
    s = type(lat).__name__
    if s in ['int', 'float', 'float64']:
        lat = numpy.asarray([lat])
    elif s != 'NoneType':
        lat = numpy.asarray(lat)
    s = type(lon).__name__
    if s in ['int', 'float', 'float64']:
        lon = numpy.asarray([lon])
    elif s != 'NoneType':
        lon = numpy.asarray(lon)

    # Starts the mother wavelet class instance and determines important
    # analysis parameters
    mother = mother.lower()
    if mother == 'morlet':
        mother = wavelet.Morlet()
    elif mother == 'paul':
        mother = wavelet.Paul()
    elif mother in ['mexican hat', 'mexicanhat', 'mexican_hat']:
        mother = wavelet.Mexican_hat()
    else:
        raise Warning('Mother wavelet unknown.')

    t = tm / common.daysinyear  # Time array in years
    dt = tm[1] - tm[0]  # Temporal sampling interval
    try:  # Zonal sampling interval
        dx = lon[1] - lon[0]
    except:
        dx = 1
    try:  # Meridional sampling interval
        dy = lat[1] - lat[0]
    except:
        dy = dx
    if numpy.isnan(dt): dt = 1
    if numpy.isnan(dx): dx = 1
    if numpy.isnan(dy): dy = dx
    dj = 0.25  # Four sub-octaves per octave
    s0 = 2 * dt  # Smallest scale
    J = 7 / dj - 1  # Seven powers of two with dj sub-octaves
    scales = period = None

    # 'is None' instead of comparing type names: correct for both plain
    # values and numpy arrays.
    if crange is None:
        crange = numpy.arange(0, 1.1, 0.1)
    if levels is None:
        levels = 2.**numpy.arange(-3, 6)

    if fpath:
        N = lat.size
        # TODO: refactoring # lon = numpy.arange(-81. - dx / 2., 290. + dx / 2, dx)
        # TODO: refactoring # lat = numpy.unique(numpy.asarray(lst_lat))
        c, b, a = tm.size, lat.size, lon.size
    else:
        N = a * b

    # Making sure that the longitudes range from -180 to 180 degrees and
    # setting the squared search radius R2.
    try:
        lon180 = common.lon180(lon)
    except:
        lon180 = None
    R2 = dx**2 + dy**2
    if numpy.isnan(R2):
        R2 = 65535.
    if loc is not None:
        loc = numpy.asarray([[common.lon180(item[0]), item[1]]
                             for item in loc])

    # Initializes important result variables such as the global wavelet power
    # spectrum map, scale avaraged spectrum time-series and their significance,
    # wavelet power trend map.
    global_power = numpy.ma.empty([J + 1, b, a]) * numpy.nan
    try:
        C = len(periods) + 1
        dT = numpy.diff(periods)
        pmin = numpy.concatenate([[periods[0] - dT[0] / 2],
                                  0.5 * (periods[:-1] + periods[1:])])
        pmax = numpy.concatenate(
            [0.5 * (periods[:-1] + periods[1:]), [periods[-1] + dT[-1] / 2]])
    except:
        # Sets the lowest period to null and the highest to half the time
        # series length.
        C = 1
        pmin = numpy.array([0])
        pmax = numpy.array([(tm[-1] - tm[0]) / 2])
    if type(sel_periods).__name__ in ['int', 'float']:
        sel_periods = [sel_periods]
    elif len(sel_periods) == 0:
        sel_periods = [-1.]
    try:
        if fpath:
            raise Warning('Process files individually')
        avg_spectrum = numpy.ma.empty([C, c, b, a]) * numpy.nan
        mem_error = False
    except:
        avg_spectrum = numpy.ma.empty([C, c, a]) * numpy.nan
        mem_error = True
    avg_spectrum_signif = numpy.ma.empty([C, b, a]) * numpy.nan
    trend = numpy.ma.empty([b, a]) * numpy.nan
    wavelet_trend = numpy.ma.empty([C, b, a]) * numpy.nan
    fft_trend = numpy.ma.empty([C, b, a]) * numpy.nan
    std_map = numpy.ma.empty([b, a]) * numpy.nan
    zero = numpy.ma.empty([c, a])
    fft_spectrum = None
    fft_spectrum1 = None
    fft_spectrum2 = None

    # Walks through each latitude and then through each longitude to perform
    # the temporal wavelet analysis.
    if N == 1:
        plural = ''
    else:
        plural = 's'
    s = 'Spectral analysis of %d location%s... ' % (N, plural)
    stdout.write(s)
    stdout.flush()
    for j in range(b):
        t2 = time()
        isloc = False  # Ressets 'is special location' flag
        hloc = []  # Cleans location list for Hovmoller plots
        zero *= numpy.nan
        if mem_error:
            # Clears average spectrum for next step.
            avg_spectrum *= numpy.nan
            avg_spectrum.mask = False
        if fpath:
            findex = pylab.find(lst_lat == lat[j])
            if len(findex) == 0:
                continue
            ftype = fm.detect_ftype(flist[findex[0]])
            try:
                x, y, tm, z = fm.load_dataset(fpath,
                                              flist=flist[findex],
                                              ftype=ftype,
                                              masked=True,
                                              lon=lon,
                                              lat=lat[j:j + 1],
                                              verbose=True)
            except:
                continue
            z = z[:, 0, :]
            x180 = common.lon180(x)

        # Determines the first and second halves of the time-series and some
        # constants for the FFT
        fft_ta = numpy.ceil(t.min())
        fft_tb = numpy.floor(t.max())
        # NOTE(review): rounds the sum before halving -- possibly intended as
        # numpy.round((fft_ta + fft_tb) / 2); confirm before changing.
        fft_tc = numpy.round(fft_ta + fft_tb) / 2
        fft_ia = pylab.find((t >= fft_ta) & (t <= fft_tc))
        fft_ib = pylab.find((t >= fft_tc) & (t <= fft_tb))
        fft_N = int(2**numpy.ceil(numpy.log2(max([len(fft_ia), len(fft_ib)]))))
        fft_N2 = fft_N / 2 - 1
        fft_dt = t[fft_ib].mean() - t[fft_ia].mean()

        for i in range(a):
            # Some string output.
            try:
                Y, X = common.num2latlon(lon[i],
                                         lat[j],
                                         mode='each',
                                         padding=False)
            except:
                Y = X = '?'

            # Extracts individual time-series from the whole dataset and
            # sets or calculates its standard deviation, squared standard
            # deviation and finally the normalized time-series.
            if fpath:
                try:
                    ilon = pylab.find(x == lon[i])[0]
                    fz = z[:, ilon]
                except:
                    continue
            else:
                fz = z[:, j, i]
            if fz.mask.all():
                continue
            if std['map']:
                try:
                    u = pylab.find(std['lon180'] == lon180[i])[0]
                    v = pylab.find(std['lat'] == lat[j])[0]
                except:
                    if debug:
                        warnings.warn(
                            'Unable to locate standard deviation '
                            'for (%s, %s)' % (X, Y), Warning)
                    continue
                fstd = std['val'][v, u]
                estd = fstd - fz.std()
                if (estd < 0) & (abs(estd) > std['err']):
                    if debug:
                        warnings.warn('Discrepant input standard deviation '
                                      '(%f) location (%.3f, %.3f) will be '
                                      'disregarded.' %
                                      (estd, lon180[i], lat[j]))
                    continue
            else:
                fstd = fz.std()
            fstd2 = fstd**2
            std_map[j, i] = fstd
            zero[:, i] = fz
            fz = (fz - fz.mean()) / fstd

            # Calculates the distance of the current point to any special
            # location set in the 'loc' parameter. If only special locations
            # are to be analysed, then skips all other ones. If the input
            # array is one dimensional, then do the analysis anyway.
            if dim == 1:
                dist = numpy.asarray([0.])
            else:
                try:
                    dist = numpy.asarray([
                        ((item[0] - (lon180[i]))**2 + (item[1] - lat[j])**2)
                        for item in loc
                    ])
                except:
                    dist = []
            if (dist > R2).all() & (loc != 'all') & onlyloc:
                continue

            # Determines the lag-1 autocorrelation coefficient to be used in
            # the significance test from the input parameter
            if alpha['calc']:
                ac = acorr(fz)
                alpha_ij = (ac[c + 1] + ac[c + 2]**0.5) / 2
            elif alpha['map']:
                try:
                    u = pylab.find(alpha['lon180'] == lon180[i])[0]
                    v = pylab.find(alpha['lat'] == lat[j])[0]
                    alpha_ij = alpha['val'][v, u]
                except:
                    if debug:
                        warnings.warn(
                            'Unable to locate standard deviation '
                            'for (%s, %s) using mean value instead' % (X, Y),
                            Warning)
                    alpha_ij = alpha['mean']
            else:
                alpha_ij = alpha['mean']

            # Calculates the continuous wavelet transform using the wavelet
            # Python module. Calculates the wavelet and Fourier power spectrum
            # and the periods in days. Also calculates the Fourier power
            # spectrum for the first and second halves of the timeseries.
            wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(
                fz, dt, dj, s0, J, mother)
            power = abs(wave * wave.conj())
            fft_power = abs(fft * fft.conj())
            period = 1. / freqs
            fftperiod = 1. / fftfreqs
            psel = pylab.find(period <= pmax.max())

            # Calculates the Fourier transform for the first and the second
            # halves ot the time-series for later trend analysis.
            fft_1 = numpy.fft.fft(fz[fft_ia], fft_N)[1:fft_N / 2] / fft_N**0.5
            fft_2 = numpy.fft.fft(fz[fft_ib], fft_N)[1:fft_N / 2] / fft_N**0.5
            fft_p1 = abs(fft_1 * fft_1.conj())
            fft_p2 = abs(fft_2 * fft_2.conj())

            # Creates FFT return array and stores the spectrum accordingly
            try:
                fft_spectrum[:, j, i] = fft_power * fstd2
                fft_spectrum1[:, j, i] = fft_p1 * fstd2
                fft_spectrum2[:, j, i] = fft_p2 * fstd2
            except:
                fft_spectrum = (numpy.ma.empty([len(fft_power), b, a]) *
                                numpy.nan)
                fft_spectrum1 = (numpy.ma.empty([fft_N2, b, a]) * numpy.nan)
                fft_spectrum2 = (numpy.ma.empty([fft_N2, b, a]) * numpy.nan)
                #
                fft_spectrum[:, j, i] = fft_power * fstd2
                fft_spectrum1[:, j, i] = fft_p1 * fstd2
                fft_spectrum2[:, j, i] = fft_p2 * fstd2

            # Performs the significance test according to the article by
            # Torrence and Compo (1998). The wavelet power is significant
            # if the ratio power/sig95 is > 1.
            signif, fft_theor = wavelet.significance(1.,
                                                     dt,
                                                     scales,
                                                     0,
                                                     alpha_ij,
                                                     significance_level=siglvl,
                                                     wavelet=mother)
            sig95 = (signif * numpy.ones((c, 1))).transpose()
            sig95 = power / sig95

            # Calculates the global wavelet power spectrum and its
            # significance. The global wavelet spectrum is the average of the
            # wavelet power spectrum over time. The degrees of freedom (dof)
            # have to be corrected for padding at the edges.
            glbl_power = power.mean(axis=1)
            dof = c - scales
            glbl_signif, tmp = wavelet.significance(1.,
                                                    dt,
                                                    scales,
                                                    1,
                                                    alpha_ij,
                                                    significance_level=siglvl,
                                                    dof=dof,
                                                    wavelet=mother)
            global_power[:, j, i] = glbl_power * fstd2

            # Calculates the average wavelet spectrum along the scales and its
            # significance according to Torrence and Compo (1998) eq. 24. The
            # scale_avg_full variable is used multiple times according to the
            # selected periods range.
            #
            # Also calculates the average Fourier power spectrum.
            Cdelta = mother.cdelta
            scale_avg_full = (scales * numpy.ones((c, 1))).transpose()
            scale_avg_full = power / scale_avg_full
            for k in range(C):
                if k == 0:
                    sel = pylab.find((period >= pmin[0])
                                     & (period <= pmax[-1]))
                    pminmax = [period[sel[0]], period[sel[-1]]]
                    les = pylab.find((fftperiod >= pmin[0])
                                     & (fftperiod <= pmax[-1]))
                    fminmax = [fftperiod[les[0]], fftperiod[les[-1]]]
                else:
                    sel = pylab.find((period >= pmin[k - 1])
                                     & (period < pmax[k - 1]))
                    pminmax = [pmin[k - 1], pmax[k - 1]]
                    les = pylab.find((fftperiod >= pmin[k - 1])
                                     & (fftperiod <= pmax[k - 1]))
                    fminmax = [fftperiod[les[0]], fftperiod[les[-1]]]

                scale_avg = numpy.ma.array(
                    (dj * dt / Cdelta * scale_avg_full[sel, :].sum(axis=0)))
                scale_avg_signif, tmp = wavelet.significance(
                    1.,
                    dt,
                    scales,
                    2,
                    alpha_ij,
                    significance_level=siglvl,
                    dof=[scales[sel[0]], scales[sel[-1]]],
                    wavelet=mother)
                scale_avg.mask = (scale_avg < scale_avg_signif)
                if mem_error:
                    avg_spectrum[k, :, i] = scale_avg
                else:
                    avg_spectrum[k, :, j, i] = scale_avg
                avg_spectrum_signif[k, j, i] = scale_avg_signif

                # Trend analysis using least square polynomial fit of one
                # degree of the original input data and scale averaged
                # wavelet power. The wavelet power trend is calculated only
                # where the cone of influence spans the highest analyzed
                # period. In the end, the returned value for the trend is in
                # units**2.
                #
                # Also calculates the trends in the Fourier power spectrum.
                # Note that the FFT power spectrum is already multiplied by
                # the signal's standard deviation.
                incoi = pylab.find(coi >= pmax[-1])
                if len(incoi) == 0:
                    incoi = numpy.arange(c)
                polyw = numpy.polyfit(t[incoi], scale_avg[incoi].data, 1)
                wavelet_trend[k, j, i] = polyw[0] * fstd2
                fft_trend[k, j, i] = (
                    fft_spectrum2[les[les < fft_N2], j, i] -
                    fft_spectrum1[les[les < fft_N2], j, i]).mean() / fft_dt
                if k == 0:
                    polyz = numpy.polyfit(t, fz * fstd, 1)
                    trend[j, i] = polyz[0]

                # Plots the wavelet analysis results for the individual
                # series. The plot is only generated if the dimension of the
                # input variable z is one, if a special location is within a
                # range of the search radius R and if the show or save
                # parameters are set.
                if (show | (save != '')) & ((k in sel_periods)):
                    if (dist < R2).any() | (loc == 'all') | (dim == 1):
                        # There is an interesting spot within the search
                        # radius of location (%s, %s).' % (Y, X)
                        isloc = True
                        if (dist < R2).any():
                            try:
                                hloc.append(loc[(dist < R2)][0, 0])
                            except:
                                pass
                        if save:
                            try:
                                sv = '%s/tz_%s_%s_%d' % (
                                    save, prefix,
                                    common.num2latlon(lon[i], lat[j]), k)
                            except:
                                sv = '%s' % (save)
                        else:
                            sv = ''
                        graphics.wavelet_plot(tm,
                                              period[psel],
                                              fz,
                                              power[psel, :],
                                              coi,
                                              glbl_power[psel],
                                              scale_avg.data,
                                              fft=fft,
                                              fft_period=fftperiod,
                                              power_signif=sig95[psel, :],
                                              glbl_signif=glbl_signif[psel],
                                              scale_signif=scale_avg_signif,
                                              pminmax=pminmax,
                                              labels=labels,
                                              normalized=True,
                                              std=fstd,
                                              ztrend=polyz,
                                              wtrend=polyw,
                                              show=show,
                                              save=sv,
                                              levels=levels,
                                              cmap=cmap)

        # Saves and/or plots the intermediate results as zonal temporal
        # diagrams.
        if dsave:
            for k in range(C):
                # NOTE(review): 'i' below is the last value left over from the
                # inner longitude loop -- confirm this is intentional.
                if k == 0:
                    sv = '%s/%s/%s_%s.xt.gz' % (
                        dsave, 'global', prefix,
                        common.num2latlon(lon[i], lat[j], mode='each')[0])
                else:
                    sv = '%s/%s/%s_%s.xt.gz' % (
                        dsave, name[k - 1].lower(), prefix,
                        common.num2latlon(lon[i], lat[j], mode='each')[0])
                if mem_error:
                    fm.save_map(lon, tm, avg_spectrum[k, :, :].data, sv,
                                lat[j])
                else:
                    fm.save_map(lon, tm, avg_spectrum[k, :, j, :].data, sv,
                                lat[j])

        if ((dim > 1) and (show or (save != '')) & (not onlyloc)
                and len(hloc) > 0):
            hloc = common.lon360(numpy.unique(hloc))
            if save:
                sv = '%s/xt_%s_%s' % (save, prefix,
                                      common.num2latlon(
                                          lon[i], lat[j], mode='each')[0])
            else:
                sv = ''
            if mem_error:
                # To include overlapping original signal, use zz=zero
                gis.hovmoller(lon,
                              tm,
                              avg_spectrum[1:, :, :],
                              zo=avg_spectrum_signif[1:, j, :],
                              title=title,
                              crange=crange,
                              show=show,
                              save=sv,
                              labels=hlabels,
                              loc=hloc,
                              cmap=cmap,
                              bottom='avg',
                              right='avg',
                              std=std_map[j, :])
            else:
                gis.hovmoller(lon,
                              tm,
                              avg_spectrum[1:, :, j, :],
                              zo=avg_spectrum_signif[1:, j, :],
                              title=title,
                              crange=crange,
                              show=show,
                              save=sv,
                              labels=hlabels,
                              loc=hloc,
                              cmap=cmap,
                              bottom='avg',
                              right='avg',
                              std=std_map[j, :])

        # Flushing profiling text.
        stdout.write(len(s) * '\b')
        s = 'Spectral analysis of %d location%s (%s)... %s ' % (
            N, plural, Y, common.profiler(b, j + 1, 0, t1, t2))
        stdout.write(s)
        stdout.flush()

    stdout.write('\n')

    result['scale'] = scales
    result['period'] = period
    if dim == 1:
        result['power_spectrum'] = power * fstd2
        result['power_significance'] = sig95
        result['cwt'] = wave
        result['fft'] = fft
    result['global_power'] = global_power
    result['scale_spectrum'] = avg_spectrum
    if fpath:
        result['lon'] = lon
        result['lat'] = lat
    result['scale_significance'] = avg_spectrum_signif
    result['trend'] = trend
    result['wavelet_trend'] = wavelet_trend
    result['fft_power'] = fft_spectrum
    result['fft_first'] = fft_spectrum1
    result['fft_second'] = fft_spectrum2
    result['fft_period'] = fftperiod
    result['fft_trend'] = fft_trend
    return result
Example 4
0
def wavelet_analysis(z, tm, lon=None, lat=None, mother='Morlet', alpha=0.0,
                     siglvl=0.95, loc=None, onlyloc=False, periods=None,
                     sel_periods=[], show=False, save='', dsave='', prefix='',
                     labels=dict(), title=None, name=None, fpath='',
                     fpattern='', std=dict(), crange=None, levels=None,
                     cmap=cm.GMT_no_green, debug=False):
    """Continuous wavelet transform and significance analysis.

    The analysis is made using the methodology and statistical approach
    suggested by Torrence and Compo (1998).

    Depending on the dimensions of the input array, three different
    kinds of approaches are taken. If the input array is one-dimensional
    then only a simple analysis is performed. If the array is
    bi- or three-dimensional then spectral Hovmoller diagrams are drawn
    for each Fourier period given within a range of +/-25%.

    PARAMETERS
        z (array like) :
            Input data. The data array should have one of these forms,
            z[tm], z[tm, lat] or z[tm, lat, lon].
        tm (array like) :
            Time axis. It should contain values in matplotlib date
            format (i.e. number of days since 0001-01-01 UTC).
        lon (array like, optional) :
            Longitude.
        lat (array like, optional) :
            Latitude.
        mother (string, optional) :
            Gives the name of the mother wavelet to be used. Possible
            values are 'Morlet' (default), 'Paul' or 'Mexican hat'.
        alpha (float or dictionary, optional) :
            Lag-1 autocorrelation for background noise.  Default value
            is 0.0 (white noise). If different autocorrelation
            coefficients should be used for different locations, then
            the input should contain a dictionary with 'lon', 'lat',
            'map' keys as for the std parameter.
        siglvl (float, optional) :
            Significance level. Default value is 0.95.
        loc (array like, optional) :
            Special locations of interest. If the input array is of
            higher dimenstions, the output of the simple wavelet
            analysis of each of the locations is output. The list
            should contain the pairs of (lon, lat) for each locations
            of interest.
        onlyloc (boolean, optional) :
            If set to true then only the specified locations are
            analysed. The default is false.
        periods (array like, optional) :
            Special Fourier periods of interest in case of analysis of
            higher dimensions (in years).
        sel_periods (array like, optional) :
            Select which Fourier periods spectral power are averaged.
        show (boolean, optional) :
            If set to true the the resulting maps are shown on screen.
        save (string, optional) :
            The path in which the resulting plots are to be saved. If
            not set, then no images will be saved.
        dsave (string, optional) :
            If set, saves the scale averaged power spectrum series to
            this path. This is especially useful if memory is an issue.
        prefix (string, optional) :
            Prefix to retain naming conventions such as basin.
        labels (dictionary, optional) :
            Sets the labels for the plot axis.
        title (string, array like, optional) :
            Title of each of the selected periods.
        name (string, array like, optional) :
            Name of each of the selected periods. Used when saving the
            results to files.
        fpath (string, optional) :
            Path for the source files to be loaded when memory issues
            are a concern.
        fpattern (string, optional) :
            Regular expression pattern to match file names.
        std (dictionary, optional) :
            A dictionary containing a map of the standard deviation of
            the analysed time series. To set the longitude and latitude
            coordinates of the map, they should be included as
            separate 'lon' and 'lat' key items. If they are omitted,
            then the regular input parameters are assumed. Accepted
            standard deviation error is set in key 'err' (default value
            is 1e-2).
        crange (array like, optional) :
            Array of power levels to be used in average Hovmoler colour bar.
        levels (array like, optional) :
            Array of power levels to be used in spectrogram colour bar.
        cmap (colormap, optional) :
            Sets the colour map to be used in the plots. The default is
            the Generic Mapping Tools (GMT) no green.
        debug (boolean, optional) :
            If set to True then warnings are shown.

    OUTPUT
        If show or save are set, plots either on screen and or on file
        according to the specified parameters.

        If dsave parameter is set, also saves the scale averaged power
        series to files.

    RETURNS
        wave (dictionary) :
            Dictionary containing the resulting calculations from the
            wavelet analysis according to the input parameters. The
            output items might be:
                scale --
                    Wavelet scales.
                period --
                    Equivalent Fourier periods (in days).
                power_spectrum --
                    Wavelet power spectrum (in units**2).
                power_significance --
                    Relative significance of the power spectrum.
                global_power --
                    Global wavelet power spectrum (in units**2).
                scale_spectrum  --
                    Scale averaged wavelet spectra (in units**2)
                    according to selected periods.
                scale_significance --
                    Relative significance of the scale averaged wavelet
                    spectra.
                fft --
                    Fourier spectrum.
                fft_first --
                    Fourier spectrum of the first half of the
                    time-series.
                fft_second --
                    Fourier spectrum of the second half of the
                    time-series.
                fft_period --
                    Fourier periods (in days).
                trend --
                    Signal trend (in units/yr).
                wavelet_trend --
                    Wavelet spectrum trends (in units**2/yr).

    """
    # NOTE(review): this is Python 2 code (`print` statements and
    # `raise Warning, ...` syntax below); it will not run under Python 3.
    t1 = time()  # Wall-clock start, used by the profiler output below.
    result = {}

    # Resseting unit labels for hovmoller plots
    hlabels = dict(labels)
    hlabels['units'] = ''

    # Setting some titles and paths
    if name == None:
        name = title

    # Working with the std parameter and setting its properties:
    # presence of a 'val' key marks a per-location standard deviation map.
    if 'val' in std.keys():
        if 'lon' not in std.keys():
            std['lon'] = lon
        std['lon180'] = common.lon180(std['lon'])
        if 'lat' not in std.keys():
            std['lat'] = lat
        if 'err' not in std.keys():
            std['err'] = 1e-2
        std['map'] = True
    else:
        std['map'] = False

    # Lag-1 autocorrelation parameter: either a per-location map
    # (dictionary input), calculated from the data (alpha == -1), or a
    # single constant value for every location.
    if type(alpha).__name__ == 'dict':
        if 'lon' not in alpha.keys():
            alpha['lon'] = lon
        alpha['lon180'] = common.lon180(alpha['lon'])
        if 'lat' not in alpha.keys():
            alpha['lat'] = lat
        alpha['mean'] = alpha['val'].mean()
        alpha['map'] = True
        alpha['calc'] = False
    else:
        if alpha == -1:
            alpha = {'mean': -1, 'calc': True}
        else:
            alpha = {'val': alpha, 'mean': alpha, 'map': False, 'calc': False}

    # Shows some of the options on screen.
    print ('Average Lag-1 autocorrelation for background noise: %.2f' %
        (alpha['mean']))
    if save:
        print 'Saving result figures in \'%s\'.' % (save)
    if dsave:
        print 'Saving result data in \'%s\'.' % (dsave)

    if fpath:
        # Gets the list of files to be loaded individually extracts all the
        # latitudes and loads the first file to get the main parameters.
        flist = os.listdir(fpath)
        flist, match = common.reglist(flist, fpattern)
        if len(flist) == 0:
            raise Warning, 'No files matched search pattern.'
        flist = numpy.asarray(flist)
        lst_lat = []
        for item in match:
            # Latitude is parsed from the file name; a trailing 'S' marks
            # the southern hemisphere (negative latitude).
            y = string.atof(item[-2])
            if item[-1].upper() == 'S': y *= -1
            lst_lat.append(y)
        # Detect file type from file name
        ftype = fm.detect_ftype(flist[0])
        x, y, tm, z = fm.load_map('%s/%s' % (fpath, flist[0]),
            ftype=ftype, masked=True)
        if lon == None:
            lon = x
        lat = numpy.unique(lst_lat)
        dim = 2
    else:
        # Transforms input arrays in numpy arrays and numpy masked arrays.
        tm = numpy.asarray(tm)
        z = numpy.ma.asarray(z)
        z.mask = numpy.isnan(z)

        # Determines the number of dimensions of the variable to be plotted and
        # the sizes of each dimension.  The data is always reshaped to the
        # canonical z[time, lat, lon] form (c, b, a).
        a = b = c = None
        dim = len(z.shape)
        if dim == 3:
            c, b, a = z.shape
        elif dim == 2:
            c, a = z.shape
            b = 1
            z = z.reshape(c, b, a)
        else:
            c = z.shape[0]
            a = b = 1
            z = z.reshape(c, b, a)
        if tm.size != c:
            raise Warning, 'Time and data lengths do not match.'

    # Transforms coordinate arrays into numpy arrays
    s = type(lat).__name__
    if s in ['int', 'float', 'float64']:
        lat = numpy.asarray([lat])
    elif s != 'NoneType':
        lat = numpy.asarray(lat)
    s = type(lon).__name__
    if s in ['int', 'float', 'float64']:
        lon = numpy.asarray([lon])
    elif s != 'NoneType':
        lon = numpy.asarray(lon)

    # Starts the mother wavelet class instance and determines important
    # analysis parameters
    mother = mother.lower()
    if mother == 'morlet':
        mother = wavelet.Morlet()
    elif mother == 'paul':
        mother = wavelet.Paul()
    elif mother in ['mexican hat', 'mexicanhat', 'mexican_hat']:
        mother = wavelet.Mexican_hat()
    else:
        raise Warning, 'Mother wavelet unknown.'

    t = tm / common.daysinyear        # Time array in years
    dt = tm[1] - tm[0]                # Temporal sampling interval
    try:                              # Zonal sampling interval
        dx = lon[1] - lon[0]
    except:
        dx = 1
    try:                              # Meridional sampling interval
        dy = lat[1] - lat[0]
    except:
        dy = dx
    if numpy.isnan(dt): dt = 1
    if numpy.isnan(dx): dx = 1
    if numpy.isnan(dy): dy = dx
    dj = 0.25                         # Four sub-octaves per octave
    s0 = 2 * dt                       # Smallest scale
    J = 7 / dj - 1                    # Seven powers of two with dj sub-octaves
    scales = period = None

    # Default colour ranges when none were supplied by the caller.
    if type(crange).__name__ == 'NoneType':
        crange = numpy.arange(0, 1.1, 0.1)
    if type(levels).__name__ == 'NoneType':
        levels = 2. ** numpy.arange(-3, 6)

    if fpath:
        N = lat.size
        # TODO: refactoring # lon = numpy.arange(-81. - dx / 2., 290. + dx / 2, dx)
        # TODO: refactoring # lat = numpy.unique(numpy.asarray(lst_lat))
        c, b, a = tm.size, lat.size, lon.size
    else:
        N = a * b

    # Making sure that the longitudes range from -180 to 180 degrees and
    # setting the squared search radius R2.
    try:
        lon180 = common.lon180(lon)
    except:
        lon180 = None
    R2 = dx ** 2 + dy ** 2
    if numpy.isnan(R2):
        R2 = 65535.
    if loc != None:
        loc = numpy.asarray([[common.lon180(item[0]), item[1]] for item in
            loc])

    # Initializes important result variables such as the global wavelet power
    # spectrum map, scale avaraged spectrum time-series and their significance,
    # wavelet power trend map.
    global_power = numpy.ma.empty([J + 1, b, a]) * numpy.nan
    try:
        # Build the period bin edges (pmin, pmax) around each requested
        # Fourier period; index 0 is reserved for the full period range.
        C = len(periods) + 1
        dT = numpy.diff(periods)
        pmin = numpy.concatenate([[periods[0] - dT[0] / 2],
                                 0.5 * (periods[:-1] + periods[1:])])
        pmax = numpy.concatenate([0.5 * (periods[:-1] + periods[1:]),
                                 [periods[-1] + dT[-1] / 2]])
    except:
        # Sets the lowest period to null and the highest to half the time
        # series length.
        C = 1
        pmin = numpy.array([0])
        pmax = numpy.array([(tm[-1] - tm[0]) / 2])
    if type(sel_periods).__name__ in ['int', 'float']:
        sel_periods = [sel_periods]
    elif len(sel_periods) == 0:
        sel_periods = [-1.]
    try:
        # EAFP: try to allocate the full 4-D spectrum array; fall back to a
        # per-latitude 3-D buffer (mem_error) if that raises (e.g. MemoryError)
        # or when processing files individually.
        if fpath:
            raise Warning, 'Process files individually'
        avg_spectrum = numpy.ma.empty([C, c, b, a]) * numpy.nan
        mem_error = False
    except:
        avg_spectrum = numpy.ma.empty([C, c, a]) * numpy.nan
        mem_error = True
    avg_spectrum_signif = numpy.ma.empty([C, b, a]) * numpy.nan
    trend = numpy.ma.empty([b, a]) * numpy.nan
    wavelet_trend = numpy.ma.empty([C, b, a]) * numpy.nan
    fft_trend = numpy.ma.empty([C, b, a]) * numpy.nan
    std_map = numpy.ma.empty([b, a]) * numpy.nan
    zero = numpy.ma.empty([c, a])
    fft_spectrum = None
    fft_spectrum1 = None
    fft_spectrum2 = None

    # Walks through each latitude and then through each longitude to perform
    # the temporal wavelet analysis.
    if N == 1:
        plural = ''
    else:
        plural = 's'
    s = 'Spectral analysis of %d location%s... ' % (N, plural)
    stdout.write(s)
    stdout.flush()
    for j in range(b):
        t2 = time()
        isloc = False  # Ressets 'is special location' flag
        hloc = []      # Cleans location list for Hovmoller plots
        zero *= numpy.nan
        if mem_error:
            # Clears average spectrum for next step.
            avg_spectrum *= numpy.nan
            avg_spectrum.mask = False
        if fpath:
            # File-based mode: load only the files for the current latitude.
            findex = pylab.find(lst_lat == lat[j])
            if len(findex) == 0:
                continue
            ftype = fm.detect_ftype(flist[findex[0]])
            try:
                x, y, tm, z = fm.load_dataset(fpath, flist=flist[findex],
                    ftype=ftype, masked=True, lon=lon, lat=lat[j:j+1],
                    verbose=True)
            except:
                continue
            z = z[:, 0, :]
            x180 = common.lon180(x)

        # Determines the first and second halves of the time-series and some
        # constants for the FFT
        fft_ta = numpy.ceil(t.min())
        fft_tb = numpy.floor(t.max())
        fft_tc = numpy.round(fft_ta + fft_tb) / 2
        fft_ia = pylab.find((t >= fft_ta) & (t <= fft_tc))
        fft_ib = pylab.find((t >= fft_tc) & (t <= fft_tb))
        fft_N = int(2 ** numpy.ceil(numpy.log2(max([len(fft_ia),
            len(fft_ib)]))))
        fft_N2 = fft_N / 2 - 1  # Python 2 integer division.
        fft_dt = t[fft_ib].mean() - t[fft_ia].mean()

        for i in range(a):
            # Some string output.
            try:
                Y, X = common.num2latlon(lon[i], lat[j], mode='each',
                    padding=False)
            except:
                Y = X = '?'

            # Extracts individual time-series from the whole dataset and
            # sets or calculates its standard deviation, squared standard
            # deviation and finally the normalized time-series.
            if fpath:
                try:
                    ilon = pylab.find(x == lon[i])[0]
                    fz = z[:, ilon]
                except:
                    continue
            else:
                fz = z[:, j, i]
            if fz.mask.all():
                continue
            if std['map']:
                try:
                    u = pylab.find(std['lon180'] == lon180[i])[0]
                    v = pylab.find(std['lat'] == lat[j])[0]
                except:
                    if debug:
                        warnings.warn('Unable to locate standard deviation '
                                      'for (%s, %s)' % (X, Y), Warning)
                    continue
                fstd = std['val'][v, u]
                estd = fstd - fz.std()
                if (estd < 0) & (abs(estd) > std['err']):
                    if debug:
                        warnings.warn('Discrepant input standard deviation '
                            '(%f) location (%.3f, %.3f) will be '
                            'disregarded.' % (estd, lon180[i], lat[j]))
                    continue
            else:
                fstd = fz.std()
            fstd2 = fstd ** 2
            std_map[j, i] = fstd
            zero[:, i] = fz
            fz = (fz - fz.mean()) / fstd  # Normalized (zero mean, unit std).

            # Calculates the distance of the current point to any special
            # location set in the 'loc' parameter. If only special locations
            # are to be analysed, then skips all other ones. If the input
            # array is one dimensional, then do the analysis anyway.
            if dim == 1:
                dist = numpy.asarray([0.])
            else:
                try:
                    dist = numpy.asarray([((item[0] - (lon180[i])) **
                        2 + (item[1] - lat[j]) ** 2) for item in loc])
                except:
                    dist = []
            if (dist > R2).all() & (loc != 'all') & onlyloc:
                continue

            # Determines the lag-1 autocorrelation coefficient to be used in
            # the significance test from the input parameter
            if alpha['calc']:
                ac = acorr(fz)
                # NOTE(review): `**` binds tighter than `+`, so this is
                # ac[c+1] + sqrt(ac[c+2]), not sqrt of the lag-2 value --
                # confirm against the intended (ac1 + sqrt(ac2)) / 2 formula.
                alpha_ij = (ac[c + 1] + ac[c + 2] ** 0.5) / 2
            elif alpha['map']:
                try:
                    u = pylab.find(alpha['lon180'] == lon180[i])[0]
                    v = pylab.find(alpha['lat'] == lat[j])[0]
                    alpha_ij = alpha['val'][v, u]
                except:
                    if debug:
                        warnings.warn('Unable to locate standard deviation '
                            'for (%s, %s) using mean value instead' %
                            (X, Y), Warning)
                    alpha_ij = alpha['mean']
            else:
                alpha_ij = alpha['mean']

            # Calculates the continuous wavelet transform using the wavelet
            # Python module. Calculates the wavelet and Fourier power spectrum
            # and the periods in days. Also calculates the Fourier power
            # spectrum for the first and second halves of the timeseries.
            wave, scales, freqs, coi, fft, fftfreqs = wavelet.cwt(fz, dt, dj,
                s0, J, mother)
            power = abs(wave * wave.conj())
            fft_power = abs(fft * fft.conj())
            period = 1. / freqs
            fftperiod = 1. / fftfreqs
            psel = pylab.find(period <= pmax.max())

            # Calculates the Fourier transform for the first and the second
            # halves ot the time-series for later trend analysis.
            fft_1 = numpy.fft.fft(fz[fft_ia], fft_N)[1:fft_N/2] / fft_N ** 0.5
            fft_2 = numpy.fft.fft(fz[fft_ib], fft_N)[1:fft_N/2] / fft_N ** 0.5
            fft_p1 = abs(fft_1 * fft_1.conj())
            fft_p2 = abs(fft_2 * fft_2.conj())

            # Creates FFT return array and stores the spectrum accordingly
            # (EAFP: the arrays are lazily allocated on the first successful
            # spectrum, since their first dimension is only known here).
            try:
                fft_spectrum[:, j, i] = fft_power * fstd2
                fft_spectrum1[:, j, i] = fft_p1 * fstd2
                fft_spectrum2[:, j, i] = fft_p2 * fstd2
            except:
                fft_spectrum = (numpy.ma.empty([len(fft_power), b, a]) *
                    numpy.nan)
                fft_spectrum1 = (numpy.ma.empty([fft_N2, b, a]) *
                    numpy.nan)
                fft_spectrum2 = (numpy.ma.empty([fft_N2, b, a]) *
                    numpy.nan)
                #
                fft_spectrum[:, j, i] = fft_power * fstd2
                fft_spectrum1[:, j, i] = fft_p1 * fstd2
                fft_spectrum2[:, j, i] = fft_p2 * fstd2

            # Performs the significance test according to the article by
            # Torrence and Compo (1998). The wavelet power is significant
            # if the ratio power/sig95 is > 1.
            signif, fft_theor = wavelet.significance(1., dt, scales, 0,
                alpha_ij, significance_level=siglvl, wavelet=mother)
            sig95 = (signif * numpy.ones((c, 1))).transpose()
            sig95 = power / sig95

            # Calculates the global wavelet power spectrum and its
            # significance. The global wavelet spectrum is the average of the
            # wavelet power spectrum over time. The degrees of freedom (dof)
            # have to be corrected for padding at the edges.
            glbl_power = power.mean(axis=1)
            dof = c - scales
            glbl_signif, tmp = wavelet.significance(1., dt, scales, 1,
                alpha_ij, significance_level=siglvl, dof=dof, wavelet=mother)
            global_power[:, j, i] = glbl_power * fstd2

            # Calculates the average wavelet spectrum along the scales and its
            # significance according to Torrence and Compo (1998) eq. 24. The
            # scale_avg_full variable is used multiple times according to the
            # selected periods range.
            #
            # Also calculates the average Fourier power spectrum.
            Cdelta = mother.cdelta
            scale_avg_full = (scales * numpy.ones((c, 1))).transpose()
            scale_avg_full = power / scale_avg_full
            for k in range(C):
                if k == 0:
                    # k == 0 averages over the whole period range; k > 0
                    # averages over the k-th requested period band.
                    sel = pylab.find((period >= pmin[0]) &
                        (period <= pmax[-1]))
                    pminmax = [period[sel[0]], period[sel[-1]]]
                    les = pylab.find((fftperiod >= pmin[0]) &
                        (fftperiod <= pmax[-1]))
                    fminmax = [fftperiod[les[0]], fftperiod[les[-1]]]
                else:
                    sel = pylab.find((period >= pmin[k - 1]) &
                        (period < pmax[k - 1]))
                    pminmax = [pmin[k-1], pmax[k-1]]
                    les = pylab.find((fftperiod >= pmin[k - 1]) &
                        (fftperiod <= pmax[k - 1]))
                    fminmax = [fftperiod[les[0]], fftperiod[les[-1]]]

                scale_avg = numpy.ma.array((dj * dt / Cdelta *
                    scale_avg_full[sel, :].sum(axis=0)))
                scale_avg_signif, tmp = wavelet.significance(1., dt, scales,
                    2, alpha_ij, significance_level=siglvl,
                    dof=[scales[sel[0]], scales[sel[-1]]], wavelet=mother)
                # Masks out scale-averaged power below the significance level.
                scale_avg.mask = (scale_avg < scale_avg_signif)
                if mem_error:
                    avg_spectrum[k, :, i] = scale_avg
                else:
                    avg_spectrum[k, :, j, i] = scale_avg
                avg_spectrum_signif[k, j, i] = scale_avg_signif

                # Trend analysis using least square polynomial fit of one
                # degree of the original input data and scale averaged
                # wavelet power. The wavelet power trend is calculated only
                # where the cone of influence spans the highest analyzed
                # period. In the end, the returned value for the trend is in
                # units**2.
                #
                # Also calculates the trends in the Fourier power spectrum.
                # Note that the FFT power spectrum is already multiplied by
                # the signal's standard deviation.
                incoi = pylab.find(coi >= pmax[-1])
                if len(incoi) == 0:
                    incoi = numpy.arange(c)
                polyw = numpy.polyfit(t[incoi], scale_avg[incoi].data, 1)
                wavelet_trend[k, j, i] = polyw[0] * fstd2
                fft_trend[k, j, i] = (fft_spectrum2[les[les<fft_N2], j, i] -
                    fft_spectrum1[les[les<fft_N2], j, i]).mean() / fft_dt
                if k == 0:
                    polyz = numpy.polyfit(t, fz * fstd, 1)
                    trend[j, i] = polyz[0]

                # Plots the wavelet analysis results for the individual
                # series. The plot is only generated if the dimension of the
                # input variable z is one, if a special location is within a
                # range of the search radius R and if the show or save
                # parameters are set.
                if (show | (save != '')) & ((k in sel_periods)):
                    if (dist < R2).any() | (loc == 'all') | (dim == 1):
                        # There is an interesting spot within the search
                        # radius of location (%s, %s).' % (Y, X)
                        isloc = True
                        if (dist < R2).any():
                            try:
                                hloc.append(loc[(dist < R2)][0, 0])
                            except:
                                pass
                        if save:
                            try:
                                sv = '%s/tz_%s_%s_%d' % (save, prefix,
                                    common.num2latlon(lon[i], lat[j]), k)
                            except:
                                sv = '%s' % (save)
                        else:
                            sv = ''
                        graphics.wavelet_plot(tm, period[psel], fz,
                            power[psel, :], coi, glbl_power[psel],
                            scale_avg.data, fft=fft, fft_period=fftperiod,
                            power_signif=sig95[psel, :],
                            glbl_signif=glbl_signif[psel],
                            scale_signif=scale_avg_signif, pminmax=pminmax,
                            labels=labels, normalized=True, std=fstd,
                            ztrend=polyz, wtrend=polyw, show=show, save=sv,
                            levels=levels, cmap=cmap)

        # Saves and/or plots the intermediate results as zonal temporal
        # diagrams.
        # NOTE(review): `lon[i]` below uses the leftover index from the
        # finished longitude loop (i == a - 1, unless `continue` skipped
        # iterations), so the saved file names carry the last analysed
        # longitude -- confirm this is intended.
        if dsave:
            for k in range(C):
                if k == 0:
                    sv = '%s/%s/%s_%s.xt.gz' % (dsave, 'global', prefix,
                        common.num2latlon(lon[i], lat[j], mode='each')[0])
                else:
                    sv = '%s/%s/%s_%s.xt.gz' % (dsave, name[k - 1].lower(),
                        prefix,
                        common.num2latlon(lon[i], lat[j], mode='each')[0])
                if mem_error:
                    fm.save_map(lon, tm, avg_spectrum[k, :, :].data,
                        sv, lat[j])
                else:
                    fm.save_map(lon, tm, avg_spectrum[k, :, j, :].data,
                        sv, lat[j])

        if ((dim > 1) and (show or (save != '')) & (not onlyloc) and
                len(hloc) > 0):
            hloc = common.lon360(numpy.unique(hloc))
            if save:
                sv = '%s/xt_%s_%s' % (save, prefix,
                    common.num2latlon(lon[i], lat[j], mode='each')[0])
            else:
                sv = ''
            if mem_error:
                # To include overlapping original signal, use zz=zero
                gis.hovmoller(lon, tm, avg_spectrum[1:, :, :],
                    zo=avg_spectrum_signif[1:, j, :], title=title,
                    crange=crange, show=show, save=sv, labels=hlabels,
                    loc=hloc, cmap=cmap, bottom='avg', right='avg',
                    std=std_map[j, :])
            else:
                gis.hovmoller(lon, tm, avg_spectrum[1:, :, j, :],
                    zo=avg_spectrum_signif[1:, j, :], title=title,
                    crange=crange, show=show, save=sv, labels=hlabels,
                    loc=hloc, cmap=cmap, bottom='avg', right='avg',
                    std=std_map[j, :])

        # Flushing profiling text.
        # NOTE(review): `Y` here is also a leftover from the inner loop and
        # raises NameError if every longitude at this latitude was skipped.
        stdout.write(len(s) * '\b')
        s = 'Spectral analysis of %d location%s (%s)... %s ' % (N, plural, Y,
            common.profiler(b, j + 1, 0, t1, t2))
        stdout.write(s)
        stdout.flush()

    stdout.write('\n')

    # Assembles the result dictionary; single-location extras (full power
    # spectrum, CWT and FFT) are only returned for one-dimensional input.
    result['scale'] = scales
    result['period'] = period
    if dim == 1:
        result['power_spectrum'] = power * fstd2
        result['power_significance'] = sig95
        result['cwt'] = wave
        result['fft'] = fft
    result['global_power'] = global_power
    result['scale_spectrum'] = avg_spectrum
    if fpath:
        result['lon'] = lon
        result['lat'] = lat
    result['scale_significance'] = avg_spectrum_signif
    result['trend'] = trend
    result['wavelet_trend'] = wavelet_trend
    result['fft_power'] = fft_spectrum
    result['fft_first'] = fft_spectrum1
    result['fft_second'] = fft_spectrum2
    result['fft_period'] = fftperiod
    result['fft_trend'] = fft_trend
    return result
Esempio n. 5
0
def contour(x, y, z, title='', xlabel='', xunits='', ylabel='', yunits='',
    zunits='', label='', label_pos=[0.02, 0.95], fig=None, ax=None,
    subplot=(1, 1, 1), sharex=None, sharey=None, xlim=None, ylim=None,
    xscale='linear', yscale='linear', zscale='linear', nospines=False,
    scale=1., scale_label=None, crange=None, cticks=None,
    cmap=custom_cm.custom_viridis, colorbar=True, cbarpos=None,
    orientation='horizontal', extend='both', **kwargs):
    """
    """
    # OLD: cmap=cm.GMT_no_green
    #
    if fig == None:
        fig = figure()

    if ax == None:
        if len(subplot) == 3:
            ax = fig.add_subplot(subplot[0], subplot[1], subplot[2],
                sharex=sharex, sharey=sharey)
        elif len(subplot) == 4:
            ax = fig.add_axes(subplot, sharex=sharex, sharey=sharey)

    if nospines:
        dropspines(ax)
    ax.minorticks_on()
    ax.tick_params(direction='out', which='both')

    norm = None
    # Base 10 logarithmic scale
    if zscale == 'log':
        z = numpy.log10(z)
        crange = numpy.log10(crange)
    elif zscale == 'log2':
        z = numpy.log2(z)
        crange = numpy.log2(crange)
    # The chlorophyll-a color scale as described at
    # http://oceancolor.gsfc.nasa.gov/DOCS/standard_chlorophyll_colorscale.txt
    # Chl-a concentration are converted from mg m-3 to a log like scale, i.e.
    #   pix = (log10(chlor_a) + 2) / 0.015
    #   chlor_a = 10 ** (0.015 * pix - 2)
    elif zscale == 'chla':
        cmap = custom_cm.custom_chla
        z = (numpy.log10(z) + 2) / 0.015
        zrange = numpy.array([0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30, 60])
        #crange = numpy.arange(0, 256)
        crange = (numpy.log10(zrange) + 2) / 0.015
        cticks = dict(values=(numpy.log10(zrange) + 2) / 0.015, text=zrange)

    # Setting the color ranges
    if (crange == None) & (cticks == None):
        cmajor, cminor, crange, cticks, extend = common.step(z / scale,
            returnrange=True)
        cticks = dict(values = cticks)
    if cticks == None:
        if len(crange) < 15:
            cticks = dict(values = crange[::2])
        else:
            cticks = dict(values = crange[::4])
    if zscale == 'log':
        # Checks if tick values are all integers.
        cticks['text'] = ['10$^{{{}}}$'.format(tick) for tick in
            cticks['values']]
    elif zscale == 'log2':
        cticks['text'] = ['2$^{{{%d}}}$' % (tick) for tick in cticks['values']]

    # Sets scale label according to scale
    if scale_label == None:
        log = int(numpy.log10(scale))
        scale = 10 ** log
        if log != 0:
            scale_label = r'\times 10^{%d}' % (log)
            crange /= scale
        else:
            scale_label = ''

    xmin, xmax = 9e9, 0
    xmin, xmax = min(xmin, x.min()), max(xmax, x.max())
    ymin, ymax = 9e9, 0
    ymin, ymax = min(ymin, y.min()), max(ymax, y.max())
    #bbox = dict(edgecolor='w', facecolor='w', alpha=0.9)
    bbox = dict(boxstyle='square, pad=0.3', facecolor='w', edgecolor='none',
        alpha=0.9)

    try:
        xmask = z.mask.any(axis=0)
        sel = pylab.find(~xmask)
        xmin, xmax = x[sel[0]], x[sel[-1]]
    except:
        pass
    try:
        ymask = z.mask.any(axis=1)
        sel = pylab.find(~ymask)
        ymin, ymax = y[sel[0]], y[sel[-1]]
    except:
        pass

    # The contour!
    im = ax.contourf(x, y, z / scale, crange, extend=extend, cmap=cmap,
        norm=norm)

    # Draws colorbar
    if colorbar:
        corners = ax.get_position().corners()
        if orientation == 'squared':
            co = 'horizontal'
        elif orientation  in ['landscape', 'landscape.golden', 'worldmap',
            'horizontal']:
            if cbarpos == None:
                cbarpos = [0.05, -0.08, -0.1, 0.02]
            position = numpy.array([corners[0, 0], corners[0, 1],
                corners[2, 0] - corners[0, 0], 0]) + numpy.array(cbarpos)
            co = 'horizontal'
        elif orientation in ['portrait', 'vertical']:
            if cbarpos == None:
                cbarpos = [0.03, 0.025, 0.017, -0.05]
            position = numpy.array([corners[2, 0], corners[2, 1], 0,
                corners[3, 1] - corners[2, 1]]) + numpy.array(cbarpos)
            co = 'vertical'
        else:
            raise Warning('Invalid orientation %s.' % orientation)
        cax = fig.add_axes(position)
        pylab.colorbar(im, cax=cax, orientation=co, ticks=cticks['values'],
            extend=extend)
        if 'text' in cticks.keys():
            if co == 'horizontal':
                cax.set_xticklabels(cticks['text'])
            else:
                cax.set_yticklabels(cticks['text'])

    if title:
        ax.set_title(title)
    if xunits != '':
        ax.set_xlabel(ur'\textbf{%s} $\left[%s\right]$' % (xlabel, xunits))
    elif xlabel != '':
        ax.set_xlabel(ur'\textbf{%s}' % xlabel)
    if yunits:
        ax.set_ylabel(ur'\textbf{%s} $\left[%s\right]$' % (ylabel, yunits))
    elif ylabel:
        ax.set_ylabel(ur'\textbf{%s}' % (ylabel))
    if xscale == 'time':
        timeformat(ax, dt=xmax-xmin)
    if xscale == 'deg':
		try:
			pyplot.locator_params(axis='x', nbins=3)
		except:
			pass
		ax.set_xticklabels([common.num2latlon(i, 0, mode='each',
			x180=True, dtype='label')[1] for i in ax.get_xticks()])
    if yscale == 'time':
        timeformat(ax, dt=ymax-ymin, axis='y')
    if yscale == 'deg':
        ax.set_yticklabels([common.num2latlon(0, i, mode='each', x180=False,
            dtype='label')[0] for i in ax.get_yticks()])
    if (zunits != '') | (scale_label != ''):
        if co == 'horizontal':
            ci, cj, ha, va = 1.05, 0.5, 'left', 'center'
        else:
            ci, cj, ha, va = 0.5, -0.15, 'left', 'top'
        cax.text(ci, cj, r'$\left[%s %s\right]$' % (scale_label, zunits), ha=ha,
            va=va, transform=cax.transAxes)
    if label:
        ax.text(label_pos[0], label_pos[1], label, ha='left', va='top',
            transform=ax.transAxes, bbox=bbox)

    if xlim == None:
        xlim = [xmin, xmax]
    if ylim == None:
        ylim = [ymin, ymax]
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.minorticks_on()
    pylab.draw()
    return ax
Esempio n. 6
0
def plot(x, y, title='', xlabel='', xunits='', ylabel='', yunits='', label='',
    format='-', color='k', linewidth=1.5, markersize=7, fig=None, ax=None,
    subplot=(1, 1, 1), sharex=None, sharey=None, xlim=None, ylim=None,
    xscale='linear', yscale='linear', xaxis='same', yaxis='same',
    scale=1., scale_label='', nospines=False, xtick='auto', ytick='auto',
    legend_label=None, orientation='portrait', style=None, alpha=1.,
    label_pos=[0.02, 0.95], new_line=False, return_handles=False, err=None,
    **kwargs):
    """Plot lines and/or markers.

    Accepts a single series (x, y as arrays) or multiple series (y as a
    list of arrays). Scalar per-series parameters (format, color, alpha,
    linewidth, markersize, ylabel, yunits, yscale) are broadcast to all
    series. Complex-valued y is rendered as barbs or quiver arrows.

    PARAMETERS
        x (array like) :
        y (array like) :
        style (string, optional) :
            Barb, quiver, scatter, ...
        xaxis, yaxis (string, optional) :
            'same' (default) draws all series on one axis; 'twin' gives
            each additional series its own twinned axis.
        err (dict, optional) :
            Error bars with keys 'x' and 'y'; if err['ellipse'] is true,
            error ellipses are drawn instead.
        return_handles (boolean, optional) :
            If true returns ax and plot handles.

    RETURNS
        ax[, handles] : axis

    """
    if fig == None:
        fig = figure()
    # Normalize single-series input into the list-of-series form so the
    # drawing loop below handles both cases uniformly.
    if type(y).__name__ in ['ndarray', 'MaskedArray']:
        x, y = [x], [y]
        format = [format]
        color = [color]
        alpha = [alpha]
        linewidth = [linewidth]
        markersize = [markersize]
        ylabel = [ylabel]
        yunits = [yunits]
        yscale = [yscale]
        n = 1
    else:
        # Broadcast scalar per-series parameters to one entry per series.
        n = len(y)
        if type(x).__name__ == 'ndarray':
            x = [x] * n
        if type(format) in [str, unicode]:
            format = [format] * n
        if type(color) in [str, unicode]:
            color = [color] * n
        if type(alpha) in [float, int]:
            alpha = [alpha] * n
        if type(linewidth) in [float, int]:
            linewidth = [linewidth] * n
        if type(markersize) in [float, int]:
            markersize = [markersize] * n
        if type(ylabel) in [str, unicode]:
            ylabel = [ylabel] * n
        if type(yunits) in [str, unicode]:
            yunits = [yunits] * n
        if type(yscale) in [str, unicode]:
            yscale = [yscale] * n

    if ax == None:
        if len(subplot) == 3:
            # NOTE(review): the three branches below are identical and
            # the xaxis == 'twin' result is immediately overwritten by
            # the following if/else -- looks redundant; confirm intent
            # before simplifying.
            if xaxis == 'twin':
                ax = fig.add_subplot(subplot[0], subplot[1], subplot[2],
                    sharex=sharex, sharey=sharey)
            if yaxis == 'twin':
                ax = fig.add_subplot(subplot[0], subplot[1], subplot[2],
                    sharex=sharex, sharey=sharey)
            else:
                ax = fig.add_subplot(subplot[0], subplot[1], subplot[2],
                    sharex=sharex, sharey=sharey)
        elif len(subplot) == 4:
            ax = fig.add_axes(subplot, sharex=sharex, sharey=sharey)
    else:
        # NOTE(review): Axes.hold was removed in matplotlib >= 3.0; this
        # call only works on the old matplotlib this module targets.
        ax.hold('on')

    if nospines:
        dropspines(ax)

    # Adds a line between labels and units. Makes sure that character is
    # UTF8 encoded.
    if new_line:
        new_line = u'\n'
    else:
        new_line = u''

    bbox = dict(edgecolor='w', facecolor='w', alpha=0.9)
    xmin, xmax = 9e9, 0
    handles = []
    Ax = []
    # Draw each series; series i > 0 may get its own twinned axis.
    for i in range(n):
        if i == 0:
            bx = ax
        else:
            if xaxis == 'twin':
                # Extra y-axes are pushed out to the right of the plot.
                bx = ax.twinx()
                offset = 1 + (i - 1) * 1.1
                bx.spines['right'].set_position(('axes', offset))

            elif yaxis == 'twin':
                bx = ax.twiny()
                offset = 1 + (i - 1) * 2.1
                bx.spines['bottom'].set_position(('axes', 0))
                # NOTE(review): leftover debug print (Python 2 syntax).
                print 'Ahhhh!!!!'
        #
        Ax.append(bx)
        #

        # Pre-transform coordinates for base-2 logarithmic axes; the
        # tick labels are rewritten after the loop.
        if xscale == 'log2':
            xs = numpy.log2(x[i])
        else:
            xs = x[i]
        if yscale == 'log2':
            ys = numpy.log2(y[i])
        else:
            ys = y[i]

        # NOTE(review): `scale` defaults to 1., so this automatic
        # order-of-magnitude scaling only triggers when the caller
        # explicitly passes scale=None.
        if ((n == 1) | (xaxis == 'twin')) & (scale == None):
            std = numpy.log10(ys.std())
            if (std > 3) | (std <=-1):
                std = numpy.round(std)
                scale = 10 ** std
                scale_label = r'\times 10^{%d}' % (std)
            else:
                scale = 1.
                scale_label = ''

        # Sets label for legend
        try:
            _label = legend_label[i]
        except:
            _label = None

        # Sets scale label if not set
        if (scale != 1) & (scale_label == ''):
            scale_label = r'\times %s' % (scale)

        # Per-series style arguments merged over the caller's kwargs.
        args = kwargs.copy()
        args.update(dict(color=color[i], markerfacecolor=color[i],
            linewidth=linewidth[i], markersize=markersize[i], alpha=alpha[i]),
            label=_label)
        quiver = False
        # Complex data is drawn as vectors (barbs or quiver) anchored on
        # the x-axis; real data follows the scale/style branches below.
        if numpy.iscomplex(ys).any():
            quiver = True
            if style == 'barbs':
                handle = bx.barbs(xs, xs * 0, ys.real, ys.imag, **kwargs)
            else:
                # Normalize vectors!
                ysN = numpy.sqrt(ys.real**2 + ys.imag**2)
                ys = ys / ysN
                #
                q = bx.quiver(xs, xs * 0, ys.real, ys.imag, units='y',
                    scale_units='y', scale=scale, **kwargs)
                handle = q
                if yunits[i] == '':
                    qk = bx.quiverkey(q, 0.1, 0.1, 1., labelpos='E')
                else:
                    qk = bx.quiverkey(q, 0.1, 0.1, 1., ur'%d $%s$' % (1,
                        yunits[i]), labelpos='E')
        elif (xscale != 'log') & (yscale[i] == 'log'):
            handle, = bx.semilogy(xs, ys/scale, format[i], **args)
        elif (xscale == 'log') & (yscale[i] != 'log'):
            handle, = bx.semilogx(xs, ys/scale, format[i], **args)
        elif err != None:
            _draw_ellipse = False
            if 'ellipse' in err.keys():
                if err['ellipse']:
                    _draw_ellipse = True
            if _draw_ellipse:
                for _x, _y, _w, _h in zip(xs, ys/scale, err['x'], err['y']):
                    _e = Ellipse(xy=(_x, _y), width=_w, height=_h, alpha=0.5, color='#333333')
                    handle = bx.add_patch(_e)
            else:
                handle, _, _= bx.errorbar(xs, ys/scale, xerr=err['x'],
                    yerr=err['y'], fmt=format[i], **args)
        elif style == 'scatter':
            # NOTE(review): requires 'cmap', 'zorder', 'vmin' and 'vmax'
            # in kwargs; raises KeyError otherwise -- confirm intended.
            handle = bx.scatter(xs, ys/scale, marker=format[i],
                s=args['markersize'], c=args['color'], cmap=args['cmap'],
                alpha=args['alpha'], zorder=args['zorder'], vmin=args['vmin'],
                vmax=args['vmax'])
        else:
            handle = bx.plot(xs, ys/scale, format[i], **args)
        handles.append(handle)
        try:
            xmin, xmax = min(xmin, xs.min()), max(xmax, xs.max())
        except:
            xmin, xmax = 0, 1
        #
        # Axis limits: accept either one global [lo, hi] pair or a
        # per-series list of pairs.
        if not (xlim == None):
            try:
                if len(xlim[i]) == 2:
                    bx.set_xlim(xlim[i])
                else:
                    raise ValueError()
            except:
                bx.set_xlim(xlim)
        if not (ylim == None):
            try:
                if len(ylim[i]) == 2:
                    bx.set_ylim(ylim[i])
                else:
                    raise ValueError()
            except:
                bx.set_ylim(ylim)
        #
        # 'auto' or 'auto:<k>' selects at most k major y-ticks.
        # NOTE(review): reuses/clobbers the name `n` (series count);
        # harmless here because the range() iterator above was already
        # created, but fragile -- confirm before relying on `n` later.
        if ytick[:4] == 'auto':
            n = ytick.find(':')
            if n >= 0:
                n = atof(ytick[n+1:])
            else:
                n = 5
            if yscale[i] == 'linear':
                bx.yaxis.set_major_locator(ticker.MaxNLocator(n))
        if (xaxis == 'twin') | (i == 0):
            if not quiver:
                if yunits[i] or scale_label:
                    bx.set_ylabel(ur'\textbf{%s}' % (ylabel[i]) + new_line +
                        ur'$\left[%s %s\right]$' % (scale_label, yunits[i]))
                elif ylabel[i]:
                    bx.set_ylabel(ur'\textbf{%s}' % (ylabel[i]))
            else:
                pylab.setp(bx.get_yticklabels(), visible=False)
                if ylabel[i]:
                    bx.set_ylabel(ur'\textbf{%s}' % (ylabel[i]))
            if yscale[i] == 'deg':
                bx.set_yticklabels([common.num2latlon(0, tk, mode='each',
                    x180=False, dtype='label')[0] for tk in bx.get_yticks()])

    # Rewrite tick labels as powers of two for log2 axes (the data was
    # log2-transformed before plotting).
    if xscale == 'log2':
        xmin, xmax = numpy.floor([-3., xmax])
        xticks = 2 ** numpy.arange(xmin, xmax)
        bx.set_xticks(numpy.log2(xticks))
        bx.set_xticklabels(xticks)
        pylab.setp(bx.get_xticklabels()[::2], visible=False)
    if yscale == 'log2':
        # NOTE(review): `ymax` is not assigned anywhere in this function
        # -- this branch appears to rely on a global or would raise
        # NameError; confirm.
        ymin, ymax = numpy.floor([-3., ymax])
        yticks = 2 ** numpy.arange(ymin, ymax)
        bx.set_yticks(numpy.log2(yticks))
        bx.set_yticklabels(yticks)
        pylab.setp(bx.get_yticklabels()[::2], visible=False)

    if xlim == None:
        ax.set_xlim([xmin, xmax])
    if xtick[:4] == 'auto':
        n = xtick.find(':')
        if n >= 0:
            n = atof(xtick[n+1:])
        else:
            n = 10
        if xscale == 'linear':
            ax.xaxis.set_major_locator(ticker.MaxNLocator(n))
    if title:
        ax.set_title(title)
    if label:
        axes_label(label, ax, label_pos[0], label_pos[1], bbox)
    if xunits != '':
        ax.set_xlabel(ur'\textbf{%s} $\left[%s\right]$' % (xlabel, xunits))
    elif xlabel != None:
        ax.set_xlabel(ur'\textbf{%s}' % xlabel)
    else:
        pylab.setp(bx.get_xticklabels(), visible=False)
    if xscale == 'time':
        timeformat(ax, dt=xmax-xmin, orientation=orientation)
    if xscale == 'deg':
        ax.set_xticklabels([common.num2latlon(i, 0, mode='each', x180=False,
            dtype='label')[1] for i in bx.get_xticks()])
    #
    ax.minorticks_on()
    ax.grid(True, zorder=0)

    # NOTE(review): legend drawing deliberately disabled (1 == 2 is
    # always False) -- confirm whether it should be removed or enabled.
    if (1 == 2) & (legend_label is not None):
        # Draws legend
        legend(legend_label, ax=ax)

    pylab.draw()
    #
    # Return a single axis for the one-series case, otherwise the list
    # of axes (one per twinned series).
    if len(Ax) == 1:
        if return_handles:
            return Ax[0], handles
        else:
            return Ax[0]
    else:
        if return_handles:
            return Ax, handles
        else:
            return Ax
Esempio n. 7
0
def hovmoller(lon, tm, z, zo=None, zz=None, title=None, label=None,
              labels=dict(), crange=None, cmap=cm.GMT_no_green,
              orientation='landscape', show=False, save='', ftype='png',
              adjustprops=None, bottom=None, right=None, loc=[], std=None,
              xunits='deg', draft=False, hookx=None, hooky=None):
    """Hovmoller plots.

    PARAMETERS
        lon (array like) :
            Longitude axis.
        tm (array like) :
            Time axis.
        z (array like) :
            Filled contour variable.
        zo (array like) :
            Overlapping contour variable (e.g. relative significance of
            wavelet analysis) to be plotted with a thick solid black line.
        zz (array like) :
            Another overlapping contour variable (e.g. original data) to
            be plotted with a thin solid white line.
        title (string, array like, optional) :
            Sets the contour plot title. If array like, each element of
            the array becomes the title for plot.
        label (string, array like, optional) :
            Sets the label for each plot. If array like, each element
            of the array becomes the label for each plot.
        labels (dictionary, optional) :
            Sets the labels for the plot axis.
        units (string, array like, optional) :
            Determines the units for all the contours together or
            separately.
        crange (array like, optional) :
            Sets the color range of the maps. If not given then the
            range is calculated from the input data.
        cmap (colormap, optional) :
            Sets the colormap to be used in the plots. The default is
            the Generic Mapping Tools (GMT) no green.
        orientation (string, optional) :
            Sets the orientation of the figure. Allowed options are
            'landscape' (default), 'portrait', 'squared'.
        show (boolean, optional) :
            If set to true then the resulting maps are explicitly shown
            on screen.
        save (string, optional) :
            The path in which the resulting plots are to be saved. If
            not set, then no images will be saved.
        ftype (string, optional) :
            The image file type. Most backends support png, pdf, ps,
            eps and svg.
        adjustprops (dict, optional) :
            Dictionary containing the subplot parameters.
        bottom (string, optional) :
            If set to either 'std' or 'avg' plots respectively the
            standard deviation or mean of the signal at the bottom.
        loc (list, optional) :
            Lists the longitude of locations to be marked in plot.
        xunit (string, optional) :
            Determines the x-axis unit. Valid options are either 'deg'
            for degrees (default) or 'km' for kilometers.
        draft (boolean, optional) :
            If set to true, then reduces the size of the colorbar to
            approximately two colors to save time. Default is false.
        hookx, hooky (function, optional) :
            Executes a hook function after the plot in the x and y
            axes, respectively.

    OUTPUT
        Hovmoller contour plots either on screen and or on file
        according to the specified parameters.

    RETURNS
        Nothing.

    """
    t1 = time()
    __init__()

    # Setting undefined label strings.
    if 'units' not in labels.keys():
        labels['units'] = ''
    if 'Year' not in labels.keys():
        labels['Year'] = 'Year'
    if 'std' not in labels.keys():
        labels['std'] = 'Std'
    if 'avg' not in labels.keys():
            labels['avg'] = 'Avg'

    # Transforms input arrays in numpy arrays and numpy masked arrays.
    # NaNs in the data are folded into the mask.
    lon = numpy.asarray(lon)
    tm = numpy.asarray(tm)
    if type(z).__name__ != 'MaskedArray':
        z = numpy.ma.asarray(z)
        z.mask = numpy.isnan(z)
    else:
        z.mask = z.mask | numpy.isnan(z.data)

    # Determines the number of dimensions of the variable to be plotted and
    # the sizes of each dimension. 2-D input is promoted to a single-panel
    # 3-D array (c panels, b times, a longitudes).
    dim = len(z.shape)
    if dim == 3:
        c, b, a = z.shape
    elif dim == 2:
        b, a = z.shape
        c = 1
        z = z.reshape(c, b, a)
    else:
        raise Warning, ('Hovmoller plots require either bi-dimensional or tri-'
                        'dimensional data.')
    if lon.size != a:
        raise Warning, 'Longitude and data lengths do not match.'
    if tm.size != b:
        raise Warning, 'Time and data lengths do not match.'

    # Validate the overlapping significance contour `zo` against z's shape.
    if type(zo).__name__ != 'NoneType':
        dimo = len(zo.shape)
        if dimo == 2:
            co, ao = zo.shape
            bo = b
            zo = zo * numpy.ones([bo, co, ao])
        # NOTE(review): when dimo == 3, `co` and `ao` are never assigned
        # and the check below raises NameError -- presumably the 3-D case
        # should unpack co, bo, ao = zo.shape; confirm against callers.
        if (co != c) | (ao != a):
                raise Warning ('Overlapping array dimensions do not match')
        overlap = True
    else:
        overlap = False
    # Validate the secondary overlay `zz` (e.g. the original signal).
    if type(zz).__name__ != 'NoneType':
        dimz = len(zz.shape)
        if dimz == 2:
            bz, az = zz.shape
            cz = 0
        elif dimz == 3:
            cz, bz, az = zz.shape
        else:
            cz = bz = az = 0
        if (bz != b) | (az != a):
                raise Warning ('Overlapping array dimensions do not match')
        zero = True
    else:
        zero = False

    # Verifies if title, label, unit and std parameters have the same number
    # of items as the number of plots to be drawn. Shorter lists are
    # cycled to cover all panels.
    if type(title).__name__ == 'str':
        title = [title] * c
    elif type(title).__name__ in ['list', 'tuple', 'ndarray']:
        C = len(title)
        if c > C:
            title = list(title) * int(numpy.ceil(float(c) / C))
    if type(label).__name__ == 'str':
        label = [label] * c
    elif type(label).__name__ in ['list', 'tuple', 'ndarray']:
        C = len(label)
        if c > C:
            label = list(label) * int(numpy.ceil(float(c) / C))

    # If the edges contain only NaN's, then slice them out.
    sel = pylab.find(~numpy.isnan(z.data).all(axis=0).all(axis=0))
    if len(sel) != a:
        a = len(sel)
        if a == 0:
            return
        lon = lon[sel[0]:sel[-1]]
        z = z[:, :, sel[0]:sel[-1]]
        if overlap:
            zo = zo[:, :, sel[0]:sel[-1]]
        if zero:
            if dimz == 2:
                zz = zz[:, sel[0]:sel[-1]]
            elif dimz == 3:
                zz = zz[:, :, sel[0]:sel[-1]]

    # Setting the color ranges. When crange is given, derive tick spacing
    # and the colorbar 'extend' mode from how the data fits in the range.
    bbox = dict(edgecolor='w', facecolor='w', alpha=0.9)
    if crange == None:
        cmajor, cminor, crange, cticks, extend = common.step(z,
            returnrange=True)
    else:
        crange = numpy.asarray(crange)
        cminor = numpy.diff(crange).mean()
        if crange.size > 11:
            cmajor = 2 * cminor
        if len(crange) < 15 :
            cticks = crange[::2]
        else:
            cticks = crange[::5]

        xmin, xmax = z.min(), z.max()
        rmin, rmax = crange.min(), crange.max()
        if (xmin < rmin) & (xmax > rmax):
            extend = 'both'
        elif (xmin < rmin) & (xmax <= rmax):
            extend = 'min'
        elif (xmin >= rmin) & (xmax > rmax):
            extend = 'max'
        elif (xmin >= rmin) & (xmax <= rmax):
            extend = 'neither'
        else:
            raise Warning, 'Unable to determine extend'
    if draft:
        # NOTE(review): Python 2 integer division picks the middle
        # contour level; under Python 3 this would need //.
        crange = [min(crange), crange[len(crange) / 2], max(crange)]

    # Turning interactive mode on or off according to show parameter.
    if show == False:
        pylab.ioff()
    elif show == True:
        pylab.ion()
    else:
        raise Warning, 'Invalid show option.'

    # Sets the figure properties according to the orientation parameter and to
    # the data dimensions including the subplot number of rows and columns.
    if orientation == 'landscape':
        #figprops = dict(figsize=(7.33, 5.33), dpi=96)
        plcols = c
        plrows = 1
    elif orientation == 'portrait':
        #figprops = dict(figsize=(5.33, 7.33), dpi=96)
        plcols = 1
        plrows = c
    elif orientation == 'squared':
        #figprops = dict(figsize=(5.33, 5.33), dpi=96)
        plrows = plcols = numpy.ceil(c ** 0.5)
    else:
        raise Warning, 'Orientation \'%s\' not allowed.' % (orientation, )
    if adjustprops == None:
        adjustprops = dict(left=0.1, bottom=0.15, right=0.99, top=0.9,
                           wspace=0.05, hspace=0.02)
    
    fig = graphics.figure(ap=adjustprops, orientation=orientation)

    # Some figure parameters definitions and initializations
    if bottom:
        bottommin, bottommax = [0, -65535]
        baxes = []
    if right:
        rightmin, rightmax = [0, -65535]
        grey = [0.66, 0.66, 0.66]
        lfmt = ['-', '-', '-', '-']
        lclr = ['k', 'k', grey, grey]
        lwth = [2., 1., 2., 1.]
        c += 1 # Adds one more sub-plot for size calculations only.

    # Subplot width and height parameters
    w = ((adjustprops['right'] - adjustprops['left']) / c -
        adjustprops['hspace'])
    if bottom:
        y = 0.25 + adjustprops['bottom'] - adjustprops['wspace']
    else:
        y = adjustprops['bottom']
    if bottom:
        h = 0.75 - adjustprops['bottom']
    else:
        h = adjustprops['top'] - y + adjustprops['wspace']

    if right:
        c -= 1 # Adjusts to original number of Hovmoller sub-plots.

    # Draw each Hovmoller panel. The first panel (k == 0) owns the shared
    # axes; later panels share its x and y axes.
    for k in range(c):
        x = (w + adjustprops['hspace']) * k + adjustprops['left']

        if k == 0:
            ax = pylab.axes([x, y, w, h])
            bx = ax
            if right:
                xx = ((w + adjustprops['hspace']) * c + adjustprops['left'])
                rx = pylab.axes([xx, y, w, h], sharey=ax)
                pylab.setp(rx.get_yticklabels(), visible=False)
                pylab.axes(bx)
        else:
            bx = pylab.axes([x, y, w, h], sharex=ax, sharey=ax)

        # Thin light contour of the secondary overlay (zero crossings of
        # the original signal).
        if zero:
            if dimz == 2:
                oz = zz
            elif dimz == 3:
                oz = zz[k, :, :]
            pylab.contour(lon, tm, oz, [-1e10, 0, 1e10],
                colors=[[0.9, 0.9, 0.9]], linestyles='-', linewidths=0.5,
                alpha=0.9)

        # Thick black contour marking where z exceeds the significance
        # level zo.
        if overlap:
            if dimo == 2:
                o = (z[k, :, :].data >= zo[:, k, :])
            elif dimo == 3:
                o = (z[k, :, :].data >= zo[k, :, :])
            pylab.contour(lon, tm, o, [0, 1],
                colors='k', linestyles='-', linewidths=1.)

        # Plots the contour. Uses assigned data for power hovmollers.
        pylab.contourf(lon, tm, z[k, :, :].data, crange, cmap=cmap,
            extend=extend)

        # Running x and y hooks on current axis. Hooks are best-effort:
        # a missing (None) or failing hook is silently ignored.
        try:
            hookx(bx)
        except:
            pass
        try:
            hooky(bx)
        except:
            pass

        # Side panel: time series of the zonal std/avg of each panel.
        if right:
            if right == 'std':
                rz = nanstd(z[k, :, :].data, axis=1)
            elif right == 'avg':
                rz = nanmean(z[k, :, :].data, axis=1)
            rx.plot(rz, tm, lfmt[k], color=lclr[k], linewidth=lwth[k])

            # Running y hook on right axis.
            try:
                hooky(rx)
            except:
                pass

            rightmin = min([numpy.nanmin(rz), rightmin])
            rightmax = max([numpy.nanmax(rz), rightmax])

        # Marks the requested longitudes with white diamonds.
        for i in loc:
            pylab.plot([i, i], [tm.min(), tm.max()], 'D', markersize=14,
                color='w', alpha=1)

        # Bottom panel: longitude profile of the temporal avg/std.
        if bottom:
            yb, hb = adjustprops['bottom'], 0.175
            if k == 0:
                cx = pylab.axes([x, yb, w, hb], sharex=ax)
                dx = cx
            else:
                dx = pylab.axes([x, yb, w, hb], sharex=ax, sharey=cx)
            baxes.append(dx)
            if bottom == 'avg':
                bz = nanmean(z[k, :, :].data, axis=0)
            elif bottom == 'std':
                bz = nanstd(z[k, :, :].data, axis=0)
            pylab.plot(lon, bz, 'k-')
            
            # Running x hook on bottom axis.
            try:
                hookx(dx)
            except:
                pass

            bottommin = min([numpy.nanmin(bz), bottommin])
            bottommax = max([numpy.nanmax(bz), bottommax])
            #
            pylab.setp(bx.get_xticklabels(), visible=False)
            if k > 0:
                pylab.setp(dx.get_yticklabels(), visible=False)

        # Colorbar is drawn once, attached to the first panel.
        # NOTE(review): orientation == 'squared' defines neither `cax`
        # nor `corientation`, so pylab.colorbar below would raise
        # NameError in that mode -- confirm whether 'squared' is ever
        # used with this function.
        if k == 0:
            if orientation == 'landscape':
                corientation = 'horizontal'
                cax = pylab.axes([adjustprops['left'] + 0.15, 0.05,
                    adjustprops['right'] - adjustprops['left'] - 0.3, 0.03])
                ci, cj, ha, va = 1.05, 0.5, 'left', 'center'
            elif orientation == 'portrait':
                corientation = 'vertical'
                cax = pylab.axes([adjustprops['right'] + 0.02, y + 0.05, 
                    0.03, h - 0.1])
                ci, cj, ha, va = 0.5, -0.05, 'center', 'baseline'
            pylab.colorbar(cax=cax, ax=ax, orientation=corientation,
                extend=extend, ticks=cticks)
            if labels['units']:
                cax.text(ci, cj, r'$\left[%s\right]$' % (labels['units']),
                         ha=ha, va=va, transform=cax.transAxes)
        else:
            pylab.setp(bx.get_yticklabels(), visible=False)

        if title:
            bx.set_title('%s' % (title[k]), va='baseline', fontsize='medium')
        if label:
            bx.text(0.07, 0.97, '%s' % (label[k]), ha='left', va='top',
                transform=bx.transAxes, bbox=bbox)

    # Formatting the plot axis. Bottom panels share one y-range rounded
    # up to a common tick step.
    if bottom:
        ystep, ystep1 = common.step([bottommin, bottommax], 1.5)
        bottommax = pylab.ceil(bottommax / ystep) * ystep

        for dx in baxes:
            for i in loc:
                dx.plot([i, i], [bottommin, bottommax], 'D', markersize=10,
                    color='w', alpha=1)

        cx.set_ylim([bottommin, bottommax])
        ymajor = pylab.matplotlib.ticker.MultipleLocator(ystep)
        yminor = pylab.matplotlib.ticker.MultipleLocator(ystep1)
        cx.yaxis.set_major_locator(ymajor)
        cx.yaxis.set_minor_locator(yminor)
        if labels['units']:
            cx.set_ylabel(r'\textbf{%s} $\left[%s\right]$' % (labels[bottom],
                labels['units']))
        else:
            cx.set_ylabel(r'\textbf{%s}' % (labels[bottom]))
        if xunits == 'km':
            cx.set_xlabel(r'\textbf{%s}' % (xunits))
    else:
        if xunits == 'km':
            ax.set_xlabel(r'\textbf{%s}' % (xunits))
    # Same treatment for the shared x-range of the right-hand panel.
    if right:
        xstep, xstep1 = common.step([rightmin, rightmax], 1.5)
        rightmax = pylab.ceil(rightmax / xstep) * xstep

        rx.set_xlim([rightmin, rightmax])
        xmajor = pylab.matplotlib.ticker.MultipleLocator(xstep)
        xminor = pylab.matplotlib.ticker.MultipleLocator(xstep1)
        rx.xaxis.set_major_locator(xmajor)
        rx.xaxis.set_minor_locator(xminor)
        if labels['units']:
            rx.set_title(r'%s $\left[%s\right]$' % (labels[right],
                labels['units']))
        else:
            rx.set_title(r'%s' % (labels[right]))

    # Main axes limits, tick locators and degree/time tick labels.
    ax.set_xlim([lon.min(), lon.max()])
    ax.set_ylim([tm.min(),  tm.max()])
    if xunits == 'deg':
        xstep, xstep1 = common.step(lon, 2)
    elif xunits == 'km':
        xstep, xstep1 = common.step(lon, 5)
    xmajor = pylab.matplotlib.ticker.MultipleLocator(xstep)
    xminor = pylab.matplotlib.ticker.MultipleLocator(xstep1)
    ax.xaxis.set_major_locator(xmajor)
    ax.xaxis.set_minor_locator(xminor)
    graphics.timeformat(ax, dt=tm[-1]-tm[0], axis='y')
    if xunits == 'deg':
        ax.set_xticklabels([common.num2latlon(i, 0, mode='each', x180=False,
            dtype='label')[1] for i in ax.get_xticks()])
    ax.set_ylabel(r'\textbf{%s}' % (labels['Year']))

    # Drawing and saving the figure if appropriate.
    pylab.draw()
    if save:
        pylab.savefig('%s.%s' % (save, ftype), dpi=150)
    if show == False:
        pylab.close(fig)
Esempio n. 8
0
def plot(
    x,
    y,
    title="",
    xlabel="",
    xunits="",
    ylabel="",
    yunits="",
    label="",
    format="-",
    color="k",
    linewidth=1.5,
    markersize=7,
    fig=None,
    ax=None,
    subplot=(1, 1, 1),
    sharex=None,
    sharey=None,
    xlim=None,
    ylim=None,
    xscale="linear",
    yscale="linear",
    xaxis="same",
    yaxis="same",
    scale=1.0,
    scale_label="",
    nospines=False,
    xtick="auto",
    ytick="auto",
    legend_label=None,
    orientation="portrait",
    style=None,
    alpha=1.0,
    label_pos=[0.02, 0.95],
    new_line=False,
    return_handles=False,
    err=None,
    **kwargs
):
    """Plot lines and/or markers.

    PARAMETERS
        x (array like) :
        y (array like) :
        style (string, optional) :
            Barb, quiver, scatter, ...
        return_handles (boolean, optional) :
            If true returns ax and plot handles.

    RETURNS
        ax[, handles] : axis

    """
    if fig == None:
        fig = figure()
    if type(y).__name__ in ["ndarray", "MaskedArray"]:
        x, y = [x], [y]
        format = [format]
        color = [color]
        alpha = [alpha]
        linewidth = [linewidth]
        markersize = [markersize]
        ylabel = [ylabel]
        yunits = [yunits]
        yscale = [yscale]
        n = 1
    else:
        n = len(y)
        if type(x).__name__ == "ndarray":
            x = [x] * n
        if type(format) in [str, unicode]:
            format = [format] * n
        if type(color) in [str, unicode]:
            color = [color] * n
        if type(alpha) in [float, int]:
            alpha = [alpha] * n
        if type(linewidth) in [float, int]:
            linewidth = [linewidth] * n
        if type(markersize) in [float, int]:
            markersize = [markersize] * n
        if type(ylabel) in [str, unicode]:
            ylabel = [ylabel] * n
        if type(yunits) in [str, unicode]:
            yunits = [yunits] * n
        if type(yscale) in [str, unicode]:
            yscale = [yscale] * n

    if ax == None:
        if len(subplot) == 3:
            if xaxis == "twin":
                ax = fig.add_subplot(subplot[0], subplot[1], subplot[2], sharex=sharex, sharey=sharey)
            if yaxis == "twin":
                ax = fig.add_subplot(subplot[0], subplot[1], subplot[2], sharex=sharex, sharey=sharey)
            else:
                ax = fig.add_subplot(subplot[0], subplot[1], subplot[2], sharex=sharex, sharey=sharey)
        elif len(subplot) == 4:
            ax = fig.add_axes(subplot, sharex=sharex, sharey=sharey)
    else:
        ax.hold("on")

    if nospines:
        dropspines(ax)

    # Adds a line between labels and units. Makes sure that character is
    # UTF8 encoded.
    if new_line:
        new_line = u"\n"
    else:
        new_line = u""

    bbox = dict(edgecolor="w", facecolor="w", alpha=0.9)
    xmin, xmax = 9e9, 0
    handles = []
    Ax = []
    for i in range(n):
        if i == 0:
            bx = ax
        else:
            if xaxis == "twin":
                bx = ax.twinx()
                offset = 1 + (i - 1) * 1.1
                bx.spines["right"].set_position(("axes", offset))

            elif yaxis == "twin":
                bx = ax.twiny()
                offset = 1 + (i - 1) * 2.1
                bx.spines["bottom"].set_position(("axes", 0))
                print "Ahhhh!!!!"
        #
        Ax.append(bx)
        #

        if xscale == "log2":
            xs = numpy.log2(x[i])
        else:
            xs = x[i]
        if yscale == "log2":
            ys = numpy.log2(y[i])
        else:
            ys = y[i]

        if ((n == 1) | (xaxis == "twin")) & (scale == None):
            std = numpy.log10(ys.std())
            if (std > 3) | (std <= -1):
                std = numpy.round(std)
                scale = 10 ** std
                scale_label = r"\times 10^{%d}" % (std)
            else:
                scale = 1.0
                scale_label = ""

        # Sets scale label if not set
        if (scale != 1) & (scale_label == ""):
            scale_label = r"\times %s" % (scale)

        args = kwargs.copy()
        args.update(
            dict(
                color=color[i],
                markerfacecolor=color[i],
                linewidth=linewidth[i],
                markersize=markersize[i],
                alpha=alpha[i],
            )
        )
        quiver = False
        if numpy.iscomplex(ys).any():
            quiver = True
            if style == "barbs":
                handle = bx.barbs(xs, xs * 0, ys.real, ys.imag, **kwargs)
            else:
                # Normalize vectors!
                ysN = numpy.sqrt(ys.real ** 2 + ys.imag ** 2)
                ys = ys / ysN
                #
                q = bx.quiver(xs, xs * 0, ys.real, ys.imag, units="y", scale_units="y", scale=scale, **kwargs)
                handle = q
                if yunits[i] == "":
                    qk = bx.quiverkey(q, 0.1, 0.1, 1.0, labelpos="E")
                else:
                    qk = bx.quiverkey(q, 0.1, 0.1, 1.0, ur"%d $%s$" % (1, yunits[i]), labelpos="E")
        elif (xscale != "log") & (yscale[i] == "log"):
            handle, = bx.semilogy(xs, ys / scale, format[i], **args)
        elif (xscale == "log") & (yscale[i] != "log"):
            handle, = bx.semilogx(xs, ys / scale, format[i], **args)
        elif err != None:
            _draw_ellipse = False
            if "ellipse" in err.keys():
                if err["ellipse"]:
                    _draw_ellipse = True
            if _draw_ellipse:
                for _x, _y, _w, _h in zip(xs, ys / scale, err["x"], err["y"]):
                    _e = Ellipse(xy=(_x, _y), width=_w, height=_h, alpha=0.5, color="#333333")
                    handle = bx.add_patch(_e)
            else:
                handle, _, _ = bx.errorbar(xs, ys / scale, xerr=err["x"], yerr=err["y"], fmt=format[i], **args)
        elif style == "scatter":
            handle = bx.scatter(
                xs,
                ys / scale,
                marker=format[i],
                s=args["markersize"],
                c=args["color"],
                cmap=args["cmap"],
                alpha=args["alpha"],
                zorder=args["zorder"],
                vmin=args["vmin"],
                vmax=args["vmax"],
            )
        else:
            handle = bx.plot(xs, ys / scale, format[i], **args)
        handles.append(handle)
        try:
            xmin, xmax = min(xmin, xs.min()), max(xmax, xs.max())
        except:
            xmin, xmax = 0, 1
        #
        if not (xlim == None):
            try:
                if len(xlim[i]) == 2:
                    bx.set_xlim(xlim[i])
                else:
                    raise ValueError()
            except:
                bx.set_xlim(xlim)
        if not (ylim == None):
            try:
                if len(ylim[i]) == 2:
                    bx.set_ylim(ylim[i])
                else:
                    raise ValueError()
            except:
                bx.set_ylim(ylim)
        #
        if ytick[:4] == "auto":
            n = ytick.find(":")
            if n >= 0:
                n = atof(ytick[n + 1 :])
            else:
                n = 5
            if yscale[i] == "linear":
                bx.yaxis.set_major_locator(ticker.MaxNLocator(n))
        if (xaxis == "twin") | (i == 0):
            if not quiver:
                if yunits[i] or scale_label:
                    bx.set_ylabel(
                        ur"\textbf{%s}" % (ylabel[i]) + new_line + ur"$\left[%s %s\right]$" % (scale_label, yunits[i])
                    )
                elif ylabel[i]:
                    bx.set_ylabel(ur"\textbf{%s}" % (ylabel[i]))
            else:
                pylab.setp(bx.get_yticklabels(), visible=False)
                if ylabel[i]:
                    bx.set_ylabel(ur"\textbf{%s}" % (ylabel[i]))
            if yscale[i] == "deg":
                bx.set_yticklabels(
                    [common.num2latlon(0, tk, mode="each", x180=False, dtype="label")[0] for tk in bx.get_yticks()]
                )

    if xscale == "log2":
        xmin, xmax = numpy.floor([-3.0, xmax])
        xticks = 2 ** numpy.arange(xmin, xmax)
        bx.set_xticks(numpy.log2(xticks))
        bx.set_xticklabels(xticks)
        pylab.setp(bx.get_xticklabels()[::2], visible=False)
    if yscale == "log2":
        ymin, ymax = numpy.floor([-3.0, ymax])
        yticks = 2 ** numpy.arange(ymin, ymax)
        bx.set_yticks(numpy.log2(yticks))
        bx.set_yticklabels(yticks)
        pylab.setp(bx.get_yticklabels()[::2], visible=False)

    if xlim == None:
        ax.set_xlim([xmin, xmax])
    if xtick[:4] == "auto":
        n = xtick.find(":")
        if n >= 0:
            n = atof(xtick[n + 1 :])
        else:
            n = 10
        if xscale == "linear":
            ax.xaxis.set_major_locator(ticker.MaxNLocator(n))
    if title:
        ax.set_title(title)
    if label:
        ax.text(label_pos[0], label_pos[1], label, ha="left", va="top", transform=ax.transAxes, bbox=bbox, zorder=99)
    if xunits != "":
        ax.set_xlabel(ur"\textbf{%s} $\left[%s\right]$" % (xlabel, xunits))
    elif xlabel != None:
        ax.set_xlabel(ur"\textbf{%s}" % xlabel)
    else:
        pylab.setp(bx.get_xticklabels(), visible=False)
    if xscale == "time":
        timeformat(ax, dt=xmax - xmin, orientation=orientation)
    if xscale == "deg":
        ax.set_xticklabels(
            [common.num2latlon(i, 0, mode="each", x180=False, dtype="label")[1] for i in bx.get_xticks()]
        )
    #
    ax.minorticks_on()
    ax.grid(True, zorder=0)

    if legend_label != None:
        # Draws legend
        legend(legend_label, ax=ax)

    pylab.draw()
    #
    if len(Ax) == 1:
        if return_handles:
            return Ax[0], handles
        else:
            return Ax[0]
    else:
        if return_handles:
            return Ax, handles
        else:
            return Ax