Code example #1
def main(index, diagnostics=False, anomalies="None", grid="ADW"):
    """
    Plot per-latitude-band data coverage (percent of land boxes filled)
    against time for a single index.

    :param str index: which index to run
    :param bool diagnostics: output diagnostic information
    :param str anomalies: run code on anomalies or climatology rather than raw data
    :param str grid: gridding type ADW/CAM
    """

    # get details of index
    index = utils.INDICES[index]

    # allow for option of running through each month
    if index.name in utils.MONTHLY_INDICES:
        nmonths = 13
        timescale = "MON"
    else:
        nmonths = 1
        timescale = "ANN"

    # setting up colours
    cmap = plt.cm.viridis
    bounds = np.arange(0, 110, 10)
    norm = mpl.cm.colors.BoundaryNorm(bounds, cmap.N)

    # plot all month versions at once
    # BUG FIX: only loop over the months this index actually has
    # (annual-only indices have a single entry), matching sibling scripts.
    for month, name in enumerate(month_names[:nmonths]):

        if diagnostics:
            print(name)

        # set up the figure
        fig = plt.figure(figsize=(8, 6))
        plt.clf()
        ax = fig.add_axes([0.1, 0.1, 0.85, 0.85])

        filename = os.path.join(utils.FINALROOT, utils.make_filenames(index=index.name, grid=grid, anomalies=anomalies, extra="", month_index=""))

        try:
            ncfile = ncdf.Dataset(filename, 'r')

            timevar = ncfile.variables['time'] # array of YYYYMMDD
            latvar = ncfile.variables['latitude'] # array of lats
            lonvar = ncfile.variables['longitude'] # array of lons

            annualvar = ncfile.variables[month_names[month]] # array of arrays

            if anomalies == "anomalies":
                # to make into actuals, add climatology to the anomalies
                clim_filename = os.path.join(utils.FINALROOT, utils.make_filenames(index=index.name, grid=grid, anomalies="climatology", extra="", month_index=""))

                clim_file = ncdf.Dataset(clim_filename, 'r')
                climvar = clim_file.variables[month_names[month]]
        except (RuntimeError, IOError):
            # BUG FIX: skip this month; previously execution fell through to
            # the code below and raised NameError on the unset variables.
            print("File not found: {}".format(filename))
            plt.close()
            continue
        except KeyError:
            # month not present in this file
            plt.close()
            continue

        # extract the information
        times = timevar[:]
        lats = latvar[:]
        lons = lonvar[:]

        # get land sea mask (only needs computing once per index)
        if month == 0:
            lsm = utils.get_land_sea_mask(lats, lons, floor=False)
            n_land_boxes = np.sum(lsm.astype(int), axis=1).astype(float)
            n_land_boxes = np.ma.expand_dims(n_land_boxes, axis=1)

        # if anomalies from HadEX3, then need to add onto climatology
        if anomalies == "anomalies":
            annual_data = annualvar[:] + climvar[:]
            clim_file.close()
        else:
            annual_data = annualvar[:]

        # release the netCDF file handle now the data are read
        ncfile.close()

        # go through each year and count up filled boxes per latitude band
        zonal_boxes = np.zeros(annual_data.shape[:2][::-1])
        for y, year_data in enumerate(annual_data):
            zonal_boxes[:, y] = np.ma.count(year_data, axis=1).astype(float)

        zonal_boxes = np.ma.masked_where(zonal_boxes == 0, zonal_boxes)

        # express as percentage of the land boxes in each band
        normalised_boxes = 100. * zonal_boxes / np.tile(n_land_boxes, [1, annual_data.shape[0]])

        newtimes, newlats = np.meshgrid(np.append(times, times[-1]+1), utils.box_edge_lats)

        mesh = plt.pcolormesh(newtimes, newlats, normalised_boxes, cmap=cmap, norm=norm)

        cb = plt.colorbar(mesh, orientation='horizontal', pad=0.07, fraction=0.05, \
                            aspect=30, ticks=bounds[1:-1], label="%", drawedges=True)

        cb.set_ticklabels(["{:g}".format(b) for b in bounds[1:-1]])
        cb.outline.set_linewidth(2)
        cb.dividers.set_color('k')
        cb.dividers.set_linewidth(2)

        plt.ylim([-90, 90])
        plt.yticks(np.arange(-90, 120, 30))
        # label latitudes as 90S ... 0 ... 90N
        ax.yaxis.set_ticklabels(["{}N".format(i) if i > 0 else "{}S".format(abs(i)) if i < 0 else "{}".format(i) for i in np.arange(-90, 120, 30)])
        plt.ylabel("Latitude")
        plt.title("{} - {}".format(index.name, name))

        outname = putils.make_filenames("latitude_coverage", index=index.name, grid=grid, anomalies=anomalies, month=name)

        plt.savefig("{}/{}/{}".format(utils.PLOTLOCS, index.name, outname))

        plt.close()

    return # main
Code example #2 — file: plot_trend_maps.py, project: rjhd2/HadEX3
def main(index, diagnostics=False, anomalies="None", grid="ADW"):
    """
    Plot maps of linear trends (or of the climatology when
    anomalies == "climatology") for a single index.

    :param str index: which index to run
    :param bool diagnostics: output diagnostic information
    :param str anomalies: run code on anomalies or climatology rather than raw data
    :param str grid: gridding type ADW/CAM
    """

    # get details of index
    index = utils.INDICES[index]

    # allow for option of running through each month
    if index.name in utils.MONTHLY_INDICES:
        nmonths = 13
    else:
        nmonths = 1

    # sort the colour maps
    RdYlBu, RdYlBu_r = putils.adjust_RdYlBu()
    BrBG, BrBG_r = putils.make_BrBG()

    def _expand(entries):
        # expand [(names, settings), ...] into a flat {name: settings} lookup
        table = {}
        for index_names, settings in entries:
            for index_name in index_names:
                table[index_name] = settings
        return table

    # frequently reused colour-bar boundary sets (trend per 10 years)
    pm8 = [-100, -8, -4, -2, -1, 0, 1, 2, 4, 8, 100]
    pm4 = [-100, -4, -2, -1, -0.5, 0, 0.5, 1, 2, 4, 100]
    pm4_lin = [-100, -4, -3, -2, -1, 0, 1, 2, 3, 4, 100]
    pm2 = [-100, -2, -1, -0.5, -0.25, 0, 0.25, 0.5, 1, 2, 100]
    pm1 = [-100, -1, -0.5, -0.25, -0.1, 0, 0.1, 0.25, 0.5, 1, 100]
    pm1_fine = [-100, -1, -0.5, -0.2, -0.1, 0, 0.1, 0.2, 0.5, 1, 100]
    hdd = [-10000, -800, -400, -200, -100, 0, 100, 200, 400, 800, 10000]
    gdd = [-10000, -400, -200, -100, -50, 0, 50, 100, 200, 400, 10000]

    # The previous ~340-line if/elif chains are replaced by lookup tables:
    # index name -> (colour-bar boundaries, colour map).

    # settings for annual trend maps
    annual_trend = _expand([
        (("TX90p", "TN90p", "SU", "TR", "GSL"), (pm8, RdYlBu_r)),
        (("DTR", "ETR"), ([-100, -1, -0.75, -0.5, -0.25, 0, 0.25, 0.5, 0.75, 1, 100], RdYlBu_r)),
        (("WSDI", "WSDI3"), (pm4, RdYlBu_r)),
        (("TX10p", "TN10p", "FD", "ID"), (pm8, RdYlBu)),
        (("CSDI", "CSDI3"), (pm4, RdYlBu)),
        (("TXn", "TNn"), (pm2, RdYlBu_r)),
        (("TXx", "TNx"), (pm1, RdYlBu_r)),
        (("Rx1day",), (pm4, BrBG)),
        (("Rx5day",), (pm4_lin, BrBG)),
        (("PRCPTOT",), ([-100, -20, -10, -5, -2, 0, 2, 5, 10, 20, 100], BrBG)),
        (("Rnnmm", "R95p", "R99p", "RXday"), (pm8, BrBG)),
        (("R95pTOT",), (pm4, BrBG)),
        (("R99pTOT",), (pm1, BrBG)),
        (("R10mm",), ([-100, -3, -1.5, -0.75, -0.25, 0, 0.25, 0.75, 1.5, 3, 100], BrBG)),
        (("R20mm",), (pm2, BrBG)),
        (("CWD",), (pm1_fine, BrBG)),
        (("SDII",), ([-100, -0.75, -0.5, -0.25, -0.1, 0, 0.1, 0.25, 0.5, 0.75, 100], BrBG)),
        (("CDD",), (pm8, BrBG_r)),
        (("CDDcold18",), ([-10000, -100, -50, -20, -10, 0, 10, 20, 50, 100, 10000], RdYlBu_r)),
        (("HDDheat18",), (hdd, RdYlBu)),
        (("GDDgrow10",), (gdd, RdYlBu)),
        (("TNlt2", "TNltm2", "TNltm20", "TMlt10", "TMlt5"), (pm4, RdYlBu)),
        (("TXge30", "TXge35", "TMge5", "TMge10", "TXge50p"), (pm4, RdYlBu_r)),
        (("TNm", "TXm", "TMm", "TXTN"), (pm1_fine, RdYlBu_r)),
        (("TXbTNb",), (pm1_fine, RdYlBu)),
    ])

    # settings for monthly trend maps
    monthly_trend = _expand([
        (("TX90p", "TN90p", "SU", "TR", "GSL", "DTR", "ETR"), (pm8, RdYlBu_r)),
        (("WSDI", "WSDI3"), (pm4, RdYlBu_r)),
        (("TX10p", "TN10p", "FD", "ID"), (pm8, RdYlBu)),
        (("CSDI", "CSDI3"), (pm4, RdYlBu)),
        (("TXx", "TNx", "TXn", "TNn"), (pm2, RdYlBu_r)),
        (("Rx1day", "Rx5day"), (pm4_lin, BrBG)),
        (("PRCPTOT",), ([-100, -10, -5, -2, -1, 0, 1, 2, 5, 10, 100], BrBG)),
        (("Rnnmm", "R95pTOT", "R99pTOT", "R95p", "R99p", "RXday"), (pm8, BrBG)),
        (("R20mm", "R10mm"), (pm4, BrBG)),
        (("CWD",), (pm2, BrBG)),
        (("SDII",), (pm1_fine, BrBG)),
        (("CDD",), (pm4_lin, BrBG_r)),
        (("CDDcold18",), ([-100, -20, -15, -10, -5, 0, 5, 10, 15, 20, 100], RdYlBu_r)),
        (("HDDheat18",), (hdd, RdYlBu)),
        (("GDDgrow10",), (gdd, RdYlBu)),
        (("TNlt2", "TNltm2", "TNltm20", "TMlt10", "TMlt5"), (pm4, RdYlBu)),
        (("TXge30", "TXge35", "TMge5", "TMge10", "TXge50p"), (pm4, RdYlBu_r)),
        (("TNm", "TXm", "TMm", "TXTN"), (pm1_fine, RdYlBu_r)),
        (("TXbTNb",), (pm1_fine, RdYlBu)),
        (("24month_SPI", "12month_SPI", "6month_SPI", "3month_SPI",
          "24month_SPEI", "12month_SPEI", "6month_SPEI", "3month_SPEI"), (pm1_fine, BrBG)),
    ])

    # fallback for any index not listed above
    trend_default = (pm8, RdYlBu_r)

    # climatology settings were identical for annual and monthly maps, so
    # the two duplicated chains collapse into one table
    climatology_settings = _expand([
        (("TX90p", "TN90p", "WSDI", "SU", "TR", "GSL", "DTR", "ETR"), (np.arange(0, 60, 5), plt.cm.YlOrRd)),
        (("TX10p", "TN10p", "CSDI", "FD", "ID"), (np.arange(0, 60, 5), plt.cm.YlOrRd_r)),
        (("TXx", "TNx"), (np.arange(-10, 40, 5), plt.cm.YlOrRd)),
        (("TXn", "TNn"), (np.arange(-30, 10, 5), plt.cm.YlOrRd)),
        (("Rx1day", "Rx5day"), (np.arange(0, 100, 10), plt.cm.YlGnBu)),
        (("PRCPTOT",), (np.arange(0, 1000, 100), plt.cm.YlGnBu)),
        (("CWD", "R20mm", "R10mm", "Rnnmm", "R95pTOT", "R99pTOT",
          "R95p", "R99p", "SDII"), (np.arange(0, 200, 20), plt.cm.YlGnBu)),
        (("CDD",), (np.arange(0, 200, 20), plt.cm.YlGnBu_r)),
    ])
    climatology_default = (np.arange(0, 60, 5), plt.cm.YlOrRd)

    cube_list = iris.load(
        os.path.join(
            utils.FINALROOT,
            utils.make_filenames(index=index.name,
                                 grid=grid,
                                 anomalies=anomalies,
                                 extra="",
                                 month_index="")))

    names = np.array([cube.var_name for cube in cube_list])

    #*************
    # plot trend map

    for month in range(nmonths):

        # select colour-bar boundaries and colour map for this index/month
        if anomalies != "climatology":
            table = annual_trend if month == 0 else monthly_trend
            bounds, cmap = table.get(index.name, trend_default)
        else:
            bounds, cmap = climatology_settings.get(index.name, climatology_default)

        selected_cube, = np.where(names == month_names[month])

        cube = cube_list[selected_cube[0]]
        try:
            cube.coord('grid_latitude').guess_bounds()
            cube.coord('grid_longitude').guess_bounds()
        except ValueError:
            # bounds already present on the coordinates
            pass

        # fix percent -> days issue for these four
        if index.name in ["TX90p", "TN90p", "TX10p", "TN10p"]:
            cube.data = cube.data * 3.65
            index.units = "days"

        # get recent period and trend
        if anomalies != "climatology":
            postYYYY = periodConstraint(cube, utils.TREND_START)
            cube = cube.extract(postYYYY)
            preYYYY = periodConstraint(cube, utils.TREND_END, lower=False)
            cube = cube.extract(preYYYY)
            trend_cube, sigma, significance = TrendingCalculation(
                cube, verbose=diagnostics)

        if index.units == "degrees_C":
            # raw string avoids the invalid "\c" escape warning
            units = r'$^{\circ}$' + "C"
        else:
            units = index.units

        if anomalies != "climatology":
            outname = putils.make_filenames("trend",
                                            index=index.name,
                                            grid=grid,
                                            anomalies=anomalies,
                                            month=month_names[month])

            putils.plot_smooth_map_iris(
                "{}/{}/{}".format(utils.PLOTLOCS, index.name, outname),
                trend_cube,
                cmap,
                bounds,
                "Trend ({}/10 year)".format(units),
                title="{} - {}, Linear Trend {}-{}".format(
                    index.name, month_names[month], utils.TREND_START,
                    utils.TREND_END),
                figtext="(a)",
                significance=significance)

        else:
            outname = putils.make_filenames("climatology",
                                            index=index.name,
                                            grid=grid,
                                            anomalies=anomalies,
                                            month=month_names[month])

            putils.plot_smooth_map_iris(
                "{}/{}/{}".format(utils.PLOTLOCS, index.name, outname),
                cube[0],
                cmap,
                bounds,
                "{}".format(units),
                title="{} - {}, Climatology {}-{}".format(
                    index.name, month_names[month], utils.CLIM_START.year,
                    utils.CLIM_END.year),
                figtext="(a)")

    return  # main
Code example #3
def main(index, comparison=False, diagnostics=False, anomalies="None", grid="ADW", normalise=False, matched=False):
    """
    Plot timeseries of the index gridded with the two reference periods
    (61-90 and 81-10) on a common mask, their difference, and a matching
    coverage (filled grid boxes) plot.

    :param str index: which index to run
    :param bool comparison: compare against other datasets
    :param bool diagnostics: output diagnostic information
    :param str anomalies: run code on anomalies or climatology rather than raw data
    :param str grid: gridding type ADW/CAM
    :param bool normalise: plot as anomalies from 1961-90
    :param bool matched: match HadEX3 to HadEX2 coverage and plot timeseries
    """

    # get details of index
    index = utils.INDICES[index]

    # allow for option of running through each month
    if index.name in utils.MONTHLY_INDICES:
        nmonths = 13
        timescale = "MON"
    else:
        nmonths = 1
        timescale = "ANN"

    # currently not stored - but ready just in case
    outfilename = "{}/timeseries_{}.dat".format(TSOUTLOCATION, index.name)
    if os.path.exists(outfilename): os.remove(outfilename)

    coveragefilename = "{}/timeseries_{}_boxes.dat".format(TSOUTLOCATION, index.name)
    if os.path.exists(coveragefilename): os.remove(coveragefilename)

    # plot all month versions at once
    for month, name in enumerate(month_names[:nmonths]):

        if diagnostics:
            print(name)

        # set up the figure
        fig = plt.figure(figsize=(8, 5.5))
        plt.clf()
        ax = fig.add_axes([0.17, 0.35, 0.8, 0.6])
        ax2 = fig.add_axes([0.17, 0.1, 0.8, 0.2], sharex=ax)

        # number of valid boxes
        timeseries = {}
        valid_boxes = {}
        land_boxes = {}
        colours = {}

        masks = []
        # get common mask first
        for BASEP in ["61-90", "81-10"]:

            # have to do by hand - and amend when adding anomalies
            infilename = "{}/{}_{}_{}-{}_{}_{}_{}.nc".format(utils.FINALROOT, "HadEX3", index.name, utils.STARTYEAR.year, utils.ENDYEAR.year-1, grid, BASEP, "{}x{}deg".format(utils.DELTALAT, utils.DELTALON))

            try:
                cubelist = iris.load(infilename)
                names = np.array([c.var_name for c in cubelist])
                incube = cubelist[np.where(names == name)[0][0]]
                incube.coord("grid_latitude").standard_name = "latitude"
                incube.coord("grid_longitude").standard_name = "longitude"

                incube = fix_time_coord(incube)
            except (RuntimeError, IOError):
                print("File not found: {}".format(infilename))
                continue

            except KeyError:
                # BUG FIX: was "filename", an undefined name here
                print("Month not available in {}".format(infilename))
                continue

            masks += [incube.data.mask]

            # have to extract box counts before using a common mask
            LABEL = "{}".format(BASEP)
            coord = incube.coord("time")
            years = np.array([c.year for c in coord.units.num2date(coord.points)])

            # find which boxes have x% of years with data - default is 90% for timeseries
            completeness_mask = utils.CompletenessCheckGrid(incube.data, utils.ENDYEAR.year, utils.STARTYEAR.year)

            # apply completeness mask, and obtain box counts
            nboxes_completeness, completeness_masked_data = MaskData(incube.data, incube.data.fill_value, completeness_mask)
            incube.data = completeness_masked_data

            nboxes = np.zeros(nboxes_completeness.shape[0])
            for year in range(incube.data.shape[0]):
                nboxes[year] = np.ma.count(incube.data[year])

            # for coverage, number of boxes will vary with lat/lon resolution
            # (np.prod: np.product was removed in NumPy 2.0)
            max_boxes = np.prod(incube.data.shape[1:])
            if diagnostics:
                print("{}: total grid boxes = {}, max filled = {}".format(LABEL, max_boxes, int(np.max(nboxes))))

            # collect outputs
            valid_boxes[LABEL] = [years, 100.*nboxes/max_boxes] # scale by total number as lat/lon resolution will be different
            if month == 0:
                lsm = utils.get_land_sea_mask(incube.coord("latitude").points, incube.coord("longitude").points, floor=False)
                n_land_boxes = len(np.where(lsm == False)[0])
            land_boxes[LABEL] = [years, 100.*nboxes/n_land_boxes] # scale by number of land boxes

        # now merge masks

        final_mask = np.ma.mask_or(masks[0], masks[1])

        for BASEP in ["61-90", "81-10"]:

            # have to do by hand - and amend when adding anomalies
            infilename = "{}/{}_{}_{}-{}_{}_{}_{}.nc".format(utils.FINALROOT, "HadEX3", index.name, utils.STARTYEAR.year, utils.ENDYEAR.year-1, grid, BASEP, "{}x{}deg".format(utils.DELTALAT, utils.DELTALON))

            try:
                cubelist = iris.load(infilename)
                names = np.array([c.var_name for c in cubelist])
                incube = cubelist[np.where(names == name)[0][0]]
                incube.coord("grid_latitude").standard_name = "latitude"
                incube.coord("grid_longitude").standard_name = "longitude"

                incube = fix_time_coord(incube)

                # apply the common mask so both reference periods match
                incube.data.mask = final_mask

            except (RuntimeError, IOError):
                print("File not found: {}".format(infilename))
                continue

            except KeyError:
                # BUG FIX: was "filename", an undefined name here
                print("Month not available in {}".format(infilename))
                continue


            # fix percent -> days issue for these four
            if index.name in ["TX90p", "TN90p", "TX10p", "TN10p"]:
                if name == "Ann":
                    incube.data = incube.data * (DAYSPERYEAR/100.)
                else:
                    incube.data = incube.data * (calendar.monthrange(2019, month)[1]/100.)

                index.units = "days"

            # restrict to times of interest
            time_constraint = iris.Constraint(time=lambda cell: utils.STARTYEAR <= cell <= utils.ENDYEAR)

            incube = incube.extract(time_constraint)

            # find which boxes have x% of years with data - default is 90%
            completeness_mask = utils.CompletenessCheckGrid(incube.data, utils.ENDYEAR.year, utils.STARTYEAR.year)

            # apply completeness mask, and obtain box counts
            nboxes_completeness, completeness_masked_data = MaskData(incube.data, incube.data.fill_value, completeness_mask)
            incube.data = completeness_masked_data

            if normalise:
                # apply normalisation relative to this run's base period
                if BASEP == "61-90":
                    clim_constraint = iris.Constraint(time=lambda cell: dt.datetime(1961, 1, 1) <= cell <= dt.datetime(1990, 1, 1))
                elif BASEP == "81-10":
                    clim_constraint = iris.Constraint(time=lambda cell: dt.datetime(1981, 1, 1) <= cell <= dt.datetime(2010, 1, 1))

                norm_cube = incube.extract(clim_constraint)
                norm = norm_cube.collapsed(['time'], iris.analysis.MEAN)

                incube = incube - norm

            # weights for the region
            weights = iris.analysis.cartography.cosine_latitude_weights(incube)
            ts = incube.collapsed(['longitude', 'latitude'], iris.analysis.MEAN, weights=weights)

            # only plot where there are non-missing values
            coord = ts.coord("time")
            years = np.array([c.year for c in coord.units.num2date(coord.points)])


            # do the plot, highlighting vanilla HadEX3
            LABEL = "{}".format(BASEP)
            if BASEP == "61-90":
                line = ax.plot(years, ts.data, c='k', ls="-", lw=2, label=LABEL, zorder=10)
            else:
                line = ax.plot(years, ts.data, ls="-", lw=2, label=LABEL, zorder=5)
            timeseries[LABEL] = [years, ts]
            colours[LABEL] = line[0].get_color()

        # once all lines plotted, then tidy up
        putils.SortAxesLabels(plt, ax, index, utils.STARTYEAR.year, utils.ENDYEAR.year, month)
        putils.SortAxesLabels(plt, ax2, index, utils.STARTYEAR.year, utils.ENDYEAR.year, month)
        ax.set_xlim([1900, 2020])

        if normalise:
            # only plot zero line if done as anomalies
            ax.axhline(0, color='0.5', ls='--')
        elif index.name in ["TX90p", "TX10p", "TN90p", "TN10p"]:
            # or plot expected line otherwise
            if name == "Ann":
                ax.axhline(36.5, color='0.5', ls='--')
            else:
                ax.axhline(calendar.monthrange(2019, month)[1]*0.1, color='0.5', ls='--')

        # plot legend below figure
        leg = ax.legend(loc='lower center', ncol=2, bbox_to_anchor=(0.46, -0.05), frameon=False, title='', prop={'size':utils.FONTSIZE}, labelspacing=0.15, columnspacing=0.5)

        # and plot the difference
        ax2.plot(years, timeseries["61-90"][1].data-timeseries["81-10"][1].data, ls="-", lw=2, c="#e41a1c")
        ax2.set_title("")
        ax2.set_ylabel("Difference\n({})".format(index.units), fontsize=utils.FONTSIZE)


        # extra information
        if utils.WATERMARK:
            watermarkstring = "{} {}".format(os.path.join("/".join(os.getcwd().split('/')[4:]), os.path.basename(__file__)), dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M"))
            plt.figtext(0.01, 0.01, watermarkstring, size=6)

        ax = putils.Rstylee(ax)
        ax2 = putils.Rstylee(ax2)
        plt.setp(ax.get_xticklabels(), visible=False)

        # panel labels for the paper figure
        if index.name in ["TX90p"]:
            fig.text(0.03, 0.97, "(a)", fontsize=utils.FONTSIZE)
        elif index.name in ["TN10p"]:
            fig.text(0.03, 0.97, "(b)", fontsize=utils.FONTSIZE)
        elif index.name in ["R95p"]:
            fig.text(0.03, 0.97, "(e)", fontsize=utils.FONTSIZE)
        elif index.name in ["R99p"]:
            fig.text(0.03, 0.97, "(f)", fontsize=utils.FONTSIZE)

        # and save
        outname = putils.make_filenames("interRefP_ts", index=index.name, grid=grid, anomalies="None", month=name)

        plt.savefig("{}/{}/{}".format(utils.PLOTLOCS, index.name, outname), dpi=300)

        plt.close()

        #*****************
        # plot coverage - how many grid boxes have values (scaled by total number)

        fig = plt.figure(figsize=(8, 5.5))
        plt.clf()
        ax = fig.add_axes([0.17, 0.35, 0.8, 0.6])
        ax2 = fig.add_axes([0.17, 0.1, 0.8, 0.2], sharex=ax)

        PlotCoverage(plt, ax, land_boxes, colours, utils.STARTYEAR.year, utils.ENDYEAR.year, index.name, month, '', ncol=2)
        ax.set_xlim([1900, 2020])

        # and plot the difference in coverage between the base periods
        ax2.plot(years, land_boxes["81-10"][1]-land_boxes["61-90"][1], ls="-", lw=2, c="#e41a1c")
        ax2.set_title("")
        ax2.set_ylabel("Difference\n(%)", fontsize=utils.FONTSIZE)
        ax2.tick_params(labelsize=utils.FONTSIZE)

        ax = putils.Rstylee(ax)
        ax2 = putils.Rstylee(ax2)
        plt.setp(ax.get_xticklabels(), visible=False)
        ax.xaxis.set_minor_locator(minorLocator)

        outname = putils.make_filenames("interRefP_coverage_ts", index=index.name, grid=grid, anomalies="None", month=name)
        print("{}/{}/{}".format(utils.PLOTLOCS, index.name, outname))
        plt.savefig("{}/{}/{}".format(utils.PLOTLOCS, index.name, outname), dpi=300)

        plt.close("all")

    return # main
Code example #4
def main(index="TX90p", diagnostics=False, qc_flags="", anomalies="None"):
    """
    Read inventories and make scatter plot of station locations for one index.

    Writes one map per timescale (annual, and monthly for monthly indices),
    plus a text file recording the total station count for the annual run.

    :param str index: which index to run
    :param bool diagnostics: extra verbose output
    :param str qc_flags: which QC flags to process W, B, A, N, C, R, F, E, V, M
    :param str anomalies: run code on anomalies or climatology rather than raw data

    :raises ValueError: if anomalies is not one of "None"/"anomalies"/"climatology"
    """

    if index in utils.MONTHLY_INDICES:
        timescale = ["ANN", "MON"]
    else:
        timescale = ["ANN"]

    # move this up one level eventually?
    all_datasets = utils.get_input_datasets()

    for ts in timescale:
        # set up the figure
        fig = plt.figure(figsize=(10, 6.5))
        plt.clf()
        ax = plt.axes([0.025, 0.14, 0.95, 0.90], projection=cartopy.crs.Robinson())
        ax.gridlines() #draw_labels=True)
        ax.add_feature(cartopy.feature.LAND, zorder=0, facecolor="0.9", edgecolor="k")
        ax.coastlines()

        # dummy scatters for full extent
        # NOTE: linewidth must be numeric - string values (e.g. '0.01') are
        # rejected by recent Matplotlib releases
        plt.scatter([-180, 180, 0, 0], [0, 0, -90, 90], c="w", s=1, transform=cartopy.crs.Geodetic(), \
                        edgecolor='w', linewidth=0.01)

        # run all datasets
        total = 0
        for dataset in all_datasets:

            try:
                # choose appropriate subdirectory.
                if anomalies == "None":
                    subdir = "formatted/indices"
                elif anomalies == "anomalies":
                    subdir = "formatted/anomalies"
                elif anomalies == "climatology":
                    subdir = "formatted/climatology"
                else:
                    # fail clearly rather than with a NameError on subdir below
                    raise ValueError("Unrecognised anomalies option: {}".format(anomalies))

                ds_stations = utils.read_inventory(dataset, subdir=subdir, final=True, \
                                                   timescale=ts, index=index, anomalies=anomalies, qc_flags=qc_flags)
                ds_stations = utils.select_qc_passes(ds_stations, qc_flags=qc_flags)

            except IOError:
                # file missing
                print("No stations with data for {}".format(dataset.name))
                ds_stations = []

            if len(ds_stations) > 0:
                lats = np.array([stn.latitude for stn in ds_stations])
                lons = np.array([stn.longitude for stn in ds_stations])

                # and plot
                scatter = plt.scatter(lons, lats, c=COLOURS[dataset.name], s=15, \
                                          label="{} ({})".format(get_label(dataset.name), len(ds_stations)), \
                                          transform=cartopy.crs.Geodetic(), edgecolor='0.5', linewidth=0.5)

                total += len(ds_stations)

        # make a legend
        leg = plt.legend(loc='lower center', ncol=5, bbox_to_anchor=(0.50, -0.3), \
                             frameon=False, title="", prop={'size':12}, labelspacing=0.15, columnspacing=0.5, numpoints=3)
        plt.setp(leg.get_title(), fontsize=12)

        plt.figtext(0.06, 0.91, "{} Stations".format(total))
        plt.title("{} - {}".format(index, ts))

        # extra information
        if utils.WATERMARK:
            watermarkstring = "{} {}".format(os.path.join("/".join(os.getcwd().split('/')[4:]), os.path.basename(__file__)), dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M"))
            plt.figtext(0.01, 0.01, watermarkstring, size=6)
#        plt.figtext(0.03, 0.95, "(c)", size=14)

        # and save
        outname = putils.make_filenames("station_locations", index=index, grid="ADW", anomalies=anomalies, month=ts.capitalize())

        plt.savefig("{}/{}/{}".format(utils.PLOTLOCS, index, outname))
            
        plt.close()

        # write out total station number (annual run only, to avoid duplicates)
        if ts == "ANN":
            with open(os.path.join(utils.INFILELOCS, "{}_stations.txt".format(index)), "w") as outfile:
                outfile.write("{}\n".format(index))
                outfile.write("{}".format(total))
        
    return # main
コード例 #5
0
ファイル: plot_timeseries_iris.py プロジェクト: rjhd2/HadEX3
def main(index,
         comparison=False,
         diagnostics=False,
         anomalies="None",
         grid="ADW",
         normalise=False,
         matched=False,
         uncertainties=False):
    """
    Plot area-averaged timeseries of an index for HadEX3 and (optionally)
    comparison datasets (HadEX, HadEX2, GHCNDEX, ERA5), plus coverage plots
    scaled by total grid boxes and by land boxes.

    :param str index: which index to run
    :param bool comparison: compare against other datasets
    :param bool diagnostics: output diagnostic information
    :param str anomalies: run code on anomalies or climatology rather than raw data
    :param str grid: gridding type ADW/CAM
    :param bool normalise: plot as anomalies from e.g. 1961-90
    :param bool matched: match HadEX3 to HadEX2 coverage and plot timeseries
    :param bool uncertainties: plot ERA5 derived coverage uncertainties
    """

    # get details of index
    index = utils.INDICES[index]

    # allow for option of running through each month
    if index.name in utils.MONTHLY_INDICES:
        nmonths = 13
        timescale = "MON"
    else:
        nmonths = 1
        timescale = "ANN"

    # currently not stored - but ready just in case
    outfilename = "{}/timeseries_{}.dat".format(TSOUTLOCATION, index.name)
    if os.path.exists(outfilename): os.remove(outfilename)

    coveragefilename = "{}/timeseries_{}_boxes.dat".format(
        TSOUTLOCATION, index.name)
    if os.path.exists(coveragefilename): os.remove(coveragefilename)

    # plot all month versions at once
    for month, name in enumerate(month_names[:nmonths]):

        if diagnostics:
            print(name)

        # set up the figure
        fig = plt.figure(figsize=(8, 5.5))
        plt.clf()
        ax = fig.add_axes([0.15, 0.2, 0.82, 0.75])

        # number of valid boxes
        timeseries = {}
        valid_boxes = {}
        land_boxes = {}

        # BUGFIX: was "e5cube = 0" (typo), so this per-month reset never took
        # effect and the uncertainties check below could see a stale ERA5
        # cube left over from a previous month
        e5_cube = 0
        # spin through all comparison datasets
        for ds, dataset in enumerate(DATASETS):
            incube = 0

            print(dataset)
            if not comparison:
                # but if not doing comparisons, skip (just run HadEX3)
                if dataset != "HadEX3":
                    continue

            if dataset == "HadEX":
                if name != "Ann":
                    # HadEX is annual only
                    continue
                else:
                    try:
                        if index.name == "R95pTOT":
                            # HadEX used a different name for this index
                            filename = "{}/HadEX_{}_1951-2003.txt".format(
                                utils.HADEX_LOC, "R95pT")
                        else:
                            filename = "{}/HadEX_{}_1951-2003.txt".format(
                                utils.HADEX_LOC, index.name)

                        # parse the plain-text HadEX grid: a year line
                        # followed by 72 latitude rows of 96 longitudes
                        all_data, years = [], []
                        data = np.zeros((72, 96))
                        latc = -1
                        with open(filename, "r") as infile:

                            for lc, line in enumerate(infile):

                                if lc == 0:
                                    # skip header
                                    continue

                                # read each line
                                line = line.split()
                                if len(line) < 10:
                                    # short line = year marker; store the
                                    # previous year's grid (none before 1951)
                                    years += [int(line[0])]
                                    if line[0] != "1951":
                                        all_data += [data]
                                    # reset storage
                                    data = np.zeros((72, 96))
                                    latc = -1
                                else:
                                    latc += 1
                                    data[latc, :] = line

                        # add final year
                        all_data += [data]
                        all_data = np.array(all_data).astype(float)
                        all_data = np.ma.masked_where(all_data == -999.99,
                                                      all_data)
                        if index.name == "R95pTOT":
                            # fraction -> percent
                            all_data *= 100
                        latitudes = np.arange(90, -90, -2.5)
                        longitudes = np.arange(-180, 180, 3.75)

                        incube = make_iris_cube_3d(all_data, years, "unknown",
                                                   latitudes, longitudes, name,
                                                   index.units)
                        incube = fix_time_coord(incube)

                    except RuntimeError:
                        print("File not found: {}".format(filename))
                        continue

                    except IOError:
                        print("File not found: {}".format(filename))
                        continue

            elif dataset == "HadEX2":
                filename = "{}/HadEX2_{}_1901-2010_h2_mask_m4.nc".format(
                    utils.HADEX2_LOC, index.name)

                try:
                    cubelist = iris.load(filename)
                    names = np.array([c.var_name for c in cubelist])
                    incube = cubelist[np.where(names == name)[0][0]]
                    incube.coord("lat").standard_name = "latitude"
                    incube.coord("lon").standard_name = "longitude"

                    incube = fix_time_coord(incube)

                except RuntimeError:
                    print("File not found: {}".format(filename))
                    continue

                except IOError:
                    print("File not found: {}".format(filename))
                    continue

                except IndexError:
                    print("Month not available in {}".format(filename))
                    continue

            elif dataset == "HadEX3":

                filename = os.path.join(
                    utils.FINALROOT,
                    utils.make_filenames(index=index.name,
                                         grid=grid,
                                         anomalies=anomalies,
                                         extra="",
                                         month_index=""))

                try:
                    cubelist = iris.load(filename)
                    names = np.array([c.var_name for c in cubelist])
                    incube = cubelist[np.where(names == name)[0][0]]
                    incube.coord("grid_latitude").standard_name = "latitude"
                    incube.coord("grid_longitude").standard_name = "longitude"

                    incube = fix_time_coord(incube)

                    # keep a pristine copy for regridding/uncertainties later
                    h3_cube = copy.deepcopy(incube)

                    if anomalies == "anomalies":

                        # to make into actuals, add climatology to the anomalies
                        clim_filename = os.path.join(
                            utils.FINALROOT,
                            utils.make_filenames(index=index.name,
                                                 grid=grid,
                                                 anomalies="climatology",
                                                 extra="",
                                                 month_index=""))

                        clim_cubelist = iris.load(clim_filename)
                        # BUGFIX: select by the climatology list's own
                        # var_names (previously used "cubelist", so the wrong
                        # cube would be picked if the orderings differed)
                        names = np.array([c.var_name for c in clim_cubelist])
                        clim_cube = clim_cubelist[np.where(
                            names == name)[0][0]]
                        try:
                            clim_cube.coord(
                                "grid_latitude").standard_name = "latitude"
                            clim_cube.coord(
                                "grid_longitude").standard_name = "longitude"
                        except iris.exceptions.CoordinateNotFoundError:
                            pass

                        if clim_cube.coord(
                                "time"
                        ).units.origin != "days since 1901-01-01 00:00":
                            clim_cube = fix_time_coord(clim_cube)

                except RuntimeError:
                    print("File not found: {}".format(filename))
                    continue

                except IOError:
                    print("File not found: {}".format(filename))
                    continue

                except IndexError:
                    print("Month not available in {}".format(filename))
                    continue

            elif dataset == "GHCNDEX":

                filename = "{}/{}/GHCND_{}_1951-2019_RegularGrid_global_2.5x2.5deg_LSmask.nc".format(
                    utils.GHCNDEX_LOC, GHCNDEX_VERSION, index.name)

                try:
                    cubelist = iris.load(filename)
                    names = np.array([c.var_name for c in cubelist])
                    incube = cubelist[np.where(names == name)[0][0]]

                    incube = fix_time_coord(incube)

                except RuntimeError:
                    print("File not found: {}".format(filename))
                    continue

                except IOError:
                    print("File not found: {}".format(filename))
                    continue

                except IndexError:
                    print("Month not available in {}".format(filename))
                    continue

            elif dataset == "ERA5":

                filename = "{}/ERA5_{}_1979-2019.nc".format(
                    utils.ERA5_LOC, index.name)

                try:
                    cubelist = iris.load(filename)
                    names = np.array([c.var_name for c in cubelist])
                    incube = cubelist[np.where(names == name)[0][0]]

                    # match latitude order
                    incube.coord('latitude').points = incube.coord(
                        'latitude').points[::-1]
                    incube.data = incube.data[:, ::-1, :]

                    # match to H3 grid
                    try:
                        h3_cube.coord("longitude").guess_bounds()
                        h3_cube.coord("latitude").guess_bounds()
                    except ValueError:
                        # already has bounds
                        pass
                    try:
                        incube.coord("longitude").guess_bounds()
                        incube.coord("latitude").guess_bounds()
                    except ValueError:
                        # already has bounds
                        pass
                    incube = incube.regrid(
                        h3_cube,
                        iris.analysis.Linear(extrapolation_mode="mask"))

                    e5_cube = copy.deepcopy(incube)

                except RuntimeError:
                    print("File not found: {}".format(filename))
                    e5_cube = False
                    continue

                except IOError:
                    print("File not found: {}".format(filename))
                    e5_cube = False
                    continue

                except IndexError:
                    print("Month not available in {}".format(filename))
                    e5_cube = False
                    continue

                print("need to match grids")

            # process data ready for plotting

            # if anomalies from HadEX3, then need to add onto climatology
            if anomalies == "anomalies" and dataset == "HadEX3":
                incube.data = incube.data + clim_cube.data

            # fix percent -> days issue for these four
            if index.name in ["TX90p", "TN90p", "TX10p", "TN10p"]:
                incube.data = incube.data * (DAYSPERYEAR / 100.)
                index.units = "days"

            # restrict to times of interest
            time_constraint = iris.Constraint(
                time=lambda cell: utils.STARTYEAR <= cell <= utils.ENDYEAR)

            incube = incube.extract(time_constraint)

            if matched and (utils.DELTALON == 3.75 and utils.DELTALAT == 2.5):
                # if matching coverage, retain hadex2
                if dataset == "HadEX2":
                    hadex2_mask = incube.data.mask[:]
                if dataset == "HadEX3":
                    # new cube to hold
                    matched_hadex3 = incube.data[:hadex2_mask.shape[0]]

            # find which boxes have x% of years with data - default is 90%
            # (period start/end differ per dataset)
            if dataset == "GHCNDEX":
                completeness_mask = utils.CompletenessCheckGrid(
                    incube.data, utils.ENDYEAR.year, 1951)
            elif dataset == "HadEX":
                completeness_mask = utils.CompletenessCheckGrid(
                    incube.data, 2003, 1951)
            elif dataset == "HadEX2":
                completeness_mask = utils.CompletenessCheckGrid(
                    incube.data, 2010, utils.STARTYEAR.year)
            elif dataset == "HadEX3":
                completeness_mask = utils.CompletenessCheckGrid(
                    incube.data, utils.ENDYEAR.year, utils.STARTYEAR.year)
            elif dataset == "ERA5":
                completeness_mask = utils.CompletenessCheckGrid(
                    incube.data, utils.ENDYEAR.year, 1979)

            # extract number of boxes before applying temporal completeness
            nboxes = np.zeros(incube.data.shape[0])
            for year in range(incube.data.shape[0]):
                nboxes[year] = np.ma.count(incube.data[year])

            # apply completeness mask, and obtain box counts
            nboxes_completeness, completeness_masked_data = MaskData(
                incube.data, incube.data.fill_value, completeness_mask)
            incube.data = completeness_masked_data

            if normalise:
                # apply normalisation!
                clim_constraint = iris.Constraint(
                    time=lambda cell: dt.datetime(utils.REF_START, 1, 1) <=
                    cell <= dt.datetime(utils.REF_END, 1, 1))
                norm_cube = incube.extract(clim_constraint)
                norm = norm_cube.collapsed(['time'], iris.analysis.MEAN)

                incube = incube - norm

            # weights for the region
            weights = iris.analysis.cartography.cosine_latitude_weights(incube)
            ts = incube.collapsed(['longitude', 'latitude'],
                                  iris.analysis.MEAN,
                                  weights=weights)

            # only plot where there are non-missing values
            coord = ts.coord("time")
            years = np.array(
                [c.year for c in coord.units.num2date(coord.points)])
            if dataset == "ERA5":
                if PLOTERA:
                    # only plot ERA5 if selected
                    plt.plot(years,
                             ts.data,
                             c=COLOURS[dataset],
                             ls=LS[dataset],
                             lw=2,
                             label=LABELS[dataset],
                             zorder=ZORDER[dataset])
            else:
                plt.plot(years,
                         ts.data,
                         c=COLOURS[dataset],
                         ls=LS[dataset],
                         lw=2,
                         label=LABELS[dataset],
                         zorder=ZORDER[dataset])

            if dataset == "HadEX3":
                h3_ts = ts
                h3_years = years

            # save
            # np.prod: np.product was removed in NumPy 2.0
            max_boxes = np.prod(incube.data.shape[1:])
            if diagnostics:
                print("{}: total grid boxes = {}, max filled = {}".format(
                    dataset, max_boxes, int(np.max(nboxes))))

            valid_boxes[dataset] = [
                years, 100. * nboxes / max_boxes
            ]  # scale by total number as lat/lon resolution will be different

            # align this dataset's timeseries onto the HadEX3 year axis
            store_ts = np.ones(h3_ts.shape) * utils.HADEX_MDI
            match = np.isin(h3_years, years)  # np.in1d is deprecated
            match_back = np.isin(years, h3_years)
            store_ts[match] = ts.data[match_back]
            timeseries[dataset] = [h3_years, store_ts]

            # get land sea mask (grid is the same each month, so compute once)
            if month == 0:
                lsm = utils.get_land_sea_mask(incube.coord("latitude").points,
                                              incube.coord("longitude").points,
                                              floor=False)
                n_land_boxes = len(np.where(lsm == False)[0])
            land_boxes[dataset] = [years, 100. * nboxes / n_land_boxes
                                   ]  # scale by number of land boxes

        # once all lines plotted
        # get coverage error for HadEX3
        if uncertainties:
            try:
                # test to see if there was an actual cube from this loop
                # (e5_cube is reset to 0 at the top of the month loop)
                if e5_cube != 0:
                    coverage_offset, coverage_stdev = putils.compute_coverage_error(
                        h3_cube, e5_cube)
                    coverage_stdev *= 2.  # 90%, 2s.d.
                    plt.fill_between(h3_years,
                                     h3_ts.data - coverage_stdev,
                                     h3_ts.data + coverage_stdev,
                                     color='0.5',
                                     label="ERA5 coverage uncertainty")
            except UnboundLocalError:
                # h3_cube/h3_ts not referenced - i.e. no HadEX3 data this month
                pass

        # then tidy up
        putils.SortAxesLabels(plt, ax, index, utils.STARTYEAR.year,
                              utils.ENDYEAR.year, month)

        plt.xlim([1900, 2020])
        if normalise:
            # only plot zero line if done as anomalies
            plt.axhline(0, color='k', ls='--')

        # plot legend below figure
        leg = plt.legend(loc='lower center',
                         ncol=3,
                         bbox_to_anchor=(0.46, -0.31),
                         frameon=False,
                         title='',
                         prop={'size': utils.FONTSIZE},
                         labelspacing=0.15,
                         columnspacing=0.5)
        # extra information
        if utils.WATERMARK:
            watermarkstring = "{} {}".format(
                os.path.join("/".join(os.getcwd().split('/')[4:]),
                             os.path.basename(__file__)),
                dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M"))
            plt.figtext(0.01, 0.01, watermarkstring, size=6)
        plt.figtext(0.03, 0.95, "(c)", size=utils.FONTSIZE)

        ax = putils.Rstylee(ax)

        # and save
        if uncertainties:
            outname = putils.make_filenames("timeseries_uncertainties",
                                            index=index.name,
                                            grid=grid,
                                            anomalies=anomalies,
                                            month=name)
        else:
            outname = putils.make_filenames("timeseries",
                                            index=index.name,
                                            grid=grid,
                                            anomalies=anomalies,
                                            month=name)

        plt.savefig("{}/{}/{}".format(utils.PLOTLOCS, index.name, outname),
                    dpi=300)

        plt.close()

        # output data file (annual, comparison runs only)
        if comparison and name == "Ann":
            with open(
                    os.path.join(utils.INFILELOCS,
                                 "{}_timeseries.dat".format(index.name)),
                    "w") as outfile:

                outfile.write("{:4s} {:7s} {:7s} {:7s} {:7s}\n".format(
                    "Year", "HadEX3", "HadEX2", "HadEX", "GHCNDEX"))

                years = timeseries["HadEX3"][0]

                for y, year in enumerate(years):
                    items = [year]

                    for key in ["HadEX3", "HadEX2", "HadEX", "GHCNDEX"]:
                        if key in timeseries.keys():
                            items += [timeseries[key][1][y]]
                        else:
                            items += [utils.HADEX_MDI]

                    outfile.write(
                        "{:4d} {:7.2f} {:7.2f} {:7.2f} {:7.2f}\n".format(
                            items[0], items[1], items[2], items[3], items[4]))

        #*****************
        # Plot coverage - how many grid boxes have values (scaled by total number)

        fig = plt.figure(figsize=(8, 5.5))
        plt.clf()
        ax = fig.add_axes([0.15, 0.2, 0.82, 0.75])

        PlotCoverage(plt,
                     ax,
                     valid_boxes,
                     COLOURS,
                     utils.STARTYEAR.year,
                     utils.ENDYEAR.year,
                     index.name,
                     month,
                     '',
                     ncol=2)
        plt.figtext(0.03, 0.95, "(d)", size=utils.FONTSIZE)

        plt.xlim([1900, 2020])

        ax = putils.Rstylee(ax)

        outname = putils.make_filenames("timeseries_coverage",
                                        index=index.name,
                                        grid=grid,
                                        anomalies=anomalies,
                                        month=name)

        plt.savefig("{}/{}/{}".format(utils.PLOTLOCS, index.name, outname),
                    dpi=300)

        plt.close("all")

        #*****************
        # plot coverage - how many grid boxes have values (scaled by land fraction)

        fig = plt.figure(figsize=(8, 5.5))
        plt.clf()
        ax = fig.add_axes([0.15, 0.2, 0.82, 0.75])

        PlotCoverage(plt,
                     ax,
                     land_boxes,
                     COLOURS,
                     utils.STARTYEAR.year,
                     utils.ENDYEAR.year,
                     index.name,
                     month,
                     '',
                     ncol=2,
                     land=True)
        plt.figtext(0.03, 0.95, "(d)", size=utils.FONTSIZE)

        plt.xlim([1900, 2020])

        ax = putils.Rstylee(ax)

        outname = putils.make_filenames("timeseries_land_coverage",
                                        index=index.name,
                                        grid=grid,
                                        anomalies=anomalies,
                                        month=name)

        plt.savefig("{}/{}/{}".format(utils.PLOTLOCS, index.name, outname),
                    dpi=300)

        plt.close("all")

    return  # main
コード例 #6
0
def main(diagnostics=False):
    """
    Read inventories and make scatter plot of all candidate station locations
    (pre-selection, final=False) across every input dataset.

    :param bool diagnostics: extra verbose output
    """

    # move this up one level eventually?
    all_datasets = utils.get_input_datasets()

    # set up the figure
    fig = plt.figure(figsize=(10, 6.7))
    plt.clf()
    ax = plt.axes([0.025, 0.14, 0.95, 0.90], projection=cartopy.crs.Robinson())
    ax.gridlines()  #draw_labels=True)
    ax.add_feature(cartopy.feature.LAND,
                   zorder=0,
                   facecolor="0.9",
                   edgecolor="k")
    ax.coastlines()

    # dummy scatters for full extent
    # NOTE: linewidth must be numeric - string values (e.g. '0.01') are
    # rejected by recent Matplotlib releases
    plt.scatter([-180, 180, 0, 0], [0, 0, -90, 90], c="w", s=1, transform=cartopy.crs.Geodetic(), \
                    edgecolor='w', linewidth=0.01)

    # run all datasets
    total = 0
    for dataset in all_datasets:

        try:
            # choose appropriate subdirectory.
            subdir = "formatted/indices"

            ds_stations = utils.read_inventory(dataset, subdir=subdir, final=False, \
                                               timescale="", index="", anomalies="None", qc_flags="")

        except IOError:
            # file missing
            print("No stations with data for {}".format(dataset.name))
            ds_stations = []

        if len(ds_stations) > 0:
            lats = np.array([stn.latitude for stn in ds_stations])
            lons = np.array([stn.longitude for stn in ds_stations])

            # and plot
            scatter = plt.scatter(lons, lats, c=COLOURS[dataset.name], s=15, \
                                      label="{} ({})".format(get_label(dataset.name), len(ds_stations)), \
                                      transform=cartopy.crs.Geodetic(), edgecolor='0.5', linewidth=0.5)

            total += len(ds_stations)

    # make a legend
    leg = plt.legend(loc='lower center', ncol=5, bbox_to_anchor=(0.50, -0.34), \
                         frameon=False, title="", prop={'size':12}, labelspacing=0.15, columnspacing=0.5, numpoints=3)
    plt.setp(leg.get_title(), fontsize=12)

    plt.figtext(0.05, 0.92, "{} Stations".format(total))

    plt.title("HadEX3 stations")

    # and save
    outname = putils.make_filenames("station_locations",
                                    index="All",
                                    grid="ADW",
                                    anomalies="None",
                                    month="All")

    plt.savefig("{}/{}".format(utils.PLOTLOCS, outname), dpi=300)

    plt.close()

    return  # main
コード例 #7
0
def main(index, diagnostics=False, anomalies="None", grid="ADW"):
    """
    Plot maps of linear trends (or climatologies) for each 3-month season.

    For every season in the module-level SEASONS list, the three monthly
    fields are read from the gridded netCDF output, combined into a seasonal
    field (sum/mean/max/min depending on the index), and either a trend map
    (since utils.TREND_START) or a climatology map is plotted.

    :param str index: which index to run
    :param bool diagnostics: output diagnostic information
    :param str anomalies: run code on anomalies or climatology rather than raw data
    :param str grid: gridding type ADW/CAM
    """


    # get details of index
    # NOTE: rebinds the parameter to the Index object held in utils.INDICES;
    # later mutation of index.units (below) therefore alters shared state.
    index = utils.INDICES[index]

    # sort the colour maps
    RdYlBu, RdYlBu_r = putils.adjust_RdYlBu()
    BrBG, BrBG_r = putils.make_BrBG()

    # load all monthly cubes from the single gridded file for this index
    cube_list = iris.load(os.path.join(utils.FINALROOT, utils.make_filenames(index=index.name, grid=grid, anomalies=anomalies, extra="", month_index="")))

    # var_name of each cube is the month name, used for selection below
    names = np.array([cube.var_name for cube in cube_list])

    #*************
    # plot trend map

    # SEASONS / SEASON_DICT are module-level — presumably DJF/MAM/JJA/SON
    # mapped to their three month names; TODO confirm against module header.
    for season in SEASONS:

        three_month_data = []
        months = SEASON_DICT[season]
        for month in months:

            # choose colourbar bounds and colour map; trend (non-climatology)
            # runs get symmetric diverging scales, climatology runs sequential
            if anomalies != "climatology":
                if index.name in ["TX90p", "TN90p", "SU", "TR"]:
                    bounds = [-100, -4, -3, -2, -1, 0, 1, 2, 3, 4, 100]
                    cmap = RdYlBu_r
                elif index.name in ["TX10p", "TN10p", "FD", "ID"]:
                    bounds = [-100, -4, -3, -2, -1, 0, 1, 2, 3, 4, 100]
                    cmap = RdYlBu
                elif index.name in ["DTR", "ETR"]:
                    bounds = [-100, -1, -0.75, -0.5, -0.25, 0, 0.25, 0.5, 0.75, 1, 100]
                    cmap = RdYlBu_r
                elif index.name in ["TXx", "TNx", "TXn", "TNn"]:
                    bounds = [-100, -2, -1, -0.5, -0.25, 0, 0.25, 0.5, 1, 2, 100]
                    cmap = RdYlBu_r
                elif index.name in ["Rx1day", "Rx5day"]:
                    bounds = [-100, -4, -3, -2, -1, 0, 1, 2, 3, 4, 100]
                    cmap = BrBG
                elif index.name in ["CWD"]:
                    bounds = [-100, -1, -0.5, -0.2, -0.1, 0, 0.1, 0.2, 0.5, 1, 100]
                    cmap = BrBG
                elif index.name in ["CDD", "PRCPTOT"]:
                    bounds = [-100, -4, -2, -1, -0.5, 0, 0.5, 1, 2, 4, 100]
                    cmap = BrBG_r
                elif index.name in ["R10mm", "R20mm"]:
                    bounds = [-100, -2, -1, -0.5, -0.25, 0, 0.25, 0.5, 1, 2, 100]
                    cmap = BrBG
                else:
                    # fallback scale for any index not listed above
                    bounds = [-100, -4, -3, -2, -1, 0, 1, 2, 3, 4, 100]
                    cmap = RdYlBu_r

            else:
                if index.name in ["TX90p", "TN90p", "SU", "TR"]:
                    bounds = np.arange(0, 60, 5)
                    cmap = plt.cm.YlOrRd
                elif index.name in ["TX10p", "TN10p", "FD", "ID"]:
                    bounds = np.arange(0, 60, 5)
                    cmap = plt.cm.YlOrRd_r
                elif index.name in ["DTR", "ETR"]:
                    bounds = np.arange(0, 30, 5)
                    cmap = plt.cm.YlOrRd
                elif index.name in ["TXx", "TNx"]:
                    bounds = np.arange(-10, 40, 5)
                    cmap = plt.cm.YlOrRd
                elif index.name in ["TXn", "TNn"]:
                    bounds = np.arange(-30, 10, 5)
                    cmap = plt.cm.YlOrRd
                elif index.name in ["Rx1day", "Rx5day"]:
                    bounds = np.arange(0, 100, 10)
                    cmap = plt.cm.YlGnBu
                elif index.name in ["CWD"]:
                    bounds = np.arange(0, 100, 10)
                    cmap = BrBG
                elif index.name in ["CDD", "PRCPTOT", "R10mm", "R20mm"]:
                    bounds = np.arange(0, 100, 10)
                    cmap = BrBG_r
                else:
                    bounds = np.arange(0, 60, 5)
                    cmap = plt.cm.YlOrRd

            # pick the cube whose var_name matches this month
            selected_cube, = np.where(names == month)

            cube = cube_list[selected_cube[0]]
            try:
                cube.coord('grid_latitude').guess_bounds()
                cube.coord('grid_longitude').guess_bounds()
            except ValueError:
                # bounds already exist — nothing to do
                pass

            # fix percent -> days issue for these four
            if index.name in ["TX90p", "TN90p", "TX10p", "TN10p"]:
                # month index in month_names doubles as the calendar month
                # number (slot 0 presumably holds the annual field) — the
                # season months are real months so monthrange() is valid
                this_month, = np.where(month_names == month)
                ndays = np.array([calendar.monthrange(y, this_month[0])[1] for y in utils.REFERENCEYEARS])

                # percent of days -> count of days, broadcast over lat/lon
                cube.data = cube.data * ndays[:, None, None] / 100.
                index.units = "days"

            three_month_data += [cube.data]

        # extracted the three months of the season
        # NOTE: the template cube is whichever month came last in the loop
        season_cube = copy.deepcopy(cube)

        three_month_data = np.ma.array(three_month_data)
        # take appropriate seasonal value
        # counts are summed, extremes take max/min, means are averaged
        if index.name in ["TX90p", "TN90p", "TX10p", "TN10p"]:
            season_cube.data = np.ma.sum(three_month_data, axis=0)
        elif index.name in ["FD", "ID", "SU", "TR"]:
            season_cube.data = np.ma.sum(three_month_data, axis=0)
        elif index.name in ["TXx", "TNx", "ETR"]:
            season_cube.data = np.ma.max(three_month_data, axis=0)
        elif index.name in ["TXn", "TNn"]:
            season_cube.data = np.ma.min(three_month_data, axis=0)
        elif index.name in ["Rx1day", "Rx5day"]:
            season_cube.data = np.ma.max(three_month_data, axis=0)
        elif index.name in ["CDD", "CWD"]:
            season_cube.data = np.ma.max(three_month_data, axis=0)
        elif index.name in ["R10mm", "R20mm", "PRCPTOT"]:
            season_cube.data = np.ma.sum(three_month_data, axis=0)
        elif index.name in ["R95pTOT", "R99pTOT", "DTR"]:
            season_cube.data = np.ma.mean(three_month_data, axis=0)
        elif index.name in ["TNlt2", "TNltm2", "TNltm20", "TXge35", "TXge30", "TMlt10", "TMge10", "TMlt5", "TMge5"]:
            season_cube.data = np.ma.sum(three_month_data, axis=0)
        elif index.name in ["TMm", "TXm", "TNm", "TXgt50p"]:
            season_cube.data = np.ma.mean(three_month_data, axis=0)


        # mask if fewer that 2 months present
        nmonths_locs = np.ma.count(three_month_data, axis=0)
        season_cube.data = np.ma.masked_where(nmonths_locs < 2, season_cube.data)

        # get recent period and trend
        if anomalies != "climatology":
            # restrict to the trend period before fitting
            postYYYY = periodConstraint(season_cube, utils.TREND_START)
            season_cube = season_cube.extract(postYYYY)
            trend_cube, sigma, significance = TrendingCalculation(season_cube)

        if anomalies != "climatology":
            # panel labels for the two indices shown in the publication figure
            figtext=""
            if index.name == "TX90p":
                if season == "DJF":
                    figtext = "(a)"
                elif season == "MAM":
                    figtext = "(b)"
                elif season == "JJA":
                    figtext = "(c)"
                elif season == "SON":
                    figtext = "(d)"
            elif index.name == "TN10p":
                if season == "DJF":
                    figtext = "(e)"
                elif season == "MAM":
                    figtext = "(f)"
                elif season == "JJA":
                    figtext = "(g)"
                elif season == "SON":
                    figtext = "(h)"

            outname = putils.make_filenames("trend", index=index.name, grid=grid, anomalies=anomalies, month=season)

            putils.plot_smooth_map_iris("{}/{}/{}".format(utils.PLOTLOCS, index.name, outname), trend_cube, cmap, bounds, "Trend ({}/10 year)".format(index.units), title="{} - {}, {}-2018".format(index.name, season, utils.TREND_START), figtext=figtext, significance=significance)

        else:
            outname = putils.make_filenames("climatology", index=index.name, grid=grid, anomalies=anomalies, month=season)

            # NOTE(review): plots cube[0] (first timestep of the LAST month
            # processed), not the seasonal aggregate — looks intentional for
            # single-timestep climatology files; confirm
            putils.plot_smooth_map_iris("{}/{}/{}".format(utils.PLOTLOCS, index.name, outname), cube[0], cmap, bounds, "{}".format(index.units), title="{} - {}, {}-{}".format(index.name, season, utils.CLIM_START.year, utils.CLIM_END.year))

    return # main
コード例 #8
0
ファイル: plot_yearly_stations.py プロジェクト: rjhd2/HadEX3
def main(index="TX90p", diagnostics=False, qc_flags="", anomalies="None"):
    """
    Read inventories and make scatter plot

    For each year of utils.REFERENCEYEARS, plots the locations of all
    stations (coloured by source dataset) that have data for the given
    index in that year, and appends the yearly station count to a text file.

    :param str index: which index to run
    :param bool diagnostics: extra verbose output
    :param str qc_flags: which QC flags to process W, B, A, N, C, R, F, E, V, M
    :param str anomalies: run code on anomalies or climatology rather than raw data

    """
    # truncate/create the per-year station-count file; counts appended later
    with open(
            os.path.join(utils.INFILELOCS,
                         "{}_yearly_stations.txt".format(index)),
            "w") as outfile:
        outfile.write("{}\n".format(index))

    if index in utils.MONTHLY_INDICES:
        timescale = ["ANN", "MON"]  # allow for future!
    else:
        timescale = ["ANN"]

    # move this up one level eventually?
    all_datasets = utils.get_input_datasets()

    for ts in timescale:

        # run all datasets
        for d, dataset in enumerate(all_datasets):

            print(dataset)

            try:
                # choose appropriate subdirectory.
                subdir = "formatted/indices"

                ds_stations = utils.read_inventory(dataset, subdir=subdir, final=True, \
                                                       timescale=ts, index=index, anomalies=anomalies, qc_flags=qc_flags)
                ds_stations = utils.select_qc_passes(ds_stations,
                                                     qc_flags=qc_flags)

            except IOError:
                # file missing
                print("No stations with data for {}".format(dataset.name))
                ds_stations = []

            # extract relevant info for this dataset
            if len(ds_stations) > 0:

                # extract values for this dataset
                for s, stn in enumerate(ds_stations):
                    presence = time_presence(stn, index, ts)  # year/month
                    if s == 0:
                        # first station seeds the array with a leading axis
                        ds_presence = np.expand_dims(presence, axis=0)[:]
                    else:
                        ds_presence = np.append(ds_presence,
                                                np.expand_dims(presence,
                                                               axis=0),
                                                axis=0)  # station/year/month

                ds_lats = np.array([stn.latitude for stn in ds_stations])
                ds_lons = np.array([stn.longitude for stn in ds_stations])

                # store in overall arrays
                # NameError pattern: append if the accumulators exist,
                # otherwise create them on the first dataset with stations
                try:
                    all_lats = np.append(all_lats, ds_lats[:], axis=0)
                    all_lons = np.append(all_lons, ds_lons[:], axis=0)
                    all_presence = np.append(
                        all_presence, ds_presence[:],
                        axis=0)  # dataset*station/year/month
                    all_dataset_names = np.append(
                        all_dataset_names,
                        np.array([dataset.name for i in ds_lats]))
                except NameError:
                    # if not yet defined, then set up
                    all_lats = ds_lats[:]
                    all_lons = ds_lons[:]
                    all_presence = ds_presence[:]
                    all_dataset_names = np.array(
                        [dataset.name for i in ds_lats])

        for y, year in enumerate(utils.REFERENCEYEARS):

            # set up the figure
            fig = plt.figure(figsize=(10, 6.5))
            plt.clf()
            ax = plt.axes([0.025, 0.10, 0.95, 0.90],
                          projection=cartopy.crs.Robinson())
            ax.gridlines()  #draw_labels=True)
            ax.add_feature(cartopy.feature.LAND,
                           zorder=0,
                           facecolor="0.9",
                           edgecolor="k")
            ax.coastlines()

            # dummy scatters for full extent
            plt.scatter([-180, 180, 0, 0], [0, 0, -90, 90], c="w", s=1, transform=cartopy.crs.Geodetic(), \
                            edgecolor='w', linewidth='0.01')

            total = 0
            for dataset in all_datasets:

                # rows of this dataset, then which of them have data in
                # year y (column 0 = annual presence flag)
                ds, = np.where(all_dataset_names == dataset.name)
                locs, = np.where(all_presence[ds, y, 0] == 1)

                if len(locs) > 0:
                    # NOTE(review): linewidth passed as a string — accepted
                    # by older matplotlib; newer versions want a float
                    plt.scatter(all_lons[ds][locs], all_lats[ds][locs], c=ps.COLOURS[dataset.name], \
                                    s=15, label="{} ({})".format(ps.get_label(dataset.name), len(locs)), \
                                    transform=cartopy.crs.Geodetic(), edgecolor='0.5', linewidth='0.5')
                    total += len(locs)
                else:
                    # aiming to show all, even if zero
                    # off-map dummy point keeps the dataset in the legend
                    plt.scatter([-180], [-90], c=ps.COLOURS[dataset.name], s=15, \
                                    label="{} ({})".format(ps.get_label(dataset.name), len(locs)), \
                                    transform=cartopy.crs.Geodetic(), edgecolor='0.5', linewidth='0.5')
                # presumably throttling to let the renderer/filesystem keep
                # up — TODO confirm this pause is still needed
                time.sleep(1)

            # make a legend
            leg = plt.legend(loc='lower center', ncol=6, bbox_to_anchor=(0.50, -0.25), frameon=False, \
                                 title="", prop={'size':10}, labelspacing=0.15, columnspacing=0.5, numpoints=3)
            plt.setp(leg.get_title(), fontsize=12)

            plt.figtext(0.05, 0.92, "{} Stations".format(total))

            plt.title("{} - {} - {}".format(index, ts, year))

            # and save
            outname = putils.make_filenames("station_locations_{}_{}".format(
                ts.capitalize(), year),
                                            index=index,
                                            grid="ADW",
                                            anomalies=anomalies)

            plt.savefig("{}/{}/{}".format(utils.PLOTLOCS, index, outname))

            plt.close()
            plt.clf()
            print("{} done".format(year))

            # write out total station number
            with open(
                    os.path.join(utils.INFILELOCS,
                                 "{}_yearly_stations.txt".format(index)),
                    "a") as outfile:
                outfile.write("{} {}\n".format(year, total))

            time.sleep(1)

        # reset namespace
        # delete accumulators so the NameError pattern re-seeds them for
        # the next timescale
        del all_lats
        del all_lons
        del all_presence
        del all_dataset_names
    return  # main
コード例 #9
0
def main(index,
         first,
         second,
         length,
         diagnostics=False,
         anomalies="None",
         grid="ADW"):
    """
    Plot maps of the difference between two equal-length periods.

    Computes per-gridbox climatologies for [first, first+length-1] and
    [second, second+length-1], plots the second minus the first, and marks
    "significance" where the difference exceeds the sum of the two
    period sigmas.

    :param str index: which index to run
    :param int first: start of first period
    :param int second: start of second period
    :param int length: length of periods
    :param bool diagnostics: output diagnostic information
    :param str anomalies: run code on anomalies or climatology rather than raw data
    :param str grid: gridding type ADW/CAM
    """

    # the first period ends in year first+length-1; if that equals the start
    # of the second period the two still share a year, hence ">=" (the
    # previous ">" let periods touching at one year slip through)
    if first + length - 1 >= second:
        print("Periods overlap, please re-specify")
        return

    # get details of index
    index = utils.INDICES[index]

    # allow for option of running through each month
    if index.name in utils.MONTHLY_INDICES:
        nmonths = 13
    else:
        nmonths = 1

    # sort the colour maps
    RdYlBu, RdYlBu_r = putils.adjust_RdYlBu()
    BrBG, BrBG_r = putils.make_BrBG()

    print(index.name)

    # colourbar boundaries and colour map per index; boundaries must be
    # monotonically increasing for matplotlib BoundaryNorm/levels
    if index.name in ["TX90p", "TN90p", "SU", "TR", "GSL"]:
        bounds = [-100, -20, -15, -10, -5, 0, 5, 10, 15, 20, 100]
        cmap = RdYlBu_r
    elif index.name in ["DTR", "ETR"]:
        bounds = [-100, -4, -3, -2, -1, 0, 1, 2, 3, 4, 100]
        cmap = RdYlBu_r
    elif index.name in ["WSDI"]:
        bounds = [-100, -10, -7.5, -5, -2.5, 0, 2.5, 5, 7.5, 10, 100]
        cmap = RdYlBu_r
    elif index.name in ["TX10p", "TN10p", "ID"]:
        bounds = [-100, -20, -15, -10, -5, 0, 5, 10, 15, 20, 100]
        cmap = RdYlBu
    elif index.name in ["FD"]:
        bounds = [-100, -12, -9, -6, -3, 0, 3, 6, 9, 12, 100]
        cmap = RdYlBu
    elif index.name in ["CSDI"]:
        bounds = [-100, -10, -7.5, -5, -2.5, 0, 2.5, 5, 7.5, 10, 100]
        cmap = RdYlBu
    elif index.name in ["TXn", "TNn"]:
        bounds = [-100, -5, -3, -2, -1, 0, 1, 2, 3, 5, 100]
        cmap = RdYlBu_r
    elif index.name in ["TXx", "TNx"]:
        # fixed: fifth entry was +0.5, making the sequence non-monotonic
        bounds = [-100, -4, -2, -1, -0.5, 0, 0.5, 1, 2, 4, 100]
        cmap = RdYlBu_r
    elif index.name in ["Rx1day"]:
        bounds = [-100, -8, -4, -2, -1, 0, 1, 2, 4, 8, 100]
        cmap = BrBG
    elif index.name in ["Rx5day"]:
        bounds = [-100, -10, -7.5, -5, -2.5, 0, 2.5, 5, 7.5, 10, 100]
        cmap = BrBG
    elif index.name in ["PRCPTOT"]:
        bounds = [-100, -40, -20, -10, -5, 0, 5, 10, 20, 40, 100]
        cmap = BrBG
    elif index.name in ["Rnnmm", "R95p", "R99p"]:
        bounds = [-100, -20, -15, -10, -5, 0, 5, 10, 15, 20, 100]
        cmap = BrBG
    elif index.name in ["R95pTOT", "R99pTOT"]:
        bounds = [-100, -10, -5, -2.5, -1, 0, 1, 2.5, 5, 10, 100]
        cmap = BrBG
    elif index.name in ["R10mm"]:
        bounds = [-100, -10, -5, -2.5, -1, 0, 1, 2.5, 5, 10, 100]
        cmap = BrBG
    elif index.name in ["R20mm"]:
        bounds = [-100, -5, -2.5, -1, -0.5, 0, 0.5, 1, 2.5, 5, 100]
        cmap = BrBG
    elif index.name in ["CWD"]:
        bounds = [-100, -2, -1, -0.5, -0.25, 0, 0.25, 0.5, 1, 2, 100]
        cmap = BrBG
    elif index.name in ["SDII"]:
        bounds = [-100, -1, -0.5, -0.2, -0.1, 0, 0.1, 0.2, 0.5, 1, 100]
        cmap = BrBG
    elif index.name in ["CDD"]:
        bounds = [-100, -10, -7.5, -5, -2.5, 0, 2.5, 5, 7.5, 10, 100]
        cmap = BrBG_r
    elif index.name in ["CDDcold18"]:
        bounds = [-10000, -100, -50, -20, -10, 0, 10, 20, 50, 100, 10000]
        cmap = RdYlBu_r
    elif index.name in ["HDDheat18"]:
        bounds = [-10000, -800, -400, -200, -100, 0, 100, 200, 400, 800, 10000]
        cmap = RdYlBu
    elif index.name in ["GDDgrow10"]:
        bounds = [-10000, -400, -200, -100, -50, 0, 50, 100, 200, 400, 10000]
        cmap = RdYlBu
    elif index.name in ["WSDI3"]:
        bounds = [-100, -4, -2, -1, -0.5, 0, 0.5, 1, 2, 4, 100]
        cmap = RdYlBu_r
    elif index.name in ["CSDI3"]:
        bounds = [-100, -4, -2, -1, -0.5, 0, 0.5, 1, 2, 4, 100]
        cmap = RdYlBu
    elif index.name in ["TNlt2", "TNltm2", "TNltm20", "TMlt10", "TMlt5"]:
        bounds = [-100, -4, -2, -1, -0.5, 0, 0.5, 1, 2, 4, 100]
        cmap = RdYlBu
    elif index.name in ["TXge30", "TXge35", "TMge5", "TMge10", "TXge50p"]:
        bounds = [-100, -4, -2, -1, -0.5, 0, 0.5, 1, 2, 4, 100]
        cmap = RdYlBu_r
    elif index.name in ["TNm", "TXm", "TMm", "TXTN"]:
        bounds = [-100, -1, -0.5, -0.2, -0.1, 0, 0.1, 0.2, 0.5, 1, 100]
        cmap = RdYlBu_r
    elif index.name in ["TXbTNb"]:
        bounds = [-100, -1, -0.5, -0.2, -0.1, 0, 0.1, 0.2, 0.5, 1, 100]
        cmap = RdYlBu
    elif index.name in ["RXday"]:
        bounds = [-100, -8, -4, -2, -1, 0, 1, 2, 4, 8, 100]
        cmap = BrBG
    else:
        # fallback scale for any index not listed above
        bounds = [-100, -20, -15, -10, -5, 0, 5, 10, 15, 20, 100]
        cmap = RdYlBu_r

    cube_list = iris.load(
        os.path.join(
            utils.FINALROOT,
            utils.make_filenames(index=index.name,
                                 grid=grid,
                                 anomalies=anomalies,
                                 extra="",
                                 month_index="")))

    # var_name of each cube is the month name, used for selection below
    names = np.array([cube.var_name for cube in cube_list])

    #*************
    # plot difference map

    for month in range(nmonths):
        selected_cube, = np.where(names == month_names[month])

        cube = cube_list[selected_cube[0]]
        try:
            cube.coord('grid_latitude').guess_bounds()
            cube.coord('grid_longitude').guess_bounds()
        except ValueError:
            # bounds already exist — nothing to do
            pass

        # fix percent -> days issue for these four
        if index.name in ["TX90p", "TN90p", "TX10p", "TN10p"]:
            # percent of a 365-day year -> days (365/100 = 3.65)
            cube.data = cube.data * 3.65
            index.units = "days"

        # get two cubes to difference
        first_cube, first_sigma = get_climatology(cube, first, length)

        second_cube, second_sigma = get_climatology(cube, second, length)

        differences = second_cube - first_cube

        # get "significance" by looking at non-overlapping sigmas
        total_sigma = first_sigma + second_sigma

        significance = np.ma.zeros(first_cube.shape)
        significance[differences.data > total_sigma.data] = 1
        significance.mask = differences.data.mask

        significance = putils.make_iris_cube_2d(
            significance,
            cube.coord("grid_latitude").points,
            cube.coord("grid_longitude").points, "difference_significance", "")

        # two-digit year-span labels, e.g. 1951-1980 -> "5180"
        first_string = "{}{}".format(
            str(first)[-2:],
            str(first + length - 1)[-2:])
        second_string = "{}{}".format(
            str(second)[-2:],
            str(second + length - 1)[-2:])

        if index.units == "degrees_C":
            # raw string: avoids the invalid "\c" escape warning
            units = r'$^{\circ}$' + "C"
        else:
            units = index.units

        if anomalies != "climatology":
            outname = putils.make_filenames("diff_{}-{}".format(
                second_string, first_string),
                                            index=index.name,
                                            grid=grid,
                                            anomalies=anomalies,
                                            month=month_names[month])

            putils.plot_smooth_map_iris(
                "{}/{}/{}".format(utils.PLOTLOCS, index.name, outname),
                differences,
                cmap,
                bounds,
                "Difference {}-{} ({})".format(second_string, first_string,
                                               units),
                title="{} - {}, Difference ({}-{}) - ({}-{})".format(
                    index.name, month_names[month], second,
                    second + length - 1, first, first + length - 1),
                figtext="(b)")  #, significance=significance)

    return  # main
コード例 #10
0
def main(index,
         diagnostics=False,
         normalise=True,
         anomalies="None",
         grid="ADW"):
    """
    Plot a Hovmöller diagram (latitude vs time) for each month of an index.

    The gridded field is averaged over longitude, optionally expressed as an
    anomaly from the utils.REF_START-utils.REF_END climatology, and shown as
    a pcolor panel per month.

    :param str index: which index to run
    :param bool diagnostics: output diagnostic information
    :param bool normalise: show values relative to the reference-period mean
    :param str anomalies: run code on anomalies or climatology rather than raw data
    :param str grid: gridding type ADW/CAM
    """

    # hard-wired switch for a sine-of-latitude (equal-area) y axis
    cosine = False

    # get details of index
    # NOTE: rebinds the parameter to the Index object held in utils.INDICES;
    # the index.units mutation below therefore alters shared state
    index = utils.INDICES[index]

    # allow for option of running through each month
    # NOTE(review): nmonths is set but never used — the loop below iterates
    # month_names directly
    if index.name in utils.MONTHLY_INDICES:
        nmonths = 13
    else:
        nmonths = 1

    # sort the colour maps
    RdYlBu, RdYlBu_r = putils.adjust_RdYlBu()
    BrBG, BrBG_r = putils.make_BrBG()

    # assign bounds and colormaps
    if index.name in ["TX90p", "TN90p", "SU", "TR", "GSL"]:
        bounds = [-100, -8, -4, -2, -1, 0, 1, 2, 4, 8, 100]
        cmap = RdYlBu_r
    elif index.name in ["DTR", "ETR"]:
        bounds = [-100, -1, -0.75, -0.5, -0.25, 0, 0.25, 0.5, 0.75, 1, 100]
        cmap = RdYlBu_r
    elif index.name in ["WSDI"]:
        bounds = [-100, -4, -2, -1, -0.5, 0, 0.5, 1, 2, 4, 100]
        cmap = RdYlBu_r
    elif index.name in ["TX10p", "TN10p", "FD", "ID"]:
        bounds = [-100, -8, -4, -2, -1, 0, 1, 2, 4, 8, 100]
        cmap = RdYlBu
    elif index.name in ["CSDI"]:
        bounds = [-100, -4, -2, -1, -0.5, 0, 0.5, 1, 2, 4, 100]
        cmap = RdYlBu
    elif index.name in ["TXn", "TNn"]:
        bounds = [-100, -2, -1, -0.5, -0.25, 0, 0.25, 0.5, 1, 2, 100]
        cmap = RdYlBu_r
    elif index.name in ["TXx", "TNx"]:
        bounds = [-100, -1, -0.5, -0.25, -0.1, 0, 0.1, 0.25, 0.5, 1, 100]
        cmap = RdYlBu_r
    elif index.name in ["Rx1day"]:
        bounds = [-100, -4, -2, -1, -0.5, 0, 0.5, 1, 2, 4, 100]
        cmap = BrBG
    elif index.name in ["Rx5day"]:
        bounds = [-100, -4, -3, -2, -1, 0, 1, 2, 3, 4, 100]
        cmap = BrBG
    elif index.name in ["PRCPTOT"]:
        bounds = [-100, -20, -10, -5, -2, 0, 2, 5, 10, 20, 100]
        cmap = BrBG
    elif index.name in ["Rnnmm", "R95p", "R99p"]:
        bounds = [-100, -8, -4, -2, -1, 0, 1, 2, 4, 8, 100]
        cmap = BrBG
    elif index.name in ["R95pTOT"]:
        bounds = [-100, -4, -2, -1, -0.5, 0, 0.5, 1, 2, 4, 100]
        cmap = BrBG
    elif index.name in ["R99pTOT"]:
        bounds = [-100, -1, -0.5, -0.25, -0.1, 0, 0.1, 0.25, 0.5, 1, 100]
        cmap = BrBG
    elif index.name in ["R10mm"]:
        bounds = [-100, -3, -1.5, -0.75, -0.25, 0, 0.25, 0.75, 1.5, 3, 100]
        cmap = BrBG
    elif index.name in ["R20mm"]:
        bounds = [-100, -2, -1, -0.5, -0.25, 0, 0.25, 0.5, 1, 2, 100]
        cmap = BrBG
    elif index.name in ["CWD"]:
        bounds = [-100, -1, -0.5, -0.2, -0.1, 0, 0.1, 0.2, 0.5, 1, 100]
        cmap = BrBG
    elif index.name in ["SDII"]:
        bounds = [-100, -0.75, -0.5, -0.25, -0.1, 0, 0.1, 0.25, 0.5, 0.75, 100]
        cmap = BrBG
    elif index.name in ["CDD"]:
        bounds = [-100, -8, -4, -2, -1, 0, 1, 2, 4, 8, 100]
        cmap = BrBG_r
    elif index.name in ["CDDcold18"]:
        bounds = [-10000, -100, -50, -20, -10, 0, 10, 20, 50, 100, 10000]
        cmap = RdYlBu_r
    elif index.name in ["HDDheat18"]:
        bounds = [-10000, -800, -400, -200, -100, 0, 100, 200, 400, 800, 10000]
        cmap = RdYlBu
    elif index.name in ["GDDgrow10"]:
        bounds = [-10000, -400, -200, -100, -50, 0, 50, 100, 200, 400, 10000]
        cmap = RdYlBu
    elif index.name in ["WSDI3"]:
        bounds = [-100, -4, -2, -1, -0.5, 0, 0.5, 1, 2, 4, 100]
        cmap = RdYlBu_r
    elif index.name in ["CSDI3"]:
        bounds = [-100, -4, -2, -1, -0.5, 0, 0.5, 1, 2, 4, 100]
        cmap = RdYlBu
    elif index.name in ["TNlt2", "TNltm2", "TNltm20", "TMlt10", "TMlt5"]:
        bounds = [-100, -4, -2, -1, -0.5, 0, 0.5, 1, 2, 4, 100]
        cmap = RdYlBu
    elif index.name in ["TXge30", "TXge35", "TMge5", "TMge10", "TXge50p"]:
        bounds = [-100, -4, -2, -1, -0.5, 0, 0.5, 1, 2, 4, 100]
        cmap = RdYlBu_r
    elif index.name in ["TNm", "TXm", "TMm", "TXTN"]:
        bounds = [-100, -1, -0.5, -0.2, -0.1, 0, 0.1, 0.2, 0.5, 1, 100]
        cmap = RdYlBu_r
    elif index.name in ["TXbTNb"]:
        bounds = [-100, -1, -0.5, -0.2, -0.1, 0, 0.1, 0.2, 0.5, 1, 100]
        cmap = RdYlBu
    elif index.name in ["RXday"]:
        bounds = [-100, -8, -4, -2, -1, 0, 1, 2, 4, 8, 100]
        cmap = BrBG
    else:
        # fallback scale for any index not listed above
        bounds = [-100, -8, -4, -2, -1, 0, 1, 2, 4, 8, 100]
        cmap = RdYlBu_r

    # discrete colour mapping from the bounds list
    norm = mpl.cm.colors.BoundaryNorm(bounds, cmap.N)

    cube_list = iris.load(
        os.path.join(
            utils.FINALROOT,
            utils.make_filenames(index=index.name,
                                 grid=grid,
                                 anomalies=anomalies,
                                 extra="",
                                 month_index="")))

    # var_name of each cube is the month name, used for selection below
    names = np.array([cube.var_name for cube in cube_list])

    # plot all month versions at once
    for month, mname in enumerate(month_names):

        if diagnostics:
            print(mname)

        selected_cube, = np.where(names == mname)

        cube = cube_list[selected_cube[0]]
        try:
            cube.coord('grid_latitude').guess_bounds()
            cube.coord('grid_longitude').guess_bounds()
        except ValueError:
            # bounds already exist — nothing to do
            pass

        # fix percent -> days issue for these four
        if index.name in ["TX90p", "TN90p", "TX10p", "TN10p"]:
            # percent of a 365-day year -> days (365/100 = 3.65)
            cube.data = cube.data * 3.65
            index.units = "days"

        # Take the mean over latitude
        # NOTE(review): comment/code mismatch — this collapses LONGITUDE,
        # leaving a (time, latitude) field for the Hovmöller
        cube = cube.collapsed('grid_longitude', iris.analysis.MEAN)

        # if show relative to climatology
        if normalise:
            clim_constraint = iris.Constraint(
                time=lambda cell: utils.REF_START <= cell <= utils.REF_END)
            norm_cube = cube.extract(clim_constraint)
            norm_cube = norm_cube.collapsed(['time'], iris.analysis.MEAN)

            # subtract the reference-period mean at each latitude
            cube = cube - norm_cube

        # plot
        # set up the figure
        fig = plt.figure(figsize=(8, 6))
        plt.clf()
        ax = fig.add_axes([0.1, 0.1, 0.85, 0.85])
        # grey background shows through where data are masked
        ax.patch.set_facecolor("0.8")

        contour = iris.plot.pcolor(
            cube, cmap=cmap, norm=norm)  #, vmax=bounds[-2], vmin=bounds[1])

        cb = plt.colorbar(contour, orientation='horizontal', pad=0.07, fraction=0.05, \
                            aspect=30, ticks=bounds[1:-1], drawedges=True)

        cb.set_label(index.units, size=utils.FONTSIZE)
        # thicken border of colorbar and the dividers
        # http://stackoverflow.com/questions/14477696/customizing-colorbar-border-color-on-matplotlib
        # outer sentinel bounds (+/-100 etc.) are deliberately unlabelled
        cb.set_ticklabels(["{:g}".format(b) for b in bounds[1:-1]])
        cb.ax.tick_params(labelsize=utils.FONTSIZE, size=0)

        #        cb.outline.set_color('k')
        cb.outline.set_linewidth(2)
        cb.dividers.set_color('k')
        cb.dividers.set_linewidth(2)

        # NOTE(review): tick.label is deprecated in newer matplotlib
        # (use tick.label1) — fine for the pinned version this ran under
        for tick in ax.xaxis.get_major_ticks():
            tick.label.set_fontsize(utils.FONTSIZE)
        for tick in ax.yaxis.get_major_ticks():
            tick.label.set_fontsize(utils.FONTSIZE)

        ax.set_xlim([1900, 2020])

        if cosine:
            # equal-area axis: ticks placed at sin(latitude)
            ax.set_ylim(np.sin(np.deg2rad(np.array([-90, 90]))))
            ax.set_yticks(
                np.sin(np.deg2rad(np.array([-90, -60, -30, 0, 30, 60, 90]))))
            ax.set_yticklabels(["-90"+r'$^{\circ}$'+"S", "-60"+r'$^{\circ}$'+"S", \
                                    "-30"+r'$^{\circ}$'+"S", "0"+r'$^{\circ}$'+"", \
                                    "30"+r'$^{\circ}$'+"N", "60"+r'$^{\circ}$'+"N", "90"+r'$^{\circ}$'+"N"], fontsize=utils.FONTSIZE)
        else:
            ax.set_ylim([-90, 90])
            ax.set_yticks([-60, -30, 0, 30, 60])
            ax.set_yticklabels(["-60"+r'$^{\circ}$'+"S", "-30"+r'$^{\circ}$'+"S", \
                                    "0"+r'$^{\circ}$'+"", "30"+r'$^{\circ}$'+"N", "60"+r'$^{\circ}$'+"N"], fontsize=utils.FONTSIZE)

        plt.title("{} - {}, Hovmöller".format(index.name, month_names[month]),
                  fontsize=utils.FONTSIZE)
        fig.text(0.03, 0.95, "(e)", fontsize=utils.FONTSIZE)

        if utils.WATERMARK:
            # stamp script path + timestamp on the figure
            watermarkstring = "{} {}".format(
                os.path.join("/".join(os.getcwd().split('/')[4:]),
                             os.path.basename(__file__)),
                dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M"))
            plt.figtext(0.01, 0.01, watermarkstring, size=6)

        outname = putils.make_filenames("hovmoeller",
                                        index=index.name,
                                        grid=grid,
                                        anomalies=anomalies,
                                        month=mname)
        plt.savefig("{}/{}/{}".format(utils.PLOTLOCS, index.name, outname),
                    dpi=300)

        plt.close()

    return  # main