Example #1
def track_density_year_month(lead,
                             y,
                             year1,
                             year2,
                             month,
                             box_size=2.5,
                             threeway=True):
    """Calculates the TC track density as the number of tracks per grid
	box (n.b., NOT per unit time), with the given grid box size in degrees, at a
	particular forecast lead time (in days).

	y is the year the month in question belongs to, to get the right data and correctly name the output file
	year1 is the first year of the 2-year period, for the directory name
	year2 is the second year of the 2-year period, for the directory name"""

    # Check lead time is in available range (7 days)
    if not 0 <= lead <= 6:
        raise ValueError('lead=%s; must be 0 <= lead <= 6' % lead)

    # Compute track density

    infile = os.path.join(REFORMAT_DIR, '%Y', INTERPOLATE_FILENAME)

    outfile = os.path.join(DENSITY_DIR, str(lead),
                           str(year1) + str(year2),
                           DENSITY_FILENAME.format(lead, y, month))
    exclude = pl.exclude_days(lead=lead)


    pl.track_density(infile,
                     outfile,
                     'ukmo_nwp',
                     year=int(y),
                     month=int(month),
                     box_size=box_size,
                     lead=int(lead),
                     exclude=exclude,
                     contributing_days=True)
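

# A minimal usage sketch (hypothetical values; assumes the module-level
# constants REFORMAT_DIR, DENSITY_DIR, DENSITY_FILENAME etc. are configured
# and that pl.track_density is importable):
#
#     for lead in range(7):  # all available lead times, 0-6 days
#         track_density_year_month(lead, 2016, 2016, 2017, 11)
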
def composite_pcp_tot_year_month(year,
                                 month,
                                 lead=None,
                                 mjo=None,
                                 y1=2006,
                                 y2=2017):
    """Computes the total precipitation from TRMM 3B42 for a particular year and month. For 2017, uses data up to 2017/07/11 00:00 (inclusive) only."""
    print "starting"
    # Check values are in valid ranges
    if lead is not None:
        if not 0 <= lead <= 6:
            raise ValueError('lead=%s; must be 0 <= lead <= 6' % lead)
    if mjo is not None:
        if not 0 <= mjo <= 8:
            raise ValueError('mjo=%s; must be 0 <= mjo <= 8' % mjo)
    # if year == 2017:
    #     if not 1 <= month <= 7:
    #         raise ValueError('Data in 2017 used up to July only')

    # Compute file names and get list of dates to exclude due to missing
    # ukmo_nwp forecasts
    if lead is None:
        if mjo is None:
            outfile = COMP_PCP_TOT_FILE % (year, month)
        else:
            outfile = COMP_PCP_TOT_FILE_MJO % (year, month, mjo)
        exclude = []
    else:
        if mjo is None:
            outfile = COMP_PCP_TOT_FILE_LEAD % (lead, year, month)
        else:
            outfile = COMP_PCP_TOT_FILE_LEAD_MJO % (lead, year, month, mjo)
        exclude = pl.exclude_days(lead)
    outpath = os.path.join(COMP_PCP_TOT_DIR, str(year), outfile)
    if not os.path.exists(os.path.join(COMP_PCP_TOT_DIR, str(year))):
        os.makedirs(os.path.join(COMP_PCP_TOT_DIR, str(year)))
    if os.path.isfile(outpath):
        raise ValueError('Output file %s already exists' % outpath)

    # For a given MJO phase, get all cyclone tracks for this year and month,
    # and exclude those whose genesis was in the wrong month; then get list of
    # times for remaining track points
    if mjo is not None:
        tc_file = os.path.join(TRACK_DIR, TRACK_FILE % (1979, 2016))
        ain = np.genfromtxt(tc_file,
                            dtype=float,
                            skip_header=1,
                            usecols=[0] + range(3, 9))
        ain_y = ain[np.where(ain[:, 1] == year)]
        ain_ym = ain_y[np.where(ain_y[:, 2] == month)]
        fn = os.path.join(TRACK_DIR, TRACK_FILE_0 % (1979, 2016) +
                          '.genesis.mjo%d_trackIDs' % mjo)
        ids_list = np.genfromtxt(fn)
        ain_use = ain_ym[np.in1d(ain_ym[:, 0], ids_list)]
        ain_dates = ain_use[:, range(1, 5)]
        dates = [datetime.datetime(*d) for d in ain_dates.astype(int)]

    # Iterate for every day available in this year and month
    t1 = datetime.datetime(year, month, 1)
    print t1
    dt = datetime.timedelta(days=1)
    if month == 12:
        t2 = datetime.datetime(year, 12, 31)
    else:
        t2 = datetime.datetime(year, month + 1, 1) - dt
    print t2
    pcp_cubes = iris.cube.CubeList()
    count_days = 0
    if (year, month) == (2010, 3):
        pcp_cubes_res = {
            'n320': iris.cube.CubeList(),
            'n512': iris.cube.CubeList()
        }
        count_days_res = {'n320': 0.0, 'n512': 0.0}
        start_n512 = datetime.datetime(2010, 3, 9, 12)
    elif (year, month) == (2014, 7):
        pcp_cubes_res = {
            'n512': iris.cube.CubeList(),
            'n768': iris.cube.CubeList()
        }
        count_days_res = {'n512': 0.0, 'n768': 0.0}
        start_n768 = datetime.datetime(2014, 7, 15, 12)
    while t1 <= t2:

        # If on list of dates to exclude, move on
        y, m, d = [getattr(t1, a) for a in ['year', 'month', 'day']]
        if t1 in exclude:
            print t1.strftime('%Y/%m/%d'), '-- EXCLUDE'
            t1 += dt
            continue

        # Iterate for each 6 hour period in day
        # (Remember, h=3 means 00-06UTC, etc.)
        if t1.timetuple()[:3] == (2017, 7, 11):
            count_days += 0.5
        else:
            count_days += 1
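
        # e.g. h=3 -> 00-06 UTC, h=9 -> 06-12 UTC, h=15 -> 12-18 UTC and
        # h=21 -> 18-24 UTC, so xrange(3, 22, 6) walks the four 6-hour
        # accumulation windows of the day.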
        for h in xrange(3, 22, 6):
            this_t_h = t1 + datetime.timedelta(hours=h)
            if (year, month) == (2010, 3):
                if this_t_h < start_n512:
                    count_days_res['n320'] += 0.25
                else:
                    count_days_res['n512'] += 0.25
            elif (year, month) == (2014, 7):
                if this_t_h < start_n768:
                    count_days_res['n512'] += 0.25
                else:
                    count_days_res['n768'] += 0.25

            # For a given MJO phase, skip if this time is not on the list found
            # above (note that we are doing this AFTER counting the
            # contributing days above)
            if mjo is not None:
                if this_t_h not in dates:
                    continue
            print t1.strftime('%Y/%m/%d'), '%02d:00' % h

            # Get data
            this_pcp = gpm_imerg_pcp_accumulation(y, m, d, h)
            if this_pcp is not None:
                if this_pcp.data.min() < 0:
                    raise ValueError('Found negative precip value(s)')
                pcp_cubes.append(this_pcp)

                # For months including a change of resolution, append Cube to
                # the appropriate CubeList
                if (year, month) == (2010, 3):
                    if datetime.datetime(y, m, d, h) < start_n512:
                        pcp_cubes_res['n320'].append(this_pcp)
                    else:
                        pcp_cubes_res['n512'].append(this_pcp)
                elif (year, month) == (2014, 7):
                    if datetime.datetime(y, m, d, h) < start_n768:
                        pcp_cubes_res['n512'].append(this_pcp)
                    else:
                        pcp_cubes_res['n768'].append(this_pcp)

        # Increment day
        t1 += dt

    # Sum over Cubes
    if len(pcp_cubes):
        pcp_sum = pl.add_cubes(pcp_cubes, contributing_days=False)
    else:
        print 'No data - creating dummy map'
        dummy = gpm_imerg_pcp_accumulation(1998, 1, 1, 3)
        pcp_sum = pl.add_cubes([dummy, dummy], contributing_days=False)
        pcp_sum.data = np.zeros_like(pcp_sum.data)

    # Set metadata
    pcp_sum.units = 'mm'
    pcp_sum.standard_name = 'lwe_thickness_of_precipitation_amount'
    pcp_sum.long_name = 'precipitation'
    pcp_sum.var_name = 'pcp'
    pcp_sum.attributes['contributing_days'] = float(count_days)

    # Save
    print outpath
    iris.save(pcp_sum, outpath)
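

# Illustrative calls (hypothetical year/month; output paths derive from
# COMP_PCP_TOT_DIR and must not already exist):
#
#     composite_pcp_tot_year_month(2016, 11)                 # no lead filter
#     composite_pcp_tot_year_month(2016, 11, lead=3)         # 3-day lead
#     composite_pcp_tot_year_month(2016, 11, lead=3, mjo=4)  # MJO phase 4 only
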
def composite_pcp_tc_year_month(year, month, lead):
    """Computes a composite of the precipitation due to all TCs at a particular
	forecast lead time (in days) for a particular year and month.

	Total is divided by 2 at the end as each day is composited from both the
	00Z and the 12Z forecast.  (Of course there may be no tracks at certain
	times, in which case that time just contributes 0 to the total.)

	**Arguments**

	*year*, *month*
		`int`s, year and month of validity times for which to calculate the
		composite

	*lead*
		`int`, length of time after forecast initialization
	"""

    # Check lead time is in available range
    if not 0 <= lead <= 6:
        raise ValueError('lead=%s; must be 0 <= lead <= 6' % lead)
    #if year == 2017:
    #if not 1 <= month <= 7:
    #raise ValueError('Data in 2017 used up to July only')

    # Check whether output file already exists

    infile = os.path.join(TRACK_DIR_3WAY, '%Y', TRACK_FILE)
    print infile

    outdir = os.path.join(COMP_PCP_TC_DIR, str(lead), str(year))
    comp_file = COMP_PCP_TC_FILE % (lead, year, month)
    outfile = os.path.join(outdir, comp_file)

    print outfile

    if os.path.isfile(outfile):
        raise ValueError('Output file %s already exists' % outfile)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)

    # Iterate for every time available in this year and month
    t1 = datetime.datetime(year, month, 1, 0)
    dt = datetime.timedelta(days=1)
    if (year, month) == (2017, 7):
        t2 = datetime.datetime(2017, 7, 11)
    elif month == 12:
        t2 = datetime.datetime(year + 1, 1, 1) - dt
    else:
        t2 = datetime.datetime(year, month + 1, 1) - dt
    pcp_cubes = iris.cube.CubeList()
    exclude = pl.exclude_days(lead)
    count_days = 0
    if (year, month) == (2010, 3):
        count_days_res = {'n320': 0.0, 'n512': 0.0}
        start_n512 = datetime.datetime(2010, 3, 9, 12)
    elif (year, month) == (2014, 7):
        count_days_res = {'n512': 0.0, 'n768': 0.0}
        start_n768 = datetime.datetime(2014, 7, 15, 12)
    vt_list = []
    while t1 <= t2:

        # Check whether this day is on the list of those to exclude
        if t1 in exclude:
            print t1, '- EXCLUDE'
            t1 += dt
            continue
        if t1.timetuple()[:3] == (2017, 7, 11):
            count_days += 0.5
        else:
            count_days += 1
        print t1.strftime('%Y/%m/%d')

        # Get list of forecast and validity times for the three forecasts to be
        # used
        ftime_deltas = np.arange(-12, 13, 12) - lead * 24
        ftimes = (t1 + datetime.timedelta(hours=hh) for hh in ftime_deltas)
        vtimes = (np.array([15, 21]) + lead * 24,
                  np.arange(3, 22, 6) + lead * 24,
                  np.array([3, 9]) + lead * 24)
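
        # Worked example for lead=1: ftimes are t1-36h, t1-24h and t1-12h
        # (the 12Z forecast two days before and the 00Z/12Z forecasts one
        # day before).  Adding the matching vtimes gives validity times
        # t1+{3,9}h, t1+{3,9,15,21}h and t1+{15,21}h respectively, so each
        # 6-hour window of day t1 is sampled exactly twice (once from a 00Z
        # and once from a 12Z forecast) -- hence the final division by 2.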

        # Iterate for each of the three forecasts
        for ff, vv in itertools.izip(ftimes, vtimes):

            # If on or after 2017/07/11 12:00, skip
            #if ff >= datetime.datetime(2017, 7, 11, 12):
            #continue

            # Get year, month, day, hour, lon, lat from file
            this_infile = ff.strftime(infile)
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                ain = np.genfromtxt(this_infile,
                                    dtype=float,
                                    skip_header=1,
                                    usecols=range(3, 9))

            # Count days for each resolution
            for v in vv:
                vt = ff + datetime.timedelta(hours=v)
                if vt not in vt_list:
                    vt_list.append(vt)
                    if (year, month) == (2010, 3):
                        if vt < start_n512:
                            count_days_res['n320'] += 0.25
                        else:
                            count_days_res['n512'] += 0.25
                    elif (year, month) == (2014, 7):
                        if vt < start_n768:
                            count_days_res['n512'] += 0.25
                        else:
                            count_days_res['n768'] += 0.25

            # If no tracks in this forecast, skip it
            if not ain.size:
                print '   ', ff, '- no tracks'
                continue

            # Iterate for every validity time required from this forecast
            for v in vv:

                # Get track(s) with point(s) this time
                gd = ff + datetime.timedelta(hours=v)
                aint = ain[np.where((ain[:, 0] == gd.year) &
                                    (ain[:, 1] == gd.month) &
                                    (ain[:, 2] == gd.day) &
                                    (ain[:, 3] == gd.hour))]
                if not aint.size:
                    print '   ', ff, 'T+%03d' % v, '- no tracks'
                    continue
                print '   ', ff, 'T+%03d' % v

                # Iterate for each track
                for lon, lat in aint[:, [4, 5]]:
                    this_pcp = cf.nwp_pcp_accumulation(ff, v, lon, lat)
                    this_pcp.coord(axis='X').var_name = 'longitude'
                    this_pcp.coord(axis='Y').var_name = 'latitude'
                    this_pcp.coord(axis='X').attributes = {}
                    this_pcp.coord(axis='Y').attributes = {}
                    pcp_cubes.append(iris.util.squeeze(this_pcp))

        # Increment time
        t1 += dt

    # If no Cubes, create a dummy one with zeros
    def dummy_cube():
        dummy = None
        dummy_t = datetime.datetime(year, month, 1)
        while dummy is None:
            dummy = cf.nwp_pcp_accumulation(dummy_t, 3)
            dummy_t += dt
        dummy = iris.util.squeeze(dummy)
        dummy.data = np.zeros_like(dummy.data)
        dummy.remove_coord(dummy.coord(axis='T'))
        return dummy

    if not len(pcp_cubes):
        pcp_cubes = iris.cube.CubeList([dummy_cube()])

    # Sum over Cubes and divide by 2
    pcp = pl.add_cubes(
        pcp_cubes, deal_with_masks=False, contributing_days=False) / 2.

    # Set metadata
    pcp.units = 'mm'
    pcp.standard_name = 'lwe_thickness_of_precipitation_amount'
    pcp.long_name = 'precipitation'
    pcp.var_name = 'pcp'
    pcp.attributes['contributing_days'] = count_days

    # Save
    iris.save(pcp, outfile)
    print outfile

    # For months with more than one resolution, sum separately and divide by 2
    if (year, month) in [(2010, 3), (2014, 7)]:
        res_list = {2010: ['n320', 'n512'], 2014: ['n512', 'n768']}[year]
        pcp_sep = pl.add_cubes(pcp_cubes,
                               deal_with_masks=False,
                               separate_resolutions=True,
                               contributing_days=False)
        file_tot = os.path.join(COMP_PCP_TOT_DIR, str(lead), str(year),
                                COMP_PCP_TOT_FILE % (lead, year, month))
        for k in pcp_sep.iterkeys():
            pcp_sep_k = pcp_sep[k] / 2.

            # Set metadata
            pcp_sep_k.units = 'mm'
            pcp_sep_k.standard_name = 'lwe_thickness_of_precipitation_amount'
            pcp_sep_k.long_name = 'precipitation'
            pcp_sep_k.var_name = 'pcp'

            # Number of contributing days is difficult to count so just get the
            # value from the total pcp composites (the number should be the
            # same anyway)
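            # (k[1] is presumably the number of grid longitudes; a Met
            # Office Nxxx grid has 2*xxx points east-west, hence the keys
            # 640/1024/1536.)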
            res = {640: 'n320', 1024: 'n512', 1536: 'n768'}[k[1]]
            res_list.remove(res)
            file_tot_k = file_tot.replace('.nc', '%s.nc' % res)
            cube_tot_k = iris.load_cube(file_tot_k)
            pcp_sep_k.attributes['contributing_days'] = \
                float(cube_tot_k.attributes['contributing_days'])

            # Save
            outfile_k = outfile.replace('.nc', '.%s.nc' % res)
            iris.save(pcp_sep_k, outfile_k)
            print outfile_k

        # If any resolutions are still in res_list it means there were no
        # tracks at that resolution, so save an empty Cube
        for res in res_list:
            pcp_sep_k = dummy_cube()
            pcp_sep_k.units = 'mm'
            pcp_sep_k.standard_name = 'lwe_thickness_of_precipitation_amount'
            pcp_sep_k.long_name = 'precipitation'
            pcp_sep_k.var_name = 'pcp'
            file_tot_k = file_tot.replace('.nc', '%s.nc' % res)
            cube_tot_k = iris.load_cube(file_tot_k)
            pcp_sep_k.attributes['contributing_days'] = float(
                cube_tot_k.attributes['contributing_days'])
            outfile_k = outfile.replace('.nc', '.%s.nc' % res)
            iris.save(pcp_sep_k, outfile_k)
            print outfile_k
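
# Illustrative call (hypothetical arguments): composite the TC-attributed
# precipitation for November 2016 at a 2-day lead:
#
#     composite_pcp_tc_year_month(2016, 11, 2)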
Example #4
def composite_pcp_tc_year_month(year, month, lead):
    """Computes a composite of the precipitation due to all TCs at a particular
	forecast lead time (in days) for a particular year and month.

	Total is divided by 2 at the end as each day is composited from both the
	00Z and the 12Z forecast.  (Of course there may be no tracks at certain
	times, in which case that time just contributes 0 to the total.)

	**Arguments**

	*year*, *month*
		`int`s, year and month of validity times for which to calculate the
		composite

	*lead*
		`int`, length of time after forecast initialization
	"""

    # Check lead time is in available range
    if not 0 <= lead <= 6:
        raise ValueError('lead=%s; must be 0 <= lead <= 6' % lead)
    #if year == 2017:
    #if not 1 <= month <= 7:
    #raise ValueError('Data in 2017 used up to July only')

    # Check whether output file already exists

    #infilename = TRACK_FILE %
    infile = os.path.join(TRACK_DIR_3WAY, TRACK_FILE)
    print infile

    outdir = os.path.join(COMP_PCP_TC_DIR, str(lead), str(year))
    comp_file = COMP_PCP_TC_FILE % (lead, year, month)
    outfile = os.path.join(outdir, comp_file)

    print outfile

    if os.path.isfile(outfile):
        raise ValueError('Output file %s already exists' % outfile)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)

    # Iterate for every time available in this year and month
    # (n.b. currently restricted to a partial month, apparently for testing;
    # the full month would start at datetime.datetime(year, month, 1, 0))
    t1 = datetime.datetime(year, month, 4, 12)
    print "t1:", t1
    t2 = datetime.datetime(year, month, 14, 0)
    print "t2:", t2
    dt = datetime.timedelta(days=1)
    #if (year, month) == (2017, 7):
    #t2 = datetime.datetime(2017, 7, 11)
    #elif month == 12:
    #t2 = datetime.datetime(year+1, 1, 1) - dt
    #else:
    #t2 = datetime.datetime(year, month+1, 1) - dt
    pcp_cubes = iris.cube.CubeList()
    exclude = pl.exclude_days(lead)
    count_days = 0
    #if (year, month) == (2010, 3):
    #count_days_res = {'n320': 0.0, 'n512': 0.0}
    #start_n512 = datetime.datetime(2010, 3, 9, 12)
    #elif (year, month) == (2014, 7):
    #count_days_res = {'n512': 0.0, 'n768': 0.0}
    #start_n768 = datetime.datetime(2014, 7, 15, 12)
    vt_list = []
    print "EXCLUDED DATES:", exclude
    while t1 <= t2:

        # Check whether this day is on the list of those to exclude
        if t1 in exclude:
            print t1, '- EXCLUDE'
            t1 += dt
            continue
        #if t1.timetuple()[:3] == (2017, 7, 11):
        #count_days += 0.5
        #else:
        count_days += 1
        print t1.strftime('%Y/%m/%d')

        # Get list of forecast and validity times for the three forecasts to be
        # used
        ftime_deltas = np.arange(-12, 13, 12) - lead * 24
        print "ftime_deltas:", ftime_deltas
        ftimes = []
        for hh in ftime_deltas:
            ftimes.append(t1 + datetime.timedelta(hours=hh))
        #ftimes = (t1 + datetime.timedelta(hours=hh) for hh in ftime_deltas)
        print "ftimes:", ftimes
        vtimes = (np.array([15, 21]) + lead * 24,
                  np.arange(3, 22, 6) + lead * 24,
                  np.array([3, 9]) + lead * 24)
        print "vtimes:", vtimes

        # Iterate for each of the three forecasts
        for ff, vv in itertools.izip(ftimes, vtimes):
            print "vv:", vv

            # If on or after 2017/07/11 12:00, skip
            #if ff >= datetime.datetime(2017, 7, 11, 12):
            #continue

            # Get year, month, day, hour, lon, lat from file
            this_infile = ff.strftime(infile)
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                ain = np.genfromtxt(this_infile,
                                    dtype=float,
                                    skip_header=1,
                                    usecols=range(3, 9))

            # Count days for each resolution
            #for v in vv:
            #vt = ff + datetime.timedelta(hours=v)
            #if vt not in vt_list:
            #vt_list.append(vt)
            #if (year, month) == (2010, 3):
            #if vt < start_n512:
            #count_days_res['n320'] += 0.25
            #else:
            #count_days_res['n512'] += 0.25
            #elif (year, month) == (2014, 7):
            #if vt < start_n768:
            #count_days_res['n512'] += 0.25
            #else:
            #count_days_res['n768'] += 0.25

            # If no tracks in this forecast, skip it
            if not ain.size:
                print '   ', ff, '- no tracks'
                continue

            # Iterate for every validity time required from this forecast
            for v in vv:
                print "v:", v

                # Get track(s) with point(s) this time
                gd = ff + datetime.timedelta(hours=v)
                print "gd:", gd
                aint = ain[np.where((ain[:, 0] == gd.year) &
                                    (ain[:, 1] == gd.month) &
                                    (ain[:, 2] == gd.day) &
                                    (ain[:, 3] == gd.hour))]

                if not aint.size:
                    print '   ', ff, 'T+%03d' % v, '- no tracks'
                    continue
                print '   ', ff, 'T+%03d' % v

                # Iterate for each track
                print "running nwp_pcp_accumulation function for points along track:"
                #print "ff:", ff
                #print "v:", v
                #print "aint:", aint
                for lon, lat in aint[:, [4, 5]]:
                    print "this_pcp = nwp_pcp_accumulation(", ff, v, lon, lat, ")"
                    this_pcp = cf.nwp_pcp_accumulation(ff, v, lon, lat)
                    print "finished running nwp_pcp_accumulation"
                    if this_pcp is None:
                        print "no precip, substituting zero-filled dummy_cube()"
                        this_pcp = dummy_cube()
                    else:
                        this_pcp.coord(axis='X').var_name = 'longitude'
                        this_pcp.coord(axis='Y').var_name = 'latitude'
                        this_pcp.coord(axis='X').attributes = {}
                        this_pcp.coord(axis='Y').attributes = {}
                    pcp_cubes.append(iris.util.squeeze(this_pcp))

        # Increment time
        t1 += dt

    # If no Cubes, create a dummy one with zeros
    if not len(pcp_cubes):
        pcp_cubes = iris.cube.CubeList([dummy_cube()])

    # Sum over Cubes and divide by 2
    pcp = pl.add_cubes(
        pcp_cubes, deal_with_masks=False, contributing_days=False) / 2.

    # Set metadata
    pcp.units = 'mm'
    pcp.standard_name = 'lwe_thickness_of_precipitation_amount'
    pcp.long_name = 'precipitation'
    pcp.var_name = 'pcp'
    pcp.attributes['contributing_days'] = count_days

    # Save
    iris.save(pcp, outfile)
    print "saved: ", outfile
def composite_pcp_tot_year_month(year, month, lead):
	"""Computes a composite of all precipitation at a particular forecast lead
	time (in days) for a particular year and month.

	Total is divided by 2 at the end as each day is composited from both the
	00Z and the 12Z forecast.

	**Arguments**

	*year*, *month*
		`int`s, year and month of validity times for which to calculate the
		composite

	*lead*
		`int`, length of time after forecast initialization
	"""

	# Check lead time is in available range
	if not 0 <= lead <= 6:
		raise ValueError('lead=%s; must be 0 <= lead <= 6' % lead)
	#if year == 2017:
		#if not 1 <= month <= 7:
			#raise ValueError('Data in 2017 used up to July only')

	# Check whether output directory and file already exist
	outfile = os.path.join(COMP_PCP_TOT_DIR, str(lead), str(year),
						   COMP_PCP_TOT_FILE % (lead, year, month))
	if os.path.isfile(outfile):
		raise ValueError('Output file %s already exists' % outfile)

	# Iterate for every time available in this year and month
	########################################################################################
	# MODIFIED BELOW TO TEST SCRIPT, SHOULD BE t1 = datetime.datetime(year, month, 1, 0)
	########################################################################################
	
	#t1 = datetime.datetime(year, month, 1, 0)
	
	t1 = datetime.datetime(year, month, 25, 0) #testing
	
	dt = datetime.timedelta(days=1)
	#if (year, month) == (2017, 7):
		#t2 = datetime.datetime(2017, 7, 11, 12)
	if month == 12:
		t2 = datetime.datetime(year, 12, 31, 12)
	else:
		t2 = datetime.datetime(year, month+1, 1, 12) - dt
	pcp_cubes = iris.cube.CubeList()
	exclude = pl.exclude_days(lead=lead)
	count_days = 0
	if (year, month) == (2010, 3):
		count_days_res = {'n320': 0.0, 'n512': 0.0}
		start_n512 = datetime.datetime(2010, 3, 9, 12)
	elif (year, month) == (2014, 7):
		count_days_res = {'n512': 0.0, 'n768': 0.0}
		start_n768 = datetime.datetime(2014, 7, 15, 12)
	elif (year, month) == (2017, 7):
		count_days_res = {'n768': 0.0, 'n1280': 0.0}
		start_n1280 = datetime.datetime(2017, 7, 11, 12)

	vt_list = []
	while t1 <= t2:

		# Check whether this day is on the list of those to exclude
		if t1 in exclude:
			print t1.strftime('%Y/%m/%d'), '- EXCLUDE'
			t1 += dt
			continue
		#if t1.timetuple()[:3] == (2017, 7, 11):
		#	count_days += 0.5
		count_days += 1
		print t1.strftime('%Y/%m/%d')

		# Get list of forecast and validity times for the three forecasts to be
		# used
		ftime_deltas = np.arange(-12, 13, 12) - lead*24
		ftimes = [t1 + datetime.timedelta(hours=hh) for hh in ftime_deltas]
		vtimes = (np.array([15, 21]) + lead*24, np.arange(3, 22, 6) + lead*24,
				  np.array([3, 9]) + lead*24)

		# Iterate for each of the three forecasts
		for ff, vv in itertools.izip(ftimes, vtimes):
			#if ff >= datetime.datetime(2017, 7, 11, 12):
				#continue

			# Iterate for every validity time required from this forecast
			for v in vv:
				print '   ', ff, 'T+%03d' % v

				# Count days for each resolution
				vt = ff + datetime.timedelta(hours=v)
				if vt not in vt_list:
					vt_list.append(vt)
					if (year, month) == (2010, 3):
						if vt < start_n512:
							count_days_res['n320'] += 0.25
						else:
							count_days_res['n512'] += 0.25
					elif (year, month) == (2014, 7):
						if vt < start_n768:
							count_days_res['n512'] += 0.25
						else:
							count_days_res['n768'] += 0.25
					elif (year, month) == (2017, 7):
						if vt < start_n1280:
							count_days_res['n768'] += 0.25
						else:
							count_days_res['n1280'] += 0.25

				# Get precipitation data
				this_pcp = cf.nwp_pcp_accumulation(ff, v)
				this_pcp.coord(axis='X').var_name = 'longitude'
				this_pcp.coord(axis='Y').var_name = 'latitude'
				this_pcp.coord(axis='X').attributes = {}
				this_pcp.coord(axis='Y').attributes = {}
				pcp_cubes.append(iris.util.squeeze(this_pcp))

		# Increment time
		t1 += dt

	# Sum over Cubes and divide by 2
	pcp = pl.add_cubes(pcp_cubes, deal_with_masks=False, contributing_days=False)/2.

	# Set metadata
	pcp.units = 'mm'
	pcp.standard_name = 'lwe_thickness_of_precipitation_amount'
	pcp.long_name = 'precipitation'
	pcp.var_name = 'pcp'
	pcp.attributes['contributing_days'] = float(count_days)
	# Save
	print outfile
	iris.save(pcp, outfile)
	# For months with more than one resolution, sum separately and divide by 2
	if (year, month) in [(2010, 3), (2014, 7), (2017, 7)]:
		print "pcp_cubes:", pcp_cubes
		pcp_sep = pl.add_cubes_sep_res(
			pcp_cubes, year, deal_with_masks=False,
			separate_resolutions=True, contributing_days=False)

		for k in pcp_sep.iterkeys():
			print k
			print k[1]

		for k in pcp_sep.iterkeys():
			pcp_sep_k = pcp_sep[k]/2.
			#res = {640: 'n320', 1024: 'n512', 1536: 'n768'}[k[1]]
			res = str(k)

			# Set metadata
			pcp_sep_k.units = 'mm'
			pcp_sep_k.standard_name = 'lwe_thickness_of_precipitation_amount'
			pcp_sep_k.long_name = 'precipitation'
			pcp_sep_k.var_name = 'pcp'
			pcp_sep_k.attributes['contributing_days'] = count_days_res[res]

			# Save
			outfile_k = outfile.replace('.nc', str(res)+'.nc')
			iris.save(pcp_sep_k, outfile_k)
			print outfile_k
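
# Illustrative call (hypothetical arguments; note the testing start date
# flagged above): total-precipitation composite for November 2016 at a
# 3-day lead:
#
#     composite_pcp_tot_year_month(2016, 11, 3)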