Example 1
def map_to_region(fin, fout, xfirst, xinc, xsize, yfirst, yinc, ysize):
    gfname = tempfile.mktemp()
    with open(gfname, 'w') as o:
        o.write(GRID_DESC.format(xsize, ysize, xfirst, xinc, yfirst, yinc))
    c = cdo.Cdo()
    c.remapbil(gfname, input=fin, output=fout)
    os.unlink(gfname)
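The function above assumes a module-level GRID_DESC template in CDO's grid-description syntax, plus imports of os, tempfile and cdo. A minimal sketch of such a template, with placeholders in the positional order of the .format() call above (this is an assumption, not the original definition):

# Hypothetical grid-description template assumed by map_to_region; the
# placeholders follow the positional order of GRID_DESC.format(...).
GRID_DESC = """\
gridtype = lonlat
xsize    = {}
ysize    = {}
xfirst   = {}
xinc     = {}
yfirst   = {}
yinc     = {}
"""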
Example 2
def main(args):
    nc_paths = get_paths(args.sim_dir)
    c = cdo.Cdo()
    for date_str, nc_list in nc_paths.items():
        print("%s IN:" % date_str)
        start_list, stop_list = [], []
        for nc in nc_list:
            start, stop = get_dt_range(c, nc)
            print("  %s (%s to %s)" % (basename(nc), start, stop))
            start_list.append(start)
            stop_list.append(stop)
        start, stop = min(start_list), max(stop_list)
        out_subd = join(args.out_dir, date_str)
        print("%s OUT:" % date_str)
        print("  %s (%s to %s)" % (date_str, start, stop))
        sys.stdout.flush()
        try:
            os.makedirs(out_subd)
        except FileExistsError:
            pass
        pairs = utils.get_images(args.radar_dir, after=start, before=stop)
        for dt, src in pairs:
            out_name = "%s.png" % strftime(dt, utils.FMT)
            dst = join(out_subd, out_name)
            with open(src, "rb") as fi, open(dst, "wb") as fo:
                fo.write(fi.read())
        print()
Example 3
def zmnam_preproc(ifile):
    """Preprocessing of the input dataset files."""
    cdo = cd.Cdo()
    # Delete leap day, if any.
    full_da_nl = cdo.delete('month=2,day=29', input=ifile)

    # Fill missing values with bilinear method.
    full_da_nl_nn = cdo.fillmiss(input=full_da_nl)

    # Detrend with memory-efficient method.
    full_da_nl_a, full_da_nl_b = cdo.trend(input=full_da_nl_nn)
    full_da_nl_detr = cdo.subtrend(input=full_da_nl_nn + ' ' + full_da_nl_a +
                                   ' ' + full_da_nl_b)

    # Compute anomalies from the daily/monthly means.
    gh_da_dm = cdo.ydaymean(input=full_da_nl_detr)
    gh_da_an = cdo.sub(input=full_da_nl_detr + ' ' + gh_da_dm)
    gh_da_an_zm = cdo.zonmean(input=gh_da_an)

    # Compute monthly mean anomalies.
    gh_mo = cdo.monmean(input=full_da_nl_detr)
    gh_mo_mm = cdo.ymonmean(input=gh_mo)
    gh_mo_an = cdo.sub(input=gh_mo + ' ' + gh_mo_mm)

    return (gh_da_an_zm, gh_mo_an)
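When no output keyword is given, the cdo bindings write each result to a temporary file and return its path, which is why two-input operators such as sub and subtrend above receive their operands as one space-joined string. A minimal sketch of the pattern (file name hypothetical):

import cdo as cd

cdo = cd.Cdo()
# Without output=..., each call returns the path of a temporary file...
mean = cdo.timmean(input="data.nc")
# ...which can be passed, space-separated, to a multi-input operator.
anom = cdo.sub(input="data.nc" + " " + mean)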
Example 4
def build_cdo_object():
    try:
        return cdo.Cdo()
    except Exception as e:
        print("Python exception:", e)
        print("Unable to use CDO, this won't work, sorry...")
        print("You may consider running >> module load cdo << and trying to run again")
        sys.exit(1)
Example 5
 def setUp(self):
     """Set up the test fixture: create a Dynamic_HD_Production_Run_Drivers object."""
     self.driver = Dynamic_HD_Production_Run_Drivers()
     self.cdo_instance = cdo.Cdo()
     self.temp_dir = os.path.join(data_dir, 'temp', 'temp_workdir')
     try:
         os.stat(self.temp_dir)
     except FileNotFoundError:
         os.mkdir(self.temp_dir)
Example 6
 def setUp(self):
     """Set up the test fixture: create a Dynamic_Lake_Production_Run_Drivers object."""
     self.driver = Dynamic_Lake_Production_Run_Drivers()
     self.cdo_instance = cdo.Cdo()
     self.temp_dirs = [
         os.path.join(data_dir, 'temp', 'temp_workdir_lake_deglac'),
         os.path.join(data_dir, 'temp', 'temp_workdir_pd')
     ]
     self.clean_up()
     for temp_dir in self.temp_dirs:
         try:
             os.stat(temp_dir)
         except FileNotFoundError:
             os.mkdir(temp_dir)
Example 7
def zmnam_preproc(ifile):
    """Preprocessing of the input dataset files."""
    cdo = cd.Cdo()
    # Delete leap day, if any.
    full_da_nl = cdo.delete('month=2,day=29', input=ifile)

    # Compute anomalies from the daily/monthly means.
    gh_da_dm = cdo.ydaymean(input=full_da_nl)
    gh_da_an = cdo.sub(input=full_da_nl + ' ' + gh_da_dm)
    gh_da_an_zm = cdo.zonmean(input=gh_da_an)

    gh_mo = cdo.monmean(input=full_da_nl)
    gh_mo_mm = cdo.ymonmean(input=gh_mo)
    gh_mo_an = cdo.sub(input=gh_mo + ' ' + gh_mo_mm)

    return (gh_da_an_zm, gh_mo_an)
Example 8
    def setup(self):
        """Setup process."""
        self.ifile = './tests/testdata/sresa1b_ncar_ccsm3-example.nc'
        cdomethods = cdo.Cdo()
        self.tmp_iter = cdomethods.mermean(input=self.ifile,
                                           output=self.ifile[:-3]+'-mer.nc',
                                           options="-O -f nc")
        self.final = cdomethods.zonmean(input=self.tmp_iter,
                                        output=self.ifile[:-3]+'-mer-zon.nc',
                                        options="-O -f nc")
        self.data_iter = ncd.Dataset(self.final)

        tmp_at_one_go = "-mermean "+self.ifile
        self.final_str = cdomethods.zonmean(input=tmp_at_one_go,
                                            output=self.ifile[:-3] +
                                            '-mer-zon-str.nc')
        self.datas = ncd.Dataset(self.final_str)
        self.sellev = cdomethods.sellevidx(12, input=self.ifile,
                                           output=self.ifile[:-3]+'-lvl12.nc',
                                           options="-O -f nc")
Example 9
def find_sp_variable(task):
    global ifs_gridpoint_file_, ifs_spectral_file_, surface_pressure, ln_surface_pressure
    log.info("Looking for surface pressure variable in input files...")
    command = cdo.Cdo()
    code_string = command.showcode(input=ifs_spectral_file_)
    codes = [cmor_source.grib_code(int(c)) for c in code_string[0].split()]
    if surface_pressure in codes:
        log.info("Found surface pressure in spectral file")
        setattr(task, "path", ifs_spectral_file_)
        task.source.grid_ = 1
        return
    if ln_surface_pressure in codes:
        log.info("Found lnsp in spectral file")
        setattr(task, "path", ifs_spectral_file_)
        task.source = cmor_source.ifs_source.read("var134=exp(var152)")
        return
    log.info(
        "Did not find sp or lnsp in spectral file: assuming gridpoint file contains sp"
    )
    setattr(task, cmor_task.output_path_key, ifs_gridpoint_file_)
    task.source.grid_ = 0
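The showcode operator lists the GRIB parameter codes contained in a file; the bindings return its output as a list of lines, hence the [0].split() above. A hedged sketch of the expected shape (file name hypothetical):

import cdo

c = cdo.Cdo()
# showcode returns one line of space-separated codes, e.g. ['129 130 152'],
# so the individual codes are recovered with split().
lines = c.showcode(input="spectral_fields.grb")  # hypothetical input file
codes = [int(tok) for tok in lines[0].split()]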
Example 10
    def __init__(self, other_parent_dirs=None, **ComponentComputeArgs):
        super(ComponentCouple, self).__init__(**ComponentComputeArgs)

        try:
            assert isinstance(self.calendar, CouplingEsmCalendar)
        except AssertionError:
            raise TypeError("You must supply a calendar with coupling functionality: CouplingEsmCalendar, and not %s" % type(self.calendar))

        self.files["couple"] = FileDict()
        self._register_directory("couple", use_name="generic")

        self.couple_attrs = {}
        for couple_type in self.COMPATIBLE_COUPLE_TYPES:
            if other_parent_dirs and couple_type in other_parent_dirs:
                self.couple_attrs[couple_type] = {"parent_dir": other_parent_dirs[couple_type]}
            else:
                self.couple_attrs[couple_type] = {"parent_dir": self._parent_dir}
        self._grid_string = None
        self._cleanup_list = []
        self._cdo_stderr = open(self.couple_dir+"/"+self.NAME+"Couple_cdo_log", "w")
        self.CDO = cdo.Cdo()
        self.NCO = nco.Nco()
Example 11
def interpolate_icon_grib_to_latlon(path, grib_filename, latlon_filename,
                                    model):

    if model == 'icon-global-det':
        targetgridfile = path['base'] + path[
            'grid'] + 'target_grid_global_latlon_0.1.txt'
        weightsfile = path['base'] + path[
            'grid'] + 'weights_dis_{}_icosahedral_to_latlon_0.1.nc'.format(
                model)

    elif model == 'icon-eu-eps':
        targetgridfile = path['base'] + path[
            'grid'] + 'target_grid_eu_latlon_0.2.txt'
        weightsfile = path['base'] + path[
            'grid'] + 'weights_dis_{}_icosahedral_to_latlon_0.2.nc'.format(
                model)
    else:
        raise ValueError('unknown model: {}'.format(model))

    cdo_module = cdo.Cdo()
    cdo_module.remap(targetgridfile + ',' + weightsfile,
                     input=path['base'] + path['subdir'] + grib_filename,
                     output=path['base'] + path['subdir'] + latlon_filename,
                     options='-f nc')
    return
Example 12
    def write_fields(self, filename, fields, griddescfile=None, fieldnames=None):
        """Write a field to a given target NetCDF4 file

        Arguments:
        filename: full path of the netcdf file to write to
        fields: A list of Field (or Field subclass object); fields to write
        griddescfile (optional): string; full path to the grid description metadata
            to add to file written out. Nothing is added if this is
            set to None
        fieldnames: A list of strings; name of the output fields to create and write to
        Returns:nothing
        """

        nlat,nlong = fields[0].get_grid().get_grid_dimensions()
        if fieldnames is None:
            fieldnames = ['field_value']*len(fields)
        print("Writing output to {0}".format(filename))
        if griddescfile is not None:
            output_filename=filename
            filename=path.splitext(filename)[0] + '_temp' + path.splitext(filename)[1]
        with netCDF4.Dataset(filename,mode='w',format='NETCDF4') as dataset:
            dataset.createDimension("latitude",nlat)
            dataset.createDimension("longitude",nlong)
            for field,fieldname in zip(fields,fieldnames):
                data_was_bool = False
                if field.get_data().dtype == np.bool_:
                    field.set_data(field.get_data().astype(np.int32))
                    data_was_bool=True
                field_values = dataset.createVariable(fieldname,field.get_data().dtype,
                                                      ('latitude','longitude'))
                field_values[:,:] = field.get_data()
                if data_was_bool:
                    field.set_data(field.get_data().astype(np.bool_))
        if griddescfile is not None:
            cdo_instance = cdo.Cdo()
            cdo_instance.setgrid(griddescfile,input=filename,output=output_filename)
            os.remove(filename)
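The final setgrid step copies the data unchanged while replacing the file's grid metadata with the supplied description. As a standalone call it is simply (paths hypothetical):

import cdo

# Attach the grid description in grid.txt to in.nc, writing out.nc.
cdo.Cdo().setgrid("grid.txt", input="in.nc", output="out.nc")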
Example 13
def create_time_axis(freq, path, name, has_bounds):
    global log, start_date_, ref_date_
    command = cdo.Cdo()
    times = command.showtimestamp(input=path)[0].split()
    datetimes = sorted(
        set(
            map(lambda s: datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%S"),
                times)))
    if len(datetimes) == 0:
        log.error(
            "Empty time step list encountered at time axis creation for files %s"
            % str(path))
        return
    refdate = cmor_utils.make_datetime(ref_date_)
    if has_bounds:
        n = len(datetimes)
        bounds = numpy.empty([n, 2])
        rounded_times = [
            (cmor_utils.get_rounded_time(freq, time) - refdate).total_seconds()
            / 3600. for time in datetimes]
        bounds[:, 0] = rounded_times[:]
        bounds[0:n - 1, 1] = rounded_times[1:n]
        bounds[n - 1,
               1] = (cmor_utils.get_rounded_time(freq, datetimes[n - 1], 1) -
                     refdate).total_seconds() / 3600.
        times = bounds[:, 0] + (bounds[:, 1] - bounds[:, 0]) / 2
        return cmor.axis(table_entry=str(name),
                         units="hours since " + str(ref_date_),
                         coord_vals=times,
                         cell_bounds=bounds)
    times = numpy.array([(d - refdate).total_seconds() / 3600
                         for d in datetimes])
    return cmor.axis(table_entry=str(name),
                     units="hours since " + str(ref_date_),
                     coord_vals=times)
Example 14
    def __init__(self, **EchamComputeArgs):
        # FIXME: This all belongs in a generalized class, not here...
        super(Echam6Couple_Ice, self).__init__(**EchamComputeArgs)

        try:
            assert isinstance(self.calendar, CouplingEsmCalendar)
        except AssertionError:
            raise TypeError(
                "You must supply a calendar with coupling functionality: CouplingEsmCalendar, and not %s"
                % type(self.calendar))

        self.files["couple"] = FileDict()
        self._register_directory("couple", use_name="generic")

        self.__cleanup_list = []
        self._cdo_stderr = open(self.couple_dir + "/EchamCouple_Ice_cdo_log",
                                "w")
        self.CDO = cdo.Cdo(logging=True, logFile=self._cdo_stderr)

        # Get relevant environmental variables
        self.ECHAM_TO_ISM_multiyear_mean = load_environmental_variable_1_0(
            "ECHAM_TO_ISM_multiyear_mean")
        self.ECHAM_TO_ISM_time_mean = load_environmental_variable_1_0(
            "ECHAM_TO_ISM_time_mean")
Example 15
def advanced_basin_evaluation_driver(
        input_minima_file,
        input_minima_fieldname,
        input_raw_orography_file,
        input_raw_orography_fieldname,
        input_corrected_orography_file,
        input_corrected_orography_fieldname,
        input_cell_areas_file,
        input_cell_areas_fieldname,
        input_prior_fine_rdirs_file,
        input_prior_fine_rdirs_fieldname,
        input_prior_fine_catchments_file,
        input_prior_fine_catchments_fieldname,
        input_coarse_catchment_nums_file,
        input_coarse_catchment_nums_fieldname,
        input_coarse_rdirs_file,
        input_coarse_rdirs_fieldname,
        combined_output_filename,
        output_filepath,
        output_filelabel,
        output_basin_catchment_nums_filepath=None):
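    # Load every input field from its NetCDF file.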
    input_minima = iodriver.advanced_field_loader(
        input_minima_file,
        field_type='Generic',
        fieldname=input_minima_fieldname)
    input_raw_orography = iodriver.advanced_field_loader(
        input_raw_orography_file,
        field_type='Orography',
        fieldname=input_raw_orography_fieldname)
    input_corrected_orography = iodriver.advanced_field_loader(
        input_corrected_orography_file,
        field_type='Orography',
        fieldname=input_corrected_orography_fieldname)
    input_cell_areas = iodriver.advanced_field_loader(
        input_cell_areas_file,
        field_type='Generic',
        fieldname=input_cell_areas_fieldname)
    input_prior_fine_rdirs = iodriver.advanced_field_loader(
        input_prior_fine_rdirs_file,
        field_type='RiverDirections',
        fieldname=input_prior_fine_rdirs_fieldname)
    input_prior_fine_catchments = iodriver.advanced_field_loader(
        input_prior_fine_catchments_file,
        field_type='Generic',
        fieldname=input_prior_fine_catchments_fieldname)
    input_coarse_catchment_nums = iodriver.advanced_field_loader(
        input_coarse_catchment_nums_file,
        field_type='Generic',
        fieldname=input_coarse_catchment_nums_fieldname)
    input_coarse_rdirs = iodriver.advanced_field_loader(
        input_coarse_rdirs_file,
        field_type='Generic',
        fieldname=input_coarse_rdirs_fieldname)
    fine_grid = input_raw_orography.get_grid()
    fine_shape = input_raw_orography.get_data().shape
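    # Allocate zero-initialised output fields on the fine grid.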
    connection_volume_thresholds = field.Field(
        np.zeros(fine_shape, dtype=np.float64, order='C'), fine_grid)
    flood_volume_thresholds = field.Field(
        np.zeros(fine_shape, dtype=np.float64, order='C'), fine_grid)
    flood_next_cell_lat_index = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    flood_next_cell_lon_index = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    connect_next_cell_lat_index = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    connect_next_cell_lon_index = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    flood_force_merge_lat_index = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    flood_force_merge_lon_index = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    connect_force_merge_lat_index = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    connect_force_merge_lon_index = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    flood_redirect_lat_index = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    flood_redirect_lon_index = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    connect_redirect_lat_index = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    connect_redirect_lon_index = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    additional_flood_redirect_lat_index = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    additional_flood_redirect_lon_index = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    additional_connect_redirect_lat_index = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    additional_connect_redirect_lon_index = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    flood_local_redirect = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    connect_local_redirect = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    additional_flood_local_redirect = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    additional_connect_local_redirect = field.Field(
        np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    merge_points = field.Field(np.zeros(fine_shape, dtype=np.int32, order='C'),
                               fine_grid)
    if output_basin_catchment_nums_filepath is not None:
        basin_catchment_numbers = field.Field(
            np.zeros(fine_shape, dtype=np.int32, order='C'), fine_grid)
    else:
        basin_catchment_numbers = None
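    # Run the basin evaluation algorithm through its compiled wrapper.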
    evaluate_basins_wrapper.evaluate_basins(
        minima_in_int=np.ascontiguousarray(input_minima.get_data(),
                                           dtype=np.int32),
        raw_orography_in=np.ascontiguousarray(input_raw_orography.get_data(),
                                              dtype=np.float64),
        corrected_orography_in=np.ascontiguousarray(
            input_corrected_orography.get_data(), dtype=np.float64),
        cell_areas_in=np.ascontiguousarray(input_cell_areas.get_data(),
                                           dtype=np.float64),
        connection_volume_thresholds_in=connection_volume_thresholds.get_data(),
        flood_volume_thresholds_in=flood_volume_thresholds.get_data(),
        prior_fine_rdirs_in=np.ascontiguousarray(
            input_prior_fine_rdirs.get_data(), dtype=np.float64),
        prior_coarse_rdirs_in=np.ascontiguousarray(
            input_coarse_rdirs.get_data(), dtype=np.float64),
        prior_fine_catchments_in=np.ascontiguousarray(
            input_prior_fine_catchments.get_data(), dtype=np.int32),
        coarse_catchment_nums_in=np.ascontiguousarray(
            input_coarse_catchment_nums.get_data(), dtype=np.int32),
        flood_next_cell_lat_index_in=flood_next_cell_lat_index.get_data(),
        flood_next_cell_lon_index_in=flood_next_cell_lon_index.get_data(),
        connect_next_cell_lat_index_in=connect_next_cell_lat_index.get_data(),
        connect_next_cell_lon_index_in=connect_next_cell_lon_index.get_data(),
        flood_force_merge_lat_index_in=flood_force_merge_lat_index.get_data(),
        flood_force_merge_lon_index_in=flood_force_merge_lon_index.get_data(),
        connect_force_merge_lat_index_in=connect_force_merge_lat_index.get_data(),
        connect_force_merge_lon_index_in=connect_force_merge_lon_index.get_data(),
        flood_redirect_lat_index_in=flood_redirect_lat_index.get_data(),
        flood_redirect_lon_index_in=flood_redirect_lon_index.get_data(),
        connect_redirect_lat_index_in=connect_redirect_lat_index.get_data(),
        connect_redirect_lon_index_in=connect_redirect_lon_index.get_data(),
        additional_flood_redirect_lat_index_in=additional_flood_redirect_lat_index.get_data(),
        additional_flood_redirect_lon_index_in=additional_flood_redirect_lon_index.get_data(),
        additional_connect_redirect_lat_index_in=additional_connect_redirect_lat_index.get_data(),
        additional_connect_redirect_lon_index_in=additional_connect_redirect_lon_index.get_data(),
        flood_local_redirect_out_int=flood_local_redirect.get_data(),
        connect_local_redirect_out_int=connect_local_redirect.get_data(),
        additional_flood_local_redirect_out_int=additional_flood_local_redirect.get_data(),
        additional_connect_local_redirect_out_int=additional_connect_local_redirect.get_data(),
        merge_points_out_int=merge_points.get_data(),
        basin_catchment_numbers_in=(basin_catchment_numbers.get_data()
                                    if basin_catchment_numbers is not None
                                    else None))
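    # Write each output field to its own NetCDF file, then merge them all below.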
    connection_volume_thresholds_filename = path.join(
        output_filepath, "connect_vts_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(connection_volume_thresholds_filename,
                                   connection_volume_thresholds,
                                   fieldname='connection_volume_thresholds')
    flood_volume_thresholds_filename = path.join(
        output_filepath, "flood_vts_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(flood_volume_thresholds_filename,
                                   flood_volume_thresholds,
                                   fieldname='flood_volume_thresholds')
    flood_next_cell_lat_index_filename = path.join(
        output_filepath, "flood_nci_lat_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(flood_next_cell_lat_index_filename,
                                   flood_next_cell_lat_index,
                                   fieldname='flood_next_cell_lat_index')
    flood_next_cell_lon_index_filename = path.join(
        output_filepath, "flood_nci_lon_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(flood_next_cell_lon_index_filename,
                                   flood_next_cell_lon_index,
                                   fieldname='flood_next_cell_lon_index')
    connect_next_cell_lat_index_filename = path.join(
        output_filepath, "connect_nci_lat_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(connect_next_cell_lat_index_filename,
                                   connect_next_cell_lat_index,
                                   fieldname='connect_next_cell_lat_index')
    connect_next_cell_lon_index_filename = path.join(
        output_filepath, "connect_nci_lon_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(connect_next_cell_lon_index_filename,
                                   connect_next_cell_lon_index,
                                   fieldname='connect_next_cell_lon_index')
    flood_force_merge_lat_index_filename = path.join(
        output_filepath, "flood_fmi_lat_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(flood_force_merge_lat_index_filename,
                                   flood_force_merge_lat_index,
                                   fieldname='flood_force_merge_lat_index')
    flood_force_merge_lon_index_filename = path.join(
        output_filepath, "flood_fmi_lon_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(flood_force_merge_lon_index_filename,
                                   flood_force_merge_lon_index,
                                   fieldname='flood_force_merge_lon_index')
    connect_force_merge_lat_index_filename = path.join(
        output_filepath, "connect_fmi_lat_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(connect_force_merge_lat_index_filename,
                                   connect_force_merge_lat_index,
                                   fieldname='connect_force_merge_lat_index')
    connect_force_merge_lon_index_filename = path.join(
        output_filepath, "connect_fmi_lon_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(connect_force_merge_lon_index_filename,
                                   connect_force_merge_lon_index,
                                   fieldname='connect_force_merge_lon_index')
    flood_redirect_lat_index_filename = path.join(
        output_filepath, "flood_ri_lat_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(flood_redirect_lat_index_filename,
                                   flood_redirect_lat_index,
                                   fieldname='flood_redirect_lat_index')
    flood_redirect_lon_index_filename = path.join(
        output_filepath, "flood_ri_lon_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(flood_redirect_lon_index_filename,
                                   flood_redirect_lon_index,
                                   fieldname='flood_redirect_lon_index')
    connect_redirect_lat_index_filename = path.join(
        output_filepath, "connect_ri_lat_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(connect_redirect_lat_index_filename,
                                   connect_redirect_lat_index,
                                   fieldname='connect_redirect_lat_index')
    connect_redirect_lon_index_filename = path.join(
        output_filepath, "connect_ri_lon_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(connect_redirect_lon_index_filename,
                                   connect_redirect_lon_index,
                                   fieldname='connect_redirect_lon_index')
    additional_flood_redirect_lat_index_filename = path.join(
        output_filepath, "additional_flood_ri_lat_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(
        additional_flood_redirect_lat_index_filename,
        additional_flood_redirect_lat_index,
        fieldname='additional_flood_redirect_lat_index')
    additional_flood_redirect_lon_index_filename = path.join(
        output_filepath, "additional_flood_ri_lon_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(
        additional_flood_redirect_lon_index_filename,
        additional_flood_redirect_lon_index,
        fieldname='additional_flood_redirect_lon_index')
    additional_connect_redirect_lat_index_filename = path.join(
        output_filepath,
        "additional_connect_ri_lat_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(
        additional_connect_redirect_lat_index_filename,
        additional_connect_redirect_lat_index,
        fieldname='additional_connect_redirect_lat_index')
    additional_connect_redirect_lon_index_filename = path.join(
        output_filepath,
        "additional_connect_ri_lon_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(
        additional_connect_redirect_lon_index_filename,
        additional_connect_redirect_lon_index,
        fieldname='additional_connect_redirect_lon_index')
    flood_local_redirect_filename = path.join(
        output_filepath, "flood_local_r_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(flood_local_redirect_filename,
                                   flood_local_redirect,
                                   fieldname='flood_local_redirect')
    connect_local_redirect_filename = path.join(
        output_filepath, "connect_local_r_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(connect_local_redirect_filename,
                                   connect_local_redirect,
                                   fieldname='connect_local_redirect')
    additional_flood_local_redirect_filename = path.join(
        output_filepath,
        "additional_flood_local_r_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(additional_flood_local_redirect_filename,
                                   additional_flood_local_redirect,
                                   fieldname='additional_flood_local_redirect')
    additional_connect_local_redirect_filename = path.join(
        output_filepath,
        "additional_connect_local_r_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(
        additional_connect_local_redirect_filename,
        additional_connect_local_redirect,
        fieldname='additional_connect_local_redirect')
    merge_points_filename = path.join(
        output_filepath, "merge_points_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(merge_points_filename,
                                   merge_points,
                                   fieldname='merge_points')
    lake_centers_filename = path.join(
        output_filepath, "lake_centers_" + output_filelabel + ".nc")
    iodriver.advanced_field_writer(lake_centers_filename,
                                   input_minima,
                                   fieldname="lake_centers")
    individual_field_filenames = [
        connection_volume_thresholds_filename,
        flood_volume_thresholds_filename, flood_next_cell_lat_index_filename,
        flood_next_cell_lon_index_filename,
        connect_next_cell_lat_index_filename,
        connect_next_cell_lon_index_filename,
        flood_force_merge_lat_index_filename,
        flood_force_merge_lon_index_filename,
        connect_force_merge_lat_index_filename,
        connect_force_merge_lon_index_filename,
        flood_redirect_lat_index_filename, flood_redirect_lon_index_filename,
        connect_redirect_lat_index_filename,
        connect_redirect_lon_index_filename,
        additional_flood_redirect_lat_index_filename,
        additional_flood_redirect_lon_index_filename,
        additional_connect_redirect_lat_index_filename,
        additional_connect_redirect_lon_index_filename,
        flood_local_redirect_filename, connect_local_redirect_filename,
        additional_flood_local_redirect_filename,
        additional_connect_local_redirect_filename, merge_points_filename,
        lake_centers_filename
    ]
    cdo_inst = cdo.Cdo()
    cdo_inst.merge(input=" ".join(individual_field_filenames),
                   output=combined_output_filename)
    for individual_field_filename in individual_field_filenames:
        os.remove(individual_field_filename)
    if output_basin_catchment_nums_filepath is not None:
        iodriver.advanced_field_writer(output_basin_catchment_nums_filepath,
                                       basin_catchment_numbers,
                                       fieldname="basin_catchment_numbers")
Example 16
"""loading_tools
======================

The loading_tools module of cmipdata is a set of functions which use
the cdo python bindings and NetCDF4 to load data from input NetCDF
files listed in a cmipdata ensemble object into python numpy arrays.
Some processing can optionally be done during the loading, specifically
remapping, time-slicing, time-averaging and zonal-averaging.

.. moduleauthor:: Neil Swart <*****@*****.**>
"""
import cdo as cdo
cdo = cdo.Cdo()  # recommended import
import os
import numpy as np
from netCDF4 import Dataset, num2date, date2num
import datetime

# clean out tmp to make space for CDO processing.
os.system('rm -rf /tmp/cdo*')


def loadvar(ifile, varname, cdostr=None, **kwargs):
    """Load variables from a NetCDF file with optional pre-processing.

    Load a CMIP5 netcdf variable "varname" from "ifile" and an optional
    cdo string for preprocessing the data from the netCDF files.
    Requires netCDF4, CDO and CDO python bindings.
    Returns a masked array, var.
    """
Example 17
def split_years(fname, target):
    cdoapp = cdo.Cdo()
    #    cdoapp.debug = True
    return cdoapp.splityear(input=fname, output=target)
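splityear writes one output file per year in the input, with the four-digit year appended to the given output prefix. A hedged usage sketch (file names hypothetical):

# Writes e.g. out_1990.nc, out_1991.nc, ... one file per year in data.nc.
split_years("data.nc", "out_")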
Example 18
def load_remap(ifile_fp, varname, file_type=None):
    path, ifile = os.path.split(ifile_fp)

    if not os.path.isfile('remapped-data/intlev_{}'.format(ifile)):
        try:
            import cdo
            cdo = cdo.Cdo()
            cdo.env = {'REMAP_EXTRAPOLATE': 'off'}
        except ImportError:
            print('CDO must be installed to preprocess files.')
            raise

        print('preprocessing', ifile)

        with open('parameters/default_depths') as f:
            default_depths = f.read().replace('\n', ',')

        try:
            with Dataset('parameters/masks.nc', 'r') as mask:
                with Dataset(ifile_fp, 'r') as data:
                    if np.all(np.array(mask['nav_lat'][:]) == np.array(data['nav_lat'][:])) and \
                            np.all(np.array(mask['nav_lon'][:]) == np.array(data['nav_lon'][:])):
                        cdo.yearmean(input=ifile_fp, output='temp_' + ifile)
                    else:
                        raise AttributeError
                with Dataset('temp_' + ifile, 'a') as to_mask:
                    if file_type:
                        with open(
                                'parameters/scale_factors.json') as json_file:
                            var_defs = json.load(json_file)
                            to_mask.set_auto_maskandscale(True)
                            for var in to_mask.variables:
                                if var in var_defs[file_type].keys():
                                    to_mask[var].setncattr(
                                        'scale', var_defs[file_type][var][0])
                                    to_mask[var].units = var_defs[file_type][
                                        var][1]
                                    to_mask[var].long_name = var_defs[
                                        file_type][var][2]
                    for var in to_mask.variables:
                        if len(to_mask[var].shape) == 4:
                            to_mask[var][:] = np.ma.masked_invalid(
                                np.where(mask['tmask'][0, ], to_mask[var][:],
                                         np.nan))
            cdo.intlevel(default_depths,
                         input='-remapdis,parameters/default_grid temp_' +
                         ifile,
                         output='remapped-data/intlev_' + ifile,
                         options='-L',
                         force=False)
            os.remove('temp_' + ifile)
        except (AttributeError, IndexError):
            print('Data from {} is not on the NAA grid, no land mask '
                  'applied.'.format(ifile))
            cdo.intlevel(default_depths,
                         input='-remapdis,parameters/default_grid -yearmean ' +
                         ifile_fp,
                         output='remapped-data/intlev_' + ifile,
                         options='-L',
                         force=False)
        print(ifile, 'preprocessed.')

    remapped_file = 'remapped-data/intlev_' + ifile
    with Dataset(remapped_file, 'r') as nc:
        ncvar = nc.variables[varname]
        data = ncvar[:].squeeze()

        try:
            units = ncvar.units
        except AttributeError:
            print('Units not given for {} in {}, leaving empty.'.format(
                varname, ifile))
            units = ''

        dimensions = ncvar.dimensions

        try:
            for dimension in dimensions:
                if 'depth' in dimension.lower():
                    depth = nc.variables[dimension][:]
                    break
            else:
                raise IndexError
        except IndexError:
            raise SystemExit('\nDepths not given for {} in {}.'.format(
                varname, ifile))

        try:
            dates = num2date(nc['time_counter'][:],
                             units=nc['time_counter'].units,
                             calendar=nc['time_counter'].calendar)
            years = [x.year for x in dates]
        except IndexError:
            years = [0]

    lon = np.linspace(0, 360, 721)
    lat = np.linspace(40, 90, 101)

    return data, units, lon, lat, depth, dimensions, years
Example 19
"""
Does remapping of the reanalyses and observations onto a 1x1 grid and computes 
zonal means.

.. moduleauthor:: Neil Swart <*****@*****.**>
"""
import os
import glob
import cdo; cdo = cdo.Cdo()

def preprocess_observations(datapath='./'):
    # where we are starting from
    cwd = os.getcwd()
    # move to where the data is
    os.chdir(datapath)
    # Get the reanalysis monthly mean files
    rean = ['R1', 'R2', '20CR', 'ERA-Int', 'CFSR', 'MERRA']
    var = ['slp', 'u10m', 'uflx']
    files = []
    for r in rean:
        for v in var:
            files.extend([r + '_' + v + '.mon.mean.nc'])

    files = [f for f in files if not f.startswith('remap')
             and not f.startswith('zonal-mean')]
    # Add in CCMP and HadSLP2r files
    files.extend(['CCMP_198701-201112.nc', 'HadSLP2r_slp.mon.mean.nc'])
    files.extend(['20CR_ens_slp.mon.mean.nc', '20CR_ens_u10m.mon.mean.nc'])

    for f in files:
Example 20
 def __init__(self, code=0):
     self.operators = {}
     self.app = cdo.Cdo()
     if code > 0:
         self.add_operator(cdo_command.select_code_operator, code)
Example 21
    def __init__(self, target_grid: str, outfile: str, sources: list, **kwargs):
        """
        Contains universal methods

        :param target_grid: grid_routines to be interpolated onto
        :param sources: Information about which variables should be taken from which files using
                which interpolation method onto which type of grid_routines and whether to interpolate them vertically
        :param scrip_grid: Scrip file on rho grid_routines to interpolate with. Will be created if not provided
        :param verbose: whether text should be printed as the program is running
        Can have any of the following optional arguments:
            theta_s - S-coordinate surface control parameter - default 7.0
            theta_b - S-coordinate bottom control parameter - default 0.0
            layers - Number of S-coordinate layers - default 32
            hc - S-coordinate critical depth - default 150
            tcline - S-coordinate surface/bottom layer width - default 150
            sigma_type - default 3
            zeta_source - dict with keys 'name' 'file'. Sets zeta to the first timestep of 'name' in 'file'.
            file_type - output filetype -  default nc4c
            processes - number of processes cdo should use - default 8
            scrip_grid - SCRIP version of target_grid - optional, will be created if not passed
            verbose - whether to print runtime information - default false
            time_underscored - whether 'time' variables should be replaced with '#_time' - default False
            keep_weights - whether to keep calculated weights - default False
            keep_z_clim - whether to keep zclim - default False
        """
        # cdo
        self.cdo = cdo.Cdo()
        test_cdo(self.cdo)
        # Sources
        verify_sources(sources, kwargs.get('verbose', False))
        self.sources, self.target_grid = sources, target_grid

        # Vertical interpolation information
        # Replace anything not passed in with default values
        self.has_vertical = has_vertical(self.sources)
        if self.has_vertical:
            self.theta_s, self.theta_b, self.layers, self.hc, self.tcline, self.sigma_type = (
                kwargs.get('theta_s', 7.0), kwargs.get('theta_b', 0.0),
                kwargs.get("layers", 32), kwargs.get("hc", 150),
                kwargs.get('tcline', 150), kwargs.get("sigma_type", 3)
            )
            self.sc = sigma_stretch_sc(self.layers, True)
            self.cs = sigma_stretch_cs(self.theta_s, self.theta_b, self.sc, self.sigma_type)

            # Get z_levels
            with netCDF4.Dataset(target_grid, mode='r') as my_grid:
                self.h = my_grid.variables['h'][:]
            self.zeta = self.get_zeta(kwargs['zeta_source']) if 'zeta_source' in kwargs else np.zeros_like(self.h)
            self.z_level_rho, self.z_level_u, self.z_level_v = get_z_levels(self.h, self.sc, self.cs, self.hc,
                                                                            self.zeta)

        # CDO options
        self.file_type, self.processes = kwargs.get('file_type', 'nc4c'), kwargs.get('processes', 8)
        # Other Options
        self.verbose, self.time_underscored, self.keep_weights, self.keep_z_clim = (
            kwargs.get('verbose', False), kwargs.get('time_underscored', False),
            kwargs.get('keep_weights', False), kwargs.get('keep_z_clim', False)
        )
        self._adjustments = None
        self.outfile = outfile
        # Interpolator
        self.scrip_grid = kwargs.get('scrip_grid', scrip_grid_from_nc(target_grid))
        self.shift_pairs = ShiftPairCollection()
        if self.verbose:
            print("Finished setup")
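A hedged construction sketch for the class this __init__ belongs to; the class name Interpolator and all file names are assumptions, and sources follows the structure the docstring describes:

# Hypothetical usage of the constructor documented above.
my_sources = []  # would be filled per the format verify_sources expects
interp = Interpolator(target_grid='roms_grid.nc',  # class name assumed
                      outfile='interpolated.nc',
                      sources=my_sources,
                      layers=32, theta_s=7.0, theta_b=0.0,
                      hc=150, processes=4, verbose=True)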
Example 22
    if not os.path.exists(tmp):
        os.makedirs(tmp)
# setup folder for working via sshfs_mistral in ~/mistral_work
elif my_system == 'local':
    mistral_work = '~/mistral_work/'
    work = f'{mistral_work}{group}/{user}/'
    cdo_mistral = True
    if cdo_mistral:
        tmp = os.path.expanduser('~/tmp')
    else:
        tmp = work + 'tmp'
    if not os.path.exists(tmp):
        os.makedirs(tmp)

# start
cdo = cdo.Cdo(tempdir=tmp)


cmip6_folder = mistral_work+'ik1017/CMIP6/data/CMIP6'
cmip5_folder = mistral_work+'kd0956/CMIP5/data/cmip5/output1'
GE_folder = mistral_work+'mh1007'


def remap_cdo(da):
    if not isinstance(da, xr.core.dataset.Dataset):
        da = da.to_dataset()
    remap = cdo.remapbil(
        'r360x180', input=da, returnXDataset=True, options='-P 8')
    return remap
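Because remapbil is called with returnXDataset=True, the bindings hand back an xarray.Dataset rather than a file path, so remap_cdo slots directly into an xarray pipeline. A hedged usage sketch (file name hypothetical):

import xarray as xr

# Remap any lon/lat field onto a regular 1-degree global grid.
ds = xr.open_dataset('tas_Amon_historical.nc')  # hypothetical input
tas_1deg = remap_cdo(ds['tas'])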

Example 23
def concatenate(ncs_dir, output_nc):
    ncs = sorted(get_files(ncs_dir, '.nc'))
    c = cdo.Cdo()
    c.cat(input=' '.join(ncs), output=output_nc, options='-r -f nc')
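Here -r requests a relative time axis and -f nc forces NetCDF output, which matters when the inputs are not already NetCDF. The equivalent standalone call (file names hypothetical):

import cdo

# Concatenate along the time axis with a relative time axis (-r),
# writing NetCDF output (-f nc); file names are hypothetical.
cdo.Cdo().cat(input='jan.nc feb.nc', output='q1.nc', options='-r -f nc')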
Example 24
"""
Calculate the SAM index from a netCDF file containing sea-level pressure.

.. moduleauthor:: Neil Swart <*****@*****.**>
"""
import numpy as np
import pandas as pd
import cdo as cdo; cdo = cdo.Cdo() # recommended import
import os
os.system('rm -rf /tmp/cdo*')  # clean out tmp to make space for CDO processing.
import cmipdata as cd
from netCDF4 import Dataset

def calc_sam(psl_file, varname, start_date='1800-01-01', end_date='2013-12-31'):
    """
    Compute the SAM index as the pressure difference between 40 and 65S
    
    Parameters:
    -----------
        psl_file : str
            The name of the **zonal meaned** SLP netcdf file to compute the SAM
            from. Can be a full path.
        varname : str
            The name of the Sea Level Pressure variable in psl_file.
    
    Returns:
    -------
        sam : array
            The calculated SAM index
    """
    
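The body of calc_sam is truncated in the source. As a hedged sketch of the Gong and Wang (1999) definition the docstring implies, the normalised zonal-mean SLP difference between 40S and 65S, one might write the following (coordinate and variable names are assumptions, not the author's implementation):

def _calc_sam_sketch(psl_file, varname):
    # Minimal sketch: SAM as normalised zonal-mean SLP at 40S minus 65S.
    with Dataset(psl_file) as nc:
        lat = nc.variables['lat'][:]              # assumed coordinate name
        psl = nc.variables[varname][:].squeeze()  # assumed shape: time x lat
    i40 = np.abs(lat - (-40.0)).argmin()
    i65 = np.abs(lat - (-65.0)).argmin()
    p40, p65 = psl[:, i40], psl[:, i65]
    return (p40 - p40.mean()) / p40.std() - (p65 - p65.mean()) / p65.std()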
Example 25
def read_time_stamps(path):
    command = cdo.Cdo()
    times = command.showtimestamp(input=path)[0].split()
    return [datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%S") for s in times]
Example 26
#!/usr/bin/env python
# coding: utf-8

import matplotlib.pyplot as plt
from plot_tools import _decorate_x_axes_for_ymonmean
from simulation_tools import cosmos_standard_analysis
import cdo
from custom_io import get_remote_data
from scipy.io import netcdf

CDO = cdo.Cdo(cdfMod="scipy")

user = "******"
host = "stan1"
prepath = "pgierz@stan1:/ace/user/pgierz/cosmos-aso-wiso"
Eem130 = cosmos_standard_analysis(user, host,
                                  prepath.split(":")[-1],
                                  "Eem130-S2").ymonmean("SICOMO")
Eem125 = cosmos_standard_analysis(user, host,
                                  prepath.split(":")[-1],
                                  "Eem125-S2").ymonmean("SICOMO")
Eem120 = cosmos_standard_analysis(user, host,
                                  prepath.split(":")[-1],
                                  "Eem120-S2").ymonmean("SICOMO")
PI = cosmos_standard_analysis(user, host,
                              prepath.split(":")[-1],
                              "EXP003").ymonmean("SICOMO")

# Runs with future CO2
prepath = "pgierz@stan1:/ace/user/pgierz/cosmos-ao"
RCP6 = cosmos_standard_analysis(user, host,
Example 27
def main(args):
    '''
    The main function
    '''
    climatedb.connect()

    variable_name, data_source, frequency = get_args(args)

    print(data_source, variable_name)

    config = load_config()

    if data_source in config:
        organisation, author, year, article_url = get_citation_from_config(
            config, data_source)
        file_url = get_file_url_from_config(config, data_source, variable_name)
        file_urls = [file_url]

    else:
        model, scenario = data_source.split('.')

        search_url = config['esgf']['search']['url']
        project = config['esgf']['search']['project']
        variant = config['esgf']['search']['variant']

        datasets = search_esgf(search_url, project, variable_name, model,
                               scenario, frequency, variant)

        dataset_info = datasets[0]
        organisation = dataset_info['institution_id'][0]
        author, year, article_url = get_citation_from_url(
            dataset_info['citation_url'][0])

        thredds_url, thredds_id = get_thredds_url(dataset_info)
        file_urls = get_file_urls_from_thredds(thredds_url, thredds_id)

    file_paths = fetch_urls(file_urls)

    if len(file_paths) == 0:
        print('No datasets found', file=sys.stderr)
        return 1

    file_base, ext = file_paths[0].rsplit(os.path.extsep, 1)
    output_file_path = os.path.join(
        DATASET_DIR, '%s-%s.%s' % (data_source, variable_name, ext))
    if os.path.exists(output_file_path):
        os.remove(output_file_path)

    if len(file_paths) == 1:
        file_path = file_paths[0]
        os.symlink(os.path.basename(file_path), output_file_path)

    else:
        if ext == 'nc':
            cdo.Cdo().selall(input=sorted(file_paths),
                             output=output_file_path,
                             options='-f nc4')

    baseline = (config['baseline'] == data_source)
    create_data_source(data_source, organisation, author, year, article_url,
                       baseline)

    return 0
Example 28
import os
import tempfile

# External Imports
import cdo
import nco
import numpy as np
import xarray as xr

# This Library Imports:
from pyesm.core.component.component_coupling import ComponentCouple, cleanup_after_send, write_couple_vars_to_json
from pyesm.components.pism.pism_compute import PismCompute
from pyesm.core.helpers import load_environmental_variable_1_0, ComponentFile, FileDict
from pyesm.core.errors import CouplingError

CDO = cdo.Cdo()
NCO = nco.Nco()


class PismCouple(PismCompute, ComponentCouple):
    """ Functionality to couple PISM with other models """
    COMPATIBLE_COUPLE_TYPES = ["atmosphere", "solid_earth"]

    def __init__(self, **PismComputeArgs):
        super(PismCouple, self).__init__(**PismComputeArgs)
        self.files['couple']['ice_grid'] = ComponentFile(
            src=self.POOL_DIR + "grids/" + self.Domain + "/pismr_" + self.Domain + "_" + self.LateralResolution + ".griddes",
            dest=self.couple_dir)

    ################################################################################
    ################################################################################
Example 29
"""

This module contains several functions associated with traversing
directories and gathering information from files.

.. moduleauthor:: David Fallis
"""
import os
from netCDF4 import Dataset, num2date, date2num
import datetime
import itertools
import tarfile
import cmipdata as cd
import validate
import cdo
cdo = cdo.Cdo()

MEANDIR = None


def _variable_dictionary(plots):
    """ Creates a dictionary with the variable names as keys
        mapped to empty lists

    Parameters
    ----------
    plots : list of dictionaries with 'variable' key

    Returns
    -------
    dictionary
Example 30
def load(ifile_fp, varname, file_type=None):
    path, ifile = os.path.split(ifile_fp)

    if not os.path.isfile('remapped-data/yearmean_{}'.format(ifile)):
        try:
            import cdo
            cdo = cdo.Cdo()
            cdo.env = {'REMAP_EXTRAPOLATE': 'off'}
        except ImportError:
            raise SystemExit('CDO must be installed to preprocess files.')
        cdo.yearmean(input=ifile_fp, output='remapped-data/yearmean_' + ifile)
        with Dataset('parameters/masks.nc',
                     'r') as mask, Dataset('remapped-data/yearmean_' + ifile,
                                           'a') as to_mask:
            if file_type:
                with open('parameters/scale_factors.json') as json_file:
                    var_defs = json.load(json_file)
                    for var in to_mask.variables:
                        if var in var_defs[file_type].keys():
                            to_mask[var].setncattr('scale',
                                                   var_defs[file_type][var][0])
                            to_mask[var].units = var_defs[file_type][var][1]
                            to_mask[var].long_name = var_defs[file_type][var][
                                2]
            for var in to_mask.variables:
                if len(to_mask[var].shape) == 4:
                    to_mask[var][:] = np.ma.masked_invalid(
                        np.where(mask['tmask'][0, ], to_mask[var][:], np.nan))

    with Dataset('remapped-data/yearmean_' + ifile, 'r') as nc:

        ncvar = nc.variables[varname]
        data = ncvar[:].squeeze()

        try:
            units = ncvar.units
        except AttributeError:
            print('Units not given for {} in {}, leaving empty.'.format(
                varname, ifile))
            units = ''

        dimensions = ncvar.dimensions

        try:
            for dimension in dimensions:
                if 'depth' in dimension.lower():
                    depth = nc.variables[dimension][:]
                    break
            else:
                raise IndexError
        except IndexError:
            raise SystemExit('\nDepths not given for {} in {}.'.format(
                varname, ifile))

        try:
            dates = num2date(nc['time_counter'][:],
                             units=nc['time_counter'].units,
                             calendar=nc['time_counter'].calendar)
            years = [x.year for x in dates]
        except IndexError:
            years = [0]

        lon = nc.variables['nav_lon']
        lon = lon[:].squeeze()
        lat = nc.variables['nav_lat']
        lat = lat[:].squeeze()

    return data, units, lon, lat, depth, dimensions, years