Example 1
def read_multiband(objid, objdir, band=('g', 'r', 'z'), pixscale=0.262):
    """Read the multi-band images, construct the residual image, and then create a
    masked array from the corresponding inverse variances image.  Finally,
    convert to surface brightness by dividing by the pixel area.

    """
    import os
    import fitsio
    import numpy as np
    import numpy.ma as ma
    from scipy.ndimage import binary_dilation

    data = dict()
    for filt in band:

        image = fitsio.read(
            os.path.join(objdir, '{}-image-{}.fits.fz'.format(objid, filt)))
        model = fitsio.read(
            os.path.join(objdir, '{}-model-{}.fits.fz'.format(objid, filt)))
        invvar = fitsio.read(
            os.path.join(objdir, '{}-invvar-{}.fits.fz'.format(objid, filt)))

        # Mask pixels with ivar<=0. Also build an object mask from the model
        # image, to handle systematic residuals.
        sig1 = 1.0 / np.sqrt(np.median(invvar[invvar > 0]))

        mask = (invvar <= 0) * 1  # 1=bad, 0=good
        mask = np.logical_or(mask, (model > (2 * sig1)) * 1)
        mask = binary_dilation(mask, iterations=5) * 1

        data[filt] = (image - model) / pixscale**2  # [nanomaggies/arcsec**2]

        data['{}_mask'.format(filt)] = mask == 0  # True -> good pixel
        data['{}_masked'.format(filt)] = ma.masked_array(
            data[filt], ~data['{}_mask'.format(filt)])  # masked entries are bad pixels
        ma.set_fill_value(data['{}_masked'.format(filt)], 0)

    return data
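A minimal usage sketch (hedged: the object ID and directory below are hypothetical, and each band needs its image/model/invvar FITS triplet on disk):

data = read_multiband('NGC0628', '/path/to/cutouts')
resid_g = data['g_masked']       # masked residual surface brightness, g band
print(resid_g.fill_value)        # 0, as set above
print(resid_g.filled()[:3, :3])  # bad pixels replaced by the fill value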
Example 2
def rm_centre_colomn(ssh_model, ssh_obs, lon, lat, x_ac, x_al, time_sec):
    """Remove the centre column (index 60) from the SWOT grids."""
    fill_value = ssh_model.fill_value

    enssize = ssh_model.shape[0]
    dimtime = ssh_model.shape[1]
    dimnc = ssh_model.shape[2] - 1  # one column fewer after removal

    lon = np.delete(lon, 60, axis=1)
    lat = np.delete(lat, 60, axis=1)
    x_ac = np.delete(x_ac, 60)
    
    ssh_model_rmd = np.zeros([enssize,dimtime,dimnc]) # [layer, time, nC]
    ssh_obs_rmd = np.zeros([enssize,dimtime,dimnc]) 
    
    for k in range(enssize):
        ssh_model_rmd[k,:,:] = np.delete(ssh_model[k,:,:], 60, axis=1)
        ssh_obs_rmd[k,:,:] = np.delete(ssh_obs[k,:,:], 60, axis=1)
        
    ssh_model_rmd = ma.masked_where(ssh_model_rmd==fill_value,ssh_model_rmd)
    ma.set_fill_value(ssh_model_rmd, fill_value)
    ssh_obs_rmd = ma.masked_where(ssh_obs_rmd==fill_value,ssh_obs_rmd)
    ma.set_fill_value(ssh_obs_rmd, fill_value)
    
    return ssh_model_rmd, ssh_obs_rmd, lon, lat, x_ac, x_al, time_sec
Example 3
def func(V, Y):
    count_each_feature_value = Counter(V)
    feature_value = np.unique(V)
    num_value_feature = len(feature_value)
    subtotal = 0
    count = 0
    for ele in feature_value:  # each value in this feature
        weight = count_each_feature_value[feature_value[count]] / float(
            sampleNum)  # sampleNum: module-level total sample count
        after_mask = ma.masked_not_equal(V, ele)
        ma.set_fill_value(after_mask, 0)
        after_mask = after_mask.filled()
        mat_each_value_feature = np.dot(after_mask, Y)  # per-class totals for samples with V == ele (scaled by ele)
        tmp = mat_each_value_feature
        tmp_sum = np.sum(mat_each_value_feature)
        mat_each_value_feature = np.dot(mat_each_value_feature, 1.0 / tmp_sum)
        current = 0
        for k in mat_each_value_feature:
            if k == 0:
                continue
            current += k * math.log(k, 2)
        subtotal += current * weight
        count += 1
    subtotal = -subtotal
    return subtotal
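For reference, func computes the weighted conditional entropy H(Y|V) of a one-hot label matrix Y given the feature column V. A minimal sketch of a call (hedged: sampleNum is the module-level sample count the snippet assumes, and a feature value of 0 would collide with the fill value used above, so it is avoided here):

import math
from collections import Counter
import numpy as np
import numpy.ma as ma

V = np.array([1, 1, 2, 2, 2])   # one categorical feature column (no zeros)
Y = np.eye(2)[[0, 1, 0, 1, 1]]  # one-hot labels, shape (5, 2)
sampleNum = len(V)              # module-level global read by func
print(func(V, Y))               # ~0.951 bits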
Example 5
def resample_from_array(
    in_raster=None, in_affine=None, out_tile=None, resampling="nearest",
    nodataval=0
):
    """
    Extract and resample from array to target tile.

    Parameters
    ----------
    in_raster : array
    in_affine : ``Affine``
    out_tile : ``BufferedTile``
    resampling : string
        one of rasterio's resampling methods (default: nearest)
    nodataval : integer or float
        raster nodata value (default: 0)

    Returns
    -------
    resampled array : array
    """
    if isinstance(in_raster, ma.MaskedArray):
        pass
    elif isinstance(in_raster, np.ndarray):
        in_raster = ma.MaskedArray(in_raster, mask=in_raster == nodataval)
    elif isinstance(in_raster, ReferencedRaster):
        in_affine = in_raster.affine
        in_raster = in_raster.data
    elif isinstance(in_raster, tuple):
        in_raster = ma.MaskedArray(
            data=np.stack(in_raster),
            mask=np.stack([
                band.mask
                if isinstance(band, ma.masked_array)
                else np.where(band == nodataval, True, False)
                for band in in_raster
            ]),
            fill_value=nodataval
        )
    else:
        raise TypeError("wrong input data type: %s" % type(in_raster))
    if in_raster.ndim == 2:
        in_raster = ma.expand_dims(in_raster, axis=0)
    elif in_raster.ndim == 3:
        pass
    else:
        raise TypeError("input array must have 2 or 3 dimensions")
    if in_raster.fill_value != nodataval:
        ma.set_fill_value(in_raster, nodataval)
    out_shape = (in_raster.shape[0], ) + out_tile.shape
    dst_data = np.empty(out_shape, in_raster.dtype)
    in_raster = ma.masked_array(
        data=in_raster.filled(), mask=in_raster.mask, fill_value=nodataval)
    reproject(
        in_raster, dst_data, src_transform=in_affine, src_crs=out_tile.crs,
        dst_transform=out_tile.affine, dst_crs=out_tile.crs,
        resampling=Resampling[resampling])
    return ma.MaskedArray(dst_data, mask=dst_data == nodataval)
Example 6
def output_indices(bands, indices, meta, output_path):
    for name, formula in indices.items():
        image = formula(bands)

        image = utils.image_histogram_equalization(image)
        ma.set_fill_value(image, 0)

        store_as_single_color_geotiff(image, f"{output_path}/{name} (I).tif",
                                      meta)
Example 7
def read_ens(ensemble):
    """Read arrays from an ensemble of netcdf files.
    
    Parameters:
    ----------
    ensemble: input ensemble name
    
    Returns:
    -------
    ssh_model, ssh_obs, lat, lon, x_ac, x_al, time_sec: arrays
    """
# function reads an ensemble and returns a matrix containing the ssh at all grid points of all members #

### verify name of ensemble ###
    ensname = ensemble.split('/')[-1]

### extract number of members of ensemble ###
    enssizelist = re.findall(r'\d{4,4}', ensname)
    enssize = int(enssizelist[0])

### get grid dimension ###
    membername1 = ensemble+'/vctgridSWOT0001.nc'  # adjust here if the members use a "_denoised" suffix
    with xr.open_dataset(membername1,mask_and_scale=False) as dsmember1:
        time = dsmember1.time.size
        nc = dsmember1.nC.size
        lat = dsmember1.lat[:,:].values
        lon = dsmember1.lon[:,:].values
        x_ac = dsmember1.x_ac[:].values
        x_al = dsmember1.x_al[:].values
        time_sec = dsmember1.time_sec[:].values
        # first member of ensemble is later used to mask all members
        fill_value = dsmember1.ssh_model._FillValue
### load data ###
    ssh_model = np.zeros([enssize,time,nc]) # [layer, time, nC]
    ssh_obs = np.zeros([enssize,time,nc])
    for k in range(1, enssize + 1):
        membername = (ensemble+'/vctgridSWOT{:04d}.nc').format(k) 
        with xr.open_dataset(membername,mask_and_scale=False) as ds:
            buf_model = ds.ssh_model[:,:]
            buf_obs = ds.ssh_obs[:,:]
            ssh_model[k-1,:,:] = buf_model
            ssh_obs[k-1,:,:] = buf_obs
    
    ssh_obs = ma.masked_where(ssh_obs==fill_value, ssh_obs)
    ssh_model = ma.masked_where(ssh_model==fill_value, ssh_model)
    ma.set_fill_value(ssh_obs, fill_value)
    ma.set_fill_value(ssh_model, fill_value)

### return data ###
    return ssh_model, ssh_obs, lat, lon, x_ac, x_al, time_sec
Example 8
def constructY(Y, width):

    Y = ma.masked_equal(Y, value=10, copy=True)  # treat label 10 as missing
    ma.set_fill_value(Y, fill_value=0)
    Y = Y.filled()  # missing labels fall back to class 0

    Ynew = np.zeros((len(Y), width))
    for i in range(len(Y)):
        Ynew[i][Y[i]] = 1
    return Ynew
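A small usage sketch: labels equal to 10 are remapped to 0 by the mask-and-fill step, so they land in the same one-hot column as class 0 (hedged illustration):

import numpy as np
import numpy.ma as ma

Y = np.array([0, 2, 10])
print(constructY(Y, width=3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [1. 0. 0.]]  <- the label-10 row collapses onto class 0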
Example 9
def output_composite(bands, indices, meta, output_path):
    for name, formula in indices.items():
        image = [formula["R"](bands), formula["G"](bands), formula["B"](bands)]

        image = [utils.image_histogram_equalization(c) for c in image]

        for c in image:
            ma.set_fill_value(c, 0)

        store_as_rgb_geotiff(image, f"{output_path}/{name} (C).tif", meta)
Example 10
def projectToLatLong(ifile, outfile, img_bnd_coords, proj4string):

    ####################################
    # Inputs
    # For details see http://fcm7/projects/SPS/browser/SPS/trunk/data/products/MSG_Products.nl
    ulx_ll, uly_ll, lrx_ll, lry_ll = img_bnd_coords  # Bounding coordinates in lat/lon, although the image is in Mercator projection!
    # epsg_code = 3395 # Mercator projection code of the imagery
    ####################################

    # For testing
    # ifile = '/data/users/hadhy/HyVic/Obs/OLR_noborders/EIAM50_201901210230.png'

    # Open the image file
    ds = gdal.Open(ifile)
    band = ds.GetRasterBand(1)
    arr = band.ReadAsArray()
    # Mask out pixel values with no data
    arr = ma.masked_less(arr, 4)
    arr = ma.masked_greater(arr, 254)
    # Apply equation to retrieve brightness temperatures. See Chamberlain et al. 2013 https://rmets.onlinelibrary.wiley.com/doi/full/10.1002/met.1403
    arrbt = (-0.44 * (arr - 4.)) + 308.
    # Fill masked values with -9999 (because the Geotiff format doesn't accept masked arrays)
    ma.set_fill_value(arrbt, -9999)
    arrbt = arrbt.filled()

    # Set some image info ...
    ## Gets the bounding coordinates in Mercator projection
    ulx, uly = latlon2projected(ulx_ll, uly_ll, proj4string)
    lrx, lry = latlon2projected(lrx_ll, lry_ll, proj4string)
    ## Calculate geotransform object
    nx, ny = [ds.RasterXSize, ds.RasterYSize]

    ## NB: The coordinates start in the top left corner (usually), so
    ## xDist needs to be positive, and yDist negative
    xDist = (lrx - ulx) / nx if lrx > ulx else -1 * (lrx - ulx) / nx
    yDist = -1 * (uly - lry) / ny if uly > lry else (uly - lry) / ny
    rtnX, rtnY = [0, 0]
    gt = [ulx, xDist, rtnX, uly, rtnY, yDist]
    ## Projection information
    srs = osr.SpatialReference()
    srs.ImportFromProj4(proj4string)
    # srs.ImportFromEPSG(epsg_code) # Documentation says it is mercator

    # Create dataset in mercator projection
    memfile = ''
    ds_merc = makeGDALds(arrbt, nx, ny, gt, srs, 'MEM', memfile)

    # Regrid to latlon
    gdal.Warp(outfile, ds_merc, dstSRS='EPSG:4326', dstNodata=-9999)

    # Convert geotiff to a cube and save
    timestamp = os.path.basename(outfile).split('.')[0].split('_')[1]
    cube = sf.geotiff2cube(outfile, timestamp)

    return cube
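A quick sanity check on the brightness-temperature conversion above, using the constants from the cited equation (valid counts run from 4 to 254 after the masking step):

print((-0.44 * (4 - 4.)) + 308.)    # 308.0 K at the lowest unmasked count
print((-0.44 * (254 - 4.)) + 308.)  # 198.0 K at the highest unmasked count

So unmasked pixel values map to brightness temperatures of roughly 198-308 K.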
Example 11
def mask_outside_disk(inst_map):
    # Find coordinates and radius
    hpc_coords = all_coordinates_from_map(inst_map)
    r = np.sqrt(hpc_coords.Tx**2 + hpc_coords.Ty**2) / inst_map.rsun_obs

    # Mask everything outside of the solar disk
    mask = ma.masked_greater_equal(r, 1)
    ma.set_fill_value(mask, np.nan)
    where_disk = np.where(mask.mask == 1)

    return where_disk
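The returned indices are then typically applied back to the map data; a hedged sketch of the follow-up step (assuming inst_map is a sunpy map with a writable data array):

import numpy as np

where_disk = mask_outside_disk(inst_map)
inst_map.data[where_disk] = np.nan  # blank everything outside the solar disk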
Example 12
    def _standardize_table(self, tab):

        new_tab = tab.copy()
        for col in tab.columns:
            if tab[col].name == 'source_id':
                new_tab[col] = tab[col].astype(np.int64)
            if tab[col].unit is not None:
                colunit = new_tab[col].unit.to_string()
                new_tab[col].unit = GaiaQuery.unit_conversion.get(colunit)
            if tab[col].name == 'pos':
                ma.set_fill_value(new_tab[col], 180.0)
                new_tab[col] = new_tab[col].astype(np.float64)
                new_tab[col].name = 'dist'

        return new_tab
Example 13
File: ppi.py  Project: MrWwei/Utils
def extractvel(i):
    fcontent2 = wrl.io.read_iris(i)
    ftype = fcontent2['product_hdr']['product_configuration'][
        "product_name"].strip()
    for _, data in fcontent2['data'].items():
        vel = data["sweep_data"]["DB_VEL"]
        vel_data = vel['data']
        ma.set_fill_value(vel_data, 0)
        vel_data = vel_data.filled()
        vel_angel = vel['ele_start'][0]
        break
    cdict = [
        '#E6E6E6', '#00E0FE', '#0080FF', '#320096', '#00FB90', '#00BB99',
        '#008F00', '#CDC99F', '#767676', '#F88700', '#FFCF00', '#FFFF00',
        '#AE0000', '#D07000', '#FF0000', '#FF007D'
    ]
    cmap = colors.ListedColormap(cdict)
    norm = colors.Normalize(vmin=-50, vmax=50)
    bins = 778
    if ftype == 'VOL_A':
        bins = 831
    elif ftype == 'VOL_B':
        bins = 414
    el = np.full((360, bins), vel_angel)                                # elevation angle
    az = np.repeat(np.arange(360, dtype=float)[:, None], bins, axis=1)  # azimuth angle
    rl = np.tile(np.arange(bins, dtype=float), (360, 1))                # radial length


#    dbz = np.zeros((len(data), 460))  # reflectivity
    x, y, h = sph2cord(el, az, rl)  # sph2cord: spherical-to-Cartesian helper defined elsewhere
    x = np.concatenate((x, [x[0]]))  # close the contour
    y = np.concatenate((y, [y[0]]))  # close the contour
    plt.pcolor(x, y, vel_data, norm=norm, cmap=cmap)
    plt.title('Velocity')
    plt.axis('square')
    plt.colorbar()
    plt.show()
Example 14
def read_ens(ensemble, varname='ssh_model'):
    """Read arrays from an ensemble of netcdf files.
    
    Parameters:
    ----------
    ensemble: input ensemble name
    
    Returns:
    -------
    ssh_all, lat, lon: arrays
    """
    regexEnsname = re.compile('[\\w]\\d{4,4}.nc.bas')
    ensname = ensemble.split('/')[-1]
    match = regexEnsname.search(ensname)
    if match is None:
        raise NameError(
            'Ensemble not correctly named - see http://pp.ige-grenoble.fr/pageperso/brankarj/SESAM/ for naming input files'
        )
    enssizelist = re.findall('\\d{4,4}', ensname)
    enssize = int(enssizelist[0])
    membername1 = ensemble + '/vctgridSWOT0001_denoised.nc'  # members carry a "_denoised" suffix here
    with xr.open_dataset(membername1, mask_and_scale=False) as dsmember1:
        time = dsmember1.time.size
        nc = dsmember1.nC.size
        lat = dsmember1.lat[:, :].values
        lon = dsmember1.lon[:, :].values
        if varname == 'ssh_obs':
            fill_value = dsmember1.ssh_obs.fill_value  # for files written with SWOTdenoise; use the _FillValue attribute otherwise
        else:
            fill_value = dsmember1.ssh_model.fill_value
    ssh_all = np.zeros([enssize, time, nc])
    for k in range(1, enssize + 1):
        membername = (ensemble + '/vctgridSWOT{:04d}_denoised.nc').format(k)
        with xr.open_dataset(membername, mask_and_scale=False) as ds:
            if varname == 'ssh_obs':
                buf = ds.ssh_obs[:, :]
            else:
                buf = ds.ssh_model[:, :]
            ssh_all[k - 1, :, :] = buf

    ssh_all = ma.masked_where(ssh_all == fill_value, ssh_all)
    ma.set_fill_value(ssh_all, fill_value)
    return (ssh_all, lat, lon)
Example 15
def generate_d2(ssh_all):
    """Calculate the second derivations of the ssh across and along track (central difference)
    
    Parameters:
    ----------
    ssh_all: array
    
    Returns:
    -------
    d2c,d2a: arrays"""
    fill_value = ssh_all.fill_value
    if ssh_all.ndim == 3:
        isobs = False
        enssize = ssh_all.shape[0]
        dimtime = ssh_all.shape[1]
        dimnc = ssh_all.shape[2]
    if ssh_all.ndim == 2:
        isobs = True
        enssize = 1
        dimtime = ssh_all.shape[0]
        dimnc = ssh_all.shape[1]
        tmp = np.zeros([enssize, dimtime, dimnc])
        tmp[0, :, :] = ssh_all
        ssh_all = tmp
    ssh_all = ma.masked_where(ssh_all == fill_value, ssh_all)
    operand1 = ssh_all[:, :, 2:]
    operand2 = ssh_all[:, :, 1:dimnc - 1] * 2
    operand3 = ssh_all[:, :, :dimnc - 2]
    d2c = operand1 - operand2 + operand3
    d2c = ma.masked_where(d2c == fill_value, d2c)
    ma.set_fill_value(d2c, fill_value)
    operand1 = ssh_all[:, 2:, :]
    operand2 = ssh_all[:, 1:dimtime - 1, :] * 2
    operand3 = ssh_all[:, :dimtime - 2, :]
    d2a = operand1 - operand2 + operand3
    d2a = ma.masked_where(d2a == fill_value, d2a)
    ma.set_fill_value(d2a, fill_value)
    if isobs:
        return (d2a[0, :, :], d2c[0, :, :])
    if ssh_all.ndim == 3:
        return (d2a, d2c)
Example 16
def save_the_mat_file(lat,lon,data_mean,p_values, field, difference_on,pole,title,layer_interface,file_type,lat_lon_bounds,cscale,axes_direction,input_folder,\
		start_year,end_year,months_str,lon_sector ):
	#Setting absent variables to zero for saving purposes (can't save None)
	if p_values is None:
		p_values=0
	if layer_interface is None:
		layer_interface=0
	if cscale is None:
		cscale=0
	if field=='rho' and file_type=='ice_month':
		field='SSD'
	
	#Creating filename
	exp_name=str.split(input_folder,'_bergs_')[-1]
	if len(exp_name)>15:
		exp_name='Control'
	mat_filename='processed_data/'+exp_name+'_'+field+'_'+str(start_year)+'_to_'+ \
	str(end_year)+'_'+months_str+'_'+pole + '_' + axes_direction
	if difference_on==1:
		mat_filename=mat_filename+'_anomaly'
	if axes_direction!='xy' and lon_sector!='all':
		mat_filename=mat_filename+'_'+lon_sector
	mat_filename=mat_filename +'.mat'
	
	#Setting the mask equal to 1.e20 for saving reasons, to avoid the format being screwed up.
	#if axes_direction=='yz':
	fill_extra_values=False  #This is a hack.
	if fill_extra_values:
		ma.set_fill_value(data_mean,1.e20)
		data_mean=data_mean.filled()

	print(p_values)

	sc.savemat(mat_filename, {'lat':lat, 'lon':lon, 'data':data_mean,'p_values':p_values,'field':field, \
			'difference_on':difference_on, 'cscale':cscale,'pole': pole, 'title':title, 'layer_interface':layer_interface, \
			'file_type': file_type, 'axes_direction':axes_direction, 'lat_lon_bounds' : lat_lon_bounds})

	print('File: ' + mat_filename + ' saved.')
Example 17
def generate_d1(ssh_all):
    """Calculate the first derivations of the ssh across and along track (right difference)
    
    Parameters:
    ----------
    ssh_all: array
    
    Returns:
    -------
    d1c,d1a: arrays
    
    PROBLEM: SWOT simulator does not fill grid points that are not within the swaths. (ncdump -v ssh_obs xyz.nc shows a _ at these points). ncview fills these points with the _FillValue=2147483647.0 whereas python would like to fill them with missing_value which is not declared in the SWOT simulator output files. Python therefore takes an own value=9.96920996839e+36. SOLUTION: i) mask arrays with _PythonFillValue, ii) add a missing_value=2147483647.0 to variable ssh_obs in netcdf files and adjust this script to mask nan"""
    fill_value = ssh_all.fill_value
    if ssh_all.ndim == 3:
        isobs = False
        enssize = ssh_all.shape[0]
        dimtime = ssh_all.shape[1]
        dimnc = ssh_all.shape[2]
    if ssh_all.ndim == 2:
        isobs = True
        enssize = 1
        dimtime = ssh_all.shape[0]
        dimnc = ssh_all.shape[1]
        tmp = np.zeros([enssize, dimtime, dimnc])
        tmp[0, :, :] = ssh_all
        ssh_all = tmp
    ssh_all = ma.masked_where(ssh_all == fill_value, ssh_all)
    operand1 = ssh_all[:, :, 1:]
    operand2 = ssh_all[:, :, :dimnc - 1]
    d1c = operand1 - operand2
    d1c = ma.masked_where(d1c == fill_value, d1c)
    ma.set_fill_value(d1c, fill_value)
    operand1 = ssh_all[:, 1:, :]
    operand2 = ssh_all[:, :dimtime - 1, :]
    d1a = operand1 - operand2
    d1a = ma.masked_where(d1a == fill_value, d1a)
    ma.set_fill_value(d1a, fill_value)
    if isobs:
        return (d1a[0, :, :], d1c[0, :, :])
    return (d1a, d1c)
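A minimal sketch of the masking workaround described in the docstring above (hedged: the fill value is the SWOT simulator _FillValue; real data would come from the netcdf files):

import numpy as np
import numpy.ma as ma

fill_value = 2147483647.0
raw = np.array([[0.1, fill_value, 0.3],
                [0.2, 0.4, fill_value]])       # off-swath points carry _FillValue
ssh = ma.masked_where(raw == fill_value, raw)  # i) mask them explicitly
ma.set_fill_value(ssh, fill_value)             # keep the declared fill value
d1a, d1c = generate_d1(ssh)                    # differences now stay masked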
Example 18
def read_obs(observation):
    """Read arrays from a netcdf file.
    
    Parameters:
    ----------
    observation: input observation name
    
    Returns:
    -------
    ssh_obs, lat, lon: arrays
    """
    with xr.open_dataset(observation, mask_and_scale=False) as ds:
        time = ds.time.size
        nc = ds.nC.size
        lat = ds.lat[:, :].values
        lon = ds.lon[:, :].values
        ssh_obs = np.zeros([time, nc])
        ssh_obs[:, :] = ds.ssh_obs[:, :]
        fill_value = ds.ssh_obs._FillValue  # for files written with SWOTdenoise, change this to ssh_obs.fill_value if needed
        ssh_obs = ma.masked_where(ssh_obs == fill_value, ssh_obs)
        ma.set_fill_value(ssh_obs, fill_value)
    return (ssh_obs, lat, lon)
Example 19
def NDVI(rast_1_path,rast_2_path,mask_path,outpath):
	############################################################ I/O setup, read data sets and process into masked arrays
	#open mask band
	try:
		image_mask=gdal_io.multi_band(mask_path,False)
		image_mask.get_multiband_array()
		mask = image_mask.array != 4096
		image_mask = None		#free up the memory 
		#open red band
		image_red=gdal_io.multi_band(rast_1_path,False)
		image_red.get_multiband_array()
		#get a masked array from the red data object
		red_array=ma.masked_array(deepcopy(image_red.array),mask)
		#create the output object using the red band as a template
		ndvi_image=gdal_io.raster_output(outpath,image_red,False)
		ndvi_image.datatype=3		#set to int16
		ndvi_image.Create_Dataset()
		ndvi_image.get_multiband_array()
		image_red=None		#free up the memory 
		#open nir band
		image_nir=gdal_io.multi_band(rast_2_path,False)
		image_nir.get_multiband_array()
		nir_array=ma.masked_array(deepcopy(image_nir.array),mask)
		image_nir=None		#free up the memory
		########################################################### NDVI Processing
		ndvi_arr=((((nir_array-red_array)/(nir_array+red_array).astype('float16'))/SCALEF).astype('int16'))  # SCALEF: module-level scale factor
		ma.set_fill_value(ndvi_arr,INT16_NODATA)  # INT16_NODATA: module-level nodata constant
		ndvi_image.array =ndvi_arr.data
		ndvi_image.write_array()
		########################################################### Clean Up
		ndvi_image=None
		nir_array=None
		red_array=None
		return 0
	except Exception:
		return -1
Example 20
def main():
    """Create the model and start the evaluation process."""
    args = Parameters().parse()
    # #
    # args.method = 'student_res18_pre'
    args.method = 'student_esp_d'
    args.dataset = 'camvid_light'
    args.data_list = "/ssd/yifan/SegNet/CamVid/test.txt"
    args.data_dir = "/ssd/yifan/"
    args.num_classes = 11
    # args.method='psp_dsn_floor'
    args.restore_from = "./checkpoint/Camvid/ESP/base_57.8.pth"
    # args.restore_from="/teamscratch/msravcshare/v-yifan/ESPNet/train/0.4results_enc_01_enc_2_8/model_298.pth"
    # args.restore_from = "/teamscratch/msravcshare/v-yifacd n/sd_pytorch0.5/checkpoint/snapshots_psp_dsn_floor_1e-2_40000_TEACHER864/CS_scenes_40000.pth"
    # args.restore_from = "/teamscratch/msravcshare/v-yifan/sd_pytorch0.5/checkpoint/snapshots_psp_dsn_floor_1e-2_40000_TEACHER5121024_esp/CS_scenes_40000.pth"
    # args.data_list = '/teamscratch/msravcshare/v-yifan/deeplab_v3/dataset/list/cityscapes/train.lst'
    args.batch_size = 1
    print("Input arguments:")
    for key, val in vars(args).items():
        print("{:16} {}".format(key, val))

    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    print(args)
    output_path = args.output_path
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    # args.method='psp_dsn'
    deeplab = get_segmentation_model(args.method, num_classes=args.num_classes)

    ignore_label = 255
    id_to_trainid = {
        -1: ignore_label,
        0: ignore_label,
        1: ignore_label,
        2: ignore_label,
        3: ignore_label,
        4: ignore_label,
        5: ignore_label,
        6: ignore_label,
        7: 0,
        8: 1,
        9: ignore_label,
        10: ignore_label,
        11: 2,
        12: 3,
        13: 4,
        14: ignore_label,
        15: ignore_label,
        16: ignore_label,
        17: 5,
        18: ignore_label,
        19: 6,
        20: 7,
        21: 8,
        22: 9,
        23: 10,
        24: 11,
        25: 12,
        26: 13,
        27: 14,
        28: 15,
        29: ignore_label,
        30: ignore_label,
        31: 16,
        32: 17,
        33: 18
    }

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # args.restore_from="/teamscratch/msravcshare/v-yifan/sd_pytorch0.3/checkpoint/snapshots_resnet_psp_dsn_1e-4_5e-4_8_20000_DSN_0.4_769light/CS_scenes_20000.pth"
    # if 'dense' in args.method:
    #
    if args.restore_from is not None:
        saved_state_dict = torch.load(args.restore_from)
        c_keys = saved_state_dict.keys()
        for i in c_keys:
            flag = i.split('.')[0]
        if 'module' in flag:
            deeplab = nn.DataParallel(deeplab)
        deeplab.load_state_dict(saved_state_dict)
        if 'module' not in flag:
            deeplab = nn.DataParallel(deeplab)
    # if 'dense' not in args.method:
    #     deeplab = nn.DataParallel(deeplab)
    model = deeplab
    model.eval()
    model.cuda()
    # args.dataset='cityscapes_light'
    testloader = data.DataLoader(get_segmentation_dataset(
        args.dataset,
        root=args.data_dir,
        list_path=args.data_list,
        crop_size=(360, 480),
        mean=IMG_MEAN,
        scale=False,
        mirror=False),
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 pin_memory=True)

    data_list = []
    confusion_matrix = np.zeros((args.num_classes, args.num_classes))

    palette = get_palette(20)

    image_id = 0
    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            print('%d processed' % (index))
        if args.side:
            image, label, _, size, name = batch
        elif 'sd' in args.dataset:
            _, image, label, size, name = batch
        else:
            image, label, size, name = batch
        # print('image name: {}'.format(name))
        size = size[0].numpy()
        output = predict_esp(model, image)
        # seg_pred = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)
        result = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)
        # result=cv2.resize(result, (1024, 1024), interpolation=cv2.INTER_NEAREST)
        m_seg_pred = ma.masked_array(result, mask=torch.eq(label, 255))
        ma.set_fill_value(m_seg_pred, 20)
        seg_pred = m_seg_pred

        for i in range(image.size(0)):
            image_id += 1
            print('%d th segmentation map generated ...' % (image_id))
            args.store_output = 'True'
            output_path = './esp_camvid_base/'
            if not os.path.exists(output_path):
                os.mkdir(output_path)
            if args.store_output == 'True':
                # print('a')
                output_im = PILImage.fromarray(seg_pred[i])
                output_im.putpalette(palette)
                output_im.save(output_path + '/' + name[i] + '.png')

        seg_gt = np.asarray(label.numpy()[:, :size[0], :size[1]], dtype=int)
        ignore_index = seg_gt != 255
        seg_gt = seg_gt[ignore_index]
        seg_pred = seg_pred[ignore_index]
        confusion_matrix += get_confusion_matrix(seg_gt, seg_pred,
                                                 args.num_classes)

    pos = confusion_matrix.sum(1)
    res = confusion_matrix.sum(0)
    tp = np.diag(confusion_matrix)

    IU_array = (tp / np.maximum(1.0, pos + res - tp))
    mean_IU = IU_array.mean()

    print({'meanIU': mean_IU, 'IU_array': IU_array})

    print("confusion matrix\n")
    print(confusion_matrix)
Example 21
def createImgSCISAT(fileAbsPath):
    # read info from netcdf
    ncfile = Dataset(fileAbsPath, 'r')
    latitude = ncfile.groups['ACE-FTS-v2.2'].latitude
    longitude = ncfile.groups['ACE-FTS-v2.2'].longitude
    datestart = datetime.strptime(ncfile.groups['ACE-FTS-v2.2'].start_time,
                                  '%Y-%m-%d %H:%M:%S+00')
    dateend = datetime.strptime(ncfile.groups['ACE-FTS-v2.2'].end_time,
                                '%Y-%m-%d %H:%M:%S+00')
    ozone = ncfile.groups['ACE-FTS-v2.2'].groups['Data-L2_1km_grid'].variables[
        'O3'][:]
    heightLevels = ncfile.groups['ACE-FTS-v2.2'].groups[
        'Data-L2_1km_grid'].variables['z'][:]
    numBand = len(ozone)
    ncfile.close()

    #common vars
    no_value = -9999
    minValue = ma.min(ozone)
    maxValue = ma.max(ozone)
    ma.set_fill_value(ozone, no_value)
    ozone = ozone.filled()
    #ma.set_fill_value(heightLevels, no_value)
    #heightLevels = heightLevels.filled()
    sizeX = 1
    sizeY = 1
    dataType = gdal.GDT_Float32
    resolution = 1.0  # in degree
    driver = gdal.GetDriverByName('GTiff')
    outFile = 'ACE-FTS_L2_ozone_' + datestart.strftime(
        '%Y%m%d.%H%M%S') + '.tif'

    #create tiff
    dst_ds = driver.Create(outFile, sizeX, sizeY, numBand, dataType)
    for i in range(numBand):
        dst_ds.GetRasterBand(i + 1).WriteArray(
            np.expand_dims(np.expand_dims(ozone[i], axis=0), axis=0))
        # The computed stat produces this warning
        # Warning 1: Lost metadata writing to GeoTIFF ... too large to fit in tag.
        # An additional  *.aux.xml is added
        #if ozone[i] != no_value:
        #    dst_ds.GetRasterBand(i+1).ComputeStatistics(False)
        dst_ds.GetRasterBand(i + 1).SetNoDataValue(no_value)

    #set geotrasform matrix
    top_left_x = longitude - (resolution / 2)
    w_e_pixel_resolution = resolution
    top_left_y = latitude - (resolution / 2)
    n_s_pixel_resolution = -resolution
    coord = [
        top_left_x, w_e_pixel_resolution, 0, top_left_y, 0,
        n_s_pixel_resolution
    ]
    dst_ds.SetGeoTransform(coord)
    srs = osr.SpatialReference()
    srs.SetWellKnownGeogCS('WGS84')
    dst_ds.SetProjection(srs.ExportToWkt())

    #set metadata
    dst_ds.SetMetadataItem('GLOBAL_MAX', str(maxValue))
    dst_ds.SetMetadataItem('GLOBAL_MIN', str(minValue))
    dst_ds.SetMetadataItem('TIME_END', dateend.strftime('%Y-%m-%dT%H:%M:%SZ'))
    dst_ds.SetMetadataItem('TIME_START',
                           datestart.strftime('%Y-%m-%dT%H:%M:%SZ'))
    dst_ds.SetMetadataItem('VERTICAL_LEVELS_NUMBER', str(len(heightLevels)))
    dst_ds.SetMetadataItem('VERTICAL_LEVELS',
                           ','.join(str(x) for x in heightLevels))

    dst_ds = None

    return [outFile]
Example 22
def main():
    """Create the model and start the evaluation process."""
    args = Parameters().parse()

    # file_log = open(args.log_file, "w")
    # sys.stdout = sys.stderr = file_log

    print("Input arguments:")
    sys.stdout.flush()
    for key, val in vars(args).items():
        print("{:16} {}".format(key, val))

    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    output_path = args.output_path
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    deeplab = get_segmentation_model("_".join([args.network, args.method]),
                                     num_classes=args.num_classes)

    ignore_label = 255
    id_to_trainid = {
        -1: ignore_label,
        0: ignore_label,
        1: ignore_label,
        2: ignore_label,
        3: ignore_label,
        4: ignore_label,
        5: ignore_label,
        6: ignore_label,
        7: 0,
        8: 1,
        9: ignore_label,
        10: ignore_label,
        11: 2,
        12: 3,
        13: 4,
        14: ignore_label,
        15: ignore_label,
        16: ignore_label,
        17: 5,
        18: ignore_label,
        19: 6,
        20: 7,
        21: 8,
        22: 9,
        23: 10,
        24: 11,
        25: 12,
        26: 13,
        27: 14,
        28: 15,
        29: ignore_label,
        30: ignore_label,
        31: 16,
        32: 17,
        33: 18
    }

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    import urllib.request

    local_checkpoint, _ = urllib.request.urlretrieve(
        'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet101-imagenet.pth',
        'resnet101-imagenet.pth')

    saved_state_dict = torch.load(local_checkpoint)
    deeplab.load_state_dict(saved_state_dict)

    model = nn.DataParallel(deeplab)
    model.eval()
    model.cuda()

    testloader = data.DataLoader(get_segmentation_dataset(
        args.dataset,
        root=args.data_dir,
        list_path=args.data_list,
        crop_size=(1024, 2048),
        scale=False,
        mirror=False,
        network=args.network),
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 pin_memory=True)

    data_list = []
    confusion_matrix = np.zeros((args.num_classes, args.num_classes))

    palette = get_palette(20)

    image_id = 0
    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            print('%d processed' % (index))
            sys.stdout.flush()
        image, label, size, name = batch
        size = size[0].numpy()
        if torch_ver == '0.3':
            if args.use_ms == 'True':
                output = predict_multi_scale(model, image.numpy(),
                                             ([0.75, 1, 1.25]), input_size,
                                             args.num_classes, args.use_flip,
                                             args.method)
            else:
                if args.use_flip == 'True':
                    output = predict_multi_scale(model, image.numpy(),
                                                 ([args.whole_scale]),
                                                 input_size, args.num_classes,
                                                 args.use_flip, args.method)
                else:
                    if 'gt' in args.method:
                        label = Variable(label.long().cuda())
                        output = predict_whole_img_w_label(
                            model,
                            image.numpy(),
                            args.num_classes,
                            args.method,
                            scale=float(args.whole_scale),
                            label=label)
                    else:
                        output = predict_whole_img(model,
                                                   image.numpy(),
                                                   args.num_classes,
                                                   args.method,
                                                   scale=float(
                                                       args.whole_scale))
        else:
            with torch.no_grad():
                if args.use_ms == 'True':
                    output = predict_multi_scale(model, image.numpy(),
                                                 ([0.75, 1, 1.25]), input_size,
                                                 args.num_classes,
                                                 args.use_flip, args.method)
                else:
                    if args.use_flip == 'True':
                        output = predict_multi_scale(model, image.numpy(),
                                                     ([args.whole_scale]),
                                                     input_size,
                                                     args.num_classes,
                                                     args.use_flip,
                                                     args.method)
                    else:
                        if 'gt' in args.method:
                            output = predict_whole_img_w_label(
                                model,
                                image.numpy(),
                                args.num_classes,
                                args.method,
                                scale=float(args.whole_scale),
                                label=Variable(label.long().cuda()))
                        else:
                            output = predict_whole_img(model,
                                                       image.numpy(),
                                                       args.num_classes,
                                                       args.method,
                                                       scale=float(
                                                           args.whole_scale))

        seg_pred = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)
        m_seg_pred = ma.masked_array(seg_pred, mask=torch.eq(label, 255))
        ma.set_fill_value(m_seg_pred, 20)
        seg_pred = m_seg_pred

        for i in range(image.size(0)):
            image_id += 1
            print('%d th segmentation map generated ...' % (image_id))
            sys.stdout.flush()
            if args.store_output == 'True':
                output_im = PILImage.fromarray(seg_pred[i])
                output_im.putpalette(palette)
                output_im.save(output_path + '/' + name[i] + '.png')

        seg_gt = np.asarray(label.numpy()[:, :size[0], :size[1]], dtype=int)
        ignore_index = seg_gt != 255
        seg_gt = seg_gt[ignore_index]
        seg_pred = seg_pred[ignore_index]
        confusion_matrix += get_confusion_matrix(seg_gt, seg_pred,
                                                 args.num_classes)

    pos = confusion_matrix.sum(1)
    res = confusion_matrix.sum(0)
    tp = np.diag(confusion_matrix)

    IU_array = (tp / np.maximum(1.0, pos + res - tp))
    mean_IU = IU_array.mean()

    print({'meanIU': mean_IU, 'IU_array': IU_array})

    print("confusion matrix\n")
    print(confusion_matrix)
    sys.stdout.flush()
Example 23
    def __init__(self, filename):
        '''
        Handles setting up a reader.
        '''
        self.fields = {}
        self.info = {}

        self.nc_dataset = Dataset(filename)
        self.filename = filename

        self.time = common.ncvar_to_dict(self.nc_dataset.variables['Time'])
        self.time['data'] = ma.array(
            get_datetime_from_epoch(self.time['data'][:]))
        self.time['units'] = "Datetime objects"
        del self.time['_FillValue']

        self.fields['Nd'] = common.ncvar_to_dict(
            self.nc_dataset.variables['VolumetricDrops'])
        self.fields['Nd']['data'] = ma.transpose(
            np.power(10, self.fields['Nd']['data']))
        ma.set_fill_value(self.fields['Nd']['data'], 0.)
        self.fields['Nd']['data'] = self.fields['Nd']['data'].filled()
        del self.fields['Nd']['_FillValue']
        self.fields['Nd']['units'] = "1/m^3 1/mm"

        self.fields['RR'] = common.ncvar_to_dict(
            self.nc_dataset.variables['ParsivelIntensity'])
        self.fields['RR']['data'] = ma.masked_array(self.fields['RR']['data'])
        ma.set_fill_value(self.fields['RR']['data'],
                          self.fields['RR']['_FillValue'])
        del self.fields['RR']['_FillValue']

        self.fields['Zh'] = common.ncvar_to_dict(
            self.nc_dataset.variables['Reflectivity'])
        del self.fields['Zh']['_FillValue']

        self.fields['num_particles'] = common.ncvar_to_dict(
            self.nc_dataset.variables['RawDrops'])
        self.fields['num_particles']['data'] = ma.masked_array(
            self.fields['num_particles']['data'])
        ma.set_fill_value(self.fields['num_particles']['data'],
                          self.fields['num_particles']['_FillValue'])
        del self.fields['num_particles']['_FillValue']

        self.fields['terminal_velocity'] = \
            common.ncvar_to_dict(self.nc_dataset.variables['VelocityDrops'])
        self.fields['terminal_velocity']['data'] = \
            ma.transpose(ma.masked_array(self.fields['terminal_velocity']['data']))
        ma.set_fill_value(self.fields['terminal_velocity']['data'],
                          self.fields['terminal_velocity']['_FillValue'])
        del self.fields['terminal_velocity']['_FillValue']

        self.fields['Precip_Code'] = common.ncvar_to_dict(
            self.nc_dataset.variables['PrecipCode'])

        diameter = ma.array([
            0.0625, 0.1875, 0.3125, 0.4375, 0.5625, 0.6875, 0.8125, 0.9375,
            1.0625, 1.1875, 1.375, 1.625, 1.875, 2.125, 2.375, 2.75, 3.25,
            3.75, 4.25, 4.75, 5.5, 6.5, 7.5, 8.5, 9.5, 11., 13., 15., 17., 19.,
            21.5, 24.5
        ])

        spread = ma.array([
            0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125, 0.125,
            0.125, 0.250, 0.250, 0.250, 0.250, 0.250, 0.500, 0.500, 0.500,
            0.500, 0.500, 1.000, 1.000, 1.000, 1.000, 1.000, 2.000, 2.000,
            2.000, 2.000, 2.000, 3.000, 3.000
        ])

        # velocity = ma.array([0.05, 0.15, 0.25, 0.35, 0.45, 0.55, 0.65, 0.75, 0.85, 0.95, 1.1, 1.3,
        #                      1.5, 1.7, 1.9, 2.2, 2.6, 3.0, 3.4, 3.8, 4.4, 5.2, 6.0, 6.8, 7.6, 8.8,
        #                     10.4, 12.0, 13.6, 15.2, 17.6, 20.8])

        self.bin_edges = common.var_to_dict(
            'bin_edges', np.hstack((0, diameter + np.array(spread) / 2)), 'mm',
            'Boundaries of bin sizes')
        self.spread = common.var_to_dict('spread', spread, 'mm',
                                         'Bin size spread of bins')
        self.diameter = common.var_to_dict('diameter', diameter, 'mm',
                                           'Particle diameter of bins')

        for key in self.nc_dataset.ncattrs():
            self.info[key] = self.nc_dataset.getncattr(key)
Example 24
                np_mask = torch.squeeze(
                    flags.eq(2)).cpu().data.numpy().astype(float)
                rho = torch.squeeze(density).cpu().data.numpy()
                p = torch.squeeze(pressure).cpu().data.numpy()
                img_norm_vel = torch.squeeze(
                    torch.norm(tensor_vel, dim=1,
                               keepdim=True)).cpu().data.numpy()
                img_velx = torch.squeeze(tensor_vel[:, 0]).cpu().data.numpy()
                img_vely = torch.squeeze(tensor_vel[:, 1]).cpu().data.numpy()
                img_vel_norm = torch.squeeze( \
                        torch.norm(tensor_vel, dim=1, keepdim=True)).cpu().data.numpy()

                img_velx_masked = ma.array(img_velx, mask=np_mask)
                img_vely_masked = ma.array(img_vely, mask=np_mask)
                img_vel_norm_masked = ma.array(img_vel_norm, mask=np_mask)
                ma.set_fill_value(img_velx_masked, np.nan)
                ma.set_fill_value(img_vely_masked, np.nan)
                ma.set_fill_value(img_vel_norm_masked, np.nan)
                img_velx_masked = img_velx_masked.filled()
                img_vely_masked = img_vely_masked.filled()
                img_vel_norm_masked = img_vel_norm_masked.filled()

                if real_time:
                    cax_rho.clear()
                    cax_velx.clear()
                    cax_vely.clear()
                    cax_p.clear()
                    cax_div.clear()
                    fig.suptitle("it = " + str(it), fontsize=16)
                    im0 = ax_rho.imshow(rho[minY:maxY, minX:maxX],
                                        cmap=my_map,
Example 25
import numpy as np
import numpy.ma as ma

a = np.arange(5)
a                          # array([0, 1, 2, 3, 4])
a = ma.masked_where(a < 3, a)
a                          # masked_array(data=[--, --, --, 3, 4], mask=[True, True, True, False, False], fill_value=999999)
ma.set_fill_value(a, -999)
a                          # same masked array, but fill_value is now -999
a = range(5)
a
ma.set_fill_value(a, 100)  # no effect: a is not a masked array
a
a = np.arange(5)
a
ma.set_fill_value(a, 100)  # no effect: a is a plain ndarray
a
Example 26
def resample_from_array(in_raster=None,
                        in_affine=None,
                        out_tile=None,
                        in_crs=None,
                        resampling="nearest",
                        nodataval=None,
                        nodata=0):
    """
    Extract and resample from array to target tile.

    Parameters
    ----------
    in_raster : array
    in_affine : ``Affine``
    out_tile : ``BufferedTile``
    resampling : string
        one of rasterio's resampling methods (default: nearest)
    nodata : integer or float
        raster nodata value (default: 0)

    Returns
    -------
    resampled array : array
    """
    if nodataval is not None:
        warnings.warn("'nodataval' is deprecated, please use 'nodata'")
        nodata = nodata or nodataval
    # TODO rename function
    if isinstance(in_raster, ma.MaskedArray):
        pass
    elif isinstance(in_raster, np.ndarray):
        in_raster = ma.MaskedArray(in_raster, mask=in_raster == nodata)
    elif isinstance(in_raster, ReferencedRaster):
        in_affine = in_raster.affine
        in_crs = in_raster.crs
        in_raster = in_raster.data
    elif isinstance(in_raster, tuple):
        in_raster = ma.MaskedArray(
            data=np.stack(in_raster),
            mask=np.stack([
                band.mask if isinstance(band, ma.masked_array) else np.where(
                    band == nodata, True, False) for band in in_raster
            ]),
            fill_value=nodata)
    else:
        raise TypeError("wrong input data type: %s" % type(in_raster))
    if in_raster.ndim == 2:
        in_raster = ma.expand_dims(in_raster, axis=0)
    elif in_raster.ndim == 3:
        pass
    else:
        raise TypeError("input array must have 2 or 3 dimensions")
    if in_raster.fill_value != nodata:
        ma.set_fill_value(in_raster, nodata)
    dst_data = np.empty((in_raster.shape[0], ) + out_tile.shape,
                        in_raster.dtype)
    logger.debug(in_raster)
    logger.debug(in_affine)
    logger.debug(out_tile.affine)
    reproject(in_raster.filled(),
              dst_data,
              src_transform=in_affine,
              src_crs=in_crs or out_tile.crs,
              src_nodata=nodata,
              dst_transform=out_tile.affine,
              dst_crs=out_tile.crs,
              dst_nodata=nodata,
              resampling=Resampling[resampling])
    logger.debug(dst_data)
    out_raster = ma.MaskedArray(dst_data,
                                mask=dst_data == nodata,
                                fill_value=nodata)
    logger.debug(out_raster)
    return out_raster
Example 27
     #create geotransform
     xres = (xmax - xmin) / float(ncols)
     yres = (ymax - ymin) / float(nrows)
     geotransform = (xmin,xres,0,ymax,0, -yres)
     #create mask
     mask_DS = gdal.GetDriverByName('MEM').Create('', ncols, nrows, 1 ,gdal.GDT_Int32)
     mask_RB = mask_DS.GetRasterBand(1)
     mask_RB.Fill(0) #initialise raster with zeros
     mask_RB.SetNoDataValue(-32767)
     mask_DS.SetGeoTransform(geotransform)
     maskvalue = 1
     err = gdal.RasterizeLayer(mask_DS, [maskvalue], shpLyr)
     mask_DS.FlushCache()
     mask_array = mask_DS.GetRasterBand(1).ReadAsArray()    
     mask_RES = ma.masked_equal(mask_array, 255)          
     ma.set_fill_value(mask_RES, -32767)  
 ########################################################################
 #subset
 ########################################################################
 var_subset = varData_Ori[:,min(lat_inds[0]):max(lat_inds[0]) + 1, min(lon_inds[0]):max(lon_inds[0]) + 1]
 var_subset._set_mask(np.logical_not(np.flipud(mask_RES.mask)))  # update mask (flipud is reverse 180)
 lon_subset = lon_Ori[lon_inds]
 lat_subset = lat_Ori[lat_inds]
 ###################################################################################
 # Open a new NetCDF file to write the data to.  For format, you can choose
 # from
 # 'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', and 'NETCDF4'
 ###################################################################################
 #create file path and name
 InputFileDir,InputFile    = os.path.split(file_name)  
 InputDir,InputFileDirName = os.path.split(InputFileDir)  
Example 28
def NC_CLIP_VIA_SHP(NcFilePath,ShpFilePath,OutFilePath):
    strShpFilePath = ShpFilePath
    strNetCDFPathInput = NcFilePath
    #read shapefile
    shpDS = ogr.Open(strShpFilePath)
    shpLyr = shpDS.GetLayer()
    Envelop = shpLyr.GetExtent() 
    xmin,xmax,ymin,ymax = [np.round(Envelop[0]),
                           np.round(Envelop[1]),
                           np.round(Envelop[2]),
                           np.round(Envelop[3])]    #Your extents as given above
    mask_RES = []
    ######################################################
    #                      Process                       #
    ######################################################
    leadtime = 0
    EnsembleMember = 0
    ncInput  = Dataset(strNetCDFPathInput)
    var_name = list(ncInput.variables.keys())
    lon_Ori = getDimVar(ncInput,var_name,'X')
    lat_Ori = getDimVar(ncInput,var_name,'Y')
    reftime_Ori = getDimVar(ncInput,var_name,'S')
    #time = getDimVar(ncInput,varList,'time')
    name = 'prec'
    varData_Ori  =  getDataVar(ncInput,name)
    ######################################################
    #                    Create mask                     #
    ######################################################
    if len(mask_RES) == 0 :
        #get boundary and xs ys
        lat_bnds, lon_bnds = [ymin, ymax], [xmin+180, xmax+180]
        lat_inds = np.where((lat_Ori >= (lat_bnds[0])) & (lat_Ori <= lat_bnds[1]))
        lon_inds = np.where((lon_Ori >= (lon_bnds[0])) & (lon_Ori <= lon_bnds[1]))
        ncols = len(lon_inds[0])
        nrows = len(lat_inds[0])
        nreftime = len(reftime_Ori)
        #create geotransform
        xres = (xmax - xmin) / float(ncols)
        yres = (ymax - ymin) / float(nrows)
        geotransform = (xmin-3,xres,0,ymax,0,-yres)
        #create mask
        mask_DS = gdal.GetDriverByName('MEM').Create('', ncols, nrows, 1 ,gdal.GDT_Int32)
        mask_RB = mask_DS.GetRasterBand(1)
        mask_RB.Fill(0) #initialise raster with zeros
        mask_RB.SetNoDataValue(-32767)
        mask_DS.SetGeoTransform(geotransform)
        maskvalue = 1
        err = gdal.RasterizeLayer(mask_DS, [maskvalue], shpLyr)
        mask_DS.FlushCache()
        mask_array = mask_DS.GetRasterBand(1).ReadAsArray()    
        mask_RES = ma.masked_equal(mask_array, 255)          
        ma.set_fill_value(mask_RES, -32767)  
    ######################################################
    #                      Subset                        #
    ######################################################
    var_subset = varData_Ori[:,min(lat_inds[0])-1:max(lat_inds[0]), min(lon_inds[0])-1:max(lon_inds[0])]
    var_subset.__setmask__(np.logical_not(np.flipud(mask_RES.mask))) # update mask (flipud is reverse 180)
    #var_subset = var_subset.data
    lon_subset = lon_Ori[lon_inds]-180
    lat_subset = lat_Ori[lat_inds]
    ######################################################
    # Open a new NetCDF file for data saving. You can    #
    # choose one of the formats from 'NETCDF3_CLASSIC',  #
    # 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', and 'NETCDF4'  #
    ######################################################
    # Using our previous dimension info, we can create the new time dimension
    # Even though we know the size, we are going to set the size to unknown
    OutputFileDirName = OutFilePath
    ncOutput = Dataset(OutputFileDirName, 'w', format='NETCDF4')

    #ncOutput.createDimension('time', None)
    ncOutput.createDimension('lon', ncols)
    ncOutput.createDimension('lat', nrows)
    ncOutput.createDimension('reftime', nreftime)

    # Add lat Variable
    var_out_lat = ncOutput.createVariable('lat','f',("lat"))
    ncOutput.variables['lat'][:] = lat_subset[:]

    # Add lon Variable
    var_out_lon = ncOutput.createVariable('lon','f',("lon"))
    ncOutput.variables['lon'][:] = lon_subset[:]

    # Add leadtime Variable
    var_out_reftime = ncOutput.createVariable('reftime','f',("reftime"))
    ncOutput.variables['reftime'][:] = reftime_Ori[:]

    # Add data Variable
    var_out_data = ncOutput.createVariable('Precip', 'f',("reftime","lat","lon"))
    for i in range(np.size(reftime_Ori)):
        ncOutput.variables['Precip'][i,:,:] = var_subset[i,:,:]

    # attr
    ncOutput.history = "CLIP created " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " by LujunZ at OU"
    ncOutput.source  = "netCDF4 under python 3.6.5"
    ######################################################
    #                   Write close                      #
    ######################################################
    ncOutput.close()  # close the new file
    print('Done')
    
    return
Example 29
            print('NetCDF file is not right')
            continue
    else:
        nc = ncfile(local_store[0])

    if (count == 0): # Just do this once to get the dimensionality of the variable array

        data2 = nc.variables[variable][:]
        no_dims = len(np.shape(data2))
    # Load variable of interest, plus dimensions
    
    requested_months = (eyear - syear + 1)*12 #how many months span requested time period
    
    if (no_dims == 4): data2 = nc.variables[variable][date_diff:date_diff+requested_months,:,:,:]
    if (no_dims == 3): data2 = nc.variables[variable][date_diff:date_diff+requested_months,:,:]
    ma.set_fill_value(data2, np.nan) # Change any masked values to NaN to help with better interpolation (later)
    lat = nc.variables['lat'][:]
    lon = nc.variables['lon'][:]
    if (no_dims == 4):
        p_mod = nc.variables['plev'][:]
        p_units = str(nc.variables['plev'].units)

    # Load relevant metadata
    model_name = str(nc.source_id)
    ensemble_name = str(nc.variant_label)
    expid = str(nc.parent_source_id)
    grid_label = str(nc.grid_label)
    variant_label = str(nc.variant_label)

    # Create name of output file, and check if it has already been created
    if (inter == False): out_name = data_path + variable + '_' + vtype + '_' + expid +'_'+experiment+'_'+variant_label+'_'+grid_label+'_'+str(syear)+'01-'+str(eyear)+'12.nc'
Example 30
objtype = np.char.strip(ma.getdata(NGC['type']))

# Keep all globular clusters and planetary nebulae
keeptype = ('PN', 'GCl')
keep = np.zeros(len(NGC), dtype=bool)
for otype in keeptype:
    ww = [otype == tt for tt in objtype]
    keep = np.logical_or(keep, ww)
print(np.sum(keep))

clusters = NGC[keep]

# Fill missing major axes with a nominal 0.4 arcmin (roughly works
# for NGC7009, which is the only missing PN in the footprint).
ma.set_fill_value(clusters['majax'], 0.4)
clusters['majax'] = ma.filled(clusters['majax'].data)

# Increase the radius of IC4593
# https://github.com/legacysurvey/legacypipe/issues/347
clusters[clusters['name'] == 'IC4593']['majax'] = 0.5

#indesi = desimodel.footprint.is_point_in_desi(tiles, ma.getdata(clusters['ra']),
#                                              ma.getdata(clusters['dec']))
#print(np.sum(indesi))
#bb = clusters[indesi]
#bb[np.argsort(bb['majax'])[::-1]]['name', 'ra', 'dec', 'majax', 'type']

# Build the output catalog: select a subset of the columns and rename
# majax-->radius (arcmin-->degree)
out = Table()
Example 31
import os
import sys
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
from netCDF4 import Dataset

#vmin = -2
#vmax = 30

### figure 1
nc = Dataset(sys.argv[2])
var1=nc.variables[''.join([sys.argv[1],"Atl"])]
values=var1[:]
xs = nc["lat"][:]
ys = nc["zoc"][:]
error_value = -1.e30
values = ma.masked_values(values, error_value)
ma.set_fill_value(values, 0)
#title = sys.argv[2].split("/")[-1].replace(".nc","")
title = ''.join([sys.argv[1],"Atl"])
ylabel=var1.dimensions[0]
xlabel=var1.dimensions[1]
#clabel=var1.units

siz=16
siz_tick=14
X, Y = np.meshgrid(xs, ys)

fig, ax = plt.subplots()
plt.contour(X, Y, values,colors='k')
plt.contourf(X, Y, values,50,cmap=plt.cm.Spectral_r)
plt.gca().invert_yaxis()
plt.title(title,size=siz)