def plot_bTemp_diffs(chan_dict, chan_str, eps=1.):

    bTemp = ma.masked_less(chan_dict[chan_str]['bTemp'], -800.)
    bTemp_new = ma.masked_less(chan_dict[chan_str]['bTemp_new'], -800.)

    bTemp_diff = bTemp - bTemp_new
    #bTemp_diff = 100.*(bTemp - bTemp_new)/bTemp

    BT_diff_min = np.min(bTemp_diff)
    BT_diff_max = np.max(bTemp_diff)
    BT_diff_min_idx = np.where(bTemp_diff.data == BT_diff_min)
    BT_diff_max_idx = np.where(bTemp_diff.data == BT_diff_max)

    print "Minimum BT difference = {} @ {}".format(BT_diff_min,
                                                   BT_diff_min_idx)
    print "Maximum BT difference = {} @ {}".format(BT_diff_max,
                                                   BT_diff_max_idx)

    f = ppl.figure()
    ppl.imshow(bTemp_diff,
               interpolation='nearest',
               vmin=-1. * eps,
               vmax=eps,
               cmap='RdBu_r')
    ppl.colorbar(orientation='horizontal')
    ppl.title(
        '(Original - Converted) IDPS {} Brightness Temperature (K)'.format(
            chan_str))
    ppl.show(block=False)
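A minimal standalone sketch (toy values, not from the source) of the masking idiom used here: brightness temperatures carry a large negative fill value, and ma.masked_less hides those cells from both the difference statistics and the plot.

import numpy as np
import numpy.ma as ma

bt = np.array([[285.3, -999.9],
               [281.7, 290.1]])   # -999.9 is a fill value
bt = ma.masked_less(bt, -800.)    # fill values become masked cells
print(np.min(bt), np.max(bt))     # 281.7 290.1 -- fill value ignored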
Example #2
def fit_1981(t, f, ferr, t_test_epochs, t_window=8.0, min_npoints=15):
    import numpy as np
    import numpy.ma as ma
    # t_window - half width of fitting window
    # min_npoints - minimum number of photometric points for a fit within the t_window
    t_test_ampl = np.zeros_like(
        t_test_epochs) - 1000.  # set -1000 to mark bad/missing points
    t_test_ampl_err = np.zeros_like(t_test_epochs) - 1000.

    for (i, t_now) in enumerate(t_test_epochs):
        # select the points plus/minus the epoch
        #    print('Trying {:.2f} ...'.format(t_now))
        n_obs_mask = (t > (t_now - t_window)) * (t < (t_now + t_window))
        n = np.count_nonzero(n_obs_mask)
        #    print('{:d} points found'.format(n))

        if n < min_npoints:
            continue

    #    print('nonzero number of points found!')
        t_sel = t[n_obs_mask]
        d_sel = f[n_obs_mask]
        e_sel = ferr[n_obs_mask]
        #    print('t_now is {:.2f}'.format(t_now))

        # add hints and limits to the fit so it doesn't run away
        # (assumes a module-level lmfit Model `gmodel` and epoch spacing `step`)
        params = gmodel.make_params(t0=t_now, peak=0.1, bgnd=0.00)
        gmodel.set_param_hint('t0',
                              value=t_now,
                              min=t_now - (step / 2.),
                              max=t_now + (step / 2.))
        gmodel.set_param_hint('peak', value=0.1, min=0.0, max=5.)

        #    print('Parameter hints:')
        #for pname, par in gmodel.param_hints.items():
        #    print(pname, par)

        result = gmodel.fit(d_sel, t=t_sel, bgnd=0.0, t0=t_now, peak=0.1)

        if result.success:
            #            print('succeeded')
            if result.errorbars:
                #                print('got me some errorbars')
                asdf = result.eval_uncertainty(sigma=3)
                #                print(asdf)
                t_test_ampl[i] = result.best_values['peak']
                t_test_ampl_err[i] = result.params['peak'].stderr

        #        print(result.params['peak'].eval_uncertainty)
        else:
            print('FAILED to fit at {}'.format(t_now))

    # convert all to masked arrays (done after the loop, so the names are
    # defined even when no epoch had enough points to fit)
    ama = ma.masked_less(t_test_ampl, -999)
    ema = ma.masked_less(t_test_ampl_err, -999)
    tma = np.ma.masked_where(np.ma.getmask(ama), t_test_epochs)

    return (tma, ama, ema)
Example #3
def map_to_slit(fname, clip=0.0, gamma=1.0):
    """take all values from a map over clip, compute best slit for PV Slice
    """
    ia = taskinit.iatool()
    ia.open(fname)
    imshape = ia.shape()
    pix = ia.getchunk().squeeze()  # this should now be a numpy pix[ix][iy] map
    pixmax = pix.max()
    pixrms = pix.std()
    if False:
        pix1 = pix.flatten()
        rpix = stats.robust(pix1)
        logging.debug("stats: mean: %g %g" % (pix1.mean(), rpix.mean()))
        logging.debug("stats: rms: %g %g" % (pix1.std(), rpix.std()))
        logging.debug("stats: max: %g %g" % (pix1.max(), rpix.max()))
        logging.debug('shape: %s %s %s' %
                      (str(pix.shape), str(pix1.shape), str(imshape)))
    ia.close()
    nx = pix.shape[0]
    ny = pix.shape[1]
    x = np.arange(pix.shape[0]).reshape((nx, 1))
    y = np.arange(pix.shape[1]).reshape((1, ny))
    if clip > 0.0:
        nmax = nx * ny
        clip = clip * pixrms
        logging.debug("Using initial clip=%g for rms=%g" % (clip, pixrms))
        m = ma.masked_less(pix, clip)
        while m.count() == 0:
            clip = 0.5 * clip
            logging.debug("no masking...trying lower clip=%g" % clip)
            m = ma.masked_less(pix, clip)
        else:
            logging.debug("Clip=%g now found %d/%d points" %
                          (clip, m.count(), nmax))

    else:
        #@ todo   sigma-clipping with iterations?  see also astropy.stats.sigma_clip()
        rpix = stats.robust(pix.flatten())
        r_mean = rpix.mean()
        r_std = rpix.std()
        logging.info("ROBUST MAP mean/std: %f %f" % (r_mean, r_std))
        m = ma.masked_less(pix, -clip * r_std)
    logging.debug("Found > clip=%g : %g" % (clip, m.count()))
    if m.count() == 0:
        logging.warning("Returning a dummy slit, no points above clip %g" %
                        clip)
        edge = 3.0
        #slit = [edge,0.5*ny,nx-1.0-edge,0.5*ny]          # @todo    file a bug, this failed
        #  RuntimeError: (/var/rpmbuild/BUILD/casa-test/casa-test-4.5.7/code/imageanalysis/ImageAnalysis/PVGenerator.cc : 334) Failed AlwaysAssert abs( (endPixRot[0] - startPixRot[0]) - sqrt(xdiff*xdiff + ydiff*ydiff) ) < 1e-6
        slit = [edge, 0.5 * ny - 0.1, nx - 1.0 - edge, 0.5 * ny + 0.1]
    else:
        slit = convert_to_slit(m, x, y, nx, ny, gamma)
    return (slit, clip)
Example #4
def rtrnMskDegreeGrtrN(aMsk,n):
	"""
	Input: aMsk is a binary mask matrix. n is the number of degrees of 
	interest (under which values are going to be masked).
	Output: aMskDegreeGrtrN is a matrix where all nodes that have a 
	degree equal or higher than n with 1 values and the other with 0.
	NOTE: Returns a masking array with 1 and 0 values. It masks and 
	replaces an input matrix of binary data with nodes with degrees less 
	than n replaced by 1 (i.e. masked). It runs iteratively until 
	converging (all nodes have a degree equal or higher than n).
	"""
	#--------------------------
	# invert mask
	aMskDegreeGrtrN = np.logical_not(aMsk)
	#--------------------------
	# calculate degree of nodes
	rowSum = np.sum(aMskDegreeGrtrN,1)
	clmnSum = np.sum(aMskDegreeGrtrN,0)
	crrntCntRow = len(rowSum)#to start no mask
	crrntCntClmn = len(clmnSum)#to start no mask
	#--------------------------
	# mask nodes with less degree than n
	mksdRowSum = ma.masked_less(rowSum,n)
	mksdClmnSum = ma.masked_less(clmnSum,n)
	cntMskdRow = mksdRowSum.count()
	cntMskdClmn = mksdClmnSum.count()
	if not cntMskdRow or not cntMskdClmn:#no answer
		return False
	#--------------------------
	# make degree mask to converge
	while (cntMskdRow!=crrntCntRow or cntMskdClmn!=crrntCntClmn) and \
		cntMskdRow>0 and cntMskdClmn>0:#test for convergence
		crrntCntRow = cntMskdRow
		crrntCntClmn = cntMskdClmn
		posToZeroRow = np.where(mksdRowSum.mask==1)[0]
		posToZeroClmn = np.where(mksdClmnSum.mask==1)[0]
		if len(posToZeroRow)>0:
			aMskDegreeGrtrN[posToZeroRow,:] = 0
		if len(posToZeroClmn)>0:
			aMskDegreeGrtrN[:,posToZeroClmn] = 0
		rowSum = np.sum(aMskDegreeGrtrN,1)
		clmnSum = np.sum(aMskDegreeGrtrN,0)
		mksdRowSum = ma.masked_less(rowSum,n)
		mksdClmnSum = ma.masked_less(clmnSum,n)
		cntMskdRow = mksdRowSum.count()
		cntMskdClmn = mksdClmnSum.count()
		if not cntMskdRow or not cntMskdClmn:#no answer
			return False
	#--------------------------
	# invert output mask so nodes with higher degrees than n be unmasked
	aMskDegreeGrtrN = np.logical_not(aMskDegreeGrtrN)
	return aMskDegreeGrtrN
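A toy sketch (made-up matrix, only numpy assumed) of the core step the function iterates: sum each row and column to get node degrees, then use ma.masked_less to flag nodes whose degree falls below n.

import numpy as np
import numpy.ma as ma

a = np.array([[1, 0, 0],
              [0, 1, 1],
              [0, 1, 1]])                   # toy binary (unmasked-data) matrix
n = 2
rowDeg = np.sum(a, 1)                       # degree of each row node -> [1, 2, 2]
mskdRowDeg = ma.masked_less(rowDeg, n)      # degrees below n get masked
print(np.where(mskdRowDeg.mask == 1)[0])    # [0] -> row 0 would be zeroed next
print(mskdRowDeg.count())                   # 2 nodes still satisfy the degree cut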
Example #5
def nema_data_preprocess(imagearray, resamplesize):
    """ Function performs preprocessing on the input image:
          - resample to 64x64
          - smoothing with nine-point NEMA kernel
          - calculation of UFOV and CFOV regions
        Returns: masked UFOV and CFOV arrays
    """

    print("data preprocessing: ")
    print("array size: {}    resamplesize: {}".format(np.shape(imagearray),
                                                      resamplesize))
    # causes Fourier artifacts
    #imagearray = resample(resample(imagearray,resamplesize[0],axis=0),resamplesize[1],axis=1)
    if resamplesize[0] > 0 and resamplesize[1] > 0:
        imagearray = block_reduce(
            imagearray,
            block_size=(int(np.shape(imagearray)[0] / resamplesize[0]),
                        int(np.shape(imagearray)[1] / resamplesize[1])),
            func=np.sum)
    imagearray = imagearray.astype('float64')

    imagearray = nema_smooth(imagearray)
    """ NEMA step 1: "First, any pixels at the edge of UFOV containing less
                      than 75% of the mean counts per pixel in the CFOV shall
                      be set to zero."
    """
    # first estimate of UFOV (use segmentation-threshold = mean value of entire image)
    threshold = set_threshold(imagearray)
    ufov = ma.masked_less(imagearray, threshold, copy=False)

    # use NEMA guidelines to determine UFOV
    cfov = create_cfov(ufov)

    # average of CFOV
    cfov_average = set_threshold(cfov)
    ufov = ma.masked_less(imagearray, 0.75 * cfov_average, copy=False)
    """ NEMA step 2: "Second, those pixels which now have at least one of their
        four directly abutted neighbors containing zero counts, will be also
        set to zero. The remaining non-zero pixels are the pixels to be included
        in the analysis for the UFOV.
    """
    ufov.mask = scipy.ndimage.binary_dilation(ufov.mask, iterations=1)
    # based on final UFOV, create a new CFOV
    cfov = create_cfov(ufov)

    # FIXME: inconsistent use of xy (in python y is the horizontal dimension)
    #ux, uy = get_dims(ufov)

    ufov.fill_value = 0
    cfov.fill_value = 0

    return ufov, cfov
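A toy sketch of the two quoted NEMA steps in isolation (arbitrary numbers, not a NEMA-compliant flood image): ma.masked_less applies the 75%-of-mean cut, and dilating the resulting mask also removes pixels that abut a masked neighbour.

import numpy as np
import numpy.ma as ma
import scipy.ndimage

img = np.array([[1.,  2., 1.],
                [2., 10., 2.],
                [1.,  2., 1.]])
ufov = ma.masked_less(img, 0.75 * img.mean(), copy=False)     # step 1: drop low-count pixels
ufov.mask = scipy.ndimage.binary_dilation(ufov.mask)          # step 2: drop their neighbours too
print(ufov)                                                   # only the centre pixel survives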
Example #6
def nema_data_preprocess(imagearray,resamplesize):
    """ Function performs preprocessing on the input image:
          - resample to 64x64
          - smoothing with nine-point NEMA kernel
          - calculation of UFOV and CFOV regions
        Returns: masked UFOV and CFOV arrays
    """    

    print "data preprocessing: "
    print "array size:",  np.shape(imagearray), "resamplesize: ", resamplesize
    # causes Fourier artifacts
    #imagearray = resample(resample(imagearray,resamplesize[0],axis=0),resamplesize[1],axis=1)
    if resamplesize[0]>0 and resamplesize[1]>0:
      imagearray = block_reduce(imagearray, block_size=(int(np.shape(imagearray)[0]/resamplesize[0]), int(np.shape(imagearray)[1]/resamplesize[1])), func=np.sum)
    imagearray = imagearray.astype('float64')

    imagearray = nema_smooth(imagearray) 


    """ NEMA step 1: "First, any pixels at the edge of UFOV containing less
                      than 75% of the mean counts per pixel in the CFOV shall
                      be set to zero."
    """                      
    # first estimate of UFOV (use segmentation-threshold = mean value of entire image)
    threshold = set_threshold(imagearray)
    ufov = ma.masked_less(imagearray,threshold,copy=False)
    
    # use NEMA guidelines to determine UFOV
    cfov = create_cfov(ufov)
    
    # average of CFOV
    cfov_average = set_threshold(cfov)
    ufov = ma.masked_less(imagearray,0.75*cfov_average,copy=False)
    
    
    """ NEMA step 2: "Second, those pixels which now have at least one of their
        four directly abutted neighbors containing zero counts, will be also
        set to zero. The remaining non-zero pixels are the pixels to be included
        in the analysis for the UFOV.
    """
    ufov.mask=scipy.ndimage.binary_dilation(ufov.mask,iterations=1)
    # based on final UFOV, create a new CFOV
    cfov = create_cfov(ufov)

    # FIXME: inconsistent use of xy (in python y is the horizontal dimension)
    #ux, uy = get_dims(ufov)

    ufov.fill_value=0
    cfov.fill_value=0

    return ufov, cfov
Example #7
 def test_masked_data_and_x_y_2d(self):
     '''Test masked data and x'''
     new_data = ma.masked_less(self.data2d, 3)
     new_lon = ma.masked_less(self.lon2d, 2)
     c_data, c_lons, c_lats = add_cyclic(new_data, x=new_lon, y=self.lat2d)
     r_data = ma.concatenate((self.data2d, self.data2d[:, :1]), axis=1)
     r_lons = np.concatenate(
         (self.lon2d, np.full((self.lon2d.shape[0], 1), 360.)), axis=1)
     assert_array_equal(c_data, r_data)
     assert_array_equal(c_lons, r_lons)
     assert_array_equal(c_lats, self.c_lat2d)
     assert ma.is_masked(c_data)
     assert ma.is_masked(c_lons)
     assert not ma.is_masked(c_lats)
Example #8
    def _check_sst_quality(self, dataset, product_type):
        mask_specs = product_type.get_mask_consistency_check_specs()
        if len(mask_specs) == 0:
            return

        sst_variable_names = product_type.get_sst_variable_names()
        if len(sst_variable_names) == 0:
            return

        quality_variable_name = mask_specs[0][2]
        quality_data = dataset.variables[quality_variable_name][:]

        valid_retrieval_quality = ma.masked_less(quality_data, 2)
        self.report["sst_valid_retrieval"] = float(valid_retrieval_quality.count())

        failed_retrieval_quality = ma.masked_not_equal(quality_data, 1)
        sst_variable = dataset.variables[sst_variable_names[0]]
        fill_value = sst_variable.getncattr('_FillValue')
        sst_quality_one_data = ma.array(sst_variable[:], mask=failed_retrieval_quality.mask)

        invalid_retrieval = ma.masked_equal(sst_quality_one_data, fill_value)
        self.report["sst_invalid_retrieval"] = float(invalid_retrieval.count())

        failed_retrieval = ma.masked_not_equal(sst_quality_one_data, fill_value)
        self.report["sst_failed_retrieval"] = float(failed_retrieval.count())

        not_ocean = ma.masked_not_equal(quality_data, 0)
        self.report["not_ocean"] = float(not_ocean.count())
Example #9
    def __init__(self,
                 dens,
                 grad,
                 ked,
                 grid=None,
                 trans='rational',
                 trans_k=2,
                 trans_a=1,
                 denscut=0.0005):
        r"""Initialize class from arrays.

        Parameters
        ----------
        dens : np.ndarray
            Electron density of grid points, :math:`\rho(\mathbf{r})`.
        grad : np.ndarray
            Gradient of electron density of grid points, :math:`\nabla \rho(\mathbf{r})`.
        ked : np.ndarray
            Positive-definite or Lagrangian kinetic energy density of grid
            points; :math:`\tau_\text{PD} (\mathbf{r})` or :math:`G(\mathbf{r})`.
        grid : instance of `Grid`, optional
            Grid used for computation of ELF. Only if this a CubeGrid one can generate the scripts.
        trans : str, optional
            Type of transformation applied to ELF ratio; options are 'rational' or 'hyperbolic'.
        trans_k : float, optional
            Parameter :math:`k` of transformation.
        trans_a : float, optional
            Parameter :math:`a` of transformation.
        denscut : float, optional
            Value of density cut. ELF value of points with density < denscut is set to zero.

        """
        if dens.shape != ked.shape:
            raise ValueError(
                'Arguments dens and ked should have the same shape!')
        if grad.ndim != 2:
            raise ValueError('Argument grad should be a 2d-array!')
        if grad.shape[0] != dens.shape[0]:
            raise ValueError(
                'Argument dens & grad should have the same length!')
        if trans.lower() not in ['rational', 'hyperbolic']:
            raise ValueError(
                'Argument trans should be either "rational" or "hyperbolic".')
        if not trans_k > 0:
            raise ValueError(
                'Argument trans_k should be positive! trans_k={0}'.format(
                    trans_k))
        if not trans_a > 0:
            raise ValueError(
                'Argument trans_a should be positive! trans_a={0}'.format(
                    trans_a))
        self._grid = grid
        self._denstool = DensGradTool(dens, grad)
        # compute elf ratio
        self._ratio = ked - self._denstool.ked_weizsacker
        self._ratio /= masked_less(self._denstool.ked_thomas_fermi, 1.0e-30)
        # compute elf value & set low density points to zero
        self._value = np.asarray(
            self._transform(self._ratio, trans.lower(), trans_k, trans_a))
        self._value[self._denstool.density < denscut] = 0.
Example #10
def combine_landsat():
    fh_out = Dataset('../../processed_data/landsat/20180719.nc', 'w')

    flag = False
    for i in range(1, 8):
        fh_in = Dataset(
            '../../raw_data/landsat/nebraska/SRB{}_20180719.nc'.format(i), 'r')
        if not flag:
            lats, lons = fh_in.variables['lat'][:], fh_in.variables['lon'][:]

            fh_out.createDimension("lat", len(lats))
            fh_out.createDimension("lon", len(lons))

            for v_name, varin in fh_in.variables.items():
                if v_name in ["lat", "lon"]:
                    outVar = fh_out.createVariable(v_name, varin.datatype,
                                                   (v_name, ))
                    outVar.setncatts(
                        {k: varin.getncattr(k)
                         for k in varin.ncattrs()})
            fh_out.variables["lat"][:] = lats[:]
            fh_out.variables["lon"][:] = lons[:]
            flag = True

        for v_name, varin in fh_in.variables.items():
            if v_name == 'Band1':
                outVar = fh_out.createVariable('band{}'.format(i),
                                               varin.datatype, ('lat', 'lon'))
                outVar.setncatts(
                    {k: varin.getncattr(k)
                     for k in varin.ncattrs()})
                outVar[:] = ma.masked_less(varin[:], 0)

        fh_in.close()
    fh_out.close()
Example #11
    def _check_sst_quality(self, dataset, product_type):
        mask_specs = product_type.get_mask_consistency_check_specs()
        if len(mask_specs) == 0:
            return

        sst_variable_names = product_type.get_sst_variable_names()
        if len(sst_variable_names) == 0:
            return

        quality_variable_name = mask_specs[0][2]
        quality_data = dataset.variables[quality_variable_name][:]

        valid_retrieval_quality = ma.masked_less(quality_data, 2)
        self.report["sst_valid_retrieval"] = float(
            valid_retrieval_quality.count())

        failed_retrieval_quality = ma.masked_not_equal(quality_data, 1)
        sst_variable = dataset.variables[sst_variable_names[0]]
        fill_value = sst_variable.getncattr('_FillValue')
        sst_quality_one_data = ma.array(sst_variable[:],
                                        mask=failed_retrieval_quality.mask)

        invalid_retrieval = ma.masked_equal(sst_quality_one_data, fill_value)
        self.report["sst_invalid_retrieval"] = float(invalid_retrieval.count())

        failed_retrieval = ma.masked_not_equal(sst_quality_one_data,
                                               fill_value)
        self.report["sst_failed_retrieval"] = float(failed_retrieval.count())

        not_ocean = ma.masked_not_equal(quality_data, 0)
        self.report["not_ocean"] = float(not_ocean.count())
Example #12
    def action(self, state):
        mesh = state.mesh.copy().apply_transform(state.T_obj_world.matrix)

        mesh.fix_normals()
        vertices, face_ind = sample.sample_surface_even(mesh, self.num_samples)
        normals = mesh.face_normals[face_ind]
        
        angles = normals.dot(up)
        valid_pushes = np.logical_and(ma.masked_greater(angles, 1.39626), ma.masked_less(angles, 1.74533)) # within 10 degrees of horizontal
        if not np.any(valid_pushes):
            return LinearPushAction(None, None, metadata={'vertex': np.array([0,0,0]), 'normal': np.array([1,0,0])})
        best_valid_ind = np.argmax(normals[valid_pushes][:,2]) # index of best 
        best_ind = np.arange(self.num_samples)[valid_pushes][best_valid_ind]
        
        start_position = vertices[best_ind] + normals[best_ind] * .015
        end_position = vertices[best_ind] - normals[best_ind] * .04
        
        start_pose, end_pose = self.get_hand_pose(start_position, end_position)
        return LinearPushAction(
            start_pose,
            end_pose,
            metadata={
                'vertex': vertices[best_ind],
                'normals': normals[best_ind],
            }
        )
Example #13
def plotWinterMaps(dataset, dataVar, winter, minval, maxval, cbarTicks = None, title = "", cmap = 'viridis'):
    """Plot maps of the arctic on North Pole Stereo projection with several months of data overlayed, along with the sea ice edge for each month. 
   
    Args:
        dataset (xr Dataset): dataset from google bucket
        dataVar (str): variable of interest
        winter (list): list of pandas Timestamp objects generated by getWinterDateRange(startYear, endYear)
        minval, maxval (int): minimum and maximum values for the data variable 
        cbarTicks (list or np array, optional): ticks to use on colorbar (default: np.arange(minval, maxval + 1, 1))
        title (str, optional): title of subplots (default to empty string)
        cmap (str, optional): color map (default to viridis)

    Returns:
        Figure displayed in notebook 

    """
    #format time for plotting 
    timeFormatted = list(pd.to_datetime(winter).strftime('%B %Y'))
    
    #define projection and transform
    proj = ccrs.NorthPolarStereo(central_longitude = -45)
    transform = ccrs.PlateCarree()
    
    #define arguments if not inputted 
    cbarTicks = np.arange(minval, maxval + 1, 1) if cbarTicks is None else cbarTicks

    #plot the data
    im = dataset[dataVar].where(dataset['seaice_conc_monthly_cdr'] > 0.5).sel(time = winter).plot(x = 'longitude', y = 'latitude', vmin = minval, vmax = maxval, cmap = cmap,
        extend='both', levels=20, transform = transform, col='time', add_colorbar = True, zorder = 2, figsize = (8,8), col_wrap = 3,
        cbar_kwargs = {'ticks': cbarTicks, 'label': "\n".join(wrap(dataset[dataVar].attrs['long_name'] + ' (' + dataset[dataVar].attrs['units'] + ')', 50)), 'orientation': 'horizontal', 'shrink': 0.4, 'pad': 0.025}, 
        subplot_kws = {'projection': proj})
    
    #add a title
    plt.suptitle(title + ': ' + dataset[dataVar].attrs['long_name'], fontsize = 20, y = 0.99, fontweight = 'medium')

    i = 0 #indexer to go through timeFormatted and winter arrays and assign the correct data to each month
    for ax in im.axes.flat:
        ax.coastlines(linewidth=0.25, color = 'black', zorder = 10) #add coastlines 
        ax.add_feature(cfeature.LAND, color ='0.95', zorder = 5) #add land 
        ax.add_feature(cfeature.LAKES, color = 'grey', zorder = 5) #add lakes 
        ax.gridlines(draw_labels = False, linewidth = 0.25, color = 'gray', alpha = 0.75, linestyle='--', zorder = 6) #add gridlines
        ax.set_extent([-179, 179, 50, 90], crs = transform) #zoom in so map only displays the Arctic
        ax.set_title(timeFormatted[i])
        
        #plot sea ice concentration 
        SICarray = dataset['seaice_conc_monthly_cdr'].sel(time = winter[i]).where(dataset['region_mask']!=21) #dont plot contour along coastlines
        lonGreater = ma.masked_greater(SICarray.longitude.values, -0.01)
        lonLesser = ma.masked_less(SICarray.longitude.values, 0)
        latGreater = ma.MaskedArray(SICarray.latitude.values, mask = lonGreater.mask)
        latLesser = ma.MaskedArray(SICarray.latitude.values, mask = lonLesser.mask)
        dataGreater = ma.MaskedArray(SICarray.values, mask = lonGreater.mask)
        dataLesser = ma.MaskedArray(SICarray.values, mask = lonLesser.mask)
        im2a = ax.contour(lonGreater, latGreater, dataGreater, levels = [0.5], transform = transform, colors = 'magenta', linewidths = 0.8, zorder = 5, alpha = 1)
        im2b = ax.contour(lonLesser, latLesser, dataLesser, levels = [0.5], transform = transform, colors = 'magenta', linewidths = 0.8, zorder = 5, alpha = 1)
        
        #update indexer 
        i += 1
        
    #display figure in notebook  
    plt.show()
Example #14
def peakfinding():
	# first step of getting peaks
	peaks_obj = Data(frq, abs(Y), smoothness=11)
	# second part of getting peaks
	peaks_obj.get_peaks(method='slope')
	# pull data out of peaks data object for filtering
	peaks_obj.peaks["peaks"]
	peaks = peaks_obj.peaks["peaks"]
	peaks_obj.plot()
	show()
	peaksnp = np.zeros((2, len(peaks[0])))
	peaksnp[0] = peaks[0]
	peaksnp[1] = peaks[1] 
	maxpeaks = max(peaks[1])

	# filtering function: removes peaks that are shorter than 10% of the max peak
	filteredpeaksnp = []
	cutoff = .05
	filtered_peaks = ma.masked_less(peaksnp[1], (cutoff * maxpeaks))	
	indeces = ma.nonzero(filtered_peaks)
	indeces = indeces[0]
	final_peaks = np.zeros((3,len(indeces)))

	i = 0
	while i < len(indeces):
		final_peaks[0, i] = frq[i]
		final_peaks[1,i] = peaksnp[1, indeces[i]]
		final_peaks[2,i] = peaksnp[0, indeces[i]]
		i = i + 1
Example #15
def ROI_distribution(indiv_ROI, arr_beacons_ROI, dict_routes_ROI):

    arr_sampled_grid = np.zeros((param.GRID_X_DIV, param.GRID_Y_DIV),
                                dtype=int)

    for idx, indiv_element in enumerate(indiv_ROI):
        if idx < len(indiv_ROI) - 1:
            ##            print indiv_ROI[idx], indiv_ROI[idx+1]
            ##                print "===="
            #                print arr_sampled_grid_pattern[idx][idx+1]
            if str(arr_beacons_ROI[indiv_ROI[idx]]) + '_' + str(
                    arr_beacons_ROI[indiv_ROI[idx + 1]]) in dict_routes_ROI:
                ##                print idx, idx+1
                for route_element in dict_routes_ROI[
                        str(arr_beacons_ROI[indiv_ROI[idx]]) + '_' +
                        str(arr_beacons_ROI[indiv_ROI[idx + 1]])]:
                    arr_sampled_grid[route_element[0]][route_element[1]] += 1

#    print 'np.sum', np.sum(arr_sampled_grid)

    mdata = ma.masked_less(arr_sampled_grid, 1)

    print('np.mean', np.mean(mdata), 'np.std', np.std(mdata))

    if np.std(mdata) != 0:
        inv_coef_var = np.mean(mdata) / np.std(mdata)
    else:
        inv_coef_var = 0

    print(np.sum(arr_sampled_grid), np.sum(mdata))
    print(round(inv_coef_var, 3), round(np.std(mdata), 3),
          round(np.mean(mdata), 3))

    return inv_coef_var
Example #16
def calcQuality(rawdatafiles, ofileq, year, month, day, curVer, latency):
    '''
    Extracts relevant information from the downloaded raw data files and 
    calculates a quality index that accounts for the amount of influence IR imagery has had on the precipitation estimate
    
    Results are scaled between 0-100 integers to reduce disk storage costs
    '''

    try:
        ir_weight, = getGenericField(rawdatafiles, 'IRkalmanFilterWeight',
                                     year, month, day)
        qualindex, = getGenericField(rawdatafiles, 'precipitationQualityIndex',
                                     year, month, day)

        mask = (ir_weight.data > 0.0)
        qualindex.data[mask] = qualindex.data[mask] * (
            (100 - ir_weight.data[mask]) / 100.0)
        qualindex.data = ma.masked_less(qualindex.data, 0)
        qualindex.data = np.around(qualindex.data * 100).astype(np.uint8)

        qualindex.data.set_fill_value(0)

        iris.save(qualindex, ofileq, zlib=True)

    except:
        print('Error creating quality flag: ' + curVer)
Example #17
    def _breakList(self, inList, index, parameter):
        par = float(parameter)

        array = N.empty(shape=[len(inList),],dtype=N.float64)
        i = 0
        for parameters in inList:
            array[i] = parameters[index]
            i = i + 1 

        greater = MA.masked_less(array, par)
        less = MA.masked_greater(array, par)

        upper = MA.minimum(greater)
        lower = MA.maximum(less)

        upperArray = MA.masked_inside(array, par, upper)
        lowerArray = MA.masked_inside(array, lower, par)

        upperList = []
        lowerList = []
        i = 0
        for parameters in inList:
            if upperArray.mask[i]:
                upperList.append(parameters)
            if lowerArray.mask[i]:
                lowerList.append(parameters)
            i = i + 1

        return upperList, lowerList
Example #18
    def split(self, split_point, ch_to_split):
        img1 = np.ma.array(self.img, mask=False, fill_value=0)
        img2 = np.ma.array(self.img, mask=False, fill_value=0)

        ch_to_split = int(ch_to_split)
        img1[:, :, ch_to_split] = ma.masked_less(img1[:, :, ch_to_split],
                                                 split_point)
        if ch_to_split == 0:
            img1[ma.getmask(img1[:, :, ch_to_split]), 1] = ma.masked
            img1[ma.getmask(img1[:, :, ch_to_split]), 2] = ma.masked
        elif ch_to_split == 1:
            img1[ma.getmask(img1[:, :, ch_to_split]), 0] = ma.masked
            img1[ma.getmask(img1[:, :, ch_to_split]), 2] = ma.masked
        else:
            img1[ma.getmask(img1[:, :, ch_to_split]), 1] = ma.masked
            img1[ma.getmask(img1[:, :, ch_to_split]), 0] = ma.masked

        img2[:, :,
             ch_to_split] = ma.masked_greater_equal(img2[:, :, ch_to_split],
                                                    split_point)
        if ch_to_split == 0:
            img2[ma.getmask(img2[:, :, ch_to_split]), 1] = ma.masked
            img2[ma.getmask(img2[:, :, ch_to_split]), 2] = ma.masked
        elif ch_to_split == 1:
            img2[ma.getmask(img2[:, :, ch_to_split]), 0] = ma.masked
            img2[ma.getmask(img2[:, :, ch_to_split]), 2] = ma.masked
        else:
            img2[ma.getmask(img2[:, :, ch_to_split]), 1] = ma.masked
            img2[ma.getmask(img2[:, :, ch_to_split]), 0] = ma.masked

        #print ('Image 1 after : ' + str(img1))
        #print ('Image 2 after : ' + str(img2))

        return img1, img2
Example #19
def add_noise(infile, snr):
    '''
    :param infile: file to process
    :param snr: SNR, in decibels, which the output image contains vs input
    :return: File written to disc
    '''
    x = cv2.imread(infile).astype(float)
    x = ma.masked_less(x, 1)  # zeros give error
    uplim = ma.max(x)
    x = x / uplim
    v = ma.var(x) / (10**(snr / 10))
    if snr == 0:
        v = 0.00000001
    x_noise = (noi.random_noise(x, mode='gaussian', mean=0, var=v) *
               uplim).astype(np.uint8)
    if snr == 0:
        cv2.imwrite('%s' % (infile.replace('.png', '_snr%s.png' % (snr))),
                    x_noise)
    elif '.jpg' in infile:
        cv2.imwrite('%s' % (infile.replace('.jpg', '_snr%s.jpg' % (snr))),
                    x_noise)
    elif '.png' in infile:
        cv2.imwrite('%s' % (infile.replace('.png', '_snr%s.tif' % (snr))),
                    x_noise)
    elif '.tif' in infile:
        cv2.imwrite('%s' % (infile.replace('.tif', '_snr%s.tif' % (snr))),
                    x_noise)
Example #20
 def test_masked_data(self):
     '''Test masked data'''
     new_data = ma.masked_less(self.data2d, 3)
     c_data = add_cyclic(new_data)
     r_data = ma.concatenate((self.data2d, self.data2d[:, :1]), axis=1)
     assert_array_equal(c_data, r_data)
     assert ma.is_masked(c_data)
Example #21
def extract_ndvi(nc_file):
    fh_in = Dataset('../../raw_data/ndvi/' + nc_file, 'r')

    for index, n_days in enumerate(fh_in.variables['time'][:]):
        date = (datetime.datetime(2000, 1, 1, 0, 0) + datetime.timedelta(int(n_days))).strftime('%Y%m%d')
        print(date)
        fh_out = Dataset('../../processed_data/ndvi/1km/{}.nc'.format(date[:-2]), 'w')

        for name, dim in fh_in.dimensions.items():
            if name != 'time':
                fh_out.createDimension(name, len(dim) if not dim.isunlimited() else None)

        ignore_features = ["time", "crs", "_1_km_monthly_VI_Quality"]
        for v_name, varin in fh_in.variables.items():
            if v_name not in ignore_features:
                dimensions = varin.dimensions if v_name in ['lat', 'lon'] else ('lat', 'lon')
                v_name = v_name if v_name in ['lat', 'lon'] else v_name.split('_')[-1].lower()
                outVar = fh_out.createVariable(v_name, varin.datatype, dimensions)
                if v_name == "lat":
                    outVar.setncatts({"units": "degree_north"})
                    outVar[:] = varin[:]
                elif v_name == "lon":
                    outVar.setncatts({"units": "degree_east"})
                    outVar[:] = varin[:]
                else:
                    outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
                    vin = varin[index, :, :]
                    vin = ma.masked_greater(vin, 1.0)
                    vin = ma.masked_less(vin, -0.2)
                    outVar[:] = vin[:]
        fh_out.close()
    fh_in.close()
Example #22
def extract_lai(nc_file):
    fh_in = Dataset('../../raw_data/lai/' + nc_file, 'r')

    for index, n_days in enumerate(fh_in.variables['time'][:]):
        date = (datetime.datetime(2000, 1, 1, 0, 0) + datetime.timedelta(int(n_days))).strftime('%Y%m%d')
        print(date)
        fh_out = Dataset('../../processed_data/lai/500m/{}.nc'.format(date), 'w')

        for name, dim in fh_in.dimensions.items():
            if name != 'time':
                fh_out.createDimension(name, len(dim) if not dim.isunlimited() else None)

        ignore_features = ["time", "crs", "FparExtra_QC", "FparLai_QC"]
        mask_value_dic = {'Lai_500m': 10, 'LaiStdDev_500m': 10, 'Fpar_500m': 1, 'FparStdDev_500m': 1}
        for v_name, varin in fh_in.variables.items():
            if v_name not in ignore_features:
                dimensions = varin.dimensions if v_name in ['lat', 'lon'] else ('lat', 'lon')
                outVar = fh_out.createVariable(v_name, varin.datatype, dimensions)
                if v_name == "lat":
                    outVar.setncatts({"units": "degree_north"})
                    outVar[:] = varin[:]
                elif v_name == "lon":
                    outVar.setncatts({"units": "degree_east"})
                    outVar[:] = varin[:]
                else:
                    outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
                    vin = varin[index, :, :]
                    vin = ma.masked_greater(vin, mask_value_dic[v_name])
                    vin = ma.masked_less(vin, 0)
                    outVar[:] = vin[:]
        fh_out.close()
    fh_in.close()
Example #23
def find_peak(field, comp=0, max_radius=None, min_radius=None):
    """Find the peak magnitude of a component in the field.

    Args:
        field ``GraspField``: The field to work on.
        comp int: The field component to look at.
        max_radius float: Ignore portions of the grid outside this radius from the center of the field.
        min_radius float: Ignore portions of the grid inside this radius from the center of the field.

    Returns:
        x_peak (float), y_peak (float): The x and y values at the peak."""
    x_vals, y_vals = field.positions_1d

    f = abs(field.field[:, :, comp])
    if max_radius is not None:
        rad = field.radius_grid()
        rad_max_mask = ma.masked_greater(rad, max_radius)
        f = ma.array(f, mask=rad_max_mask.mask)

    if min_radius is not None:
        rad = field.radius_grid()
        rad_min_mask = ma.masked_less(rad, min_radius)
        f = ma.array(f, mask=rad_min_mask.mask)

    ny, nx = np.unravel_index(np.argmax(abs(f)), f.shape)
    x_peak = x_vals[nx]
    y_peak = y_vals[ny]

    return x_peak, y_peak
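A standalone sketch (toy field, no GraspField needed) of the annulus masking used above: combine a masked_greater cut at max_radius with a masked_less cut at min_radius, then locate the surviving peak.

import numpy as np
import numpy.ma as ma

y, x = np.mgrid[-2:3, -2:3]
rad = np.hypot(x, y)
f = np.exp(-rad)                                          # toy field, largest at the centre
f = ma.array(f, mask=ma.masked_greater(rad, 2.0).mask)    # ignore r > max_radius
f = ma.array(f, mask=ma.masked_less(rad, 1.0).mask)       # ignore r < min_radius (masks combine)
ny, nx = np.unravel_index(np.argmax(abs(f)), f.shape)
print(nx, ny)                                             # peak index restricted to the annulus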
Example #24
def coefficient_variation(individual):
    "Calcula el coeficiente de variacion de las zonas muestreadas"
    coef_var = 0
    samp_grid = []

    #    print(individual)
    arr_sampled_grid = np.zeros((param.GRID_X_DIV, param.GRID_Y_DIV))

    for idx, elements in enumerate(individual):
        if idx < len(individual) - 1:
            #        print individual[idx], individual[idx+1]
            ruta_test = [
                param.list_coord[individual[idx]],
                param.list_coord[individual[idx + 1]]
            ]
            #        print ruta_test
            samp_grid = check_all_intersection(
                ruta_test, individual[idx], individual[idx + 1],
                arr_sampled_grid)[0]  # check_all_intersection retorna
            # 2 valores

    mdata = ma.masked_less(samp_grid, 1)

    coef_var = np.std(mdata) / np.mean(mdata)

    #    print np.sum(samp_grid), round(coef_var,3), round(np.std(mdata),3), round(np.mean(mdata),3)

    return coef_var
Example #25
def _attvalues(attribute, stacked):
    """Attribute values computed in numpy.ma stack."""
    if attribute == "max":
        attvalues = ma.max(stacked, axis=2)
    elif attribute == "min":
        attvalues = ma.min(stacked, axis=2)
    elif attribute == "rms":
        attvalues = np.sqrt(ma.mean(np.square(stacked), axis=2))
    elif attribute == "var":
        attvalues = ma.var(stacked, axis=2)
    elif attribute == "mean":
        attvalues = ma.mean(stacked, axis=2)
    elif attribute == "maxpos":
        stacked = ma.masked_less(stacked, 0.0, copy=True)
        attvalues = ma.max(stacked, axis=2)
    elif attribute == "maxneg":  # ~ minimum of negative values?
        stacked = ma.masked_greater_equal(stacked, 0.0, copy=True)
        attvalues = ma.min(stacked, axis=2)
    elif attribute == "maxabs":
        attvalues = ma.max(abs(stacked), axis=2)
    elif attribute == "sumpos":
        stacked = ma.masked_less(stacked, 0.0, copy=True)
        attvalues = ma.sum(stacked, axis=2)
    elif attribute == "sumneg":
        stacked = ma.masked_greater_equal(stacked, 0.0, copy=True)
        attvalues = ma.sum(stacked, axis=2)
    elif attribute == "sumabs":
        attvalues = ma.sum(abs(stacked), axis=2)
    elif attribute == "meanabs":
        attvalues = ma.mean(abs(stacked), axis=2)
    elif attribute == "meanpos":
        stacked = ma.masked_less(stacked, 0.0, copy=True)
        attvalues = ma.mean(stacked, axis=2)
    elif attribute == "meanneg":
        stacked = ma.masked_greater_equal(stacked, 0.0, copy=True)
        attvalues = ma.mean(stacked, axis=2)
    else:
        etxt = "Invalid attribute applied: {}".format(attribute)
        raise ValueError(etxt)

    if not attvalues.flags["C_CONTIGUOUS"]:
        mask = ma.getmaskarray(attvalues)
        mask = np.asanyarray(mask, order="C")
        attvalues = np.asanyarray(attvalues, order="C")
        attvalues = ma.array(attvalues, mask=mask, order="C")

    return attvalues
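A small usage sketch (hypothetical 2x2x3 stack) of the function above: "maxpos" masks negative samples before taking the maximum along the stacking axis, so traces with no positive samples come back fully masked.

import numpy as np
import numpy.ma as ma

stacked = ma.array(np.arange(-6., 6.).reshape(2, 2, 3))
print(_attvalues("maxpos", stacked))    # [[-- --] [2.0 5.0]]
print(_attvalues("meanabs", stacked))   # mean of |values| along the stack axis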
Example #26
def plot_bTemps(chan_dict, chan_str, vmin=None, vmax=None):

    bTemp = ma.masked_less(chan_dict[chan_str]['bTemp'], -800.)

    f = ppl.figure()
    ppl.imshow(bTemp, interpolation='nearest', vmin=vmin, vmax=vmax)
    ppl.colorbar(orientation='horizontal')
    ppl.title('IDPS {} Brightness Temperature (K)'.format(chan_str))
    ppl.show(block=False)

    bTemp_new = ma.masked_less(chan_dict[chan_str]['bTemp_new'], -800.)

    f = ppl.figure()
    ppl.imshow(bTemp_new, interpolation='nearest', vmin=vmin, vmax=vmax)
    ppl.colorbar(orientation='horizontal')
    ppl.title('Converted IDPS {} Brightness Temperature (K)'.format(chan_str))
    ppl.show(block=False)
Example #27
    def masked_less(self, val):
        new = Map.empty()

        new.data[0] = ma.masked_less(self.data[0], val)

        new.data[1] = ma.masked_where(ma.getmask(new.data[0]), self.data[1])

        return new
Example #28
def plotOneMonth(dataset, dataVar, month, minval, maxval, cbarTicks = None, cmap = 'viridis'): 
    """Plots map of the arctic on North Pole Stereo projection with one month of data overlayed, along with the sea ice edge for each month.
   
    Args:
        dataset (xr Dataset): dataset from google bucket
        dataVar (str): variable of interest
        month (str): month and year of interest, i.e. 'Dec 2019' (does not need to be in any particular format)
        minval, maxval (int): minimum and maximum values for the data variable 
        cbarTicks (list or np array, optional): ticks to use on colorbar (default: np.arange(minval, maxval + 1, 1))
        cmap (str, optional): color map (default to viridis)
        
    Returns:
        Figure displayed in notebook 
    
    """
    
    #define projection and transform
    proj = ccrs.NorthPolarStereo(central_longitude = -45)
    transform = ccrs.PlateCarree()
    
    #initialize the figure and axes 
    plt.figure(figsize=(6, 6))
    ax = plt.axes(projection = proj)
    
    #define arguments if not inputted 
    cbarTicks = np.arange(minval, maxval + 1, 1) if cbarTicks is None else cbarTicks
    
    #plot sea ice concentration 
    SICarray = dataset['seaice_conc_monthly_cdr'].sel(time = month).where(dataset['region_mask']!=21) #dont plot contour along coastlines
    
    #stackexchange workaround for plotting on a rotated grid
    lonGreater = ma.masked_greater(SICarray.longitude.values, -0.01)
    lonLesser = ma.masked_less(SICarray.longitude.values, 0)
    latGreater = ma.MaskedArray(SICarray.latitude.values, mask = lonGreater.mask)
    latLesser = ma.MaskedArray(SICarray.latitude.values, mask = lonLesser.mask)
    dataGreater = ma.MaskedArray(SICarray.values[0], mask = lonGreater.mask)
    dataLesser = ma.MaskedArray(SICarray.values[0], mask = lonLesser.mask)
    
    #plot contour using each part of the 2 masked data sets
    im2a = ax.contour(lonGreater, latGreater, dataGreater, levels = [0.5], transform = transform, colors = 'magenta', linewidths = 0.9, zorder=5, alpha=1)
    im2b = ax.contour(lonLesser, latLesser, dataLesser, levels = [0.5], transform = transform, colors = 'magenta', linewidths = 0.9, zorder=5, alpha=1)
    #im = ax.contour(SICarray.longitude.values, SICarray.latitude.values, SICarray.values[0], levels = [0.15], transform = transform, colors = 'magenta', linewidths = 0.8, zorder=15, alpha=1)
    
    #plot the data
    dataset[dataVar].where(dataset['seaice_conc_monthly_cdr'] > 0.5).sel(time = month).plot(x = 'longitude', y = 'latitude', vmin = minval, vmax = maxval, extend = 'both', 
                    ax = ax, add_colorbar = True, transform = transform, zorder = 2, cmap = cmap, 
                    cbar_kwargs = {'label': "\n".join(wrap(dataset[dataVar].attrs['long_name'] + ' (' + dataset[dataVar].attrs['units'] + ')', 50)), 'orientation': 'horizontal', 'shrink': 0.75, 'pad': 0.025})
    
    #add features to the map
    ax.coastlines(linewidth=0.15, color = 'black', zorder = 10) #add coastlines 
    ax.add_feature(cfeature.LAND, color ='0.95', zorder = 5) #add land 
    ax.add_feature(cfeature.LAKES, color = 'grey', zorder = 5) #add lakes 
    ax.gridlines(draw_labels = False, linewidth = 0.25, color = 'gray', alpha = 0.7, linestyle = '--', zorder = 6) #add gridlines
    ax.set_extent([-179, 179, 55, 90], crs = transform) #zoom in so map only displays the Arctic
    ax.set_title("\n".join(wrap(month + ": " + dataset[dataVar].attrs['long_name'], 38)), fontsize = 'x-large')
    
    #display figure in notebook 
    plt.show()
Example #29
    def mask(self, mask):
        if isinstance(mask, str):
            fitsmask = fits.getdata(mask)
            if np.median(fitsmask) == 0:
                self.__pixeldata.mask = fitsmask >= self.maskthresh
            else:
                self.__pixeldata.mask = fitsmask <= self.maskthresh
        # if the mask is a separated array
        elif isinstance(mask, np.ndarray):
            self.__pixeldata.mask = mask
        # if the mask is not given
        elif mask is None:
            # check the fits file and try to find it as an extension
            if self.attached_to == 'PrimaryHDU':
                self.__pixeldata = ma.masked_invalid(self.__img.data)

            elif self.attached_to == 'ndarray':
                self.__pixeldata = ma.masked_invalid(self.__img)
            elif self.attached_to == 'HDUList':
                if self.header['EXTEND']:
                    fitsmask = self.__img[1].data
                    if np.median(fitsmask) == 0:
                        self.__pixeldata.mask = fitsmask >= self.maskthresh
                    else:
                        self.__pixeldata.mask = fitsmask <= self.maskthresh
                else:
                    self.__pixeldata = ma.masked_invalid(self.__img[0].data)
            # if a path is given where we find a fits file search on extensions
            else:
                try:
                    ff = fits.open(self.attached_to)
                    if 'EXTEND' in ff[0].header.keys():
                        if ff[0].header['EXTEND']:
                            try:
                                fitsmask = ff[1].data
                                if np.median(fitsmask) == 0:
                                    self.__pixeldata.mask = fitsmask >= \
                                                            self.maskthresh
                                else:
                                    self.__pixeldata.mask = fitsmask <= \
                                                            self.maskthresh
                            except IndexError:
                                self.__pixeldata = ma.masked_invalid(
                                    self.__pixeldata.data)
                except IOError:
                    self.__pixeldata = ma.masked_invalid(self.__pixeldata)
        else:
            masked = ma.masked_greater(self.__pixeldata, 65000.)
            if not np.sum(~masked.mask) < 1000.:
                self.__pixeldata = masked

        mask_lower = ma.masked_less(self.__pixeldata, -50.)
        mask_greater = ma.masked_greater(self.__pixeldata, 65000.)

        self.__pixeldata.mask = ma.mask_or(self.__pixeldata.mask,
                                           mask_lower.mask)
        self.__pixeldata.mask = ma.mask_or(self.__pixeldata.mask,
                                           mask_greater.mask)
Example #30
File: SVM_D3.py  Project: kerinin/iEngine
	def _select_working_set(self):
		return (0,1)  # early return: the working-set selection code below is never reached
		
		I = ma.masked_less( self.alpha, 1e-8 )
		grad = self._grad( ma.array(self.X, mask=ma.getmask(I) ) )
		
		i = (-grad).argmax()
		
		P = self._P( self.X[ i % self.N ].reshape([1,self.d]), np.vstack( [self.X,]*kappa ) )		# P_IJ
		a = ma.masked_less( P[0,i] + np.diagonal(P) - 2*P, 0 )
		
		#NOTE the actual equation is -grad < -grad_i - i'm not sure if inversing the signs and equality operator is a problem
		grad_i =  self._grad( self.X[i % self.N] )
		b = ma.masked_greater(grad, grad_i) - grad_i
		
		j = ( (b**2) / -a ).argmin()
		
		return (i,j)
Example #31
 def all_fail(self):
     # Count the number of problems
     # on which all solvers failed
     nfail = 0
     for prob in range(self.nprobs):
         fail = ma.masked_less(self.metric[:,prob], 0.0)
         if ma.count_masked(fail) == self.nsolvs:  # every solver failed on this problem
             nfail += 1
     return nfail
Example #32
 def belowhorizon(z):
     """Return masked z values that are below the horizon.
     Below the horizon means either than z is negative or
     the z has a nonzero imaginary part.
     """
     imagz_ma = ma.getmaskarray(ma.masked_not_equal(z.imag, 0.))
     negz_ma = ma.getmaskarray(ma.masked_less(z, .0))
     belowhrz = ma.mask_or(imagz_ma, negz_ma)
     return belowhrz
Example #33
def get_mask_for_unphysical(U, cutoffU=2000., fill_value=np.nan):
    """
    Returns a mask for masking module. if absolute value of value is greater than cutoff, the value is masked.
    Parameters
    ----------
    U: array-like
    cutoffU: float
        if |value| > cutoff, this method considers those values unphysical.
    fill_value:


    Returns
    -------
    mask: multidimensional boolean array

    """
    print('number of invalid values (nan and inf) in the array: ' + str(
        np.isnan(U).sum() + np.isinf(U).sum()))
    print('number of nan values in U: ' + str(np.isnan(U).sum()))
    print('number of inf values in U: ' + str(np.isinf(U).sum()) + '\n')

    # a=ma.masked_invalid(U)
    # print 'number of masked elements by masked_invalid: '+ str(ma.count_masked(a))

    # Replace all nan and inf values with fill_value.
    # fix_invalid still enforces a mask on elements with originally invalid values
    U_fixed = ma.fix_invalid(U, fill_value=99999)
    n_invalid = ma.count_masked(U_fixed)
    print('number of masked elements by masked_invalid: ' + str(n_invalid))
    # Update the mask to False (no masking)
    U_fixed.mask = False

    # Mask unreasonable values of U_fixed
    b = ma.masked_greater(U_fixed, cutoffU)
    c = ma.masked_less(U_fixed, -cutoffU)
    n_greater = ma.count_masked(b) - n_invalid
    n_less = ma.count_masked(c)
    print('number of masked elements greater than cutoff: ' + str(n_greater))
    print('number of masked elements less than -cutoff: ' + str(n_less))

    # Generate a mask for all nonsense values in the array U
    mask = ~(~b.mask * ~c.mask)

    d = ma.array(U_fixed, mask=mask)
    n_total = ma.count_masked(d)
    # U_filled = ma.filled(d, fill_value)

    #Total number of elements in U
    N = 1
    for i in range(len(U.shape)):
        N *= U.shape[i]

    print('total number of unphysical values: ' + str(
        ma.count_masked(d)) + '  (' + str((float(n_total) / N * 100)) + '%)\n')

    return mask
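A short usage sketch (toy array, assuming the function above is in scope): out-of-range, NaN and inf entries all end up flagged in the returned mask.

import numpy as np
import numpy.ma as ma

U = np.array([1.0, np.nan, 3500.0, -2100.0, 42.0, np.inf])
mask = get_mask_for_unphysical(U, cutoffU=2000., fill_value=np.nan)
print(ma.array(U, mask=mask))   # [1.0 -- -- -- 42.0 --]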
Example #34
def resolve_head_tail(
    shuffled_results: ShuffledResults,
    original_results: OriginalResults,
    frame_rate: float,
    score_threshold,
) -> BaseResults:
    len_series = len(shuffled_results)

    # Create continuous segments without jumps
    partitioned_results = _make_continuous_partitions(
        score_threshold=score_threshold,
        frame_rate=frame_rate,
        shuffled_results=shuffled_results,
    )
    segments = partitioned_results.get_segments()

    if len(segments) == 0:
        logger.error(
            f"Couldn't find any continuous segments of predicted data above the threshold {score_threshold},"
            f" stopping analysis.")
        return _FinalResults.from_shuffled(shuffled_results)

    # Choose each segment global alignment by comparing with labelled data
    segments_alignment = _align_segments_with_labels(
        segments, partitioned_results.skeletons, original_results.skeletons)

    # Fix unaligned segments here by comparing skeletons with neighboring segments iteratively
    segments_alignment = _align_unlabelled_segments_with_adjacents(
        segments, segments_alignment, partitioned_results.skeletons,
        frame_rate)

    # Compile results
    resolved_results = _ResolvedResults(partitioned_results)
    for segment, segment_alignment in zip(segments, segments_alignment):
        if not ma.is_masked(segment_alignment):
            resolved_results.resolve(segment, segment_alignment)

    # Filter the final results again by score threshold
    low_scores_indices = np.where(
        ma.masked_less(resolved_results.scores, score_threshold).mask)[0]
    resolved_results.mask(low_scores_indices)

    num_success = resolved_results.num_valid()
    original_num_success = np.any(~np.isnan(original_results.skeletons),
                                  axis=(1, 2)).sum()
    logger.info(
        f"Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully "
        f"({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success}"
        f" or {(float(original_num_success) / len_series * 100):.1f}% of total)"
    )
    if num_success < original_num_success:
        logger.warning(
            f"Original results had {original_num_success - num_success} more successfully analyzed frames!"
        )

    return _FinalResults.from_resolved(resolved_results)
Example #35
 def _mask_data(self):
     """Apply mask of any NaN-valued pixels and any negative-valued pixels in the intensity image"""
     nan_mask = np.isnan(self.int_data)
     nan_mask_unc = np.isnan(self.unc_data)
     neg_mask = ma.masked_less(ma.array(self.int_data, mask=nan_mask),
                               0.0).mask
     self.mask = reduce(np.add,
                        [self.msk_data, neg_mask, nan_mask, nan_mask_unc])
     self.int_data = ma.array(self.int_data, mask=self.mask)
     self.unc_data = ma.array(self.unc_data, mask=self.mask)
Example #36
File: local.py  Project: CWSL/climate
def load_GPM_IMERG_files(file_path=None,
                      filename_pattern=None,
                      filelist=None,
                      variable_name='precipitationCal',
                      name='GPM_IMERG'):
    ''' Load multiple GPM Level 3 IMERG files containing calibrated precipitation and generate an OCW Dataset object.
    :param file_path: Directory to the HDF files to load.
    :type file_path: :mod:`string`
    :param filename_pattern: Path to the HDF files to load.
    :type filename_pattern: :list:`string`
    :param filelist: A list of filenames
    :type filelist: :list:`string`
    :param variable_name: The variable name to load from the HDF file.
    :type variable_name: :mod:`string`
    :param name: (Optional) A name for the loaded dataset.
    :type name: :mod:`string`
    :returns: An OCW Dataset object with the requested variable's data from
        the HDF file.
    :rtype: :class:`dataset.Dataset`
    :raises ValueError:
    '''

    if not filelist:
        GPM_files = []
        for pattern in filename_pattern:
            GPM_files.extend(glob(file_path + pattern))
    else:
        GPM_files = [line.rstrip('\n') for line in open(filelist)]

    GPM_files.sort()

    file_object_first = h5py.File(GPM_files[0])
    lats = file_object_first['Grid']['lat'][:]
    lons = file_object_first['Grid']['lon'][:]

    lons, lats = numpy.meshgrid(lons, lats)

    variable_unit = "mm/hr"

    times = []
    nfile = len(GPM_files)
    for ifile, file in enumerate(GPM_files):
        print('Reading file ' + str(ifile+1) + '/' + str(nfile), file)
        file_object = h5py.File(file)
        time_struct_parsed = strptime(file[-39:-23],"%Y%m%d-S%H%M%S")
        times.append(datetime(*time_struct_parsed[:6]))
        values0= numpy.transpose(ma.masked_less(file_object['Grid'][variable_name][:], 0.))
        values0= numpy.expand_dims(values0, axis=0)
        if ifile == 0:
            values = values0
        else:
            values = numpy.concatenate((values, values0))
        file_object.close()
    times = numpy.array(times)
    return Dataset(lats, lons, times, values, variable_name, units=variable_unit, name=name)    
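A standalone sketch (toy 2x2 array standing in for file_object['Grid'][variable_name][:], not real IMERG data) of the per-file step above: negative precipitation is masked with masked_less, the grid is transposed, and a leading time axis is added before concatenation.

import numpy as np
import numpy.ma as ma

raw = np.array([[0.2, -9999.9],
                [1.5,  0.0]])
vals = np.transpose(ma.masked_less(raw, 0.))
vals = np.expand_dims(vals, axis=0)
print(vals.shape)   # (1, 2, 2)
print(vals[0])      # [[0.2 1.5] [-- 0.0]]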
Example #37
File: PVCorr_AT.py  Project: teuben/admit
def test_single(x1,x2,y,cutoff=None):
    """ test as if there is only one line via a simple moment analysis
    """
    ymax = y.max()
    if cutoff is None:
        cutoff = 0.10 * ymax
    ym = ma.masked_less(y,cutoff)
    y1 = ym * x1
    y2 = ym * x2
    logging.info("Average: %d %d %d %d" % (ym.count(), y1.count(), y2.count(), len(y)))
    logging.info("Average intensity weighted channel: %g %g %g %g" % (y1.sum()/ym.sum(),y2.sum()/ym.sum(),cutoff,ymax))
Example #38
def get_WRF_Tsoil_VWC(fname_wrf):
    """obtain WRF soil T and soil moisture.  soil T is masked below 10 C
    because Mary's soil flux model fitting data went no lower.
    """

    vwc = sp.parse_STEM_var(nc_fname=fname_wrf, varname='SMOIS')
    Tsoil = sp.parse_STEM_var(nc_fname=fname_wrf, varname='TSOIL')

    ten_C = 273.15 + 10  # 10 C expressed in Kelvins
    Tsoil['data'] = ma.masked_less(Tsoil['data'], ten_C)

    return(vwc, Tsoil)
Example #39
    def proc(self):
        """ 
        Calculates the optical depth from the raw data images using the
        settings previously configured for the normalization and filtering,
        then stores the result in the data array Frame.OD,
        """
        # Apply preprocessing filters to raw image data, if desired   
        img, pro, bac = self._apply_prefilters()
            
        # Subtract background and clip any non-positive pixels to 1
        rawabs = (img - bac).clip(1).copy()
        rawref = (pro - bac).clip(1).copy()
        
        # Flag img and probe pixels for special handling if the exposure
        # there happens to be below the median background
        ref_minlevel = median(bac)
        mref = ma.masked_less(rawref, ref_minlevel)
        mabs = ma.masked_less(rawabs, ref_minlevel)
            
        # Calculate normalization factor for probe intensity fluctuations
        norm = self._calculate_normalization(mref, mabs)
        self.proc_info['norm'] = norm
           
        # Calculate the normalized OD from the raw image data.
        OD = (-log(rawabs / mref * norm)).clip(-1.0, 6.0).copy().filled(0)
        if self._valid_Isat(): # correct for saturation effects
            # Calculate a smoothed version of the measured probe intensity
            smooth_radius = 10
            I_probe = ndi.gaussian_filter(rawref, smooth_radius)
            # calculate the corrected OD based on local I_probe / Isat
            self.OD = OD + (1 - exp(-OD)) * I_probe / self.Isat
        else: # Ignore the correction for saturation of the probe
            self.OD = OD

        # Apply postprocessing filters to image data, if desired
        self.OD = self._apply_postfilter()

        # indicate that the frame has been successfully (re)processed
        self.proc_info['reproc'] = 0
        print("%s Processed" % self.frame_id)
Example #40
    def mask_sparse(self, threshold=10):
        """ Mask pixels that are coherent in fewer than <threshold> interferograms in the set. """
        self.MaskPrefix = 's' + self.MaskPrefix
        print('Masking pixels that do not have at least {0} coherent values'.format(threshold))
        # each pixel is assigned an integer corresponding to the # of igrams in which it is coherent
        # NOTE: save coverage map if it doesn't exist already
        coverage = self.get_coverage()
        sparse = ma.masked_less(coverage, threshold)
        for ig in self.Set:
            igram = self.load_ma(ig)
            igram[sparse.mask] = ma.masked
            self.save_ma(ig, igram)
        print('Done')
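
The same coverage test can be tried on a toy stack.  A hedged sketch assuming the interferograms live in one 3-D masked array rather than on disk (the shapes and the incoherent patch are invented):

import numpy as np
import numpy.ma as ma

stack = ma.array(np.random.rand(20, 50, 50))   # 20 toy interferograms
stack[:, 10:20, 10:20] = ma.masked             # a chronically incoherent patch
coverage = stack.count(axis=0)                 # per-pixel number of unmasked igrams
sparse = ma.masked_less(coverage, 10)          # pixels coherent in fewer than 10 igrams
stack[:, sparse.mask] = ma.masked              # propagate the sparse mask to every igram
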
Example #41
def wet_spells(rainfall, threshold=0, start=True, mode='both'):
    """
    Compute the wet spells for a series of precipitations

    Parameters
    ----------
    rainfall : TimeSeries
        TimeSeries of precipitations.
    threshold : float, optional
        Minimum amount of precipitation defining a wet day.
    start : boolean, optional
        Whether the spells are associated with the first or last day of the spell.
    mode : {'durations', 'intensities', 'both'}, optional
        Whether to return only durations, intensities or both.
    

    Returns
    -------
    wet_spells : TimeSeries
        A :class:`TimeSeries` giving the duration and/or intensity of a spell
        at either the first or last date.
    """
    rdates = getattr(rainfall, 'dates', None)
    rdata = ma.masked_array(rainfall, subok=False)
    condition = ma.masked_less(rdata, threshold)
    slices = ma.clump_unmasked(condition)
    # Get the durations and starting dates of each spell
    mode = (mode or 'both').lower()[0]
    if mode == 'd':
        # Durations
        result = np.array([s.stop - s.start for s in slices], dtype=int)
    elif mode == 'i':
        # Intensities
        result = np.array([rdata[s].sum() for s in slices])
    else:
        durations = [s.stop - s.start for s in slices]
        intensities = [rdata[s].sum() for s in slices]
        result = np.array(zip(durations, intensities),
                          dtype=[('durations', int), ('intensities', float)],)
    if rdates is None:
        return result
    if start:
        dates = rdates[[s.start for s in slices]]
    else:
        dates = rdates[[s.stop - 1 for s in slices]]
    ensoi = getattr(rainfall, 'ensoindicator', None)
    if mode in 'id':
        spells = enso.climate_series(result, dates=dates, ensoindicator=ensoi)
    else:
        spells = enso.climate_records(result, dates=dates, ensoindicator=ensoi)
    return spells
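
The heart of wet_spells (mask the dry days, then clump the unmasked runs) also works on a plain array.  A small sketch with made-up rainfall and a 1 mm threshold:

import numpy as np
import numpy.ma as ma

rain = np.array([0., 0., 5., 2., 0., 1., 3., 4., 0., 0.])
wet = ma.masked_less(rain, 1.0)            # mask days below the 1 mm threshold
spells = ma.clump_unmasked(wet)            # slices covering consecutive wet days
durations = [s.stop - s.start for s in spells]      # [2, 3]
intensities = [rain[s].sum() for s in spells]       # [7.0, 8.0]
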
Example #42
    def importFromXYZ(self,b,H,RB,alpha):
        RT=RB-H *( np.tan(alpha ) )
        x,y,z=b[:,0],b[:,1],b[:,2]
        r,tht,z=rec2cyl(x,y,z)
        self.setGeometry(RB,H,alpha)
        rPerf=self._getRperf(z)
        if self.imp_type == 'thick':
            imp=b[:,3]
        else:
            imp=getGeomImperfection(r,z,rPerf)

        tm1=ma.masked_less(tht,0.1*np.pi).mask
        tm2=ma.masked_greater(tht,1.9*np.pi).mask

        tht=np.hstack((tht, np.pi*2.0+tht[tm1],  0.0+(-1.0)*tht[tm2]))
        r=np.hstack((r,r[tm1],r[tm2] ))
        z=np.hstack((z,z[tm1],z[tm2]))
        imp=np.hstack((imp,imp[tm1],imp[tm2]))

        ft=np.linspace(0,2.0*np.pi,self.samplingRadial)
        fz=np.linspace(0,H,self.samplingAxial)
        IMPERF=getImperfectionArray(tht,z,imp,ft,fz)

        mf=[]
        for row in IMPERF[0:len(IMPERF)]:
            mf.append( np.isnan(np.sum(row))  )
        row1=mf.index(False)
        mr=[]
        for i in reversed(mf):
            mr.append(i)
        row2= len(mf)-1-mr.index(False)

        row1+=1
        row2-=2
        dr1=row1
        rows1=range(0,row1)
        rows1sym=range(row1,row1+dr1)[::-1]

        dr2=len(mf)-1-row2
        rows2=range(len(mf)-1,row2,-1)
        rows2sym=range(row2-dr2,row2)[::-1]

        IMPERF[rows1]=IMPERF[rows1sym].copy()
        IMPERF[rows2]=IMPERF[rows2sym].copy()

        #EXTRUDE
        #IMPERF[0:row1]=IMPERF[row1]
        #IMPERF[row2::]=IMPERF[row2]

        self.addData(IMPERF,ft,fz)
Example #43
    def mask_incoherent(self):
        """ mask pixel values that have coherence values in .msk file less than specified threshold. 0=incoherent, 1=fully coherent """
        self.MaskPrefix = 'i' + self.MaskPrefix
        print('Masking pixel values where .msk value is less than {0}...'.format(threshold))
        for ig in self.Set:
            igram = self.load_ma(ig)
            mskFile = ig.Path[:-3] + 'msk'
            coherence = roipy.tools.load_half(ig, 2, mskFile)
            incoherent = ma.masked_less(coherence, self.Cothresh)
            igram[incoherent.mask] = ma.masked
            mskFile = self.MaskPrefix + 'Mask_' + ig.Name[:-4]
            np.save(os.path.join(self.ProcDir, mskFile), igram.mask)
            print(mskFile)

        print('Done')
Example #44
def id_features(data, threshold):
    '''
    Find the locations where features exist inside a data array
    set entries 1=feature, 0=no feature
    Separating these out into individual features is application dependent
    Returns an array of ints={0,1} of data.shape
    '''
    # Filter the data: data_notouch masks entries below the threshold; data_touch masks entries above it (the features we want)
    data_notouch = ma.masked_less(data, threshold)
    data_touch = ma.masked_greater(data, threshold)
    #Extract the mask to get where there are features. We will use this to id features to operate on
    regions = ma.getmask(data_touch) #Extract the mask from the touch array as the Trues will line up with the areas more than the threshold
    #Create the features map of 1's where we want features (greater than threshold), zeroes otherwise
    features = numpy.zeros(data.shape, dtype=numpy.int32)
    features[regions] = 1 #Define features
    return features
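
The same thresholding, condensed onto a toy array (the unused data_notouch step dropped):

import numpy
import numpy.ma as ma

data = numpy.array([[0.1, 0.9, 0.8],
                    [0.2, 0.1, 0.7],
                    [0.0, 0.0, 0.6]])
features = numpy.zeros(data.shape, dtype=numpy.int32)
features[ma.getmask(ma.masked_greater(data, 0.5))] = 1   # 1 wherever data exceeds the threshold
print(features)
# [[0 1 1]
#  [0 0 1]
#  [0 0 1]]
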
Example #45
File: TEC.py Project: QuLogic/iris
def main():
    # Load the "total electron content" cube.
    filename = iris.sample_data_path('space_weather.nc')
    cube = iris.load_cube(filename, 'total electron content')

    # Explicitly mask negative electron content.
    cube.data = ma.masked_less(cube.data, 0)

    # Plot the cube using one hundred colour levels.
    qplt.contourf(cube, 100)
    plt.title('Total Electron Content')
    plt.xlabel('longitude / degrees')
    plt.ylabel('latitude / degrees')
    plt.gca().stock_img()
    plt.gca().coastlines()
    iplt.show()
Example #46
def EdgeFinder(image,data):
    '''Make a list of all (x, y) positions where I > edgemin, suitable for an ellipse search.
    Not currently used but might be useful in the future.
    '''
    import numpy.ma as ma
    Nx,Ny = data['size']
    pixelSize = data['pixelSize']
    edgemin = data['edgemin']
    scalex = pixelSize[0]/1000.
    scaley = pixelSize[1]/1000.    
    tay,tax = np.mgrid[0:Nx,0:Ny]
    tax = np.asfarray(tax*scalex,dtype=np.float32)
    tay = np.asfarray(tay*scaley,dtype=np.float32)
    tam = ma.getmask(ma.masked_less(image.flatten(),edgemin))
    tax = ma.compressed(ma.array(tax.flatten(),mask=tam))
    tay = ma.compressed(ma.array(tay.flatten(),mask=tam))
    return zip(tax,tay)
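
The mask-and-compress pattern used by EdgeFinder, in isolation on a tiny made-up 1-D example: mask intensities below the floor, then reuse that mask to keep only the matching coordinates.

import numpy as np
import numpy.ma as ma

image = np.array([1., 8., 3., 9., 2.])
x = np.arange(image.size, dtype=float)
tam = ma.getmask(ma.masked_less(image, 5.))    # True where intensity < 5
xs = ma.compressed(ma.array(x, mask=tam))      # coordinates of the bright pixels: 1.0 and 3.0
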
Example #47
File: SVM_D3.py Project: kerinin/iEngine
	def _compute(self):
		if self.Y.shape != self.X.shape:
			start = datetime.datetime.now()
			
			# Initialize variables
			kappa = len( self.gamma )
			(N,self.d) = self.X.shape
			self.alpha = np.ones( [N*kappa,1] ) / (N*kappa)
			self.Y = ( ( .5 + (self.X.reshape(N,1,self.d) > np.transpose(self.X.reshape(N,1,self.d),[1,0,2])).prod(2).sum(1,dtype=float) ) / N ).reshape([N,1])
			self.Gamma = np.repeat(self.gamma,N).reshape([N*kappa,1])
			counter = 0

			# Test stopping condition
			while not counter or not self._test_stop():
				
				# Select working set
				(i,j) = self._select_working_set()
				
				# Solve sub-problem
				(alpha_i, alpha_j) = self._sub_problem(i,j)
				
				print self.alpha[i] - alpha_i
				print self.alpha[j] - alpha_j
				
				# Update alpha
				self.alpha[i] = alpha_i
				self.alpha[j] = alpha_j
				
				counter += 1
			
			beta = ma.masked_less( self.alpha, 1e-8 )
			mask = ma.getmask(beta)
			self.NSV = beta.count()
			
			self.beta = self.alpha
			self.SV = np.vstack( [self.X,]*kappa )
			
			'''
			self.beta = beta.compressed().reshape([self.NSV,1])
			self.SV = ma.array( np.tile(self.X.T,kappa).T, mask=np.repeat(mask,self.d)).compressed().reshape([self.NSV,self.d])
			self.Gamma = ma.array( self.Gamma, mask=mask ).compressed().reshape([self.NSV,1])
			'''
			duration = datetime.datetime.now() - start
			print "optimized in %ss" % ( duration.seconds + float(duration.microseconds)/1000000)
Example #48
File: TEC.py Project: AntoinedDMO/iris
def main():
    # Enable a future option, to ensure that the netcdf load works the same way
    # as in future Iris versions.
    iris.FUTURE.netcdf_promote = True

    # Load the "total electron content" cube.
    filename = iris.sample_data_path('space_weather.nc')
    cube = iris.load_cube(filename, 'total electron content')

    # Explicitly mask negative electron content.
    cube.data = ma.masked_less(cube.data, 0)

    # Plot the cube using one hundred colour levels.
    qplt.contourf(cube, 100)
    plt.title('Total Electron Content')
    plt.xlabel('longitude / degrees')
    plt.ylabel('latitude / degrees')
    plt.gca().stock_img()
    plt.gca().coastlines()
    iplt.show()
Example #49
	def dynamic_mask(self, image, sigrange):
		"""
		Creates a numpy mask on the image, filtering out any
		pixel values that are more than sigrange*std from the median value

		Input: numpy array of the image, sigrange for multiplier on standard dev range
		Output: Masked numpy array covering any pixels above or below the standard dev range
		"""

		# Make a masked array using the static mask and imput image
		pre_masked = ma.array(image, mask=self.static_mask)

		# Mask saturated or empty
		masked1 = ma.masked_greater(pre_masked, 254)
		masked1 = ma.masked_less(masked1, 0)

		median = ma.median(masked1)
		mean = ma.mean(masked1)
		std = ma.std(masked1)

		return masked1, median, mean, std
Example #50
def main():
    # Load the "total electron content" cube.
    filename = iris.sample_data_path("space_weather.nc")
    cube = iris.load_cube(filename, "total electron content")

    # Explicitly mask negative electron content.
    cube.data = ma.masked_less(cube.data, 0)

    # Currently require to remove the multi-dimensional
    # latitude and longitude coordinates for Iris plotting.
    cube.remove_coord("latitude")
    cube.remove_coord("longitude")

    # Plot the cube using one hundred colour levels.
    qplt.contourf(cube, 100)
    plt.title("Total Electron Content")
    plt.xlabel("longitude / degrees")
    plt.ylabel("latitude / degrees")
    plt.gca().stock_img()
    plt.gca().coastlines()
    plt.show()
Example #51
def Make2ThetaAzimuthMap(data,masks,iLim,jLim,times): #most expensive part of integration!
    '''Transform a 2D image section from x,y space to 2-theta,azimuth space based on the
    detector orientation, and build the frame/polygon/spot position masks.'''
    pixelSize = data['pixelSize']
    scalex = pixelSize[0]/1000.
    scaley = pixelSize[1]/1000.
    
    tay,tax = np.mgrid[iLim[0]+0.5:iLim[1]+.5,jLim[0]+.5:jLim[1]+.5]         #bin centers not corners
    tax = np.asfarray(tax*scalex,dtype=np.float32)
    tay = np.asfarray(tay*scaley,dtype=np.float32)
    nI = iLim[1]-iLim[0]
    nJ = jLim[1]-jLim[0]
    t0 = time.time()
    #make position masks here
    frame = masks['Frames']
    tam = ma.make_mask_none((nI,nJ))
    if frame:
        tamp = ma.make_mask_none((1024*1024))
        tamp = ma.make_mask(pm.polymask(nI*nJ,tax.flatten(),
            tay.flatten(),len(frame),frame,tamp)[:nI*nJ])-True  #switch to exclude around frame
        tam = ma.mask_or(tam.flatten(),tamp)
    polygons = masks['Polygons']
    for polygon in polygons:
        if polygon:
            tamp = ma.make_mask_none((1024*1024))
            tamp = ma.make_mask(pm.polymask(nI*nJ,tax.flatten(),
                tay.flatten(),len(polygon),polygon,tamp)[:nI*nJ])
            tam = ma.mask_or(tam.flatten(),tamp)
    if tam.shape: tam = np.reshape(tam,(nI,nJ))
    spots = masks['Points']
    for X,Y,diam in spots:
        tamp = ma.getmask(ma.masked_less((tax-X)**2+(tay-Y)**2,(diam/2.)**2))
        tam = ma.mask_or(tam,tamp)
    times[0] += time.time()-t0
    t0 = time.time()
    TA = np.array(GetTthAzmG(tax,tay,data))     #includes geom. corr. as dist**2/d0**2 - most expensive step
    times[1] += time.time()-t0
    TA[1] = np.where(TA[1]<0,TA[1]+360,TA[1])
    return np.array(TA),tam           #2-theta, azimuth & geom. corr. arrays & position mask
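
The spot-mask line above (masked_less applied to a squared distance) is the usual trick for building a circular mask.  A small standalone sketch with an invented toy geometry:

import numpy as np
import numpy.ma as ma

tay, tax = np.mgrid[0:100, 0:100] * 0.1     # toy pixel-centre coordinates in mm
X, Y, diam = 5.0, 5.0, 2.0                  # a spot centred at (5, 5) mm, 2 mm across
spot = ma.getmask(ma.masked_less((tax - X)**2 + (tay - Y)**2, (diam / 2.)**2))
print(spot.sum())                           # roughly pi * 10**2, i.e. ~314 pixels inside the spot
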
Example #52
    def test_testOddFeatures(self):
        # Test of other odd features
        x = arange(20)
        x = x.reshape(4, 5)
        x.flat[5] = 12
        assert_(x[1, 0] == 12)
        z = x + 10j * x
        assert_(eq(z.real, x))
        assert_(eq(z.imag, 10 * x))
        assert_(eq((z * conjugate(z)).real, 101 * x * x))
        z.imag[...] = 0.0

        x = arange(10)
        x[3] = masked
        assert_(str(x[3]) == str(masked))
        c = x >= 8
        assert_(count(where(c, masked, masked)) == 0)
        assert_(shape(where(c, masked, masked)) == c.shape)
        z = where(c, x, masked)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is masked)
        assert_(z[7] is masked)
        assert_(z[8] is not masked)
        assert_(z[9] is not masked)
        assert_(eq(x, z))
        z = where(c, masked, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        z = masked_where(c, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        assert_(eq(x, z))
        x = array([1., 2., 3., 4., 5.])
        c = array([1, 1, 1, 0, 0])
        x[2] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        c[0] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))
        assert_(eq(masked_where(greater_equal(x, 2), x),
                   masked_greater_equal(x, 2)))
        assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))
        assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
        assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
        assert_(eq(masked_inside(array(list(range(5)),
                                       mask=[1, 0, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 1, 1, 0]))
        assert_(eq(masked_outside(array(list(range(5)),
                                        mask=[0, 1, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 0, 0, 1]))
        assert_(eq(masked_equal(array(list(range(5)),
                                      mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 0]))
        assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],
                                          mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 1]))
        assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
                   [99, 99, 3, 4, 5]))
        atest = ones((10, 10, 10), dtype=np.float32)
        btest = zeros(atest.shape, MaskType)
        ctest = masked_where(btest, atest)
        assert_(eq(atest, ctest))
        z = choose(c, (-x, x))
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        x = arange(6)
        x[5] = masked
        y = arange(6) * 10
        y[2] = masked
        c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
        cm = c.filled(1)
        z = where(c, x, y)
        zm = where(cm, x, y)
        assert_(eq(z, zm))
        assert_(getmask(zm) is nomask)
        assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
        z = where(c, masked, 1)
        assert_(eq(z, [99, 99, 99, 1, 1, 1]))
        z = where(c, 1, masked)
        assert_(eq(z, [99, 1, 1, 99, 99, 99]))
Example #53
File: Reaction.py Project: envhyf/permm
    def get_spc(self, *args):
        nargs = len(args)
        if nargs > 2:
            raise TypeError('get_spc expected at most 2 arguments, got %d' % nargs)
            
        item = args[0]

        values = []
        roles = []
        if item.exclude:
            item_spc_roles = [k for k in item.iter_species_roles()]
            item_spcs = [k[0] for k in item_spc_roles]
            for spc, role in self._stoic:
                if role == 'u' and spc in item_spcs and (spc, 'u') not in item_spc_roles:
                    val = self._stoic[spc, role]
                    if greater(val, 0).all():
                        role = 'p'
                    elif less(val, 0).all():
                        role = 'r'
                
                if not (spc, role) in item_spc_roles:
                    values.append(1 * self._stoic[spc, role])
                    roles.append(role)
                    
                
        else:
            for spc, props in item.spc_dict.iteritems():
                spc_roles = props['role']
                for role in spc_roles:
                    if (spc, role) in self._stoic:
                        values.append(item.spc_dict[spc]['stoic'] * self._stoic[spc, role])
                        roles.append(role)
                
                if 'u' not in spc_roles and (spc, 'u') in self._stoic:
                    val = self._stoic[spc, 'u']
                    if role == 'p':
                        values.append(item.spc_dict[spc]['stoic'] * masked_less(val, 0).filled(0))
                        roles.append(role)
                    elif role == 'r':
                        values.append(item.spc_dict[spc]['stoic'] * masked_greater(val, 0).filled(0))
                        roles.append(role)
                    
                
                
        if len(values) == 0:
            if self._safe:
                if nargs == 1:
                    if item.name in self:
                        warn("%s not in %s; trying all roles" % (item, self.sum()))
                        return self.get_spc(Species(item.name, exclude = item.exclude))
                    raise KeyError, "%s does not contain %s" % (str(self.sum()), str(item))
                else:
                    return args[1]
            else:
                raise KeyError, "%s does not contain %s" % (str(self.sum()), str(item))
            
                
        last_role = roles[-1]
        same_role = all([last_role == role for role in roles])
        
        if same_role:
            role = last_role
        else:
            role = 'u'
        return Stoic(sum(values, axis = 0), role = role)
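
The masked_less(...).filled(0) / masked_greater(...).filled(0) pair above simply splits a signed stoichiometry array into its production and consumption parts.  A tiny illustration with made-up numbers:

import numpy as np
from numpy.ma import masked_less, masked_greater

stoic = np.array([0.4, -1.0, 2.0, -0.3])
produced = masked_less(stoic, 0).filled(0)      # keep the positive contributions: [0.4, 0., 2., 0.]
consumed = masked_greater(stoic, 0).filled(0)   # keep the negative contributions: [0., -1., 0., -0.3]
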
Example #54
def extractCORTADSST(timestamp_start, timestamp_end, masked=True):
    """Routine that extracts the SST values for
    the specific tiles and time-period (t)"""
    cdf1,cdf2,cdf3,cdf4,cdf5,cdf6,cdf7,cdf8,cdf9,cdf10=openCoRTAD()

    # calculate days from ref date to first and last sighting
    ref_date=datetime.datetime(1980,12,31,12,0,0)
    days = 60.*60.*24. # sec*min*hr
    t1 = int(round((timestamp_start - ref_date).total_seconds()/days))
    t2 = int(round((timestamp_end - ref_date).total_seconds()/days))

    cortad_time=np.squeeze(cdf1.variables["time"][:])

    # use binary search to find index of nearest time value to data times
    idx1 = (np.abs(cortad_time-t1)).argmin()
    idx2 = (np.abs(cortad_time-t2)).argmin()

    # TODO make sure data is being averaged correctly
    filledSST1=np.average((cdf1.variables["FilledSST"][idx1:idx2,:,:]), axis=0)
    filledSST2=np.average((cdf2.variables["FilledSST"][idx1:idx2,:,:]), axis=0)
    filledSST3=np.average((cdf3.variables["FilledSST"][idx1:idx2,:,:]), axis=0)
    filledSST4=np.average((cdf4.variables["FilledSST"][idx1:idx2,:,:]), axis=0)
    filledSST5=np.average((cdf5.variables["FilledSST"][idx1:idx2,:,:]), axis=0)
    filledSST6=np.average((cdf6.variables["FilledSST"][idx1:idx2,:,:]), axis=0)
    filledSST7=np.average((cdf7.variables["FilledSST"][idx1:idx2,:,:]), axis=0)
    filledSST8=np.average((cdf8.variables["FilledSST"][idx1:idx2,:,:]), axis=0)
    filledSST9=np.average((cdf9.variables["FilledSST"][idx1:idx2,:,:]), axis=0)
    filledSST10=np.average((cdf10.variables["FilledSST"][idx1:idx2,:,:]), axis=0)

    offset=cdf1.variables["FilledSST"].__getattribute__('add_offset')
    cdf1.close();cdf2.close();cdf3.close();cdf4.close();cdf5.close();cdf6.close();cdf7.close();cdf8.close();cdf9.close();cdf10.close()

    filledMaskedSST1=filledSST1 - offset
    filledMaskedSST2=filledSST2 - offset
    filledMaskedSST3=filledSST3 - offset
    filledMaskedSST4=filledSST4 - offset
    filledMaskedSST5=filledSST5 - offset
    filledMaskedSST6=filledSST6 - offset
    filledMaskedSST7=filledSST7 - offset
    filledMaskedSST8=filledSST8 - offset
    filledMaskedSST9=filledSST9 - offset
    filledMaskedSST10=filledSST10 - offset

#    filledMaskedSST1=filledSST3*0
#    filledMaskedSST2=filledSST3*0
#    filledMaskedSST3=filledSST3*0
#    filledMaskedSST4=filledSST3*0
#    filledMaskedSST5=filledSST3*0
#    filledMaskedSST6=filledSST3*0

    """Now we have all the data in 4 different arrays that we need to concentate.
    First we add the horisontal tiles, and finally we stack the two horisontal ones on top
    of each other."""
    filledMaskedSST_lower=concatenate((filledMaskedSST1,filledMaskedSST2,filledMaskedSST3,filledMaskedSST4,filledMaskedSST5),axis=1)

    filledMaskedSST_upper=concatenate((filledMaskedSST6,filledMaskedSST7,filledMaskedSST8,filledMaskedSST9,filledMaskedSST10),axis=1)

    filledMaskedSST_all=concatenate((filledMaskedSST_upper,filledMaskedSST_lower),axis=0)

    """Flip the SST array to be consistent with order of latitude array"""
 #   filledMaskedSST_all=np.flipud(filledMaskedSST_all) # flipping does not appear to be necessary

    """ Scale and offset is autmoatically detected and edited by netcdf, but
    we need to mask the values that are not filled."""
    filledMaskedSST_final=ma.masked_less(filledMaskedSST_all,-2.)

    print "Min and max of SST: %s - %s"%(filledMaskedSST_final.min(),filledMaskedSST_final.max())
    print "------------------------------\n"

    return filledMaskedSST_final
Example #55
 def test_masked_data(self):
     new_data = ma.masked_less(self.data2d, 3)
     c_data = add_cyclic_point(new_data)
     r_data = ma.concatenate((self.data2d, self.data2d[:, :1]), axis=1)
     assert_array_equal(c_data, r_data)
interpMo_He = RegularGridInterpolator((glist_He,tlist_He), tableMo_He, bounds_error=False, fill_value=None)
interpRo_He = RegularGridInterpolator((glist_He,tlist_He), tableRo_He, bounds_error=False, fill_value=None)

#read in the BSS data and ...

#CREATE WAVELENGTH MASK
#edges of masked regions adjusted by hand in TestMasks.ipynb to
#block out the geocoronal lines.

#w1, f1, err1 = np.loadtxt('lcb201010_x1dsum.txt.bin30.unred', unpack=True)
w1, f1, stdev1, err1 = np.loadtxt('../Natalie/lcb201010_x1dsum.txt.bin30.unred.newerrors', unpack=True)
add_disp = 9.1e-18
#adding intrinsic dispersion
toterr1 = np.sqrt(err1**2.0 + add_disp**2.0)
w1_1 = ma.masked_less(w1, 1141)
w1_2 = ma.masked_inside(w1_1, 1178., 1250.)
w1_3 = ma.masked_inside(w1_2, 1292., 1318.)
w1_4 = ma.masked_inside(w1_3, 1348., 1367.)
w1_5 = ma.masked_inside(w1_4, 1332., 1340.)
dataw1 = ma.masked_greater(w1_5, 1725.)
dataw1c = 1.0*dataw1.compressed()

#w2, f2, err2 = np.loadtxt('lcb202010_x1dsum.txt.bin30.unred', unpack=True)
w2, f2, stdev2, err2 = np.loadtxt('../Natalie/lcb202010_x1dsum.txt.bin30.unred.newerrors', unpack=True)
#adding intrinsic dispersion
toterr2 = np.sqrt(err2**2.0 + add_disp**2.0)
w2_1 = ma.masked_less(w2, 1142)
w2_2 = ma.masked_inside(w2_1, 1182., 1250.)
w2_3 = ma.masked_inside(w2_2, 1290., 1318.)
w2_4 = ma.masked_inside(w2_3, 1348., 1368.)
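
Chaining the masked_* calls this way accumulates a single combined mask.  The same pattern, condensed on a synthetic wavelength grid (the cut values are taken from the snippet above):

import numpy as np
import numpy.ma as ma

w = np.linspace(1100., 1800., 2000)          # synthetic wavelength grid (Angstroms)
wm = ma.masked_less(w, 1141.)                # blue cutoff
wm = ma.masked_inside(wm, 1178., 1250.)      # block a geocoronal region
wm = ma.masked_greater(wm, 1725.)            # red cutoff
w_clean = wm.compressed()                    # wavelengths that survive every mask
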
Example #57
def cb_multi(data, sign='+', samplerate=0.1, rise=[5.0, 3.0, 2.0, 0.5], decay=[30.0, 9.0, 5.0, 1.0],
                        matchflag=True, threshold=3.0,
                        dispflag=True, lpfilter=0, template_type=1, ntau=5):
    
    """ return the best choice of fits among a set of rise and fall times using the CB method.
            the tested rise and decay times are determined by the lists of rise and decay arrays.
            if matchflag is true, then the arrays are compared in order (rise[0], decay[0]),
            (rise[1], decay[1]). if matchflag is false, then all pairwise comparisons are made

    """

    # RGBA colours used to distinguish the candidate templates in the plots
    clist = [(1, 0, 0, 1), (0, 1, 0, 1), (0, 0, 1, 1), (1, 1, 0, 1), (1, 0, 1, 1), (0, 1, 1, 1),
             (1, 0.5, 0.5, 1), (0.5, 1, 0.5, 1), (0.5, 0.5, 1, 1), (0, 0, 0, 1)]

    nrTests = 1
    ndTests = 1
    if len(rise) > 1:
        nrTests = len(rise)
    if len(decay) > 1:
        ndTests = len(decay)
    nTests = max((nrTests, ndTests))
    if matchflag is False:
        nCand = nrTests * ndTests
    else:
        nCand = nrTests
    icand = np.array([])
    iscamp = np.array([])
    ioff = np.array([])
    peaks = np.array([])
    crit = np.array([])
    itj = np.array([])
    itk = np.array([])
    # datan will be modified during CB if subtractMode is on
    datan = data.copy()
    for k in range(0, nrTests):  # use multiple template shapes
        for j in range(0, ndTests):
            if matchflag is True and j != k:
                continue
            (ic, pks, critval, isc, io, template) = ClementsBekkers(datan,
                                                                    samplerate=samplerate, rise=rise[k], decay=decay[
                                                                        j], threshold=threshold, sign=sign,
                                                                    dispFlag=dispflag, subtractMode=True,
                                                                    lpfilter=lpfilter, template_type=template_type, ntau=ntau, markercolor=clist[k])
#	returns :: (eventlist, pkl, crit, scale, cx)
            if len(ic) == 0:
                continue
            icand = np.append(icand, ic)
            peaks = np.append(peaks, pks)
            crit = np.append(crit, critval)
            iscamp = np.append(iscamp, isc)
            ioff = np.append(ioff, io)
            itj = np.append(itj, j * np.ones(len(ic)))
            itk = np.append(itk, k * np.ones(len(ic)))

    dist = 10.0  # minimum time between events is set to 5 msec here.
    # pairwise comparision
    if sign == '-':
        ksign = -1
    else:
        ksign = 1
    print np.shape(icand)
    nt = len(icand)
    if nt == 0:
        return
    # choose the best fit candidate events within dist of each other
    for ic in range(0, nt):
        # compare each candidate template with the others
        for jc in range(ic + 1, nt):
            if icand[jc] == -1 or icand[ic] == -1:
                continue
            if abs(icand[ic] - icand[jc]) < dist or abs(peaks[ic] - peaks[jc]) < dist:
                if ksign * crit[ic] > ksign * crit[jc]:
                    icand[jc] = -1  # removes an event from the list
                else:
                    icand[ic] = -1
    mcand = ma.masked_less(icand, 0)
    selmask = ma.getmask(mcand)
    icand = ma.compressed(mcand)
    crit = ma.compressed(ma.array(crit, mask=selmask))
    peaks = ma.compressed(ma.array(peaks, mask=selmask))
    iscamp = ma.compressed(ma.array(iscamp, mask=selmask))
    ioff = ma.compressed(ma.array(ioff, mask=selmask))
    itj = ma.compressed(ma.array(itj, mask=selmask))
    itk = ma.compressed(ma.array(itk, mask=selmask))
    mpl.figure(2)
    t = samplerate * np.arange(0, len(data))
    mpl.subplot(1, 1, 1)
    mpl.plot(t, data, 'k', zorder=0)
    mpl.hold(True)
    ipts = icand.astype(int).tolist()
    ippts = peaks.astype(int).tolist()
    ijp = itj.astype(int).tolist()
    cols = []
    for p in range(0, len(ippts)):
        cols.append(clist[ijp[p]])  # plots below were t[ipts], data[ipts]
    mpl.scatter(t[ipts], ioff, s=49, c=cols, marker='s', zorder=1)
    mpl.scatter(t[ippts], iscamp, s=49, c=cols, marker='o', zorder=2)
    mpl.show()

    return(icand, peaks, crit, iscamp, ioff)
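
The post-processing block near the end (masked_less on the candidate list, then compressing every parallel array with the same mask) is a generic way to drop flagged entries.  In isolation, with made-up values:

import numpy as np
import numpy.ma as ma

icand = np.array([12., -1., 57., 104., -1.])   # -1 marks rejected events
peaks = np.array([0.5, 0.1, 0.8, 0.3, 0.2])
mcand = ma.masked_less(icand, 0)
selmask = ma.getmask(mcand)
icand = ma.compressed(mcand)                          # [ 12.  57. 104.]
peaks = ma.compressed(ma.array(peaks, mask=selmask))  # [0.5  0.8  0.3]
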
Example #58
    def run(self):
        """Runs the task.

           Parameters
           ----------
           None

           Returns
           -------
           None
        """

        self._summary = {}
        dt = utils.Dtime("CubeStats")

        #maxvrms = 2.0      # maximum variation in rms allowed (hardcoded for now)
        #maxvrms = -1.0     # turn maximum variation in rms allowed off
        maxvrms = self.getkey("maxvrms")

        psample = -1
        psample = self.getkey("psample")        

        # BDP's used :
        #   b1 = input BDP
        #   b2 = output BDP

        b1 = self._bdp_in[0]
        fin = b1.getimagefile(bt.CASA)

        bdp_name = self.mkext(fin,'cst')
        b2 = CubeStats_BDP(bdp_name)
        self.addoutput(b2)

        # PeakPointPlot 
        use_ppp = self.getkey("ppp")

        # peakstats: not enabled for mortal users yet
        # peakstats = (psample=1, numsigma=4, minchan=3, maxgap=2, peakfit=False)
        pnumsigma = 4
        minchan   = 3
        maxgap    = 2
        peakfit   = False             # True will enable a true gaussian fit
        
        # numsigma:  adding all signal > numsigma ; not user enabled;   for peaksum.
        numsigma = -1.0
        numsigma = 3.0

        # grab the new robust statistics. If this is used, 'rms' will be the RMS,
        # else we will use RMS = 1.4826*MAD (MAD does a decent job on outliers as well)
        # and was the only method available before CASA 4.4 when robust was implemented
        robust = self.getkey("robust")
        rargs = casautil.parse_robust(robust)
        nrargs = len(rargs)

        if nrargs == 0:
           sumrargs = "medabsdevmed"      # for the summary, indicate the default robust
        else:
           sumrargs = str(rargs)

        self._summary["rmsmethd"] = SummaryEntry([sumrargs,fin],"CubeStats_AT",self.id(True))
        #@todo think about using this instead of putting 'fin' in all the SummaryEntry
        #self._summary["casaimage"] = SummaryEntry(fin,"CubeStats_AT",self.id(True))

        # extra CASA call to get the freq's in GHz, as these are not in imstat1{}
        # @todo what if the coordinates are not in FREQ ?
        # Note: CAS-7648 bug on 3D cubes
        if False:
            # csys method
            ia.open(self.dir(fin))
            csys = ia.coordsys() 
            spec_axis = csys.findaxisbyname("spectral") 
            # ieck, we need a valid position, or else it will come back and "Exception: All selected pixels are masked"
            #freqs = ia.getprofile(spec_axis, region=rg.box([0,0],[0,0]))['coords']/1e9
            #freqs = ia.getprofile(spec_axis)['coords']/1e9
            freqs = ia.getprofile(spec_axis,unit="GHz")['coords']
            dt.tag("getprofile")
        else:
            # old imval method 
            #imval0 = casa.imval(self.dir(fin),box='0,0,0,0')     # this fails on 3D
            imval0 = casa.imval(self.dir(fin))
            freqs = imval0['coords'].transpose()[2]/1e9
            dt.tag("imval")
        nchan = len(freqs)
        chans = np.arange(nchan)

        # call CASA to get what we want
        # imstat0 is the whole cube, imstat1 the plane based statistics
        # warning: certain robust stats (**rargs) on the whole cube are going to be very slow
        dt.tag("start")
        imstat0 = casa.imstat(self.dir(fin),           logfile=self.dir('imstat0.logfile'),append=False,**rargs)
        dt.tag("imstat0")
        imstat1 = casa.imstat(self.dir(fin),axes=[0,1],logfile=self.dir('imstat1.logfile'),append=False,**rargs)
        dt.tag("imstat1")
        # imm = casa.immoments(self.dir(fin),axis='spec', moments=8, outfile=self.dir('ppp.im'))
        if nrargs > 0:
            # need to get the peaks without rubust
            imstat10 = casa.imstat(self.dir(fin),           logfile=self.dir('imstat0.logfile'),append=True)
            dt.tag("imstat10")
            imstat11 = casa.imstat(self.dir(fin),axes=[0,1],logfile=self.dir('imstat1.logfile'),append=True)
            dt.tag("imstat11")

        # grab the relevant plane-based things from imstat1
        if nrargs == 0:
            mean    = imstat1["mean"]
            sigma   = imstat1["medabsdevmed"]*1.4826     # see also: astropy.stats.median_absolute_deviation()
            peakval = imstat1["max"]
            minval  = imstat1["min"]
        else:
            mean    = imstat1["mean"]
            sigma   = imstat1["rms"]
            peakval = imstat11["max"]
            minval  = imstat11["min"]

        if True:
            # work around a bug in imstat(axes=[0,1]) for last channel [CAS-7697]
            for i in range(len(sigma)):
                if sigma[i] == 0.0:
                    minval[i] = peakval[i] = 0.0

        # too many variations in the RMS ?
        sigma_pos = sigma[np.where(sigma>0)]
        smin = sigma_pos.min()
        smax = sigma_pos.max()
        logging.info("sigma varies from %f to %f; %d/%d channels ok" % (smin,smax,len(sigma_pos),len(sigma)))
        if maxvrms > 0:
            if smax/smin > maxvrms:
                cliprms = smin * maxvrms
                logging.warning("sigma varies too much, going to clip to %g (%g > %g)" % (cliprms, smax/smin, maxvrms))
                sigma = np.where(sigma < cliprms, sigma, cliprms)

        # @todo   (and check again) for foobar.fits all sigma's became 0 when robust was selected
        #         was this with mask=True/False?

        # PeakPointPlot (can be expensive, hence the option)
        if use_ppp:
            logging.info("Computing MaxPos for PeakPointPlot")
            xpos    = np.zeros(nchan)
            ypos    = np.zeros(nchan)
            peaksum = np.zeros(nchan)

            ia.open(self.dir(fin))
            for i in range(nchan):
                if sigma[i] > 0.0:
                    plane = ia.getchunk(blc=[0,0,i,-1],trc=[-1,-1,i,-1],dropdeg=True)
                    v = ma.masked_invalid(plane)
                    v_abs = np.absolute(v)
                    max = np.unravel_index(v_abs.argmax(), v_abs.shape)
                    xpos[i] = max[0]
                    ypos[i] = max[1]
                    if numsigma > 0.0:
                        peaksum[i] = ma.masked_less(v,numsigma * sigma[i]).sum()
            peaksum = np.nan_to_num(peaksum)    # put 0's where nan's are found
            ia.close()
            dt.tag("ppp")

        nzeros = len(np.where(sigma<=0.0)[0])
        if nzeros > 0:
            zeroch = np.where(sigma<=0.0)[0]
            logging.warning("There are %d fully masked channels (%s)" % (nzeros,str(zeroch)))

        # construct the admit Table for CubeStats_BDP
        # note data needs to be a tuple, later to be column_stack'd
        if use_ppp:
            labels = ["channel" ,"frequency" ,"mean"    ,"sigma"   ,"max"     ,"maxposx" ,"maxposy" ,"min",     "peaksum"]
            units  = ["number"  ,"GHz"       ,"Jy/beam" ,"Jy/beam" ,"Jy/beam" ,"number"  ,"number"  ,"Jy/beam", "Jy"]
            data   = (chans     ,freqs       ,mean      ,sigma     ,peakval   ,xpos      ,ypos      ,minval,    peaksum)

        else:
            labels = ["channel" ,"frequency" ,"mean"    ,"sigma"   ,"max"     ,"min"]
            units  = ["number"  ,"GHz"       ,"Jy/beam" ,"Jy/beam" ,"Jy/beam" ,"Jy/beam"]
            data   = (chans     ,freqs       ,mean      ,sigma     ,peakval   ,minval)

        table = Table(columns=labels,units=units,data=np.column_stack(data))
        b2.setkey("table",table)

        # get the full cube statistics, it depends if robust was pre-selected
        if nrargs == 0:
            mean0  = imstat0["mean"][0]
            sigma0 = imstat0["medabsdevmed"][0]*1.4826
            peak0  = imstat0["max"][0]
            b2.setkey("mean" , float(mean0))
            b2.setkey("sigma", float(sigma0))
            b2.setkey("minval",float(imstat0["min"][0]))
            b2.setkey("maxval",float(imstat0["max"][0]))
            b2.setkey("minpos",imstat0["minpos"][:3].tolist())     #? [] or array(..dtype=int32) ??
            b2.setkey("maxpos",imstat0["maxpos"][:3].tolist())     #? [] or array(..dtype=int32) ??
            logging.info("CubeMax: %f @ %s" % (imstat0["max"][0],str(imstat0["maxpos"])))
            logging.info("CubeMin: %f @ %s" % (imstat0["min"][0],str(imstat0["minpos"])))
            logging.info("CubeRMS: %f" % sigma0)
        else:
            mean0  = imstat0["mean"][0]
            sigma0 = imstat0["rms"][0]
            peak0  = imstat10["max"][0]
            b2.setkey("mean" , float(mean0))
            b2.setkey("sigma", float(sigma0))
            b2.setkey("minval",float(imstat10["min"][0]))
            b2.setkey("maxval",float(imstat10["max"][0]))
            b2.setkey("minpos",imstat10["minpos"][:3].tolist())     #? [] or array(..dtype=int32) ??
            b2.setkey("maxpos",imstat10["maxpos"][:3].tolist())     #? [] or array(..dtype=int32) ??
            logging.info("CubeMax: %f @ %s" % (imstat10["max"][0],str(imstat10["maxpos"])))
            logging.info("CubeMin: %f @ %s" % (imstat10["min"][0],str(imstat10["minpos"])))
            logging.info("CubeRMS: %f" % sigma0)
        b2.setkey("robust",robust)
        rms_ratio = imstat0["rms"][0]/sigma0
        logging.info("RMS Sanity check %f" % rms_ratio)
        if rms_ratio > 1.5:
            logging.warning("RMS sanity check = %f.  Either bad sidelobes, lotsa signal, or both" % rms_ratio)
        logging.regression("CST: %f %f" % (sigma0, rms_ratio))

        # plots: no plots need to be made when nchan=1 for continuum
        # however we could make a histogram, overlaying the "best" gauss so 
        # signal deviations are clear?

        logging.info('mean,rms,S/N=%f %f %f' % (mean0,sigma0,peak0/sigma0))

        if nchan == 1:
            # for a continuum/1-channel we only need to stuff some numbers into the _summary
            self._summary["chanrms"] = SummaryEntry([float(sigma0), fin], "CubeStats_AT", self.id(True))
            self._summary["dynrange"] = SummaryEntry([float(peak0)/float(sigma0), fin], "CubeStats_AT", self.id(True))
            self._summary["datamean"] = SummaryEntry([float(mean0), fin], "CubeStats_AT", self.id(True))
        else:
            y1 = np.log10(ma.masked_invalid(peakval))
            y2 = np.log10(ma.masked_invalid(sigma))
            y3 = y1-y2
            y4 = np.log10(ma.masked_invalid(-minval))
            y5 = y1-y4
            y = [y1,y2,y3,y4]
            title = 'CubeStats: ' + bdp_name+'_0'
            xlab  = 'Channel'
            ylab  = 'log(Peak,Noise,Peak/Noise)'
            labels = ['log(peak)','log(rms noise)','log(peak/noise)','log(|minval|)']
            myplot = APlot(ptype=self._plot_type,pmode=self._plot_mode,abspath=self.dir())
            segp = [[chans[0],chans[nchan-1],math.log10(sigma0),math.log10(sigma0)]]
            myplot.plotter(chans,y,title,bdp_name+"_0",xlab=xlab,ylab=ylab,segments=segp,labels=labels,thumbnail=True)
            imfile = myplot.getFigure(figno=myplot.figno,relative=True)
            thumbfile = myplot.getThumbnail(figno=myplot.figno,relative=True)

            image0 = Image(images={bt.PNG:imfile},thumbnail=thumbfile,thumbnailtype=bt.PNG,description="CubeStats_0")
            b2.addimage(image0,"im0")

            if use_ppp:
                # new trial for Lee
                title = 'PeakSum: (numsigma=%.1f)' % (numsigma)
                ylab = 'Jy*N_ppb'
                myplot.plotter(chans,[peaksum],title,bdp_name+"_00",xlab=xlab,ylab=ylab,thumbnail=False)

            if True:
                # hack ascii table
                y30 = np.where(sigma > 0, np.log10(peakval/sigma), 0.0)
                table2 = Table(columns=["freq","log(P/N)"],data=np.column_stack((freqs,y30)))
                table2.exportTable(self.dir("testCubeStats.tab"))
                del table2

            # the "box" for the "spectrum" is all pixels.  Don't know how to 
            # get this except via shape.
            ia.open(self.dir(fin))
            s = ia.summary()
            ia.close()
            if 'shape' in s:
                specbox = (0,0,s['shape'][0],s['shape'][1])
            else:
                specbox = ()

            caption = "Emission characteristics as a function of channel, as derived by CubeStats_AT "
            caption += "(cyan: global rms,"
            caption += " green: noise per channel,"
            caption += " blue: peak value per channel,"
            caption += " red: peak/noise per channel)."
            self._summary["spectra"] = SummaryEntry([0, 0, str(specbox), 'Channel', imfile, thumbfile , caption, fin], "CubeStats_AT", self.id(True))
            self._summary["chanrms"] = SummaryEntry([float(sigma0), fin], "CubeStats_AT", self.id(True))

            # @todo Will imstat["max"][0] always be equal to s['datamax']?  If not, why not?
            if 'datamax' in s:
                self._summary["dynrange"] = SummaryEntry([float(s['datamax']/sigma0), fin], "CubeStats_AT", self.id(True))
            else:
                self._summary["dynrange"] = SummaryEntry([float(imstat0["max"][0]/sigma0), fin], "CubeStats_AT", self.id(True))
            self._summary["datamean"] = SummaryEntry([imstat0["mean"][0], fin], "CubeStats_AT", self.id(True))

            title = bdp_name + "_1"
            xlab =  'log(Peak,Noise,P/N)'
            myplot.histogram([y1,y2,y3],title,bdp_name+"_1",xlab=xlab,thumbnail=True)

            imfile = myplot.getFigure(figno=myplot.figno,relative=True)
            thumbfile = myplot.getThumbnail(figno=myplot.figno,relative=True)
            image1 = Image(images={bt.PNG:imfile},thumbnail=thumbfile,thumbnailtype=bt.PNG,description="CubeStats_1")
            b2.addimage(image1,"im1")

            # note that the 'y2' can have been clipped, which can throw off stats.robust()
            # @todo  should set a mask for those.

            title = bdp_name + "_2"
            xlab = 'log(Noise))'
            n = len(y2)
            ry2 = stats.robust(y2)
            y2_mean = ry2.mean()
            y2_std  = ry2.std()
            if n>9: logging.debug("NORMALTEST2: %s" % str(scipy.stats.normaltest(ry2)))
            myplot.hisplot(y2,title,bdp_name+"_2",xlab=xlab,gauss=[y2_mean,y2_std],thumbnail=True)

            title = bdp_name + "_3"
            xlab = 'log(diff[Noise])'
            n = len(y2)
            # dy2 = y2[0:-2] - y2[1:-1]
            dy2 = ma.masked_equal(y2[0:-2] - y2[1:-1],0.0).compressed()
            rdy2 = stats.robust(dy2)
            dy2_mean = rdy2.mean()
            dy2_std  = rdy2.std()
            if n>9: logging.debug("NORMALTEST3: %s" % str(scipy.stats.normaltest(rdy2)))
            myplot.hisplot(dy2,title,bdp_name+"_3",xlab=xlab,gauss=[dy2_mean,dy2_std],thumbnail=True)


            title = bdp_name + "_4"
            xlab = 'log(Signal/Noise))'
            n = len(y3)
            ry3 = stats.robust(y3)
            y3_mean = ry3.mean()
            y3_std  = ry3.std()
            if n>9: logging.debug("NORMALTEST4: %s" % str(scipy.stats.normaltest(ry3)))
            myplot.hisplot(y3,title,bdp_name+"_4",xlab=xlab,gauss=[y3_mean,y3_std],thumbnail=True)

            title = bdp_name + "_5"
            xlab = 'log(diff[Signal/Noise)])'
            n = len(y3)
            dy3 = y3[0:-2] - y3[1:-1]
            rdy3 = stats.robust(dy3)
            dy3_mean = rdy3.mean()
            dy3_std  = rdy3.std()
            if n>9: logging.debug("NORMALTEST5: %s" % str(scipy.stats.normaltest(rdy3)))
            myplot.hisplot(dy3,title,bdp_name+"_5",xlab=xlab,gauss=[dy3_mean,dy3_std],thumbnail=True)


            title = bdp_name + "_6"
            xlab = 'log(Peak+Min)'
            n = len(y1)
            ry5 = stats.robust(y5)
            y5_mean = ry5.mean()
            y5_std  = ry5.std()
            if n>9: logging.debug("NORMALTEST6: %s" % str(scipy.stats.normaltest(ry5)))
            myplot.hisplot(y5,title,bdp_name+"_6",xlab=xlab,gauss=[y5_mean,y5_std],thumbnail=True)

            logging.debug("LogPeak: m,s= %f %f min/max %f %f" % (y1.mean(),y1.std(),y1.min(),y1.max()))
            logging.debug("LogNoise: m,s= %f %f %f %f min/max %f %f" % (y2.mean(),y2.std(),y2_mean,y2_std,y2.min(),y2.max()))
            logging.debug("LogDeltaNoise: RMS/sqrt(2)= %f %f " % (dy2.std()/math.sqrt(2),dy2_std/math.sqrt(2)))
            logging.debug("LogDeltaP/N:   RMS/sqrt(2)= %f %f" % (dy3.std()/math.sqrt(2),dy3_std/math.sqrt(2)))
            logging.debug("LogPeak+Min: robust m,s= %f %f" % (y5_mean,y5_std))

            # compute two ratios that should both be near 1.0 if noise is 'normal'
            ratio  = y2.std()/(dy2.std()/math.sqrt(2))
            ratio2 = y2_std/(dy2_std/math.sqrt(2))
            logging.info("RMS BAD VARIATION RATIO: %f %f" % (ratio,ratio2))

        # making PPP plot
        if nchan > 1 and use_ppp:
            smax = 10
            gamma = 0.75

            z0 = peakval/peakval.max()
            # point sizes
            s = np.pi * ( smax * (z0**gamma) )**2
            cmds = ["grid", "axis equal"]
            title = "Peak Points per channel"
            pppimage = bdp_name + '_ppp'
            myplot.scatter(xpos,ypos,title=title,figname=pppimage,size=s,color=chans,cmds=cmds,thumbnail=True)
            pppimage     = myplot.getFigure(figno=myplot.figno,relative=True)
            pppthumbnail = myplot.getThumbnail(figno=myplot.figno,relative=True)
            caption = "Peak point plot: Locations of per-channel peaks in the image cube " + fin
            self._summary["peakpnt"] = SummaryEntry([pppimage, pppthumbnail, caption, fin], "CubeStats_AT", self.id(True))
        dt.tag("plotting")

        # making PeakStats plot
        if nchan > 1 and psample > 0:
            logging.info("Computing peakstats")
            # grab peak,mean and width values for all peaks
            (pval,mval,wval) = peakstats(self.dir(fin),freqs,sigma0,pnumsigma,minchan,maxgap,psample,peakfit)
            title = "PeakStats: cutoff = %g" % (sigma0*pnumsigma)
            xlab = 'Peak value'
            ylab = 'FWHM (channels)'
            pppimage = bdp_name + '_peakstats'
            cval = mval
            myplot.scatter(pval,wval,title=title,xlab=xlab,ylab=ylab,color=cval,figname=pppimage,thumbnail=False)
            dt.tag("peakstats")
            

        # myplot.final()    # pjt debug 
        # all done!
        dt.tag("done")

        taskargs = "robust=" + sumrargs 
        if use_ppp: 
            taskargs = taskargs + " ppp=True"
        else: 
            taskargs = taskargs + " ppp=False"
        for v in self._summary:
            self._summary[v].setTaskArgs(taskargs)

        dt.tag("summary")
        dt.end()
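
The per-channel "peaksum" above keeps only emission above the numsigma cutoff.  The same masked_less-and-sum step on a made-up plane, with sigma and numsigma chosen arbitrarily:

import numpy as np
import numpy.ma as ma

plane = np.array([[0.1, 2.5, 0.2],
                  [3.0, 0.05, 1.8],
                  [0.3, 0.2, 0.4]])
sigma, numsigma = 0.5, 3.0
peaksum = ma.masked_less(plane, numsigma * sigma).sum()   # 2.5 + 3.0 + 1.8 = 7.3
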
Example #59
def ns_r1rho_2site(M0=None, M0_T=None, r1rho_prime=None, omega=None, offset=None, r1=0.0, pA=None, dw=None, kex=None, spin_lock_fields=None, relax_time=None, inv_relax_time=None, back_calc=None):
    """The 2-site numerical solution to the Bloch-McConnell equation for R1rho data.

    This function calculates and stores the R1rho values.


    @keyword M0:                This is a vector that contains the initial magnetizations corresponding to the A and B state transverse magnetizations.
    @type M0:                   numpy float array of rank [NE][NS][NM][NO][ND][6][1]
    @keyword M0_T:              This is a vector that contains the initial magnetizations corresponding to the A and B state transverse magnetizations, where the outer two axis has been swapped for efficient dot operations.
    @type M0_T:                 numpy float array of rank [NE][NS][NM][NO][ND][1][6]
    @keyword r1rho_prime:       The R1rho_prime parameter value (R1rho with no exchange).
    @type r1rho_prime:          numpy float array of rank [NE][NS][NM][NO][ND]
    @keyword omega:             The chemical shift for the spin in rad/s.
    @type omega:                numpy float array of rank [NE][NS][NM][NO][ND]
    @keyword offset:            The spin-lock offsets for the data.
    @type offset:               numpy float array of rank [NE][NS][NM][NO][ND]
    @keyword r1:                The R1 relaxation rate.
    @type r1:                   numpy float array of rank [NE][NS][NM][NO][ND]
    @keyword pA:                The population of state A.
    @type pA:                   float
    @keyword dw:                The chemical exchange difference between states A and B in rad/s.
    @type dw:                   numpy float array of rank [NE][NS][NM][NO][ND]
    @keyword kex:               The kex parameter value (the exchange rate in rad/s).
    @type kex:                  float
    @keyword spin_lock_fields:  The R1rho spin-lock field strengths (in rad.s^-1).
    @type spin_lock_fields:     numpy float array of rank [NE][NS][NM][NO][ND]
    @keyword relax_time:        The total relaxation time period for each spin-lock field strength (in seconds).
    @type relax_time:           numpy float array of rank [NE][NS][NM][NO][ND]
    @keyword inv_relax_time:    The inverse of the relaxation time period for each spin-lock field strength (in inverse seconds).  This is used for faster calculations.
    @type inv_relax_time:       numpy float array of rank [NE][NS][NM][NO][ND]
    @keyword back_calc:         The array for holding the back calculated R2eff values.  Each element corresponds to one of the CPMG nu1 frequencies.
    @type back_calc:            numpy float array of rank [NE][NS][NM][NO][ND]
    """

    # Once off parameter conversions.
    pB = 1.0 - pA
    k_BA = pA * kex
    k_AB = pB * kex

    # The matrix that contains all the contributions to the evolution, i.e. relaxation, exchange and chemical shift evolution.
    R_mat = rr1rho_3d_2site_rankN(R1=r1, r1rho_prime=r1rho_prime, dw=dw, omega=omega, offset=offset, w1=spin_lock_fields, k_AB=k_AB, k_BA=k_BA, relax_time=relax_time)

    # This matrix is a propagator that will evolve the magnetization with the matrix R.
    Rexpo_mat = matrix_exponential(R_mat)

    # Magnetization evolution.
    Rexpo_M0_mat = einsum('...ij, ...jk', Rexpo_mat, M0)

    # Magnetization evolution, which include all dimensions.
    MA_mat = einsum('...ij, ...jk', M0_T, Rexpo_M0_mat)[:, :, :, :, :, 0, 0]

    # Insert safe checks.
    if min(MA_mat) < 0.0:
        mask_min_MA_mat = masked_less(MA_mat, 0.0)
        # Fill with high values.
        MA_mat[mask_min_MA_mat.mask] = 1e100

    # Do back calculation.
    back_calc[:] = -inv_relax_time * log(MA_mat)

    # Catch errors, taking a sum over array is the fastest way to check for
    # +/- inf (infinity) and nan (not a number).
    if not isfinite(sum(back_calc)):
        # Replaces nan, inf, etc. with fill value.
        fix_invalid(back_calc, copy=False, fill_value=1e100)
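
The "safe check" above uses masked_less only to locate non-positive magnetizations before the log.  A standalone sketch of that guard, with made-up magnetization values and an assumed inv_relax_time of 2.0 s^-1:

import numpy as np
from numpy.ma import masked_less

MA = np.array([0.8, 0.2, -1e-6, 0.5])   # toy magnetization values
if MA.min() < 0.0:
    bad = masked_less(MA, 0.0).mask     # True where the magnetization went negative
    MA[bad] = 1e100                     # push such points to a huge value so log() stays finite
R1rho = -2.0 * np.log(MA)               # back-calculated rates for inv_relax_time = 2.0
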