Example 1
def main():
    # rocket = T1_test_vehicle
    rocket = T2_test_vehicle

    # x, v
    Y0 = [0, 0]

    flight = Flight(rocket, Y0)
    Y = flight.solve()
    
    # Aliases
#    print("Y:", Y.shape, Y)
    x = np.array(Y[:, 0])
    v = np.array(Y[:, 1])
    D = np.array(Y[:, 2]) 
    W = np.array(Y[:, 3])

    t = flight.getTimes()
    pyplot.plot(t, x, t, v)

    # odeint can skip over many time steps, leaving blanks for intermediate values
    # Hence, masked arrays to ignore the blank values.
    D = ma.masked_invalid(D)
    W = ma.masked_invalid(W)
    mask = D.mask
    t = ma.masked_array(t, mask=mask)
    D = D.compressed()
    W = W.compressed()
    t = t.compressed()
    
    pyplot.figure()
    pyplot.plot(t, D, t, W)
    pyplot.show()

    return Y
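
The comment about odeint above is the key pattern in this example: time steps it skipped stay NaN, and the mask/compress round-trip drops them from every array in matched positions. A minimal self-contained sketch of that round-trip on synthetic data:

import numpy as np
import numpy.ma as ma

t = np.linspace(0.0, 1.0, 5)
D = np.array([1.0, np.nan, 3.0, np.nan, 5.0])

Dm = ma.masked_invalid(D)               # mask the NaN gaps
tm = ma.masked_array(t, mask=Dm.mask)   # propagate the same mask to t

# compressed() drops masked entries, leaving aligned, gap-free arrays
print(Dm.compressed())  # [1. 3. 5.]
print(tm.compressed())  # [0.  0.5 1. ]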
Example 2
	def score(self, dataframe, parallel):
		global _parallel_score_dataframe
		n_comparators = len(self.comparators)

		_parallel_score_dataframe = dataframe

		if parallel is True:
			pool = multiprocessing.Pool()
		elif parallel is False:
			pool = None
		else:
			pool = parallel  # assumed to be an instance of multiprocessing.Pool

		if pool is not None:
			scores = pool.map(parallel_score, self.comparators)
		else:
			scores = map(serial_score, self.comparators, [dataframe] * len(self.comparators))

		s_scores, b_scores = zip(*scores)

		s_scores = ma.masked_invalid(s_scores)
		b_scores = ma.masked_invalid(b_scores)

		return (
			scipy.stats.mstats.gmean(s_scores, axis=0).data,
			scipy.stats.mstats.gmean(b_scores, axis=0).data
		)
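
The geometric mean at the end relies on scipy.stats.mstats honouring the mask, so NaN scores are skipped rather than poisoning the result. A small sketch of that behaviour on synthetic scores (not the comparators above):

import numpy as np
import numpy.ma as ma
import scipy.stats

scores = np.array([[1.0, 4.0],
                   [np.nan, 16.0],
                   [4.0, 64.0]])
m = ma.masked_invalid(scores)

# mstats.gmean skips masked entries per column; the plain gmean would give nan
print(scipy.stats.mstats.gmean(m, axis=0).data)  # [ 2. 16.]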
Example 3
    def extract(self,PSF):
        #Placeholder
        pixsig = 1.
        sh   = self.sh
        npsf = PSF.size

        flux  = np.zeros(sh)
        uflux = np.zeros(sh)
        sky1d  = np.zeros(sh)
        usky1d  = np.zeros(sh)
        
        im = self.Order.image_rect
        uim = self.Order.uimage_rect
        sky = self.Order.sky_rect
        usky = self.Order.usky_rect
        
        for i in np.arange(sh):
#            flux[i] = (PSF*im[i,:]).sum() / (PSF**2).sum()

            flux[i] = (PSF*im[i,:]/uim[i,:]**2).sum() / (PSF**2/uim[i,:]**2).sum()
            uflux[i] = np.sqrt(1.0/(PSF**2/uim[i,:]**2.).sum())
            
            sky1d[i] = (np.abs(PSF)*sky[i,:]).sum() / (PSF**2).sum()
            usky1d[i] = np.sqrt(1.0/(PSF**2/usky[i,:]**2.).sum())

        flux = ma.masked_invalid(flux)
        flux = ma.filled(flux,1.)
        uflux = ma.masked_invalid(uflux)
        uflux = ma.filled(uflux,1000.)
        sky1d = ma.masked_invalid(sky1d)
        sky1d = ma.filled(sky1d,1.)
        usky1d = ma.masked_invalid(usky1d)
        usky1d = ma.filled(usky1d,1000.)
        sky_cont = self._fitCont(self.wave,sky1d)
        return flux,uflux,sky1d-sky_cont,usky1d
Example 4
    def calc_stat_1d(self, trial_array, calc_corr=True):
        """Take a 1D array of power spectra and find some basic statistics
        on it."""
        stat_1d = {}

        old_settings = np.seterr(invalid="warn", under="warn")
        mtrial_array = ma.masked_invalid(trial_array)

        stat_1d['mean'] = np.ma.filled(np.ma.mean(mtrial_array, axis=0),
                                       fill_value=np.nan)

        stat_1d['std'] = np.ma.filled(np.ma.std(mtrial_array, axis=0, ddof=1),
                                      fill_value=np.nan)

        if calc_corr:
            mtrial_array_trans = np.ma.transpose(
                                        ma.masked_invalid(trial_array))

            stat_1d['corr'] = np.ma.filled(np.ma.corrcoef(mtrial_array_trans,
                                           ddof=1), fill_value=np.nan)

            stat_1d['cov'] = np.ma.filled(np.ma.cov(mtrial_array_trans,
                                          ddof=1), fill_value=np.nan)

        np.seterr(**old_settings)

        return stat_1d
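
The filled(..., fill_value=np.nan) calls above turn fully-masked bins back into NaN instead of leaving a masked constant. In isolation:

import numpy as np
import numpy.ma as ma

trials = np.array([[1.0, np.nan],
                   [3.0, np.nan]])  # second bin invalid in every trial
m = ma.masked_invalid(trials)

mean = np.ma.filled(np.ma.mean(m, axis=0), fill_value=np.nan)
print(mean)  # [ 2. nan]: fully-masked bins come back as nan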
Example 5
def probability_bincount(obs, drop=True):
    """
    Normalise the observed bincount from `obs` to sum up to unity.

    Parameters
    ----------
    obs: iterable
        An iterable of integer numerals.
    drop: bool (optional)
        Determines whether or not bins with zero events are dropped from the
        resulting list.

    Returns
    -------
    A frequency distribution that is normalised to unity.
    """
    # reassign: masked_invalid returns a new masked array, it does not act in place
    obs = nma.masked_invalid(nma.asanyarray(obs, dtype="int32"))
    obs = numpy.ravel(obs)
    if obs.size == 0:
        return list()
    total = float(len(obs))
    # getmaskarray always yields a full boolean mask, even when nothing is masked
    freq = numpy.bincount(obs[~nma.getmaskarray(obs)])
    points = [(k, val / total) for (k, val) in enumerate(freq) if val > 0]
    return points
Example 6
def compute_zscore(obs, random_stats):
    """
    Parameters
    ----------
    obs: numeral
        original observation
    random_stats : iterable
        same observable in randomised versions
    """
    # reassign: masked_invalid returns a new masked array, it does not act in place
    random_stats = nma.masked_invalid(nma.asanyarray(random_stats))
    random_stats = numpy.ravel(random_stats)
    if random_stats.size == 0:
        return numpy.nan
    # masked (invalid) draws are filled with 0.0 before computing the moments
    mean = numpy.mean(random_stats.filled(0.0))
    std = numpy.std(random_stats.filled(0.0))
    numerator = obs - mean
    if numerator == 0.0:
        return numerator
    if std == 0.0:
        if numerator < 0.0:
            return -numpy.inf
        else:
            return numpy.inf
    else:
        return (numerator / std)
Example 7
    def set_profile_data(self, pts=None):
        # avoid a mutable default argument
        if pts is None:
            pts = [[], []]
        try:
            pres = ma.masked_invalid(self.data['pres'])
        except KeyError:
            raise KeyError("Pres in hPa (PRES) is required!")

        try:
            tc = ma.masked_invalid(self.data['temp'])
        except KeyError:
            raise KeyError("Temperature in C (TEMP) is required!")

        try:
            dwpt = ma.masked_invalid(self.data['dwpt'])
        except KeyError:
            warnings.warn("Warning: No DWPT available")
            dwpt = ma.masked_array(zeros(pres.shape), mask=True)
            

        try:
            sknt=self.data['sknt']
            drct=self.data['drct']
            rdir = (270.-drct)*(pi/180.)
            uu = (sknt*cos(rdir))
            vv = (sknt*sin(rdir))
        except KeyError:
            warnings.warn("Warning: No SKNT/DRCT available")
            uu=zeros(pres.shape)
            vv=zeros(pres.shape)

        self.tcprof_line.set_data(tc, pres)
        self.dpprof_line.set_data(dwpt, pres)
        self.plt_title.set_text("%s %s"%(self["StationNumber"],self['SoundingDate']))
        self.wind_pts.set_data(*zip(*pts))

        return self.tcprof_line,self.dpprof_line,self.plt_title,self.wind_pts
Example 8
def despike(self, n1=2, n2=20, block=100, keep=0):
    """
    Wild Edit Seabird-like function.  Passes with Standard deviation
    `n1` and `n2` with window size `block`.
    """

    data = self.values.astype(float).copy()
    roll = rolling_window(data, block)
    roll = ma.masked_invalid(roll)
    std = n1 * roll.std(axis=1)
    mean = roll.mean(axis=1)
    # Use the last value to fill-up.
    std = np.r_[std, np.tile(std[-1], block - 1)]
    mean = np.r_[mean, np.tile(mean[-1], block - 1)]
    mask = (np.abs(data - mean.filled(fill_value=np.nan)) >
            std.filled(fill_value=np.nan))
    data[mask] = np.nan

    # Pass two: recompute the mean and std without the values flagged in
    # pass one, then remove the flagged data.
    roll = rolling_window(data, block)
    roll = ma.masked_invalid(roll)
    std = n2 * roll.std(axis=1)
    mean = roll.mean(axis=1)
    # Use the last value to fill-up.
    std = np.r_[std, np.tile(std[-1], block - 1)]
    mean = np.r_[mean, np.tile(mean[-1], block - 1)]
    values = self.values.astype(float)
    mask = (np.abs(values - mean.filled(fill_value=np.nan)) >
            std.filled(fill_value=np.nan))

    clean = self.astype(float).copy()
    clean[mask] = np.nan
    return clean
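
rolling_window is not defined in this example; a common stride-tricks implementation consistent with how it is used above (a trailing window axis, so roll.std(axis=1) works on 1-D input) might look like this. It is an assumption, not necessarily the author's helper:

import numpy as np

def rolling_window(a, window):
    # view of `a` with a trailing window axis; no data is copied
    shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
    strides = a.strides + (a.strides[-1],)
    return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)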
Example 9
def plot_glider(cube, mask_topo=False, **kw):
    """Plot glider cube."""
    cmap = kw.pop('cmap', plt.cm.rainbow)
    
    lon = cube.coord(axis='X').points.squeeze()
    lat = cube.coord(axis='Y').points.squeeze()
    z = cube.coord(axis='Z').points.squeeze()
    data = cube.data
    data = ma.masked_invalid(data, copy=True)
    z = ma.masked_invalid(z, copy=True)
    t = cube.coord(axis='T')
    t = t.units.num2date(t.points)
    
    dist, pha = sw.dist(lat, lon, units='km')
    dist = np.r_[0, np.cumsum(dist)]
    
    dist, z = np.broadcast_arrays(dist[..., None], z)
    
    fig, ax = plt.subplots(figsize=(9, 3.75))
    cs = ax.pcolor(dist, z, data, cmap=cmap, snap=True, **kw)
    plt.colorbar(cs)
    if mask_topo:
        h = z.max(axis=1)
        x = dist[:, 0]
        ax.plot(x, h, color='black', linewidth='0.5', zorder=3)
        ax.fill_between(x, h, y2=h.max(), color='0.9', zorder=3)
    ax.invert_yaxis()
    ax.set_title('Glider track from {} to {}'.format(t[0], t[-1]))
    fig.tight_layout()
    return fig, ax, cs
Example 10
def get_lapmat(pos0, pos1, max_disp=1000., dist_function=np.square):

    pos0 = np.asarray(pos0)
    pos1 = np.asarray(pos1)
    num_in, ndim = pos0.shape
    num_out, ndim = pos1.shape
    if ndim not in (2, 3):
        raise ValueError('''Only 2d and 3d data are supported''')

    lapmat = np.full((num_in + num_out,
                      num_in + num_out), np.nan)
    costmat, p90 = get_costmat(pos0, pos1, max_disp, dist_function)
    m_costmat = ma.masked_invalid(costmat)
    lapmat[:num_in, :num_out] = costmat
    if np.all(np.isnan(costmat)):
        birthcost = deathcost = 1.
    else:
        birthcost = deathcost = np.percentile(m_costmat.compressed(), 90)
    lapmat[num_in:, :num_out] = get_birthmat(pos1, birthcost)
    lapmat[:num_in, num_out:] = get_deathmat(pos0, deathcost)
    m_lapmat = ma.masked_invalid(lapmat)
    fillvalue = m_lapmat.max() * 1.05
    lapmat[num_in:, num_out:] = get_lowerright(costmat, fillvalue)

    return lapmat
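
The birth/death cost is the 90th percentile of the finite costs only; compressed() is what makes np.percentile safe on a matrix with NaN padding. A small demonstration:

import numpy as np
import numpy.ma as ma

costmat = np.array([[1.0, np.nan],
                    [4.0, 9.0]])
m = ma.masked_invalid(costmat)
print(np.percentile(m.compressed(), 90))  # percentile over [1, 4, 9] only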
Example 11
def calc_ratio(fsoil_mary, fsoil_kettle):
    lon, lat, topo = sp.parse_STEM_coordinates(
        os.path.join(os.environ['SARIKA_INPUT'], 'TOPO-124x124.nc'))
    fsoil_mary = maskoceans(lon, lat, fsoil_mary)
    fsoil_kettle = maskoceans(lon, lat, fsoil_kettle)
    ratio = ma.masked_invalid(fsoil_kettle) / ma.masked_invalid(fsoil_mary)
    return ratio
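
Dividing two masked_invalid arrays masks not only the NaN inputs but also zero denominators, because numpy.ma uses a domain-checked divide. In isolation:

import numpy as np
import numpy.ma as ma

kettle = np.array([1.0, np.nan, 2.0])
mary = np.array([2.0, 1.0, 0.0])

ratio = ma.masked_invalid(kettle) / ma.masked_invalid(mary)
print(ratio)  # [0.5 -- --]: NaN numerator and zero denominator both masked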
Example 12
    def set_UVC(self, U, V, C=None):
        self.u = ma.masked_invalid(U, copy=False).ravel()
        self.v = ma.masked_invalid(V, copy=False).ravel()
        if C is not None:
            c = ma.masked_invalid(C, copy=False).ravel()
            x, y, u, v, c = delete_masked_points(self.x.ravel(),
                                                 self.y.ravel(),
                                                 self.u, self.v, c)
        else:
            x, y, u, v = delete_masked_points(self.x.ravel(), self.y.ravel(),
                                              self.u, self.v)

        magnitude = np.hypot(u, v)
        flags, barbs, halves, empty = self._find_tails(magnitude,
                                                       self.rounding,
                                                       **self.barb_increments)

        # Get the vertices for each of the barbs

        plot_barbs = self._make_barbs(u, v, flags, barbs, halves, empty,
                                      self._length, self._pivot, self.sizes,
                                      self.fill_empty, self.flip)
        self.set_verts(plot_barbs)

        # Set the color array
        if C is not None:
            self.set_array(c)

        # Update the offsets in case the masked data changed
        xy = np.hstack((x[:, np.newaxis], y[:, np.newaxis]))
        self._offsets = xy
Example 13
def get_roms(url, time_slice, n=3):
    url = parse_url(url)
    with Dataset(url) as nc:
        ncv = nc.variables
        time = ncv['ocean_time']
        tidx = date2index(time_slice, time, select='nearest')
        time = num2date(time[tidx], time.units, time.calendar)

        mask = ncv['mask_rho'][:]
        lon_rho = ncv['lon_rho'][:]
        lat_rho = ncv['lat_rho'][:]
        anglev = ncv['angle'][:]

        u = ncv['u'][tidx, -1, ...]
        v = ncv['v'][tidx, -1, ...]

        u = shrink(u, mask[1:-1, 1:-1].shape)
        v = shrink(v, mask[1:-1, 1:-1].shape)

        u, v = rot2d(u, v, anglev[1:-1, 1:-1])

        lon = lon_rho[1:-1, 1:-1]
        lat = lat_rho[1:-1, 1:-1]

        u, v = u[::n, ::n], v[::n, ::n]
        lon, lat = lon[::n, ::n], lat[::n, ::n]

        u = ma.masked_invalid(u)
        v = ma.masked_invalid(v)
    return dict(lon=lon, lat=lat, u=u, v=v, time=time)
Example 14
    def agg_stat_2d_pwrspec(self, use_masked=False, debug=None):
        r"""TODO: add corr and cov"""
        comb_arr = self.combination_array_2d(counts=False, debug=debug)
        counts_arr = self.combination_array_2d(counts=True, debug=debug)

        stat_summary = {}
        for treatment in self.treatment_cases:
            entry = {}
            if use_masked:
                counts_ma = ma.masked_invalid(counts_arr[treatment])
                comb_ma = ma.masked_invalid(comb_arr[treatment])
                entry['counts'] = np.ma.mean(counts_ma, axis=2)
                entry['mean'] = np.ma.mean(comb_ma, axis=2)
                if self.num_comb > 1:
                    entry['std'] = np.ma.std(comb_ma, axis=2, ddof=1)
                else:
                    entry['std'] = 0.
            else:
                entry['counts'] = np.mean(counts_arr[treatment], axis=2)
                entry['mean'] = np.mean(comb_arr[treatment], axis=2)
                if self.num_comb > 1:
                    try:
                        entry['std'] = np.std(comb_arr[treatment], axis=2, ddof=1)
                    except FloatingPointError:
                        print "ERROR: stdev of spectral pairs failed"
                        entry['std'] = 0.
                else:
                    entry['std'] = 0.
            stat_summary[treatment] = entry

        return stat_summary
Example 15
 def __init__(self, lats, lons, latsCenter, lonsCenter, elev, alti, img, cameraPosGCRS, photoTime, 
              station, minBrightness=None, maxBrightness=None):
     """
     
     :param lats: (h+1,w+1) in degrees
     :param lons: (h+1,w+1) in degrees
     :param latsCenter: (h,w) in degrees
     :param lonsCenter: (h,w) in degrees
     :param elev: elevation in degrees for each pixel center (h,w), can be None  
     :param alti: the altitude in km onto which the image was mapped (e.g. 110)
     :param img: masked array, (h,w) or (h,w,3) in [0,255] as int or float, can have NaN's
     :param cameraPosGCRS: [x,y,z] in km
     :param photoTime: datetime object
     :param string station: 
     :param minBrightness: 
     :param maxBrightness:
     """
     assert img.ndim == 2
     h, w = img.shape[0], img.shape[1]
     assert lats.shape == lons.shape == (h+1, w+1)
     assert elev is None or elev.shape == (img.shape[0], img.shape[1])
     
     # adapted from web filenames: RANK.2013.09.26.05.03.gif
     identifier = station + '.' + photoTime.strftime('%Y.%m.%d.%H.%M.%S')
     
     BaseMapping.__init__(self, alti, cameraPosGCRS, photoTime, identifier)
     self._img = img[:,:,None]
     self._lats = ma.masked_invalid(lats, copy=False)
     self._lons = ma.masked_invalid(lons, copy=False)
     self._latsCenter = ma.masked_invalid(latsCenter, copy=False)
     self._lonsCenter = ma.masked_invalid(lonsCenter, copy=False)
     self._elevation = ma.masked_invalid(elev, copy=False)
     # allow to be changed from outside:
     self.minBrightness = minBrightness
     self.maxBrightness = maxBrightness
Example 16
    def agg_stat_1d_pwrspec(self, from_2d=False, use_masked=False):
        r"""TODO: use masked array here"""
        (counts_arr, gerror_arr, comb_arr) = \
            self.combination_array_1d(from_2d=from_2d)

        stat_summary = {}
        for treatment in self.treatment_cases:
            entry = {}
            if use_masked:
                gerror_ma = ma.masked_invalid(gerror_arr[treatment])
                counts_ma = ma.masked_invalid(counts_arr[treatment])
                comb_ma = ma.masked_invalid(comb_arr[treatment])

                if self.num_comb > 1:
                    entry['counts'] = np.ma.mean(counts_ma, axis=1)
                    entry['mean'] = np.ma.mean(comb_ma, axis=1)
                    entry['gauss_std'] = np.ma.mean(gerror_ma, axis=1)
                    entry['std'] = np.ma.std(comb_ma, axis=1, ddof=1)
                    entry['corr'] = np.ma.corrcoef(comb_ma)
                    entry['cov'] = np.ma.cov(comb_ma)
                else:
                    entry['counts'] = counts_ma[:, 0]
                    entry['mean'] = comb_ma[:, 0]
                    entry['gauss_std'] = gerror_ma[:, 0]
                    entry['std'] = 0.
                    entry['corr'] = 0.
                    entry['cov'] = 0.
            else:
                comb_treat = comb_arr[treatment]
                if self.num_comb > 1:
                    entry['counts'] = np.mean(counts_arr[treatment], axis=1)
                    entry['mean'] = np.mean(comb_treat, axis=1)
                    entry['gauss_std'] = np.mean(gerror_arr[treatment], axis=1)
                    entry['std'] = np.std(comb_treat, axis=1, ddof=1)
                    try:
                        entry['corr'] = np.corrcoef(comb_treat)
                    except FloatingPointError:
                        print "error in calculating corr function"
                        nside = entry['mean'].shape[0]
                        entry['corr'] = np.zeros((nside, nside))

                    try:
                        entry['cov'] = np.cov(comb_treat)
                    except FloatingPointError:
                        print "error in calculating cov function"
                        nside = entry['mean'].shape[0]
                        entry['cov'] = np.zeros((nside, nside))

                else:
                    entry['counts'] = counts_arr[treatment][:, 0]
                    entry['mean'] = comb_treat[:, 0]
                    entry['gauss_std'] = gerror_arr[treatment][:, 0]
                    entry['std'] = 0.
                    entry['corr'] = 0.
                    entry['cov'] = 0.

            stat_summary[treatment] = entry

        return stat_summary
Example 17
def cloud_statistics(file_name):
    """
    Return core duration, minimum core base, maximum core height, mean core 
    mass, formation time, dissipation time, maximum depth, depth evolution and 
    corresponding times for tracked clouds.
        
    Parameters
    ----------
    file_name : netCDF file name
        id_profile file for a tracked core with dimensions double t(t), 
        double z(z).
      
    Returns
    -------
    tuple : cloud_id, lifetime, base, top, mass, l_min, l_max, depths,
        max_depth, times
    """
    
    # Read netCDF dataset
    data = Dataset(file_name)
    
    # Core ID
    cloud_id = int(file_name[-11:-3])
    
    # Core duration (seconds)
    times = data.variables['t'][...]
    lifetime = len(times)*mc.dt
    
    # Formation time, dissipation time (seconds)
    l_min = times.min()*mc.dt
    l_max = times.max()*mc.dt

    # Minimum core base, maximum core height, maximum depth, depth evolution 
    # (metres)
    area = ma.masked_invalid(data.variables['AREA'][...])
    z = data.variables['z'][...]
    z = z*np.ones(np.shape(area))
    z = ma.masked_array(z, ma.getmask(area)) 
    bases = z.min(axis=1)
    tops = z.max(axis=1)
    depths = tops - bases + mc.dz
    max_depth = depths.max()
    base = bases.min()
    top = tops.max()

    # Mean core mass (kilograms)
    qn = ma.masked_invalid(data.variables['QN'][...])
    rho = ma.masked_invalid(data.variables['RHO'][...])
    mass = np.mean(np.sum(area*rho*mc.dz, axis=1))

    # Remove missing values
    times = ma.masked_array(times, ma.getmask(depths))
    depths = depths[~depths.mask]
    times = times[~times.mask]
    
    data.close()
    
    return cloud_id, lifetime, base, top, mass, l_min, l_max, depths, \
        max_depth, times
Example 18
 def azElCenter(self):
     """
     Azimuth and elevation for each pixel center.
     """
     az, el = self.calculateAzEl(center=True)
     az = ma.masked_invalid(az, copy=False)
     el = ma.masked_invalid(el, copy=False)
     return az, el
Example 19
def gaussians(x, x0, A, sig):
    x0m = ma.masked_invalid(np.atleast_1d(x0))
    Am = ma.masked_invalid(np.atleast_1d(A))
    sigm = ma.masked_invalid(np.atleast_1d(sig))
    amp = Am * np.sqrt(0.5 / np.pi) / sigm
    [X, X0] = np.meshgrid(x, x0m)
    gg = np.einsum('...i,...ij->...j', amp,
                   np.exp(-0.5 * (X - X0)**2 / np.tile(sigm**2, (np.shape(x)[0], 1)).T))
    return gg
Example 20
def water_mass_mixing(T, S, indices):
    """
    Computes the water mass mixing percentage based on water mass core
    indices.

    The calculations are based on the mixing triangle (Mamayev 1975).

    Parameters
    ----------
    T : array like
        Temperature.
    S : array like
        Salinity.
    indices : array like
        A list/array with the core thermohaline indices for each water
        mass.

    Returns
    -------
    m1, m2, m3 : array like
        Relative composition for water masses 1, 2 and 3.

    Examples
    --------

    Reference
    ---------
    [1] Mamayev, O. I. (Ed.). Temperature -- Salinity Analysis of World
        Ocean Waters. Elsevier, 1975, 11.

    Notes
    -----
    This function is based upon code developed by Filipe Fernandes and
    available at https://ocefpaf.github.io/python4oceanographers/blog/
    2014/03/24/watermass/.

    """
    # Makes sure input parameters are numpy arrays
    T, S, indices = asarray(T), asarray(S), asarray(indices)

    # Creates linear system of three equations based on input parameters.
    a = r_[indices, ones((1, 3))]
    b = c_[T.ravel(), S.ravel(), ones(T.shape).ravel()].T
    m = linalg.solve(a, b)

    # The mixing indices 
    m1 = m[0].reshape(T.shape)
    m2 = m[1].reshape(T.shape)
    m3 = m[2].reshape(T.shape)

    # Mask values outside the mixing triangle.
    m1 = ma.masked_outside(ma.masked_invalid(m1), 0, 1)
    m2 = ma.masked_outside(ma.masked_invalid(m2), 0, 1)
    m3 = ma.masked_outside(ma.masked_invalid(m3), 0, 1)

    return m1, m2, m3
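
A worked call of the mixing-triangle solve above, using hypothetical core indices for three water masses (a self-contained sketch with explicit numpy names instead of the star-imported ones in the source):

import numpy as np

# hypothetical (T, S) core indices, arranged as a (2, 3) array: T row, S row
indices = np.array([[18.0, 36.0],
                    [4.0, 34.0],
                    [12.0, 35.0]]).T

T = np.array([11.0])
S = np.array([35.0])

a = np.r_[indices, np.ones((1, 3))]
b = np.c_[T, S, np.ones(T.shape)].T
m = np.linalg.solve(a, b)
print(m.ravel())  # [0.5 0.5 0. ]: fractions of each water mass, summing to 1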
Example 21
 def __setitem__(self, key, value):
     # Makes sure that value is a Variable object
     if not isinstance(value, atlantis.data.Variable):
         raise ValueError("`{}` is not a Variable!".format(key))
     if not isinstance(value.data, ma.MaskedArray) and value.data is not None:
         value.data = ma.masked_invalid(value.data)
     if (not isinstance(value.standard_deviation, ma.MaskedArray)
             and value.standard_deviation is not None):
         value.standard_deviation = ma.masked_invalid(value.standard_deviation)
     # Assigns value to class' list of items (fields)
     self.fields[key] = value
Example 22
File: cnv.py Project: xuanblo/jcvi
    def predict(self, X):
        # Handle missing values
        X = ma.masked_invalid(X)
        mask = X.mask
        dX = ma.compressed(X).reshape(-1, 1)
        dZ = self.model.predict(dX)
        Z = np.full(X.shape[0], np.nan)
        Z[~mask] = dZ
        Z = ma.masked_invalid(Z)

        return Z * self.step
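
The pattern in predict (compress away the NaNs, run the model on the dense values, then scatter the results back into an NaN-shaped output) in isolation, with a stand-in for self.model:

import numpy as np
import numpy.ma as ma

X = np.array([0.5, np.nan, 2.5])
Xm = ma.masked_invalid(X)
mask = Xm.mask

dX = ma.compressed(Xm).reshape(-1, 1)
dZ = (dX * 2).ravel()            # stand-in for self.model.predict

Z = np.full(X.shape[0], np.nan)
Z[~mask] = dZ                    # scatter predictions back to original positions
print(ma.masked_invalid(Z))      # [1.0 -- 5.0]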
Example 23
def readrasterband(dataset, aband, NoDataVal=None, masked=True):
    """Accepts GDAL raster dataset and band number, returns Numpy 2D-array."""
    if dataset.RasterCount >= aband:
        # Get one band
        band = dataset.GetRasterBand(aband)
        # test for user specified input NoDataValue
        if NoDataVal is None:
            # test for band-specified NoDataValue
            if band.GetNoDataValue() is not None:
                NoDataVal = band.GetNoDataValue()
                # print(NoDataVal)
            else:
                # else set NoDataValue to be 9999.
                NoDataVal = 9999
        # set NoDataVal for the band (not strictly needed, but good practice if we call the band later).
        band.SetNoDataValue(NoDataVal)
        # create blank array (full of 0's) to hold extracted data [note Y,X format], get data type from dictionary.
        datarray = np.zeros((band.YSize, band.XSize), gdt2npy[band.DataType])
        # create loop based on YAxis (i.e. num rows)
        for i in range(band.YSize):
            # read lines of band
            scanline = band.ReadRaster(0, i, band.XSize, 1, band.XSize, 1, band.DataType)
            # unpack from binary representation
            tuple_of_vals = struct.unpack(gdt2struct[band.DataType] * band.XSize, scanline)
            # tuple_of_floats = struct.unpack('f' * band.XSize, scanline)
            # add tuple to image array line by line
            datarray[i, :] = tuple_of_vals

        # check if masked=True
        if masked:
            # check if data type is int or float using dictionary for numeric test.
            if npy2gdt[datarray.dtype.name] <= 5:
                # data is integer use masked_equal
                # apply NoDataValue masking.
                dataraster = ma.masked_equal(datarray, NoDataVal, copy=False)
                # apply invalid data masking
                dataraster = ma.masked_invalid(dataraster, copy=False)
                return dataraster
            else:
                # data is float use masked_values
                dataraster = ma.masked_values(datarray, NoDataVal, copy=False)
                # finally apply mask for NaN values
                dataraster = ma.masked_invalid(dataraster, copy=False)
                # return array (raster)
                return dataraster
        else:
            # user wants numpy array, no masking.
            return datarray
    else:
        raise TypeError("requested band number exceeds the dataset's RasterCount")
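
Chaining masked_values (the NoData value) with masked_invalid (NaN/inf) ORs the two masks together, which is what the float branch above relies on. A minimal demonstration:

import numpy as np
import numpy.ma as ma

arr = np.array([9999.0, 1.5, np.nan, 2.5])
m = ma.masked_values(arr, 9999.0, copy=False)  # NoData mask
m = ma.masked_invalid(m, copy=False)           # OR in the NaN/inf mask
print(m)  # [-- 1.5 -- 2.5]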
Example 24
def adaptive_distribution(obs, binwidth, limits=None, factor=2.0, k_max=21, right_most=3):
    """
    An adaptive binning technique that overlays multiple histograms with
    increasing bin widths.

    References
    ----------
    [1] Liebovitch, L. S., A. T. Todorov, M. Zochowski, D. Scheurle, L. Colgin,
        M. A. Wood, K. A. Ellenbogen, J. M. Herre, and R. C. Bernstein. 1999.
        ``Nonlinear Properties of Cardiac Rhythm Abnormalities.''
        Physical Review E 59 (3): 3312–3319.

    """
    # reassign: masked_invalid returns a new masked array, it does not act in place
    obs = nma.masked_invalid(nma.asanyarray(obs))
    obs = numpy.ravel(obs)
    if obs.size == 0:
        return list()
    if limits is None:
        mn = obs.min()
        mx = obs.max()
    else:
        (mn, mx) = limits
    offset = binwidth * 0.5
    if limits is None:
        limits = (mn - offset, mx + offset)
    num_bins = int(numpy.floor((mx - mn) / binwidth))
    freq = numpy.histogram(obs[~nma.getmaskarray(obs)], num_bins, range=limits)[0]
    total = float(freq.sum())
    print(0)
    print(binwidth)
    print(freq[0:right_most + 1])
    results = list()
    iteration = 1
    while len(freq) > right_most and not (freq[1:right_most] == 0).any():
        for (k, count) in enumerate(freq[1:]):
            if count == 0 or k == k_max:
                break
            results.append(((k + 0.5) * binwidth + limits[0], count / (binwidth * total)))
        binwidth *= factor
        iteration += 1
        # new loop
        num_bins = int(numpy.floor((mx - mn) / binwidth))
        # use only unmasked values, matching the first pass above
        freq = numpy.histogram(obs[~nma.getmaskarray(obs)], num_bins, range=limits)[0]
        total = float(freq.sum())
        print(iteration)
        print(binwidth)
        print(freq[0:right_most + 1])

    return sorted(results)
Example 25
 def _accum_to_avg(accumulation):
     ret = {}
     for k,v in accumulation.items():
         var = ma.masked_invalid(_wvar(v["weighted_flux_sum"],
                 v["weighted_flux_squared_sum"], v["weight_sum"],
                 v["weight_squared_sum"]))
         ret[k] = {
             "wlen": ma.masked_invalid(v["weighted_wlen_sum"] / v["weight_sum"]),
             "flux": ma.masked_invalid(v["weighted_flux_sum"] / v["weight_sum"]),
             "var": var,
             "dflux": np.ma.sqrt(var),
             "old_dflux": np.ma.sqrt(ma.masked_invalid(v["var_sum"] / v["count"])),
             "count": v["count"]
             }
     return ret
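
_wvar is not shown in this example; a weighted-variance helper consistent with the accumulated sums it receives might look like the following. The formula is an assumption (the standard reliability-weights correction), not necessarily the author's:

import numpy as np

def _wvar(wf_sum, wf2_sum, w_sum, w2_sum):
    # weighted second moment minus the squared weighted mean...
    mean = wf_sum / w_sum
    var = wf2_sum / w_sum - mean ** 2
    # ...with the usual unbiased correction for reliability weights
    return var / (1.0 - w2_sum / w_sum ** 2)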
Example 26
 def doResample(mapping, pxPerDeg, arcsecPerPx, containsPole):
     # trigger calculation of properties so that they are not included in the timing measurements
     mapping.lats
     mapping.latsCenter
     mapping.elevation
     mapping.img
     t0 = time.time()
     
     if containsPole is None:
         containsPole = mapping.containsPole
         
     if arcsecPerPx:
         pxPerDeg = plateCarreeResolution(mapping.boundingBox, arcsecPerPx)
     else:
         try:
             _, _ = pxPerDeg
         except TypeError:
             assert pxPerDeg is not None
             pxPerDeg = (pxPerDeg, pxPerDeg)
     print('pxPerDeg: ' + str(pxPerDeg))
     
     imgIsInt = mapping.img.dtype in [np.uint8, np.uint16, np.uint32, np.uint64, np.int8, np.int16, np.int32, np.int64]
     
     # merge elevation with rgb array and extract channels afterwards
     merged = np.dstack((mapping.img.astype(np.float64).filled(np.nan),
                         mapping.elevation.filled(np.nan)))
     lats, lons, latsCenter, lonsCenter, merged = \
         _resample(mapping.latsCenter.filled(np.nan), mapping.lonsCenter.filled(np.nan), mapping.altitude,
                   merged,
                   lambda: mapping.outline, mapping.boundingBox,
                   pxPerDeg, mapping.containsDiscontinuity, containsPole,
                   method=method)
             
     img, elevation = np.dsplit(merged, [-1])
     if imgIsInt:
         with np.errstate(invalid='ignore'):
             img = np.round(img)
     img = np.require(ma.masked_invalid(img, copy=False), mapping.img.dtype)
     if mapping.img.ndim == 2:
         img = img.reshape(img.shape[0], img.shape[1])            
     elevation = elevation.reshape(elevation.shape[0],elevation.shape[1])
     elevation = ma.masked_invalid(elevation, copy=False)
 
     resampledMapping = mapping.createResampled(lats, lons, latsCenter, lonsCenter, elevation, img)
     
     print('resampling:', time.time()-t0, 's')
     
     return resampledMapping
Example 27
    def _make_verts(self, U, V):
        uv = (U + V * 1j)
        if self.angles == 'xy' and self.scale_units == 'xy':
            # Here eps is 1 so that if we get U, V by diffing
            # the X, Y arrays, the vectors will connect the
            # points, regardless of the axis scaling (including log).
            angles, lengths = self._angles_lengths(U, V, eps=1)
        elif self.angles == 'xy' or self.scale_units == 'xy':
            # Calculate eps based on the extents of the plot
            # so that we don't end up with roundoff error from
            # adding a small number to a large.
            eps = np.abs(self.ax.dataLim.extents).max() * 0.001
            angles, lengths = self._angles_lengths(U, V, eps=eps)
        if self.scale_units == 'xy':
            a = lengths
        else:
            a = np.absolute(uv)
        if self.scale is None:
            sn = max(10, math.sqrt(self.N))
            if self.Umask is not ma.nomask:
                amean = a[~self.Umask].mean()
            else:
                amean = a.mean()
            scale = 1.8 * amean * sn / self.span  # crude auto-scaling
                # scale is typical arrow length as a multiple
                # of the arrow width
        if self.scale_units is None:
            if self.scale is None:
                self.scale = scale
            widthu_per_lenu = 1.0
        else:
            if self.scale_units == 'xy':
                dx = 1
            else:
                dx = self._dots_per_unit(self.scale_units)
            widthu_per_lenu = dx / self._trans_scale
            if self.scale is None:
                self.scale = scale * widthu_per_lenu
        length = a * (widthu_per_lenu / (self.scale * self.width))
        X, Y = self._h_arrows(length)
        if self.angles == 'xy':
            theta = angles
        elif self.angles == 'uv':
            theta = np.angle(uv)
        else:
            # Make a copy to avoid changing the input array.
            theta = ma.masked_invalid(self.angles, copy=True).filled(0)
            theta = theta.ravel()
            theta *= (np.pi / 180.0)
        theta.shape = (theta.shape[0], 1)  # for broadcasting
        xy = (X + Y * 1j) * np.exp(1j * theta) * self.width
        xy = xy[:, :, np.newaxis]
        XY = np.concatenate((xy.real, xy.imag), axis=2)
        if self.Umask is not ma.nomask:
            XY = ma.array(XY)
            XY[self.Umask] = ma.masked
            # This might be handled more efficiently with nans, given
            # that nans will end up in the paths anyway.

        return XY
Example 28
 def _make_verts(self, U, V):
     uv = U + V * 1j
     a = np.absolute(uv)
     if self.scale is None:
         sn = max(10, math.sqrt(self.N))
         if self.Umask is not ma.nomask:
             amean = a[~self.Umask].mean()
         else:
             amean = a.mean()
         scale = 1.8 * amean * sn / self.span  # crude auto-scaling
         self.scale = scale
     length = a / (self.scale * self.width)
     X, Y = self._h_arrows(length)
     if self.angles == "xy":
         theta = self._angles(U, V)
     elif self.angles == "uv":
         theta = np.angle(uv)
     else:
         theta = ma.masked_invalid(self.angles, copy=False).filled(0)
         theta = theta.ravel()
         theta *= np.pi / 180.0
     theta.shape = (theta.shape[0], 1)  # for broadcasting
     xy = (X + Y * 1j) * np.exp(1j * theta) * self.width
     xy = xy[:, :, np.newaxis]
     XY = np.concatenate((xy.real, xy.imag), axis=2)
     if self.Umask is not ma.nomask:
         XY = ma.array(XY)
         XY[self.Umask] = ma.masked
     return XY
Example 29
def quantify_effect_smoothing_freq():
    """Test how much total dissipation changes by smoothing density to
    2, 3, 4, and 6 Hz"""
    casts = np.r_[45:1150:3]
    eps_all = np.full((4, casts.size), np.nan)
    for i, cast in enumerate(casts):
        try:
            print(cast, end=' ')
            _, data = loadMVP_m1(cast, bin_data=False)

            for j, n_smooth_rho in enumerate([4, 6, 8, 12]):
                eps, Lt = calc_eps(
                    data['p_raw'], data['prho'], data['z'],
                    plot_overturns=False, n_smooth_rho=n_smooth_rho)
                eps_all[j, i] = eps.sum()
        except IndexError:
            pass

    print('\n\nCompared to low-passing at 3Hz, filtering at a different freq\n'
          'produces dissipation values of the following relative magnitude:')
    relative_mags = [ma.mean(ma.masked_invalid(eps_all[i]/eps_all[2]))
                     for i in [0, 1, 3]]
    print("""
    6 Hz: {0:2.2f}
    4 Hz: {1:2.2f}
    2 Hz: {2:2.2f}

    """.format(*relative_mags))
    return eps_all
Example 30
def main(tcu_fpath, col_name):
    data = tcu_io.load_untreated_csv_to_numpy(tcu_fpath)
    
    column = data[col_name]
    dtype = column.dtype
    
    masked = None
    if dtype.kind in ('i', 'f'):
        masked = ma.masked_invalid(column)
        
        x, y = ecdf(masked)
        plt.plot(x, y, 'bo')
        plt.show()
        
    else:
        #Simple hack for the string case. 
        #Creates a copy with masked values deleted. 
        masked = column[column != 'N/A']
        
        cat, y = categorical_hist(masked)
        
        x = range(1, len(cat) + 1)
        plt.bar(x, y, width = 0.5)
        plt.xticks(x, cat)
        plt.show()
Example 31
def plot_radials(dataset, *,
                 plot_type='velocity',
                 output_file=None,
                 extent=None, lon_ticks=None, lat_ticks=None,
                 scale=True,
                 sub=2,
                 cbar_step=10,
                 velocity_min=None, velocity_max=None,
                 markers=None,
                 prim_filter=False,
                 title='HF Radar'):
    """
    :param dataset: a file path to an xarray-compatible object or an xarray Dataset object
    """
    try:
        ds = xr.open_dataset(dataset)
        closing = ds.close
    except AttributeError:
        if isinstance(dataset, xr.Dataset):
            ds = dataset
            closing = int  # dummy func to close nothing
        else:
            raise

    tds = ds.squeeze()

    if prim_filter:
        if 'PRIM' in list(tds.keys()):
            tds = tds.where(tds.PRIM == 1).squeeze()
            title = title + ' - QC'
        else:
            logging.warning('PRIM flag not found. Bypassing quality control filters')

    time = ds.time.values[0]
    lon = tds.coords['lon'].data
    lat = tds.coords['lat'].data
    u = tds['u'].data
    v = tds['v'].data

    u = ma.masked_invalid(u)
    v = ma.masked_invalid(v)

    if scale:
        angle, speed = uv2spdir(u, v)  # convert u/v to angle and speed
        u, v = spdir2uv(  # convert angle and speed back to u/v, normalizing the arrow sizes
            np.ones_like(speed),
            angle,
            deg=True
        )

    velocity_min = velocity_min or -40
    velocity_max = velocity_max or 40

    kwargs = dict(extent=extent, lon_ticks=lon_ticks, lat_ticks=lat_ticks,
                  output_file=output_file,
                  sub=sub,
                  markers=markers,
                  title=title,
                  meshgrid=False,
                  scale=120, headwidth=2.5, headlength=4, headaxislength=4)

    if 'velocity' in plot_type:
        """
        Velocity displays the direction and magnitude of the radials
        """
        kwargs['colorbar'] = True
        kwargs['cmap'] = cmocean.cm.balance

        # Define arrow colors. Limited by velocity_min and velocity_max
        kwargs['color_clipped'] = np.clip(
            tds.velocity.data[::sub],
            velocity_min,
            velocity_max
        ).squeeze()
        kwargs['offset'] = Normalize(vmin=velocity_min, vmax=velocity_max, clip=True)
        kwargs['ticks'] = np.append(np.arange(velocity_min, velocity_max, cbar_step), velocity_max)
    elif 'motion' in plot_type:
        """
        Motion displays the direction (towards or away) from radar
        """
        kwargs['colorbar'] = False
        kwargs['cmap'] = 'bwr'

        velocity = tds.velocity
        velocity_temp = velocity.where(velocity > 0, other=-1)  # Going away from radar
        kwargs['color_clipped'] = velocity_temp.where(velocity < 0, other=1).data  # Going towards radar
        kwargs['offset'] = TwoSlopeNorm(vmin=-1, vcenter=0, vmax=1)
    elif 'qc_pass_fail' in plot_type:
        kwargs['colorbar'] = False
        kwargs['cmap'] = colors.ListedColormap(['limegreen', 'red'])

        if prim_filter:
            tds = ds.squeeze()
        kwargs['color_clipped'] = tds.PRIM.where(tds.PRIM != 1, other=-1).data  # PRIM == 1 where vectors pass qc
        kwargs['offset'] = TwoSlopeNorm(vmin=-1, vcenter=0, vmax=1)

    closing()
    plot_common(time, lon, lat, u, v, **kwargs)
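
The scale branch normalises every radial to unit length so only direction is drawn; uv2spdir/spdir2uv are external helpers, but the effect is equivalent to dividing by the masked speed. A sketch of that equivalence:

import numpy as np
import numpy.ma as ma

u = ma.masked_invalid(np.array([3.0, np.nan]))
v = ma.masked_invalid(np.array([4.0, 1.0]))

speed = np.hypot(u, v)            # masked wherever either component is invalid
u_unit, v_unit = u / speed, v / speed
print(u_unit, v_unit)             # [0.6 --] [0.8 --]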
Example 32
def pcolor_1D(values, y=None, **kwargs):
    """Create a pseudocolour plot of an array, Actually uses pcolormesh for speed.

    Create a colour plot of an array.
    If the arrays x, y, and values are geobipy.StatArray classes, the axes can be automatically labelled.
    Can take any other matplotlib arguments and keyword arguments e.g. cmap etc.

    Parameters
    ----------
    values : array_like or StatArray
        An array of colour values.
    x : 1D array_like or StatArray
        Horizontal coordinates of the values edges.
    y : 1D array_like or StatArray, optional
        Vertical coordinates of the values edges.

    Other Parameters
    ----------------
    log : 'e' or float, optional
        Take the log of the colour to a base. 'e' if log = 'e', and a number e.g. log = 10.
        Values in c that are <= 0 are masked.
    equalize : bool, optional
        Equalize the histogram of the colourmap so that all colours have an equal amount.
    nbins : int, optional
        Number of bins to use for histogram equalization.
    xscale : str, optional
        Scale the x axis? e.g. xscale = 'linear' or 'log'
    yscale : str, optional
        Scale the y axis? e.g. yscale = 'linear' or 'log'.
    flipY : bool, optional
        Flip the Y axis
    clabel : str, optional
        colourbar label
    grid : bool, optional
        Show the grid lines
    noColorBar : bool, optional
        Turn off the colour bar, useful if multiple customPlots plotting routines are used on the same figure.        

    Returns
    -------
    ax
        matplotlib .Axes

    See Also
    --------
    matplotlib.pyplot.pcolormesh : For additional keyword arguments you may use.

    """

    #    assert np.ndim(values) == 2, ValueError('Number of dimensions must be 2')

    equalize = kwargs.pop('equalize', False)

    log = kwargs.pop('log', False)
    xscale = kwargs.pop('xscale', 'linear')
    yscale = kwargs.pop('yscale', 'linear')

    cl = kwargs.pop('clabel', None)
    grid = kwargs.pop('grid', False)

    flipY = kwargs.pop('flipY', False)

    noColorBar = kwargs.pop('noColorBar', False)

    # Set the grid colour if specified
    c = None
    if grid:
        c = kwargs.pop('color', 'k')

    # Get the figure axis
    ax = plt.gca()
    plt.cla()
    pretty(ax)

    # Set the x and y axes before meshgridding them
    if (y is None):
        my = np.arange(np.size(values) + 1)
        mx = np.asarray([0.0, 0.1 * (np.nanmax(my) - np.nanmin(my))])
    else:
        assert y.size == values.size + 1, ValueError('y must be size ' +
                                                     str(values.size + 1))
        #my = np.hstack([np.asarray(y), y[-1]])
        my = y
        mx = np.asarray([0.0, 0.1 * (np.nanmax(y) - np.nanmin(y))])

    v = ma.masked_invalid(values)
    if (log):
        v, logLabel = _logSomething(v, log)

    # Append with null values to correctly use pcolormesh
    v = np.concatenate([np.atleast_2d(np.hstack([np.asarray(v), 0])),
                        np.atleast_2d(np.zeros(v.size + 1))], axis=0)

    if equalize:
        nBins = kwargs.pop('nbins', 256)
        assert nBins > 0, ValueError('nBins must be greater than zero')
        v, dummy = histogramEqualize(v, nBins=nBins)

    # Zm = ma.masked_invalid(v, copy=False)

    Y, X = np.meshgrid(my, mx)

    pm = plt.pcolormesh(X, Y, v, color=c, **kwargs)

    ax.set_aspect('equal')

    if (not flipY):
        ax.invert_yaxis()

    plt.yscale(yscale)
    ylabel(getNameUnits(y))
    ax.get_xaxis().set_ticks([])

    if (not noColorBar):
        if (equalize):
            cbar = plt.colorbar(pm, extend='both')
        else:
            cbar = plt.colorbar(pm)

        if cl is None:
            if (log):
                clabel(cbar, logLabel + getNameUnits(values))
            else:
                clabel(cbar, getNameUnits(values))
        else:
            clabel(cbar, cl)

    return ax
Example 33
def compute_plot_layer_corr_mat(eva_col,
                                evc_col,
                                H_col,
                                layernames,
                                titstr="BigGAN",
                                savestr="BigGAN",
                                figdir="",
                                use_cuda=False):
    Lnum = len(evc_col)
    corr_mat_lin = np.zeros((Lnum, Lnum))
    corr_mat_log = np.zeros((Lnum, Lnum))
    log_reg_slope = np.zeros((Lnum, Lnum))
    log_reg_intcp = np.zeros((Lnum, Lnum))
    for Li in range(Lnum):
        eva, evc = eva_col[Li], evc_col[Li]
        eva_i, evc_i = (eva, evc) if not use_cuda else \
            (torch.from_numpy(eva).cuda(), torch.from_numpy(evc).cuda())
        for Lj in range(Lnum):  # hessian target
            if H_col is not None:
                H = H_col[Lj]
                if use_cuda:
                    H = torch.from_numpy(H).cuda()
                    alphavec = torch.diag(evc_i.T @ H @ evc_i).cpu().numpy()
                else:
                    alphavec = np.diag(evc_i.T @ H @ evc_i)
            else:
                if use_cuda:
                    eva_j, evc_j = torch.from_numpy(
                        eva_col[Lj]).cuda(), torch.from_numpy(
                            evc_col[Lj]).cuda()
                    inpr = evc_i.T @ evc_j
                    alphavec = torch.diag((inpr * eva_j.view(1, -1)) @ inpr.T)
                    alphavec = alphavec.cpu().numpy()
                else:
                    inpr = evc_i.T @ evc_col[Lj]
                    # H = evc_col[Lj] @ np.diag(eva_col[Lj]) @ evc_col[Lj].T
                    alphavec = np.diag(
                        (inpr * eva_col[Lj].reshape(1, -1)) @ inpr.T)
    # for Li in range(Lnum):
    #     evc = evc_col[Li]
    #     eva = eva_col[Li]
    #     for Lj in range(Lnum):
    #         H = H_col[Lj]
    #         alphavec = np.diag(evc.T @ H @ evc)
            log10alphavec = np.log10(alphavec)
            log10eva = np.log10(eva)
            corr_mat_lin[Li, Lj] = np.corrcoef(alphavec, eva)[0, 1]
            corr_mat_log[Li, Lj] = ma.corrcoef(
                ma.masked_invalid(log10alphavec), ma.masked_invalid(log10eva))[
                    0, 1]  #np.corrcoef(log10alphavec, log10eva)[0,1]
            nanmask = (~np.isnan(log10alphavec)) * (~np.isnan(log10eva))
            slope, intercept = np.polyfit(log10eva[nanmask],
                                          log10alphavec[nanmask], 1)
            log_reg_slope[Li, Lj] = slope
            log_reg_intcp[Li, Lj] = intercept
    fig1 = plot_layer_mat(corr_mat_lin,
                          layernames=layernames,
                          titstr="Linear Correlation of Amplification in %s" %
                          titstr)
    fig1.savefig(join(figdir, "%s_Layer_corr_lin_mat.pdf" % savestr))
    fig2 = plot_layer_mat(
        corr_mat_log,
        layernames=layernames,
        titstr="Log scale Correlation of Amplification in %s" % titstr)
    fig2.savefig(join(figdir, "%s_Layer_corr_log_mat.pdf" % savestr))
    fig3 = plot_layer_mat(log_reg_slope,
                          layernames=layernames,
                          titstr="Log scale Slope of Amplification in %s" %
                          titstr)
    fig3.savefig(join(figdir, "%s_Layer_log_reg_slope.pdf" % savestr))
    fig4 = plot_layer_mat(log_reg_intcp,
                          layernames=layernames,
                          titstr="Log scale intercept of Amplification in %s" %
                          titstr)
    fig4.savefig(join(figdir, "%s_Layer_log_reg_intercept.pdf" % savestr))
    return corr_mat_lin, corr_mat_log, log_reg_slope, log_reg_intcp, fig1, fig2, fig3, fig4,
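
ma.corrcoef over masked_invalid log values is what lets non-positive eigenvalues, whose log10 is NaN or -inf, drop out of the correlation. In isolation:

import numpy as np
import numpy.ma as ma

with np.errstate(divide='ignore'):
    a = np.log10(np.array([1.0, 10.0, 0.0, 100.0]))      # log10(0) -> -inf
    b = np.log10(np.array([1.0, 100.0, 0.0, 10000.0]))

r = ma.corrcoef(ma.masked_invalid(a), ma.masked_invalid(b))[0, 1]
print(r)  # ~1.0, computed over the three finite pairs only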
Example 34
    def select_behavioural(
        self,
        output,
        method='treshold',
        threshold=0.0,
        percBest=0.2,
        norm=True,
        mask=True
    ):  # or simply set both to None and compute whatever is not None
        '''
        Select behavioural parameter sets, based on the output evaluation
        criterion used.

        The algorithm only builds a 'mask' for further operations, in order
        to avoid memory overload from copying matrices.

        Parameters
        -----------
        threshold :

        output : ndarray
            Nx1

        Notes
        -----
        The method can be 'treshold' based or 'percentage' based.
        All arrays must have the same size along axis 0 (vertical); the
        output function has only one column. InputPar is n x npar, output is
        n x 1, Output is n x nTimesteps. This must be done for each structure
        independently.

        Output of OF functions/likelihoods may be normalised or not (do so if
        different likelihoods have to be plotted together).
        '''
        # with copy=False a view is used rather than a copy (saves memory)
        # keep_mask is False, so every value is used
        # first mask the outputs where OF is nan or +/-inf
        if mask:
            #self.ma_output = ma.masked_invalid(output, copy=False, keep_mask = False)
            #self.ma_pars = ma.masked_invalid(self.parset2run, copy=False, keep_mask = False)

            self.ma_output = ma.masked_invalid(output, copy=False)
            self.ma_pars = ma.masked_invalid(self.parset2run, copy=False)
        else:
            self.ma_output = output
            self.ma_pars = self.parset2run

        if method == 'treshold':
            if mask:
                self.ma_output = ma.masked_where(self.ma_output < threshold,
                                                 self.ma_output,
                                                 copy=False)
                self.ma_pars = ma.masked_where(self.ma_output < threshold,
                                               self.ma_pars,
                                               copy=False)
            else:
                # boolean selection instead of masking (the original built a
                # tuple here by mistake); assumed intent: keep values >= threshold
                keep = self.ma_output >= threshold
                self.ma_pars = self.ma_pars[keep]
                self.ma_output = self.ma_output[keep]
            InputPar_Behav = self.ma_pars
            output_Behav = self.ma_output

        elif method == 'percentage':
            # NOTE: InputPar is undefined in the original; the parameter
            # array from above is the likely intent
            InputPar = self.parset2run
            nr = np.size(InputPar, 0)
            ind = argsort(output)  # gives indices of the worst values first
            NbIndic = int(round(
                (1 - percBest) * ind.size))  # select that percentage of indices
            indBad = ind[:NbIndic]  # identify the worst ones to discard
            output_Behav = np.delete(output, indBad, 0)
            InputPar_Behav = np.delete(InputPar, indBad, 0)

        else:
            print(' Choose appropriate method: treshold or percentage')

        # Normalise the objective function to sum to 1
        if norm:
            Tempor = output_Behav.sum() / output_Behav
            output_Behav = Tempor / Tempor.sum()
        # print('check for the sum: %f' % output_Behav.sum())

        return [InputPar_Behav, output_Behav]
Example 35
Rad = np.load(
    '/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Rad_2018_2019CH2.npy')
fechas_horas = np.load(
    '/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_FechasHoras_CH2__2018_2019.npy'
)
fechas_horas_GOES = pd.to_datetime(fechas_horas,
                                   format="%Y-%m-%d %H:%M",
                                   errors='coerce')

lat_index_o = np.load(
    '/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Lat_index_Malla.npy')
lon_index_o = np.load(
    '/home/nacorreasa/Maestria/Datos_Tesis/Arrays/Array_Lon_index_Malla.npy')

mx_lat_index = ma.masked_invalid(lat_index_o)
mx_lon_index = ma.masked_invalid(lon_index_o)

lat_index = mx_lat_index.astype('int')
lon_index = mx_lon_index.astype('int')

##############################################################################################
## ---------------- DEFINITION OF THE ACTIONS FOR SELECTING THE DATA ---------------------- ##
##############################################################################################

R = np.zeros(Rad.shape, dtype=float) * np.nan
fechas_horas_new = []
raros = []
blist = []
for g in range(len(fechas_horas_GOES)):
    try:
Example 36
n1deflec = np.empty((nconds, nses))
n1sub = np.empty((nconds, nses))
condmat = np.empty((nconds, nses))
submat = np.empty((nconds, nses))
n1sub[:] = np.nan
n1deflec[:] = np.nan
condmat[:] = np.nan
submat[:] = np.nan
for n in range(0, n1lat.shape[0]):
    n1sub[conds[n] - 1, sescount[n] - 1] = n1lat[n]
    n1deflec[conds[n] - 1, sescount[n] - 1] = n1onset[n]
    condmat[conds[n] - 1, sescount[n] - 1] = conds[n]
    submat[conds[n] - 1, sescount[n] - 1] = sescount[n]
n1deflec /= 1000  #Convert from ms to seconds
n1deflec = ma.masked_invalid(n1deflec)  # Remove NANs for pyjags
n1sub /= 1000  #Convert from ms to seconds
n1sub = ma.masked_invalid(n1sub)  # Remove NANs for pyjags

# Initialize non-decision time with minimum RT for each subject and condition
minrt = np.empty((nconds, nses))
for k in range(0, nconds):
    for j in range(0, nses):
        where = (sessioncount == (j + 1)) & (condition == (k + 1))
        minrt[k, j] = np.min(np.abs(y[where]))

# pyjags code

# Make sure $LD_LIBRARY_PATH sees /usr/local/lib
pyjags.modules.load_module('wiener')
pyjags.modules.load_module('dic')
Example 37
def evaluate_target_regression_epoch(regressor,
                                     dataloader,
                                     device,
                                     history,
                                     output_folder=None,
                                     seed=None,
                                     cv_flag=False):
    y_truths = None
    y_preds = None
    regressor.eval()

    for x_batch, y_batch in dataloader:
        x_batch = x_batch.to(device)
        y_batch = y_batch.to(device)
        with torch.no_grad():
            y_truths = np.vstack([
                y_truths, y_batch.cpu().detach().numpy()
            ]) if y_truths is not None else y_batch.cpu().detach().numpy()
            y_pred = regressor(x_batch).detach()
            y_preds = np.vstack([
                y_preds, y_pred.cpu().detach().numpy()
            ]) if y_preds is not None else y_pred.cpu().detach().numpy()
    assert (y_truths.shape == y_preds.shape)
    if output_folder is not None:
        # output prediction
        if cv_flag:
            pd.DataFrame(y_truths).to_csv(
                f'{output_folder}/cv_truths_{seed}.csv')
            pd.DataFrame(y_preds).to_csv(
                f'{output_folder}/cv_preds_{seed}.csv')
        else:
            pd.DataFrame(y_truths).to_csv(f'{output_folder}/truths_{seed}.csv')
            pd.DataFrame(y_preds).to_csv(f'{output_folder}/preds_{seed}.csv')
    else:
        history['dpearsonr'].append(
            np.mean(
                np.abs([
                    pearsonr(
                        y_truths[:,
                                 i][~ma.masked_invalid(y_truths[:, i]).mask],
                        y_preds[:,
                                i][~ma.masked_invalid(y_truths[:, i]).mask])[0]
                    for i in range(y_truths.shape[1])
                ])).item())
        history['cpearsonr'].append(
            np.mean(
                np.abs([
                    pearsonr(
                        y_truths[i, :]
                        [~ma.masked_invalid(y_truths[i, :]).mask], y_preds[
                            i, :][~ma.masked_invalid(y_truths[i, :]).mask])[0]
                    for i in range(y_truths.shape[0])
                ])).item())
        history['dspearmanr'].append(
            np.mean(
                np.abs([
                    spearmanr(
                        y_truths[:,
                                 i][~ma.masked_invalid(y_truths[:, i]).mask],
                        y_preds[:,
                                i][~ma.masked_invalid(y_truths[:, i]).mask])[0]
                    for i in range(y_truths.shape[1])
                ])).item())
        history['cspearmanr'].append(
            np.mean(
                np.abs([
                    spearmanr(
                        y_truths[i, :]
                        [~ma.masked_invalid(y_truths[i, :]).mask], y_preds[
                            i, :][~ma.masked_invalid(y_truths[i, :]).mask])[0]
                    for i in range(y_truths.shape[0])
                ])).item())
        # history['cpearsonr'].append(pd.DataFrame(y_truths).corrwith(pd.DataFrame(y_preds), axis=1).mean())
        # history['dpearsonr'].append(pd.DataFrame(y_truths).corrwith(pd.DataFrame(y_preds), axis=0).mean())
        history['drmse'].append(
            np.mean(np.nanmean(np.square((y_truths - y_preds)),
                               axis=0)).item())
        history['crmse'].append(
            np.mean(np.nanmean(np.square((y_truths - y_preds)),
                               axis=1)).item())
        # history['pearsonr'].append(pearsonr(y_truths, y_preds)[0])
        # history['spearmanr'].append(spearmanr(y_truths, y_preds)[0])
        # history['r2'].append(r2_score(y_true=y_truths, y_pred=y_preds))
        # history['rmse'].append(mean_squared_error(y_true=y_truths, y_pred=y_preds, squared=False))
        return history
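
The repeated y[:, i][~ma.masked_invalid(y[:, i]).mask] idiom above is just per-column NaN filtering before the correlation. Reduced to one column:

import numpy as np
import numpy.ma as ma
from scipy.stats import pearsonr

truths = np.array([1.0, 2.0, np.nan, 4.0])
preds = np.array([1.1, 1.9, 3.0, 4.2])

keep = ~ma.masked_invalid(truths).mask   # same as np.isfinite(truths) here
print(pearsonr(truths[keep], preds[keep])[0])  # r over the finite truths only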
Example 38
nc_b = Dataset('Greenland_bedrock_topography_V3.nc', 'r+')
nc_o = Dataset('Racmo2MeanSMB_1961-1990.nc', 'r')

#Read the target grid
x_b = nc_b.variables['projection_x_coordinate'][:]
y_b = nc_b.variables['projection_y_coordinate'][:]
nx_b = x_b.shape[0]
ny_b = y_b.shape[0]

#List of variables to merge in
varlist_o = ['smb']
varlist_b = ['acab']

vdata_o = ndarray((ny_b, nx_b))
for v in range(len(varlist_o)):
    vdata_o[:, :] = 0.
    ncvar_o = nc_o.variables[varlist_o[v]]
    vdata_o[:, :] = ncvar_o[:, ::-1].transpose() / 910.
    vdata_o = ma.masked_invalid(vdata_o)
    vdata_o = vdata_o.filled(0.)
    ncvar_b = nc_b.createVariable(varlist_b[v], 'f4', (
        'y',
        'x',
    ))
    ncvar_b[:, :] = vdata_o[:, :]
    copy_atts(ncvar_o, ncvar_b)

nc_b.close()
nc_o.close()
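The snippet above calls copy_atts, which is defined elsewhere; a minimal sketch of what such a helper might look like with the netCDF4 API (an assumption, not the original implementation):

def copy_atts(var_in, var_out):
    # Copy every netCDF attribute from one variable to another.
    for att in var_in.ncattrs():
        var_out.setncattr(att, var_in.getncattr(att))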
Esempio n. 39
0
def plot_selection(alldomain=False):

    plt.figure(figsize=(6.0,4.0))  
    ax1 = plt.subplot(1,1,1)

    nc = Dataset(simul.ncfile, 'r')
    if alldomain:
        if adv3d or advdepth==0 or advsurf:
            if 'surf' in simul.simul:
                sst = np.squeeze(simul.Forder(nc.variables['temp'][simul.infiletime,:,:]))
            else:
                sst = np.squeeze(simul.Forder(nc.variables['temp'][simul.infiletime,-1,:,:]))
        else:
            [z_r,z_w] = part.get_depths(simul)
            temp =  simul.Forder(np.squeeze(nc.variables['temp'][simul.infiletime,:,:,:]))
            sst = part.vinterp(temp, [advdepth], z_r,z_w)[:,:,0]
        if not light: topo = simul.topo
    else:
        [ny1,ny2,nx1,nx2] = np.array(coord) #-ng

        if adv3d or advdepth==0 or advsurf:
            if 'surf' in simul.simul:
                sst = np.squeeze(simul.Forder(nc.variables['temp'][simul.infiletime,ny1:ny2,nx1:nx2]))
            else:
                sst = np.squeeze(simul.Forder(nc.variables['temp'][simul.infiletime,-1,ny1:ny2,nx1:nx2]))
        else:
            [z_r,z_w] = part.get_depths(simul,coord=[ny1,ny2,nx1,nx2])
            temp =  simul.Forder(np.squeeze(nc.variables['temp'][simul.infiletime,:,ny1:ny2,nx1:nx2]))
            sst = part.vinterp(temp, [advdepth], z_r,z_w)[:,:,0]

        if not light: topo = simul.topo[nx1:nx2,ny1:ny2]
    nc.close()

    #sst[sst<=0] = np.nan

    #plt.imshow(sst[:,::1].T); plt.colorbar(shrink=0.25);
    plt.pcolormesh(ma.masked_invalid(sst[:,:].T),cmap='jet',rasterized=True);
    plt.colorbar(shrink=0.25);

    if not advsurf and not adv3d:
        plt.contourf(topo.T,[0,-advdepth],
                     cmap = col.LinearSegmentedColormap.from_list('my_colormap',
                                                      ['white','lightgray'],256))
        plt.contourf(topo.T,[0,topo.min()],
                cmap = col.LinearSegmentedColormap.from_list('my_colormap',
                    ['Gainsboro','gray'],256))
        plt.contour(topo.T,[topo.min()],colors=('k',),linewidths=(0.5,));
        plt.contour(topo.T,[-advdepth],colors=('k',),linewidths=(0.2,));

    if alldomain:
        plt.plot(px[::1]+0.5,(py[::1]+0.5),'o', markersize=2,
                 markeredgecolor='k', markerfacecolor='k');
        plt.axis([0,sst.shape[0]-1,0,sst.shape[1]-1]);
    else:
        plt.plot(px[::1]-coord[2]+0.5,(py[::1]-coord[0]+0.5),'o', markersize=1,
                 markerfacecolor='white');
        #plt.axis('scaled');
        #plt.axis([nx1-coord[2]+0.5,nx2-coord[2]+0.5,ny1-coord[0]+0.5,ny2-coord[0]+0.5]);
    
    '''
    for isub,jsub in product(range(nsub_x),range(nsub_y)):
        try:
            plot_rect(subtightcoord_saves[jsub+isub*nsub_y],'r')
            plot_rect(subcoord_saves[jsub+isub*nsub_y],'k--')
        except:
            print 'no subdomains to plot'
    '''

    color = 'w'; box = 'round,pad=0.1'; props = dict(boxstyle=box, fc=color, ec='k', lw=1, alpha=1.)
    plt.title(format(np.sum(px>0)) + ' pyticles ' )
    plt.savefig(folderout + simulname + '_' + format(nproc) + '_' + '{0:04}'.format(time+dfile) +'.png', dpi=150,bbox_inches='tight'); plt.clf()
Esempio n. 40
0
                # split data where longitude increment is larger than median increment + 2 standard deviations
                lon_diff = np.diff(lon)
                # files are not always split, so check whether discontinuity in lon exists
                try:
                    lon_split = int(
                        np.where(lon_diff > (np.median(lon_diff) +
                                             2 * np.std(lon_diff)))[0])
                    split = True
                # if not, don't split
                except:
                    split = False

                split = False  # NOTE: overrides the detection above, so splitting is effectively disabled
                soil_moisture = nc["soil_moisture"][:]
                soil_moisture[soil_moisture > 100.] = np.nan
                soil_moisture = ma.masked_invalid(soil_moisture)

                # remap soil moisture on regular lat/lon grid
                data_in = pd.DataFrame({
                    'lat': lat_m.flatten(),
                    'lon': lon_m.flatten(),
                    'soil_moisture': soil_moisture.flatten()
                })
                target_grid = lat_lon_grid(globals.min_lat,
                                           globals.min_lon,
                                           globals.max_lat,
                                           globals.max_lon,
                                           delLat=globals.del_lat,
                                           delLon=globals.del_lon)
                target_grid.build_grid()
                max_distance = get_max_distance()
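The remapping step itself is cut off above; a hedged sketch of how the scattered points could be put on the regular target grid with scipy (grid_lat/grid_lon are assumed 2D coordinate arrays from the target grid; the original lat_lon_grid helper may work differently):

from scipy.interpolate import griddata

def remap_to_grid(data_in, grid_lat, grid_lon):
    # Nearest-neighbour remap of scattered soil-moisture samples.
    points = data_in[['lat', 'lon']].to_numpy()
    values = data_in['soil_moisture'].to_numpy()
    return griddata(points, values, (grid_lat, grid_lon), method='nearest')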
Esempio n. 41
0
    def fill_gradient(self, canvas, X, percentiles, color=Tango.colorsHex['mediumBlue'], label=None, **kwargs):
        ax = canvas
        plots = []

        if 'edgecolors' not in kwargs:
            kwargs['edgecolors'] = 'none'

        if 'facecolors' in kwargs:
            color = kwargs.pop('facecolors')

        if 'array' in kwargs:
            array = kwargs.pop('array')
        else:
            array = 1.-np.abs(np.linspace(-.97, .97, len(percentiles)-1))

        if 'alpha' in kwargs:
            alpha = kwargs.pop('alpha')
        else:
            alpha = .8

        if 'cmap' in kwargs:
            cmap = kwargs.pop('cmap')
        else:
            cmap = LinearSegmentedColormap.from_list('WhToColor', (color, color), N=array.size)
        cmap._init()
        cmap._lut[:-3, -1] = alpha*array

        kwargs['facecolors'] = [cmap(i) for i in np.linspace(0,1,cmap.N)]

        # pop where from kwargs
        where = kwargs.pop('where') if 'where' in kwargs else None
        # pop interpolate, which we actually do not do here!
        if 'interpolate' in kwargs: kwargs.pop('interpolate')

        def pairwise(iterable):
            "s -> (s0,s1), (s1,s2), (s2, s3), ..."
            from itertools import tee
            #try:
            #    from itertools import izip as zip
            #except ImportError:
            #    pass
            a, b = tee(iterable)
            next(b, None)
            return zip(a, b)

        polycol = []
        for y1, y2 in pairwise(percentiles):
            import matplotlib.mlab as mlab
            # Handle united data, such as dates
            ax._process_unit_info(xdata=X, ydata=y1)
            ax._process_unit_info(ydata=y2)
            # Convert the arrays so we can work with them
            from numpy import ma
            x = ma.masked_invalid(ax.convert_xunits(X))
            y1 = ma.masked_invalid(ax.convert_yunits(y1))
            y2 = ma.masked_invalid(ax.convert_yunits(y2))

            if y1.ndim == 0:
                y1 = np.ones_like(x) * y1
            if y2.ndim == 0:
                y2 = np.ones_like(x) * y2

            if where is None:
                where = np.ones(len(x), bool)
            else:
                where = np.asarray(where, bool)

            if not (x.shape == y1.shape == y2.shape == where.shape):
                raise ValueError("Argument dimensions are incompatible")

            from functools import reduce
            mask = reduce(ma.mask_or, [ma.getmask(a) for a in (x, y1, y2)])
            if mask is not ma.nomask:
                where &= ~mask

            polys = []
            for ind0, ind1 in mlab.contiguous_regions(where):
                xslice = x[ind0:ind1]
                y1slice = y1[ind0:ind1]
                y2slice = y2[ind0:ind1]

                if not len(xslice):
                    continue

                N = len(xslice)
                p = np.zeros((2 * N + 2, 2), float)

                # the purpose of the next two lines is for when y2 is a
                # scalar like 0 and we want the fill to go all the way
                # down to 0 even if none of the y1 sample points do
                start = xslice[0], y2slice[0]
                end = xslice[-1], y2slice[-1]

                p[0] = start
                p[N + 1] = end

                p[1:N + 1, 0] = xslice
                p[1:N + 1, 1] = y1slice
                p[N + 2:, 0] = xslice[::-1]
                p[N + 2:, 1] = y2slice[::-1]

                polys.append(p)
            polycol.extend(polys)
        from matplotlib.collections import PolyCollection
        if 'zorder' not in kwargs:
            kwargs['zorder'] = 0
        plots.append(PolyCollection(polycol, **kwargs))
        ax.add_collection(plots[-1], autolim=True)
        ax.autoscale_view()
        return plots
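For context, the percentile curves fed into fill_gradient are typically derived from posterior samples; a minimal sketch with synthetic data (all names illustrative):

import numpy as np

samples = np.random.randn(500, 100)  # 500 draws over 100 x-locations
X = np.linspace(0, 1, 100)
percentiles = np.percentile(samples, [2.5, 25, 50, 75, 97.5], axis=0)
# Each adjacent pair of percentile rows becomes one shaded band:
# self.fill_gradient(ax, X, list(percentiles))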
Esempio n. 42
0
lat_aux = iris.coords.AuxCoord(lat.data,
                               standard_name=lat.name(),
                               units=lat.units)
lon_aux = iris.coords.AuxCoord(lon.data,
                               standard_name=lon.name(),
                               units=lon.units)
# Add latitude and longitude as coordinates
mean_cube.add_aux_coord(lat_aux, data_dims=(0))
mean_cube.add_aux_coord(lon_aux, data_dims=(1))
iris.util.promote_aux_coord_to_dim_coord(mean_cube, 'latitude')
iris.util.promote_aux_coord_to_dim_coord(mean_cube, 'longitude')
# regrid the cube
rh_cube = regrid_model(mean_cube, regrid_modelcube)
rh_data_regridded = rh_cube.data
rh_data_regridded = rh_data_regridded * 1e-3 * 365
rh_data_regridded = ma.masked_invalid(rh_data_regridded)
rh_data_regridded = np.ma.masked_where(rh_data_regridded <= 0,
                                       rh_data_regridded)
historical_observational_rh = rh_data_regridded.copy()

#%% tau calculation

tau_s = merged_hwsd_ncscd_masked / rh_data_regridded
tau_s_masked = ma.masked_where(np.logical_or(tau_s < 1, tau_s > 1e4), tau_s)
obs_temp = ma.masked_where(np.logical_or(tau_s < 1, tau_s > 1e4), obs_temp)

#%%
# FIGURE 2a
ax = fig_figure1.add_subplot(gs[row, column], projection=ccrs.PlateCarree())

# Add lat/lon grid lines to the figure
Esempio n. 43
0
    def make_measurement(datafile,
                         error,
                         outfile,
                         rms=None,
                         masknan=True,
                         overwrite=False,
                         unit="adu"):
        """Create a FITS files with 2 HDUS, the first being the datavalue and the 2nd being 
        the data uncertainty. This format makes allows the resulting file to be read into the underlying :class:'~astropy.nddata.CCDData` class.

        :param datafile: The FITS file containing the data as a function of spatial coordinates
        :type datafile: str
        :param error: The errors on the data. Possible values for error are:

             - a filename with the same shape as datafile containing the error values per pixel
             - a percentage value 'XX%' must have the "%" symbol in it
             - 'rms' meaning use the rms parameter if given, otherwise look for the RMS keyword in the FITS header of the datafile

        :type error: str
        :param outfile: The output file to write the result in (FITS format)
        :type outfile: str
        :param rms:  If error == 'rms', this value may give the rms in the same units as the data (e.g. 'erg s-1 cm-2 sr-1').
        :type rms: float or :class:`astropy.units.Unit`
        :param masknan: Whether to mask any pixel where the data or the error is NaN. Default: True
        :type masknan: bool
        :param overwrite: If `True`, overwrite the output file if it exists. Default: `False`.
        :type overwrite: bool
        :param unit: Intensity unit to use for the data, this will override BUNIT in header if present.
        :type unit: :class:`astropy.units.Unit` or str

        :raises Exception: on various FITS header issues
        :raises OSError: if `overwrite` is `False` and the output file exists.

        Example usage:
        
        .. code-block:: python

            # example with percentage error
            Measurement.make_measurement("my_infile.fits",error='10%',outfile="my_outfile.fits")

            # example with measurement in units of K km/s and error 
            # indicated by RMS keyword in input file.
            Measurement.make_measurement("my_infile.fits",error='rms',outfile="my_outfile.fits",unit="K km/s",overwrite=True)
        """
        _data = fits.open(datafile)
        needsclose = False
        if error == 'rms':
            _error = deepcopy(_data)
            if rms is None:
                rms = _data[0].header.get("RMS", None)
                if rms is None:
                    raise Exception(
                        "rms not given as parameter and RMS keyword not present in data header"
                    )
                else:
                    print("Found RMS in header: %.2E %s" %
                          (rms, _error[0].data.shape))
            _error[0].data[:] = rms
        elif "%" in error:
            percent = float(error.strip('%')) / 100.0
            _error = deepcopy(_data)
            _error[0].data = _data[0].data * percent
        else:
            _error = fits.open(error)
            needsclose = True

        fb = _data[0].header.get('bunit',
                                 str(unit))  #use str in case Unit was given
        eb = _error[0].header.get('bunit', str(unit))
        if fb != eb:
            raise Exception(
                "BUNIT must be the same in both data (%s) and error (%s) maps"
                % (fb, eb))
        # Sigh, this is necessary since there is no mode available in
        # fits.open that will truncate an existing file for writing
        if overwrite and exists(outfile):
            remove(outfile)
        _out = fits.open(name=outfile, mode="ostream")
        _out.append(_data[0])
        _out[0].header['bunit'] = fb
        _out.append(_error[0])
        _out[1].header['extname'] = 'UNCERT'
        _out[1].header['bunit'] = eb
        _out[1].header['utype'] = 'StdDevUncertainty'
        if masknan:
            fmasked = ma.masked_invalid(_data[0].data)
            emasked = ma.masked_invalid(_error[0].data)
            final_mask = utils.mask_union([fmasked, emasked])
            # Convert boolean mask to uint since io.fits cannot handle bool.
            hduMask = fits.ImageHDU(final_mask.astype(np.uint8), name='MASK')
            _out.append(hduMask)
        _out.writeto(outfile, overwrite=overwrite)
        _data.close()
        _out.close()
        if needsclose: _error.close()
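utils.mask_union above is an external helper; a minimal sketch that ORs the masks of several masked arrays (an assumption about its behaviour, based on how the result is used):

import numpy.ma as ma
from functools import reduce

def mask_union(arrays):
    # getmaskarray always returns a full boolean array, so astype(np.uint8)
    # on the result is safe even when nothing is masked.
    return reduce(ma.mask_or, [ma.getmaskarray(a) for a in arrays])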
Esempio n. 44
0
def generate_param_colour_plot(F, ax, scale):
    def fmt(x):
        a, b = '{:.2e}'.format(x).split('e')
        b = int(b)
        return r'${} \times 10^{{{}}}$'.format(a, b)

    text = "15 km, 0 rptrs * NP, T=1e-2-1e1, P = 0.9-0.95, march 6 *.pkl"

    x = 20
    y = 20

    files = glob.glob(text)
    files.sort(key=lambda x: [int(s) for s in x.split() if s.isdigit()][2])

    rates = [[0 for i in range(x)] for j in range(y)]

    counter1 = 0
    for file in files:
        with open(file, 'rb') as f:
            stored_path_dict = pickle.load(f)
            if stored_path_dict:
                fidelities, opt_times, opt_schemes = collect_optimal_protocols(G=None, Alice=None, Bob=None,
                                                                               stored_path_dict=stored_path_dict)
                try:
                    # index of the smallest fidelity greater than F
                    index = next(x[0] for x in enumerate(fidelities) if x[1] >= F)
                    max_rate = 1/opt_times[index]
                except StopIteration:  # exception occurs when there is no scheme that achieves F, return infinite time
                    max_rate = 0
                counter = [int(s) for s in file.split() if s.isdigit()][2] - 1
                print(counter)
                print(file)

                rates[counter % y][counter // y] = max_rate

        counter1 += 1

    plt.set_cmap('RdPu')
    current_cmap = matplotlib.cm.get_cmap()
    current_cmap.set_bad(color='black')
    try:
        min_time = np.nanmin(np.nanmin(rates))
        max_time = np.nanmax(np.nanmax(rates))
    except:
        min_time, max_time = 0, 0
    times = masked_invalid(rates)

    current_cmap = matplotlib.cm.get_cmap()
    current_cmap.set_bad(color='black', alpha=1.)

    skr_grad = np.gradient(ndimage.gaussian_filter(times, sigma=0.35, order=0))
    lengths = [[np.power(0.00001+skr_grad[0][j][i]**2 + skr_grad[1][j][i]**2, 1/2)
                if np.isfinite(skr_grad[0][j][i]) or np.isfinite(skr_grad[1][j][i])
                else 1 for i in range(x)] for j in range(y)]

    skr_grad[0] = skr_grad[0]/lengths
    skr_grad[1] = skr_grad[1]/lengths

    ax.quiver(skr_grad[1], -skr_grad[0], scale=scale, alpha=0.8)

    im = ax.matshow(rates, cmap=current_cmap, interpolation=None)

    t = np.linspace(min_time, max_time, 5)
    cb = plt.colorbar(im, ax=ax, ticks=t, format=ticker.FuncFormatter(fmt), fraction=0.046, pad=0.04)
    cb.ax.tick_params(labelsize=11)

    cb.ax.set_title('Generation rate', fontsize=10)
Esempio n. 45
0
corr_all=[]
for j in range(np.size(drug_data,1)):
  corr_drug=[]                           ####### removing cell lines with response NaN
  y1 = drug_data[:,j]
  a=[i for i, x in enumerate(y1) if not str(x).replace('.','').isdigit()]
  y=np.delete(y1,a)
  X=np.delete(X1,a,axis=0)
  
  for i in range(iteration):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=i)  
    
    if FS==1:
      cc = np.zeros((1,np.size(X,1)))
      for k in range(np.size(X,1)):  # use k to avoid shadowing the outer drug index j
        D = pearsonr(ma.masked_invalid(X_train[:,k]), ma.masked_invalid(y_train))[0]
        cc[0,k] = D
      cc = np.nan_to_num(cc)
      F = normalnp(X, alpha, cc)
      sort=np.argsort(F,0)
      sort=sort[::-1][:,0]
      index=np.concatenate([sort[0:cutoff//2],sort[-cutoff//2:]])
    
    elif FS==0:
      CC=[pearsonr(X_train[:, i], y_train)[0] for i in range(np.size(X_train,1))]
      sort=np.argsort(np.abs(CC))
      sort=sort[::-1]
      index=sort[0:cutoff]
    else:
      print('Invalid feature selection')
Esempio n. 46
0
def generate_param_colour_plot_throughput(ax):
    def fmt(x):
        a, b = '{:.2e}'.format(x).split('e')
        b = int(b)
        return r'${} \times 10^{{{}}}$'.format(a, b)

    def binary_entropy(x):
        if x not in [0, 1]:
            return -x*np.log2(x)-(1-x)*np.log2(1-x)
        else:
            return 0

    def calc_throughput(scheme):
        state = scheme.state
        time = scheme.time
        simulation_type = "IP"
        if simulation_type == "IP":
            proj1 = qt.Qobj([[1, 0, 0, 0],
                             [0, 0, 0, 0],
                             [0, 0, 0, 0],
                             [0, 0, 0, 1]])
            proj2 = (1/2)*qt.Qobj([[1, 0, 0, 1],
                                   [0, 1, 1, 0],
                                   [0, 1, 1, 0],
                                   [1, 0, 0, 1]])
            proj3 = (1/2)*qt.Qobj([[1, 0, 0, -1],
                                   [0, 1, 1, 0],
                                   [0, 1, 1, 0],
                                   [-1, 0, 0, 1]])

            state00 = qt.Qobj([[1/2, 0, 0, 1/2],
                               [0,   0, 0, 0],
                               [0,   0, 0, 0],
                               [1/2, 0, 0, 1/2]])

            state01 = qt.Qobj([[1/2, 0, 0, -1/2],
                               [0,   0, 0, 0],
                               [0,   0, 0, 0],
                               [-1/2, 0, 0, 1/2]])

            state10 = (1/2)*qt.Qobj([[0, 0, 0, 0],
                                     [0, 1, 1, 0],
                                     [0, 1, 1, 0],
                                     [0, 0, 0, 0]])

            state11 = (1/2)*qt.Qobj([[0, 0, 0, 0],
                                     [0, 1, -1, 0],
                                     [0, -1, 1, 0],
                                     [0, 0, 0, 0]])

            proj1.dims = [[2, 2], [2, 2]]
            proj2.dims = [[2, 2], [2, 2]]
            proj3.dims = [[2, 2], [2, 2]]
            state00.dims = [[2, 2], [2, 2]]
            state01.dims = [[2, 2], [2, 2]]
            state10.dims = [[2, 2], [2, 2]]
            state11.dims = [[2, 2], [2, 2]]

        elif simulation_type == "MP":
            proj1 = qt.ket2dm(qt.basis(81, 28)) + qt.ket2dm(qt.basis(81, 12))
            proj2 = qt.ket2dm(qt.basis(81, 28) + qt.basis(81, 12)) / 2 + \
                    qt.ket2dm(qt.basis(81, 28) - qt.basis(81, 12)) / 2
            proj3 = qt.ket2dm(qt.basis(81, 28) + 1j*qt.basis(81, 12)) / 2 + \
                    qt.ket2dm(qt.basis(81, 28) - 1j*qt.basis(81, 12)) / 2

            vec01 = qt.basis(9, 1)
            vec10 = qt.basis(9, 3)

            state00 = qt.ket2dm(qt.tensor(vec01, vec10) + qt.tensor(vec10, vec01))/2
            state01 = qt.ket2dm(qt.tensor(vec01, vec10) - qt.tensor(vec10, vec01))/2
            state10 = qt.ket2dm(qt.tensor(vec01, vec01) + qt.tensor(vec10, vec10))/2
            state11 = qt.ket2dm(qt.tensor(vec01, vec01) - qt.tensor(vec10, vec10))/2

            proj1.dims = [[9, 9], [9, 9]]
            proj2.dims = [[9, 9], [9, 9]]
            proj3.dims = [[9, 9], [9, 9]]
            state00.dims = [[9, 9], [9, 9]]
            state01.dims = [[9, 9], [9, 9]]
            state10.dims = [[9, 9], [9, 9]]
            state11.dims = [[9, 9], [9, 9]]

        bb84 = False
        if bb84:
            qber1 = (state * proj1).tr()
            qber2 = (state * proj2).tr()
            skf = max(1 - binary_entropy(qber1) - binary_entropy(qber2), 0)  # calc secret_key fraction
        else:
            p00 = (state * state00).tr()
            p01 = (state * state01).tr()
            p10 = (state * state10).tr()
            p11 = (state * state11).tr()

            normalisation = p00+p01+p10+p11

            # define P' distribution as in https://iopscience.iop.org/article/10.1088/2058-9565/aab31b, eq D1 to D8
            Px0 = (p00+p01)**2 + (p10+p11)**2
            Px1 = 1-Px0
            p00n = (p00**2+p01**2)/Px0
            p01n = (2*p00*p01)/Px0
            p10n = (p10**2+p11**2)/Px0
            p11n = (2*p10*p11)/Px0
            if ((p00 + p01) * (p10 + p11)) == 0:
                Pbinary_entropy = 1
            else:
                Pbinary_entropy = (p00 * p10 + p01 * p11) / ((p00 + p01) * (p10 + p11))

            skf = max(1-entropy([p00, p01, p10, p11], base=2)+(Px1/2)*binary_entropy(Pbinary_entropy),
                      (Px0/2)*(1-entropy([p00n, p01n, p10n, p11n], base=2)), 0)

        return skf/time

    text = "50 km, 1 rptrs * G=0.98-1 in 20, P=* in 20, eps = 0.01, IP, 1 node, 50km, 8 feb, *.pkl"
    x = 20
    y = 20

    files = glob.glob(text)
    files = [file for file in files if "pessimistic" not in file]
    files.sort(key=lambda x: [int(s) for s in x.split() if s.isdigit()][2])
    files.sort(key=lambda x: [int(s) for s in x.split() if s.isdigit()][1])
    skr = [[0 for i in range(x)] for j in range(y)]
    for file in files:
        print(file)
    counter = 0
    for file in files:
        with open(file, 'rb') as f:
            stored_path_dict = pickle.load(f)
            if stored_path_dict:
                fidelities, opt_times, opt_schemes = collect_optimal_protocols(G=None, Alice=None, Bob=None,
                                                                               stored_path_dict=stored_path_dict)
                throughputs = [calc_throughput(scheme) for scheme in opt_schemes]
                max_throughput = max(throughputs)
                counter = [int(s) for s in file.split() if s.isdigit()][2] - 1
                skr[counter % y][counter // y] = max_throughput

        counter += 1

    print(counter)
    plt.set_cmap('RdPu')
    current_cmap = matplotlib.cm.get_cmap()
    current_cmap.set_bad(color='black')
    try:
        min_time = np.nanmin(np.nanmin(skr))
        max_time = np.nanmax(np.nanmax(skr))
    except:
        min_time, max_time = 0, 0

    skr = masked_invalid(skr)

    current_cmap = matplotlib.cm.get_cmap()
    current_cmap.set_bad(color='black', alpha=1.)

    skr_grad = np.gradient(skr)
    lengths = [[np.power(0.001+skr_grad[0][j][i]**2 + skr_grad[1][j][i]**2, 1/2)
                if np.isfinite(skr_grad[0][j][i]) or np.isfinite(skr_grad[1][j][i])
                else 1 for i in range(x)] for j in range(y)]

    maxlength = max(max(lengths))*1.5
    skr_grad[0] = skr_grad[0]/maxlength
    skr_grad[1] = skr_grad[1]/maxlength
    plt.quiver(skr_grad[1], -skr_grad[0])

    im = ax.matshow(skr, cmap=current_cmap, interpolation=None)

    plt.rc('text', usetex=True)
    t = np.linspace(min_time, max_time, 5)
    cb = plt.colorbar(im, ax=ax, ticks=t, format=ticker.FuncFormatter(fmt), fraction=0.046, pad=0.04)
    cb.ax.tick_params(labelsize=16)
    cb.ax.set_title('Secret-key rate', fontsize=17, pad=12)
Esempio n. 47
0
    #trend_io_3m = xr.full_like(trend_io,3*np.mean(trend_io))
    #trend_io_3m = mask_oceans(args.imask, trend_io_3m, lon, lat)

    # Damp the edges
    trend_io_dmp = damp(trend_io, lon, lat)
    #trend_io_dmp = trend_io_dmp.values

    #Calculate global climatology
    clim = clim_anom_xr(sst_t, time_t, start=None)[0]
    clim = clim.values
    #clip T
    clim[clim < -1.77] = -1.77
    #Interpolate sst
    clim_int = np.zeros(clim.shape)
    for i in range(clim.shape[0]):
        clim_int[i], converged = fill(ma.masked_invalid(clim[i]), 1, 0, **kw)

    #Repeat the climatology 68 times (1950-2017)
    nyears = (end_time - start_time) + 1
    clim_rpt = np.repeat(clim_int[np.newaxis,:,:,:],nyears,axis=0)\
    .reshape(clim_int.shape[0]*nyears,clim_int.shape[1],clim_int.shape[2])

    # Repeat IO trend 68 times (1950-1975)
    trend_io_rpt = np.repeat(trend_io_int[np.newaxis, :, :],
                             nyears * 12,
                             axis=0)
    # Create an array to multiply rates with
    multiplier = np.arange(1, (nyears * 12) + 1, 1)[:, np.newaxis, np.newaxis]
    # Multiply rate with multiplier to get transient trends
    trend_io_trans = np.multiply(trend_io_rpt, multiplier)
Esempio n. 48
0
    def scatter(x, y, z, ax=None, robust=False, **kwargs):
        """
        Plot a scatter section plot of a small dataset (< 100 000 obs)

        Parameters
        ----------
        x : array, dtype=float, shape=[n, ]
            continuous horizontal variable (e.g. time, lat, lon)
        y : array, dtype=float, shape=[n, ]
            continuous vertical variable (e.g. depth, density)
        z : array, dtype=float, shape=[n, ]
            ungridded data variable
        ax : matplotlib.axes
            a predefined set of axes to draw on
        robust : bool=False
            if True, uses the 0.5 and 99.5 percentile to set color limits
        kwargs : any key:value pairs that get passed to plt.scatter.

        Returns
        -------
        axes

        Raises
        ------
        will ask if you want to continue if more than 100 000 points
        """

        from matplotlib.pyplot import colorbar, subplots
        from numpy import (
            ma,
            nanpercentile,
            datetime64,
            array,
            nanmin,
            nanmax,
            isnan,
        )
        from datetime import datetime

        z = ma.masked_invalid(z)
        m = ~(z.mask | isnan(y))
        z = z[m]
        x = array(x)[m]
        y = array(y)[m]

        if y.size >= 1e5:
            carry_on = input(
                'There are a large number of points to plot ({}). '
                'This will take a while to plot.\n'
                'Type "y" to continue or "n" to cancel.\n'.format(y.size))
            if carry_on != 'y':
                print('You have aborted the scatter plot')
                return None

        x_time = isinstance(x[0], (datetime, datetime64))

        if robust:
            kwargs['vmin'] = nanpercentile(z, 0.5)
            kwargs['vmax'] = nanpercentile(z, 99.5)

        if ax is None:
            fig, ax = subplots(1, 1, figsize=[9, 3.5], dpi=90)
        else:
            fig = ax.get_figure()
        im = ax.scatter(x, y, c=z, rasterized=True, **kwargs)

        ax.cb = colorbar(mappable=im, pad=0.02, ax=ax, fraction=0.05)
        ax.set_xlim(x.min(), x.max())
        ax.set_ylim(nanmax(y), nanmin(y))
        ax.set_ylabel('Depth (m)')
        ax.set_xlabel('Date' if x_time else 'Dives')

        [tick.set_rotation(45) for tick in ax.get_xticklabels()]
        fig.tight_layout()

        return ax
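A hedged usage sketch for scatter with synthetic dive data (names are illustrative; the call is shown commented because scatter lives on a plotting class in the original):

import numpy as np

time = np.arange(2000)
depth = np.abs(np.sin(time / 50.0)) * 500
temp = 20 - depth / 50.0 + np.random.randn(2000) * 0.2
# ax = scatter(time, depth, temp, robust=True)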
Esempio n. 49
0
def compute_hess_corr(eigval_col,
                      eigvec_col,
                      savelabel="",
                      figdir="",
                      use_cuda=False):
    """cuda should be used for large mat mul like 512 1024 4096.
    small matmul should stay with cpu numpy computation. cuda will add the IO overhead."""
    posN = len(eigval_col)
    T0 = time()
    if use_cuda:
        corr_mat_log = torch.zeros((posN, posN)).cuda()
        corr_mat_lin = torch.zeros((posN, posN)).cuda()
        for eigi in tqdm(range(posN)):
            evc_i, eva_i = torch.from_numpy(
                eigvec_col[eigi]).cuda(), torch.from_numpy(
                    eigval_col[eigi]).cuda()
            for eigj in range(posN):
                evc_j, eva_j = torch.from_numpy(
                    eigvec_col[eigj]).cuda(), torch.from_numpy(
                        eigval_col[eigj]).cuda()
                inpr = evc_i.T @ evc_j
                vHv_ij = torch.diag((inpr * eva_j.unsqueeze(0)) @ inpr.T)
                corr_mat_log[eigi,
                             eigj] = corr_nan_torch(vHv_ij.log10(),
                                                    eva_j.log10())
                corr_mat_lin[eigi, eigj] = corr_nan_torch(vHv_ij, eva_j)
        corr_mat_log = corr_mat_log.cpu().numpy()
        corr_mat_lin = corr_mat_lin.cpu().numpy()
    else:
        corr_mat_log = np.zeros((posN, posN))
        corr_mat_lin = np.zeros((posN, posN))
        for eigi in tqdm(range(posN)):
            eva_i, evc_i = eigval_col[eigi], eigvec_col[eigi]
            for eigj in range(posN):
                eva_j, evc_j = eigval_col[eigj], eigvec_col[eigj]
                inpr = evc_i.T @ evc_j
                vHv_ij = np.diag((inpr * eva_j[np.newaxis, :]) @ inpr.T)
                corr_mat_log[eigi, eigj] = ma.corrcoef(
                    ma.masked_invalid(np.log10(vHv_ij)),
                    ma.masked_invalid(np.log10(eva_j)))[0, 1]
                corr_mat_lin[eigi, eigj] = np.corrcoef(vHv_ij, eva_j)[0, 1]

    print("%.1f sec" %
          (time() - T0))  # 582.2 secs for the 1000 by 1000 mat. not bad!
    np.savez(join(figdir, "Hess_%s_corr_mat.npz" % savelabel),
             corr_mat_log=corr_mat_log,
             corr_mat_lin=corr_mat_lin)
    print("Compute results saved to %s" %
          join(figdir, "Hess_%s_corr_mat.npz" % savelabel))
    corr_mat_log_nodiag = corr_mat_log.copy()
    corr_mat_lin_nodiag = corr_mat_lin.copy()
    np.fill_diagonal(corr_mat_log_nodiag, np.nan)
    np.fill_diagonal(corr_mat_lin_nodiag, np.nan)
    log_nodiag_mean_cc = np.nanmean(corr_mat_log_nodiag)
    lin_nodiag_mean_cc = np.nanmean(corr_mat_lin_nodiag)
    log_nodiag_med_cc = np.nanmedian(corr_mat_log_nodiag)
    lin_nodiag_med_cc = np.nanmedian(corr_mat_lin_nodiag)
    print("Log scale non-diag mean corr value %.3f med %.3f" %
          (log_nodiag_mean_cc, log_nodiag_med_cc))
    print("Lin scale non-diag mean corr value %.3f med %.3f" %
          (lin_nodiag_mean_cc, lin_nodiag_med_cc))
    return corr_mat_log, corr_mat_lin
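corr_nan_torch above is defined elsewhere; a minimal sketch mirroring the numpy branch (Pearson correlation over finite entries only), under that assumption:

import torch

def corr_nan_torch(x, y):
    valid = torch.isfinite(x) & torch.isfinite(y)
    x, y = x[valid], y[valid]
    xc, yc = x - x.mean(), y - y.mean()
    return (xc * yc).sum() / (xc.norm() * yc.norm())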
Esempio n. 50
0
#This file contains code for extracting AB compartments from all time points in a structure
import time
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
import pdb
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, "../")
from Utils import util as ut
import sys

hic_file = sys.argv[1]
out_file = sys.argv[2]

start_time = time.time()

mat = ut.loadConstraintAsMat(hic_file)
mat = np.clip(mat, 0, 30)
pear = ma.corrcoef(ma.masked_invalid(mat))
pca = PCA(n_components=1)
AB = pca.fit_transform(pear)
AB_VEC = np.squeeze(AB)
np.save(out_file, AB_VEC)
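The sign of the leading principal component is arbitrary; downstream, A/B labels are usually assigned by thresholding it at zero (and in practice anchored to GC content or gene density). A hedged post-processing sketch:

import numpy as np

AB_VEC = np.load('AB_vector.npy')  # hypothetical output path
compartments = np.where(AB_VEC >= 0, 'A', 'B')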
Esempio n. 51
0
def pcolor(values, x=None, y=None, **kwargs):
    """Create a pseudocolour plot of a 2D array, Actually uses pcolormesh for speed.

    Create a colour plot of a 2D array.
    If the arrays x, y, and values are geobipy.StatArray classes, the axes can be automatically labelled.
    Can take any other matplotlib arguments and keyword arguments e.g. cmap etc.

    Parameters
    ----------
    values : array_like or StatArray
        A 2D array of colour values.
    x : 1D array_like or StatArray, optional
        Horizontal coordinates of the values edges.
    y : 1D array_like or StatArray, optional
        Vertical coordinates of the values edges.

    Other Parameters
    ----------------
    log : 'e' or float, optional
        Take the log of the colour to a base. 'e' if log = 'e', and a number e.g. log = 10.
        Values in c that are <= 0 are masked.
    equalize : bool, optional
        Equalize the histogram of the colourmap so that all colours have an equal amount.
    nbins : int, optional
        Number of bins to use for histogram equalization.
    xscale : str, optional
        Scale the x axis? e.g. xscale = 'linear' or 'log'
    yscale : str, optional
        Scale the y axis? e.g. yscale = 'linear' or 'log'.
    flipX : bool, optional
        Flip the X axis
    flipY : bool, optional
        Flip the Y axis
    grid : bool, optional
        Plot the grid
    noColorbar : bool, optional
        Turn off the colour bar, useful if multiple customPlots plotting routines are used on the same figure.        

    Returns
    -------
    ax
        matplotlib .Axes

    See Also
    --------
    matplotlib.pyplot.pcolormesh : For additional keyword arguments you may use.

    """

    assert np.ndim(values) == 2, ValueError('Number of dimensions must be 2')

    equalize = kwargs.pop('equalize', False)

    log = kwargs.pop('log', False)

    xscale = kwargs.pop('xscale', 'linear')
    yscale = kwargs.pop('yscale', 'linear')
    flipX = kwargs.pop('flipX', False)
    flipY = kwargs.pop('flipY', False)

    cl = kwargs.pop('clabel', None)
    grid = kwargs.pop('grid', False)

    noColorBar = kwargs.pop('noColorbar', False)

    # Set the grid colour if specified
    c = None
    if grid:
        c = kwargs.pop('color', 'k')

    ax = plt.gca()
    pretty(ax)

    if (x is None):
        mx = np.arange(np.size(values, 1) + 1)
    else:
        mx = np.asarray(x)
        if (x.size == values.shape[1]):
            mx = x.edges()
        else:
            assert x.size == values.shape[1] + 1, ValueError(
                'x must be size ' + str(values.shape[1] + 1) + '. Not ' +
                str(x.size))

    if (y is None):
        my = np.arange(np.size(values, 0) + 1)
    else:
        my = np.asarray(y)
        if (y.size == values.shape[0]):
            my = y.edges()
        else:
            assert y.size == values.shape[0] + 1, ValueError(
                'y must be size ' + str(values.shape[0] + 1) + '. Not ' +
                str(y.size))

    v = ma.masked_invalid(np.atleast_2d(np.asarray(values)))

    if (log):
        v, logLabel = _logSomething(v, log)

    if equalize:
        nBins = kwargs.pop('nbins', 256)
        assert nBins > 0, ValueError('nBins must be greater than zero')
        v, dummy = histogramEqualize(v, nBins=nBins)

    Zm = ma.masked_invalid(v, copy=False)

    X, Y = np.meshgrid(mx, my)

    pm = plt.pcolormesh(X, Y, Zm, **kwargs)

    plt.xscale(xscale)
    plt.yscale(yscale)
    xlabel(getNameUnits(x))
    ylabel(getNameUnits(y))

    if (not noColorBar):
        if (equalize):
            cbar = plt.colorbar(pm, extend='both')
        else:
            cbar = plt.colorbar(pm)

        if cl is None:
            if (log):
                clabel(cbar, logLabel + getNameUnits(values))
            else:
                clabel(cbar, getNameUnits(values))
        else:
            clabel(cbar, cl)

    if flipX:
        ax.set_xlim(ax.get_xlim()[::-1])

    if flipY:
        ax.set_ylim(ax.get_ylim()[::-1])

    if grid:
        ax.minorticks_on()
        for i, xmin in enumerate(ax.xaxis.get_majorticklocs()):
            if (xmin == np.round(xmin) and xmin > mx[0] and xmin < mx[-1]):
                ax.axvline(x=xmin, c='k', **kwargs)
        for i, xmin in enumerate(ax.xaxis.get_minorticklocs()):
            if (xmin == np.round(xmin) and xmin > mx[0] and xmin < mx[-1]):
                ax.axvline(x=xmin, c='k', **kwargs)

        for i, ymin in enumerate(ax.yaxis.get_majorticklocs()):
            if (ymin == np.round(ymin) and ymin > my[0] and ymin < my[-1]):
                ax.axhline(y=ymin, c='k', **kwargs)
        for i, ymin in enumerate(ax.yaxis.get_minorticklocs()):
            if (ymin == np.round(ymin) and ymin > my[0] and ymin < my[-1]):
                ax.axhline(y=ymin, c='k', **kwargs)

    return ax
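A hedged usage sketch for pcolor (the call is commented since the helper depends on module-level utilities such as pretty and clabel):

import numpy as np

vals = np.random.rand(20, 30)
vals[vals < 0.05] = np.nan  # gaps are masked and left blank in the plot
# ax = pcolor(vals, log=10, grid=True)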
Esempio n. 52
0
import pylab
from numpy.ma import masked_invalid

def infmean(arr):
    return pylab.mean(masked_invalid(arr))
Esempio n. 53
0
import numpy as np
import numpy.ma as ma
arr = np.array([1, 2, 3, np.nan, 5, 6, np.inf, 8])
ma_arr = ma.masked_invalid(arr)
print(ma_arr)
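# Masked entries print as `--`, so the snippet above outputs:
#   [1.0 2.0 3.0 -- 5.0 6.0 -- 8.0]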
Esempio n. 54
0
from filter import Filter
#from sclouds.io import Filter

files = glob.glob(os.path.join(read_dir, '*.nc'))

# This should read all files....
import numpy as np
import numpy.ma as ma

#print(ma.corrcoef(a[msk],b[msk]))
#files = glob.glob(os.path.join(read_dir, '*2012*03*.nc'))
#print(files)
data = xr.open_mfdataset(files, compat='no_conflicts')
ref_data = data['tcc'].values
print('ref data shape {}'.format(ref_data.shape))
a = ma.masked_invalid(ref_data)

msk = ~a.mask
print('msk shape {}'.format(msk.shape))
storang = np.zeros((81, 161, 4))

dictionary_to_store = {}

for k, var in enumerate(['r', 'q', 't2m', 'sp']):
    print('Variable {}'.format(var))
    dta = data[var].values
    print('dta data shape {}'.format(dta.shape))

    for i in range(81):
        for j in range(161):
            #print(ref_data[msk][:, i, j].shape)
Esempio n. 55
0
def main():
    args = parse_args()
    process_args(args)
    print_args(args)

    dims = len(args.dimensions)
    srcs = len(args.source_ratios)
    if args.texture is Texture.NONE:
        textures = [Texture.OEU, Texture.OET, Texture.OUT]
        if args.plot_table:
            textures = [Texture.OET, Texture.OUT]
    else:
        textures = [args.texture]
    texs = len(textures)

    prefix = ''
    # prefix = 'noprior'

    # Initialise data structure.
    statistic_arr = np.full((dims, srcs, texs, args.segments, 2), np.nan)

    print('Loading data')
    argsc = deepcopy(args)
    for idim, dim in enumerate(args.dimensions):
        argsc.dimension = dim

        datadir = args.datadir + '/DIM{0}'.format(dim)
        # Array of scales to scan over.
        boundaries = fr_utils.SCALE_BOUNDARIES[argsc.dimension]
        eval_scales = np.linspace(boundaries[0], boundaries[1],
                                  args.segments - 1)
        eval_scales = np.concatenate([[-100.], eval_scales])

        for isrc, src in enumerate(args.source_ratios):
            argsc.source_ratio = src

            for itex, texture in enumerate(textures):
                argsc.texture = texture

                if args.stat_method is StatCateg.BAYESIAN:
                    base_infile = datadir + '/{0}/{1}/'.format(
                        *map(parse_enum, [args.stat_method, args.data])
                    ) + r'{0}/fr_stat'.format(prefix) + gen_identifier(argsc)
                else:
                    base_infile = datadir + '/{0}/{1}/'.format(
                        *map(parse_enum, [StatCateg.BAYESIAN, args.data])
                    ) + r'{0}/fr_maxllh'.format(prefix) + gen_identifier(argsc)

                print('== {0:<25} = {1}'.format('base_infile', base_infile))

                if args.split_jobs:
                    for idx_sc, scale in enumerate(eval_scales):
                        infile = base_infile + '_scale_{0:.0E}'.format(
                            np.power(10, scale))
                        try:
                            print('Loading from {0}'.format(infile + '.npy'))
                            statistic_arr[idim][isrc][itex][idx_sc] = \
                                np.load(infile+'.npy')[0]
                        except:
                            print('Unable to load file {0}'.format(infile +
                                                                   '.npy'))
                            # raise
                            continue
                else:
                    print('Loading from {0}'.format(base_infile + '.npy'))
                    try:
                        statistic_arr[idim][isrc][itex] = \
                            np.load(base_infile+'.npy')
                    except:
                        print('Unable to load file {0}'.format(base_infile +
                                                               '.npy'))
                        continue

    data = ma.masked_invalid(statistic_arr)

    print('data', data)
    if args.plot_statistic:
        print('Plotting statistic')

        for idim, dim in enumerate(args.dimensions):
            argsc.dimension = dim

            # Array of scales to scan over.
            boundaries = fr_utils.SCALE_BOUNDARIES[argsc.dimension]
            eval_scales = np.linspace(boundaries[0], boundaries[1],
                                      args.segments - 1)
            eval_scales = np.concatenate([[-100.], eval_scales])

            for isrc, src in enumerate(args.source_ratios):
                argsc.source_ratio = src
                for itex, texture in enumerate(textures):
                    argsc.texture = texture

                    if args.stat_method is StatCateg.BAYESIAN:
                        base_infile = args.datadir + '/DIM{0}/{1}/{2}/'.format(
                            dim, *map(parse_enum,
                                      [args.stat_method, args.data
                                       ])) + r'{0}/fr_stat'.format(
                                           prefix) + gen_identifier(argsc)
                    else:
                        base_infile = args.datadir + '/DIM{0}/{1}/{2}/'.format(
                            dim,
                            *map(parse_enum,
                                 [StatCateg.BAYESIAN, args.data
                                  ])) + r'{0}/fr_maxllh'.format(
                                      prefix) + gen_identifier(argsc)

                    basename = os.path.dirname(base_infile)
                    outfile = basename[:5] + basename[5:].replace(
                        'data', 'plots')
                    outfile += '/' + os.path.basename(base_infile)

                    label = r'$\text{Texture}=' + plot_utils.texture_label(
                        texture)[1:]
                    plot_utils.plot_statistic(data=data[idim][isrc][itex],
                                              outfile=outfile,
                                              outformat=['png'],
                                              args=argsc,
                                              scale_param=scale,
                                              label=label)

    basename = args.datadir[:5] + args.datadir[5:].replace('data', 'plots')
    baseoutfile = basename + '/{0}/{1}/'.format(
        *map(parse_enum,
             [args.actual_stat_method, args.data])) + r'{0}'.format(prefix)

    argsc = deepcopy(args)
    if args.plot_x:
        for idim, dim in enumerate(args.dimensions):
            print('|||| DIM = {0}'.format(dim))
            argsc.dimension = dim
            plot_utils.plot_x(data=data[idim],
                              outfile=baseoutfile +
                              '/hese_x_DIM{0}'.format(dim),
                              outformat=['png', 'pdf'],
                              args=argsc,
                              normalize=True)

    if args.plot_table:
        plot_utils.plot_table_sens(data=data,
                                   outfile=baseoutfile + '/hese_table',
                                   outformat=['png', 'pdf'],
                                   args=args,
                                   show_lvatmo=True)
Esempio n. 56
0
def _process_2D_plot_args(args, gridding_dz=1):
    """
    Processes input to the plotting class functions. Allows plots to receive
    one (2D) or three (1D) input arguements.
    """
    from numpy import array, ma, nan, ndarray
    from pandas import DataFrame, Series
    from xarray import DataArray
    from .mapping import grid_data
    from .helpers import GliderToolsError

    name = ''
    if len(args) == 3:
        x = array(args[0])
        y = array(args[1]).astype(float)
        z = args[2].copy()

        if isinstance(z, ma.MaskedArray):
            z[z.mask] = nan
        elif isinstance(z, DataArray):
            name = z.name if z.name is not None else ''
            unit = ' [{}]'.format(z.units) if 'units' in z.attrs else ''
            name = name + unit
            z = ma.masked_invalid(array(z)).astype(float)
        else:
            z = ma.masked_invalid(array(z)).astype(float)

        if (x.size == y.size) & (len(z.shape) == 1):
            df = grid_data(x,
                           y,
                           z,
                           interp_lim=6,
                           verbose=False,
                           return_xarray=False)
            x = df.columns
            y = df.index
            z = ma.masked_invalid(df.values)

        return x, y, z, name

    elif len(args) == 1:
        z = args[0]
        if isinstance(z, DataArray):
            name = z.name if z.name is not None else ''
            unit = ' [{}]'.format(z.units) if 'units' in z.attrs else ''
            name = name + unit
            if z.ndim == 1:
                raise GliderToolsError(
                    'Please provide gridded DataArray or x and y coordinates')
            elif z.ndim == 2:
                z = z.to_series().unstack()
            elif z.ndim > 2:
                raise GliderToolsError(
                    'GliderTools plotting currently only supports 2 '
                    'dimensional plotting')
        elif isinstance(z, (ndarray, Series)):
            if z.ndim == 2:
                z = DataFrame(z)
            else:
                raise IndexError('The input must be a 2D DataFrame or ndarray')

        x = z.columns.values
        y = z.index.values
        z = ma.masked_invalid(z.values).astype(float)

        return x, y, z, name
Esempio n. 57
0
 print('Processing data for year '+year+' ...')
 for j in date_range:        
     date='%03d'%j       
     fname=MyDir+'/'+param+'/'+year+'/AMSRU_Mland_'+year+date+pass_type+'.'+param
     if  os.path.isfile(fname):
         fid = open(fname,'rb')
         data=np.fromfile(fid)
         fid.close()            
         data[data <= 0.0] = np.nan  # mark non-positive values as missing
         data = data * factor
         data = -np.log(data)            
         from mkgrid_global import mkgrid_global              
         datagrid = mkgrid_global(data)                                           
         datagridm = ma.masked_invalid(datagrid)
         from scipy.interpolate import griddata
         z=np.asarray(datagridm)
         z=z.flatten()                                
         grid_z = griddata((y,x), z, (grid_y, grid_x), method='linear')
         grid_z = ma.masked_invalid(grid_z)
         nrows,ncols = np.shape(grid_z)
         nrows=int(nrows)
         ncols=int(ncols)
         xres = (xmax-xmin)/(ncols-1)
         yres = (ymax-ymin)/(nrows-1)
         geotransform=(xmin,xres,0,ymax,0, -yres)   
         arcpy.env.workspace=Dir_fig
         os.chdir(Dir_fig)
         output_raster = gdal.GetDriverByName('GTiff').Create('VOD_%s_%s_%s.tif' %(year,date,pass_type),ncols, nrows, 1 ,gdal.GDT_Float32,)  # Open the file
         output_raster.SetGeoTransform(geotransform)  # Specify its coordinates
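         # The GeoTIFF write is truncated here; with the GDAL API it would
         # typically continue along these lines (a sketch; band and
         # projection details are assumptions, not the original code):
         #   output_raster.GetRasterBand(1).WriteArray(grid_z.filled(np.nan))
         #   output_raster.FlushCache()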
Esempio n. 58
0
    def plotAllSUPERDARN(self):
        # make a plot of the results
        # type is what we are plotting here
        #
        # get the current variables 
        date=self.date
        type=self.pType
        pdata=self.pFband
        times=self.pTimes
        freq=self.pF
        #
        # set here the parameters for power, spectral width, and velocity
        # no log parameters here, but will need to implement that if this
        # changes 
        if self.pType == 'power':
            cblabel = 'Power [dB]'
            vmin = 0
            vmax = 20
        elif self.pType == 'spectral':
            cblabel = 'Spectral Width [m/s]'
            vmin = 0
            vmax = 50
        elif self.pType == 'vel':
            cblabel = 'L-o-s Velocity [m/s]'
            vmin = -200
            vmax = 50        
        #
        # labels for the plots 
        labels=['13 MHz', '15 MHz', '16 MHz', '17 MHz', '18 MHz', '19 MHz']
        #
        # iterate through the potential frequencies 
        for i in range(len(labels)): # number of elements in data arrays
            # start by opening a figure 
            fig=plt.figure()
            #
            # get the necessary plotting imports 
            from numpy import ma
            os.chdir('/Users/loisks/Desktop/Functions/')
            import colormaps as cmaps
            os.chdir(self.path)
            # this is a colormap I like a lot but I have to load it extra special because of the python version I have :) 
            plt.register_cmap(name='viridis', cmap=cmaps.viridis) 

            # for time parameters on plot
            # go with time labels every hour and ticks every 10 minutes
            days = DayLocator(interval=1) 
            hours = MinuteLocator(interval=30) 
            hours2 = MinuteLocator(interval=10) 
            daysFmt = DateFormatter('%H:%M')
            fig.gca().xaxis.set_major_locator(hours)
            fig.gca().xaxis.set_major_formatter(daysFmt)
            fig.gca().xaxis.set_minor_locator(hours2)
            #
            # big font to make things easy to read 
            font = {'family' : 'normal',
                    'weight' : 'bold',
                    'size'   : 22}
            plt.rc('font', **font)
            ax=fig.add_subplot(111)
            plt.subplots_adjust(right=0.70, top=0.92, bottom=0.28, left=0.11)
            ax.set_ylabel('Range Gate Number', fontsize=22, fontweight='bold')
            time=dt.date2num(self.pTimes[i])
            Altitudes = range(75) # for 75 range gates 
            X,Y=np.meshgrid(time, Altitudes)
            dtemp=np.array(self.pFband[i])
            #
            # nan the 10000 data, which are nans in this data set 
            dtemp[dtemp==10000]=np.nan
            data=ma.masked_invalid(dtemp).transpose()
            ax.set_ylim(25,38) # relevant range gates here
            ax.plot(time, np.ones(len(time))*32, lw=3, ls='--', c='k')
            ax.set_xlabel("UT Time on " + self.date, fontsize=20, fontweight='bold')
            ax.set_xlim(self.xlimLow, self.xlimHigh)
            # plot! 
            try:
                col=ax.pcolormesh(X,Y,data, cmap='viridis', vmin=vmin, vmax=vmax)
                font = {'family' : 'normal',
                  'weight' : 'bold',
                  'size'   : 22}
                plt.rc('font', **font)
                #
                # add a colorbar
                cbaxes = fig.add_axes([0.8, 0.27, 0.03, 0.65])
                cb = plt.colorbar(col, cax = cbaxes,ticks=np.linspace(vmin,vmax,5))
                cb.set_label(cblabel, fontsize=25)

                fig.set_size_inches(13,9)
                #
                # save in new directory if directory doesn't already exist
                os.chdir(self.path)
                subdir_name='CUTLASS_All_Spectrograms'
                if not os.path.exists(subdir_name):
                    os.umask(0) # unmask if necessary
                    os.makedirs(subdir_name) 
                os.chdir(subdir_name)#
                plt.savefig(labels[i]+self.date+'_'+self.pType+'.png')
            except: # if there is no data in this frequency band 
                print "No data for " + labels[i]
            #os.chdir('..')
            plt.close()
        # PLOT ALL ON SAME
        fig=plt.figure()
        for i in range(len(labels)): # number of elements in data arrays
            from numpy import ma
            os.chdir('/Users/loisks/Desktop/Functions/')
            import colormaps as cmaps
            os.chdir(self.path)
            # this is a colormap I like a lot but I have to load it extra special because of the python version I have :) 
            plt.register_cmap(name='viridis', cmap=cmaps.viridis) 

            # for time parameters on plot
            # go with time labels every hour and ticks every 10 minutes
            days = DayLocator(interval=1) 
            hours = MinuteLocator(interval=30) 
            hours2 = MinuteLocator(interval=10) 
            daysFmt = DateFormatter('%H:%M')
            fig.gca().xaxis.set_major_locator(hours)
            fig.gca().xaxis.set_major_formatter(daysFmt)
            fig.gca().xaxis.set_minor_locator(hours2)
            #
            # big font to make things easy to read 
            font = {'family' : 'normal',
                    'weight' : 'bold',
                    'size'   : 22}
            plt.rc('font', **font)
            ax=fig.add_subplot(111)
            plt.subplots_adjust(right=0.70, top=0.92, bottom=0.28, left=0.11)
            ax.set_ylabel('Range Gate Number', fontsize=22, fontweight='bold')
            time=dt.date2num(self.pTimes[i])
            Altitudes = range(75) # for 75 range gates 
            X,Y=np.meshgrid(time, Altitudes)
            dtemp=np.array(self.pFband[i])
            #
            # nan the 10000 data, which are nans in this data set 
            dtemp[dtemp==10000]=np.nan
            data=ma.masked_invalid(dtemp).transpose()
            ax.set_ylim(25,38) # relevant range gates here
            ax.plot(time, np.ones(len(time))*32, lw=3, ls='--', c='k')
            ax.set_xlabel("UT Time on " + self.date, fontsize=20, fontweight='bold')
            ax.set_xlim(self.xlimLow, self.xlimHigh)
            # plot! 
            try:
                col=ax.pcolormesh(X,Y,data, cmap='viridis', vmin=vmin, vmax=vmax)
                font = {'family' : 'normal',
                  'weight' : 'bold',
                  'size'   : 22}
                plt.rc('font', **font)
                #
                # add a colorbar
                cbaxes = fig.add_axes([0.8, 0.27, 0.03, 0.65])
                cb = plt.colorbar(col, cax = cbaxes,ticks=np.linspace(vmin,vmax,5))
                cb.set_label(cblabel, fontsize=25)


            except: # if there is no data in this frequency band 
                print "No data for " + labels[i]
            #os.chdir('..')
        fig.set_size_inches(13,9)
        #
        # save in new directory if directory doesn't already exist
        os.chdir(self.path)
        subdir_name='CUTLASS_All_Spectrograms'
        if not os.path.exists(subdir_name):
           os.umask(0) # unmask if necessary
           os.makedirs(subdir_name) 
        os.chdir(subdir_name)#
        plt.savefig('All_'+self.date+'_'+self.pType+'.png')
        plt.close()
Esempio n. 59
0
            for cube in cubes:
                print("cube loop")
                print("cube.var_name=", cube.var_name,
                      "dict_of_variable_names.keys()",
                      dict_of_variable_names.keys())

                if (cube.var_name in dict_of_variable_names.keys()):
                    print("NAME IN DICT OF VARIABLE NAMES")
                    print("file = ", file)
                    print(cube.var_name)
                    print("")

                    # Set any negative values to np.NaN
                    #print("min bef = ",np.nanmin(cube.data))
                    cube.data[cube.data < 0.0] = np.nan
                    cube.data = ma.masked_invalid(cube.data)

                    #print("min aft = ",np.nanmin(cube.data))
                    #print("")

                    # Convert any data from ng m-3 to ug m-3
                    #print("cube.units")
                    #print(cube.units)

                    if (cube.units == "ng m-3"):
                        cube.convert_units('ug m-3')
                        print(cube.units)
                        print("")

                    for coord in cube.coords():
                        #print("coord = ",coord)
Esempio n. 60
0
 def _tomasked(self, value):
     try:
         return ma.masked_invalid(value.values)
     except AttributeError:
         return ma.masked_invalid(value)
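A hedged usage sketch for _tomasked, which accepts either a pandas object (via .values) or a plain array:

import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, 3.0])
# self._tomasked(s)  -> masked array with the NaN entry masked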