def get_spatial_extent(nc, legal_name):
    try:
        if 'lat' in nc.variables and 'lon' in nc.variables:
            lon = nc.variables['lon'][:]
            lat = nc.variables['lat'][:]
        elif 'x' in nc.variables and 'y' in nc.variables:
            lon = nc.variables['x'][:]
            lat = nc.variables['y'][:]
        elif 'lat_u' in nc.variables and 'lon_u' in nc.variables:
            lon = nc.variables['lon_u'][:]
            lat = nc.variables['lat_u'][:]
        elif 'lat_v' in nc.variables and 'lon_v' in nc.variables:
            lon = nc.variables['lon_v'][:]
            lat = nc.variables['lat_v'][:]
        else:
            logger.info("Couldn't Compute Spatial Extent {0}".format(legal_name))
            return []

    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        logger.error("Disabling Error: " +
                     repr(traceback.format_exception(exc_type, exc_value, exc_traceback)))
        return []
    
    return [np.nanmin(lon), np.nanmin(lat), np.nanmax(lon), np.nanmax(lat)]
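A minimal usage sketch for the helper above, assuming the netCDF4 package and a file (hypothetical name) that exposes 'lat'/'lon' coordinate variables:

import netCDF4

nc = netCDF4.Dataset("example.nc")               # hypothetical file
bbox = get_spatial_extent(nc, "Example dataset")
# bbox is [lon_min, lat_min, lon_max, lat_max], or [] if no known
# coordinate variables were found or an error occurred
nc.close()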
Example #2
def summary():
    # read sonde data
    for sites in [[0],[1],[2]]:
        slist,snames=read_diff_events(sites=sites)
        ecount = [len(s.einds) for s in slist]
        mintp = [np.nanmin(s.tp) for s in slist]
        meantp = [np.nanmean(s.tp) for s in slist]
        maxtp = [np.nanmax(s.tp) for s in slist]
        
        head="%9s"%slist[0].name
        ecount = "events   "
        meantp = "mean tph "
        minmax = "tph bound"
        for sonde, sname in zip(slist,snames):
            
            head=head+'| %16s'%sname
            ecount=ecount+'| %16d'%len(sonde.einds)
            meantp=meantp+'| %16.2f'%np.nanmean(sonde.tp)
            minmax=minmax+'| %7.2f,%7.2f '%(np.nanmin(sonde.tp),np.nanmax(sonde.tp))
            
        print("")
        print(head)
        print(ecount)
        print(meantp)
        print(minmax)
Example #3
    def _set_minmax(self):
        data = self._get_fast_data()
        try:
            self.maxval = numpy.nanmax(data)
            self.minval = numpy.nanmin(data)
        except Exception:
            self.maxval = 0
            self.minval = 0

        # TODO: see if there is a faster way to ignore infinity
        try:
            if numpy.isfinite(self.maxval):
                self.maxval_noinf = self.maxval
            else:
                self.maxval_noinf = numpy.nanmax(data[numpy.isfinite(data)])
        except Exception:
            self.maxval_noinf = self.maxval

        try:
            if numpy.isfinite(self.minval):
                self.minval_noinf = self.minval
            else:
                self.minval_noinf = numpy.nanmin(data[numpy.isfinite(data)])
        except Exception:
            self.minval_noinf = self.minval
Example #4
    def voxelize(self, points):
        if not self.voxelized:
            # compute the boundary of the 3D points
            Xmin = np.nanmin(points[0,:]) - self.margin
            Xmax = np.nanmax(points[0,:]) + self.margin
            Ymin = np.nanmin(points[1,:]) - self.margin
            Ymax = np.nanmax(points[1,:]) + self.margin
            Zmin = np.nanmin(points[2,:]) - self.margin
            Zmax = np.nanmax(points[2,:]) + self.margin
            self.min_x = Xmin
            self.min_y = Ymin
            self.min_z = Zmin
            self.max_x = Xmax
            self.max_y = Ymax
            self.max_z = Zmax

            # step size
            self.step_x = (Xmax-Xmin) / self.grid_size
            self.step_y = (Ymax-Ymin) / self.grid_size
            self.step_z = (Zmax-Zmin) / self.grid_size
            self.voxelized = True

        # compute grid indexes
        indexes = np.zeros_like(points, dtype=np.float32)
        indexes[0,:] = np.floor((points[0,:] - self.min_x) / self.step_x)
        indexes[1,:] = np.floor((points[1,:] - self.min_y) / self.step_y)
        indexes[2,:] = np.floor((points[2,:] - self.min_z) / self.step_z)

        # collapse the 3-D grid indexes into a flat index (disabled)
        # grid_indexes = indexes[0,:] * self.grid_size * self.grid_size + indexes[1,:] * self.grid_size + indexes[2,:]
        # I = np.isnan(grid_indexes)
        # grid_indexes[I] = -1
        # grid_indexes = grid_indexes.reshape(self.height, self.width).astype(np.int32)

        return indexes
Example #5
def rescale_to_depth_image(original_image, opencv_image):
    nan_max = np.nanmax(original_image.pixels[..., 2])
    nan_min = np.nanmin(original_image.pixels[..., 2])
    depth_pixels = opencv_image.astype(np.float64) / 255.0
    depth_pixels = rescale(depth_pixels, 0.0, 1.0, nan_min, nan_max)
    depth_pixels[np.isclose(np.nanmin(depth_pixels), depth_pixels)] = np.nan
    return depth_pixels
Example #6
def flugroute(datum_list):
	for datum in datum_list:
		start=time.clock()
		data=np.genfromtxt("../txt/"+datum+"_data.txt",skip_header=1,usecols=(45,46,47,3,48))
		dat=[]
		try:
			for i in range(data.shape[0]):
				if np.isnan(np.max(data[i,:]))==False:dat.append(data[i,:])
			dat=np.array(dat)

			fig = plt.figure()
			ax = fig.gca(projection='3d')

			for i in range(dat.shape[0]): 
				if dat[i,3]>0:dat[i,4],dat[i,3]=dat[i,4]/dat[i,3]*100,np.log(dat[i,3])
				else: dat[i,4],dat[i,3]=0,0
			print(dat[:,3])
			y2=dat[:,3]
			dicke=(y2-np.nanmin(y2))/(np.nanmax(y2)-np.nanmin(y2))*200

			sc = ax.scatter(dat[:,0],dat[:,1],dat[:,2],c=dat[:,4],vmin=0, vmax=25,s=dicke,alpha=0.3, edgecolors='none')
			ax.invert_zaxis()
			ax.set_xlabel("latitude")
			ax.set_ylabel("longitude")
			ax.set_zlabel("altitude")
			cbar = plt.colorbar(sc)
			plt.savefig("../plots/"+datum+"_flugroute_3D.png")

			fig = plt.figure()
			ax = fig.gca()
			sc = ax.scatter(dat[:,0],dat[:,1],c=dat[:,2],s=dicke,alpha=0.3, edgecolors='none')
			cbar = plt.colorbar(sc)
			plt.savefig("../plots/"+datum+"_flugroute.png")
			print(datum, 'flight route plotted in %d s' % (time.clock()-start))
		except: print("data incomplete")
Example #7
def normalizeFloatImage3(floatImage):

    rows = floatImage.shape[0]
    cols = floatImage.shape[1]

    # scan channel 1 for its min/max over pixels where channel 2 <= 0
    mn_0 = 1000
    mx_0 = -1000
    for r in range(rows):
        for c in range(cols):
            if floatImage[r,c, 2] <= 0:
                mn_0 = mn_0 if mn_0 <=  floatImage[r,c, 1 ] else floatImage[r,c, 1 ]
                mx_0 = mx_0 if mx_0  >=  floatImage[r,c, 1 ] else floatImage[r,c, 1 ]

    fctr_0 = 255.0/(mx_0 - mn_0)

    for r in range(rows):
        for c in range(cols):
            if floatImage[r,c, 2]  <= 0:
                floatImage[r,c, 1] = (floatImage[r,c, 1] - mn_0) * fctr_0
            else:
                floatImage[r,c, 1]=0

    print("mn_0: ", mn_0, "  mx_0: ", mx_0)
Example #8
    def __call__(self, transform_xy, x1, y1, x2, y2):
        """
        get extreme values.

        x1, y1, x2, y2 in image coordinates (0-based)
        nx, ny : number of divisions in each axis
        """
        x_, y_ = np.linspace(x1, x2, self.nx), np.linspace(y1, y2, self.ny)
        x, y = np.meshgrid(x_, y_)
        lon, lat = transform_xy(np.ravel(x), np.ravel(y))

        # iron out jumps, but algorithm should be improved.
        # This is just a naive way of doing it and may fail for some cases.
        if self.lon_cycle is not None:
            lon0 = np.nanmin(lon)
            lon -= 360.0 * ((lon - lon0) > 180.0)
        if self.lat_cycle is not None:
            lat0 = np.nanmin(lat)
            lat -= 360.0 * ((lat - lat0) > 180.0)

        lon_min, lon_max = np.nanmin(lon), np.nanmax(lon)
        lat_min, lat_max = np.nanmin(lat), np.nanmax(lat)

        lon_min, lon_max, lat_min, lat_max = self._adjust_extremes(lon_min, lon_max, lat_min, lat_max)

        return lon_min, lon_max, lat_min, lat_max
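A toy illustration of the jump-ironing step in the comment above: longitudes more than 180 degrees above the minimum are shifted down by one cycle, so a track crossing the 0/360 boundary becomes contiguous before nanmin/nanmax are taken.

import numpy as np

lon = np.array([10.0, 20.0, 350.0, 355.0])   # crosses the 0/360 boundary
lon0 = np.nanmin(lon)
lon -= 360.0 * ((lon - lon0) > 180.0)
# lon is now [10., 20., -10., -5.], giving a tight extent of (-10, 20)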
Example #9
def calc_norm_summary_tables(accuracy_tbl, time_tbl):
    """
    Calculate normalized performance/ranking summary, as numpy
    matrices as usual for convenience, and matrices of additional
    statistics (min, max, percentiles, etc.)

    Here normalized means relative to the best which gets a 1, all
    others get the ratio resulting from dividing by the performance of
    the best.
    """
    # Min across all minimizers, i.e. for each fit problem what is the lowest chi-squared and the lowest time
    min_sum_err_sq = np.nanmin(accuracy_tbl, 1)
    min_runtime = np.nanmin(time_tbl, 1)

    # create normalised tables
    norm_acc_rankings = accuracy_tbl / min_sum_err_sq[:, None]
    norm_runtimes = time_tbl / min_runtime[:, None]

    summary_cells_acc = np.array([np.nanmin(norm_acc_rankings, 0),
                                  np.nanmax(norm_acc_rankings, 0),
                                  np.nanmean(norm_acc_rankings, 0),
                                  np.nanmedian(norm_acc_rankings, 0)
                                  ])

    summary_cells_runtime = np.array([np.nanmin(norm_runtimes, 0),
                                      np.nanmax(norm_runtimes, 0),
                                      np.nanmean(norm_runtimes, 0),
                                      np.nanmedian(norm_runtimes, 0)
                                      ])

    return norm_acc_rankings, norm_runtimes, summary_cells_acc, summary_cells_runtime
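A NumPy-only sketch of the normalisation described in the docstring: within each row (fit problem) the best, i.e. lowest, value maps to 1 and every other minimizer gets its ratio to that best.

import numpy as np

accuracy_tbl = np.array([[2.0, 1.0, 4.0],      # toy table: rows are problems,
                         [3.0, 6.0, 3.0]])     # columns are minimizers

best = np.nanmin(accuracy_tbl, 1)              # lowest chi-squared per problem
norm_acc = accuracy_tbl / best[:, None]
# norm_acc == [[2., 1., 4.],
#              [1., 2., 1.]]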
Example #10
	def show(self,**kwargs):
		display = kwargs.get('display', True)
		show_layers = kwargs.get('show_layers',self.layers)
		try:
			show_layers=sorted(show_layers)
		except TypeError:
			show_layers=[show_layers]
		extent=kwargs.get('extent', 
						max_axis(*tuple(_image.axis for _image in self.image_sorted[self.layers[0]])))
		vmin=kwargs.get('vmin')
		vmax=kwargs.get('vmax')
		fig = plt.figure(figsize=(8, 8*abs((extent[3]-extent[2])*1./(extent[1]-extent[0]))))
		for layer in show_layers:
			for image in self.image_sorted[layer]:
				if layer==show_layers[0] and image==self.image_sorted[layer][0]:
					if vmin is None:
						kwargs['vmin']=np.nanmin(image.image)
						vmin=np.nanmin(image.image)
					if vmax is None:
						kwargs['vmax']=np.nanmax(image.image)
						vmax=np.nanmax(image.image)
					image.show(hold=True,**kwargs)
				else:
					image.show(hold=True,vmin=vmin,vmax=vmax,scalebar='off',colorbar='off')
		plt.xlim(extent[:2])
		plt.ylim(extent[-2:])
		if display:
			plt.show()
		else:
			return fig
Example #11
    def _get_Tp_limits(self):
        """Get the limits for the graphs in temperature and pressure, based on 
        SI units: [Tmin, Tmax, pmin, pmax]"""
        T_lo,T_hi,P_lo,P_hi = self.limits
        Ts_lo,Ts_hi = self._get_sat_bounds(CoolProp.iT)
        Ps_lo,Ps_hi = self._get_sat_bounds(CoolProp.iP)

        if T_lo is None:            T_lo  = 0.0
        elif T_lo < self.ID_FACTOR: T_lo *= Ts_lo
        if T_hi is None:            T_hi  = 1e6
        elif T_hi < self.ID_FACTOR: T_hi *= Ts_hi
        if P_lo is None:            P_lo  = 0.0
        elif P_lo < self.ID_FACTOR: P_lo *= Ps_lo
        if P_hi is None:            P_hi  = 1e10
        elif P_hi < self.ID_FACTOR: P_hi *= Ps_hi

        try: T_lo = np.nanmax([T_lo, self._state.trivial_keyed_output(CoolProp.iT_min)])
        except: pass
        try: T_hi = np.nanmin([T_hi, self._state.trivial_keyed_output(CoolProp.iT_max)])
        except: pass
        try: P_lo = np.nanmax([P_lo, self._state.trivial_keyed_output(CoolProp.iP_min)])
        except: pass
        try: P_hi = np.nanmin([P_hi, self._state.trivial_keyed_output(CoolProp.iP_max)])
        except: pass

        return [T_lo,T_hi,P_lo,P_hi]
Example #12
def classify(request):
    C = json.loads(request.POST["C"])
    try:
        features, labels = get_multi_features(request)
    except ValueError as e:
        return HttpResponse(json.dumps({"status": str(e)}))
    try:
        kernel = get_kernel(request, features)
    except ValueError as e:
        return HttpResponse(json.dumps({"status": str(e)}))
    
    learn = "No"  
    values=[]

    try:
        domain = json.loads(request.POST['axis_domain'])
        x, y, z = svm.classify_svm(sg.GMNPSVM, features, labels, kernel, domain, learn, values, C, False)
    except Exception as e:
        return HttpResponse(json.dumps({"status": repr(e)}))

#    z = z + np.random.rand(*z.shape) * 0.01
	
    z_max = np.nanmax(z)
    z_min = np.nanmin(z)
    z_delta = 0.1*(np.nanmax(z)-np.nanmin(z))
    data = {"status": "ok",
            "domain": [z_min-z_delta, z_max+z_delta],
            "max": z_max+z_delta,
            "min": z_min-z_delta,
            "z": z.tolist()}

    return HttpResponse(json.dumps(data))
Example #13
def draw_hmap_old(hmap, yvals, fname=None):
    """
    Plot a matrix as a heat map and write an image file.
    :param hmap: Heat map matrix.
    :param yvals: Heat map Y labels (e.g. amino acid names).
    :param fname: Destination image file.
    """
    if np.nanmax(hmap) > abs(np.nanmin(hmap)):
        vmax = np.nanmax(hmap)
        vmin = -np.nanmax(hmap)
    else:
        vmax = abs(np.nanmin(hmap))
        vmin = np.nanmin(hmap)
    fig = plt.figure(figsize=(20, 10))
    plt.imshow(hmap, cmap='RdBu', interpolation = 'nearest',aspect='auto',vmin = vmin ,vmax = vmax )
    plt.xlim(0, hmap.shape[1])
    plt.ylim(0, hmap.shape[0])
    ax = plt.gca()
    fig.set_facecolor('white')
    ax.set_xlim((-0.5, hmap.shape[1] -0.5))
    ax.set_ylim((-0.5, hmap.shape[0] -0.5))
    ax.set_yticks([x for x in range(0, hmap.shape[0])])
    ax.set_yticklabels(yvals)
    ax.set_xticks(range(0,76,5))
    ax.set_xticklabels(list(range(2,76,5))+['STOP'])
    ax.set_ylabel('Residue')
    ax.set_xlabel('Ub Sequence Position')
    cb = plt.colorbar()
    cb.set_clim(vmin=vmin, vmax=vmax)
    cb.set_label('Relative Fitness')
    if fname is not None:
        plt.savefig(fname, bbox_inches='tight')
    return fig
Example #14
def get_region_boxes(sp, reg2sp):
  x = np.arange(0, sp.shape[1])
  y = np.arange(0, sp.shape[0])
  xv, yv = np.meshgrid(x, y)
  maxsp = np.max(sp)
  sp1=sp.reshape(-1)-1
  xv = xv.reshape(-1)
  yv = yv.reshape(-1)
  spxmin = accum.my_accumarray(sp1,xv, maxsp, 'min')
  spymin = accum.my_accumarray(sp1,yv, maxsp, 'min')
  spxmax = accum.my_accumarray(sp1,xv, maxsp, 'max')
  spymax = accum.my_accumarray(sp1,yv, maxsp, 'max')
  
  Z = reg2sp.astype(float, copy=True)
  Z[reg2sp==0] = np.inf
  xmin = np.nanmin(np.multiply(spxmin.reshape(-1,1), Z),0)
  ymin = np.nanmin(np.multiply(spymin.reshape(-1,1), Z),0)
  xmax = np.amax(np.multiply(spxmax.reshape(-1,1), reg2sp),0)
  ymax = np.amax(np.multiply(spymax.reshape(-1,1), reg2sp), 0)
  xmin[np.isinf(xmin)]=0
  ymin[np.isinf(ymin)]=0
  

  boxes = np.hstack((xmin.reshape(-1,1), ymin.reshape(-1,1), xmax.reshape(-1,1), ymax.reshape(-1,1)))
  return boxes 
Example #15
def plotLL(fname='out4.npy'):
    plt.figure()
    h= np.linspace(0,1,21)
    g= np.linspace(0,1,21)
    m=np.linspace(0,2,21)
    d=np.linspace(0,2,21)
    out=np.load(fname)
    print(np.nanmax(out), np.nanmin(out))
    rang=np.nanmax(out)-np.nanmin(out)
    maxloc= np.squeeze(np.array((np.nanmax(out)==out).nonzero()))
    H,G=np.meshgrid(h,g)
    print(maxloc)
    for mm in range(m.size//2):
        for dd in range(d.size//2):
            plt.subplot(10,10,(9-mm)*10+dd+1)
            plt.pcolormesh(h,g,out[:,:,mm*2,dd*2].T,
                           vmax=np.nanmax(out),vmin=np.nanmax(out)-rang/4.)
            plt.gca().set_xticks([])
            plt.gca().set_yticks([])
            if mm==maxloc[2]//2 and dd==maxloc[3]//2:
                plt.plot(h[maxloc[0]],g[maxloc[1]],'ow',ms=8)
            if dd==0:
                print(mm, dd)
                plt.ylabel('%.1f'%m[mm*2])
            if mm==0: plt.xlabel('%.1f'%d[dd*2])
    plt.title(fname[:6])
Example #16
    def getRange(self, axis, depname, axrange):
        """Update axis range from data."""

        s = self.settings
        doc = self.document

        if ( (depname == 'sx' and s.direction == 'horizontal') or
             (depname == 'sy' and s.direction == 'vertical') ):
            # update axis in direction of data
            if s.calculate:
                # update from values
                values = s.get('values').getData(doc)
                if values:
                    for v in values:
                        if len(v.data) > 0:
                            axrange[0] = min(axrange[0], N.nanmin(v.data))
                            axrange[1] = max(axrange[1], N.nanmax(v.data))
            else:
                # update from manual entries
                drange = self.rangeManual()
                axrange[0] = min(axrange[0], drange[0])
                axrange[1] = max(axrange[1], drange[1])
        else:
            # update axis in direction of datasets
            posns = self.getPosns()
            if len(posns) > 0:
                axrange[0] = min(axrange[0], N.nanmin(posns)-0.5)
                axrange[1] = max(axrange[1], N.nanmax(posns)+0.5)
Example #17
    def acquire_data(self, var_name=None, slice_=()):
        if var_name in self._variables:
            vars = [var_name]
        else:
            vars = self._variables

        if not isinstance(slice_, tuple): slice_ = (slice_,)

        for vn in vars:
            var = self._data_array[vn]

            ndims = len(var.shape)
            # Ensure the slice_ is the appropriate length
            if len(slice_) < ndims:
                slice_ += (slice(None),) * (ndims-len(slice_))

            arri = ArrayIterator(var, self._block_size)[slice_]
            for d in arri:
                if d.dtype.char == "S":
                    # Obviously, we can't get the range of values for a string data type!
                    rng = None
                elif isinstance(d, numpy.ma.masked_array):
                    # TODO: This is a temporary fix because numpy 'nanmin' and 'nanmax'
                    # are currently broken for masked_arrays:
                    # http://mail.scipy.org/pipermail/numpy-discussion/2011-July/057806.html
                    dc = d.compressed()
                    if dc.size == 0:
                        rng = None
                    else:
                        rng = (numpy.nanmin(dc), numpy.nanmax(dc))
                else:
                    rng = (numpy.nanmin(d), numpy.nanmax(d))
                yield vn, arri.curr_slice, rng, d

        return
Example #18
def stokes_plot(x_data, xlabel, I_data, Q_data, U_data, V_data,
                filename):
    """Generate plot of 4 stokes parameters"""
    fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, sharex=True)
    ax1.plot(x_data,I_data)
    ax1.set_xlim(np.nanmin(x_data),np.nanmax(x_data))
    ax1.set_ylim(np.nanmin(I_data[I_data.nonzero()]),
                 np.nanmax(I_data))
    ax1.set_ylabel("Stokes I (K)")
    ax2.plot(x_data,Q_data)
    ax2.set_ylim(np.nanmin(Q_data),np.nanmax(Q_data))
    ax2.set_ylabel("Stokes Q (K)")
    ax3.plot(x_data,U_data)
    ax3.set_ylim(np.nanmin(U_data),np.nanmax(U_data))
    ax3.set_ylabel("Stokes U (K)")
    ax4.plot(x_data,V_data)
    ax4.set_ylim(np.nanmin(V_data),np.nanmax(V_data))
    ax4.set_ylabel("Stokes V (K)")
    ax4.set_xlabel(xlabel)
    fig.subplots_adjust(hspace=0.1)
    for ax in [ax1, ax2, ax3, ax4]:
        # make the fontsize a bit smaller
        for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
                     ax.get_xticklabels() + ax.get_yticklabels()):
            item.set_fontsize(12)
    plt.savefig(filename)
    plt.close(fig)
Example #19
 def set_range(self, x_data, y_data):
     min_x, max_x = np.nanmin(x_data), np.nanmax(x_data)
     min_y, max_y = np.nanmin(y_data), np.nanmax(y_data)
     self.plotview.setRange(
         QRectF(min_x, min_y, max_x - min_x, max_y - min_y),
         padding=0.025)
     self.plotview.replot()
Example #20
    def _axes_domain(self, nx=None, ny=None, background_patch=None):
        """Returns x_range, y_range"""
        DEBUG = False

        transform = self._crs_transform()

        ax_transform = self.axes.transAxes
        desired_trans = ax_transform - transform

        nx = nx or 30
        ny = ny or 30
        x = np.linspace(1e-9, 1 - 1e-9, nx)
        y = np.linspace(1e-9, 1 - 1e-9, ny)
        x, y = np.meshgrid(x, y)

        coords = np.concatenate([x.flatten()[:, None], y.flatten()[:, None]], 1)

        in_data = desired_trans.transform(coords)

        ax_to_bkg_patch = self.axes.transAxes - background_patch.get_transform()

        ok = np.zeros(in_data.shape[:-1], dtype=bool)
        # XXX Vectorise contains_point
        for i, val in enumerate(in_data):
            # convert the coordinates of the data to the background
            # patches coordinates
            background_coord = ax_to_bkg_patch.transform(coords[i : i + 1, :])
            bkg_patch_contains = background_patch.get_path().contains_point
            if bkg_patch_contains(background_coord[0, :]):
                color = "r"
                ok[i] = True
            else:
                color = "b"

            if DEBUG:
                import matplotlib.pyplot as plt

                plt.plot(coords[i, 0], coords[i, 1], "o" + color, clip_on=False, transform=ax_transform)
        #                plt.text(coords[i, 0], coords[i, 1], str(val), clip_on=False,
        #                         transform=ax_transform, rotation=23,
        #                         horizontalalignment='right')

        inside = in_data[ok, :]
        x_range = np.nanmin(inside[:, 0]), np.nanmax(inside[:, 0])
        y_range = np.nanmin(inside[:, 1]), np.nanmax(inside[:, 1])

        # XXX Cartopy specific thing. Perhaps make this bit a specialisation
        # in a subclass...
        crs = self.crs
        if isinstance(crs, Projection):
            x_range = np.clip(x_range, *crs.x_limits)
            y_range = np.clip(y_range, *crs.y_limits)

            # if the limit is >90% of the full x limit, then just use the full
            # x limit (this makes circular handling better)
            prct = np.abs(np.diff(x_range) / np.diff(crs.x_limits))
            if prct > 0.9:
                x_range = crs.x_limits

        return x_range, y_range
Example #21
    def info(self):
        """
    info()

    Prints out a simple human-readable summary of the spectrum,
    containing the name of the spectrum, the units on its axes,
    and their limits. Also shows whether the spectrum has been
    baselined or convolved yet.

    Parameters
    ----------
    None

    Returns
    -------
    Nothing, but prints out a summary of the spectrum.
    """
        print("---")
        print("Summary for spectrum " + self.name)
        print("x unit: " + str(self.x.unit))
        print("min(x): " + str(np.nanmin(self.x.value)))
        print("max(x): " + str(np.nanmax(self.x.value)))
        print("y unit: " + str(self.y.unit))
        print("min(y): " + str(np.nanmin(self.y.value)))
        print("max(y): " + str(np.nanmax(self.y.value)))
        print("baselined: " + str(self.baselined))
        print("convolved: " + str(self.convolved))
        print("---")
Example #22
 def test_threshold_filter_nan(self):
     src = self.make_src(nan=True)
     self.e.add_source(src)
     threshold = Threshold()
     self.e.add_filter(threshold)
     self.assertEqual(np.nanmin(src.scalar_data), np.nanmin(threshold.outputs[0].point_data.scalars.to_array()))
     self.assertEqual(np.nanmax(src.scalar_data), np.nanmax(threshold.outputs[0].point_data.scalars.to_array()))
Example #23
def TestPlot(fig=None):
    A = numpy.array([1,2,3,4,2,5,8,3,2,3,5,6])
    B = numpy.array([8,7,3,6,4,numpy.nan,9,3,7,numpy.nan,2,4])
    C = numpy.array([6,3,4,7,2,1,1,7,8,4,3,2])
    D = numpy.array([5,2,4,5,3,8,2,5,3,5,6,8])
    
    # A work around to get the histograms overplotted with each other to overlap correctly;
    histrangelist = [(numpy.nanmin(A),numpy.nanmax(A)),(numpy.nanmin(B),numpy.nanmax(B)),
                (numpy.nanmin(C),numpy.nanmax(C)),(numpy.nanmin(D),numpy.nanmax(D))]
    
    data = numpy.array([A,B,C,D])
    labels = ['A','B','C','D']

    fig = GridPlot(data,labels=labels, no_tick_labels=True, color='black', 
                    hist=True, histbins=3, histloc='tl', histrangelist=histrangelist, fig=None) 
    
    # Data of note to plot in different color
    A2 = numpy.array([1,2,3,4])
    B2 = numpy.array([8,7,3,6])
    C2 = numpy.array([6,3,4,7])
    D2 = numpy.array([5,2,4,5])
    data2 = numpy.array([A2,B2,C2,D2])
    
    fig = GridPlot(data2,labels=labels, no_tick_labels=True, color='red', 
                hist=True, histbins=3, histloc='tr', histrangelist=histrangelist, fig=fig) 
    
    return fig
Example #24
def plot_nontarget_betas_n_back(t_vols_n_back_beta_1, b_vols_smooth_n_back, in_brain_mask, brain_structure, nice_cmap, n_back):

  beta_index = 1

  b_vols_smooth_n_back[~in_brain_mask] = np.nan
  t_vols_n_back_beta_1[~in_brain_mask] = np.nan
  min_val = np.nanmin(b_vols_smooth_n_back[...,(40,50,60),beta_index])
  max_val = np.nanmax(b_vols_smooth_n_back[...,(40,50,60),beta_index])

  plt.figure()

  for map_index, depth in (((3,2,1), 40),((3,2,3), 50),((3,2,5), 60)):
    plt.subplot(*map_index)
    plt.title("z=%d,%s" % (depth, n_back + "-back nontarget,beta values"))
    plt.imshow(brain_structure[...,depth], alpha=0.5)
    plt.imshow(b_vols_smooth_n_back[...,depth,beta_index], cmap=nice_cmap, alpha=0.5, vmin=min_val, vmax=max_val)
    plt.colorbar()
    plt.tight_layout()

  t_min_val = np.nanmin(t_vols_n_back_beta_1[...,(40,50,60)])
  t_max_val = np.nanmax(t_vols_n_back_beta_1[...,(40,50,60)])

  for map_index, depth in (((3,2,2), 40),((3,2,4), 50),((3,2,6), 60)):
    plt.subplot(*map_index)
    plt.title("z=%d,%s" % (depth, n_back + "-back nontarget,t values"))
    plt.imshow(brain_structure[...,depth], alpha=0.5)
    plt.imshow(t_vols_n_back_beta_1[...,depth], cmap=nice_cmap, alpha=0.5, vmin=t_min_val, vmax=t_max_val)
    plt.colorbar()
    plt.tight_layout()

  plt.savefig(os.path.join(output_filename, "sub011_nontarget_betas_%s_back.png" % (n_back)), format='png', dpi=500)  
Example #25
    def callback_function(self,
                          optimiser_output,
                          minimise_function_result,
                          was_accepted):
        if not(was_accepted):
            return

        if self.current_success_number == 0:
            # First result
            self.successful_results[0] = minimise_function_result
            self.current_success_number = 1

        elif (minimise_function_result >=
              np.nanmin(self.successful_results) + self.confidence):
            # Reject result
            pass

        elif (minimise_function_result >=
              np.nanmin(self.successful_results) - self.confidence):
            # Agreeing result
            self.successful_results[
                self.current_success_number
            ] = minimise_function_result

            self.current_success_number += 1

        elif (minimise_function_result <
              np.nanmin(self.successful_results) - self.confidence):
            # New result
            self.successful_results[0] = minimise_function_result
            self.current_success_number = 1

        if self.current_success_number >= self.n:
            return True
Example #26
    def __call__(self, transform_xy, x1, y1, x2, y2):
        """
        get extreme values.

        x1, y1, x2, y2 in image coordinates (0-based)
        nx, ny : number of divisions in each axis
        """
        x_, y_ = np.linspace(x1, x2, self.nx), np.linspace(y1, y2, self.ny)
        x, y = np.meshgrid(x_, y_)
        lon, lat = transform_xy(np.ravel(x), np.ravel(y))

        # iron out jumps, but algorithm should be improved.
        # This is just a naive way of doing it and may fail for some cases.
        # Consider replacing this with numpy.unwrap
        # We are ignoring invalid warnings. They are triggered when
        # comparing arrays with NaNs using > We are already handling
        # that correctly using np.nanmin and np.nanmax
        with np.errstate(invalid='ignore'):
            if self.lon_cycle is not None:
                lon0 = np.nanmin(lon)
                lon -= 360. * ((lon - lon0) > 180.)
            if self.lat_cycle is not None:
                lat0 = np.nanmin(lat)
                lat -= 360. * ((lat - lat0) > 180.)

        lon_min, lon_max = np.nanmin(lon), np.nanmax(lon)
        lat_min, lat_max = np.nanmin(lat), np.nanmax(lat)

        lon_min, lon_max, lat_min, lat_max = \
                 self._adjust_extremes(lon_min, lon_max, lat_min, lat_max)

        return lon_min, lon_max, lat_min, lat_max
Example #27
def plot_noise_regressor_betas(b_vols_smooth, t_vols_beta_6_to_9, brain_structure, in_brain_mask, nice_cmap):

  plt.figure()

  min_val = np.nanmin(b_vols_smooth[...,40,(6,7,9)])
  max_val = np.nanmax(b_vols_smooth[...,40,(6,7,9)])

  plt.subplot(3,2,1)
  plt.title("z=%d,%s" % (40, "linear drift,betas"))
  b_vols_smooth[~in_brain_mask] = np.nan
  plt.imshow(brain_structure[...,40], alpha=0.5)
  plt.imshow(b_vols_smooth[...,40,6], cmap=nice_cmap, alpha=0.5, vmin=min_val, vmax=max_val)
  plt.colorbar()
  plt.tight_layout()

  plt.subplot(3,2,3)
  plt.title("z=%d,%s" % (40, "quadratic drift,betas"))
  b_vols_smooth[~in_brain_mask] = np.nan
  plt.imshow(brain_structure[...,40], alpha=0.5)
  plt.imshow(b_vols_smooth[...,40,7], cmap=nice_cmap, alpha=0.5, vmin=min_val, vmax=max_val)
  plt.colorbar()
  plt.tight_layout()

  plt.subplot(3,2,5)
  plt.title("z=%d,%s" % (40, "second PC,betas"))
  b_vols_smooth[~in_brain_mask] = np.nan
  plt.imshow(brain_structure[...,40], alpha=0.5)
  plt.imshow(b_vols_smooth[...,40,9], cmap=nice_cmap, alpha=0.5, vmin=min_val, vmax=max_val)
  plt.colorbar()
  plt.tight_layout()

  t_vols_beta_6_to_9[0][~in_brain_mask] = np.nan
  t_vols_beta_6_to_9[1][~in_brain_mask] = np.nan
  t_vols_beta_6_to_9[3][~in_brain_mask] = np.nan

  t_min_val = np.nanmin([t_vols_beta_6_to_9[0][...,40], t_vols_beta_6_to_9[1][...,40], t_vols_beta_6_to_9[3][...,40]])
  t_max_val = np.nanmax([t_vols_beta_6_to_9[0][...,40], t_vols_beta_6_to_9[1][...,40], t_vols_beta_6_to_9[3][...,40]])

  plt.subplot(3,2,2)
  plt.title("z=%d,%s" % (40, "linear drift,t values"))
  plt.imshow(brain_structure[...,40], alpha=0.5)
  plt.imshow(t_vols_beta_6_to_9[0][...,40], cmap=nice_cmap, alpha=0.5, vmin=t_min_val, vmax=t_max_val)
  plt.colorbar()
  plt.tight_layout()

  plt.subplot(3,2,4)
  plt.title("z=%d,%s" % (40, "quadratic drift,t values"))
  plt.imshow(brain_structure[...,40], alpha=0.5)
  plt.imshow(t_vols_beta_6_to_9[1][...,40], cmap=nice_cmap, alpha=0.5, vmin=t_min_val, vmax=t_max_val)
  plt.colorbar()
  plt.tight_layout()

  plt.subplot(3,2,6)
  plt.title("z=%d,%s" % (40, "second PC,t values"))
  plt.imshow(brain_structure[...,40], alpha=0.5)
  plt.imshow(t_vols_beta_6_to_9[3][...,40], cmap=nice_cmap, alpha=0.5, vmin=t_min_val, vmax=t_max_val)
  plt.colorbar()
  plt.tight_layout()

  plt.savefig(os.path.join(output_filename, "sub001_noise_regressors_betas_map.png"), format='png', dpi=500)  
Example #28
def bin_fit(x, y, buckets=3):
     
    assert buckets in [3,25]

    xstd=np.nanstd(x)
    
    if buckets==3:
        binlimits=[np.nanmin(x), -xstd/2.0,xstd/2.0 , np.nanmax(x)]
    elif buckets==25:
    
        steps=xstd/4.0
        binlimits=np.arange(-xstd*3.0, xstd*3.0, steps)
    
        binlimits=[np.nanmin(x)]+list(binlimits)+[np.nanmax(x)]
    
    fit_y=[]
    err_y=[]
    x_values_to_plot=[]
    for binidx in range(len(binlimits))[1:]:
        lower_bin_x=binlimits[binidx-1]
        upper_bin_x=binlimits[binidx]

        x_values_to_plot.append(np.mean([lower_bin_x, upper_bin_x]))

        y_in_bin=[y[idx] for idx in range(len(y)) if x[idx]>=lower_bin_x and x[idx]<upper_bin_x]

        fit_y.append(np.nanmedian(y_in_bin))
        err_y.append(np.nanstd(y_in_bin))

    ## no zeros
    

    return (binlimits, x_values_to_plot, fit_y, err_y)
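A hypothetical usage sketch for bin_fit above, with synthetic data; it returns the bin limits, the bucket mid-points to plot, and the median and spread of y within each bucket.

import numpy as np

rng = np.random.default_rng(0)                       # synthetic example data
x = rng.normal(size=500)
y = 2.0 * x + rng.normal(scale=0.5, size=500)

binlimits, bin_x, fit_y, err_y = bin_fit(x, y, buckets=3)
# bin_x: mid-point of each bucket, fit_y: median of y in the bucket,
# err_y: standard deviation of y in the bucket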
Example #29
    def generate_artist(self):
           container=self.get_container()
           if self.isempty() is False:
               return

           x, y = self._eval_xy() # this handles "use_var"
 
           lp=self.getp("loaded_property") 

           if True:
              x, y = self.getp(("x", "y"))
              if y is None: return
              if x is None: return 
              
              if (x is not None  and
                  y is not None):   
                  self._data_extent=[np.nanmin(x), np.nanmax(x), 
                                     np.nanmin(y), np.nanmax(y)]

                  if len(y.shape) == 1:
                     kywds = self._var["kywds"]
                     args, self._tri =  tri_args(x, y, self._tri) 
                     kywds['mask'] = self.getp('mask')
                     kywds['linestyle'] = self.getp('linestyle')
                     kywds['linewidth'] = self.getp('linewidth')
                     kywds['color'] = self.getp('color')
                     a =  triplot(container, *args, **kywds)
                     self.set_artist(a[0])
                     self._other_artists = a[1:]

           if lp is not None:
              for i in range(0, len(lp)):
                  self.set_artist_property(self._artists[i], lp[i])
              self.delp("loaded_property")
           self.set_rasterized()
Example #30
def threshold(frame, threshold = 0.5, normalized_threshold = True, threshold_type = cv2.THRESH_BINARY, debug = False):
    """
    thresholding an image: the input type has to be either np.uint8 or np.float32
    returns the thresholded image
    """

    fmin = np.double( np.nanmin(frame) )
    fmax = np.double( np.nanmax(frame) )
    
    if debug:
        print("fmin: ", fmin)
        print("fmax: ", fmax)
    
    floatframe = ( (1.0 - 0.0) / (fmax - fmin) * (np.double(frame) - fmin) ).astype(np.float32)
    
    if debug:
        print("floatframe: ", floatframe.dtype)
        print("min: ", np.nanmin(floatframe))
        print("max: ", np.nanmax(floatframe))
    
    if not normalized_threshold:
        threshold = 1.0 / (fmax - fmin) * (threshold - fmin)
        if debug:
            print("normalized threshold: ", threshold)
    
    retval, t = cv2.threshold(floatframe, thresh = threshold, maxval = 255, type = threshold_type)
    
    return t
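A hypothetical usage sketch for threshold() above, assuming a float32 frame; with the default normalized_threshold=True the cutoff is given on the rescaled 0-1 range.

import numpy as np
import cv2

frame = np.random.rand(48, 64).astype(np.float32)    # hypothetical frame
mask = threshold(frame, threshold=0.5, threshold_type=cv2.THRESH_BINARY)
# mask is 255 where the rescaled frame exceeds 0.5 and 0 elsewhere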
Example #31
    def formatAudBatch(self, aud_msg_array, name=""):
        # perform pre-processing on the audio input

        for x in range(len(aud_msg_array)):
            aud_msg_array[x] = self.formatAudMsg(aud_msg_array[x])

        num_frames = len(aud_msg_array)
        core_data = np.reshape(aud_msg_array, (num_frames * len(aud_msg_array[0])))
        # modify data
        # core_data = input_data
        # core_data = input_data[:int(16000*1.2)]

        # np.save(NOISE_SAMPLE_PATH.replace('#', '3'), core_data)
        # dummy = np.load(NOISE_SAMPLE_PATH.replace('#', '3'))
        # core_data = np.append(core_data, dummy)
        # num_frames = num_frames + int(len(dummy)/len(aud_msg_array[0]))

        # get the indicies for the noise sample
        # noise_sample_s, noise_sample_e = 1, -1 #16000 * (-1.5), -1

        # perform spectral subtraction to reduce noise
        noise = self.__noise_sample_1
        # noise = core_data[int(noise_sample_s): int(noise_sample_e)]
        # np.save(NOISE_SAMPLE_PATH.replace('#', '1'), noise)
        filtered_input = reduce_noise(np.array(core_data), noise)

        # smooth signal
        b, a = signal.butter(3, 0.05)
        filtered_input = signal.lfilter(b, a, filtered_input)

        # additional spectral subtraction to remove remaining noise
        noise = self.__noise_sample_2
        # noise = filtered_input[int(noise_sample_s): int(noise_sample_e)]
        # np.save(NOISE_SAMPLE_PATH.replace('#', '2'), noise)
        filtered_input = reduce_noise(filtered_input, noise)

        filtered_input = np.append(filtered_input, self.__noise_dummy)
        num_frames = num_frames + int(len(self.__noise_dummy)/len(aud_msg_array[0]))

        # generate spectrogram
        S = librosa.feature.melspectrogram(y=filtered_input, sr=self.rate, n_mels=128, fmax=8000)
        S = librosa.power_to_db(S, ref=np.max)

        # if(True):
        #     # if True then output spectrogram to png file (requires matplot.pyplot lib to be imported)
        #     plt.figure(figsize=(10, 4))
        #
        #     librosa.display.specshow(S,y_axis='mel', fmax=8000,x_axis='time')
        #     plt.colorbar(format='%+2.0f dB')
        #     plt.title('Mel-Spectrogram')
        #     plt.tight_layout()
        #     print("spectrogram ouput to file.")
        #
        #     out_file = "debug/audio_{}.png".format(self.__chunk_counter)
        #     plt.savefig(out_file)
        #     self.counter += 1
        #     plt.clf()

        # split the spectrogram into A_i. This generates an overlap between
        # frames with a set stride
        stride = S.shape[1] / float(num_frames)
        frame_len = aud_dtype["cmp_w"]

        # pad the entire spectrogram so that overlaps at either end do not fall out of bounds
        min_val = np.nanmin(S)

        empty = np.zeros((S.shape[0], 3))
        empty.fill(min_val)
        empty_end = np.zeros((S.shape[0], 8))
        empty_end.fill(min_val)
        S = np.concatenate((empty, S, empty_end), axis=1)

        split_data = np.zeros(shape=(num_frames, S.shape[0], frame_len), dtype=S.dtype)
        for i in range(0, num_frames):
            split_data[i] = S[:,
                            int(math.floor(i * stride)):int(math.floor(i * stride)) + frame_len]

        # normalize the output to be between 0 and 255
        split_data -= split_data.min()
        split_data /= split_data.max() / 255.0

        return np.reshape(split_data, (num_frames, -1))[:-int(len(self.__noise_dummy) /
                                                              len(aud_msg_array[0])) - 1]
Example #32
 def fit_transform(self, tensor_3d):
     self.min_ = np.nanmin(tensor_3d)
     self.max_ = np.nanmax(tensor_3d)
     return (tensor_3d - self.min_) / (self.max_ - self.min_)
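The same min-max rescaling on a toy tensor, showing that np.nanmin/np.nanmax ignore NaNs, so a single missing value does not distort the range:

import numpy as np

tensor_3d = np.array([[[1.0, 2.0],
                       [np.nan, 4.0]]])
lo, hi = np.nanmin(tensor_3d), np.nanmax(tensor_3d)
scaled = (tensor_3d - lo) / (hi - lo)    # values span 0..1; the NaN stays NaN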
Example #33
for itime in list(out_flux.keys())[98:1558:20]:

    real_itime = batch_delta_to_time(
        date_origin, [float(itime[7:18])], "%Y-%m-%d %H:%M:%S", "hours")
    real_itime = [datetime.strptime(x, "%Y-%m-%d %H:%M:%S") for x in real_itime]
    print(real_itime)
    
    xy_flux = np.asarray([np.nan] * (ny * nx)).reshape(ny, nx)
    for i_unique in np.arange(n_unique):
        xy_flux[unique_xy[i_unique, 1],
                unique_xy[i_unique, 0]] = out_flux[itime][i_unique]
    
    iout_flux = xy_flux * 24 / dx[0] / dy[0]
    
    imax = np.nanmax(iout_flux)
    imin = np.nanmin(iout_flux)
    
    if imax > max_flux:
        max_flux = imax
    if imin < min_flux:
        min_flux = imin
        
print(max_flux, min_flux)


# # Calculate accumulative flux for each river segment

# In[14]:


# load pre-processed file
Example #34
    def _phantom_detection(self):

        # generate phantom mask via Otsu's method and morphological closing
        image = self.image.array
        thresh = filters.threshold_otsu(image)
        bw = morphology.closing(image > thresh, morphology.square(3))

        # get points along the edges
        points = feature.corner_peaks(feature.corner_harris(bw),
                                      min_distance=20)
        # estimation of the rectangular lv phantom via convex hull
        # get the convex hull for the points
        hull_points = points[ConvexHull(points).vertices]
        # calculate edge angles
        edges = np.zeros((len(hull_points) - 1, 2))
        edges = hull_points[1:] - hull_points[:-1]
        # find angles between points at the hull
        angles = np.zeros((len(edges)))
        angles = np.arctan2(edges[:, 1], edges[:, 0])
        angles = np.abs(np.mod(angles, np.pi / 2))
        angles = np.unique(angles)
        # rotation matrix
        rotations = np.vstack([
            np.cos(angles),
            np.cos(angles - np.pi / 2),
            np.cos(angles + np.pi / 2),
            np.cos(angles)
        ]).T

        rotations = rotations.reshape((-1, 2, 2))

        # apply rotations to the hull
        rot_points = np.dot(rotations, hull_points.T)

        # find the bounding points
        min_x = np.nanmin(rot_points[:, 0], axis=1)
        max_x = np.nanmax(rot_points[:, 0], axis=1)
        min_y = np.nanmin(rot_points[:, 1], axis=1)
        max_y = np.nanmax(rot_points[:, 1], axis=1)

        # find the box with the best area
        areas = (max_x - min_x) * (max_y - min_y)
        best_idx = np.argmin(areas)

        # return the best box
        x1 = max_x[best_idx]
        x2 = min_x[best_idx]
        y1 = max_y[best_idx]
        y2 = min_y[best_idx]
        r = rotations[best_idx]

        self.edges = np.zeros((4, 2))
        self.edges[0] = np.dot([x1, y2], r)
        self.edges[1] = np.dot([x2, y2], r)
        self.edges[2] = np.dot([x2, y1], r)
        self.edges[3] = np.dot([x1, y1], r)
        self.phantom_center = np.mean(self.edges, axis=0)

        angle = np.array([
            np.rad2deg(
                np.arctan2(x[1] - self.phantom_center[1],
                           x[0] - self.phantom_center[0])) for x in self.edges
        ])
        indSort = np.argsort(angle)
        angle = np.sort(angle)
        self.edges = self.edges[indSort, :]
        self.phantom_angle = np.mean(angle - (-135, -45, 45, 135))
Example #35
def estimate_numax_acf2d(periodogram,
                         numaxs=None,
                         window_width=None,
                         spacing=None):
    """Estimates the peak of the envelope of seismic oscillation modes, numax,
    using an autocorrelation function.

    There are many papers on the topic of autocorrelation functions for
    estimating seismic parameters, including but not limited to:
    Roxburgh & Vorontsov (2006), Roxburgh (2009), Mosser & Appourchaux (2009),
    Huber et al. (2009), Verner & Roxburgh (2011) & Viani et al. (2019).

    We base this approach first and foremost off the 2D ACF numax estimation
    presented in Viani et al. (2019) and other papers above. A window of
    fixed width (either given by the user, 25 microhertz for Red Giants or
    250 microhertz for Main Sequence stars) is moved along the power
    spectrum, where the central frequency of the window moves in steps of 1
    microhertz (or given by the user as `spacing`) and evaluates the
    autocorrelation at each step.

    The correlation (numpy.correlate) is typically given as:

    C[x, y] = sum( x * conj(y) ) .

    The autocorrelation power of a full spectrum with itself is then

    C = sum(s * s),

    where s is a window of the signal-to-noise spectrum.
    Because of the method of this calculation, we need to first
    rescale the power by subtracting its mean, placing its mean around 0. This
    decreases the noise levels in the ACF, as the autocorrelation of the noise
    with itself will be close to zero.

    In order to evaluate where the correlation power is highest (indicative
    of the power excess of the modes) we calculate the Mean Collapsed
    Correlation (MCC, see Kiefer 2013, Viani et al. 2019) as

    MCC = (sum(|C|) - 1) / nlags ,

    where C is the autocorrelation power at a given central frequency, and
    nlags is the number of lags in the autocorrelation.

    The MCC metric is convolved with an Astropy Gaussian 1D Kernel with a
    standard deviation of 1/5th of the window size to smooth it. The
    frequency that results in the highest value of the smoothed MCC is the
    detected numax.

    NOTE: This method is not robust against large peaks in the spectrum (due
    to e.g. spacecraft rotation), nor is it robust in the case of low signal
    to noise (such as for single sector TESS data). Exercise caution when
    using this module!

    NOTE: This function is intended for use with solar like Main Sequence
    and Red Giant Branch oscillators only.

    Parameters
    ----------
    numaxs : array-like
        An array of numaxs at which to evaluate the autocorrelation. If
        none is given, a sensible range will be chosen. If no units are
        given it is assumed to be in the same units as the periodogram
        frequency.
    window_width : int or float
        The width of the autocorrelation window around each central
        frequency in 'numaxs'. If none is given, a sensible value will be
        chosen. If no units are given it is assumed to be in the same units
        as the periodogram frequency.
    spacing : int or float
        The spacing between central frequencies (numaxs) at which the
        autocorrelation is evaluated. If none is given, a sensible value
        will be assumed. If no units are given it is assumed to be in the
        same units as the periodogram frequency.

    Returns
    -------
    numax : `SeismologyQuantity`
        The numax of the periodogram. In the units of the periodogram object
        frequency.
    """
    # Calculate the window_width size

    #C: What is this doing? Why have these values been picked? This function is slow.
    if window_width is None:
        if u.Quantity(periodogram.frequency[-1], u.microhertz) > u.Quantity(
                500., u.microhertz):
            window_width = u.Quantity(250., u.microhertz).to(
                periodogram.frequency.unit).value
        else:
            window_width = u.Quantity(25., u.microhertz).to(
                periodogram.frequency.unit).value

    # Calculate the spacing size
    if spacing is None:
        if u.Quantity(periodogram.frequency[-1], u.microhertz) > u.Quantity(
                500., u.microhertz):
            spacing = u.Quantity(10., u.microhertz).to(
                periodogram.frequency.unit).value
        else:
            spacing = u.Quantity(1., u.microhertz).to(
                periodogram.frequency.unit).value

    # Run some checks on the inputs
    window_width = u.Quantity(window_width, periodogram.frequency.unit).value
    spacing = u.Quantity(spacing, periodogram.frequency.unit).value
    if numaxs is None:
        numaxs = np.arange(
            np.ceil(np.nanmin(periodogram.frequency.value)) + window_width / 2,
            np.floor(np.nanmax(periodogram.frequency.value)) -
            window_width / 2, spacing)
    numaxs = u.Quantity(numaxs, periodogram.frequency.unit).value
    if not hasattr(numaxs, '__iter__'):
        numaxs = np.asarray([numaxs])

    fs = np.median(np.diff(periodogram.frequency.value))
    # Perform checks on spacing and window_width
    for var, label in zip([np.asarray(window_width),
                           np.asarray(spacing)], ['window_width', 'spacing']):
        if (var < fs).any():
            raise ValueError("You can't have {} smaller than the "
                             "frequency separation!".format(label))
        if (var > (periodogram.frequency[-1].value -
                   periodogram.frequency[0].value)).any():
            raise ValueError("You can't have {} wider than the entire "
                             "power spectrum!".format(label))
        if (var < 0).any():
            raise ValueError(
                "Please pass an entirely positive {}.".format(label))

    # Perform checks on numaxs
    if any(numaxs < fs):
        raise ValueError("A custom range of numaxs can not extend below "
                         "a single frequency bin.")
    if any(numaxs > np.nanmax(periodogram.frequency.value)):
        raise ValueError("A custom range of numaxs can not extend above "
                         "the highest frequency value in the periodogram.")

    # We want to find the numax which returns in the highest autocorrelation
    # power, rescaled based on filter width
    fs = np.median(np.diff(periodogram.frequency.value))

    metric = np.zeros(len(numaxs))
    acf2d = np.zeros([int(window_width / 2 / fs) * 2, len(numaxs)])
    for idx, numax in enumerate(numaxs):
        acf = utils.autocorrelate(
            periodogram,
            numax,
            window_width=window_width,
            frequency_spacing=fs)  #Return the acf at this numax
        acf2d[:, idx] = acf  #Store the 2D acf
        metric[idx] = (np.sum(np.abs(acf)) - 1) / len(
            acf)  #Store the max acf power normalised by the length

    # Smooth the data to find the peak
    # Gaussian1D kernel takes a standard deviation in unitless indices. A stddev
    # of sqrt(len(numaxs)) will result in a smoothing kernel that works for all
    # resolutions of numax.
    if len(numaxs) > 10:
        g = Gaussian1DKernel(stddev=np.sqrt(len(numaxs)))
        metric_smooth = convolve(metric, g, boundary='extend')
    else:
        metric_smooth = metric

    # The highest value of the metric corresponds to numax
    best_numax = numaxs[np.argmax(metric_smooth)]
    best_numax = u.Quantity(best_numax, periodogram.frequency.unit)

    # Create and return the object containing the result and diagnostics
    diagnostics = {
        'numaxs': numaxs,
        'acf2d': acf2d,
        'window_width': window_width,
        'metric': metric,
        'metric_smooth': metric_smooth
    }
    result = SeismologyQuantity(best_numax,
                                name="numax",
                                method="ACF2D",
                                diagnostics=diagnostics,
                                diagnostics_plot_method=diagnose_numax_acf2d)
    return result
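A toy sketch of the MCC metric from the docstring, using numpy.correlate as a stand-in for utils.autocorrelate (an assumption; the real helper also handles windowing and frequency spacing):

import numpy as np

s = np.array([0.2, 1.0, -0.5, 0.8, -0.3])     # toy signal-to-noise window
s = s - np.mean(s)                            # place the mean around 0 first
acf = np.correlate(s, s, mode='full')         # autocorrelation with itself
mcc = (np.sum(np.abs(acf)) - 1) / len(acf)    # MCC = (sum(|C|) - 1) / nlags
# estimate_numax_acf2d evaluates this metric for a window centred on each
# candidate numax and smooths it before picking the peak.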
Example #36
    def update(self, data_changed=True, **kwargs):
        if data_changed:
            # When working with lazy signals the following may reread the data
            # from disk unnecessarily, for example when updating the image just
            # to recompute the histogram to adjust the contrast. In those cases
            # use `data_changed=True`.
            _logger.debug("Updating image slowly because `data_changed=True`")
            self._update_data()
        data = self._current_data
        optimize_contrast = kwargs.pop("optimize_contrast", False)
        if rgb_tools.is_rgbx(data):
            self.colorbar = False
            data = rgb_tools.rgbx2regular_array(data, plot_friendly=True)
            data = self._current_data = data
            self._is_rgb = True
        ims = self.ax.images
        # update extent:
        self._extent = (self.xaxis.axis[0] - self.xaxis.scale / 2.,
                        self.xaxis.axis[-1] + self.xaxis.scale / 2.,
                        self.yaxis.axis[-1] + self.yaxis.scale / 2.,
                        self.yaxis.axis[0] - self.yaxis.scale / 2.)

        # Turn on centre_colormap if a diverging colormap is used.
        if not self._is_rgb and self.centre_colormap == "auto":
            if "cmap" in kwargs:
                cmap = kwargs["cmap"]
            elif ims:
                cmap = ims[0].get_cmap().name
            else:
                cmap = plt.cm.get_cmap().name
            if cmap in utils.MPL_DIVERGING_COLORMAPS:
                self.centre_colormap = True
            else:
                self.centre_colormap = False
        redraw_colorbar = False

        for marker in self.ax_markers:
            marker.update()

        if not self._is_rgb:

            def format_coord(x, y):
                try:
                    col = self.xaxis.value2index(x)
                except ValueError:  # out of axes limits
                    col = -1
                try:
                    row = self.yaxis.value2index(y)
                except ValueError:
                    row = -1
                if col >= 0 and row >= 0:
                    z = data[row, col]
                    if np.isfinite(z):
                        return f'x={x:1.4g}, y={y:1.4g}, intensity={z:1.4g}'
                return f'x={x:1.4g}, y={y:1.4g}'

            self.ax.format_coord = format_coord

            old_vmin, old_vmax = self.vmin, self.vmax
            self.optimize_contrast(data, optimize_contrast)
            # Use _vmin_auto and _vmax_auto if optimize_contrast is True
            if optimize_contrast:
                vmin, vmax = self._vmin_auto, self._vmax_auto
            else:
                vmin, vmax = self.vmin, self.vmax
            # If there is an image, any of the contrast bounds have changed and
            # the new contrast bounds are not the same redraw the colorbar.
            if (ims and (old_vmin != vmin or old_vmax != vmax)
                    and vmin != vmax):
                redraw_colorbar = True
                ims[0].autoscale()
            if self.centre_colormap:
                vmin, vmax = utils.centre_colormap_values(vmin, vmax)
            else:
                vmin, vmax = vmin, vmax

            if self.norm == 'auto' and self.gamma != 1.0:
                self.norm = 'power'
            norm = copy.copy(self.norm)
            if norm == 'power':
                # with auto norm, we use the power norm when gamma differs from its
                # default value.
                norm = PowerNorm(self.gamma, vmin=vmin, vmax=vmax)
            elif norm == 'log':
                if np.nanmax(data) <= 0:
                    raise ValueError(
                        'All displayed data are <= 0 and can not '
                        'be plotted using `norm="log"`. '
                        'Use `norm="symlog"` to plot on a log scale.')
                if np.nanmin(data) <= 0:
                    vmin = np.nanmin(np.where(data > 0, data, np.inf))

                norm = LogNorm(vmin=vmin, vmax=vmax)
            elif norm == 'symlog':
                norm = SymLogNorm(linthresh=self.linthresh,
                                  linscale=self.linscale,
                                  vmin=vmin,
                                  vmax=vmax)
            elif inspect.isclass(norm) and issubclass(norm, Normalize):
                norm = norm(vmin=vmin, vmax=vmax)
            elif norm not in ['auto', 'linear']:
                raise ValueError(
                    "`norm` parameter should be 'auto', 'linear', "
                    "'log', 'symlog' or a matplotlib Normalize "
                    "instance or subclass.")
            else:
                # set back to matplotlib default
                norm = None
        redraw_colorbar = redraw_colorbar and self.colorbar

        if self.plot_indices is True:
            self._text.set_text(self.axes_manager.indices)
        if self.no_nans:
            data = np.nan_to_num(data)

        if ims:  # the images has already been drawn previously
            ims[0].set_data(data)
            self.ax.set_xlim(self._extent[:2])
            self.ax.set_ylim(self._extent[2:])
            ims[0].set_extent(self._extent)
            self._calculate_aspect()
            self.ax.set_aspect(self._aspect)
            if not self._is_rgb:
                ims[0].set_norm(norm)
                ims[0].norm.vmax, ims[0].norm.vmin = vmax, vmin
            if redraw_colorbar:
                # ims[0].autoscale()
                self._colorbar.draw_all()
                self._colorbar.solids.set_animated(
                    self.figure.canvas.supports_blit)
            else:
                ims[0].changed()
            if self.figure.canvas.supports_blit:
                self._update_animated()
            else:
                self.figure.canvas.draw_idle()
        else:  # no signal have been drawn yet
            new_args = {
                'interpolation': 'nearest',
                'extent': self._extent,
                'aspect': self._aspect,
                'animated': self.figure.canvas.supports_blit,
            }
            if not self._is_rgb:
                new_args.update({'vmin': vmin, 'vmax': vmax, 'norm': norm})
            new_args.update(kwargs)
            self.ax.imshow(data, **new_args)
            self.figure.canvas.draw_idle()

        if self.axes_ticks == 'off':
            self.ax.set_axis_off()
Example #37
    def testRasterScaling(self):
        """Raster layers can be scaled when resampled

        This is a test for ticket #52

        Native test .asc data has

        Population_Jakarta_geographic.asc
        ncols         638
        nrows         649
        cellsize      0.00045228819716044

        Population_2010.asc
        ncols         5525
        nrows         2050
        cellsize      0.0083333333333333

        Scaling is necessary for raster data that represents density
        such as population per km^2
        """

        for myFilename in [
                'Population_Jakarta_geographic.asc', 'Population_2010.asc'
        ]:

            myRasterPath = ('%s/%s' % (TESTDATA, myFilename))

            # Get reference values
            mySafeLayer = readSafeLayer(myRasterPath)
            myMinimum, myMaximum = mySafeLayer.get_extrema()
            del myMaximum
            del myMinimum
            myNativeResolution = mySafeLayer.get_resolution()

            # Get the Hazard extents as an array in EPSG:4326
            myBoundingBox = mySafeLayer.get_bounding_box()

            # Test for a range of resolutions
            for myResolution in [
                    0.02,
                    0.01,
                    0.005,
                    0.002,
                    0.001,
                    0.0005,  # Coarser
                    0.0002
            ]:  # Finer

                # To save time only do two resolutions for the
                # large population set
                if myFilename.startswith('Population_2010'):
                    if myResolution > 0.01 or myResolution < 0.005:
                        break

                # Clip the raster to the bbox
                myExtraKeywords = {'resolution': myNativeResolution}
                myRasterLayer = QgsRasterLayer(myRasterPath, 'xxx')
                myResult = clip_layer(myRasterLayer,
                                      myBoundingBox,
                                      myResolution,
                                      extra_keywords=myExtraKeywords)

                mySafeLayer = readSafeLayer(myResult.source())
                myNativeData = mySafeLayer.get_data(scaling=False)
                myScaledData = mySafeLayer.get_data(scaling=True)

                mySigma = (mySafeLayer.get_resolution()[0] /
                           myNativeResolution[0])**2

                # Compare extrema
                myExpectedScaledMax = mySigma * numpy.nanmax(myNativeData)
                myMessage = ('Resampled raster was not rescaled correctly: '
                             'max(myScaledData) was %f but expected %f' %
                             (numpy.nanmax(myScaledData), myExpectedScaledMax))

                # FIXME (Ole): The rtol used to be 1.0e-8 -
                #              now it has to be 1.0e-6, otherwise we get
                #              max(myScaledData) was 12083021.000000 but
                #              expected 12083020.414316
                #              Is something being rounded to the nearest
                #              integer?
                assert numpy.allclose(myExpectedScaledMax,
                                      numpy.nanmax(myScaledData),
                                      rtol=1.0e-6,
                                      atol=1.0e-8), myMessage

                myExpectedScaledMin = mySigma * numpy.nanmin(myNativeData)
                myMessage = ('Resampled raster was not rescaled correctly: '
                             'min(myScaledData) was %f but expected %f' %
                             (numpy.nanmin(myScaledData), myExpectedScaledMin))
                assert numpy.allclose(myExpectedScaledMin,
                                      numpy.nanmin(myScaledData),
                                      rtol=1.0e-8,
                                      atol=1.0e-12), myMessage

                # Compare elementwise
                myMessage = 'Resampled raster was not rescaled correctly'
                assert nanallclose(myNativeData * mySigma,
                                   myScaledData,
                                   rtol=1.0e-8,
                                   atol=1.0e-8), myMessage

                # Check that it also works with manual scaling
                myManualData = mySafeLayer.get_data(scaling=mySigma)
                myMessage = 'Resampled raster was not rescaled correctly'
                assert nanallclose(myManualData,
                                   myScaledData,
                                   rtol=1.0e-8,
                                   atol=1.0e-8), myMessage

                # Check that an exception is raised for bad arguments
                try:
                    mySafeLayer.get_data(scaling='bad')
                except GetDataError:
                    pass
                else:
                    myMessage = 'String argument should have raised exception'
                    raise Exception(myMessage)

                try:
                    mySafeLayer.get_data(scaling='(1, 3)')
                except GetDataError:
                    pass
                else:
                    myMessage = 'Tuple argument should have raised exception'
                    raise Exception(myMessage)

                # Check None option without keyword datatype == 'density'
                mySafeLayer.keywords['datatype'] = 'undefined'
                myUnscaledData = mySafeLayer.get_data(scaling=None)
                myMessage = 'Data should not have changed'
                assert nanallclose(myNativeData,
                                   myUnscaledData,
                                   rtol=1.0e-12,
                                   atol=1.0e-12), myMessage

                # Try with None and density keyword
                mySafeLayer.keywords['datatype'] = 'density'
                myUnscaledData = mySafeLayer.get_data(scaling=None)
                myMessage = 'Resampled raster was not rescaled correctly'
                assert nanallclose(myScaledData,
                                   myUnscaledData,
                                   rtol=1.0e-12,
                                   atol=1.0e-12), myMessage

                mySafeLayer.keywords['datatype'] = 'counts'
                myUnscaledData = mySafeLayer.get_data(scaling=None)
                myMessage = 'Data should not have changed'
                assert nanallclose(myNativeData,
                                   myUnscaledData,
                                   rtol=1.0e-12,
                                   atol=1.0e-12), myMessage
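The scaling relation being tested can be checked in isolation: resampling a density raster multiplies cell values by sigma = (new_resolution / native_resolution)**2. A toy check with made-up numbers:

import numpy as np

native_res = 0.00083333333333333   # degrees per cell (illustrative)
new_res = 0.0083333333333333
sigma = (new_res / native_res) ** 2

native_data = np.array([[1.0, 2.0], [3.0, np.nan]])
scaled_data = native_data * sigma

# Extrema scale by the same factor, NaNs are ignored
assert np.isclose(np.nanmax(scaled_data), sigma * np.nanmax(native_data))
assert np.isclose(np.nanmin(scaled_data), sigma * np.nanmin(native_data))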
Example #38
0
def minmax(x):
    '''Return tuple of the (finite) max and min of an array.'''
    return np.nanmin(x), np.nanmax(x)
def _scale_to_unit_interval(ndar, eps=1e-8):
    """ Scales all values in the ndarray ndar to be between 0 and 1 """
    ndar = ndar.copy()
    ndar -= np.nanmin(ndar)
    ndar *= 1.0 / (np.nanmax(ndar) + eps)
    return ndar
Example #40
0
def getVelocityProfile(dat, vels_in):
    """

    Map the layered velocity structure into the shape of the data.

    Parameters
    ---------
    dat: data as a dictionary in the ImpDAR format
    vels_in: v(x,z)
        Up to 2-D array with three columns for velocities (m/s), and z/x (m).
        Array structure is velocities in first column, z location in second, x location in third.
        If uniform velocity (i.e. vel=constant) input constant
        If layered velocity (i.e. vel=v(z)) input array with shape (#vel-points, 2) (i.e. no x-values)

    Output
    ---------
    vmig: 2-D array of migration velocities (m/s), shape is (#traces, #samples).
        If constant input velocity, output is constant.
        If only z-component in input velocity array, output is v(z)

    """

    # return the input value if it is a constant
    if not hasattr(vels_in, "__len__"):
        return vels_in

    start = time.time()
    print('Interpolating the velocity profile.')

    if len(np.shape(vels_in)) != 2 or np.shape(vels_in)[1] == 1:
        raise ValueError(
            'If non-constant vel, input needs to be 2d (v, z) or (v, z, x)')
    nlay, dimension = np.shape(vels_in)
    vel_v = vels_in[:, 0]
    vel_z = vels_in[:, 1]

    twtt = dat.travel_time.copy() / 1.0e6
    ### Layered Velocity
    if nlay == 1:
        raise ValueError(
            'It does not make sense to only give one layer of velocity--if you want constant velocity just input v'
        )
    elif dimension == 2:
        zs = np.max(
            vel_v) / 2. * twtt  # depth array for maximum possible penetration
        zs[0] = twtt[0] * vel_v[0] / 2.
        # If an input point is closest to a boundary push it to the boundary.
        # Pushing can suppress some desired errors, so use this check to guard against that:
        if (vel_z[0] > 1.1 * np.nanmin(zs) and vel_z[0] / np.nanmax(zs) >
                1.0e-3) or vel_z[-1] * 1.1 < np.nanmax(zs):
            raise ValueError(
                "Your velocity data doesn't come close to covering the depths in the data"
            )
        if vel_z[0] > np.nanmin(zs):
            vel_v = np.insert(vel_v, 0, vel_v[np.argmin(vel_z)])
            vel_z = np.insert(vel_z, 0, np.nanmin(zs))
        if vel_z[-1] < np.nanmax(zs):
            vel_v = np.append(vel_v, vel_v[np.argmax(vel_z)])
            vel_z = np.append(vel_z, np.nanmax(zs))
        # Compute times from input velocity/location array (vels_in)
        vel_t = 2. * vel_z / vel_v
        # Interpolate to get t(z) for maximum penetration depth array
        tinterp = interp1d(vel_z, vel_t)
        tofz = tinterp(zs)
        # Compute z(t) from monotonically increasing t
        zinterp = interp1d(tofz, zs)
        zoft = zinterp(twtt)
        # Compute vmig(t) from z(t)
        vmig = 2. * np.gradient(zoft, twtt)

    ### Lateral Velocity Variations TODO: I need to check this more rigorously too.
    elif dimension == 3:
        vel_x = vels_in[:, 2]  # Input velocities
        # Depth array for largest penetration range
        zs = np.linspace(
            np.min(vel_v) * twtt[0],
            np.max(vel_v) * twtt[-1], dat.snum) / 2.
        # Use nearest neighbor interpolation to grid the input points onto a mesh
        if dat.dist is None or all(dat.dist == 0):
            raise ValueError('The distance vector was never set.')
        XS, ZS = np.meshgrid(dat.dist, zs)
        VS = griddata(np.transpose([vel_x, vel_z]),
                      vel_v,
                      np.transpose([XS.flatten(), ZS.flatten()]),
                      method='nearest')
        VS = np.reshape(VS, np.shape(XS))

        # convert velocities into travel_time space for all traces
        vmig = np.zeros_like(VS)
        for i in range(dat.tnum):
            vel_z = ZS[:, i]
            vel_v = VS[:, i]
            # Compute times from input velocity/location array (vels_in)
            vel_t = 2 * np.array(
                [np.trapz(1. / vel_v[:j], vel_z[:j]) for j in range(dat.snum)])
            # Interpolate to get t(z) for maximum penetration depth array
            tinterp = interp1d(ZS[:, i], vel_t)
            tofz = tinterp(zs)
            # Compute z(t) from monotonically increasing t
            zinterp = interp1d(tofz, zs)
            if twtt[-1] > tofz[-1]:
                raise ValueError(
                    'Two-way travel time array extends outside of interpolation range'
                )
            zoft = zinterp(twtt)
            # Compute vmig(t) from z(t)
            vmig[:, i] = 2. * np.gradient(zoft, twtt)
    else:
        # We get here if the number of columns is bad
        raise ValueError('Input must be 2d with 2 or 3 columns')

    print('Velocity profile finished in %.2f seconds.' % (time.time() - start))

    return vmig
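A self-contained sketch of the layered (v, z) branch above, assuming only numpy and scipy; the velocity profile and travel-time axis are made-up stand-ins for `vels_in` and `dat.travel_time`.

import numpy as np
from scipy.interpolate import interp1d

# Hypothetical layered profile: velocity (m/s) in column 0, depth (m) in column 1
vels_in = np.array([[1.69e8, 0.0],
                    [1.69e8, 500.0],
                    [1.68e8, 2000.0]])
vel_v, vel_z = vels_in[:, 0], vels_in[:, 1]

# Two-way travel time axis (s), standing in for dat.travel_time / 1.0e6
twtt = np.linspace(1e-7, 2e-5, 500)

# Depth axis for maximum possible penetration, then t(z), z(t) and vmig(t) = 2*dz/dt
zs = np.max(vel_v) / 2. * twtt
zs[0] = twtt[0] * vel_v[0] / 2.
vel_t = 2. * vel_z / vel_v
tofz = interp1d(vel_z, vel_t)(np.clip(zs, vel_z[0], vel_z[-1]))
zoft = interp1d(tofz, zs)(np.clip(twtt, tofz[0], tofz[-1]))
vmig = 2. * np.gradient(zoft, twtt)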
Example #41
0
    def update(self, force_replot=False, render_figure=True,
               update_ylimits=False):
        """Update the current spectrum figure

        Parameters:
        -----------
        force_replot : bool
            If True, close and open the figure. Default is False.
        render_figure : bool
            If True, render the figure. Useful to avoid firing matplotlib
            drawing events too often. Default is True.
        update_ylimits : bool
            If True, update the y-limits. This is useful to avoid the figure
            flickering when different lines update the y-limits consecutively,
            in which case, this is done in `Signal1DFigure.update`.
            Default is False.

        """
        if force_replot is True:
            self.close()
            self.plot(data_function_kwargs=self.data_function_kwargs,
                      norm=self.norm)

        self._y_min, self._y_max = self.ax.get_ylim()
        ydata = self._get_data()
        old_xaxis = self.line.get_xdata()
        if len(old_xaxis) != self.axis.size or \
                np.any(np.not_equal(old_xaxis, self.axis.axis)):
            self.line.set_data(self.axis.axis, ydata)
        else:
            self.line.set_ydata(ydata)

        if 'x' in self.autoscale:
            self.ax.set_xlim(self.axis.axis[0], self.axis.axis[-1])

        if 'v' in self.autoscale:
            self.ax.relim()
            y1, y2 = np.searchsorted(self.axis.axis,
                                     self.ax.get_xbound())
            y2 += 2
            y1, y2 = np.clip((y1, y2), 0, len(ydata - 1))
            clipped_ydata = ydata[y1:y2]
            with ignore_warning(category=RuntimeWarning):
                # In case of "All-NaN slices"
                y_max, y_min = (np.nanmax(clipped_ydata),
                                np.nanmin(clipped_ydata))

            if self._plot_imag:
                # Add real plot
                yreal = self._get_data(real_part=True)
                clipped_yreal = yreal[y1:y2]
                with ignore_warning(category=RuntimeWarning):
                    # In case of "All-NaN slices"
                    y_min = min(y_min, np.nanmin(clipped_yreal))
                    y_max = max(y_max, np.nanmax(clipped_yreal))
            if y_min == y_max:
                # To avoid matplotlib UserWarning when calling `set_ylim`
                y_min, y_max = y_min - 0.1, y_max + 0.1
            if not np.isfinite(y_min):
                y_min = None  # data are -inf or all NaN
            if not np.isfinite(y_max):
                y_max = None  # data are inf or all NaN
            if y_min is not None:
                self._y_min = y_min
            if y_max is not None:
                self._y_max = y_max
            if update_ylimits:
                # Most of the time, we don't want to call `set_ylim` now to
                # avoid flickering of the figure. However, we use the values
                # `self._y_min` and `self._y_max` in `Signal1DFigure.update`
                self.ax.set_ylim(self._y_min, self._y_max)

        if self.plot_indices is True:
            self.text.set_text(self.axes_manager.indices)

        if render_figure:
            self.ax.hspy_fig.render_figure()
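A stripped-down sketch of the 'v' autoscale branch above, using the standard warnings module in place of `ignore_warning`; the array values are synthetic.

import warnings
import numpy as np

def visible_ylim(xaxis, ydata, xbound):
    """Compute y-limits for the slice of ydata visible within xbound, ignoring NaNs."""
    y1, y2 = np.searchsorted(xaxis, xbound)
    y2 += 2
    y1, y2 = np.clip((y1, y2), 0, len(ydata))
    clipped = ydata[y1:y2]
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', RuntimeWarning)  # in case of all-NaN slices
        y_min, y_max = np.nanmin(clipped), np.nanmax(clipped)
    if y_min == y_max:
        y_min, y_max = y_min - 0.1, y_max + 0.1
    return y_min, y_max

x = np.linspace(0, 10, 101)
y = np.sin(x)
y[::7] = np.nan
print(visible_ylim(x, y, (2.0, 4.0)))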
Example #42
0
def cldslice(pcolno2,cldtophgt):

    """ 
    Compute upper troposphere NO2 using partial columns above
    cloudy scenes. 

    Determine NO2 mixing ratio by regressing NO2 partial columns
    against cloud-top heights over cloudy scenes.

    INPUT: vectors of partial columns in molec/m2 and corresponding 
           cloud top heights in hPa.

    OUTPUT: NO2 volumetric mixing ratio, corresponding estimated error on the
            cloud-sliced NO2 value, a number to identify which filtering
            criteria led to loss of data in the case that the cloud-sliced
            NO2 value is nan, and the mean cloud pressure of data retained
            after 10th and 90th percentile filtering.
    """

    # Initialize:
    utmrno2=0.0
    utmrno2err=0.0
    error_state=0

    # Define factor to convert slope of NO2 partial column vs pressure
    # to VMR:
    den2mr=np.divide((np.multiply(g,mmair)),na)

    # Get 10th and 90th percentiles of data population:
    p10=np.percentile(pcolno2,10)
    p90=np.percentile(pcolno2,90)

    # Remove outliers determined as falling outside the 10th and 90th
    # percentile range. Skipping this step, or using the 5th and 95th
    # percentiles instead, leads to an overestimate in cloud-sliced UT NO2
    # compared to the "truth":
    sind=np.where((pcolno2>p10)&(pcolno2<p90))[0]
    # Trim the data to remove outliers:
    pcolno2=pcolno2[sind]
    cldtophgt=cldtophgt[sind]

    # Cloud pressure mean:
    mean_cld_pres=np.mean(cldtophgt)

    # Get number of points in vector:
    npoints=len(cldtophgt)

    # Only consider data with more than 10 points for reasonably
    # robust statistics. This step is added to account for data loss
    # from removing outliers:
    if npoints<=10:
        error_state=1
        utmrno2=np.nan
        utmrno2err=np.nan
        return (utmrno2, utmrno2err, error_state, mean_cld_pres)

    # Get cloud top height standard deviation:
    stdcld=np.std(cldtophgt)
    # Get cloud top height range:
    diffcld=np.nanmax(cldtophgt)-np.nanmin(cldtophgt)

    # Only consider scenes with a dynamic range of clouds:
    # (i) Cloud range:
    if diffcld<=140:
        error_state=2
        utmrno2=np.nan
        utmrno2err=np.nan
        return (utmrno2, utmrno2err, error_state, mean_cld_pres)

    # (ii) Cloud standard deviation:
    if stdcld<=30:
        error_state=3
        utmrno2=np.nan
        utmrno2err=np.nan
        return (utmrno2, utmrno2err, error_state, mean_cld_pres)

    # Get regression statistics:
    # Partial NO2 column (molec/m2) vs cloud top height (hPa):
    # 300 iterations of regression chosen to compromise between
    # statistics and computational efficiency:
    result=rma(cldtophgt*1e2,pcolno2,len(pcolno2),300)

    # Remove data with relative error > 100%:
    if np.absolute(np.divide(result[2], result[0]))>1.0:
        error_state=4
        utmrno2=np.nan
        utmrno2err=np.nan
        return (utmrno2, utmrno2err, error_state, mean_cld_pres)

    # Account for negative values:
    # Set points with sum of slope and error less than zero to nan.
    # This is to account for noise in the data that hovers near zero.
    if result[0]<0 and (not np.isnan(utmrno2)):
        if (np.add(result[0],result[2])<0):
            error_state=5
            utmrno2=np.nan
            utmrno2err=np.nan
            return (utmrno2, utmrno2err, error_state, mean_cld_pres)

    # Proceed with estimating NO2 mixing ratios for retained data:
    #if not np.isnan(utmrno2):
    slope=result[0]
    #slope=np.multiply(slope,sf)
    slope_err=result[2]
    #slope_err=np.multiply(slope_err,sf)
    # Convert slope to mol/mol:
    utmrno2=np.multiply(slope,den2mr)
    # Convert error to mol/mol:
    utmrno2err=np.multiply(slope_err,den2mr)
    # Convert UT NO2 from mol/mol to ppt:
    utmrno2=np.multiply(utmrno2,1e+12)
    # Convert UT NO2 error from mol/mol to ppt
    utmrno2err=np.multiply(utmrno2err,1e+12)

    # Finally, remove outliers in the cloud-sliced NO2
    # 200 pptv threshold is chosen, as far from likely range.
    # Scale factor applied to TROPOMI UT NO2 to account for
    # positive bias in free tropospheric NO2:
    if utmrno2>200:
        error_state=6
        utmrno2=np.nan
        utmrno2err=np.nan
        return (utmrno2, utmrno2err, error_state, mean_cld_pres)
    else:
        return (utmrno2, utmrno2err, error_state, mean_cld_pres)
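A compact sketch of the cloud-slicing idea above on synthetic data, using np.polyfit as a stand-in for the `rma` regression and illustrative values for the constants g, mmair and na (all assumptions, not taken from the source).

import numpy as np

# Assumed physical constants (illustrative values)
g = 9.81          # m/s^2
mmair = 28.97e-3  # kg/mol, molar mass of dry air
na = 6.022e23     # molec/mol, Avogadro's number
den2mr = g * mmair / na

# Synthetic cloudy-scene data: partial columns (molec/m2) vs cloud-top pressure (hPa)
rng = np.random.default_rng(0)
cldtophgt = rng.uniform(300., 700., 50)
pcolno2 = 2e-12 / den2mr * cldtophgt * 1e2 + rng.normal(0, 1e13, 50)

# Percentile filtering as above, then regress column against pressure (converted to Pa)
keep = (pcolno2 > np.percentile(pcolno2, 10)) & (pcolno2 < np.percentile(pcolno2, 90))
slope = np.polyfit(cldtophgt[keep] * 1e2, pcolno2[keep], 1)[0]

# Convert the slope to a mixing ratio in ppt
utmrno2_ppt = slope * den2mr * 1e12
print(utmrno2_ppt)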
Example #43
0
def NormalizeData(data):
    return (data - np.nanmin(data)) / (np.nanmax(data) - np.nanmin(data))
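A quick check of the min-max scaling above (values assumed):

import numpy as np

arr = np.array([2.0, np.nan, 5.0, 11.0])
print(NormalizeData(arr))  # ~ [0., nan, 0.333, 1.]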
Example #44
0
def _deblend_source(data,
                    segment_img,
                    npixels,
                    nlevels=32,
                    contrast=0.001,
                    mode='exponential',
                    connectivity=8):
    """
    Deblend a single labeled source.

    Parameters
    ----------
    data : array_like
        The cutout data array for a single source.  ``data`` should also
        already be smoothed by the same filter used in
        :func:`~photutils.segmentation.detect_sources`, if applicable.

    segment_img : `~photutils.segmentation.SegmentationImage`
        A cutout `~photutils.segmentation.SegmentationImage` object with
        the same shape as ``data``.  ``segment_img`` should contain only
        *one* source label.

    npixels : int
        The number of connected pixels, each greater than ``threshold``,
        that an object must have to be detected.  ``npixels`` must be a
        positive integer.

    nlevels : int, optional
        The number of multi-thresholding levels to use.  Each source
        will be re-thresholded at ``nlevels`` levels spaced
        exponentially or linearly (see the ``mode`` keyword) between its
        minimum and maximum values within the source segment.

    contrast : float, optional
        The fraction of the total (blended) source flux that a local
        peak must have (at any one of the multi-thresholds) to be
        considered as a separate object.  ``contrast`` must be between 0
        and 1, inclusive.  If ``contrast = 0`` then every local peak
        will be made a separate object (maximum deblending).  If
        ``contrast = 1`` then no deblending will occur.  The default is
        0.001, which will deblend sources with a 7.5 magnitude
        difference.

    mode : {'exponential', 'linear'}, optional
        The mode used in defining the spacing between the
        multi-thresholding levels (see the ``nlevels`` keyword).  The
        default is 'exponential'.

    connectivity : {8, 4}, optional
        The type of pixel connectivity used in determining how pixels
        are grouped into a detected source.  The options are 8 (default)
        or 4.  8-connected pixels touch along their edges or corners.
        4-connected pixels touch along their edges.  For reference,
        SourceExtractor uses 8-connected pixels.

    Returns
    -------
    segment_image : `~photutils.segmentation.SegmentationImage`
        A segmentation image, with the same shape as ``data``, where
        sources are marked by different positive integer values.  A
        value of zero is reserved for the background.  Note that the
        returned `SegmentationImage` may *not* have consecutive labels.
    """
    from scipy.ndimage import label as ndilabel
    from skimage.segmentation import watershed

    if nlevels < 1:
        raise ValueError(f'nlevels must be >= 1, got "{nlevels}"')
    if contrast < 0 or contrast > 1:
        raise ValueError(f'contrast must be >= 0 and <= 1, got "{contrast}"')

    segm_mask = (segment_img.data > 0)
    source_values = data[segm_mask]
    source_sum = float(np.nansum(source_values))
    source_min = np.nanmin(source_values)
    source_max = np.nanmax(source_values)
    if source_min == source_max:
        return segment_img  # no deblending

    if mode == 'exponential' and source_min < 0:
        warnings.warn(
            f'Source "{segment_img.labels[0]}" contains negative '
            'values, setting deblending mode to "linear"', AstropyUserWarning)
        mode = 'linear'

    steps = np.arange(1., nlevels + 1)
    if mode == 'exponential':
        if source_min == 0:
            source_min = source_max * 0.01
        thresholds = source_min * ((source_max / source_min)**(steps /
                                                               (nlevels + 1)))
    elif mode == 'linear':
        thresholds = source_min + ((source_max - source_min) /
                                   (nlevels + 1)) * steps
    else:
        raise ValueError(f'"{mode}" is an invalid mode; mode must be '
                         '"exponential" or "linear"')

    # suppress NoDetectionsWarning during deblending
    warnings.filterwarnings('ignore', category=NoDetectionsWarning)

    mask = ~segm_mask
    segments = _detect_sources(data,
                               thresholds,
                               npixels=npixels,
                               connectivity=connectivity,
                               mask=mask,
                               deblend_skip=True)

    selem = _make_binary_structure(data.ndim, connectivity)

    # define the sources (markers) for the watershed algorithm
    nsegments = len(segments)
    if nsegments == 0:  # no deblending
        return segment_img
    else:
        for i in range(nsegments - 1):
            segm_lower = segments[i].data
            segm_upper = segments[i + 1].data
            relabel = False
            # if there are more sources at the upper level, then
            # remove the parent source(s) from the lower level,
            # but keep any sources in the lower level that do not have
            # multiple children in the upper level
            for label in segments[i].labels:
                mask = (segm_lower == label)
                # checks for 1-to-1 label mapping n -> m (where m >= 0)
                upper_labels = segm_upper[mask]
                upper_labels = np.unique(upper_labels[upper_labels != 0])
                if upper_labels.size >= 2:
                    relabel = True
                    segm_lower[mask] = segm_upper[mask]

            if relabel:
                segm_new = object.__new__(SegmentationImage)
                segm_new._data = ndilabel(segm_lower, structure=selem)[0]
                segments[i + 1] = segm_new
            else:
                segments[i + 1] = segments[i]

        # Deblend using watershed.  If any sources do not meet the
        # contrast criterion, then remove the faintest such source and
        # repeat until all sources meet the contrast criterion.
        markers = segments[-1].data
        mask = segment_img.data.astype(bool)
        remove_marker = True
        while remove_marker:
            markers = watershed(-data, markers, mask=mask, connectivity=selem)

            labels = np.unique(markers[markers != 0])
            flux_frac = np.array(
                [np.sum(data[markers == label])
                 for label in labels]) / source_sum
            remove_marker = any(flux_frac < contrast)

            if remove_marker:
                # remove only the faintest source (one at a time)
                # because several faint sources could combine to meet the
                # contrast criterion
                markers[markers == labels[np.argmin(flux_frac)]] = 0.

        segm_new = object.__new__(SegmentationImage)
        segm_new._data = markers
        return segm_new
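The multi-threshold level computation above, isolated into a small helper for illustration (values assumed):

import numpy as np

def deblend_thresholds(source_min, source_max, nlevels=32, mode='exponential'):
    # nlevels thresholds spaced between the source minimum and maximum
    steps = np.arange(1., nlevels + 1)
    if mode == 'exponential':
        if source_min == 0:
            source_min = source_max * 0.01
        return source_min * (source_max / source_min) ** (steps / (nlevels + 1))
    elif mode == 'linear':
        return source_min + (source_max - source_min) / (nlevels + 1) * steps
    raise ValueError('mode must be "exponential" or "linear"')

print(deblend_thresholds(1.0, 100.0, nlevels=4, mode='exponential'))
print(deblend_thresholds(1.0, 100.0, nlevels=4, mode='linear'))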
Example #45
0
def save_memmap_chunks(filename,
                       base_name='Yr',
                       resize_fact=(1, 1, 1),
                       remove_init=0,
                       idx_xy=None,
                       order='F',
                       xy_shifts=None,
                       is_3D=False,
                       add_to_movie=0,
                       border_to_0=0,
                       n_chunks=1):
    """ Saves efficiently a list of tif files into a memory mappable file
    Parameters
    ----------
        filenames: list
            list of tif files
        base_name: str
            the base used to build the file name. IT MUST NOT CONTAIN "_"    
        resize_fact: tuple
            x, y, and z downsampling factors (0.5 means downsampled by a factor 2)
        remove_init: int
            number of frames to remove at the beginning of each tif file (used for resonant scanning images if the laser is turned on trial by trial)
        idx_xy: tuple size 2 [or 3 for 3D data]
            for selecting slices of the original FOV, for instance idx_xy=(slice(150,350,None),slice(150,350,None))
        order: string
            whether to save the file in 'C' or 'F' order     
        xy_shifts: list 
            x and y shifts computed by a motion correction algorithm to be applied before memory mapping    

        is_3D: boolean
            whether it is 3D data
    Return
    -------
        fname_new: the name of the mapped file, the format is such that the name will contain the frame dimensions and the number of frames

    """

    #TODO: can be done online
    print(filename)

    Yr = cm.load(filename, fr=1)

    T, dims = Yr.shape[0], Yr.shape[1:]
    step = np.int(old_div(T, n_chunks))
    bins = []

    for i in range(0, T, step):
        bins.append(i)
    bins.append(T)

    for j in range(0, len(bins) - 1):
        tmp = np.array(Yr[bins[j]:bins[j + 1], :, :])
        if xy_shifts is not None:
            tmp = tmp.apply_shifts(xy_shifts,
                                   interpolation='cubic',
                                   remove_blanks=False)

        if idx_xy is None:
            if remove_init > 0:
                tmp = np.array(tmp)[remove_init:]
        elif len(idx_xy) == 2:
            tmp = np.array(tmp)[remove_init:, idx_xy[0], idx_xy[1]]
        else:
            raise Exception('You need to set is_3D=True for 3D data')
            tmp = np.array(tmp)[remove_init:, idx_xy[0], idx_xy[1], idx_xy[2]]

        if border_to_0 > 0:
            min_mov = np.nanmin(tmp)
            tmp[:, :border_to_0, :] = min_mov
            tmp[:, :, :border_to_0] = min_mov
            tmp[:, :, -border_to_0:] = min_mov
            tmp[:, -border_to_0:, :] = min_mov

        fx, fy, fz = resize_fact
        if fx != 1 or fy != 1 or fz != 1:

            tmp = cm.movie(tmp, fr=1)
            tmp = tmp.resize(fx=fx, fy=fy, fz=fz)

        Tc, dimsc = tmp.shape[0], tmp.shape[1:]
        tmp = np.transpose(tmp, list(range(1, len(dimsc) + 1)) + [0])
        tmp = np.reshape(tmp, (np.prod(dimsc), Tc), order='F')

        if j == 0:
            fname_tot = base_name + '_d1_' + str(dims[0]) + '_d2_' + str(
                dims[1]) + '_d3_' + str(
                    1 if len(dims) == 2 else dims[2]) + '_order_' + str(order)
            fname_tot = os.path.join(os.path.split(filename)[0], fname_tot)
            big_mov = np.memmap(fname_tot,
                                mode='w+',
                                dtype=np.float32,
                                shape=(np.prod(dims), T),
                                order=order)
        else:
            big_mov = np.memmap(fname_tot,
                                dtype=np.float32,
                                mode='r+',
                                shape=(np.prod(dims), T),
                                order=order)
        #    np.save(fname[:-3]+'npy',np.asarray(Yr))

        big_mov[:, bins[j]:bins[j + 1]] = np.asarray(
            tmp, dtype=np.float32) + 1e-10 + add_to_movie
        big_mov.flush()
        del big_mov

#    if ref+step+1<d:
#        print 'running on remaining pixels:' + str(ref+step-d)
#        pars.append([fname_tot,d,tot_frames,mmap_fnames,ref+step,d])

    fname_new = fname_tot + '_frames_' + str(T) + '_.mmap'
    os.rename(fname_tot, fname_new)

    return fname_new
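A minimal sketch of the chunked memmap-writing pattern above, with a plain numpy array standing in for the movie object; the file name and shapes are illustrative.

import os
import tempfile
import numpy as np

T, d1, d2 = 100, 32, 32
movie = np.random.rand(T, d1, d2).astype(np.float32)

fname = os.path.join(tempfile.gettempdir(), 'Yr_d1_32_d2_32_d3_1_order_F')
n_chunks = 4
bins = list(range(0, T, T // n_chunks)) + [T]

for j in range(len(bins) - 1):
    chunk = movie[bins[j]:bins[j + 1]]                       # frames for this chunk
    chunk = chunk.transpose(1, 2, 0).reshape(d1 * d2, -1, order='F')
    mode = 'w+' if j == 0 else 'r+'                          # create once, then write in place
    big_mov = np.memmap(fname, dtype=np.float32, mode=mode,
                        shape=(d1 * d2, T), order='F')
    big_mov[:, bins[j]:bins[j + 1]] = chunk
    big_mov.flush()
    del big_mov

os.rename(fname, fname + '_frames_%d_.mmap' % T)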
Example #46
0
    def _read_fits(self, image_filename, image_extension):
        """Read a single extension's data and header from a FITS file
        """

        self._check_file_exists(image_filename)
        self._logger.info('Loading extension {} of FITS file {}'.format(
            image_extension, image_filename))

        # open() parameters that can be important.
        # Default values used here.
        # See https://docs.astropy.org/en/stable/io/fits/api/files.html#astropy.io.fits.open
        uint_handling = True
        image_scaling = False

        with fits.open(image_filename,
                       uint=uint_handling,
                       do_not_scale_image_data=image_scaling) as hdu_list:
            ext_hdr = hdu_list[image_extension].header
            ext_data = hdu_list[image_extension].data

        ndim = ext_hdr['NAXIS']
        cols = ext_hdr['NAXIS1']
        rows = ext_hdr['NAXIS2']
        bitpix = ext_hdr['BITPIX']
        info_str = '{}-D BITPIX={} image with {} columns, {} rows'.format(
            ndim, bitpix, cols, rows)

        if ndim == 3:
            layers = ext_hdr['NAXIS3']
            info_str = '{}-D BITPIX={} image with {} columns, {} rows, {} layers'.format(
                ndim, bitpix, cols, rows, layers)

        if 'BSCALE' in ext_hdr:
            bscale = ext_hdr['BSCALE']
            info_str += f', BSCALE={bscale}'

        if 'BZERO' in ext_hdr:
            bzero = ext_hdr['BZERO']
            info_str += f', BZERO={bzero}'

        self._logger.debug(info_str)
        if ndim == 3:
            self._logger.error(
                'Error, 3-D handling has not been implemented yet.')
            sys.exit(1)

        # Convert to 32-bit floating point if necessary
        if not np.issubdtype(ext_data.dtype, np.floating):
            orig_dtype = ext_data.dtype
            ext_data = ext_data.astype(np.float32)
            self._logger.debug(
                f'  Converted data type from {orig_dtype} to float32')

        # Get data absolute limits.
        minval = np.nanmin(ext_data)
        maxval = np.nanmax(ext_data)
        medval = np.nanmedian(ext_data)
        self._logger.debug(
            f'Raw data statistics are min={minval:.2f}, max={maxval:.2f}, median={medval:.2f}'
        )

        # Is there a PEDESTAL value? MaximDL likes to add an offset, and
        # the PEDESTAL value is the value to ADD to the data to remove the
        # pedestal.
        if 'PEDESTAL' in ext_hdr:
            pedestal = float(ext_hdr['PEDESTAL'])
            if pedestal != 0:
                self._logger.debug(
                    f'Removing a PEDESTAL value of {pedestal} ADU.')
                ext_data += pedestal
                minval = np.amin(ext_data)
                maxval = np.amax(ext_data)
                medval = np.median(ext_data)
                self._logger.debug(
                    f'After PEDESTAL removal, min={minval:.2f}, max={maxval:.2f}, median={medval:.2f}'
                )

        return ext_data, ext_hdr
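A hedged usage sketch of the read path above, assuming astropy is installed and that 'example.fits' exists with an image in its primary extension.

from astropy.io import fits
import numpy as np

# uint=True returns unsigned-integer data as such; do_not_scale_image_data=False
# (the default) lets astropy apply any BSCALE/BZERO scaling on read.
with fits.open('example.fits', uint=True,
               do_not_scale_image_data=False) as hdu_list:
    ext_hdr = hdu_list[0].header
    ext_data = hdu_list[0].data

# Convert to float and report the same statistics as the method above
ext_data = ext_data.astype(np.float32)
print(np.nanmin(ext_data), np.nanmax(ext_data), np.nanmedian(ext_data))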
Example #47
0
def project_tif(coord_dir,
                px_size,
                pj_file,
                image_folder,
                out_dir,
                fill_nodata=False,
                file=False):
    # import required libraries
    from osgeo.gdalconst import GA_ReadOnly
    import os, gdal, sys, math, subprocess, platform
    import numpy as np

    ##### get list of the images from image folder directory
    if file:
        img_file_list = [image_folder]
    else:
        img_file_list = [
            os.path.join(image_folder, f) for f in os.listdir(image_folder)
            if (f.endswith('.tif'))
        ]
        img_file_list.sort()

    ##### create directory, where output should be stored
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    ##### open & read coordinate raster and mask layers
    driver = gdal.GetDriverByName('GTiff')
    driver.Register()
    input_list = ['mask.tif', 'north_raster.tif', 'east_raster.tif']
    for i in input_list:
        datafile = os.path.join(coord_dir, i)
        inDs = gdal.Open(datafile, GA_ReadOnly)
        if inDs is None:
            print('could not open ' + datafile)
            sys.exit(1)
        canal = inDs.GetRasterBand(1)
        if (i == 'mask.tif'):
            mask = canal.ReadAsArray().astype(np.float)
            cols = inDs.RasterXSize
            rows = inDs.RasterYSize
        elif (i == 'north_raster.tif'):
            north = canal.ReadAsArray().astype(np.float)
        elif (i == 'east_raster.tif'):
            east = canal.ReadAsArray().astype(np.float)

    ####################### derive extent of map array  #######################
    ##### mask georeferencing layers
    north[mask == 0] = np.nan
    east[mask == 0] = np.nan

    ##### get maximum spatial extent of remaining image area in world coordinate system
    min_max_east = (np.nanmin(east), np.nanmax(east))
    min_max_north = (np.nanmin(north), np.nanmax(north))

    ##### correct extent values, so that the whole extent is covered & the extent is divisible by pixel_size
    half_px = px_size / 2
    min_max_east = (round(min_max_east[0] / px_size) * px_size - half_px,
                    round(min_max_east[1] / px_size) * px_size + half_px)
    min_max_north = (round(min_max_north[0] / px_size) * px_size - half_px,
                     round(min_max_north[1] / px_size) * px_size + half_px)

    ##### get number of rows & cols of projected map
    row_number = int((min_max_north[1] - min_max_north[0]) / px_size)
    col_number = int((min_max_east[1] - min_max_east[0]) / px_size)

    ################ calculate for each image pixel the #######################
    ############## correlating position in the new array ######################
    pos_new = np.ones((rows, cols, 2)) * -1
    for col in range(cols):
        for row in range(rows):
            if not (math.isnan(east[row, col])) and not (math.isnan(
                    north[row, col])):
                pos_new[row, col] = [
                    int((east[row, col] - min_max_east[0]) / px_size),
                    int((north[row, col] - min_max_north[0]) / px_size)
                ]

    ################# read tif data which should be projected #################
    for img_file in img_file_list:
        inDs = gdal.Open(img_file, GA_ReadOnly)
        no_of_bands = inDs.RasterCount

        ##### create tif-file for projected result
        img_name = os.path.basename(img_file)  # get image name
        tif_name = img_name.split(
            '.')[0] + '_map.tif'  # create output map name from that
        dst_ds = gdal.GetDriverByName('GTiff').Create(
            os.path.join(out_dir,
                         tif_name), col_number, row_number, no_of_bands,
            gdal.GDT_Float64)  # create the single band raster tif-file

        #################### run projection for each band #########################
        for i in range(no_of_bands):
            ##### open classified image
            canal = inDs.GetRasterBand(i + 1)
            noDataVal = canal.GetNoDataValue()
            image = canal.ReadAsArray()

            ##### create output arrays
            count_array = np.zeros(
                (row_number, col_number)
            )  # counts the number of image pixels assigned to each map pixel
            snow_val = np.full((row_number, col_number),
                               -9999)  # output array for map

            ##### assign image values to map pixels
            for col in range(cols):
                for row in range(rows):
                    if not (image[row, col]
                            == noDataVal):  # exclude no-Data pixels
                        if not -1 in pos_new[row, col]:
                            east_pos = int(pos_new[
                                row, col,
                                0])  # get easting position in map array
                            north_pos = int(pos_new[
                                row, col,
                                1]) + 1  # get northing position in map array
                            # value assignment: (value*no_of_already_assigned_values + new_value_to_be_added)/new_number_of_assigned_values
                            snow_val[row_number - north_pos, east_pos] = (
                                snow_val[row_number - north_pos, east_pos] *
                                count_array[row_number - north_pos, east_pos] +
                                image[row, col]) / (count_array[
                                    row_number - north_pos, east_pos] + 1)
                            count_array[
                                row_number - north_pos,
                                east_pos] += 1  # increase counter for this pixel by 1
            snow_val[
                count_array ==
                0] = -9999  # assign noData-value, if no value was assigned before

            ##### write output to tif-file
            dst_ds.GetRasterBand(i + 1).WriteArray(
                snow_val)  # write result array into tif-file
            dst_ds.GetRasterBand(i + 1).SetNoDataValue(
                -9999)  # define no-Data-value to -9999

    ########## define geotransformation & projection and save result ##########
        geotransform = (
            min_max_east[0], px_size, 0, min_max_north[1], 0, -px_size
        )  # geotransformation from spatial extent and pixel size
        dst_ds.SetGeoTransform(
            geotransform)  # set geotransfornation to tif-file

        # get & define projection from DEM file and save result
        dst_ds.SetProjection(pj_file)  # set projection to tif-file
        dst_ds.FlushCache()  # write to disk
        dst_ds = None  # save, close
        canal = None
        inDs = None

        ######## optional: fill noData-gaps with small scale interpolation #######
        ############## using the gdal function gdal_fillnodata.py #################
        # the value of fill_nodata sets the maximum pixel distance over which the interpolation is applied
        if (fill_nodata):
            for i in range(no_of_bands):
                if (platform.system() == 'Windows'):
                    subprocess.Popen(
                        "gdal_fillnodata.py -md " + str(fill_nodata) + " " +
                        os.path.join(out_dir, tif_name) + " -b " + str(i + 1),
                        shell=True,
                        stdout=subprocess.PIPE)
                else:
                    subprocess.Popen([
                        'gdal_fillnodata.py', '-md',
                        str(fill_nodata),
                        os.path.join(out_dir, tif_name), '-b',
                        str(i + 1)
                    ],
                                     stdout=subprocess.PIPE)

        print(img_name + ' is processed'
              )  # report to the console that processing of this file is finished
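A condensed, self-contained sketch of the projection step above: snap the east/north extent to the pixel grid and keep a running mean of image values that land in the same map cell (all arrays synthetic).

import numpy as np

px_size = 10.0
half_px = px_size / 2

# Tiny synthetic georeferencing layers (east/north in map units) and image values
east = np.array([[100.2, 110.7], [100.9, 121.3]])
north = np.array([[500.1, 500.4], [510.8, 510.2]])
image = np.array([[1.0, 2.0], [3.0, 4.0]])

# Snap the extent to the pixel grid, as in the code above
e0 = round(np.nanmin(east) / px_size) * px_size - half_px
e1 = round(np.nanmax(east) / px_size) * px_size + half_px
n0 = round(np.nanmin(north) / px_size) * px_size - half_px
n1 = round(np.nanmax(north) / px_size) * px_size + half_px
cols = int(round((e1 - e0) / px_size))
rows = int(round((n1 - n0) / px_size))

# Running mean of all image pixels falling into the same map cell
val = np.full((rows, cols), -9999.0)
cnt = np.zeros((rows, cols))
for e, n, v in zip(east.ravel(), north.ravel(), image.ravel()):
    c = int((e - e0) / px_size)
    r = rows - (int((n - n0) / px_size) + 1)   # row 0 is the northern edge
    prev = 0.0 if cnt[r, c] == 0 else val[r, c]
    val[r, c] = (prev * cnt[r, c] + v) / (cnt[r, c] + 1)
    cnt[r, c] += 1
val[cnt == 0] = -9999.0
print(val)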
Example #48
0
    # Calculate the shift to be taken from the ToT values
    y, x = np.nonzero(tot)
    shift_x = min(x)
    shift_y = min(y)

    # Apply shift to tot and toa matrices
    tot = np.roll(tot, -shift_x, axis=1)
    tot = np.roll(tot, -shift_y, axis=0)
    toa = np.roll(toa, -shift_x, axis=1)
    toa = np.roll(toa, -shift_y, axis=0)

    # G4Medipix outputs a few zero values around clusters, convert those to -NaN
    toa[toa == 0] = -np.nan

    # Reduce the lowest ToA values to 0, as we do not know a time offset. Just like the real data
    toa = toa - np.nanmin(toa)

    # Store values  in resized matrix again
    f['clusters'][int(idx), 0, :] = tot[0:n_pixels, 0:n_pixels]
    f['clusters'][int(idx), 1, :] = toa[0:n_pixels, 0:n_pixels]

    trajectory = f['trajectories'][str(idx)][()]

    # G4medipix sets its origin in the middle of four pixels
    trajectory[:, 1] = trajectory[:, 1] + (-1 + used_n_pixels / 2 -
                                           shift_x) * pixel_size
    trajectory[:, 0] = trajectory[:, 0] + (-1 + used_n_pixels / 2 -
                                           shift_y) * pixel_size
    # Invert trajectory, and increase with half sensor height
    trajectory[:, 2] = trajectory[:, 2] * -1 + sensor_height / 2
Example #49
0
def event_to_list_and_dedup(network_events_final, nstations):
    #/ add all events to list (includes redundancies if event belongs to multiple pairs)
    networkIDs = sorted(network_events_final.keys())
    out = np.nan + np.ones((2 * len(networkIDs), nstations + 1))
    for i, nid in enumerate(networkIDs):
        tmp_dt = network_events_final[nid]['dt']
        for stid in xrange(nstations):
            if network_events_final[nid][stid]:
                if len(network_events_final[nid][stid]) == 2:
                    out[2 * i, stid] = network_events_final[nid][stid][0]
                    out[2 * i + 1, stid] = network_events_final[nid][stid][1]
                    out[2 * i, nstations] = nid
                    out[2 * i + 1, nstations] = nid
                elif len(
                        network_events_final[nid][stid]
                ) == 1:  #/ if only one event in "pair" (i.e dt is small - TODO: ensure this case can't happen)
                    out[2 * i, stid] = network_events_final[nid][stid][0]
                    out[2 * i + 1, stid] = network_events_final[nid][stid][0]
                    out[2 * i, nstations] = nid
                    out[2 * i + 1, nstations] = nid
                else:  # if multiple
                    tmp_ts = network_events_final[nid][stid]
                    sidx = [
                        0,
                        np.argmax(
                            [q - p
                             for p, q in zip(tmp_ts[:-1], tmp_ts[1:])]) + 1
                    ]
                    out[2 * i, stid] = np.min(tmp_ts[sidx[0]:sidx[1]])
                    out[2 * i + 1, stid] = np.min(tmp_ts[sidx[1]:])
                    out[2 * i, nstations] = nid
                    out[2 * i + 1, nstations] = nid

    ## remove duplicates from event-list (find and remove entries that are identical up to missing values (nan)) ##
    out2 = out[:, 0:nstations]
    netids2 = list(out[:, nstations].astype(int))

    for sta in xrange(nstations):
        row_sort0 = np.argsort(out2[:, sta])
        out2 = out2[row_sort0, :]
        netids2 = [netids2[x] for x in row_sort0]
        n1, n2 = np.shape(out2)
        keep_row = np.zeros(n1, dtype=bool)
        network_eventlist = list()
        tmp_neventlist = list()
        for i in xrange(n1 - 1):
            if np.all((out2[i, :] == out2[i + 1, :]) | np.isnan(out2[i, :])
                      | np.isnan(out2[i + 1, :])) & np.any(
                          out2[i, :] == out2[i + 1, :]):  #/ if match or nan
                out2[i + 1, :] = np.nanmin((out2[i, :], out2[i + 1, :]),
                                           axis=0)  #/ fill in nans
                tmp_neventlist.append(netids2[i])
            else:
                keep_row[i] = True
                tmp_neventlist.append(netids2[i])  #/ network id
                network_eventlist.append(tmp_neventlist)
                tmp_neventlist = list()
            if i == n1 - 2:  #/ add final event
                keep_row[i + 1] = True
                tmp_neventlist.append(netids2[i + 1])
                network_eventlist.append(tmp_neventlist)
                tmp_neventlist = list()
        out2 = out2[keep_row, :]
        netids2 = network_eventlist

    def list_flatten(S):
        if S == []:
            return S
        if isinstance(S[0], list):
            return list_flatten(S[0]) + list_flatten(S[1:])
        return S[:1] + list_flatten(S[1:])

    netids2 = [list_flatten(x) for x in netids2]  #/ to check if any missing

    nfinal, n2 = np.shape(out2)
    tmp = np.nanargmin(out2, axis=1)
    row_sort = np.argsort(out2[np.arange(0, nfinal), tmp])
    final_eventlist = out2[row_sort, :]
    network_eventlist = [netids2[k] for k in row_sort]

    return final_eventlist, network_eventlist, nfinal
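A small illustration of the nan-aware merge used in the dedup loop above: two rows that agree wherever both are defined are collapsed with np.nanmin (toy values).

import numpy as np

a = np.array([10.0, np.nan, 30.0])
b = np.array([10.0, 22.0, np.nan])

# match or nan in every column, and at least one exact match
mergeable = np.all((a == b) | np.isnan(a) | np.isnan(b)) & np.any(a == b)
if mergeable:
    merged = np.nanmin((a, b), axis=0)   # fill in the nans from either row
    print(merged)  # [10. 22. 30.]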
Example #50
0
def _get_extent_from_multigmpe(rupture, config=None):
    """
    Use MultiGMPE to determine extent
    """
    (clon, clat) = _rupture_center(rupture)
    origin = rupture.getOrigin()
    if config is not None:
        gmpe = MultiGMPE.from_config(config)
        gmice = get_object_from_config('gmice', 'modeling', config)
    else:
        # Put in some default values for conf
        config = {
            'extent': {
                'mmi': {
                    'threshold': 4.5,
                    'mindist': 100,
                    'maxdist': 1000
                }
            }
        }

        # Generic GMPEs choices based only on active vs stable
        # as defaults...
        stable = is_stable(origin.lon, origin.lat)
        if not stable:
            ASK14 = AbrahamsonEtAl2014()
            CB14 = CampbellBozorgnia2014()
            CY14 = ChiouYoungs2014()
            gmpes = [ASK14, CB14, CY14]
            site_gmpes = None
            weights = [1 / 3.0, 1 / 3.0, 1 / 3.0]
            gmice = WGRW12()
        else:
            Fea96 = FrankelEtAl1996MwNSHMP2008()
            Tea97 = ToroEtAl1997MwNSHMP2008()
            Sea02 = SilvaEtAl2002MwNSHMP2008()
            C03 = Campbell2003MwNSHMP2008()
            TP05 = TavakoliPezeshk2005MwNSHMP2008()
            AB06p = AtkinsonBoore2006Modified2011()
            Pea11 = PezeshkEtAl2011()
            Atk08p = Atkinson2008prime()
            Sea01 = SomervilleEtAl2001NSHMP2008()
            gmpes = [
                Fea96, Tea97, Sea02, C03, TP05, AB06p, Pea11, Atk08p, Sea01
            ]
            site_gmpes = [AB06p]
            weights = [0.16, 0.0, 0.0, 0.17, 0.17, 0.3, 0.2, 0.0, 0.0]
            gmice = AK07()

        gmpe = MultiGMPE.from_list(gmpes,
                                   weights,
                                   default_gmpes_for_site=site_gmpes)

    min_mmi = config['extent']['mmi']['threshold']
    default_imt = imt.SA(1.0)
    sd_types = [const.StdDev.TOTAL]

    # Distance context
    dx = DistancesContext()
    # This imposes the minimum/maximum distances from the config
    # (the defaults above are 100 and 1000 km)
    d_min = config['extent']['mmi']['mindist']
    d_max = config['extent']['mmi']['maxdist']
    dx.rjb = np.logspace(np.log10(d_min), np.log10(d_max), 2000)
    # Details don't matter for this; assuming vertical surface rupturing fault
    # with epicenter at the surface.
    dx.rrup = dx.rjb
    dx.rhypo = dx.rjb
    dx.repi = dx.rjb
    dx.rx = np.zeros_like(dx.rjb)
    dx.ry0 = np.zeros_like(dx.rjb)
    dx.rvolc = np.zeros_like(dx.rjb)

    # Sites context
    sx = SitesContext()
    # Set to soft soil conditions
    sx.vs30 = np.full_like(dx.rjb, 180)
    sx = MultiGMPE.set_sites_depth_parameters(sx, gmpe)
    sx.vs30measured = np.full_like(sx.vs30, False, dtype=bool)
    sx = Sites._addDepthParameters(sx)
    sx.backarc = np.full_like(sx.vs30, False, dtype=bool)

    # Rupture context
    rx = RuptureContext()
    rx.mag = origin.mag
    rx.rake = 0.0
    # From WC94...
    rx.width = 10**(-0.76 + 0.27 * rx.mag)
    rx.dip = 90.0
    rx.ztor = origin.depth
    rx.hypo_depth = origin.depth

    gmpe_imt_mean, _ = gmpe.get_mean_and_stddevs(sx, rx, dx, default_imt,
                                                 sd_types)

    # Convert to MMI
    gmpe_to_mmi, _ = gmice.getMIfromGM(gmpe_imt_mean, default_imt)

    # Minimum distance that exceeds threshold MMI?
    dists_exceed_mmi = dx.rjb[gmpe_to_mmi > min_mmi]
    if len(dists_exceed_mmi):
        mindist_km = np.max(dists_exceed_mmi)
    else:
        mindist_km = d_min

    # Get a projection
    proj = OrthographicProjection(clon - 4, clon + 4, clat + 4, clat - 4)
    if isinstance(rupture, (QuadRupture, EdgeRupture)):
        ruptx, rupty = proj(rupture.lons, rupture.lats)
    else:
        ruptx, rupty = proj(clon, clat)

    xmin = np.nanmin(ruptx) - mindist_km
    ymin = np.nanmin(rupty) - mindist_km
    xmax = np.nanmax(ruptx) + mindist_km
    ymax = np.nanmax(rupty) + mindist_km

    # Put a limit on range of aspect ratio
    dx = xmax - xmin
    dy = ymax - ymin
    ar = dy / dx
    if ar > 1.2:
        # Inflate x
        dx_target = dy / 1.2
        ddx = dx_target - dx
        xmax = xmax + ddx / 2
        xmin = xmin - ddx / 2
    if ar < 0.83:
        # Inflate y
        dy_target = dx * 0.83
        ddy = dy_target - dy
        ymax = ymax + ddy / 2
        ymin = ymin - ddy / 2

    lonmin, latmin = proj(np.array([xmin]), np.array([ymin]), reverse=True)
    lonmax, latmax = proj(np.array([xmax]), np.array([ymax]), reverse=True)

    #
    # Round coordinates to the nearest minute -- that should make the
    # output grid register with common grid resolutions (60c, 30c,
    # 15c, 7.5c)
    #
    logging.debug("Extent: %f, %f, %f, %f" % (lonmin, lonmax, latmin, latmax))
    return _round_coord(lonmin[0]), _round_coord(lonmax[0]), \
        _round_coord(latmin[0]), _round_coord(latmax[0])
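The aspect-ratio clamp at the end can be exercised on its own; the bounds (in km) below are made up.

def clamp_aspect(xmin, xmax, ymin, ymax, ar_max=1.2, ar_min=0.83):
    """Inflate x or y so the y/x aspect ratio stays within [ar_min, ar_max]."""
    dx, dy = xmax - xmin, ymax - ymin
    ar = dy / dx
    if ar > ar_max:          # too tall: widen x
        ddx = dy / ar_max - dx
        xmin, xmax = xmin - ddx / 2, xmax + ddx / 2
    elif ar < ar_min:        # too wide: stretch y
        ddy = dx * ar_min - dy
        ymin, ymax = ymin - ddy / 2, ymax + ddy / 2
    return xmin, xmax, ymin, ymax

print(clamp_aspect(-100, 100, -300, 300))   # widens x to a 500 km span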
Example #51
0
    def lows(self, assets, dt):
        """
        The low field's aggregation returns the smallest low seen between
        the market open and the current dt.
        If there has been no data on or before the `dt` the low is `nan`.

        Returns
        -------
        np.array with dtype=float64, in order of assets parameter.
        """
        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low')

        lows = []
        session_label = self._trading_calendar.minute_to_session_label(dt)

        for asset in assets:
            if not asset.is_alive_for_session(session_label):
                lows.append(np.NaN)
                continue

            if prev_dt is None:
                val = self._minute_reader.get_value(asset, dt, 'low')
                entries[asset] = (dt_value, val)
                lows.append(val)
                continue
            else:
                try:
                    last_visited_dt, last_min = entries[asset]
                    if last_visited_dt == dt_value:
                        lows.append(last_min)
                        continue
                    elif last_visited_dt == prev_dt:
                        curr_val = self._minute_reader.get_value(
                            asset, dt, 'low')
                        val = np.nanmin([last_min, curr_val])
                        entries[asset] = (dt_value, val)
                        lows.append(val)
                        continue
                    else:
                        after_last = pd.Timestamp(last_visited_dt +
                                                  self._one_min,
                                                  tz='UTC')
                        window = self._minute_reader.load_raw_arrays(
                            ['low'],
                            after_last,
                            dt,
                            [asset],
                        )[0].T
                        val = np.nanmin(np.append(window, last_min))
                        entries[asset] = (dt_value, val)
                        lows.append(val)
                        continue
                except KeyError:
                    window = self._minute_reader.load_raw_arrays(
                        ['low'],
                        market_open,
                        dt,
                        [asset],
                    )[0].T
                    val = np.nanmin(window)
                    entries[asset] = (dt_value, val)
                    lows.append(val)
                    continue
        return np.array(lows)
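The incremental update is essentially a running np.nanmin over newly seen minutes; a toy version with synthetic minute lows:

import numpy as np

minute_lows = np.array([10.2, np.nan, 9.8, 10.1, 9.5])
running_low = np.nan
for low in minute_lows:
    running_low = np.nanmin([running_low, low])  # cached and reused, like entries[asset] above
print(running_low)  # 9.5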
Example #52
0
def plot3d(aero, name, parameters):
    """
    Creates a 3D plot of the coefficient values corresponding to the given parameters.
    In the case of any constant parameters this plot represents a slice of a higher dimensional plot.
    :param aero: The aero file.
    :param name: The name of the coefficient.
    :param parameters: List of parameters.
    :return:
    """
    fig = plt.figure()
    ax = fig.gca(projection='3d')

    #find the X and Y axis (the two non constant axis)
    found = 0
    for parameter in parameters:
        if (len(parameter.scope) == 3):  #if not constant
            if found == 0:
                X = parameter.axis
                parameter.X = True
                found += 1
            elif found == 1:
                Y = parameter.axis
                parameter.Y = True
                found += 1
            else:
                print("error, too many non constant variables")

    Z = np.zeros((len(Y), len(X)))
    X, Y = np.meshgrid(X, Y)

    i = 0
    j = 0
    while i < X.shape[1]:
        j = 0
        while j < Y.shape[0]:
            # prepare list of values
            values = []
            for parameter in parameters:
                if parameter.X == True:  #if its the X axis
                    values.append(X[0, i])
                    xlabel = parameter.name
                elif parameter.Y == True:  #if its the Y axis
                    values.append(Y[j, 0])
                    ylabel = parameter.name
                else:
                    values.append(parameter.axis[i]
                                  )  # get the i'th value of all parameters

            Z[j, i] = aero.coefficients[name].get_coefficient(*values)
            j += 1
        i += 1

    # Plot the surface.
    surf = ax.plot_surface(X,
                           Y,
                           Z,
                           cmap=cm.coolwarm,
                           vmin=np.nanmin(Z),
                           vmax=np.nanmax(Z),
                           linewidth=0,
                           antialiased=False)

    # Customize the z axis.
    ax.set_zlim(np.nanmin(Z), np.nanmax(Z))
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    ax.xaxis.set_label_text(xlabel)
    ax.yaxis.set_label_text(ylabel)
    ax.zaxis.set_label_text(name)

    # Add a color bar which maps values to colors.
    fig.colorbar(surf, shrink=0.5, aspect=5)

    plt.show()
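The meshgrid-evaluate-plot pattern used above, reduced to a self-contained example with a plain function standing in for `aero.coefficients[name].get_coefficient`.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm

x = np.linspace(-2, 2, 40)
y = np.linspace(-2, 2, 40)
X, Y = np.meshgrid(x, y)
Z = np.exp(-(X**2 + Y**2))      # stand-in for the coefficient lookup

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
                       vmin=np.nanmin(Z), vmax=np.nanmax(Z),
                       linewidth=0, antialiased=False)
ax.set_zlim(np.nanmin(Z), np.nanmax(Z))
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()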
    def warp_images(self,
                    imgs,
                    center_coor,
                    deg_per_pixel=0.1,
                    is_luminance_correction=True):
        """
        warp an image stack into the visual degree coordinate system

        parameters
        ----------
        imgs : ndarray
            should be 2d or 3d, if 3d, axis will be considered as frame x rows x width
        center_coor : list or tuple of two floats
            the visual degree coordinates of the center of the image (altitude, azimuth)
        deg_per_pixel : float or list/tuple of two floats
            size of original pixel in visual degrees, (altitude, azimuth); if float, assume
            sizes in both dimensions are the same
        is_luminance_correction : bool
            if True, wrapped images will have mean intensity equal to 0, and values will be
            scaled so that either the minimum reaches -1. or the maximum reaches 1.

        returns
        -------
        imgs_wrapped : 3d array, np.float32
            wrapped images, each frame should have exactly the same size as the down-sampled
            monitor resolution. The region on the monitor not covered by the image will have
            a value of np.nan. Value range [-1., 1.]
        coord_alt_wrapped : 2d array, np.float32
            the altitude coordinates of all pixels in the wrapped images in visual degrees.
            should have the same shape as each frame in 'imgs_wrapped'.
        coord_azi_wrapped : 2d array, np.float32
            the azimuth coordinates of all pixels in the wrapped images in visual degrees.
            should have the same shape as each frame in 'imgs_wrapped'.
        imgs_dewrapped : 3d array, dtype same as imgs
            dewrapped images, same dimensions as the input image stack. the region of the
            original image that was not displayed (outside of the monitor) will have the
            value np.nan. value range [-1., 1.]
        coord_alt_dewrapped : 2d array, np.float32
            the altitude coordinates of all pixels in the dewrapped images in visual degrees.
            should have the same shape as each frame in 'imgs_dewrapped'.
        coord_azi_dewrapped : 2d array, np.float32
            the azimuth coordinates of all pixels in the dewrapped images in visual degrees.
            should have the same shape as each frame in 'imgs_dewrapped'.
        """

        try:
            deg_per_pixel_alt = abs(float(deg_per_pixel[0]))
            deg_per_pixel_azi = abs(float(deg_per_pixel[1]))
        except TypeError:
            deg_per_pixel_alt = deg_per_pixel_azi = deg_per_pixel

        if len(imgs.shape) == 2:
            imgs_raw = np.array([imgs])
        elif len(imgs.shape) == 3:
            imgs_raw = imgs
        else:
            raise ValueError('input "imgs" should be 2d or 3d array.')

        # generate raw image pixel coordinates in visual degrees
        alt_start = center_coor[0] + (imgs_raw.shape[1] /
                                      2) * deg_per_pixel_alt
        alt_axis = alt_start - np.arange(imgs_raw.shape[1]) * deg_per_pixel_alt
        azi_start = center_coor[1] - (imgs_raw.shape[2] /
                                      2) * deg_per_pixel_azi
        azi_axis = np.arange(imgs_raw.shape[2]) * deg_per_pixel_azi + azi_start
        # img_coord_azi, img_coord_alt = np.meshgrid(azi_axis, alt_axis)

        # initialize output array
        imgs_wrapped = np.zeros((imgs_raw.shape[0], self.deg_coord_x.shape[0],
                                 self.deg_coord_x.shape[1]),
                                dtype=np.float32)
        imgs_wrapped[:] = np.nan

        # for cropping imgs_raw
        x_min = x_max = y_max = y_min = None

        # for testing
        # img_count = np.zeros((imgs_raw.shape[1], imgs_raw.shape[2]), dtype=np.uint32)

        # loop through every display (wrapped) pixel
        for ii in range(self.deg_coord_x.shape[0]):
            for jj in range(self.deg_coord_x.shape[1]):

                # the wrapped coordinate of current display pixel [alt, azi]
                coord_w = [self.deg_coord_y[ii, jj], self.deg_coord_x[ii, jj]]

                # if the wrapped coordinates of the current display pixel are covered
                # by the raw image
                if alt_axis[0] >= coord_w[0] >= alt_axis[-1] and \
                                        azi_axis[0] <= coord_w[1] <= azi_axis[-1]:

                    # get the raw pixels around the wrapped coordinates of the current display pixel
                    u = (alt_axis[0] - coord_w[0]) / deg_per_pixel_alt
                    l = (coord_w[1] - azi_axis[0]) / deg_per_pixel_azi

                    # for testing:
                    # img_count[int(u), int(l)] += 1

                    if (u == round(u)
                            and l == round(l)):  # exact hit on one raw pixel
                        imgs_wrapped[:, ii, jj] = imgs_raw[:, int(u), int(l)]

                        # for cropping
                        if x_min is None:
                            x_min = x_max = l
                            y_min = y_max = u
                        else:
                            x_min = min(x_min, l)
                            x_max = max(x_max, l)
                            y_min = min(y_min, u)
                            y_max = max(y_max, u)

                    else:
                        u = int(u)
                        b = u + 1
                        l = int(l)
                        r = l + 1
                        w_ul = 1. / ia.distance(coord_w,
                                                [alt_axis[u], azi_axis[l]])
                        w_bl = 1. / ia.distance(coord_w,
                                                [alt_axis[b], azi_axis[l]])
                        w_ur = 1. / ia.distance(coord_w,
                                                [alt_axis[u], azi_axis[r]])
                        w_br = 1. / ia.distance(coord_w,
                                                [alt_axis[b], azi_axis[r]])

                        w_sum = w_ul + w_bl + w_ur + w_br

                        imgs_wrapped[:, ii,
                                     jj] = (imgs_raw[:, u, l] * w_ul +
                                            imgs_raw[:, b, l] * w_bl +
                                            imgs_raw[:, u, r] * w_ur +
                                            imgs_raw[:, b, r] * w_br) / w_sum

                        # for cropping
                        if x_min is None:
                            x_min = l
                            x_max = l + 1
                            y_min = u
                            y_max = u + 1
                        else:
                            x_min = min(x_min, l)
                            x_max = max(x_max, l + 1)
                            y_min = min(y_min, u)
                            y_max = max(y_max, u + 1)

        # for testing
        # plt.imshow(img_count, interpolation='bicubic')
        # plt.colorbar()
        # plt.show()

        if is_luminance_correction:
            for frame_ind in range(imgs_wrapped.shape[0]):
                curr_frame = imgs_wrapped[frame_ind]
                curr_mean = np.nanmean(curr_frame.flat)
                curr_frame = curr_frame - curr_mean
                curr_amp = np.max([
                    np.nanmax(curr_frame.flat),
                    abs(np.nanmin(curr_frame.flat))
                ])
                curr_frame = curr_frame / curr_amp
                imgs_wrapped[frame_ind] = curr_frame

        # crop image
        alt_range = np.logical_and(
            np.arange(imgs_raw.shape[1]) >= y_min,
            np.arange(imgs_raw.shape[1]) <= y_max)
        azi_range = np.logical_and(
            np.arange(imgs_raw.shape[2]) >= x_min,
            np.arange(imgs_raw.shape[2]) <= x_max)

        # print imgs_raw.shape
        # print imgs_raw.shape
        # print alt_range.shape
        # print azi_range.shape
        # print np.sum(alt_range)
        # print np.sum(azi_range)

        imgs_dewrapped = imgs_raw[:, alt_range, :]
        imgs_dewrapped = imgs_dewrapped[:, :, azi_range]

        # get degree coordinates of dewrapped images
        deg_coord_alt_ax_dewrapped = alt_axis[alt_range]
        deg_coord_azi_ax_dewrapped = azi_axis[azi_range]
        deg_coord_azi_dewrapped, deg_coord_alt_dewrapped = np.meshgrid(
            deg_coord_azi_ax_dewrapped, deg_coord_alt_ax_dewrapped)
        deg_coord_alt_dewrapped = deg_coord_alt_dewrapped.astype(np.float32)
        deg_coord_azi_dewrapped = deg_coord_azi_dewrapped.astype(np.float32)

        return imgs_wrapped, self.deg_coord_y, self.deg_coord_x, imgs_dewrapped, deg_coord_alt_dewrapped, \
               deg_coord_azi_dewrapped
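# The interpolation branch above blends the four raw pixels surrounding a
# display pixel, weighting each by the inverse of its distance to the target
# coordinate. A standalone sketch of that weighting (independent of the class
# and of ia.distance); note the exact-hit case is handled separately above to
# avoid a division by zero.
import numpy as np


def inverse_distance_blend(coord, corners, values):
    """Blend the corner values by inverse distance to coord."""
    coord = np.asarray(coord, dtype=float)
    weights = np.array([1. / np.linalg.norm(coord - np.asarray(c, dtype=float))
                        for c in corners])
    return np.sum(weights * values) / np.sum(weights)


corners = [(1., 0.), (0., 0.), (1., 1.), (0., 1.)]   # (alt, azi) of the 4 raw pixels
values = np.array([0.2, 0.4, 0.6, 0.8])
print(inverse_distance_blend((0.3, 0.7), corners, values))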
Example #54
def map_into_curves(ax,
                    forc,
                    data_str,
                    mask,
                    interpolation=None,
                    cmap='RdBu_r'):
    """Plots a quantity into the reversal curves in (H, M) space.

    Parameters
    ----------
    ax : Axes
        Axes to plot the map on
    forc : Forc
        Forc instance containing relevant data
    data_str : str
        One of ['m', 'rho', 'rho_uncertainty', 'temperature']
    mask : str or bool
        True or 'H<Hr' will mask any values for which H<Hr. False shows all data, including dataset extension.
    interpolation : str, optional
        Interpolates the map between the reversal curves to make it smooth. Not currently implemented.
        (the default is None, which doesn't interpolate.)
    cmap : str, optional
        Colormap to use. Choose from anything in matplotlib or colorcet.
        (the default is 'RdBu_r', which is a perceptually uniform diverging colormap good for M and rho values.)

    Raises
    ------
    NotImplementedError
        If interpolation is anything but the default None.
    """

    ax.clear()
    _h = forc.h.ravel()
    _m = forc.get_masked(forc.m, mask=mask).ravel()
    _z = forc.get_masked(forc.get_data(data_str), mask=mask).ravel()

    # The sum of a nan and anything is a nan. This masks all nan elements across all three arrays.
    indices_non_nan = np.logical_not(np.isnan(_h + _m + _z))

    _h = _h[indices_non_nan]
    _m = _m[indices_non_nan]
    _z = _z[indices_non_nan]

    triang = mtri.Triangulation(_h, _m)
    tri_mask = triangulation_mask(_h,
                                  triang.triangles,
                                  max_edge_length=1.5 * forc.step)
    triang.set_mask(tri_mask)

    if interpolation is None:
        pass
    elif interpolation == 'linear':
        # triang = mtri.LinearTriInterpolator(triang, z)
        raise NotImplementedError
        # TODO generate H-M meshgrid, remove anything outside the concave hull of the loop. Then feed into a
        # mtri.LinearTriInterpolator?
    elif interpolation in ['cubic', 'geom', 'min_E']:
        raise NotImplementedError

    vmin, vmax = symmetrize_bounds(np.nanmin(_z), np.nanmax(_z))
    im = ax.tripcolor(triang,
                      _z,
                      shading="gouraud",
                      cmap=cmap,
                      vmin=vmin,
                      vmax=vmax)
    colorbar(ax, im)
    ax.figure.canvas.draw()

    h_vs_m(ax, forc, mask=mask, points='none', cmap='none', alpha=0.3)

    return
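# symmetrize_bounds() is not shown in this listing; a plausible implementation
# (an assumption, not necessarily the project's code) simply makes the color
# limits symmetric about zero so a diverging colormap such as 'RdBu_r' is
# centered on zero:
def symmetrize_bounds(vmin, vmax):
    """Return color limits symmetric about zero."""
    bound = max(abs(vmin), abs(vmax))
    return -bound, bound


print(symmetrize_bounds(-0.3, 1.2))  # (-1.2, 1.2)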
Example #55
def LN_pop_plot(ctx):
    """
    Compact summary plot for a model fit to a single dimension of a population subspace.

    In 2-4 panels, show: PC load and timecourse, plus STRF + static NL
    (skip the first two if their respective ax handles are None).

    """
    rec = ctx['val']
    modelspec = ctx['modelspec']
    rec = ms.evaluate(rec, modelspec)
    cellid = modelspec[0]['meta']['cellid']

    resp = rec['resp']
    stim = rec['stim']
    pred = rec['pred']
    fs = resp.fs

    fir_idx = find_module('fir', modelspec)
    wc_idx = find_module('weight_channels', modelspec, find_all_matches=True)

    chan_count = modelspec[wc_idx[-1]]['phi']['coefficients'].shape[1]
    cell_count = modelspec[wc_idx[-1]]['phi']['coefficients'].shape[0]
    filter_count = modelspec[fir_idx]['phi']['coefficients'].shape[0]
    bank_count = modelspec[fir_idx]['fn_kwargs']['bank_count']
    chan_per_bank = int(filter_count / bank_count)

    fig = plt.figure()
    for chanidx in range(chan_count):

        tmodelspec = copy.deepcopy(modelspec[:(fir_idx + 1)])
        tmodelspec[fir_idx]['fn_kwargs']['bank_count'] = 1
        rr = slice(chanidx * chan_per_bank, (chanidx + 1) * chan_per_bank)
        tmodelspec[wc_idx[0]]['phi']['mean'] = tmodelspec[
            wc_idx[0]]['phi']['mean'][rr]
        tmodelspec[wc_idx[0]]['phi']['sd'] = tmodelspec[
            wc_idx[0]]['phi']['sd'][rr]
        tmodelspec[fir_idx]['phi']['coefficients'] = \
                   tmodelspec[fir_idx]['phi']['coefficients'][rr,:]

        ax = fig.add_subplot(chan_count, 3, chanidx * 3 + 1)
        nplt.strf_heatmap(tmodelspec,
                          title=None,
                          interpolation=(2, 3),
                          show_factorized=False,
                          fs=fs,
                          ax=ax)
        nplt.ax_remove_box(ax)
        if chanidx < chan_count - 1:
            plt.xticks([])
            plt.yticks([])
            plt.xlabel('')
            plt.ylabel('')

    ax = fig.add_subplot(2, 3, 2)
    fcc = modelspec[fir_idx]['phi']['coefficients'].copy()
    fcc = np.reshape(fcc, (chan_per_bank, bank_count, -1))
    fcc = np.mean(fcc, axis=0)
    fcc_std = np.std(fcc, axis=1, keepdims=True)
    wcc = modelspec[wc_idx[-1]]['phi']['coefficients'].copy().T
    wcc *= fcc_std
    mm = np.std(wcc) * 3
    im = ax.imshow(wcc, aspect='auto', clim=[-mm, mm], cmap='bwr')
    #plt.colorbar(im)
    plt.title(modelspec.meta['cellid'])
    nplt.ax_remove_box(ax)

    ax = fig.add_subplot(2, 3, 3)
    plt.plot(modelspec.meta['r_test'])
    plt.xlabel('cell')
    plt.ylabel('r test')
    nplt.ax_remove_box(ax)

    epoch_regex = '^STIM_'
    epochs_to_extract = ep.epoch_names_matching(rec.epochs, epoch_regex)
    epoch = epochs_to_extract[0]

    # or just plot the PSTH for an example stimulus
    raster = resp.extract_epoch(epoch)
    psth = np.mean(raster, axis=0)
    praster = pred.extract_epoch(epoch)
    ppsth = np.mean(praster, axis=0)
    spec = stim.extract_epoch(epoch)[0, :, :]
    trimbins = 50
    if trimbins > 0:
        ppsth = ppsth[:, trimbins:]
        psth = psth[:, trimbins:]
        spec = spec[:, trimbins:]

    ax = plt.subplot(6, 2, 8)
    #nplt.plot_spectrogram(spec, fs=resp.fs, ax=ax, title=epoch)
    extent = [0.5 / fs, (spec.shape[1] + 0.5) / fs, 0.5, spec.shape[0] + 0.5]
    im = ax.imshow(spec,
                   origin='lower',
                   interpolation='none',
                   aspect='auto',
                   extent=extent)
    nplt.ax_remove_box(ax)
    plt.ylabel('stim')
    plt.xticks([])
    plt.colorbar(im)

    ax = plt.subplot(6, 2, 10)
    clim = (np.nanmin(psth), np.nanmax(psth) * .6)
    #nplt.plot_spectrogram(psth, fs=resp.fs, ax=ax, title="resp",
    #                      cmap='gray_r', clim=clim)
    #fig.colorbar(im, cax=ax, orientation='vertical')
    im = ax.imshow(psth,
                   origin='lower',
                   interpolation='none',
                   aspect='auto',
                   extent=extent,
                   cmap='gray_r',
                   clim=clim)
    nplt.ax_remove_box(ax)
    plt.ylabel('resp')
    plt.xticks([])
    plt.colorbar(im)

    ax = plt.subplot(6, 2, 12)
    clim = (np.nanmin(psth), np.nanmax(ppsth))
    im = ax.imshow(ppsth,
                   origin='lower',
                   interpolation='none',
                   aspect='auto',
                   extent=extent,
                   cmap='gray_r',
                   clim=clim)
    nplt.ax_remove_box(ax)
    plt.ylabel('pred')
    plt.colorbar(im)

    #    if (ax1 is not None) and (pc_idx is not None):
    #        cellids=ctx['rec'].meta['cellid']
    #        h=ctx['rec'].meta['pc_weights'][pc_idx[0],:]
    #        max_w=np.max(np.abs(h))*0.75
    #        plt.sca(ax1)
    #        plot_weights_64D(h,cellids,vmin=-max_w,vmax=max_w)
    #        plt.axis('off')
    #
    #    if ax2 is not None:
    #        r = ctx['rec']['resp'].extract_epoch('REFERENCE',
    #               mask=ctx['rec']['mask'])
    #        d = ctx['rec']['resp'].get_epoch_bounds('PreStimSilence')
    #        if len(d):
    #            PreStimSilence = np.mean(np.diff(d))
    #        else:
    #            PreStimSilence = 0
    #        prestimbins = int(PreStimSilence * fs)
    #
    #        mr=np.mean(r,axis=0)
    #        spont=np.mean(mr[:,:prestimbins],axis=1,keepdims=True)
    #        mr-=spont
    #        mr /= np.max(np.abs(mr),axis=1,keepdims=True)
    #        tt=np.arange(mr.shape[1])/fs
    #        ax2.plot(tt-PreStimSilence, mr[0,:], 'k')
    #        # time bar
    #        ax2.plot(np.array([0,1]),np.array([1.1, 1.1]), 'k', lw=3)
    #        nplt.ax_remove_box(ax2)
    #        ax2.set_title(cellid)

    #    title="r_fit={:.3f} test={:.3f}".format(
    #            modelspec[0]['meta']['r_fit'][0],
    #            modelspec[0]['meta']['r_test'][0])

    #    nl_mod_idx = find_module('nonlinearity', modelspec)
    #    nplt.nl_scatter(rec, modelspec, nl_mod_idx, sig_name='pred',
    #                    compare='resp', smoothing_bins=60,
    #                    xlabel1=None, ylabel1=None, ax=ax4)
    #
    #    sg_mod_idx = find_module('state', modelspec)
    #    if sg_mod_idx is not None:
    #        modelspec2 = copy.deepcopy(modelspec)
    #        g=modelspec2[sg_mod_idx]['phi']['g'][0,:]
    #        d=modelspec2[sg_mod_idx]['phi']['d'][0,:]
    #
    #        modelspec2[nl_mod_idx]['phi']['amplitude'] *= 1+g[-1]
    #        modelspec2[nl_mod_idx]['phi']['base'] += d[-1]
    #        nplt.plot_nl_io(modelspec2[nl_mod_idx], ax4.get_xlim(), ax4)
    #        g=["{:.2f}".format(g) for g in list(modelspec[sg_mod_idx]['phi']['g'][0,:])]
    #        ts = "SG: " + " ".join(g)
    #        ax4.set_title(ts)
    #
    #    nplt.ax_remove_box(ax4)
    return fig
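# The PSTH panels above average a response raster over the trial axis. A tiny
# self-contained illustration of that step with synthetic data (shapes follow
# the trials x channels x time convention used by extract_epoch above):
import numpy as np

rng = np.random.default_rng(0)
raster = rng.poisson(2.0, size=(20, 3, 100))   # trials x channels x time bins
psth = np.mean(raster, axis=0)                 # channels x time
print(psth.shape)                              # (3, 100)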
Example #56
    def from_ATL06(self, D6, GI_files=None, beam_pair=1, cycles=[1, 12],  ref_pt_numbers=None, ref_pt_x=None, hemisphere=-1,  mission_time_bds=None, verbose=False, DOPLOT=None, DEBUG=None):
        """
        Fit a collection of ATL06 data with ATL11 surface models

        Positional input:
            D6: ATL06 data structure (from the same rgt)
        Required keyword inputs:
            beam_pair: beam pair for the current fit (default=1)
            cycles: first and last cycles to be included in the current fit (default=[1, 12])
            GI_files: list of geo_index files from which to read ATL06 data for crossovers
            hemisphere: +1 (north) or -1 (south), used to choose a projection
        Optional keyword arguments (not necessarily independent):
            mission_time_bds: starting and ending times for the mission
            verbose: write fitting info to stdout if true
            DOPLOT: list of plots to make
            DEBUG: output debugging info
        """

        params_11=ATL11.defaults()
        if mission_time_bds is None:
            mission_time_bds=np.array([286.*24*3600, 398.*24*3600])

        # hard code the bin size until there's a good reason to change it
        index_bin_size=1.e4

        # setup the EPSG
        if hemisphere==1:
            params_11.EPSG=3413
        else:
            params_11.EPSG=3031

        # initialize the xover data cache
        D_xover_cache={}

        last_time=time.time()
        last_count=0
        # loop over reference points
        P11_list=list()
        for count, ref_pt in enumerate(ref_pt_numbers):

            x_atc_ctr=ref_pt_x[count]
            # section 5.1.1
            D6_sub=D6[np.any(np.abs(D6.segment_id-ref_pt) <= params_11.N_search, axis=1)]
            if D6_sub.h_li.shape[0]<=1:
                if verbose:
                    print("not enough data at ref pt=%d" % ref_pt)
                continue

            #2a. define representative x and y values for the pairs
            pair_data=ATL06_pair().from_ATL06(D6_sub, datasets=['x_atc','y_atc','delta_time','dh_fit_dx','dh_fit_dy','segment_id','cycle_number','h_li', 'h_li_sigma'])   # this might go, similar to D6_sub
            if ~np.any(np.isfinite(pair_data.y)):
                continue
            P11=ATL11.point(N_pairs=len(pair_data.x), rgt=D6_sub.rgt[0, 0],\
                            ref_pt=ref_pt, beam_pair=D6_sub.BP[0, 0],  \
                            x_atc_ctr=x_atc_ctr, \
                            track_azimuth=np.nanmedian(D6_sub.seg_azimuth.ravel()),\
                            cycles=cycles,  mission_time_bds=mission_time_bds)

            P11.DOPLOT=DOPLOT
            # step 2: select pairs, based on reasonable slopes
            try:
                P11.select_ATL06_pairs(D6_sub, pair_data)
            except np.linalg.LinAlgError:
                if verbose:
                    print("LinAlg error in select_ATL06_pairs ref pt=%d" % ref_pt)
            #if P11.ref_surf.complex_surface_flag:
            #    P11.select_ATL06_pairs(D6_sub, pair_data, complex_surface_flag=True)
                    
            if P11.ref_surf.fit_quality > 0:
                #P11_list.append(P11)
                if verbose:
                    print("surf_fit_quality=%d at ref pt=%d" % (P11.ref_surf.quality_summary, ref_pt))
                continue
            
            if np.sum(P11.valid_pairs.all) < 2:
                continue
                       
            # select the y coordinate for the fit (in ATC coords)
            P11.select_y_center(D6_sub, pair_data)
            
            if np.sum(P11.valid_pairs.all) < 2:  
                continue
            
            if P11.ref_surf.fit_quality > 0:
                #P11_list.append(P11)
                if verbose:
                    print("surf_fit_quality=%d at ref pt=%d" % (P11.ref_surf.quality_summary, ref_pt))
                continue
            
            # regress the geographic coordinates from the data to the fit center
            P11.ROOT.latitude, P11.ROOT.longitude = regress_to(D6_sub,['latitude','longitude'], ['x_atc','y_atc'], [x_atc_ctr, P11.y_atc_ctr])

            # find the reference surface
            P11.find_reference_surface(D6_sub, pair_data)
            
            if 'inversion failed' in P11.status:
                #P11_list.append(P11)
                if verbose:
                    print("surf_fit_quality=%d at ref pt=%d" % (P11.ref_surf.fit_quality, ref_pt))
                continue
            # get the slope and curvature parameters
            P11.characterize_ref_surf()

            # correct the heights from other cycles to the reference point using the reference surface
            P11.corr_heights_other_cycles(D6_sub)

            P11.ROOT.quality_summary = np.logical_not(
                    (P11.cycle_stats.min_signal_selection_source <=1) &\
                    (P11.cycle_stats.min_snr_significance < 0.02) &\
                    (P11.cycle_stats.atl06_summary_zero_count > 0) )

            # find the center of the bin in polar stereographic coordinates
            x0, y0=regress_to(D6_sub, ['x','y'], ['x_atc', 'y_atc'], [x_atc_ctr,P11.y_atc_ctr])

            # get the DEM elevation
            P11.ref_surf.dem_h=regress_to(D6_sub, ['dem_h'], ['x_atc', 'y_atc'], [x_atc_ctr,P11.y_atc_ctr])

            # get the data for the crossover point
            if GI_files is not None and np.abs(P11.ROOT.latitude) < 86:
                D_xover=ATL11.get_xover_data(x0, y0, P11.rgt, GI_files, D_xover_cache, index_bin_size, params_11)
                P11.corr_xover_heights(D_xover)
            # if we have read any data for the current bin, run the crossover calculation
            PLOTME=False
            if PLOTME:
                plt.figure()
                for key in D_xover_cache.keys():
                    plt.plot(D_xover_cache[key]['D'].x, D_xover_cache[key]['D'].y,'k.')

                plt.plot(D_xover.x, D_xover.y,'m.')
                plt.plot(x0, y0,'g*')

            if not np.isfinite(P11.ROOT.latitude):
                continue
            P11_list.append(P11)
            if count-last_count>1000:
                print("completed %d/%d segments, ref_pt= %d, last 1000 segments in %2.2f s." %(count, len(ref_pt_numbers), ref_pt, time.time()-last_time))
                last_time=time.time()
                last_count=count

        if len(P11_list) > 0:
            cycles=[np.nanmin([Pi.cycles for Pi in P11_list]), np.nanmax([Pi.cycles for Pi in P11_list])]
            N_coeffs=np.nanmax([Pi.N_coeffs  for Pi in P11_list])
            return ATL11.data(track_num=P11_list[0].rgt, beam_pair=beam_pair, cycles=cycles, N_coeffs=N_coeffs, N_pts=len(P11_list)).from_list(P11_list)
        else:
            return None
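# The "section 5.1.1" step above keeps only the ATL06 segments whose segment_id
# lies within N_search of the current reference point. A simplified NumPy
# sketch of that windowing with toy data:
import numpy as np

segment_id = np.arange(100, 120).reshape(-1, 2)   # toy (pair x beam) segment ids
ref_pt = 110
N_search = 2
keep = np.any(np.abs(segment_id - ref_pt) <= N_search, axis=1)
print(segment_id[keep])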
Example #57
def reverse_lut(
        sensor,
        lutdw=None,
        par='romix',
        pct=(1, 60),
        nbins=20,
        override=False,
        pressures=[500, 1013, 1100],
        base_luts=['ACOLITE-LUT-202110-MOD1', 'ACOLITE-LUT-202110-MOD2'],
        rsky_lut='ACOLITE-RSKY-202102-82W',
        get_remote=True,
        remote_base='https://raw.githubusercontent.com/acolite/acolite_luts/main'
):
    import acolite as ac
    import numpy as np
    from netCDF4 import Dataset
    import scipy.interpolate
    import time, os

    if lutdw is None:
        rsrf = ac.config['data_dir'] + '/RSR/{}.txt'.format(sensor)
        rsr, rsr_bands = ac.shared.rsr_read(rsrf)
        bands = [b for b in rsr_bands]
    else:
        lut = list(lutdw.keys())[0]
        bands = list(lutdw[lut]['rgi'].keys())

    revl = {}
    for lut in base_luts:
        lutdir = '{}/{}-Reverse/{}'.format(ac.config['lut_dir'],
                                           '-'.join(lut.split('-')[0:3]),
                                           sensor)
        if not os.path.exists(lutdir): os.makedirs(lutdir)

        rgi = {}
        for b in bands:
            slut = '{}-reverse-{}-{}-{}'.format(lut, sensor, par, b)
            lutnc = '{}/{}.nc'.format(lutdir, slut)

            if (not os.path.exists(lutnc)) or (override):
                if os.path.exists(lutnc): os.remove(lutnc)

                ## try downloading LUT from GitHub
                if (get_remote):
                    remote_lut = '{}/{}-Reverse/{}/{}.nc'.format(
                        remote_base, '-'.join(lut.split('-')[0:3]), sensor,
                        slut)
                    try:
                        print('Getting remote LUT {}'.format(remote_lut))
                        ac.shared.download_file(remote_lut, lutnc)
                        print('Testing LUT {}'.format(lutnc))
                        lutb, meta = ac.shared.lutnc_import(lutnc)  # test LUT
                    except:
                        print('Could not download remote lut {} to {}'.format(
                            remote_lut, lutnc))
                        if os.path.exists(lutnc): os.remove(lutnc)

                ## generate LUT if download did not work
                if (not os.path.exists(lutnc)):
                    print('Creating reverse LUTs for {}'.format(sensor))
                    if lutdw is None:
                        print('Importing source LUTs')
                        lutdw = ac.aerlut.import_luts(
                            sensor=sensor,
                            base_luts=base_luts,
                            lut_par=[par],
                            return_lut_array=True,
                            pressures=pressures,
                            get_remote=get_remote,
                            add_rsky=par == 'romix+rsky_t',
                            rsky_lut=rsky_lut)
                    pid = lutdw[lut]['ipd'][par]
                    if len(lutdw[lut]['dim']) == 7:
                        wind_dim = True
                        pressures, pids, raas, vzas, szas, winds, aots = lutdw[
                            lut]['dim']
                    else:
                        pressures, pids, raas, vzas, szas, aots = lutdw[lut][
                            'dim']
                        wind_dim = False
                        winds = np.atleast_1d(2)

                    print('Starting {}'.format(slut))
                    t0 = time.time()
                    tmp = lutdw[lut]['lut'][b][:, pid, :, :, :, :, :].flatten()
                    tmp = np.log(tmp)
                    prc = np.nanpercentile(tmp, pct)
                    h = np.histogram(tmp, bins=nbins, range=prc)
                    rpath_bins = np.exp(h[1])

                    ## set up dimensions for lut
                    lut_dimensions = ('pressure', 'raa', 'vza', 'sza', 'wind',
                                      'rho')
                    dim = [pressures, raas, vzas, szas, winds, rpath_bins]
                    dims = [len(d) for d in dim]
                    luta = np.zeros(dims) + np.nan
                    ii = 0
                    ni = np.prod(dims[:-1])
                    for pi, pressure in enumerate(pressures):
                        for ri, raa in enumerate(raas):
                            for vi, vza in enumerate(vzas):
                                for si, sza in enumerate(szas):
                                    for wi, wind in enumerate(winds):
                                        if wind_dim:
                                            ret = lutdw[lut]['rgi'][b](
                                                (pressure, pid, raa, vza, sza,
                                                 wind, aots))
                                        else:
                                            ret = lutdw[lut]['rgi'][b](
                                                (pressure, pid, raa, vza, sza,
                                                 aots))
                                        luta[pi, ri, vi, si,
                                             wi, :] = np.interp(
                                                 rpath_bins, ret, aots)
                                        ii += 1
                            print('{} {:.1f}%'.format(b, (ii / ni) * 100),
                                  end='\r')
                    print('\nResampling {} took {:.1f}s'.format(
                        slut,
                        time.time() - t0))

                    ## write this sensor band lut
                    if os.path.exists(lutnc): os.remove(lutnc)
                    nc = Dataset(lutnc, 'w')
                    ## set attributes
                    setattr(nc, 'base', slut)
                    setattr(nc, 'aermod', lut[-1])
                    setattr(nc, 'aots', aots)
                    setattr(nc, 'lut_dimensions', lut_dimensions)
                    for di, dn in enumerate(lut_dimensions):
                        ## set attribute
                        setattr(nc, dn, dim[di])
                        ## create dimensions
                        nc.createDimension(dn, len(dim[di]))
                    ## write lut
                    var = nc.createVariable('lut', np.float32, lut_dimensions)
                    var[:] = luta.astype(np.float32)
                    nc.close()

            ## read LUT and make rgi
            if os.path.exists(lutnc):
                nc = Dataset(lutnc)
                meta = {}
                for attr in nc.ncattrs():
                    attdata = getattr(nc, attr)
                    if isinstance(attdata, str): attdata = attdata.split(',')
                    meta[attr] = attdata
                lutb = nc.variables['lut'][:]
                nc.close()

                try:
                    minaot = np.nanmin(meta['aots'])
                    maxaot = np.nanmax(meta['aots'])
                except:
                    minaot = 0.001
                    maxaot = 5
                    print(meta.keys())

                ## band specific interpolator
                if len(np.atleast_1d(meta['wind'])) == 1:
                    rgi[b] = scipy.interpolate.RegularGridInterpolator(
                        [
                            meta[k] for k in meta['lut_dimensions']
                            if k not in ['wind']
                        ],
                        lutb[:, :, :, :, 0, :],
                        bounds_error=False,
                        fill_value=None)
                else:
                    rgi[b] = scipy.interpolate.RegularGridInterpolator(
                        [meta[k] for k in meta['lut_dimensions']],
                        lutb,
                        bounds_error=False,
                        fill_value=None)
        revl[lut] = {
            'rgi': rgi,
            'minaot': minaot,
            'maxaot': maxaot,
            'model': int(lut[-1]),
            'meta': meta
        }
    return (revl)
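# The core of the reverse LUT above: path reflectances are binned on a log
# scale between two percentiles, and the AOT corresponding to each reflectance
# bin is found with np.interp. A condensed sketch for a single geometry with
# toy (monotonic) forward-LUT values:
import numpy as np

aots = np.array([0.001, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0])
rho_path = 0.02 + 0.1 * np.log1p(aots)            # toy forward LUT at one geometry

prc = np.nanpercentile(np.log(rho_path), (1, 60))
edges = np.histogram(np.log(rho_path), bins=20, range=tuple(prc))[1]
rho_bins = np.exp(edges)                          # log-spaced reflectance bins

aot_of_rho = np.interp(rho_bins, rho_path, aots)  # reverse mapping rho -> aot
print(aot_of_rho[:5])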
Example #58
def plot_mean_weights_64D(h=None,
                          cellids=None,
                          l4=None,
                          vmin=None,
                          vmax=None,
                          title=None):

    # for case where given single array

    if type(h) is not list:
        h = [h]

    if type(cellids) is not list:
        cellids = [cellids]

    if type(l4) is not list:
        l4 = [l4]

    # create average h-vector, after applying appropriate shift and filling in missing
    # electrodes with nans

    l4_zero = 52 - 1  # align center of l4 with 52
    shift = np.subtract(l4, l4_zero)
    max_shift = shift[np.argmax(abs(shift))]
    h_mat_full = np.full((len(h), 64 + abs(max_shift)), np.nan)

    for i in range(0, h_mat_full.shape[0]):

        if type(cellids[i]) is not np.ndarray:
            cellids[i] = np.array(cellids[i])

        s = shift[i]
        electrodes = np.zeros(len(cellids[i]))
        for j in range(0, len(cellids[i])):
            electrodes[j] = int(cellids[i][j][-4:-2])

        chans = (np.sort([int(x) for x in electrodes]) - 1) + abs(max_shift)

        chans = np.add(chans, s)

        h_mat_full[i, chans] = h[i]

    # remove outliers
    one_sd = np.nanstd(h_mat_full.flatten())
    print(one_sd)
    print('adjusted {0} outliers'.format(np.sum(abs(h_mat_full) > 3 * one_sd)))
    out_inds = np.argwhere(abs(h_mat_full) > 3 * one_sd)
    print(h_mat_full[out_inds[:, 0], out_inds[:, 1]])
    h_mat_full[abs(h_mat_full) > 3 * one_sd] = 2 * one_sd * np.sign(
        h_mat_full[abs(h_mat_full) > 3 * one_sd])
    print(h_mat_full[out_inds[:, 0], out_inds[:, 1]])

    # Compute a sliding window average of the weights
    h_means = np.nanmean(h_mat_full, 0)
    h_mat = np.zeros(h_means.shape)
    h_mat_error = np.zeros(h_means.shape)
    for i in range(0, len(h_mat)):
        if i < 4:
            h_mat[i] = np.nanmean(h_means[0:i])
            h_mat_error[i] = np.nanstd(h_means[0:i]) / np.sqrt(i)
        elif i > h_mat.shape[0] - 4:
            h_mat[i] = np.nanmean(h_means[i:])
            h_mat_error[i] = np.nanstd(h_means[i:]) / np.sqrt(len(h_means) - i)
        else:
            h_mat[i] = np.nanmean(h_means[(i - 2):(i + 2)])
            h_mat_error[i] = np.nanstd(h_means[(i - 2):(i + 2)]) / np.sqrt(4)

    if vmin is None:
        vmin = np.nanmin(h_mat)
    if vmax is None:
        vmax = np.nanmax(h_mat)

    # Now plot locations for each site

    # left column + right column are identical
    el_shift = int(abs(max_shift) / 3)
    tf = 0
    while tf == 0:
        if el_shift % 3 != 0:
            el_shift += 1
        elif max_shift > 0 and max_shift < 3:
            el_shift += 1
            tf = 1
        else:
            tf = 1
    while max_shift % 3 != 0:
        if max_shift < 0:
            max_shift -= 1
        elif max_shift >= 0:
            max_shift += 1

    lr_col = np.arange(0, (21 + el_shift) * 0.25,
                       0.25)  # 25 micron vertical spacing
    left_ch_nums = np.arange(3, 64 + abs(max_shift), 3)
    right_ch_nums = np.arange(4, 65 + abs(max_shift), 3)
    center_ch_nums = np.insert(np.arange(5, 63 + abs(max_shift), 3),
                               obj=slice(0, 1),
                               values=[1, 2],
                               axis=0)
    center_col = np.arange(-0.25, (20.25 + el_shift) * .25, 0.25) - 0.125
    ch_nums = np.hstack((left_ch_nums, center_ch_nums, right_ch_nums))
    sort_inds = np.argsort(ch_nums)

    l_col = np.vstack((np.ones(21 + el_shift) * -0.2, lr_col))
    r_col = np.vstack((np.ones(21 + el_shift) * 0.2, lr_col))
    c_col = np.vstack((np.zeros(22 + el_shift), center_col))

    if l_col.shape[1] != len(left_ch_nums):
        left_ch_nums = np.concatenate((left_ch_nums, [left_ch_nums[-1] + 3]))
    if r_col.shape[1] != len(right_ch_nums):
        right_ch_nums = np.concatenate((right_ch_nums, [left_ch_nums[-1] + 3]))
    if c_col.shape[1] != len(center_ch_nums):
        center_ch_nums = np.concatenate(
            (center_ch_nums, [left_ch_nums[-1] + 3]))

    ch_nums = np.hstack((left_ch_nums, center_ch_nums, right_ch_nums))
    sort_inds = np.argsort(ch_nums)

    l_col = np.vstack((np.ones(21 + el_shift) * -0.2, lr_col))
    r_col = np.vstack((np.ones(21 + el_shift) * 0.2, lr_col))
    c_col = np.vstack((np.zeros(22 + el_shift), center_col))

    locations = np.hstack((l_col, c_col, r_col))[:, sort_inds]

    locations[1, :] = 100 * (locations[1, :])
    locations[0, :] = 3000 * (locations[0, :] * 0.2)
    print(h_mat_full.shape)
    if h_mat.shape[0] != locations.shape[1]:
        diff = locations.shape[1] - h_mat.shape[0]
        h_mat_scatter = np.concatenate(
            (h_mat_full, np.full((np.shape(h_mat_full)[0], diff), np.nan)),
            axis=1)
        h_mat = np.concatenate((h_mat, np.full(diff, np.nan)))
        h_mat_error = np.concatenate((h_mat_error, np.full(diff, np.nan)))
    else:
        # keep the un-padded weights for the scatter plot below
        h_mat_scatter = h_mat_full

    if title is not None:
        plt.figure(title)
    else:
        plt.figure()
    plt.subplot(142)
    plt.title('mean weights per channel')
    plt.scatter(locations[0, :],
                locations[1, :],
                facecolor='none',
                edgecolor='k',
                s=50)

    indexes = [x[0] for x in np.argwhere(~np.isnan(h_mat))]
    # plot the colors
    import matplotlib
    norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
    cmap = matplotlib.cm.jet
    mappable = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)
    mappable.set_array(h_mat[indexes])
    colors = mappable.to_rgba(list(h_mat[indexes]))
    plt.scatter(locations[:, indexes][0, :],
                locations[:, indexes][1, :],
                c=colors,
                vmin=vmin,
                vmax=vmax,
                s=50,
                edgecolor='none')
    plt.colorbar(mappable)  #,orientation='vertical',fraction=0.04, pad=0.0)
    #plt.axis('scaled')
    plt.xlim(-500, 500)
    plt.axis('off')

    # Add dashed line at "layer IV"
    plt.plot([-250, 250],
             [locations[1][l4_zero] + 75, locations[1][l4_zero] + 75],
             linestyle='-',
             color='k',
             lw=4,
             alpha=0.3)
    plt.plot([-250, 250],
             [locations[1][l4_zero] - 75, locations[1][l4_zero] - 75],
             linestyle='-',
             color='k',
             lw=4,
             alpha=0.3)

    # plot conditional density

    h_kde = h_mat.copy()
    sigma = 3
    h_kde[np.isnan(h_mat)] = 0
    h_kde = sf.gaussian_filter1d(h_kde, sigma)
    h_kde_error = h_mat_error.copy()
    h_kde_error[np.isnan(h_mat)] = 0
    h_kde_error = sf.gaussian_filter1d(h_kde_error, sigma)
    plt.subplot(141)
    plt.title('smoothed mean weights')
    plt.plot(-h_kde, locations[1, :], lw=3, color='k')
    plt.fill_betweenx(locations[1, :],
                      -(h_kde + h_kde_error),
                      -(h_kde - h_kde_error),
                      alpha=0.3,
                      facecolor='k')
    plt.axhline(locations[1][l4_zero] + 75, color='k', lw=3, alpha=0.3)
    plt.axhline(locations[1][l4_zero] - 75, color='k', lw=3, alpha=0.3)
    plt.axvline(0, color='k', linestyle='--', alpha=0.5)
    plt.ylabel('um (layer IV center at {0} um)'.format(
        int(locations[1][l4_zero])))
    #plt.xlim(-vmax, -vmin)
    for i in range(0, h_mat_scatter.shape[0]):
        plt.plot(-h_mat_scatter[i, :], locations[1, :], '.')
    #plt.axis('off')

    # plot binned histogram for each layer
    plt.subplot(222)
    l4_shift = locations[1][l4_zero]
    plt.title('center of layer IV: {0} um'.format(l4_shift))
    # 24 electrodes spans roughly 200um
    # shift by 18 (150um) each window
    width_string = '200um'
    width = 24
    step = 18
    sets = int(h_mat_full.shape[1] / step) + 1
    print('number of {1} bins: {0}'.format(sets, width_string))

    si = 0
    legend_strings = []
    w = []
    for i in range(0, sets):
        if si + width > h_mat_full.shape[1]:
            w.append(h_mat_full[:, si:][~np.isnan(h_mat_full[:, si:])])
            plt.hist(w[i], alpha=0.5)
            legend_strings.append(
                str(int(100 * si / 3 * 0.25)) + ', ' +
                str(int(100 * h_mat_full.shape[1] / 3 * 0.25)) + 'um')
            si += step
        else:
            w.append(h_mat_full[:, si:(
                si + width)][~np.isnan(h_mat_full[:, si:(si + width)])])
            plt.hist(w[i], alpha=0.5)
            legend_strings.append(
                str(int(100 * si / 3 * 0.25)) + ', ' +
                str(int(100 * (si + width) / 3 * 0.25)) + 'um')
            si += step

    plt.legend(legend_strings[::-1])
    plt.xlabel('weight')
    plt.ylabel('counts per {0} bin'.format(width_string))

    plt.subplot(224)
    mw = []
    mw_error = []
    for i in range(0, sets):
        mw.append(np.nanmean(w[i]))
        mw_error.append(np.nanstd(w[i]) / np.sqrt(len(w[i])))

    plt.bar(np.arange(0, sets), mw, yerr=mw_error, facecolor='k', alpha=0.5)
    plt.xticks(np.arange(0, sets), legend_strings, rotation=45)
    plt.xlabel('Window')
    plt.ylabel('Mean weight')

    plt.tight_layout()
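# The sliding-window average above handles the window edges explicitly. A more
# compact NaN-aware version of the same idea (a sketch, not a drop-in
# replacement -- the edge handling differs slightly from the loop above):
import numpy as np


def nan_sliding_mean(x, window=5):
    """Centered moving average that ignores NaNs; edges use what is available."""
    x = np.asarray(x, dtype=float)
    half = window // 2
    out = np.full(x.shape, np.nan)
    for i in range(len(x)):
        seg = x[max(0, i - half):i + half + 1]
        if np.any(~np.isnan(seg)):
            out[i] = np.nanmean(seg)
    return out


print(nan_sliding_mean(np.array([1., np.nan, 3., 4., 10.]), window=3))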
idx = [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
df_idx = pd.DataFrame(index=idx)
for i in range(len(y_lablels)):
    df_t = pd.DataFrame(df.strength[df.index.date == y_lablels[i]])
    df_t['Hora'] = np.array(df_t.index.hour)
    df_t.index = df_t['Hora']
    df_fin = pd.concat([df_idx, df_t], axis=1, sort=False)
    data[i, :] = df_fin.strength.values
    print(i)

cmap = matplotlib.cm.Spectral_r
plt.close('all')
# fig = plt.figure(figsize=(8,10))
fig = plt.figure()
ax = fig.add_subplot(111)
norml = colors.Normalize(vmin=np.nanmin(df.strength.values),
                         vmax=np.nanmax(df.strength.values))
mapa = ax.imshow(data, interpolation='none', cmap=cmap, norm=norml)
cbar = fig.colorbar(mapa, ax=ax, orientation='vertical', format="%.2f")
cbar.set_label(u"Potencia [W]", fontsize=12, fontproperties=prop)
ticks = np.arange(0, data.shape[0], 10)
labels = [y_lablels[i] for i in ticks]
ax.set_aspect(aspect=0.05)
ax.set_yticks(ticks, minor=False)
ax.set_yticklabels(labels, minor=False)
ax.set_xticks(range(0, data.shape[1]), minor=False)
ax.set_xticklabels(x_lablels, minor=False)
# ax.set_ylabel(u'Potencia $[W]$', fontproperties = prop_1,  fontsize=12)
ax.set_xlabel('Hora', fontproperties=prop_1, fontsize=12)
ax.set_title('Registros horarios de potencia en ' + Punto,
             fontproperties=prop)
Example #60
               vmax=1.)
    if (len(sys.argv) > 3):
        plt.title("%s" % (sys.argv[3]))
    else:
        plt.title("%s" % (sys.argv[1]))

    cbar = plt.colorbar(drawedges=False)
    tick_locator = ticker.MaxNLocator(nbins=7)
    cbar.locator = tick_locator
    cbar.update_ticks()
    cbar.ax.tick_params(labelsize=20)
    cbar.solids.set_edgecolor("face")

    if (contour):
        maxdiff = numpy.nanmax(acc)
        mindiff = numpy.nanmin(acc)
        levels = numpy.arange(mindiff, maxdiff + tol,
                              (maxdiff - mindiff) / nContour)
        CS = plt.contourf(
            acc,
            levels,
            # colors='k',
            origin='lower',
            extent=extent)
        #plt.clabel(CS, inline=1, fontsize=14, colors="white")

    plt.subplot(1, 2, 2)
    plt.xlabel("r [m]")
    plt.ylabel("z [m]")
    plt.imshow(dev[:, :],
               extent=(numpy.min(rl), numpy.max(rl), numpy.min(zl),