Example #1
0
    def generate_artist(self):
        container = self.get_container()
        if self.isempty() is False:
            return

        x, y = self._eval_xy()  # this handles "use_var"

        lp = self.getp("loaded_property")

        if True:
            x, y = self.getp(("x", "y"))
            if y is None:
                return
            if x is None:
                return

            if (x is not None and
                    y is not None):
                self._data_extent = [np.nanmin(x), np.nanmax(x),
                                     np.nanmin(y), np.nanmax(y)]

                if len(y.shape) == 1:
                    kywds = self._var["kywds"]
                    args, self._tri = tri_args(x, y, self._tri)
                    kywds['mask'] = self.getp('mask')
                    kywds['linestyle'] = self.getp('linestyle')
                    kywds['linewidth'] = self.getp('linewidth')
                    kywds['color'] = self.getp('color')
                    a = triplot(container, *args, **kywds)
                    self.set_artist(a[0])
                    self._other_artists = a[1:]

        if lp is not None:
            for i in range(0, len(lp)):
                self.set_artist_property(self._artists[i], lp[i])
            self.delp("loaded_property")
        self.set_rasterized()
Example #2
0
def show_overlay(img3d, cc3d, ncc=10, s=85, xyz='xy', alpha=.8):
    """Shows the connected components overlaid on img3d.

    Input
    ======
    img3d -- 3d array
    cc3d -- 3d array (preferably of the same shape as img3d; use get_3d_cc(...))
    ncc -- where to cut off the color scale
    s -- slice to show
    xyz -- which projection to use, one of {'xy','xz','yz'}
    """
    cc = get_slice(cc3d,s,xyz)
    img = get_slice(img3d,s,xyz)

    notcc = np.isnan(cc)
    incc = np.not_equal(notcc,True)

    img4 = plt.cm.gray(img/np.nanmax(img))
    if ncc != np.inf:
        cc = plt.cm.jet(cc/float(ncc))
    else:
        cc = plt.cm.jet(np.log(cc)/np.log(np.nanmax(cc)))

    cc[notcc,:]=img4[notcc,:]
    cc[incc,3] = 1-img[incc]/(2*np.nanmax(img))

    plt.imshow(cc)
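A minimal, self-contained sketch of the compositing idea above, using toy arrays in place of get_slice/get_3d_cc (project helpers not shown here): labelled pixels get a jet colour, unlabelled (NaN) pixels fall back to the grayscale image.

import numpy as np
import matplotlib.pyplot as plt

img = np.random.rand(64, 64)            # toy background slice
cc = np.full((64, 64), np.nan)          # toy component map, NaN = no component
cc[20:40, 20:40] = 3.0

notcc = np.isnan(cc)
rgba_img = plt.cm.gray(img / np.nanmax(img))
rgba_cc = plt.cm.jet(cc / 10.0)

rgba_cc[notcc, :] = rgba_img[notcc, :]                       # keep background where unlabelled
rgba_cc[~notcc, 3] = 1 - img[~notcc] / (2 * np.nanmax(img))  # fade labels by brightness
plt.imshow(rgba_cc)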
Example #3
0
def tuning(x, y, err=None, smooth=None, ylabel=None, pal=None):
    """
    Plot a tuning curve
    """
    if smooth is not None:
        xs, ys = smoothfit(x, y, smooth)
        plt.plot(xs, ys, linewidth=4, color="black", zorder=1)
    else:
        ys = asarray([0])
    if pal is None:
        pal = sns.color_palette("husl", n_colors=len(x) + 6)
        pal = pal[2 : 2 + len(x)][::-1]
    plt.scatter(x, y, s=300, linewidth=0, color=pal, zorder=2)
    if err is not None:
        plt.errorbar(x, y, yerr=err, linestyle="None", ecolor="black", zorder=1)
    plt.xlabel("Wall distance (mm)")
    plt.ylabel(ylabel)
    plt.xlim([-2.5, 32.5])
    if err is not None:
        errTmp = asarray(err).copy()
        errTmp[isnan(errTmp)] = 0
        rng = max([nanmax(ys), nanmax(y + errTmp)])
    else:
        rng = max([nanmax(ys), nanmax(y)])
    plt.ylim([0 - rng * 0.1, rng + rng * 0.1])
    plt.yticks(linspace(0, rng, 3))
    plt.xticks(range(0, 40, 10))
    sns.despine()
    return rng
Example #4
0
    def __call__(self, transform_xy, x1, y1, x2, y2):
        """
        get extreme values.

        x1, y1, x2, y2 in image coordinates (0-based)
        nx, ny : number of divisions in each axis
        """
        x_, y_ = np.linspace(x1, x2, self.nx), np.linspace(y1, y2, self.ny)
        x, y = np.meshgrid(x_, y_)
        lon, lat = transform_xy(np.ravel(x), np.ravel(y))

        # iron out jumps, but the algorithm should be improved.
        # This is just a naive way of doing it and may fail for some cases.
        # Consider replacing this with numpy.unwrap.
        # We are ignoring invalid warnings. They are triggered when
        # comparing arrays containing NaNs with >. We are already handling
        # that correctly using np.nanmin and np.nanmax.
        with np.errstate(invalid='ignore'):
            if self.lon_cycle is not None:
                lon0 = np.nanmin(lon)
                lon -= 360. * ((lon - lon0) > 180.)
            if self.lat_cycle is not None:
                lat0 = np.nanmin(lat)
                lat -= 360. * ((lat - lat0) > 180.)

        lon_min, lon_max = np.nanmin(lon), np.nanmax(lon)
        lat_min, lat_max = np.nanmin(lat), np.nanmax(lat)

        lon_min, lon_max, lat_min, lat_max = \
                 self._adjust_extremes(lon_min, lon_max, lat_min, lat_max)

        return lon_min, lon_max, lat_min, lat_max
Example #5
0
def summary():
    # read sonde data
    for sites in [[0],[1],[2]]:
        slist,snames=read_diff_events(sites=sites)
        ecount = [len(s.einds) for s in slist]
        mintp = [np.nanmin(s.tp) for s in slist]
        meantp = [np.nanmean(s.tp) for s in slist]
        maxtp = [np.nanmax(s.tp) for s in slist]
        
        head="%9s"%slist[0].name
        ecount = "events   "
        meantp = "mean tph "
        minmax = "tph bound"
        for sonde, sname in zip(slist,snames):
            
            head=head+'| %16s'%sname
            ecount=ecount+'| %16d'%len(sonde.einds)
            meantp=meantp+'| %16.2f'%np.nanmean(sonde.tp)
            minmax=minmax+'| %7.2f,%7.2f '%(np.nanmin(sonde.tp),np.nanmax(sonde.tp))
            
        print("")
        print(head)
        print(ecount)
        print(meantp)
        print(minmax)
Example #6
0
def normalizeFloatImage3(floatImage):

    mn_0 = np.nanmin(np.nanmin(floatImage[:, :, 1]))
    mx_0 = np.nanmax(np.nanmax(floatImage[:, :, 1]))

    rows = floatImage.shape[0]
    cols = floatImage.shape[1]
    mn_0 = 1000
    mx_0 = -1000
    for r in range(rows):
        for c in range(cols):
            if floatImage[r, c, 2] <= 0:
                mn_0 = mn_0 if mn_0 <= floatImage[r, c, 1] else floatImage[r, c, 1]
                mx_0 = mx_0 if mx_0 >= floatImage[r, c, 1] else floatImage[r, c, 1]

    fctr_0 = 255.0 / (mx_0 - mn_0)

    for r in range(rows):
        for c in range(cols):
            if floatImage[r, c, 2] <= 0:
                floatImage[r, c, 1] = (floatImage[r, c, 1] - mn_0) * fctr_0
            else:
                floatImage[r, c, 1] = 0

    print("mn_0: ", mn_0, "  mx_0: ", mx_0)
Example #7
0
    def _set_minmax(self):
        data = self._get_fast_data()
        try:
            self.maxval = numpy.nanmax(data)
            self.minval = numpy.nanmin(data)
        except Exception:
            self.maxval = 0
            self.minval = 0

        # TODO: see if there is a faster way to ignore infinity
        try:
            if numpy.isfinite(self.maxval):
                self.maxval_noinf = self.maxval
            else:
                self.maxval_noinf = numpy.nanmax(data[numpy.isfinite(data)])
        except Exception:
            self.maxval_noinf = self.maxval

        try:
            if numpy.isfinite(self.minval):
                self.minval_noinf = self.minval
            else:
                self.minval_noinf = numpy.nanmin(data[numpy.isfinite(data)])
        except Exception:
            self.minval_noinf = self.minval
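A small stand-alone sketch of the same infinity fallback (toy array, not the class above): when nanmax returns inf, recompute it over the finite entries only.

import numpy as np

data = np.array([1.0, np.nan, np.inf, 3.0])
maxval = np.nanmax(data)                                 # inf
if not np.isfinite(maxval):
    maxval_noinf = np.nanmax(data[np.isfinite(data)])    # 3.0
else:
    maxval_noinf = maxval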
Example #8
0
    def __call__(self, transform_xy, x1, y1, x2, y2):
        """
        get extreme values.

        x1, y1, x2, y2 in image coordinates (0-based)
        nx, ny : number of divisions in each axis
        """
        x_, y_ = np.linspace(x1, x2, self.nx), np.linspace(y1, y2, self.ny)
        x, y = np.meshgrid(x_, y_)
        lon, lat = transform_xy(np.ravel(x), np.ravel(y))

        # iron out jumps, but the algorithm should be improved.
        # This is just a naive way of doing it and may fail for some cases.
        if self.lon_cycle is not None:
            lon0 = np.nanmin(lon)
            lon -= 360.0 * ((lon - lon0) > 180.0)
        if self.lat_cycle is not None:
            lat0 = np.nanmin(lat)
            lat -= 360.0 * ((lat - lat0) > 180.0)

        lon_min, lon_max = np.nanmin(lon), np.nanmax(lon)
        lat_min, lat_max = np.nanmin(lat), np.nanmax(lat)

        lon_min, lon_max, lat_min, lat_max = self._adjust_extremes(lon_min, lon_max, lat_min, lat_max)

        return lon_min, lon_max, lat_min, lat_max
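A toy illustration of the jump-ironing step used in Examples #4 and #8 (made-up longitudes): values just past the 0/360 seam are pulled back by 360 so that nanmin/nanmax report a tight range; the errstate guard mirrors Example #4's handling of NaN comparisons.

import numpy as np

lon = np.array([350.0, 355.0, 1.0, 5.0, np.nan])
lon0 = np.nanmin(lon)                                # 1.0
with np.errstate(invalid='ignore'):                  # NaN > 180 comparisons
    lon -= 360.0 * ((lon - lon0) > 180.0)            # 350, 355 -> -10, -5
print(np.nanmin(lon), np.nanmax(lon))                # -10.0 5.0 instead of 1.0 355.0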
Example #9
0
def calc_norm_summary_tables(accuracy_tbl, time_tbl):
    """
    Calculate normalized performance/ranking summaries, as numpy
    matrices for convenience, plus matrices of additional
    statistics (min, max, percentiles, etc.).

    Here "normalized" means relative to the best, which gets a 1; all
    others get the ratio resulting from dividing by the performance of
    the best.
    """
    # Min across all minimizers, i.e. for each fit problem what is the lowest chi-squared and the lowest time
    min_sum_err_sq = np.nanmin(accuracy_tbl, 1)
    min_runtime = np.nanmin(time_tbl, 1)

    # create normalised tables
    norm_acc_rankings = accuracy_tbl / min_sum_err_sq[:, None]
    norm_runtimes = time_tbl / min_runtime[:, None]

    summary_cells_acc = np.array([np.nanmin(norm_acc_rankings, 0),
                                  np.nanmax(norm_acc_rankings, 0),
                                  stats.nanmean(norm_acc_rankings, 0),
                                  stats.nanmedian(norm_acc_rankings, 0)
                                  ])

    summary_cells_runtime = np.array([np.nanmin(norm_runtimes, 0),
                                      np.nanmax(norm_runtimes, 0),
                                      stats.nanmean(norm_runtimes, 0),
                                      stats.nanmedian(norm_runtimes, 0)
                                      ])

    return norm_acc_rankings, norm_runtimes, summary_cells_acc, summary_cells_runtime
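A tiny worked example of the "relative to the best" normalisation described in the docstring (made-up numbers): each row is divided by its own nanmin, so the best minimizer per problem scores 1.0 and NaN entries stay NaN.

import numpy as np

accuracy_tbl = np.array([[2.0, 1.0, np.nan],
                         [9.0, 3.0, 6.0]])
min_sum_err_sq = np.nanmin(accuracy_tbl, 1)              # [1.0, 3.0]
norm_acc_rankings = accuracy_tbl / min_sum_err_sq[:, None]
# [[2.  1.  nan]
#  [3.  1.  2. ]]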
Example #10
0
    def test_derivative(self):
        log = logging.getLogger("test_J0023.derivative_test")
        testp = tdu.get_derivative_params(self.modelJ0023)
        delay = self.modelJ0023.delay(self.toasJ0023)
        for p in testp.keys():
            log.debug("Running derivative for %s", 'd_delay_d_' + p)
            if p in ['EPS2', 'EPS1']:
                testp[p] = 10
            ndf = self.modelJ0023.d_phase_d_param_num(self.toasJ0023, p, testp[p])
            adf = self.modelJ0023.d_phase_d_param(self.toasJ0023, delay, p)
            diff = adf - ndf
            if not np.all(diff.value == 0.0):
                mean_der = (adf + ndf) / 2.0
                relative_diff = np.abs(diff) / np.abs(mean_der)
                # print "Diff Max is :", np.abs(diff).max()
                msg = ('Derivative test failed at d_delay_d_%s with max relative '
                       'difference %lf' % (p, np.nanmax(relative_diff).value))
                if p in ['PMELONG', 'ELONG']:
                    tol = 2e-2
                elif p in ['FB2', 'FB3']:
                    tol = 0.08
                else:
                    tol = 1e-3
                log.debug("derivative relative diff for %s, %lf"
                          % ('d_delay_d_' + p, np.nanmax(relative_diff).value))
                assert np.nanmax(relative_diff) < tol, msg
            else:
                continue
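A hedged, array-only sketch of the tolerance check above (no PINT objects): the worst-case relative difference is taken with nanmax so points where both derivatives are ~0, which produce NaN in the ratio, do not fail the assertion.

import numpy as np

adf = np.array([1.000, 2.002, 0.0])   # "analytical" toy values
ndf = np.array([1.001, 2.000, 0.0])   # "numerical" toy values
diff = adf - ndf
mean_der = (adf + ndf) / 2.0
with np.errstate(invalid='ignore'):
    relative_diff = np.abs(diff) / np.abs(mean_der)   # last entry is 0/0 -> NaN
assert np.nanmax(relative_diff) < 1e-2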
Example #11
0
    def _get_Tp_limits(self):
        """Get the limits for the graphs in temperature and pressure, based on 
        SI units: [Tmin, Tmax, pmin, pmax]"""
        T_lo,T_hi,P_lo,P_hi = self.limits
        Ts_lo,Ts_hi = self._get_sat_bounds(CoolProp.iT)
        Ps_lo,Ps_hi = self._get_sat_bounds(CoolProp.iP)

        if T_lo is None:            T_lo  = 0.0
        elif T_lo < self.ID_FACTOR: T_lo *= Ts_lo
        if T_hi is None:            T_hi  = 1e6
        elif T_hi < self.ID_FACTOR: T_hi *= Ts_hi
        if P_lo is None:            P_lo  = 0.0
        elif P_lo < self.ID_FACTOR: P_lo *= Ps_lo
        if P_hi is None:            P_hi  = 1e10
        elif P_hi < self.ID_FACTOR: P_hi *= Ps_hi

        try: T_lo = np.nanmax([T_lo, self._state.trivial_keyed_output(CoolProp.iT_min)])
        except Exception: pass
        try: T_hi = np.nanmin([T_hi, self._state.trivial_keyed_output(CoolProp.iT_max)])
        except Exception: pass
        try: P_lo = np.nanmax([P_lo, self._state.trivial_keyed_output(CoolProp.iP_min)])
        except Exception: pass
        try: P_hi = np.nanmin([P_hi, self._state.trivial_keyed_output(CoolProp.iP_max)])
        except Exception: pass

        return [T_lo,T_hi,P_lo,P_hi]
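A small note on why nanmax/nanmin are used for the clamping above, as a stand-alone sketch (the NaN standing in for an unavailable fluid limit is an assumption about the failure mode): np.max would propagate the NaN and Python's built-in max() is order-dependent with NaN, while np.nanmax simply keeps the usable value.

import numpy as np

T_lo = 250.0
T_min_fluid = np.nan                       # limit not available for this fluid
T_lo = np.nanmax([T_lo, T_min_fluid])      # still 250.0
print(T_lo)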
Example #12
0
def draw_hmap_old(hmap, yvals, fname=None):
    """
    Plot a matrix as a heat map and write an image file.
    :param hmap: Heat map matrix.
    :param yvals: Heat map Y labels (e.g. amino acid names).
    :param fname: Destination image file.
    """
    if np.nanmax(hmap) > abs(np.nanmin(hmap)):
        vmax = np.nanmax(hmap)
        vmin = -np.nanmax(hmap)
    else:
        vmax = abs(np.nanmin(hmap))
        vmin = np.nanmin(hmap)
    fig = plt.figure(figsize=(20, 10))
    plt.imshow(hmap, cmap='RdBu', interpolation='nearest', aspect='auto', vmin=vmin, vmax=vmax)
    plt.xlim(0, hmap.shape[1])
    plt.ylim(0, hmap.shape[0])
    ax = plt.gca()
    fig.set_facecolor('white')
    ax.set_xlim((-0.5, hmap.shape[1] -0.5))
    ax.set_ylim((-0.5, hmap.shape[0] -0.5))
    ax.set_yticks([x for x in range(0, hmap.shape[0])])
    ax.set_yticklabels(yvals)
    ax.set_xticks(range(0, 76, 5))
    ax.set_xticklabels(list(range(2, 76, 5)) + ['STOP'])
    ax.set_ylabel('Residue')
    ax.set_xlabel('Ub Sequence Position')
    cb = plt.colorbar()
    cb.set_clim(vmin=vmin, vmax=vmax)
    cb.set_label('Relative Fitness')
    if fname is not None:
        plt.savefig(fname, bbox_inches='tight')
    return fig
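The colour-limit logic above, reduced to its nanmax/nanmin arithmetic with a toy matrix: whichever extreme is larger in magnitude sets both limits, so the diverging colormap stays centred on zero.

import numpy as np

hmap = np.array([[0.4, -1.3], [np.nan, 0.9]])
if np.nanmax(hmap) > abs(np.nanmin(hmap)):
    vmax, vmin = np.nanmax(hmap), -np.nanmax(hmap)
else:
    vmax, vmin = abs(np.nanmin(hmap)), np.nanmin(hmap)
print(vmin, vmax)   # -1.3 1.3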
Example #13
0
	def show(self,**kwargs):
		display = kwargs.get('display', True)
		show_layers = kwargs.get('show_layers',self.layers)
		try:
			show_layers=sorted(show_layers)
		except TypeError:
			show_layers=[show_layers]
		extent=kwargs.get('extent', 
						max_axis(*tuple(_image.axis for _image in self.image_sorted[self.layers[0]])))
		vmin=kwargs.get('vmin')
		vmax=kwargs.get('vmax')
		fig = plt.figure(figsize=(8, 8*abs((extent[3]-extent[2])*1./(extent[1]-extent[0]))))
		for layer in show_layers:
			for image in self.image_sorted[layer]:
				if layer==show_layers[0] and image==self.image_sorted[layer][0]:
					if not vmin:
						kwargs['vmin']=np.nanmin(image.image)
						vmin=np.nanmin(image.image)
					if not vmax:
						kwargs['vmax']=np.nanmax(image.image)
						vmax=np.nanmax(image.image)
					image.show(hold=True,**kwargs)
				else:
					image.show(hold=True,vmin=vmin,vmax=vmax,scalebar='off',colorbar='off')
		plt.xlim(extent[:2])
		plt.ylim(extent[-2:])
		if display:
			plt.show()
		else:
			return fig
Example #14
0
def plot_richness_scatter(gals, name, full_set):
    log_counts_a, scatter_a = richness_scatter(gals[gals['ssfr'] < -11.0], full_set)
    log_counts_p, scatter_p = richness_scatter(gals[gals['pred'] < -11.0], full_set)
    #fig1 = plt.figure(figsize=(12,7))
    #frame1=fig1.add_axes((.1,.3,.8,.6))
    #plt.subplot(121)
    plt.plot(log_counts_a, scatter_a, 'o', label='input', color='k', markersize=7)
    plt.plot(log_counts_p, scatter_p, 'o', label='predicted', color=red_col, markersize=7)
    #plt.title('Scatter in richness ' + name)
    plt.xlabel('Log Number of red satellites')
    plt.xlabel('$<log N_{red sat}>$')
    plt.xlim(-.1,2.6)
    plt.ylim(0, np.max([np.nanmax(scatter_a),np.nanmax(scatter_p)]) +.1)
    plt.ylabel('Scatter in $M_{halo}$')
    plt.legend(loc='best')

    #plt.subplot(122)
    #frame2=fig1.add_axes((.1,.1,.8,.2))
    #series_a = pd.Series(scatter_a, index=counts_a)
    #series_p = pd.Series(scatter_p, index=counts_p)
    # scat_diff = (series_a - series_p)/series_a
    #scat_ratio = series_p/series_a
    #plt.plot(scat_diff.index, scat_diff.values, 'ob')
    #plt.plot(scat_ratio.index, scat_ratio.values, 'ob')
    #plt.title("Scatter ratios in richness for actual vs predicted")
    #plt.axhline(0)
    #plt.ylabel('Error')
    #plt.xlabel('Number of red satellites')
    return
Example #15
0
def classify(request):
    C = json.loads(request.POST["C"])
    try:
        features, labels = get_multi_features(request)
    except ValueError as e:
        return HttpResponse(json.dumps({"status": e.message}))
    try:
        kernel = get_kernel(request, features)
    except ValueError as e:
        return HttpResponse(json.dumps({"status": e.message}))
    
    learn = "No"  
    values=[]

    try:
        domain = json.loads(request.POST['axis_domain'])
        x, y, z = svm.classify_svm(sg.GMNPSVM, features, labels, kernel, domain, learn, values, C, False)
    except Exception as e:
        return HttpResponse(json.dumps({"status": repr(e)}))

#    z = z + np.random.rand(*z.shape) * 0.01
	
    z_max = np.nanmax(z)
    z_min = np.nanmin(z)
    z_delta = 0.1*(np.nanmax(z)-np.nanmin(z))
    data = {"status": "ok",
            "domain": [z_min-z_delta, z_max+z_delta],
            "max": z_max+z_delta,
            "min": z_min-z_delta,
            "z": z.tolist()}

    return HttpResponse(json.dumps(data))
Example #16
0
    def getRange(self, axis, depname, axrange):
        """Update axis range from data."""

        s = self.settings
        doc = self.document

        if ( (depname == 'sx' and s.direction == 'horizontal') or
             (depname == 'sy' and s.direction == 'vertical') ):
            # update axis in direction of data
            if s.calculate:
                # update from values
                values = s.get('values').getData(doc)
                if values:
                    for v in values:
                        if len(v.data) > 0:
                            axrange[0] = min(axrange[0], N.nanmin(v.data))
                            axrange[1] = max(axrange[1], N.nanmax(v.data))
            else:
                # update from manual entries
                drange = self.rangeManual()
                axrange[0] = min(axrange[0], drange[0])
                axrange[1] = max(axrange[1], drange[1])
        else:
            # update axis in direction of datasets
            posns = self.getPosns()
            if len(posns) > 0:
                axrange[0] = min(axrange[0], N.nanmin(posns)-0.5)
                axrange[1] = max(axrange[1], N.nanmax(posns)+0.5)
Example #17
0
    def acquire_data(self, var_name=None, slice_=()):
        if var_name in self._variables:
            vars = [var_name]
        else:
            vars = self._variables

        if not isinstance(slice_, tuple): slice_ = (slice_,)

        for vn in vars:
            var = self._data_array[vn]

            ndims = len(var.shape)
            # Ensure the slice_ is the appropriate length
            if len(slice_) < ndims:
                slice_ += (slice(None),) * (ndims-len(slice_))

            arri = ArrayIterator(var, self._block_size)[slice_]
            for d in arri:
                if d.dtype.char == "S":
                    # Obviously, we can't get the range of values for a string data type!
                    rng = None
                elif isinstance(d, numpy.ma.masked_array):
                    # TODO: This is a temporary fix because numpy 'nanmin' and 'nanmax'
                    # are currently broken for masked_arrays:
                    # http://mail.scipy.org/pipermail/numpy-discussion/2011-July/057806.html
                    dc = d.compressed()
                    if dc.size == 0:
                        rng = None
                    else:
                        rng = (numpy.nanmin(dc), numpy.nanmax(dc))
                else:
                    rng = (numpy.nanmin(d), numpy.nanmax(d))
                yield vn, arri.curr_slice, rng, d

        return
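A mini-demo of the masked-array workaround mentioned in the comment above (toy data): compress to the unmasked values first, then take nanmin/nanmax of the result.

import numpy
import numpy.ma as ma

d = ma.masked_array([1.0, 5.0, numpy.nan, 9.0],
                    mask=[False, False, False, True])
dc = d.compressed()                            # drops the masked 9.0, keeps the NaN
rng = (numpy.nanmin(dc), numpy.nanmax(dc))     # (1.0, 5.0)
print(rng)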
Example #18
0
def stokes_plot(x_data, xlabel, I_data, Q_data, U_data, V_data,
                filename):
    """Generate plot of 4 stokes parameters"""
    fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, sharex=True)
    ax1.plot(x_data,I_data)
    ax1.set_xlim(np.nanmin(x_data),np.nanmax(x_data))
    ax1.set_ylim(np.nanmin(I_data[I_data.nonzero()]),
                 np.nanmax(I_data))
    ax1.set_ylabel("Stokes I (K)")
    ax2.plot(x_data,Q_data)
    ax2.set_ylim(np.nanmin(Q_data),np.nanmax(Q_data))
    ax2.set_ylabel("Stokes Q (K)")
    ax3.plot(x_data,U_data)
    ax3.set_ylim(np.nanmin(U_data),np.nanmax(U_data))
    ax3.set_ylabel("Stokes U (K)")
    ax4.plot(x_data,V_data)
    ax4.set_ylim(np.nanmin(V_data),np.nanmax(V_data))
    ax4.set_ylabel("Stokes V (K)")
    ax4.set_xlabel(xlabel)
    fig.subplots_adjust(hspace=0.1)
    for ax in [ax1, ax2, ax3, ax4]:
        # make the fontsize a bit smaller
        for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
                     ax.get_xticklabels() + ax.get_yticklabels()):
            item.set_fontsize(12)
    plt.savefig(filename)
    plt.close(fig)
Example #19
0
 def set_range(self, x_data, y_data):
     min_x, max_x = np.nanmin(x_data), np.nanmax(x_data)
     min_y, max_y = np.nanmin(y_data), np.nanmax(y_data)
     self.plotview.setRange(
         QRectF(min_x, min_y, max_x - min_x, max_y - min_y),
         padding=0.025)
     self.plotview.replot()
Example #20
0
    def _axes_domain(self, nx=None, ny=None, background_patch=None):
        """Returns x_range, y_range"""
        DEBUG = False

        transform = self._crs_transform()

        ax_transform = self.axes.transAxes
        desired_trans = ax_transform - transform

        nx = nx or 30
        ny = ny or 30
        x = np.linspace(1e-9, 1 - 1e-9, nx)
        y = np.linspace(1e-9, 1 - 1e-9, ny)
        x, y = np.meshgrid(x, y)

        coords = np.concatenate([x.flatten()[:, None], y.flatten()[:, None]], 1)

        in_data = desired_trans.transform(coords)

        ax_to_bkg_patch = self.axes.transAxes - background_patch.get_transform()

        ok = np.zeros(in_data.shape[:-1], dtype=bool)
        # XXX Vectorise contains_point
        for i, val in enumerate(in_data):
            # convert the coordinates of the data to the background
            # patches coordinates
            background_coord = ax_to_bkg_patch.transform(coords[i : i + 1, :])
            bkg_patch_contains = background_patch.get_path().contains_point
            if bkg_patch_contains(background_coord[0, :]):
                color = "r"
                ok[i] = True
            else:
                color = "b"

            if DEBUG:
                import matplotlib.pyplot as plt

                plt.plot(coords[i, 0], coords[i, 1], "o" + color, clip_on=False, transform=ax_transform)
        #                plt.text(coords[i, 0], coords[i, 1], str(val), clip_on=False,
        #                         transform=ax_transform, rotation=23,
        #                         horizontalalignment='right')

        inside = in_data[ok, :]
        x_range = np.nanmin(inside[:, 0]), np.nanmax(inside[:, 0])
        y_range = np.nanmin(inside[:, 1]), np.nanmax(inside[:, 1])

        # XXX Cartopy specific thing. Perhaps make this bit a specialisation
        # in a subclass...
        crs = self.crs
        if isinstance(crs, Projection):
            x_range = np.clip(x_range, *crs.x_limits)
            y_range = np.clip(y_range, *crs.y_limits)

            # if the limit is >90% of the full x limit, then just use the full
            # x limit (this makes circular handling better)
            prct = np.abs(np.diff(x_range) / np.diff(crs.x_limits))
            if prct > 0.9:
                x_range = crs.x_limits

        return x_range, y_range
Example #21
0
    def info(self):
        """
    info()

    Prints out a simple human-readable summary of the spectrum,
    containing the name of the spectrum, the units on its axes,
    and their limits. Also shows whether the spectrum has been
    baselined or convolved yet.

    Parameters
    ----------
    None

    Returns
    -------
    Nothing, but prints out a summary of the spectrum.
    """
        print("---")
        print("Summary for spectrum " + self.name)
        print("x unit: " + str(self.x.unit))
        print("min(x): " + str(np.nanmin(self.x.value)))
        print("max(x): " + str(np.nanmax(self.x.value)))
        print("y unit: " + str(self.y.unit))
        print("min(y): " + str(np.nanmin(self.y.value)))
        print("max(y): " + str(np.nanmax(self.y.value)))
        print("baselined: " + str(self.baselined))
        print("convolved: " + str(self.convolved))
        print("---")
Example #22
0
def threshold(frame, threshold=0.5, normalized_threshold=True, threshold_type=cv2.THRESH_BINARY, debug=False):
    """
    thresholding an image: the input type has to be either np.uint8 or np.float32
    returns a thresholded float32 image
    """

    fmin = np.double(np.nanmin(frame))
    fmax = np.double(np.nanmax(frame))

    if debug:
        print("fmin: ", fmin)
        print("fmax: ", fmax)

    floatframe = ((1.0 - 0.0) / (fmax - fmin) * (np.double(frame) - fmin)).astype(np.float32)

    if debug:
        print("floatframe: ", floatframe.dtype)
        print("min: ", np.nanmin(floatframe))
        print("max: ", np.nanmax(floatframe))

    if not normalized_threshold:
        threshold = 1.0 / (fmax - fmin) * (threshold - fmin)
        if debug:
            print("normalized threshold: ", threshold)

    retval, t = cv2.threshold(floatframe, thresh=threshold, maxval=255, type=threshold_type)

    return t
Example #23
0
def bin_fit(x, y, buckets=3):
     
    assert buckets in [3,25]

    xstd=np.nanstd(x)
    
    if buckets==3:
        binlimits=[np.nanmin(x), -xstd/2.0,xstd/2.0 , np.nanmax(x)]
    elif buckets==25:
    
        steps=xstd/4.0
        binlimits=np.arange(-xstd*3.0, xstd*3.0, steps)
    
        binlimits=[np.nanmin(x)]+list(binlimits)+[np.nanmax(x)]
    
    fit_y=[]
    err_y=[]
    x_values_to_plot=[]
    for binidx in range(1, len(binlimits)):
        lower_bin_x=binlimits[binidx-1]
        upper_bin_x=binlimits[binidx]

        x_values_to_plot.append(np.mean([lower_bin_x, upper_bin_x]))

        y_in_bin=[y[idx] for idx in range(len(y)) if x[idx]>=lower_bin_x and x[idx]<upper_bin_x]

        fit_y.append(np.nanmedian(y_in_bin))
        err_y.append(np.nanstd(y_in_bin))

    ## no zeros
    

    return (binlimits, x_values_to_plot, fit_y, err_y)
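A possible usage sketch for bin_fit above (random toy data; assumes numpy is imported as np, as the function body already requires).

import numpy as np

x = np.random.randn(500)
y = 2.0 * x + np.random.randn(500)
binlimits, x_mid, fit_y, err_y = bin_fit(x, y, buckets=3)
print(binlimits)   # [nanmin(x), -std/2, +std/2, nanmax(x)]
print(fit_y)       # median of y within each of the three buckets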
Example #24
0
def TestPlot(fig=None):
    A = numpy.array([1,2,3,4,2,5,8,3,2,3,5,6])
    B = numpy.array([8,7,3,6,4,numpy.nan,9,3,7,numpy.nan,2,4])
    C = numpy.array([6,3,4,7,2,1,1,7,8,4,3,2])
    D = numpy.array([5,2,4,5,3,8,2,5,3,5,6,8])
    
    # A workaround so the overplotted histograms share the same bin ranges and overlap correctly;
    histrangelist = [(numpy.nanmin(A),numpy.nanmax(A)),(numpy.nanmin(B),numpy.nanmax(B)),
                (numpy.nanmin(C),numpy.nanmax(C)),(numpy.nanmin(D),numpy.nanmax(D))]
    
    data = numpy.array([A,B,C,D])
    labels = ['A','3','C','D']

    fig = GridPlot(data,labels=labels, no_tick_labels=True, color='black', 
                    hist=True, histbins=3, histloc='tl', histrangelist=histrangelist, fig=None) 
    
    # Data of note to plot in different color
    A2 = numpy.array([1,2,3,4])
    B2 = numpy.array([8,7,3,6])
    C2 = numpy.array([6,3,4,7])
    D2 = numpy.array([5,2,4,5])
    data2 = numpy.array([A2,B2,C2,D2])
    
    fig = GridPlot(data2,labels=labels, no_tick_labels=True, color='red', 
                hist=True, histbins=3, histloc='tr', histrangelist=histrangelist, fig=fig) 
    
    return fig
Example #25
0
    def plot_result(self, result):
        """
        It plots the resulting Q and q when atype is set to 'tsl' or 'asl'

         :param result:
           Event Sync result from compute()
        :type result: dict

        :returns: plt.figure
               -- figure plot
        """

        ' Raise error if parameters are not in the correct type '
        if not(isinstance(result, dict)) : raise TypeError("Requires result to be a dictionary")

        ' Raise error if not the good dictionary '
        if not 'Q' in result : raise ValueError("Requires dictionary to be the output of compute() method")
        if not 'q' in result : raise ValueError("Requires dictionary to be the output of compute() method")

        x=np.arange(0, result['Q'].size, 1)

        figure, axarr = plt.subplots(2, sharex=True)
        axarr[0].set_title('Synchrony and time delay pattern')
        axarr[0].set_xlabel('Samples')
        axarr[1].set_xlabel('Samples')
        axarr[0].set_ylim(0,np.nanmax(result['Q']))
        axarr[0].plot(x, result['Q'], label="Synchrony (Qn)")
        axarr[1].set_ylim(np.nanmin(result['q']),np.nanmax(result['q']))
        axarr[1].plot(x, result['q'], label="Time delay pattern (qn)")
        axarr[0].legend(loc='best')
        axarr[1].legend(loc='best')

        return figure
Example #26
0
def plot_nontarget_betas_n_back(t_vols_n_back_beta_1, b_vols_smooth_n_back, in_brain_mask, brain_structure, nice_cmap, n_back):

  beta_index = 1

  b_vols_smooth_n_back[~in_brain_mask] = np.nan
  t_vols_n_back_beta_1[~in_brain_mask] = np.nan
  min_val = np.nanmin(b_vols_smooth_n_back[...,(40,50,60),beta_index])
  max_val = np.nanmax(b_vols_smooth_n_back[...,(40,50,60),beta_index])

  plt.figure()

  for map_index, depth in (((3,2,1), 40),((3,2,3), 50),((3,2,5), 60)):
    plt.subplot(*map_index)
    plt.title("z=%d,%s" % (depth, n_back + "-back nontarget,beta values"))
    plt.imshow(brain_structure[...,depth], alpha=0.5)
    plt.imshow(b_vols_smooth_n_back[...,depth,beta_index], cmap=nice_cmap, alpha=0.5, vmin=min_val, vmax=max_val)
    plt.colorbar()
    plt.tight_layout()

  t_min_val = np.nanmin(t_vols_n_back_beta_1[...,(40,50,60)])
  t_max_val = np.nanmax(t_vols_n_back_beta_1[...,(40,50,60)])

  for map_index, depth in (((3,2,2), 40),((3,2,4), 50),((3,2,6), 60)):
    plt.subplot(*map_index)
    plt.title("z=%d,%s" % (depth, n_back + "-back nontarget,t values"))
    plt.imshow(brain_structure[...,depth], alpha=0.5)
    plt.imshow(t_vols_n_back_beta_1[...,depth], cmap=nice_cmap, alpha=0.5, vmin=t_min_val, vmax=t_max_val)
    plt.colorbar()
    plt.tight_layout()

  plt.savefig(os.path.join(output_filename, "sub011_nontarget_betas_%s_back.png" % (n_back)), format='png', dpi=500)  
Example #27
0
def plot_all_time_series(config_list, output_dir):
    """Plot column charts of the raw total time/energy spent in each profiler category.

    Keyword arguments:
    config_list -- [(config, result of process_config_dir(...))]
    output_dir -- where to write plots to
    """
    time_series_out_dir = path.join(output_dir, 'time_series')
    os.makedirs(time_series_out_dir)

    max_end_times = []
    max_power_values = []
    for (c, cd) in config_list:
        for (t, td) in cd:
            trial_max_end_times = [np.nanmax(te) for (p, ts, te, es, ee) in td if len(te) > 0]
            max_end_times.append(np.nanmax(trial_max_end_times))
            for (p, ts, te, es, ee) in td:
                # We only care about the energy profiler (others aren't reliable for instant power anyway)
                if p == ENERGY_PROFILER_NAME and len(te) > 0:
                    max_power_values.append(np.nanmax(hb_energy_times_to_power(es, ee, ts, te)))
    max_time = np.nanmax(max_end_times)
    max_power = np.nanmax(np.array(max_power_values)) * 1.2  # leave a little space at the top

    for (config, config_data) in config_list:
        [plot_trial_time_series(config, trial, trial_data, max_time, max_power, time_series_out_dir)
            for (trial, trial_data) in config_data]
Example #28
0
 def test_threshold_filter_nan(self):
     src = self.make_src(nan=True)
     self.e.add_source(src)
     threshold = Threshold()
     self.e.add_filter(threshold)
     self.assertEqual(np.nanmin(src.scalar_data), np.nanmin(threshold.outputs[0].point_data.scalars.to_array()))
     self.assertEqual(np.nanmax(src.scalar_data), np.nanmax(threshold.outputs[0].point_data.scalars.to_array()))
Example #29
0
def plot_noise_regressor_betas(b_vols_smooth, t_vols_beta_6_to_9, brain_structure, in_brain_mask, nice_cmap):

  plt.figure()

  min_val = np.nanmin(b_vols_smooth[...,40,(6,7,9)])
  max_val = np.nanmax(b_vols_smooth[...,40,(6,7,9)])

  plt.subplot(3,2,1)
  plt.title("z=%d,%s" % (40, "linear drift,betas"))
  b_vols_smooth[~in_brain_mask] = np.nan
  plt.imshow(brain_structure[...,40], alpha=0.5)
  plt.imshow(b_vols_smooth[...,40,6], cmap=nice_cmap, alpha=0.5, vmin=min_val, vmax=max_val)
  plt.colorbar()
  plt.tight_layout()

  plt.subplot(3,2,3)
  plt.title("z=%d,%s" % (40, "quadratic drift,betas"))
  b_vols_smooth[~in_brain_mask] = np.nan
  plt.imshow(brain_structure[...,40], alpha=0.5)
  plt.imshow(b_vols_smooth[...,40,7], cmap=nice_cmap, alpha=0.5, vmin=min_val, vmax=max_val)
  plt.colorbar()
  plt.tight_layout()

  plt.subplot(3,2,5)
  plt.title("z=%d,%s" % (40, "second PC,betas"))
  b_vols_smooth[~in_brain_mask] = np.nan
  plt.imshow(brain_structure[...,40], alpha=0.5)
  plt.imshow(b_vols_smooth[...,40,9], cmap=nice_cmap, alpha=0.5, vmin=min_val, vmax=max_val)
  plt.colorbar()
  plt.tight_layout()

  t_vols_beta_6_to_9[0][~in_brain_mask] = np.nan
  t_vols_beta_6_to_9[1][~in_brain_mask] = np.nan
  t_vols_beta_6_to_9[3][~in_brain_mask] = np.nan

  t_min_val = np.nanmin([t_vols_beta_6_to_9[0][...,40], t_vols_beta_6_to_9[1][...,40], t_vols_beta_6_to_9[3][...,40]])
  t_max_val = np.nanmax([t_vols_beta_6_to_9[0][...,40], t_vols_beta_6_to_9[1][...,40], t_vols_beta_6_to_9[3][...,40]])

  plt.subplot(3,2,2)
  plt.title("z=%d,%s" % (40, "linear drift,t values"))
  plt.imshow(brain_structure[...,40], alpha=0.5)
  plt.imshow(t_vols_beta_6_to_9[0][...,40], cmap=nice_cmap, alpha=0.5, vmin=t_min_val, vmax=t_max_val)
  plt.colorbar()
  plt.tight_layout()

  plt.subplot(3,2,4)
  plt.title("z=%d,%s" % (40, "quadratic drift,t values"))
  plt.imshow(brain_structure[...,40], alpha=0.5)
  plt.imshow(t_vols_beta_6_to_9[1][...,40], cmap=nice_cmap, alpha=0.5, vmin=t_min_val, vmax=t_max_val)
  plt.colorbar()
  plt.tight_layout()

  plt.subplot(3,2,6)
  plt.title("z=%d,%s" % (40, "second PC,t values"))
  plt.imshow(brain_structure[...,40], alpha=0.5)
  plt.imshow(t_vols_beta_6_to_9[3][...,40], cmap=nice_cmap, alpha=0.5, vmin=t_min_val, vmax=t_max_val)
  plt.colorbar()
  plt.tight_layout()

  plt.savefig(os.path.join(output_filename, "sub001_noise_regressors_betas_map.png"), format='png', dpi=500)  
Example #30
0
 def preprocess_crowdlabels(self, crowdlabels):
     # Initialise all objects relating to the crowd labels.
     C = {}
     crowdlabels[np.isnan(crowdlabels)] = -1
     if self.discretedecisions:
         crowdlabels = np.round(crowdlabels).astype(int)
     if self.table_format_flag:# crowd labels as a full KxN table? If false, use diags sparse 3-column list, where 1st
         logging.error("Can't use table format with preference pairs at the moment.")
         return
     
     if self.K < int(np.nanmax(crowdlabels[:,0]))+1:
         self.K = int(np.nanmax(crowdlabels[:,0]))+1 # add one because indexes start from 0
         
     for l in range(self.nscores):
         lIdxs = np.argwhere(crowdlabels[:, 3] == l)[:,0]
         data = np.ones((len(lIdxs), 1)).reshape(-1)
         rows = np.array(crowdlabels[lIdxs, 1]).reshape(-1) * self.N + crowdlabels[lIdxs, 2]
         cols = np.array(crowdlabels[lIdxs, 0]).reshape(-1)
         
         Cl = csr_matrix(coo_matrix((data,(rows,cols)), shape=(self.N**2, self.K)))
         C[l] = Cl            
         
     # Set and reset object properties for the new dataset
     self.C = C
     self.lnpCT = np.zeros((self.N, self.nclasses))
     self.conf_mat_ind = []
     # pre-compute the indices into the pi arrays
     # repeat for test labels only
     
     self.Ctest = {}
     for l in range(self.nscores):
         self.Ctest[l] = C[l][self.testidxs, :]
     # Reset the pre-calculated data for the training set in case goldlabels has changed
     self.alpha_tr = []
Example #31
0
            txt_sim_list.append(txt_sim)
                
            # calculate img similarity
            
            if index in img_similarity:
                img_sim = img_similarity[index]
            else:
                try:
                    img_sim = img_model.similarity(str(item), str(related_item))
                except Exception:
                    img_sim = np.nan
            img_similarity[index] = img_sim
            img_sim_list.append(img_sim)
            
        txt_feature.append([eachrow[0], item,
                            np.nanmax(txt_sim_list),
                            np.nanmean(txt_sim_list),
                            np.nanstd(txt_sim_list),
                            np.nansum(txt_sim_list),
                            np.sum(np.isnan(txt_sim_list))])
        img_feature.append([eachrow[0], item,
                            np.nanmax(img_sim_list),
                            np.nanmean(img_sim_list),
                            np.nanstd(img_sim_list),
                            np.nansum(img_sim_list),
                            np.sum(np.isnan(img_sim_list))])
    gc.collect()


# In[ ]:
Example #32
0
 def normalize(self, indices):
     if self.features.size:
         nanmin = np.nanmin(self.features[:, indices], 0)
         nanmax = np.nanmax(self.features[:, indices], 0)
         self.features[:, indices] = (self.features[:, indices] -
                                      nanmin) / (nanmax - nanmin)
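The same column-wise min-max scaling as a stand-alone sketch (toy matrix): nanmin/nanmax along axis 0 ignore missing entries, so one NaN does not wipe out a whole column's range.

import numpy as np

features = np.array([[1.0, 10.0],
                     [np.nan, 30.0],
                     [3.0, 20.0]])
lo = np.nanmin(features, 0)                 # [1.0, 10.0]
hi = np.nanmax(features, 0)                 # [3.0, 30.0]
scaled = (features - lo) / (hi - lo)
# column 0 -> [0.0, nan, 1.0]; column 1 -> [0.0, 1.0, 0.5]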
Example #33
0
def write_depth_seg_image(dpth_img, msk, fname, overlay=[0,0,255], transp=0.15):
	img = dpth_img * (255.0/np.nanmax(dpth_img))
	img = np.stack((img,)*3, axis=-1)
	for i, hue in enumerate(overlay):
		img[msk, i] = hue*transp + img[msk, i] * (1 - transp)
	cv2.imwrite(fname, img)
Example #34
0
rain[rain < 0.05] = 0  # Filtering the error values

meteo_secs = mdate.epoch2num(meteo_time)

meteo.close()

# %%
# =========================
# # Get maxima and minima:
# =========================

print(Time[TMT.nanargmax(Z_dbz)[0]])
print(time.strftime('%H:%M:%S', time.gmtime((Time[TMT.nanargmax(Z_dbz)[0]])))[0:5])

MaxEcho = 'Highest Echo = ' + str(np.nanmax(Z_dbz))[0:5] + ' dBZ (' + str(
    time.strftime('%H:%M:%S', time.gmtime((Time[TMT.nanargmax(Z_dbz)[0]])))[0:5]) + ' UTC)'

MinTemp = 'Min Temp = ' + str(np.nanmin(temp))[0:6] + ' °C (' + str(
    time.strftime('%H:%M:%S', time.gmtime((meteo_time[TMT.nanargmin(temp)[0]])))[0:5]) + ' UTC)'
MaxTemp = 'Max Temp = ' + str(np.nanmax(temp))[0:6] + ' °C (' + str(
    time.strftime('%H:%M:%S', time.gmtime((meteo_time[TMT.nanargmax(temp)[0]])))[0:5]) + ' UTC)'
DeltaTemp = r'$\Delta$ Temp = ' + str(abs(abs(np.nanmax(temp)) - abs(np.nanmin(temp))))[0:6] + ' °C'
temp_str = MinTemp + '\n' + MaxTemp + '\n' + DeltaTemp

MaxRain = 'Max Precip = ' + str(np.nanmax(rain))[0:6] + ' mm/h (' + str(
    time.strftime('%H:%M:%S', time.gmtime((meteo_time[TMT.nanargmax(rain)[0]])))[0:5]) + ' UTC)'

MinWind = 'Min Wind = ' + str(np.nanmin(wind_speed))[0:6] + ' m/s (' + str(
    time.strftime('%H:%M:%S', time.gmtime((meteo_time[TMT.nanargmin(wind_speed)[0]])))[0:5]) + ' UTC)'
MaxWind = 'Max Wind = ' + str(np.nanmax(wind_speed))[0:6] + ' m/s (' + str(
Example #35
0
  nv = len(v)
  nvm = nv // nbins * nbins

  v_mean = v[:nvm]
  v_mean = v_mean.reshape([nvm // nbins, nbins])
  v_mean = np.nanmean(v_mean, axis=1)
  v_mean = np.hstack([v_mean, [np.nanmean(v[nvm:])]])

  ts = int(exp_times[wid] / time_bin)
  v_24hr[wid, ts:ts+len(r_mean)] = v_mean.copy()
  

plt.figure(12); plt.clf();
#plt.plot(v_mean)
#plt.ylim(0,20)
np.nanmax(roam_24hr)
rrange = np.nanpercentile(roam_24hr, [5, 95]);
plt.imshow(roam_24hr, aspect = 'auto', vmax = rrange[1], interpolation = 'none')
plt.tight_layout()

# hr lines
nhr = 60 * 60 // time_bin
nday = 24 * nhr

for d in range(ntimes // nhr):
  plt.plot([d*nhr, d*nhr], [-0.5, len(roam_24hr)-0.5], 'k')

for d in range(ntimes // nday):
  plt.plot([d*nday, d*nday], [-0.5, len(roam_24hr)-0.5], 'r', linewidth=2)

plt.xlim(0, roam_24hr.shape[1]);
Example #36
0
File: run.py Project: rizac/sdaas
    def process(self, sort_by_time=False,
                aggregate: str or None = None,
                progress: TextIO or None = sys.stderr,
                info: TextIO or None = None):
        """Processes all added files/URLs and yields the results"""
        if aggregate:
            aggregates = ('min', 'max', 'median', 'mean')
            if aggregate not in aggregates:
                raise ValueError(f'"aggregate" not in {str(aggregates)}')

        count, total = 0, sum(_[-1] for _ in self._data)
        data = {}
        messages = None if info is None else []
        # load model now: it takes ~=1 second and is usually lazy loaded,
        # but this means that the progressbar would show misleading results
        # at the beginning
        load_default_trained_model()
        with ProgressBar(progress) as pbar:
            # self._data has generally only one element in the current
            # implementation (see module function `process`), however, it
            # already supports multiple call of its `add_*` methods above
            for key, streamiterator, metadata_path, length in self._data:
                try:
                    inv = read_inventory(metadata_path)
                except Exception as exc:
                    if messages is not None:
                        messages.append(f'Metadata error, {str(exc)}. {key}')
                    streamiterator = []  # hack to skip iteration below

                feats = []
                ids = []
                kount = 0
                try:
                    for fpath, stream in streamiterator:
                        kount += 1
                        for trace in stream:
                            (id_, st_, et_), feat = trace_idfeatures(trace, inv)
                            feats.append(feat)
                            ids.append((fpath, id_, st_, et_))

                except Exception as exc:
                    if messages is not None:
                        messages.append(f'{str(exc)}. {key}')
                        feats = []  # hack to stop after updating the pbar

                count += 1
                pbar.update(count / total)

                if kount < length:
                    count += length - kount
                    pbar.update(count / total)

                if not feats:
                    continue

                scores = aa_scores(np.asarray(feats))

                iter_ = zip(ids, scores)
                if aggregate:
                    data = defaultdict(lambda: [None, []])
                    for (fpath, id_, stime, etime), score in iter_:
                        timeranges, scores_ = data[id_]
                        if not timeranges:
                            data[id_][0] = [stime, etime]
                        else:
                            timeranges[0] = min(stime, timeranges[0])
                            timeranges[1] = max(etime, timeranges[1])
                        scores_.append(score)
                    ids = []
                    scores = []
                    for id_, (timeranges, scores_) in data.items():
                        ids.append(('', id_, *timeranges))
                        scores_ = np.asarray(scores_)
                        if np.isnan(scores_).all():
                            scores.append(np.nan)
                        elif aggregate == 'mean':
                            scores.append(np.nanmean(scores_))
                        elif aggregate == 'min':
                            scores.append(np.nanmin(scores_))
                        elif aggregate == 'max':
                            scores.append(np.nanmax(scores_))
                        else:
                            scores.append(np.nanmedian(scores_))

                    # now sort (if needed) and print them at once:
                    iter_ = zip(ids, scores)

                if sort_by_time:
                    yield sorted(iter_, key=lambda _: _[0][1])
                else:
                    yield iter_

        for msg in (messages or []):
            print(msg, file=info)
Example #37
0
def train_model(train_dir=None, validation_dir=None, debug=True):
    config = GEDIconfig()
    if train_dir is None:  # Use globals
        train_data = os.path.join(config.tfrecord_dir,
                                  config.tf_record_names['train'])
        meta_data = np.load(
            os.path.join(config.tfrecord_dir,
                         '%s_%s' % (config.tvt_flags[0], config.max_file)))
    else:
        meta_data = np.load(
            os.path.join(train_dir,
                         '%s_%s' % (config.tvt_flags[0], config.max_file)))

    # Prepare image normalization values
    if config.max_gedi is None:
        max_value = np.nanmax(meta_data['max_array']).astype(np.float32)
        if max_value == 0:
            max_value = None
            print('Derived max value is 0')
        else:
            print('Normalizing with empirical max.')
        if 'min_array' in meta_data.keys():
            # min_value = np.min(meta_data['min_array']).astype(np.float32)
            print('Normalizing with empirical min.')
        else:
            # min_value = None
            print('Not normalizing with a min.')
    else:
        max_value = config.max_gedi
        # min_value = config.min_gedi
    ratio = meta_data['ratio']
    print('Ratio is: %s' % ratio)

    if validation_dir is None:  # Use globals
        validation_data = os.path.join(config.tfrecord_dir,
                                       config.tf_record_names['val'])
    elif validation_dir is False:
        pass  # Do not use validation data during training

    # Make output directories if they do not exist
    dt_stamp = re.split(
        r'\.', str(datetime.now()))[0].\
        replace(' ', '_').replace(':', '_').replace('-', '_')
    dt_dataset = config.which_dataset + '_' + dt_stamp + '/'
    config.train_checkpoint = os.path.join(config.train_checkpoint,
                                           dt_dataset)  # timestamp this run
    out_dir = os.path.join(config.results, dt_dataset)
    dir_list = [
        config.train_checkpoint, config.train_summaries, config.results,
        out_dir
    ]
    [make_dir(d) for d in dir_list]
    # im_shape = get_image_size(config)
    im_shape = config.gedi_image_size

    print('-' * 60)
    print('Training model:' + dt_dataset)
    print('-' * 60)

    # Prepare data on CPU
    assert os.path.exists(train_data)
    assert os.path.exists(validation_data)
    assert os.path.exists(config.vgg16_weight_path)
    with tf.device('/cpu:0'):
        train_images, train_labels, train_times = inputs(
            train_data,
            config.train_batch,
            im_shape,
            config.model_image_size,
            # max_value=max_value,
            # min_value=min_value,
            train=config.data_augmentations,
            num_epochs=config.epochs,
            normalize=config.normalize,
            return_filename=True)
        val_images, val_labels, val_times = inputs(
            validation_data,
            config.validation_batch,
            im_shape,
            config.model_image_size,
            # max_value=max_value,
            # min_value=min_value,
            num_epochs=config.epochs,
            normalize=config.normalize,
            return_filename=True)
        train_image_list, val_image_list = [], []
        for idx in range(int(train_images.get_shape()[1])):
            train_image_list += [tf.gather(train_images, idx, axis=1)]
            val_image_list += [tf.gather(val_images, idx, axis=1)]
            tf.summary.image('train_image_frame_%s' % idx,
                             train_image_list[idx])
            tf.summary.image('validation_image_frame_%s' % idx,
                             val_image_list[idx])

    # Prepare model on GPU
    with tf.device('/gpu:0'):

        with tf.variable_scope('match'):
            # Build matching model for frame 0
            model_0 = matching_gedi.model_struct()
            frame_activity = []
            frame_activity += [
                tf.nn.l2_normalize(model_0.build(train_image_list[0]), 0,
                                   1e-12)
            ]

        with tf.variable_scope('match', reuse=tf.AUTO_REUSE):
            # Build matching model for other frames
            for idx in range(1, len(train_image_list)):
                frame_activity += [
                    tf.nn.l2_normalize(model_0.build(train_image_list[idx]), 0,
                                       1e-12)
                ]

        pos = l2_dist(frame_activity[0], frame_activity[1], axis=1)
        neg = l2_dist(frame_activity[0], frame_activity[2], axis=1)
        # loss = tf.reduce_mean(tf.pow(pos - neg + 0.2, 2))
        loss = tf.reduce_mean(tf.maximum(pos - neg + 0.01, 0.))
        # loss = tf.reduce_mean(tf.nn.relu(1 - (neg / (pos + 0.2)))) * 100
        tf.summary.scalar('Triplet_loss', loss)

        # Weight decay
        if config.wd_layers is not None:
            _, l2_wd_layers = fine_tune_prepare_layers(
                tf.trainable_variables(), config.wd_layers)
            l2_wd_layers = [x for x in l2_wd_layers if 'biases' not in x.name]
            if len(l2_wd_layers) > 0:
                loss += (config.wd_penalty *
                         tf.add_n([tf.nn.l2_loss(x) for x in l2_wd_layers]))

        # Optimize
        train_op = tf.train.AdamOptimizer(config.new_lr).minimize(loss)
        train_accuracy = tf.reduce_mean(
            tf.cast(
                tf.equal(
                    tf.nn.relu(tf.sign(neg - pos)),  # 1 if pos < neg
                    tf.cast(tf.ones_like(train_labels), tf.float32)),
                tf.float32))
        tf.summary.scalar('training_accuracy', train_accuracy)

        # Setup validation op
        if validation_data is not False:
            with tf.variable_scope('match', tf.AUTO_REUSE) as match:
                # Build matching model for frame 0
                match.reuse_variables()
                val_model_0 = matching_gedi.model_struct()
                val_frame_activity = []
                val_frame_activity += [
                    tf.nn.l2_normalize(val_model_0.build(val_image_list[0]), 1,
                                       1e-12)
                ]

                # Build matching model for other frames
                for idx in range(1, len(train_image_list)):
                    val_frame_activity += [
                        tf.nn.l2_normalize(
                            val_model_0.build(val_image_list[idx]), 1, 1e-12)
                    ]

            val_pos = l2_dist(val_frame_activity[0],
                              val_frame_activity[1],
                              axis=1)
            val_neg = l2_dist(val_frame_activity[0],
                              val_frame_activity[2],
                              axis=1)
            # val_loss = tf.reduce_mean(tf.pow(val_pos - val_neg + 0.2, 2))
            val_loss = tf.reduce_mean(tf.maximum(val_pos - val_neg + 0.01, 0.))
            # val_loss = tf.reduce_mean(tf.nn.relu(1 - (val_neg / (val_pos + 0.2)))) * 100
            tf.summary.scalar('Validation_triplet_loss', val_loss)

            # Calculate validation accuracy
        val_accuracy = tf.reduce_mean(
            tf.cast(
                tf.equal(
                    tf.nn.relu(tf.sign(val_neg - val_pos)),  # 1 if pos < neg
                    tf.cast(tf.ones_like(val_labels), tf.float32)),
                tf.float32))
        tf.summary.scalar('val_accuracy', val_accuracy)

    # Set up summaries and saver
    saver = tf.train.Saver(tf.global_variables(),
                           max_to_keep=config.keep_checkpoints)
    summary_op = tf.summary.merge_all()

    # Initialize the graph
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    # Need to initialize both of these if supplying num_epochs to inputs
    sess.run(
        tf.group(tf.global_variables_initializer(),
                 tf.local_variables_initializer()))
    summary_dir = os.path.join(config.train_summaries,
                               config.which_dataset + '_' + dt_stamp)
    summary_writer = tf.summary.FileWriter(summary_dir, sess.graph)

    # Set up exemplar threading
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # Train operations
    train_dict = {
        'train_op': train_op,
        'loss': loss,
        'pos': pos,
        'neg': neg,
        'train_accuracy': train_accuracy,
        'val_accuracy': val_accuracy,
    }
    val_dict = {'val_accuracy': val_accuracy}
    if debug:
        for idx in range(len(train_image_list)):
            train_dict['train_im_%s' % idx] = train_image_list[idx]
        for idx in range(len(val_image_list)):
            val_dict['val_im_%s' % idx] = val_image_list[idx]

    # Start training loop
    np.save(out_dir + 'meta_info', config)
    step, losses = 0, []  # val_max = 0
    try:
        # print response
        while not coord.should_stop():
            start_time = time.time()
            train_values = sess.run(train_dict.values())
            it_train_dict = {
                k: v
                for k, v in zip(train_dict.keys(), train_values)
            }
            losses += [it_train_dict['loss']]
            duration = time.time() - start_time
            if np.isnan(it_train_dict['loss']).sum():
                assert not np.isnan(it_train_dict['loss']),\
                    'Model loss = NaN'

            if step % config.validation_steps == 0:
                if validation_data is not False:
                    val_values = sess.run(val_dict.values())
                    it_val_dict = {
                        k: v
                        for k, v in zip(val_dict.keys(), val_values)
                    }
                    val_acc = it_val_dict['val_accuracy']
                else:
                    val_acc -= 1  # Store every checkpoint

                # Summaries
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)

                # Training status and validation accuracy
                format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; '
                              '%.3f sec/batch) | Training accuracy = %s | '
                              'Validation accuracy = %s | '
                              'logdir = %s')
                print(format_str %
                      (datetime.now(), step,
                       it_train_dict['loss'], config.train_batch / duration,
                       float(duration), it_train_dict['train_accuracy'],
                       it_train_dict['val_accuracy'], summary_dir))

                # Save the model checkpoint if it's the best yet
                if 1:  # val_acc >= val_max:
                    saver.save(sess,
                               os.path.join(config.train_checkpoint,
                                            'model_' + str(step) + '.ckpt'),
                               global_step=step)
                    # Store the new max validation accuracy
                    # val_max = val_acc

            else:
                # Training status
                format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; '
                              '%.3f sec/batch) | Training accuracy = %s | ')
                print(format_str %
                      (datetime.now(), step,
                       it_train_dict['loss'], config.train_batch / duration,
                       float(duration), it_train_dict['train_accuracy']))
            # End iteration
            step += 1

    except tf.errors.OutOfRangeError:
        print('Done training for %d epochs, %d steps.' % (config.epochs, step))
    finally:
        coord.request_stop()
        np.save(os.path.join(config.tfrecord_dir, 'training_loss'), losses)
    coord.join(threads)
    sess.close()
Example #38
0
    # Keeping track of performance on the simple validation task by getting a precision recall curve.
    model_name = "bert_small"
    vectorize = lambda x: vectorize_with_bert(" ".join(preprocess_string(x)),
                                              model, tokenizer, "sum", 1)
    get_similarity = lambda s1, s2: 1 - cosine(vectorize(s1), vectorize(s2))
    df[model_name] = df.apply(
        lambda x: get_similarity(x["concept_1"], x["concept_2"]), axis=1)
    y_true = list(df["class"].values)
    y_prob = list(df[model_name].values)
    precision, recall, thresholds = precision_recall_curve(y_true, y_prob)
    f_beta = lambda pr, re, beta: [((1 + beta**2) * p * r) /
                                   ((((beta**2) * p) + r))
                                   for p, r in zip(pr, re)]
    f_1_scores = f_beta(precision, recall, beta=1)
    f_1_max = np.nanmax(f_1_scores)
    rows.append((model_name, epoch_i, loss.item(), f_1_max))

# Writing results of the validation to those files.
df.to_csv(output_path_for_results, index=False)
header = ["model", "epoch", "training_loss", "f1_max"]
pd.DataFrame(rows, columns=header).to_csv(output_path_for_results_summary,
                                          index=False)

# In[44]:

output_dir = "../models/bert_small/model_save_{}/".format(
    datetime.datetime.now().strftime('%m_%d_%Y_h%Hm%Ms%S'))
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
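A toy reproduction of the validation metric computed above (made-up labels and scores): best F1 over all thresholds of a precision-recall curve, with nanmax guarding against the 0/0 points of the F-beta formula.

import numpy as np
from sklearn.metrics import precision_recall_curve

y_true = [0, 0, 1, 1, 1]
y_prob = [0.1, 0.4, 0.35, 0.8, 0.9]
precision, recall, thresholds = precision_recall_curve(y_true, y_prob)
with np.errstate(divide='ignore', invalid='ignore'):
    f_1_scores = (2 * precision * recall) / (precision + recall)
f_1_max = np.nanmax(f_1_scores)
print(f_1_max)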
Example #39
0
def test_IEEE_case_9_with_bad_data():
    # 1. Create network
    # test grid: IEEE case 9 (HV)
    # overall: 9 buses, 8 lines

    net = pp.networks.case9()

    pp.create_measurement(net, "v", "bus", 1.0, 0.01, bus=1)  # V at bus 1
    pp.create_measurement(net, "v", "bus", 1.02, 0.01, bus=2)  # V at bus 2
    pp.create_measurement(net, "v", "bus", 0.9822, 0.01, bus=3)  # V at bus 3
    pp.create_measurement(net, "v", "bus", 0.979, 0.01, bus=4)  # V at bus 4
    pp.create_measurement(net, "v", "bus", 1.005, 0.01, bus=5)  # V at bus 5
    pp.create_measurement(net, "v", "bus", 0.997, 0.01, bus=7)  # V at bus 7
    pp.create_measurement(net, "v", "bus", 0.953, 0.01, bus=8)  # V at bus 8

    pp.create_measurement(net, "p", "bus", 72000, 100., bus=0)
    pp.create_measurement(net, "p", "bus", 162780, 100., bus=1)
    pp.create_measurement(net, "p", "bus", 84958, 70., bus=2)
    pp.create_measurement(net, "p", "bus", 0., 1., bus=3)
    pp.create_measurement(net, "p", "bus", -89967., 20., bus=4)
    pp.create_measurement(net, "p", "bus", 0., 10., bus=5)
    pp.create_measurement(net, "p", "bus", -100059., 30., bus=6)
    pp.create_measurement(net, "p", "bus", 0., 10., bus=7)
    pp.create_measurement(net, "p", "bus", -125100, 50., bus=8)

    pp.create_measurement(net, "q", "bus", 24000, 100., bus=0)
    pp.create_measurement(net, "q", "bus", 14500, 100., bus=1)
    pp.create_measurement(net, "q", "bus", 3644, 70., bus=2)
    pp.create_measurement(net, "q", "bus", 0., 1., bus=3)
    pp.create_measurement(net, "q", "bus", -30041., 20., bus=4)
    pp.create_measurement(net, "q", "bus", 0., 10., bus=5)
    pp.create_measurement(net, "q", "bus", -35087, 30., bus=6)
    pp.create_measurement(net, "q", "bus", 0., 10., bus=7)
    pp.create_measurement(net, "q", "bus", -49900, 50., bus=8)

    # 2. Do state estimation
    success_SE = estimate(net, init='flat', ref_power=1e6)
    v_est_SE = net.res_bus_est.vm_pu.values
    delta_SE = net.res_bus_est.va_degree.values

    # 3. Create false measurement (very close to useful values)
    pp.create_measurement(net, "v", "bus", 0.2, 0.01, bus=0)  # V at bus 0

    # 4. Do chi2-test
    bad_data_detected = chi2_analysis(net, init='flat', ref_power=1e6)

    # 5. Perform rn_max_test
    success_rn_max = remove_bad_data(net,
                                     init='flat',
                                     ref_power=1e6,
                                     rn_max_threshold=7.0)
    v_est_rn_max = net.res_bus_est.vm_pu.values
    delta_est_rn_max = net.res_bus_est.va_degree.values

    diff_v = v_est_SE - v_est_rn_max
    diff_delta = delta_SE - delta_est_rn_max

    assert success_SE
    assert bad_data_detected
    assert success_rn_max
    assert (np.nanmax(abs(diff_v)) < 1e-5)
    assert (np.nanmax(abs(diff_delta)) < 1e-5)
Example #40
0
    def q_learning(self):
        ns = len(self.state2ind.keys())
        na = len(self.actions.keys())
        discount = self.gamma
        lr = self.alpha
        # initialization
        self.allowed_movements()
        # ADD YOUR CODE SNIPPET BETWEEN EX. 2.1
        # Initialize a numpy array with ns state rows and na action columns with float values from 0.0 to 1.0.
        Q = np.random.rand(ns, na)
        # ADD YOUR CODE SNIPPET BETWEEN EX. 2.1

        for s in range(ns):
            list_pos = self.allowed_moves[s]
            for i in range(4):
                if i not in list_pos:
                    Q[s, i] = np.nan

        Q_old = Q.copy()

        diff = np.infty
        end_episode = False

        init_pos_tuple = self.settings.init_pos_diver
        init_pos = self.ind2state[(init_pos_tuple[0], init_pos_tuple[1])]
        episode = 0

        R_total = 0
        current_total_steps = 0
        steps = 0

        # ADD YOUR CODE SNIPPET BETWEEN EX. 2.3
        # Change the while loop to incorporate a threshold limit, to stop training when the mean difference
        # in the Q table is lower than the threshold
        #while episode <= self.episode_max or diff > self.threshold:
        while diff > self.threshold:
            if episode > self.episode_max:
                break
            # ADD YOUR CODE SNIPPET BETWEEN EX. 2.3

            s_current = init_pos
            R_total = 0
            steps = 0
            while not end_episode:
                # selection of action
                list_pos = self.allowed_moves[s_current]

                # ADD YOUR CODE SNIPPET BETWEEN EX 2.1 and 2.2
                # Chose an action from all possible actions

                # 2.2 below, choosing an action based on highest q-value at the given state:
                action = np.nanargmax(Q[s_current])
                #print("chosen action", action, "\n")
                # ADD YOUR CODE SNIPPET BETWEEN EX 2.1 and 2.2

                # ADD YOUR CODE SNIPPET BETWEEN EX 5
                # Use the epsilon greedy algorithm to retrieve an action
                # ADD YOUR CODE SNIPPET BETWEEN EX 5

                # compute reward
                action_str = self.action_list[action]
                msg = {"action": action_str, "exploration": True}
                self.sender(msg)

                # wait response from game
                msg = self.receiver()
                R = msg["reward"]
                R_total += R
                s_next_tuple = msg["state"]
                end_episode = msg["end_episode"]
                s_next = self.ind2state[s_next_tuple]

                # ADD YOUR CODE SNIPPET BETWEEN EX. 2.2
                # Implement the Bellman Update equation to update Q
                Q[s_current, action] = (
                    1 - self.alpha) * Q[s_current, action] + self.alpha * (
                        R + self.gamma * np.nanmax(Q[s_next]))
                # ADD YOUR CODE SNIPPET BETWEEN EX. 2.2

                s_current = s_next
                current_total_steps += 1
                steps += 1

            # ADD YOUR CODE SNIPPET BETWEEN EX. 2.3
            # Compute the absolute value of the mean between the Q and Q-old
            diff = abs(np.nanmean(Q) - np.nanmean(Q_old))
            # ADD YOUR CODE SNIPPET BETWEEN EX. 2.3
            Q_old[:] = Q
            print(
                "Episode: {}, Steps {}, Diff: {:6e}, Total Reward: {}, Total Steps {}"
                .format(episode, steps, diff, R_total, current_total_steps))
            episode += 1
            end_episode = False

        return Q
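# Hedged sketch for the EX 5 placeholder above (epsilon-greedy action selection);
# "epsilon" is not defined in the original snippet, so it is taken as a parameter here.
def epsilon_greedy_action(Q, s_current, epsilon=0.1):
    """With probability epsilon pick a random allowed action, otherwise the greedy one."""
    import numpy as np
    allowed = np.where(~np.isnan(Q[s_current]))[0]   # actions not masked out with NaN
    if np.random.rand() < epsilon:
        return int(np.random.choice(allowed))         # explore
    return int(np.nanargmax(Q[s_current]))            # exploit, same as the greedy choice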
def test_3bus_with_2_slacks():
    # load the net which already contains 3 buses
    net = load_3bus_network()
    # add the same net with different slack (no galvanic connection)
    # skip bus index 4 as further stability test
    pp.create_bus(net, name="bus5", vn_kv=1., index=5)
    pp.create_bus(net, name="bus6", vn_kv=1., index=6)
    pp.create_bus(net, name="bus7", vn_kv=1., index=7)
    pp.create_ext_grid(net, 5)
    pp.create_line_from_parameters(net,
                                   5,
                                   6,
                                   1,
                                   r_ohm_per_km=.01,
                                   x_ohm_per_km=.03,
                                   c_nf_per_km=0.,
                                   max_i_ka=1)
    pp.create_line_from_parameters(net,
                                   5,
                                   7,
                                   1,
                                   r_ohm_per_km=.02,
                                   x_ohm_per_km=.05,
                                   c_nf_per_km=0.,
                                   max_i_ka=1)
    pp.create_line_from_parameters(net,
                                   6,
                                   7,
                                   1,
                                   r_ohm_per_km=.03,
                                   x_ohm_per_km=.08,
                                   c_nf_per_km=0.,
                                   max_i_ka=1)

    pp.create_measurement(net, "v", "bus", 1.006, .004, bus=5)  # V at bus 5
    pp.create_measurement(net, "v", "bus", .968, .004, bus=6)  # V at bus 6

    pp.create_measurement(net, "p", "bus", -501, 10, 6)  # P at bus 6
    pp.create_measurement(net, "q", "bus", -286, 10, 6)  # Q at bus 6

    pp.create_measurement(net, "p", "line", 888, 8, 5,
                          3)  # Pline (bus 1 -> bus 2) at bus 5
    pp.create_measurement(net, "p", "line", 1173, 8, 5,
                          4)  # Pline (bus 1 -> bus 3) at bus 5
    pp.create_measurement(net, "q", "line", 568, 8, 5,
                          3)  # Qline (bus 1 -> bus 2) at bus 5
    pp.create_measurement(net, "q", "line", 663, 8, 5,
                          4)  # Qline (bus 1 -> bus 3) at bus 5

    # 2. Do state estimation
    success = estimate(net, init='flat', maximum_iterations=10)
    v_result = net.res_bus_est.vm_pu.values
    delta_result = net.res_bus_est.va_degree.values

    target_v = np.array(
        [0.9996, 0.9741, 0.9438, np.nan, 0.9996, 0.9741, 0.9438])
    diff_v = target_v - v_result
    target_delta = np.array([
        0.0, -1.2475469989322963, -2.7457167371166862, np.nan, 0.0,
        -1.2475469989322963, -2.7457167371166862
    ])
    diff_delta = target_delta - delta_result

    assert success
    assert (np.nanmax(abs(diff_v)) < 1e-4)
    assert (np.nanmax(abs(diff_delta)) < 1e-4)
def test_init_slack_with_multiple_transformers(angles=True):
    np.random.seed(123)
    net = pp.create_empty_network()
    pp.create_bus(net, 220, index=0)
    pp.create_bus(net, 110, index=1)
    pp.create_bus(net, 110, index=2)
    pp.create_bus(net, 110, index=3)
    pp.create_bus(net, 10, index=4)
    pp.create_bus(net, 10, index=5)
    pp.create_bus(net, 10, index=6)
    pp.create_bus(net, 10, index=7, in_service=False)
    pp.create_transformer(net,
                          3,
                          7,
                          std_type="63 MVA 110/10 kV",
                          in_service=False)
    pp.create_transformer(net, 3, 4, std_type="63 MVA 110/10 kV")
    pp.create_transformer(net, 0, 1, std_type="100 MVA 220/110 kV")
    pp.create_line(net, 1, 2, 2.0, std_type="N2XS(FL)2Y 1x120 RM/35 64/110 kV")
    pp.create_line(net, 1, 3, 2.0, std_type="N2XS(FL)2Y 1x120 RM/35 64/110 kV")
    pp.create_line(net, 4, 5, 2.0, std_type="NA2XS2Y 1x95 RM/25 12/20 kV")
    pp.create_line(net, 5, 6, 2.0, std_type="NA2XS2Y 1x95 RM/25 12/20 kV")
    pp.create_load(net, 2, 5000, 3300)
    pp.create_load(net, 5, 900, 500)
    pp.create_load(net, 6, 700, 300)
    pp.create_ext_grid(net,
                       bus=0,
                       vm_pu=1.04,
                       va_degree=10.,
                       name="Slack 220 kV")
    pp.runpp(net, calculate_voltage_angles=angles)
    for bus, row in net.res_bus[net.bus.in_service == True].iterrows():
        pp.create_measurement(net, "v", "bus", row.vm_pu * r(0.01), 0.01, bus)
        if row.p_kw != 0.:
            continue
        pp.create_measurement(net, "p", "bus", -row.p_kw * r(),
                              max(1.0, abs(0.03 * row.p_kw)), bus)
        pp.create_measurement(net, "q", "bus", -row.q_kvar * r(),
                              max(1.0, abs(0.03 * row.q_kvar)), bus)
    pp.create_measurement(net,
                          "p",
                          "line",
                          net.res_line.p_from_kw[0],
                          10.,
                          bus=1,
                          element=0)
    pp.create_measurement(net,
                          "q",
                          "line",
                          net.res_line.q_from_kvar[0],
                          10.,
                          bus=1,
                          element=0)
    pp.create_measurement(net,
                          "p",
                          "line",
                          net.res_line.p_from_kw[2],
                          10.,
                          bus=4,
                          element=2)
    pp.create_measurement(net,
                          "q",
                          "line",
                          net.res_line.q_from_kvar[2],
                          10.,
                          bus=4,
                          element=2)
    pp.create_measurement(net,
                          "p",
                          "line",
                          net.res_line.p_from_kw[3],
                          10.,
                          bus=5,
                          element=3)
    pp.create_measurement(net,
                          "q",
                          "line",
                          net.res_line.q_from_kvar[3],
                          10.,
                          bus=5,
                          element=3)
    success = estimate(net, init='slack', calculate_voltage_angles=angles)

    # pretty high error for vm_pu (half percent!)
    assert success
    assert (np.nanmax(
        np.abs(net.res_bus.vm_pu.values - net.res_bus_est.vm_pu.values)) <
            0.006)
    assert (np.nanmax(
        np.abs(net.res_bus.va_degree.values -
               net.res_bus_est.va_degree.values)) < 0.006)
def test_3bus_with_out_of_service_bus():
    # Test case from book "Power System State Estimation", A. Abur, A. G. Exposito, p. 20ff.
    # S_ref = 1 MVA (PP standard)
    # V_ref = 1 kV
    # Z_ref = 1 Ohm

    # The example only had per unit values, but pandapower expects kV, MVA, kW, kVar
    # Measurements should be in kW/kVar/A - Voltage in p.u.

    # 1. Create network
    net = pp.create_empty_network()
    pp.create_bus(net, name="bus1", vn_kv=1.)
    pp.create_bus(net, name="bus2", vn_kv=1.)
    pp.create_bus(net, name="bus3", vn_kv=1.)
    pp.create_bus(net, name="bus4", vn_kv=1.,
                  in_service=0)  # out-of-service bus test
    pp.create_ext_grid(net, 0)
    pp.create_line_from_parameters(net,
                                   0,
                                   1,
                                   1,
                                   r_ohm_per_km=.01,
                                   x_ohm_per_km=.03,
                                   c_nf_per_km=0.,
                                   max_i_ka=1)
    pp.create_line_from_parameters(net,
                                   0,
                                   2,
                                   1,
                                   r_ohm_per_km=.02,
                                   x_ohm_per_km=.05,
                                   c_nf_per_km=0.,
                                   max_i_ka=1)
    pp.create_line_from_parameters(net,
                                   1,
                                   2,
                                   1,
                                   r_ohm_per_km=.03,
                                   x_ohm_per_km=.08,
                                   c_nf_per_km=0.,
                                   max_i_ka=1)

    pp.create_measurement(net, "v", "bus", 1.006, .004, bus=0)  # V at bus 1
    pp.create_measurement(net, "v", "bus", .968, .004, bus=1)  # V at bus 2

    pp.create_measurement(net, "p", "bus", -501, 10, 1)  # P at bus 2
    pp.create_measurement(net, "q", "bus", -286, 10, 1)  # Q at bus 2

    pp.create_measurement(net, "p", "line", 888, 8, 0,
                          0)  # Pline (bus 1 -> bus 2) at bus 1
    pp.create_measurement(net, "p", "line", 1173, 8, 0,
                          1)  # Pline (bus 1 -> bus 3) at bus 1
    pp.create_measurement(net, "q", "line", 568, 8, 0,
                          0)  # Qline (bus 1 -> bus 2) at bus 1
    pp.create_measurement(net, "q", "line", 663, 8, 0,
                          1)  # Qline (bus 1 -> bus 3) at bus 1

    # 2. Do state estimation
    success = estimate(net, init='flat')
    v_result = net.res_bus_est.vm_pu.values
    delta_result = net.res_bus_est.va_degree.values

    target_v = np.array([[0.9996, 0.9741, 0.9438, np.nan]])
    diff_v = target_v - v_result
    target_delta = np.array([[0., -1.2475, -2.7457, np.nan]])
    diff_delta = target_delta - delta_result

    assert success
    assert (np.nanmax(abs(diff_v)) < 1e-4)
    assert (np.nanmax(abs(diff_delta)) < 1e-4)
def test_3bus():
    # 1. Create network
    net = pp.create_empty_network()
    pp.create_bus(net, name="bus1", vn_kv=1.)
    pp.create_bus(net, name="bus2", vn_kv=1.)
    pp.create_bus(net, name="bus3", vn_kv=1.)
    pp.create_ext_grid(net, 0)
    pp.create_line_from_parameters(net,
                                   0,
                                   1,
                                   1,
                                   r_ohm_per_km=0.7,
                                   x_ohm_per_km=0.2,
                                   c_nf_per_km=0,
                                   max_i_ka=1)
    pp.create_line_from_parameters(net,
                                   0,
                                   2,
                                   1,
                                   r_ohm_per_km=0.8,
                                   x_ohm_per_km=0.8,
                                   c_nf_per_km=0,
                                   max_i_ka=1)
    pp.create_line_from_parameters(net,
                                   1,
                                   2,
                                   1,
                                   r_ohm_per_km=1,
                                   x_ohm_per_km=0.6,
                                   c_nf_per_km=0,
                                   max_i_ka=1)

    pp.create_measurement(net,
                          "p",
                          "line",
                          -0.0011e3,
                          0.01e3,
                          bus=0,
                          element=0)  # p12
    pp.create_measurement(net, "q", "line", 0.024e3, 0.01e3, bus=0,
                          element=0)  # q12

    pp.create_measurement(net, "p", "bus", 0.018e3, 0.01e3, bus=2)  # p3
    pp.create_measurement(net, "q", "bus", -0.1e3, 0.01e3, bus=2)  # q3

    pp.create_measurement(net, "v", "bus", 1.08, 0.05, 0)  # u1
    pp.create_measurement(net, "v", "bus", 1.015, 0.05, 2)  # u3

    # 2. Do state estimation
    success = estimate(net, init='flat')
    v_result = net.res_bus_est.vm_pu.values
    delta_result = net.res_bus_est.va_degree.values

    target_v = np.array([1.0627, 1.0589, 1.0317])
    diff_v = target_v - v_result
    target_delta = np.array([0., 0.8677, 3.1381])
    diff_delta = target_delta - delta_result

    assert success
    assert (np.nanmax(abs(diff_v)) < 1e-4)
    assert (np.nanmax(abs(diff_delta)) < 1e-4)
    def plot_good_bad_stns(pws_in_coords_df, ids_pws_stns_gd, ids_pws_stns_bad,
                           zvalues, prim_netw_ppt, zvalues_bad, prim_netwx,
                           prim_netwy, event_date):

        xstns_good = pws_in_coords_df.loc[ids_pws_stns_gd, 'X'].values.ravel()
        ystns_good = pws_in_coords_df.loc[ids_pws_stns_gd, 'Y'].values.ravel()

        xstns_bad = pws_in_coords_df.loc[ids_pws_stns_bad, 'X'].values.ravel()
        ystns_bad = pws_in_coords_df.loc[ids_pws_stns_bad, 'Y'].values.ravel()
        max_ppt = max(np.nanmax(zvalues), np.nanmax(prim_netw_ppt))

        interval_ppt = np.linspace(0.0, 0.99)
        colors_ppt = plt.get_cmap('Blues')(interval_ppt)
        cmap_ppt = LinearSegmentedColormap.from_list('name', colors_ppt)
        # cmap_ppt = plt.get_cmap('jet_r')
        cmap_ppt.set_over('navy')

        interval_ppt_bad = np.linspace(0.02, 0.95)
        colors_ppt_bad = plt.get_cmap('autumn')(interval_ppt_bad)
        cmap_ppt_bad = LinearSegmentedColormap.from_list(
            'name', colors_ppt_bad)

        plt.ioff()
        plt.figure(figsize=(12, 8), dpi=100)
        plt.scatter(prim_netwx,
                    prim_netwy,
                    c=prim_netw_ppt,
                    cmap=cmap_ppt,
                    marker=',',
                    s=10,
                    alpha=0.75,
                    vmin=0,
                    vmax=max_ppt,
                    label='prim_netw %d' % prim_netwx.size)

        sc = plt.scatter(xstns_good,
                         ystns_good,
                         c=zvalues,
                         cmap=cmap_ppt,
                         marker='.',
                         s=10,
                         alpha=0.75,
                         vmin=0,
                         vmax=max_ppt,
                         label='PWS Good %d' % xstns_good.size)

        plt.scatter(xstns_bad,
                    ystns_bad,
                    alpha=0.75,
                    c=zvalues_bad,
                    cmap=cmap_ppt_bad,
                    marker='X',
                    s=20,
                    vmin=0,
                    vmax=max_ppt,
                    label='PWS Bad %d' % xstns_bad.size)
        # plt.show()
        plt.xlabel('X [m]')
        plt.ylabel('Y [m]')
        plt.axis('equal')
        plt.legend(loc=0)
        cbar = plt.colorbar(sc, extend='max')
        cbar.ax.get_yaxis().labelpad = 15
        cbar.ax.set_ylabel('[mm/hr]')
        plt.title('Event date %s ' % (event_date))
        plt.grid(alpha=0.25)
        plt.savefig(
            os.path.join(
                out_save_dir, 'event_date_%s.png' %
                (str(event_date).replace('-', '_').replace(':', '_'))))
        plt.close()
def test_3bus_with_transformer():
    np.random.seed(12)

    # 1. Create network
    net = pp.create_empty_network()
    pp.create_bus(net, name="bus1", vn_kv=10.)
    pp.create_bus(net, name="bus2", vn_kv=10.)
    pp.create_bus(net, name="bus3", vn_kv=10.)
    pp.create_bus(net, name="bus4", vn_kv=110.)
    pp.create_ext_grid(net, bus=3, vm_pu=1.01)
    pp.create_line_from_parameters(net,
                                   0,
                                   1,
                                   1,
                                   r_ohm_per_km=.01,
                                   x_ohm_per_km=.03,
                                   c_nf_per_km=0.,
                                   max_i_ka=1)
    pp.create_line_from_parameters(net,
                                   0,
                                   2,
                                   1,
                                   r_ohm_per_km=.02,
                                   x_ohm_per_km=.05,
                                   c_nf_per_km=0.,
                                   max_i_ka=1)
    pp.create_line_from_parameters(net,
                                   1,
                                   2,
                                   1,
                                   r_ohm_per_km=.03,
                                   x_ohm_per_km=.08,
                                   c_nf_per_km=0.,
                                   max_i_ka=1)
    pp.create_transformer(net, 3, 0, std_type="25 MVA 110/10 kV")

    pp.create_load(net, 1, 450, 300)
    pp.create_load(net, 2, 350, 200)

    pp.runpp(net, calculate_voltage_angles=True)

    pp.create_measurement(net,
                          "v",
                          "bus",
                          r2(net.res_bus.vm_pu.iloc[0], .004),
                          .004,
                          bus=0)
    pp.create_measurement(net,
                          "v",
                          "bus",
                          r2(net.res_bus.vm_pu.iloc[1], .004),
                          .004,
                          bus=1)
    pp.create_measurement(net,
                          "v",
                          "bus",
                          r2(net.res_bus.vm_pu.iloc[3], .004),
                          .004,
                          bus=3)

    pp.create_measurement(net,
                          "p",
                          "bus",
                          -r2(net.res_bus.p_kw.iloc[1], 10),
                          10,
                          bus=1)
    pp.create_measurement(net,
                          "q",
                          "bus",
                          -r2(net.res_bus.q_kvar.iloc[1], 10),
                          10,
                          bus=1)

    pp.create_measurement(net,
                          "p",
                          "bus",
                          -r2(net.res_bus.p_kw.iloc[2], 10),
                          10,
                          bus=2)
    pp.create_measurement(net,
                          "q",
                          "bus",
                          -r2(net.res_bus.q_kvar.iloc[2], 10),
                          10,
                          bus=2)

    pp.create_measurement(net, "p", "bus", 0., 1.0, bus=0)
    pp.create_measurement(net, "q", "bus", 0., 1.0, bus=0)

    pp.create_measurement(net, "p", "line",
                          r2(net.res_line.p_from_kw.iloc[0], 8), 8, 0, 0)
    pp.create_measurement(net, "p", "line",
                          r2(net.res_line.p_from_kw.iloc[1], 8), 8, 0, 1)

    pp.create_measurement(net,
                          "p",
                          "transformer",
                          r2(net.res_trafo.p_hv_kw.iloc[0], 10),
                          10,
                          bus=3,
                          element=0)  # transformer meas.
    pp.create_measurement(net,
                          "q",
                          "transformer",
                          r2(net.res_trafo.q_hv_kvar.iloc[0], 10),
                          10,
                          bus=3,
                          element=0)  # at hv side

    # 2. Do state estimation
    success = estimate(net,
                       init='slack',
                       tolerance=5e-5,
                       maximum_iterations=10)
    v_result = net.res_bus_est.vm_pu.values
    delta_result = net.res_bus_est.va_degree.values

    diff_v = net.res_bus.vm_pu.values - v_result
    diff_delta = net.res_bus.va_degree.values - delta_result

    assert success
    assert (np.nanmax(abs(diff_v)) < 6e-4)
    assert (np.nanmax(abs(diff_delta)) < 1.4e-4)
Example #47
0
def main():
#%% First setup some parameters for data and motion correction

    # dataset dependent parameters
    fr = 15.49             # imaging rate in frames per second
    decay_time = 0.9    # length of a typical transient in seconds
    dxy = (1.4, 1.4)      # spatial resolution in x and y in (um per pixel)
    # note the lower than usual spatial resolution here
    max_shift_um = (12., 12.)       # maximum shift in um
    patch_motion_um = (100., 100.)  # patch size for non-rigid correction in um

    # motion correction parameters
    pw_rigid = True       # flag to select rigid vs pw_rigid motion correction
    # maximum allowed rigid shift in pixels
    max_shifts = [int(a/b) for a, b in zip(max_shift_um, dxy)]
    # start a new patch for pw-rigid motion correction every x pixels
    strides = tuple([int(a/b) for a, b in zip(patch_motion_um, dxy)])
    # overlap between patches (size of patch in pixels: strides+overlaps)
    overlaps = (24, 24)
    # maximum deviation allowed for patch with respect to rigid shifts
    max_deviation_rigid = 3

    mc_dict = {
        'fnames': fnames,
        'fr': fr,
        'decay_time': decay_time,
        'dxy': dxy,
        'pw_rigid': pw_rigid,
        'max_shifts': max_shifts,
        'strides': strides,
        'overlaps': overlaps,
        'max_deviation_rigid': max_deviation_rigid,
        'border_nan': 'copy'
    }

    opts = params.CNMFParams(params_dict=mc_dict)

# %% start a cluster for parallel processing
    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local', n_processes=None, single_thread=False)

# %%% MOTION CORRECTION
    # first we create a motion correction object with the specified parameters
    mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
    # note that the file is not loaded in memory

# %% Run (piecewise-rigid motion) correction using NoRMCorre
    mc.motion_correct(save_movie=True)

# %% MEMORY MAPPING
    border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0
    # you can include the boundaries of the FOV if you used the 'copy' option
    # during motion correction, although be careful about the components near
    # the boundaries

    # memory map the file in order 'C'
    fname_new = cm.save_memmap(mc.mmap_file, base_name='_memmap_', order='C',
                               border_to_0=border_to_0)  # exclude borders

    # now load the file
    Yr, dims, T = cm.load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    # load frames in python format (T x X x Y)

# %% restart cluster to clean up memory
    cm.stop_server(dview=dview)
    c, dview, n_processes = cm.cluster.setup_cluster(
        backend='local', n_processes=None, single_thread=False)

# %%  parameters for source extraction and deconvolution
    p = 1                    # order of the autoregressive system
    gnb = 3                  # number of global background components
    merge_thr = 0.85         # merging threshold, max correlation allowed
    rf = 20                  # half-size of the patches in pixels. e.g., if rf=25, patches are 50x50
    stride_cnmf = 6          # amount of overlap between the patches in pixels
    K = 3                    # number of components per patch
    gSig = [7, 7]            # expected half size of neurons in pixels
    method_init = 'greedy_roi'   # initialization method (use 'sparse_nmf' if analyzing dendritic data)
    ssub = 2                     # spatial subsampling during initialization
    tsub = 2                     # temporal subsampling during initialization
    s_min = -20                   # minimum signal amplitude needed in order for a transient to be considered as activity

    # parameters for component evaluation
    opts_dict = {'fnames': fnames,
                 'fr': fr,
                 'nb': gnb,
                 'rf': rf,
                 'K': K,
                 'gSig': gSig,
                 'stride': stride_cnmf,
                 'method_init': method_init,
                 'rolling_sum': True,
                 'merge_thr': merge_thr,
                 'n_processes': n_processes,
                 'only_init': True,
                 'ssub': ssub,
                 'tsub': tsub,
                 's_min': s_min}

    opts.change_params(params_dict=opts_dict)

# %% RUN CNMF ON PATCHES
    # First extract spatial and temporal components on patches and combine them
    # for this step deconvolution is turned off (p=0)
    opts.change_params({'p': 0})
    cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
    cnm = cnm.fit(images)

# %% ALTERNATE WAY TO RUN THE PIPELINE AT ONCE
    #   you can also perform the motion correction plus cnmf fitting steps
    #   simultaneously after defining your parameters object using
    #  cnm1 = cnmf.CNMF(n_processes, params=opts, dview=dview)
    #  cnm1.fit_file(motion_correct=True)


# %% RE-RUN seeded CNMF on accepted patches to refine and perform deconvolution
    cnm.params.change_params({'p': p})
    cnm2 = cnm.refit(images, dview=dview)
    # %% COMPONENT EVALUATION
    # the components are evaluated in three ways:
    #   a) the shape of each component must be correlated with the data
    #   b) a minimum peak SNR is required over the length of a transient
    #   c) each shape passes a CNN based classifier
    min_SNR = 2  # Overall minimum signal to noise ratio for accepting a component
    rval_thr = 0.85  # space correlation threshold for accepting a component
    cnn_thr = 0.99  # threshold for CNN based classifier
    cnn_lowest = 0.15 # neurons with cnn probability lower than this value are rejected
    min_size_neuro = 0.1*gSig[0]*np.pi**2
    max_size_neuro = 2.5*gSig[0]*np.pi**2

    cnm2.params.set('quality', {'decay_time': decay_time,
                               'min_SNR': min_SNR,
                               'rval_thr': rval_thr,
                               'use_cnn': True,
                               'min_cnn_thr': cnn_thr,
                               'cnn_lowest': cnn_lowest,
                               'min_size_neuro': min_size_neuro,
                               'max_size_neuro': max_size_neuro,})

    cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview)

    #%% update object with selected components
    cnm2.estimates.select_components(use_object=True)

    #%% Extract DF/F values
    cnm2.estimates.detrend_df_f(quantileMin=8, frames_window=250)


    #cnm2.estimates.threshold_spatial_components(maxthr=0.85)
    #cnm2.estimates.remove_small_large_neurons(min_size_neuro=min_size_neuro, max_size_neuro=max_size_neuro)

    # %% Generate heat image of video and save data
    Cn = cm.local_correlations(images, swap_dim=False)
    Cn[np.isnan(Cn)] = 0
    cnm2.estimates.Cn = Cn
    cnm2.save(cnm2.mmap_file[:-4] + 'hdf5')


    #%% STOP CLUSTER and clean up log files
    cm.stop_server(dview=dview)

    #Create denoised video as matrix
    denoised = cm.movie(cnm2.estimates.A.dot(cnm2.estimates.C) + \
                    cnm2.estimates.b.dot(cnm2.estimates.f)).reshape(dims + (-1,), order='F').transpose([2, 0, 1])

    #Normalizing denoised matrix to the [0,255] range prior to type conversion
    max_pixel = np.nanmax(denoised)
    norm_denoised = (denoised/max_pixel)*255

    #Type conversion so imageio doesn't get mad
    norm_denoised = np.uint8(norm_denoised)

    #Saving a denoised avi of the analyzed video
    sname = Path(args.in_file).stem + '.avi'
    imageio.mimwrite(sname, norm_denoised, fps=15.49)
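# Hedged note (not in the original): dividing by the global max assumes the denoised
# movie is non-negative; a min-max variant that also handles a non-zero minimum would be
#   mn, mx = np.nanmin(denoised), np.nanmax(denoised)
#   norm_denoised = np.uint8((denoised - mn) / (mx - mn) * 255)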
def test_3bus_with_bad_data():
    net = pp.create_empty_network()
    pp.create_bus(net, name="bus1", vn_kv=1.)
    pp.create_bus(net, name="bus2", vn_kv=1.)
    pp.create_bus(net, name="bus3", vn_kv=1.)
    pp.create_ext_grid(net, 0)
    pp.create_line_from_parameters(net,
                                   0,
                                   1,
                                   1,
                                   r_ohm_per_km=0.7,
                                   x_ohm_per_km=0.2,
                                   c_nf_per_km=0,
                                   max_i_ka=1)
    pp.create_line_from_parameters(net,
                                   0,
                                   2,
                                   1,
                                   r_ohm_per_km=0.8,
                                   x_ohm_per_km=0.8,
                                   c_nf_per_km=0,
                                   max_i_ka=1)
    pp.create_line_from_parameters(net,
                                   1,
                                   2,
                                   1,
                                   r_ohm_per_km=1,
                                   x_ohm_per_km=0.6,
                                   c_nf_per_km=0,
                                   max_i_ka=1)

    pp.create_measurement(net,
                          "p",
                          "line",
                          -0.0011e3,
                          0.01e3,
                          bus=0,
                          element=0)  # p12
    pp.create_measurement(net, "q", "line", 0.024e3, 0.01e3, bus=0,
                          element=0)  # q12

    pp.create_measurement(net, "p", "bus", 0.018e3, 0.01e3, bus=2)  # p3
    pp.create_measurement(net, "q", "bus", -0.1e3, 0.01e3, bus=2)  # q3

    pp.create_measurement(net, "v", "bus", 1.08, 0.05, 0)  # u1
    pp.create_measurement(net, "v", "bus", 1.015, 0.05, 2)  # u3

    # 1. Create false voltage measurement for testing bad data detection (-> should be removed)
    pp.create_measurement(net, "v", "bus", 1.3, 0.01, bus=1)  # V at bus 2

    # 2. Do chi2-test
    bad_data_detected = chi2_analysis(net, init='flat')

    # 3. Perform rn_max_test
    success_rn_max = remove_bad_data(net, init='flat')
    v_est_rn_max = net.res_bus_est.vm_pu.values
    delta_est_rn_max = net.res_bus_est.va_degree.values

    target_v = np.array([1.0627, 1.0589, 1.0317])
    diff_v = target_v - v_est_rn_max
    target_delta = np.array([0., 0.8677, 3.1381])
    diff_delta = target_delta - delta_est_rn_max

    assert bad_data_detected
    assert success_rn_max
    assert (np.nanmax(abs(diff_v)) < 1e-4)
    assert (np.nanmax(abs(diff_delta)) < 1e-4)
Example #49
0
def image_as_uint(im, bitdepth=None):
    """ Convert the given image to uint (default: uint8)
    
    If the dtype already matches the desired format, it is returned
    as-is. If the image is float, and all values are between 0 and 1,
    the values are multiplied by np.power(2.0, bitdepth)-1. In all other
    situations, the values are scaled such that the minimum value
    becomes 0 and the maximum value becomes np.power(2.0, bitdepth)-1
    (255 for 8-bit and 65535 for 16-bit).
    """
    if not bitdepth:
        bitdepth = 8
    if not isinstance(im, np.ndarray):
        raise ValueError('Image must be a numpy array')
    if bitdepth == 8:
        out_type = np.uint8
    elif bitdepth == 16:
        out_type = np.uint16
    else:
        raise ValueError('Bitdepth must be either 8 or 16')
    dtype_str1 = str(im.dtype)
    dtype_str2 = out_type.__name__
    if ((im.dtype == np.uint8 and bitdepth == 8)
            or (im.dtype == np.uint16 and bitdepth == 16)):
        # Already the correct format? Return as-is
        return im
    if (dtype_str1.startswith('float') and np.nanmin(im) >= 0
            and np.nanmax(im) <= 1):
        _precision_warn(dtype_str1, dtype_str2, 'Range [0, 1].')
        im = im.astype(
            np.float64) * (np.power(2.0, bitdepth) - 1) + 0.499999999
    elif im.dtype == np.uint16 and bitdepth == 8:
        _precision_warn(dtype_str1, dtype_str2, 'Losing 8 bits of resolution.')
        im = np.right_shift(im, 8)
    elif im.dtype == np.uint32:
        _precision_warn(dtype_str1, dtype_str2,
                        'Losing {} bits of resolution.'.format(32 - bitdepth))
        im = np.right_shift(im, 32 - bitdepth)
    elif im.dtype == np.uint64:
        _precision_warn(
            dtype_str1, dtype_str2,
            'Losing {} bits of resolution.'.format(64 - bitdepth, ))
        im = np.right_shift(im, 64 - bitdepth)
    else:
        mi = np.nanmin(im)
        ma = np.nanmax(im)
        if not np.isfinite(mi):
            raise ValueError('Minimum image value is not finite')
        if not np.isfinite(ma):
            raise ValueError('Maximum image value is not finite')
        if ma == mi:
            raise ValueError('Max value == min value, ambiguous given dtype')
        _precision_warn(dtype_str1, dtype_str2,
                        'Range [{}, {}].'.format(mi, ma))
        # Now make float copy before we scale
        im = im.astype('float64')
        # Scale the values between 0 and 1 then multiply by the max value
        im = (im - mi) / (ma - mi) * (np.power(2.0, bitdepth) -
                                      1) + 0.499999999
    assert np.nanmin(im) >= 0
    assert np.nanmax(im) < np.power(2.0, bitdepth)
    return im.astype(out_type)
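# Quick usage sketch (illustrative values, not from the original; assumes the module's
# _precision_warn helper is available, since it is called for float inputs):
#   image_as_uint(np.array([[0.0, 0.5], [0.25, 1.0]]), bitdepth=8)    # [0, 1] floats * 255 -> uint8
#   image_as_uint(np.array([[-3.0, 7.0], [1.0, 5.0]]), bitdepth=16)   # min -> 0, max -> 65535, uint16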
def on_evt_filter_pws(args):

    (path_to_prim_netw_ppt_hdf5, prim_netw_in_coords_df,
     path_to_neatmo_ppt_hdf5, pws_in_coords_df, ids_pws_to_use, time_list,
     df_save_results) = args

    HDF5_pws_ppt = HDF5(infile=path_to_neatmo_ppt_hdf5)
    #     all_pws_ids = HDF5_pws_ppt.get_all_names()
    all_pws_ids_to_use = ids_pws_to_use

    HDF5_prim_netw_ppt = HDF5(infile=path_to_prim_netw_ppt_hdf5)
    all_prim_netw_ids = HDF5_prim_netw_ppt.get_all_names()

    def scale_vg_based_on_prim_netw_ppt(ppt_dwd_vals, vg_sill_b4_scale):
        # scale variogram based on dwd ppt
        #         vg_sill = float(vg_model_to_scale.split(" ")[0])
        dwd_vals_var = np.var(ppt_dwd_vals)
        vg_scaling_ratio = dwd_vals_var / vg_sill_b4_scale

        if vg_scaling_ratio == 0:
            vg_scaling_ratio = vg_sill_b4_scale

        # rescale variogram
        vgs_model_dwd_ppt = str(np.round(
            vg_scaling_ratio, 4)) + ' ' + vg_model_to_scale.split(" ")[1]
        #         vgs_model_dwd_ppt
        return vgs_model_dwd_ppt  # vg_scaling_ratio

    def plot_good_bad_stns(pws_in_coords_df, ids_pws_stns_gd, ids_pws_stns_bad,
                           zvalues, prim_netw_ppt, zvalues_bad, prim_netwx,
                           prim_netwy, event_date):

        xstns_good = pws_in_coords_df.loc[ids_pws_stns_gd, 'X'].values.ravel()
        ystns_good = pws_in_coords_df.loc[ids_pws_stns_gd, 'Y'].values.ravel()

        xstns_bad = pws_in_coords_df.loc[ids_pws_stns_bad, 'X'].values.ravel()
        ystns_bad = pws_in_coords_df.loc[ids_pws_stns_bad, 'Y'].values.ravel()
        max_ppt = max(np.nanmax(zvalues), np.nanmax(prim_netw_ppt))

        interval_ppt = np.linspace(0.0, 0.99)
        colors_ppt = plt.get_cmap('Blues')(interval_ppt)
        cmap_ppt = LinearSegmentedColormap.from_list('name', colors_ppt)
        # cmap_ppt = plt.get_cmap('jet_r')
        cmap_ppt.set_over('navy')

        interval_ppt_bad = np.linspace(0.02, 0.95)
        colors_ppt_bad = plt.get_cmap('autumn')(interval_ppt_bad)
        cmap_ppt_bad = LinearSegmentedColormap.from_list(
            'name', colors_ppt_bad)

        plt.ioff()
        plt.figure(figsize=(12, 8), dpi=100)
        plt.scatter(prim_netwx,
                    prim_netwy,
                    c=prim_netw_ppt,
                    cmap=cmap_ppt,
                    marker=',',
                    s=10,
                    alpha=0.75,
                    vmin=0,
                    vmax=max_ppt,
                    label='prim_netw %d' % prim_netwx.size)

        sc = plt.scatter(xstns_good,
                         ystns_good,
                         c=zvalues,
                         cmap=cmap_ppt,
                         marker='.',
                         s=10,
                         alpha=0.75,
                         vmin=0,
                         vmax=max_ppt,
                         label='PWS Good %d' % xstns_good.size)

        plt.scatter(xstns_bad,
                    ystns_bad,
                    alpha=0.75,
                    c=zvalues_bad,
                    cmap=cmap_ppt_bad,
                    marker='X',
                    s=20,
                    vmin=0,
                    vmax=max_ppt,
                    label='PWS Bad %d' % xstns_bad.size)
        # plt.show()
        plt.xlabel('X [m]')
        plt.ylabel('Y [m]')
        plt.axis('equal')
        plt.legend(loc=0)
        cbar = plt.colorbar(sc, extend='max')
        cbar.ax.get_yaxis().labelpad = 15
        cbar.ax.set_ylabel('[mm/hr]')
        plt.title('Event date %s ' % (event_date))
        plt.grid(alpha=0.25)
        plt.savefig(
            os.path.join(
                out_save_dir, 'event_date_%s.png' %
                (str(event_date).replace('-', '_').replace(':', '_'))))
        plt.close()

    #==========================================================================
    #
    #==========================================================================

    for ix, date_to_correct in enumerate(time_list):

        print(ix, '/', len(time_list), '--', date_to_correct)
        # pws_data = pd.read_feather(path_netatamo_edf_fk, columns=pws_ids_str)
        pws_data_evt = HDF5_pws_ppt.get_pandas_dataframe_for_date(
            ids=all_pws_ids_to_use,
            event_date=date_to_correct).dropna(how='all', axis=1)

        if len(pws_data_evt.columns) > 0:
            pws_stns_evt = pws_data_evt.columns.to_list()
            cmn_pws_event = pws_in_coords_df.index.intersection(pws_stns_evt)
            # coords of stns to correct
            xstns_interp = pws_in_coords_df.loc[cmn_pws_event,
                                                'X'].values.ravel()
            ystns_interp = pws_in_coords_df.loc[cmn_pws_event,
                                                'Y'].values.ravel()
            cmn_pws_data_evt = pws_data_evt.loc[:, cmn_pws_event]
            # prim_netw data
            ppt_prim_netw_vals_evt = (
                HDF5_prim_netw_ppt.get_pandas_dataframe_for_date(
                    ids=all_prim_netw_ids, event_date=date_to_correct))
            prim_netw_stns_evt = ppt_prim_netw_vals_evt.columns.to_list()

            cmn_prim_netw_stns_evt = prim_netw_in_coords_df.index.intersection(
                prim_netw_stns_evt)

            prim_netw_xcoords = prim_netw_in_coords_df.loc[
                cmn_prim_netw_stns_evt, 'X'].values.ravel()
            prim_netw_ycoords = prim_netw_in_coords_df.loc[
                cmn_prim_netw_stns_evt, 'Y'].values.ravel()
            # primary network values for event
            cmn_ppt_prim_netw_vals_evt = ppt_prim_netw_vals_evt.loc[:,
                                                                    cmn_prim_netw_stns_evt]
            # scale variogram
            vgs_model_dwd_ppt = scale_vg_based_on_prim_netw_ppt(
                ppt_prim_netw_vals_evt.values, vg_sill_b4_scale)

            # start kriging pws location
            #             OK_prim_netw_pws_crt = OKpy(xi=prim_netw_xcoords,
            #                                         yi=prim_netw_ycoords,
            #                                         zi=cmn_ppt_prim_netw_vals_evt.values.ravel(),
            #                                         xk=xstns_interp,
            #                                         yk=ystns_interp,
            #                                         model=vgs_model_dwd_ppt)
            # using PYkrige
            dwd_vals_var = np.var(cmn_ppt_prim_netw_vals_evt.values)
            vg_scaling_ratio = dwd_vals_var / vg_sill_b4_scale
            #
            if vg_scaling_ratio == 0:
                vg_scaling_ratio = vg_sill_b4_scale
            OK_prim_netw_pws_crt = OKpy(
                prim_netw_xcoords,
                prim_netw_ycoords,
                cmn_ppt_prim_netw_vals_evt.values.ravel(),
                variogram_model=vg_model_str,
                variogram_parameters={
                    'sill': vg_scaling_ratio,
                    'range': vg_range,
                    'nugget': 0
                })
            try:
                #                 OK_prim_netw_pws_crt.krige()
                #                 zvalues = OK_prim_netw_pws_crt.zk.copy()
                #
                #                 # calcualte standard deviation of estimated values
                #                 std_est_vals = np.sqrt(OK_prim_netw_pws_crt.est_vars)

                zvalues, est_var = OK_prim_netw_pws_crt.execute(
                    'points', np.array([xstns_interp]),
                    np.array([ystns_interp]))
                std_est_vals = np.sqrt(est_var).data
            except Exception as msg:
                print('error', msg)

            # if neg assign 0
            zvalues[zvalues < 0] = 0
            # calculate standard deviation of estimated values

            # calculate difference observed and estimated
            # values
            diff_obsv_interp = np.abs(cmn_pws_data_evt.values - zvalues)

            idx_good_stns = np.where(diff_obsv_interp <= 3 * std_est_vals)[1]
            idx_bad_stns = np.where(diff_obsv_interp > 3 * std_est_vals)[1]

            if len(idx_bad_stns) > 0:

                # use additional filter
                try:
                    ids_pws_stns_gd = np.take(cmn_pws_event,
                                              idx_good_stns).ravel()
                    ids_pws_stns_bad = np.take(cmn_pws_event,
                                               idx_bad_stns).ravel()

                except Exception as msg:
                    print(msg)
                # ids of bad stns
                xstns_bad = pws_in_coords_df.loc[ids_pws_stns_bad,
                                                 'X'].values.ravel()
                ystns_bad = pws_in_coords_df.loc[ids_pws_stns_bad,
                                                 'Y'].values.ravel()
                #                 # check if bad are truly bad
                xstns_good = pws_in_coords_df.loc[ids_pws_stns_gd,
                                                  'X'].values.ravel()
                ystns_good = pws_in_coords_df.loc[ids_pws_stns_gd,
                                                  'Y'].values.ravel()

                # coords of neighbors good
                neighbors_coords = np.array([
                    (x, y) for x, y in zip(xstns_good, ystns_good)
                ])

                # create a tree from coordinates
                points_tree = spatial.cKDTree(neighbors_coords)

                neighbors_coords_prim_netw = np.array([
                    (x, y)
                    for x, y in zip(prim_netw_xcoords, prim_netw_ycoords)
                ])

                points_tree_prim_netw = spatial.cKDTree(
                    neighbors_coords_prim_netw)

                #                 plt.ioff()
                #                 plt.scatter(xstns_good, ystns_good, c='b')
                #                 plt.scatter(prim_netw_xcoords, prim_netw_ycoords, c='g')
                #                 plt.scatter(xstns_bad, ystns_bad, c='r')
                #                 plt.show()
                if len(idx_bad_stns) > 0:
                    for stn_ix, stn_bad in zip(idx_bad_stns, ids_pws_stns_bad):
                        ppt_bad = cmn_pws_data_evt.loc[:, stn_bad].values
                        # print('ppt_bad', ppt_bad)
                        if ppt_bad >= 0.:

                            xstn_bd = pws_in_coords_df.loc[stn_bad, 'X']
                            ystn_bd = pws_in_coords_df.loc[stn_bad, 'Y']

                            idxs_neighbours = points_tree.query_ball_point(
                                np.array((xstn_bd, ystn_bd)), 1e4)

                            ids_neighbours = ids_pws_stns_gd[idxs_neighbours]
                            ids_neighbours_evt = np.in1d(
                                cmn_pws_event, ids_neighbours)

                            idxs_neighbours_prim_netw = points_tree_prim_netw.query_ball_point(
                                np.array((xstn_bd, ystn_bd)), 1e4)

                            ids_neighbours_prim_netw_evt = np.array(
                                cmn_prim_netw_stns_evt
                            )[idxs_neighbours_prim_netw]

                            if len(ids_neighbours_evt) > 0:
                                ppt_pws_ngbrs = cmn_pws_data_evt.loc[:,
                                                                     ids_neighbours_evt]
                                ppt_pws_data = ppt_pws_ngbrs.values
                                #---------------------------------------------
                                # xstn_ngbr = pws_in_coords_df.loc[
                                #     ppt_pws_ngbrs.columns, 'X'].values.ravel()
                                # ystn_ngbr = pws_in_coords_df.loc[
                                #     ppt_pws_ngbrs.columns, 'Y'].values.ravel()
                                #---------------------------------------------

                                if ppt_pws_data.size == 0:
                                    ppt_pws_data = 1000
                            else:
                                ppt_pws_data = 1000

                            if len(ids_neighbours_prim_netw_evt) > 0:
                                ppt_prim_netw_ngbrs = cmn_ppt_prim_netw_vals_evt.loc[:,
                                                                                     ids_neighbours_prim_netw_evt]
                                ppt_prim_netw_data = ppt_prim_netw_ngbrs.values
                                #---------------------------------------------
                                # prim_netw_xstn_ngbr = prim_netw_in_coords_df.loc[
                                #     ppt_prim_netw_ngbrs.columns, 'X'].values.ravel()
                                # prim_netw_ystn_ngbr = prim_netw_in_coords_df.loc[
                                #     ppt_prim_netw_ngbrs.columns, 'Y'].values.ravel()
                                #---------------------------------------------

                                #                                 plt.ioff()
                                #                                 plt.scatter(xstn_bd, ystn_bd, c='r')
                                #                                 plt.scatter(xstn_ngbr, ystn_ngbr, c='b')
                                #                                 plt.scatter(prim_netw_xstn_ngbr,
                                #                                             prim_netw_ystn_ngbr, c='g')
                                #                                 plt.show()
                                if ppt_prim_netw_data.size == 0:
                                    ppt_prim_netw_data = 1000
                            else:
                                ppt_prim_netw_data = 1000  # always wrong
                            try:
                                if (ppt_bad > np.nanmin(ppt_pws_data)
                                        or ppt_bad >
                                        np.nanmin(ppt_prim_netw_data)):
                                    # print('added bad to good\n')
                                    # print('ppt_bad', ppt_bad)
                                    ids_pws_stns_gd_final = np.append(
                                        ids_pws_stns_gd, stn_bad)
                                    idx_good_stns_final = np.append(
                                        idx_good_stns, stn_ix)

                                    ids_pws_stns_bad_final = np.setdiff1d(
                                        ids_pws_stns_bad, stn_bad)
                                    ids_pws_stns_bad_final.size
                                    idx_bad_stns_final = np.setdiff1d(
                                        idx_bad_stns, stn_ix)

                                    assert stn_bad in ids_pws_stns_gd_final
                                    assert stn_bad not in ids_pws_stns_bad_final
                                else:
                                    pass
                                    # print('not added bad stn')

                                idx_good_stns_final = np.sort(
                                    idx_good_stns_final)
                                idx_bad_stns_final = np.sort(
                                    idx_bad_stns_final)
                            except Exception as msg:
                                print('error', msg)
                                continue
                                # raise Exception

                else:
                    ids_pws_stns_gd_final = ids_pws_stns_gd
                    idx_good_stns_final = idx_good_stns
                    ids_pws_stns_bad_final = ids_pws_stns_bad
                    idx_bad_stns_final = idx_bad_stns

                try:
                    print('Number of Stations with bad index \n',
                          len(idx_bad_stns_final))
                    print('Number of Stations with good index \n',
                          len(idx_good_stns_final))

                    # save results gd+1, bad -1
                    df_save_results.loc[date_to_correct,
                                        ids_pws_stns_gd_final] = 1
                    df_save_results.loc[date_to_correct,
                                        ids_pws_stns_bad_final] = -1
                except Exception as msg3:
                    print(msg3)
                    continue

                try:
                    zvalues_good = cmn_pws_data_evt.loc[
                        date_to_correct, ids_pws_stns_gd_final].values.ravel()
                    zvalues_bad = cmn_pws_data_evt.loc[
                        date_to_correct,
                        ids_pws_stns_bad_final].values.ravel()

                    # plot configuration
                    max_ppt = max(
                        np.nanmax(zvalues_good),
                        np.nanmax(cmn_ppt_prim_netw_vals_evt.values.ravel()))
                    if max_ppt >= 0:
                        print('plotting map')
                        plot_good_bad_stns(
                            pws_in_coords_df, ids_pws_stns_gd_final,
                            ids_pws_stns_bad_final, zvalues_good,
                            cmn_ppt_prim_netw_vals_evt.values.ravel(),
                            zvalues_bad, prim_netw_xcoords, prim_netw_ycoords,
                            date_to_correct)
                        plt.close()
                except Exception as msg2:
                    print('error plotting ', msg2)
#         break
    df_save_results.dropna(how='all', inplace=True)

    return df_save_results
Example #51
0
def aabb(points):
    points = np.asarray(points)
    dim = points.shape[-1]
    points = points.reshape(-1, dim)
    p1, p2 = np.nanmin(points, 0), np.nanmax(points, 0)
    return np.array([p1, p2])
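# Usage sketch (illustrative input only); NaNs are ignored by nanmin/nanmax:
#   aabb(np.array([[0.0, 2.0], [3.0, -1.0], [np.nan, 5.0]]))
#   -> array([[ 0., -1.],
#             [ 3.,  5.]])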
Example #52
0
from astropy.io import fits

hdu1 = fits.open('mom2_12co_pix_2_Tmb.fits')[0]
xcenter = 84
ycenter = -6
wid = 1.5
hei = 2.4
xpanels = 1
ypanels = 1
fig = plt.figure(figsize=(3 * xpanels * 1.1 * (wid / (wid + hei)) * 10.,
                          3 * ypanels / 1.1 * (hei / (wid + hei)) * 10.))
ff = aplpy.FITSFigure(hdu1, figure=fig)
ff.recenter(xcenter, ycenter, width=wid, height=hei)
ff.set_theme('publication')
#ff.set_system_latex(True)
maxcolor = np.nanmax(hdu1.data)
ff.show_colorscale(cmap='Purples', vmin=0, vmax=5, stretch='sqrt')
#ff.show_contour(mask_hdu, levels=1, colors='yellow', linewidths=0.1)
#ff.show_regions('olay.reg')
ff.show_regions('olay3.reg')
ff.add_colorbar()
ff.colorbar.set_font(size=12)
ff.colorbar.set_pad(0.5)
ff.set_tick_labels_font(size=12)
ff.colorbar.set_axis_label_text('km s$^{-1}$')
ff.colorbar.set_font(size=12)
ff.set_axis_labels_font(size=12)
ff.add_scalebar(0.286, corner='bottom right',
                pad=10)  # degree for 2pc at 400 pc
ff.scalebar.set_label('2 pc')
ff.scalebar.set_font_size(12)
                       c=color)  #, label = label_name)#"tab:blue")
    flux_axarr[0].plot(longitude_list,
                       rolling_mean_Osborn_flux,
                       ls=":",
                       lw=2.5,
                       zorder=3,
                       c=color)  #, label = label_name)

    #flux_axarr.fill_between(longitude_list,rolling_upper_percentile_flux,rolling_lower_percentile_flux, color = "tab:blue", zorder = 2, alpha = 0.7, label = str(flux_percentile)+"% percentile")
    #flux_axarr.fill_between(longitude_list,rolling_upper_percentile_flux,rolling_second_upper_percentile_flux, color = "tab:blue", zorder = 2, alpha = 0.4, label = str(second_flux_percentile)+"% percentile")
    #flux_axarr.fill_between(longitude_list,rolling_lower_percentile_flux,rolling_second_lower_percentile_flux, color = "tab:blue", zorder = 2, alpha = 0.4)

    #bathymetrie_axes.plot(bathymetry_longitude_list,bathymetry_list)
    if cruisename == "emb217":
        flux_axarr[1].set_ylim(
            (np.nanmin(bathymetry_list) - 5, np.nanmax(bathymetry_list)))
        flux_axarr[1].invert_yaxis()
        flux_axarr[1].set_ylabel("pressure [dbar]")
        flux_axarr[1].fill_between(bathymetry_longitude_list,
                                   bathymetry_list,
                                   np.ones(len(bathymetry_list)) *
                                   max(bathymetry_list),
                                   color="lightgrey",
                                   zorder=1,
                                   alpha=0.8,
                                   label="bathymetry")

    #bathymetrie_axes.plot(bathymetry_longitude_list,interval_list[:,0])
    #bathymetrie_axes.plot(bathymetry_longitude_list,interval_list[:,1])
    flux_axarr[1].fill_between(bathymetry_longitude_list,
                               interval_list[:, 0],
def minimum_bounding_rectangle(points):
    """
    Find the smallest bounding rectangle for a set of points.
    Returns a set of points representing the corners of the bounding box.

    :param points: an nx2 matrix of coordinates
    :rval: an nx2 matrix of coordinates
    """
    from scipy.ndimage.interpolation import rotate
    pi2 = np.pi / 2.

    # get the convex hull for the points
    hull_points = points[ConvexHull(points).vertices]

    # calculate edge angles of the convex hull
    edges = hull_points[1:] - hull_points[:-1]
    angles = np.arctan2(edges[:, 1], edges[:, 0])

    angles = np.abs(np.mod(angles, pi2))
    angles = np.unique(angles)

    # find rotation matrices
    # XXX both work
    rotations = np.vstack([
        np.cos(angles),
        np.cos(angles - pi2),
        np.cos(angles + pi2),
        np.cos(angles)
    ]).T
    #     rotations = np.vstack([
    #         np.cos(angles),
    #         -np.sin(angles),
    #         np.sin(angles),
    #         np.cos(angles)]).T
    rotations = rotations.reshape((-1, 2, 2))

    # apply rotations to the hull
    rot_points = np.dot(rotations, hull_points.T)

    # find the bounding points
    min_x = np.nanmin(rot_points[:, 0], axis=1)
    max_x = np.nanmax(rot_points[:, 0], axis=1)
    min_y = np.nanmin(rot_points[:, 1], axis=1)
    max_y = np.nanmax(rot_points[:, 1], axis=1)

    # find the box with the best area
    areas = (max_x - min_x) * (max_y - min_y)
    best_idx = np.argmin(areas)

    # return the best box
    x1 = max_x[best_idx]
    x2 = min_x[best_idx]
    y1 = max_y[best_idx]
    y2 = min_y[best_idx]
    r = rotations[best_idx]

    rval = np.zeros((4, 2))
    rval[0] = np.dot([x1, y2], r)
    rval[1] = np.dot([x2, y2], r)
    rval[2] = np.dot([x2, y1], r)
    rval[3] = np.dot([x1, y1], r)

    area = abs(y2 - y1) * abs(x2 - x1)
    per = 2 * (abs(y2 - y1) + abs(x2 - x1))
    return per, area
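# A short usage sketch for minimum_bounding_rectangle (assumes numpy is
# imported as np, as in the function body); the points are random
# illustration data, not from the original source.
import numpy as np

rng = np.random.default_rng(0)
pts = rng.random((50, 2))
perimeter, area = minimum_bounding_rectangle(pts)
print(perimeter, area)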
Example #55
0
def interpolate2d(x, y, z, points, mode='linear', bounds_error=False):
    """Fundamental 2D interpolation routine

    :param x: 1D array of x-coordinates of the mesh on which to interpolate
    :type x: numpy.ndarray

    :param y: 1D array of y-coordinates of the mesh on which to interpolate
    :type y: numpy.ndarray

    :param z: 2D array of values for each x, y pair
    :type z: numpy.ndarray

    :param points: Nx2 array of coordinates where interpolated values are
        sought
    :type points: numpy.ndarray

    :param mode: Determines the interpolation order.
        Options are:

            * 'constant' - piecewise constant nearest neighbour interpolation
            * 'linear' - bilinear interpolation using the four
              nearest neighbours (default)

    :type mode: str

    :param bounds_error: If True, a BoundsError exception
          will be raised when interpolated values are requested
          outside the domain of the input data. If False (the default),
          nan is returned for those values
    :type bounds_error: bool

    :returns: 1D array with same length as points with interpolated values

    :raises: Exception, BoundsError (see note about bounds_error)

    .. note::
        Input coordinates x and y are assumed to be monotonically increasing,
        but need not be equidistantly spaced. No such assumption regarding
        ordering of points is made.

        z is assumed to have dimension M x N, where M = len(x) and N = len(y).
        In other words it is assumed that the x values follow the first
        (vertical) axis downwards and y values the second (horizontal) axis
        from left to right.

        If this routine is to be used for interpolation of raster grids where
        data is typically organised with longitudes (x) going from left to
        right and latitudes (y) going from bottom to top, then use
        interpolate_raster in this module.
    """

    # Input checks
    validate_mode(mode)
    x, y, z, xi, eta = validate_inputs(x=x,
                                       y=y,
                                       z=z,
                                       points=points,
                                       bounds_error=bounds_error)

    # Identify elements that are outside interpolation domain or NaN
    outside = (xi < x[0]) | (eta < y[0]) | (xi > x[-1]) | (eta > y[-1])
    outside |= numpy.isnan(xi) | numpy.isnan(eta)

    inside = ~outside
    xi = xi[inside]
    eta = eta[inside]

    # Find upper neighbours for each interpolation point
    idx = numpy.searchsorted(x, xi, side='left')
    idy = numpy.searchsorted(y, eta, side='left')

    # Internal check (index == 0 is OK)
    if len(idx) > 0 or len(idy) > 0:
        if (max(idx) >= len(x)) or (max(idy) >= len(y)):
            msg = ('Interpolation point outside domain. '
                   'This should never happen. '
                   'Please email [email protected]')
            raise InaSAFEError(msg)

    # Get the four neighbours for each interpolation point
    x0 = x[idx - 1]
    x1 = x[idx]
    y0 = y[idy - 1]
    y1 = y[idy]

    z00 = z[idx - 1, idy - 1]
    z01 = z[idx - 1, idy]
    z10 = z[idx, idy - 1]
    z11 = z[idx, idy]

    # Coefficients for weighting between lower and upper bounds
    old_set = numpy.seterr(invalid='ignore')  # Suppress warnings
    alpha = (xi - x0) / (x1 - x0)
    beta = (eta - y0) / (y1 - y0)
    numpy.seterr(**old_set)  # Restore

    if mode == 'linear':
        # Bilinear interpolation formula
        dx = z10 - z00
        dy = z01 - z00
        z = z00 + alpha * dx + beta * dy + alpha * beta * (z11 - dx - dy - z00)
    else:
        # Piecewise constant (as verified in input_check)

        # Set up masks for the quadrants
        left = alpha < 0.5
        right = ~left
        lower = beta < 0.5
        upper = ~lower

        lower_left = lower * left
        lower_right = lower * right
        upper_left = upper * left

        # Initialise result array with all elements set to the upper-right
        # neighbour (copy so the z11 array itself is not mutated below)
        z = z11.copy()

        # Then set the other quadrants
        z[lower_left] = z00[lower_left]
        z[lower_right] = z10[lower_right]
        z[upper_left] = z01[upper_left]

    # Self test: interpolated values must not exceed the maximum of the
    # four neighbouring grid values
    if len(z) > 0:
        mz = numpy.nanmax(z)
        mZ = numpy.nanmax([numpy.nanmax(z00), numpy.nanmax(z01),
                           numpy.nanmax(z10), numpy.nanmax(z11)])
        # noinspection PyStringFormat
        msg = ('Internal check failed. Max interpolated value %.15f '
               'exceeds max grid value %.15f ' % (mz, mZ))
        if not (numpy.isnan(mz) or numpy.isnan(mZ)):
            if not mz <= mZ:
                raise InaSAFEError(msg)

    # Populate result with interpolated values for points inside domain
    # and NaN for values outside
    r = numpy.zeros(len(points))
    r[inside] = z
    r[outside] = numpy.nan

    return r
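# A small standalone check (not part of the library above) that the bilinear
# expression used in the 'linear' branch,
#   z = z00 + alpha*dx + beta*dy + alpha*beta*(z11 - dx - dy - z00),
# matches the usual weighted form
#   (1-a)(1-b)*z00 + a(1-b)*z10 + (1-a)b*z01 + ab*z11
# on a single grid cell; all values here are made up for illustration.
import numpy

z00, z10, z01, z11 = 1.0, 3.0, 2.0, 7.0
alpha, beta = 0.25, 0.6

dx = z10 - z00
dy = z01 - z00
z_interp = z00 + alpha * dx + beta * dy + alpha * beta * (z11 - dx - dy - z00)

z_weighted = ((1 - alpha) * (1 - beta) * z00 + alpha * (1 - beta) * z10
              + (1 - alpha) * beta * z01 + alpha * beta * z11)

assert numpy.isclose(z_interp, z_weighted)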
def SDS(df, n=3):
    """Finds the `n` most dissimilar items. In the input matrix, the ith row (and 
    ith column) is an array belonging to item i. The matrix element (i,j) would then be the pairwise 
    dissimilarity metric between items i and j (for example, geometric RMSD between molecular conformers i and j).
    Args:
      df (pandas.DataFrame): Square matrix where each row (and by symmetry, column) is an array 
                             corresponding to a specific item or object, and each element (i,j) the 
                             floating point dissimilarity between items i and j. The element (i,i) must
                             be represented as np.nan (for log-summing).

                             If pairwise data between two                                         
                             items is missing, this can also be represented as np.nan. What will  
                             happen is the second item will automatically be set to np.nan in the 
                             log-summation array once the first of one of the two items is chosen.
                             Thus, the second item will never be chosen. In this same manner,     
                             entire missing items can be represented as arrays of np.nan,         
                             as a trick to preserve externally related indexing.                  

      n (int): Dissimilar set size to find.
               1 < n < N, where N is the full population size.
    Returns:
      pandas.DataFrame containing indices of the items found in the dissimilar set of size `n`. 
    """
    # Check df matrix is square
    N = len(df.index)
    assert N == len(df.columns)
    
    # Reduce n to the maximum number of items if n is too large, otherwise the search will fail with an error.
    M = len(df.dropna(how='all'))
    if n > M:
        n = M

    print(f'Starting SDS search for most dissimilar set of size n = {n}...')

    # First grab matrix indices of the two most dissimilar geometries
    row_mx = []
    
    for i in range(N):
        row_mx.append(np.nanmax(df.loc[i]))
    ind1 = np.nanargmax(row_mx)
    ind2 = ind1 + 1 + np.nanargmax(row_mx[ind1+1:])

    # Initialize the dissimilar matrix with the two most dissimilar
    disarray = [np.array(df.loc[ind1]), np.array(df.loc[ind2])]
    indices = [ind1, ind2]
    
    # Find the n-2 other most dissimilar items.
    # Multiply the rows of the (n-1)-item dissimilar set, or
    # use log-summing if N is large (e.g. 50000) to avoid
    # exceeding floating-point machine precision.
    # This script uses log-summing.
    # The index of the largest value is the index of the nth
    # item, which completes the nth dissimilar set.

    # Initialize array for log summing
    logsum = [0 for x in range(N)]
    logsum += np.log(disarray[0])
    
    for i in range(n-2):
        logsum += np.log(disarray[-1])
        indn = np.nanargmax(logsum)
        indices.append(indn)
        disarray.append(np.array(df.loc[indn]))

    return_df = pd.DataFrame([indices], index=['matrix index']).T

    print('Finished')
    return return_df   
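# A small usage sketch for SDS, assuming numpy and pandas are available as
# np and pd. The matrix is symmetric with np.nan on the diagonal, as the
# docstring requires; the values are arbitrary illustration data.
import numpy as np
import pandas as pd

dissim = np.array([[np.nan, 1.0, 4.0, 2.0],
                   [1.0, np.nan, 3.0, 5.0],
                   [4.0, 3.0, np.nan, 1.5],
                   [2.0, 5.0, 1.5, np.nan]])
df = pd.DataFrame(dissim)
print(SDS(df, n=3))  # matrix indices of the 3 mutually most dissimilar items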
def extractSimData(sim, min_frames_visible=MIN_FRAMES_VISIBLE, check_only=False, param_class_name=None, \
    postprocess_params=None):
    """ Extract input parameters and model outputs from the simulation container and normalize them. 

    Arguments:
        sim: [ErosionSimContainer object] Container with the simulation.

    Keyword arguments:
        min_frames_visible: [int] Minimum number of frames above the limiting magnitude
        check_only: [bool] Only check if the simulation satisfies the filters; don't compute everything.
            Speeds up the evaluation. False by default.
        param_class_name: [str] Override the simulation parameters object with an instance of the given
            class. An exact name of the class needs to be given.
        postprocess_params: [list] A list of the limiting magnitudes for the wide and narrow fields, and the
            delay in length measurements. None by default, in which case they will be generated herein.

    Return: 
        - None if the simulation does not satisfy filter conditions.
        - postprocess_params if check_only=True and the simulation satisfies the conditions.
        - params, input_data_normed, simulated_data_normed if check_only=False and the simulation satisfies 
            the conditions.

    """

    # Create a fresh instance of the system parameters if the same parameters are used as in the simulation
    if param_class_name is None:
        #params_obj = getattr(GenerateSimulations, sim.params.__class__.__name__)
        params = globals()[sim.params.__class__.__name__]()

    # Override the system parameters using the given class
    else:
        params = globals()[param_class_name]()



    ### DRAW LIMITING MAGNITUDE AND LENGTH DELAY ###

    # If the drawn values have already been given, use them
    if postprocess_params is not None:
        lim_mag, lim_mag_len, len_delay = postprocess_params

    else:

        # Draw limiting magnitude and length end magnitude
        lim_mag     = np.random.uniform(params.lim_mag_brightest, params.lim_mag_faintest)
        lim_mag_len = np.random.uniform(params.lim_mag_len_end_brightest, params.lim_mag_len_end_faintest)

        # Draw the length delay
        len_delay = np.random.uniform(params.len_delay_min, params.len_delay_max)

        postprocess_params = [lim_mag, lim_mag_len, len_delay]


    lim_mag_faintest  = np.max([lim_mag, lim_mag_len])
    lim_mag_brightest = np.min([lim_mag, lim_mag_len])

    ### ###

    # Fix NaN values in the simulated magnitude
    sim.simulation_results.abs_magnitude[np.isnan(sim.simulation_results.abs_magnitude)] \
        = np.nanmax(sim.simulation_results.abs_magnitude)


    # Get indices that are above the faintest limiting magnitude
    indices_visible = sim.simulation_results.abs_magnitude <= lim_mag_faintest

    # If no points were visible, skip this solution
    if not np.any(indices_visible):
        return None

    ### CHECK METEOR VISIBILITY WITH THE BRIGHTER (DETECTION) LIMITING MAGNITUDE ###
    ###     (in the CAMO widefield camera)                                       ###

    # Get indices of magnitudes above the brighter limiting magnitude
    indices_visible_brighter = sim.simulation_results.abs_magnitude <= lim_mag_brightest

    # If no points were visible, skip this solution
    if not np.any(indices_visible_brighter):
        return None

    # Compute the minimum time the meteor needs to be visible
    min_time_visible = min_frames_visible/params.fps + len_delay

    time_lim_mag_bright  = sim.simulation_results.time_arr[indices_visible_brighter]
    time_lim_mag_bright -= time_lim_mag_bright[0]

    # Check if the minimum time is satisfied
    if np.max(time_lim_mag_bright) < min_time_visible:
        return None

    ### ###

    # Get the first index after the magnitude reaches visibility in the wide field
    index_first_visibility = np.argwhere(indices_visible_brighter)[0][0]

    # Set all visibility indices before the first one visible in the wide field to False
    indices_visible[:index_first_visibility] = False


    # Select time, magnitude, height, and length above the visibility limit
    time_visible = sim.simulation_results.time_arr[indices_visible]
    mag_visible  = sim.simulation_results.abs_magnitude[indices_visible]
    ht_visible   = sim.simulation_results.brightest_height_arr[indices_visible]
    len_visible  = sim.simulation_results.brightest_length_arr[indices_visible]


    # Resample the time to the system FPS
    mag_interpol = scipy.interpolate.CubicSpline(time_visible, mag_visible)
    ht_interpol  = scipy.interpolate.CubicSpline(time_visible, ht_visible)
    len_interpol = scipy.interpolate.CubicSpline(time_visible, len_visible)

    # Create a new time array according to the FPS
    time_sampled = np.arange(np.min(time_visible), np.max(time_visible), 1.0/params.fps)

    # Create new mag, height and length arrays at FPS frequency
    mag_sampled = mag_interpol(time_sampled)
    ht_sampled = ht_interpol(time_sampled)
    len_sampled = len_interpol(time_sampled)


    # Normalize time to zero
    time_sampled -= time_sampled[0]


    ### SIMULATE CAMO tracking delay for length measurements ###

    # Zero out all length measurements before the length delay (to simulate the delay of CAMO
    #   tracking)
    len_sampled[time_sampled < len_delay] = 0

    ###

    # Set all magnitudes fainter than the limiting magnitude to the faintest end-magnitude
    mag_sampled[mag_sampled > lim_mag] = params.lim_mag_len_end_faintest


    # Normalize the first length to zero
    first_length_index = np.argwhere(time_sampled >= len_delay)[0][0]
    len_sampled[first_length_index:] -= len_sampled[first_length_index]


    # ### Plot simulated data
    # fig, (ax1, ax2, ax3) = plt.subplots(nrows=3)
    
    # ax1.plot(time_sampled, mag_sampled)
    # ax1.invert_yaxis()
    # ax1.set_ylabel("Magnitude")

    # ax2.plot(time_sampled, len_sampled/1000)
    # ax2.set_ylabel("Length (km)")

    # ax3.plot(time_sampled, ht_sampled/1000)
    # ax3.set_xlabel("Time (s)")
    # ax3.set_ylabel("Height (km)")

    # plt.subplots_adjust(hspace=0)

    # plt.show()

    # ### ###

    

    # Check that there are any length measurements
    if not np.any(len_sampled > 0):
        return None


    # If the simulation should only be checked that it's good, return the postprocess parameters used to 
    #   generate the data
    if check_only:
        return postprocess_params


    ### ADD NOISE ###

    # Add noise to magnitude data
    mag_sampled[mag_sampled <= lim_mag] += np.random.normal(loc=0.0, scale=params.mag_noise, \
        size=len(mag_sampled[mag_sampled <= lim_mag]))

    # Add noise to length data
    len_sampled[first_length_index:] += np.random.normal(loc=0.0, scale=params.len_noise, \
        size=len(len_sampled[first_length_index:]))

    ### ###

    # Construct input data vector with normalized values
    input_data_normed = sim.getNormalizedInputs()


    # Normalize simulated data
    ht_normed, len_normed, mag_normed = sim.normalizeSimulations(params, ht_sampled, len_sampled, mag_sampled)


    # Generate vector with simulated data
    simulated_data_normed = np.vstack([padOrTruncate(ht_normed, params.data_length), \
        padOrTruncate(len_normed, params.data_length), \
        padOrTruncate(mag_normed, params.data_length)])


    # Return input data and results
    return params, input_data_normed, simulated_data_normed
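# A minimal standalone sketch (with made-up data) of the resample-to-FPS
# step used above: cubic-spline interpolation of an irregularly sampled
# light curve onto a regular time grid at a camera frame rate. The fps
# value and the sample arrays are assumptions for illustration only.
import numpy as np
import scipy.interpolate

fps = 80.0
time_visible = np.array([0.000, 0.013, 0.031, 0.044, 0.062, 0.080])
mag_visible = np.array([6.5, 6.1, 5.4, 5.0, 4.7, 4.9])

mag_interpol = scipy.interpolate.CubicSpline(time_visible, mag_visible)
time_sampled = np.arange(time_visible.min(), time_visible.max(), 1.0 / fps)
mag_sampled = mag_interpol(time_sampled)
print(time_sampled, mag_sampled)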
Example #58
0
    def mod_u_matrix(prototype_density, avg_preictal_times):

        raw_dens = np.sum(prototype_density, axis=1)
        norm_times = avg_preictal_times / np.nanmax(avg_preictal_times)

        u_matrix = SOMPY.visualization.umatrix.UMatrixView(
            *SOM.codebook.mapsize, title='SOM Visualization',
            text_size=10).build_u_matrix(SOM)

        prep_H = np.argmax(prototype_density, axis=1)
        prep_H = label_hue_arr[prep_H]
        prep_H[prep_H == label_hue_arr[label_indices['Preictal']]] -= \
            .15 * norm_times[prep_H == label_hue_arr[label_indices['Preictal']]]
        H = np.reshape(prep_H, SOM.codebook.mapsize)

        S_prep = 0.75 * np.ones_like(u_matrix.flatten())
        S_prep[raw_dens == 0] = 0
        S = np.reshape(S_prep, SOM.codebook.mapsize)

        V = 0.85 * (1 - u_matrix / np.max(u_matrix))

        image_mod_mat = hsv2rgb(np.stack((H, S, V), axis=2))
        plt.subplot(1, 2, 1)
        plt.imshow(image_mod_mat)
        plt.title('Prototype classes and Neighborhood Distances')

        proj = SOM.project_data(SOM.data_raw)
        coord = SOM.bmu_ind_to_xy(proj)

        mn = np.min(u_matrix.flatten())
        # mx  =  np.max(u_matrix.flatten())
        std = np.std(u_matrix.flatten())
        md = np.median(u_matrix.flatten())
        mx = md + 0.5 * std
        plt.contour(u_matrix,
                    np.linspace(mn, mx, 15),
                    linewidths=0.7,
                    cmap=plt.cm.get_cmap('Blues'))
        plt.scatter(coord[:, 1],
                    coord[:, 0],
                    s=2,
                    alpha=1.,
                    c='Gray',
                    marker='o',
                    cmap='jet',
                    linewidths=3,
                    edgecolor='Gray')
        plt.axis('off')

        preictal_hues = np.linspace(label_hues['Preictal'],
                                    label_hues['Preictal'] - .15, 100)
        prelim_colormap = np.squeeze(
            hsv2rgb(
                np.dstack((preictal_hues, np.full_like(preictal_hues, .75),
                           np.full_like(preictal_hues, .85)))))
        cmap = mpl.colors.ListedColormap(prelim_colormap)
        ax, _ = mpl.colorbar.make_axes(plt.gca(), shrink=0.75)
        cbar = mpl.colorbar.ColorbarBase(
            ax,
            cmap=cmap,
            norm=mpl.colors.Normalize(vmin=np.nanmin(avg_preictal_times),
                                      vmax=np.nanmax(avg_preictal_times)))

        plt.subplot(1, 2, 2)
        plt.imshow(np.log(np.reshape(raw_dens, SOM.codebook.mapsize) + .05),
                   cmap='gray')
        plt.scatter(coord[:, 1],
                    coord[:, 0],
                    s=2,
                    alpha=1.,
                    c='Gray',
                    marker='o',
                    cmap='jet',
                    linewidths=3,
                    edgecolor='Gray')
        plt.title('Prototype Density Map')
        plt.axis('off')
Example #59
0
    def __init__(self,
                 subcase_id,
                 header,
                 title,
                 location,
                 scalar,
                 mask_value=None,
                 nlabels=None,
                 labelsize=None,
                 ncolors=None,
                 colormap='jet',
                 data_format=None,
                 uname='GuiResult'):
        """
        subcase_id : int
            the flag that points to self.subcases for a message
        header : str
            the sidebar word
        title : str
            the legend title
        location : str
            node, centroid
        scalar : (n,) ndarray
            the data to make a contour plot with
        data_format : str
            the type of data result (e.g. '%i', '%.2f', '%.3f')
        uname : str
            some unique name for ...
        """
        GuiResultCommon.__init__(self)

        self.subcase_id = subcase_id
        #assert self.subcase_id > 0, self.subcase_id

        self.title = title
        self.header = header
        #self.scale = scale
        self.location = location
        assert location in ['node', 'centroid'], location
        self.subcase_id = subcase_id
        self.uname = uname

        if scalar is None:
            raise RuntimeError('title=%r scalar is None...' % self.title)
        assert scalar.shape[0] == scalar.size, 'shape=%s size=%s' % (str(
            scalar.shape), scalar.size)
        self.scalar = scalar
        #self.data_type = self.dxyz.dtype.str # '<c8', '<f4'
        self.data_type = self.scalar.dtype.str  # '<c8', '<f4'
        self.is_real = self.data_type in REAL_TYPES
        self.is_complex = not self.is_real
        self.nlabels = nlabels
        self.labelsize = labelsize
        self.ncolors = ncolors
        self.colormap = colormap

        #print('title=%r data_type=%r' % (self.title, self.data_type))
        if self.data_type in INT_TYPES:
            self.data_format = '%i'
        elif data_format is None:
            self.data_format = '%.2f'
        else:
            self.data_format = data_format

        self.title_default = self.title
        self.header_default = self.header
        self.data_format_default = self.data_format

        self.min_default = np.nanmin(self.scalar)
        self.max_default = np.nanmax(self.scalar)
        if self.data_type in INT_TYPES:
            # turns out you can't have a NaN/inf with an integer array
            # we need to recast it
            if mask_value is not None:
                inan_short = np.where(self.scalar == mask_value)[0]
                if len(inan_short):
                    # overly complicated way to allow us to use ~inan to invert the array
                    inan = np.in1d(np.arange(len(self.scalar)), inan_short)
                    inan_remaining = self.scalar[~inan]

                    self.scalar = np.asarray(self.scalar, 'f')
                    self.data_type = self.scalar.dtype.str
                    self.data_format = '%.0f'
                    self.scalar[inan] = np.nan
                    self.min_default = inan_remaining.min()
                    self.max_default = inan_remaining.max()
        else:
            # handling VTK NaN oddity
            # filtering the inf values and replacing them with NaN
            # 1.#R = inf
            # 1.#J = nan
            ifinite = np.isfinite(self.scalar)
            if not np.all(ifinite):
                self.scalar[~ifinite] = np.nan
                try:
                    self.min_default = self.scalar[ifinite].min()
                except ValueError:
                    print(self.title)
                    print(self.scalar)
                    raise
                self.max_default = self.scalar[ifinite].max()
        self.min_value = self.min_default
        self.max_value = self.max_default
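# A short standalone sketch (not the class itself) of the mask-value
# handling used in the integer branch above: an integer array cannot hold
# NaN, so it is recast to float, masked entries are replaced with NaN, and
# the min/max defaults are taken from the remaining values. The array and
# mask_value below are illustration data.
import numpy as np

scalar = np.array([3, 7, -1, 5, -1], dtype=int)
mask_value = -1

inan = scalar == mask_value
remaining = scalar[~inan]

scalar = np.asarray(scalar, 'f')
scalar[inan] = np.nan

min_default, max_default = remaining.min(), remaining.max()
print(scalar, min_default, max_default)  # [ 3.  7. nan  5. nan] 3 7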
# Print some summary stats:
print(f"Count of tracks: {len(df_with_crossings)}")
for gidx,name in enumerate(gates['name']):
    if ('levee' in name) or (name=='point'): continue
    t_first=df_with_crossings[name+"_first"]
    t_last=df_with_crossings[name+"_last"]
    n_crossed=t_first.count()
    n_multiple=(t_first<t_last).sum()
    n_uncross=(df_with_crossings[name+"_uncrossed"]>0).sum()
    print(f"  {name:12}: {n_crossed:3} tracks, {n_multiple:2} recross, {n_uncross} uncross")

##

track_subs=[]
for idx,row in df_with_crossings.iterrows():
    t_min=np.nanmax( [row['top_of_array_last'],
                      row['track']['tnum'].min()] )
    # Ah - sj_lower_first is negative??
    t_max=np.nanmin( [row['sj_lower_first'],
                      row['hor_lower_first'],
                      row['track']['tnum'].max()])
    sel=( (t_min<=row['track']['tnum'].values)
          & (row['track']['tnum'].values<=t_max ) )
    track_sub=row['track'].iloc[sel,:].copy()
    track_subs.append(track_sub)

df_trim_crossings=df_with_crossings.copy()
df_trim_crossings['track']=track_subs
valid=np.array( [len(t)>0 for t in track_subs] )
df_trim_crossings=df_trim_crossings.iloc[valid,:].copy()

# 134 to 134 tracks