Example #1
    def save_px_df(self, id_suffix):
        """ Save up regions intensity pixel-wise, for best mask and corresponding pixel-wise ΔF/F image

        """
        self.px_df = pd.DataFrame(columns=['ID',           # recording ID 
                                           'stim',         # stimulus number
                                           'mask_region',  # mask region (1 for master or down)
                                           'int',          # px intensity
                                           'delta'])       # px ΔF/F

        best_up_mask = self.up_diff_mask[self.best_up_mask_index]
        best_up_mask_prop = measure.regionprops(best_up_mask)

        # best_delta_img = self.peak_deltaF_series[self.best_up_mask_index]
        # best_img = self.stim_mean_series[self.best_up_mask_index]

        for stim_img_num in range(len(self.stim_mean_series)):
            stim_mean_img = self.stim_mean_series[stim_img_num]
            stim_deltaF_img = self.peak_deltaF_series[stim_img_num]
            for i in best_up_mask_prop:  # calculate profiles for each up region
                best_up_mask_region = best_up_mask == i.label 
                for px_int, px_delta in zip(ma.compressed(ma.masked_where(~best_up_mask_region, stim_mean_img)),
                                            ma.compressed(ma.masked_where(~best_up_mask_region, stim_deltaF_img))): 
                    point_series = pd.Series([f'{self.img_name}{id_suffix}',  # recording ID
                                              stim_img_num+1,                 # stimulus number  
                                              i.label,                        # mask region
                                              px_int,                         # px intensity
                                              px_delta],                      # px ΔF/F
                                            index=self.px_df.columns)
                    # DataFrame.append was removed in pandas 2.0; build a one-row frame and concat
                    self.px_df = pd.concat([self.px_df, point_series.to_frame().T],
                                           ignore_index=True)

        logging.info(f'Recording profile data frame {self.px_df.shape} created')
        return self.px_df
Example #2
 def calcBBScore(self, field, attempt=-1):
     tm = self.ClasData[attempt]
     mean = tm['BBVpp'].mean()
     lothresh = mean * self.MeasSettings['MinVppFracMean'][0]
     hithresh = mean * self.MeasSettings['MaxVppFracMean'][0]
     dat = []
     for r in range(0, len(self.meas_rows)):
         v = tm[tm['Row'] == r][field].values
         v_ma = ma.masked_outside(v, lothresh, hithresh)
         dat.append(v_ma)
     average = ma.masked_array(dat).mean(axis=0)
     bb = ma.compressed(average)
     mask = average.mask
     scores = []
     for c in self.Candidates:
         field = c + '_bbvpp'
         k = ma.compressed(
             ma.masked_array(self.bbvpp_kernels[field][0:len(average)],
                             mask=mask))
         #           print('len(bb): ', len(bb), ' len(k): ', len(k))
         score = np.corrcoef(bb, k)[0, 1]
         scores.append(score)
     self.BBScores = scores
     #self.nBB  = ma.count(average)
     return dat, average, scores
Example #3
def Fill2ThetaAzimuthMap(masks, TA, tam, image):
    'Needs a doc string'
    Zlim = masks['Thresholds'][1]
    rings = masks['Rings']
    arcs = masks['Arcs']
    TA = np.dstack((ma.getdata(TA[1]), ma.getdata(TA[0]),
                    ma.getdata(TA[2])))  #azimuth, 2-theta, dist
    tax, tay, tad = np.dsplit(TA, 3)  #azimuth, 2-theta, dist**2/d0**2
    for tth, thick in rings:
        tam = ma.mask_or(
            tam.flatten(),
            ma.getmask(
                ma.masked_inside(tay.flatten(), max(0.01, tth - thick / 2.),
                                 tth + thick / 2.)))
    for tth, azm, thick in arcs:
        tamt = ma.getmask(
            ma.masked_inside(tay.flatten(), max(0.01, tth - thick / 2.),
                             tth + thick / 2.))
        tama = ma.getmask(ma.masked_inside(tax.flatten(), azm[0], azm[1]))
        tam = ma.mask_or(tam.flatten(), tamt * tama)
    taz = ma.masked_outside(image.flatten(), int(Zlim[0]), Zlim[1])
    tabs = np.ones_like(taz)
    tam = ma.mask_or(tam.flatten(), ma.getmask(taz))
    tax = ma.compressed(ma.array(tax.flatten(), mask=tam))  #azimuth
    tay = ma.compressed(ma.array(tay.flatten(), mask=tam))  #2-theta
    taz = ma.compressed(ma.array(taz.flatten(), mask=tam))  #intensity
    tad = ma.compressed(ma.array(tad.flatten(), mask=tam))  #dist**2/d0**2
    tabs = ma.compressed(ma.array(
        tabs.flatten(), mask=tam))  #ones - later used for absorption corr.
    return tax, tay, taz, tad, tabs
Example #4
    def __init__(self, stampdata, disk, annulus, center, disk_radius, annulus_radii, empty = False):
        """
        Constructor for a Button object

        Arguments:
            (np.ndarray) stampdata: the original stamp data
            (np.ndarray) disk: a boolean mask for the stampdata FALSE within the found chamber
            (np.ndarray) annulus: a boolean mask for the stampdata FALSE within the button annulus 
                (local background)
            (tuple) center: chamber center coordinates, with respect to stampdata coord. system
            (int) disk_radius: button radius
            (tuple) annulus radii: inner and outer radii of the annulus (innerrad, outerrad)
            (bool) empty: flag for empty button

        Returns:
            None
        
        """

        self.blankFlag = empty
        self.stampdata = stampdata  # uint16 ndarray
        self.disk = disk # a mask
        self.disk_intensities = ma.compressed(self.get_disk())
        self.annulus = annulus # a mask
        self.annulus_intensities = ma.compressed(self.get_annulus())
        try:
            self.annulus_to_disk_ratio = len(self.annulus_intensities) / len(self.disk_intensities)
        except ZeroDivisionError:
            warnings.warn('Annulus ratio could not be calculated: disk intensities have length zero. '
                          'Annulus to disk ratio is NaN')
            self.annulus_to_disk_ratio = np.nan
        self.center = center
        self.disk_radius = disk_radius
        self.annulus_radii = annulus_radii
        self.summary = self.summarize()
Example #5
    def set_gen3data(self, Apaths, Ppaths, Xdet, Zdet, omega, mask=None):

        self.omega = omega
        self.wbyv = omega/self.v
        self.mask = mask
        self.amshape = None if mask is None else mask.shape

        for Apath, Ppath in zip(Apaths, Ppaths):
            # read the data
            Amp = bp.io.read_frame(Apath)
            Pha = bp.io.read_frame(Ppath)
            if self.mask is None:  # no mask given: treat every pixel as valid
                self.mask = np.zeros(Amp.shape, dtype=bool)
                self.amshape = self.mask.shape

            # mask all data arrays
            Amp = ma.compressed(ma.masked_array(Amp, self.mask))
            Pha = ma.compressed(ma.masked_array(Pha, self.mask))

            try:
                Amps = np.vstack((Amps, Amp))
                Phas = np.vstack((Phas, Pha))
            except NameError:  # first iteration: nothing stacked yet
                Amps = Amp
                Phas = Pha
        self.Amps = Amps
        self.Phas = Phas

        # masked detector positions
        self.dIdx = np.arange(len(Xdet))
        self.dIdx = ma.compressed(ma.masked_array(self.dIdx.reshape(self.amshape), self.mask))
        self.Xdet = ma.compressed(ma.masked_array(Xdet.reshape(self.amshape), self.mask))
        self.Zdet = ma.compressed(ma.masked_array(Zdet.reshape(self.amshape), self.mask))
Example #6
    def calculateMeans(self, synMean, synMin, synMed, synMax, synMinCP):
        """
        Calculate mean, median, minimum, maximum and percentiles of pressure
        values from synthetic events.

        :param synMean: `numpy.ndarray`
        :param synMin: `numpy.ndarray`
        :param synMed: `numpy.ndarray`
        :param synMax: `numpy.ndarray`
        :param synMinCP: `numpy.ndarray`

        """
        synMean = ma.masked_values(synMean, -9999.)
        synMin = ma.masked_values(synMin, -9999.)
        synMed = ma.masked_values(synMed, -9999.)
        synMax = ma.masked_values(synMax, -9999.)

        self.synMean = ma.mean(synMean, axis=0)
        self.synMed = ma.mean(synMed, axis=0)
        self.synMin = ma.mean(synMin, axis=0)
        self.synMax = ma.mean(synMax, axis=0)

        self.synMeanUpper = percentile(ma.compressed(synMean), per=95, axis=0)
        self.synMeanLower = percentile(ma.compressed(synMean), per=5, axis=0)
        self.synMinUpper = percentile(ma.compressed(synMin), per=95, axis=0)
        self.synMinLower = percentile(ma.compressed(synMin), per=5, axis=0)

        self.synMinCPDist = np.mean(synMinCP, axis=0)
        self.synMinCPLower = percentile(synMinCP, per=5, axis=0)
        self.synMinCPUpper = percentile(synMinCP, per=95, axis=0)
        r = list(np.random.uniform(high=synMean.shape[0], size=3).astype(int))
        self.synRandomMinima = synMean[r, :, :]
Example #7
 def fill_from_netcdf(self, rec_num, netcdf):
     """
     Used by handle_station to get the records from netcdf for comparing with the
     records from the database.
     """
     netcdf = {}
     if not ma.getmask(self.ncdf_data_set["latitude"][rec_num]):
         netcdf["latitude"] = ma.compressed(
             self.ncdf_data_set["latitude"][rec_num])[0]
     else:
         netcdf["latitude"] = None
     if not ma.getmask(self.ncdf_data_set["longitude"][rec_num]):
         netcdf["longitude"] = ma.compressed(
             self.ncdf_data_set["longitude"][rec_num])[0]
     else:
         netcdf["longitude"] = None
     if not ma.getmask(self.ncdf_data_set["elevation"][rec_num]):
         netcdf["elevation"] = ma.compressed(
             self.ncdf_data_set["elevation"][rec_num])[0]
     else:
         netcdf["elevation"] = None
     # pylint: disable=no-member
     netcdf["description"] = str(
         nc.chartostring(self.ncdf_data_set["locationName"][rec_num]))
     netcdf["name"] = str(
         nc.chartostring(self.ncdf_data_set["stationName"][rec_num]))
     return netcdf
Example #8
def mflag(arrayname, uvwbl, uvdata, flaglist):
    """
        Return flagged uvdata                
    """
    Nant = 40 if arrayname == 'm1' else 60
    Nbl = Nant * (Nant - 1) // 2  # integer division: Nbl is used below for slicing
    #    logger.debug('Nbl is %d',Nbl)
    dout_mask = ma.masked_array(list(
        zip(uvwbl['u'], uvwbl['v'], uvwbl['w'],
            np.hstack((uvdata[0:Nbl], np.conj(uvdata[0:Nbl]))),
            np.ones(Nbl * 2))),
                                dtype=ri.dtk)
    flagarr = mi.flagind(Nant, flaglist)
    flagarr = np.hstack((flagarr, flagarr + Nbl))  # flag (-u,-v)
    for ifl in flagarr:
        dout_mask[ifl] = ma.masked


#    Np = ma.count(dout_mask.dtype.names[0])
#    logger.debug('Np is %d',Np)
#    logger.debug('dout shape is %d',dout.shape[0])
    dout_flag = np.array(list(
        zip(ma.compressed(dout_mask['u']), ma.compressed(dout_mask['v']),
            ma.compressed(dout_mask['w']), ma.compressed(dout_mask['x']),
            ma.compressed(dout_mask['wgt']))),
                         dtype=ri.dtk)
    return dout_flag
Example #9
def spike_flag(data,masked,freq,percent):
    """
    Flags out RFI spikes using an 11-bin filter.
    Can be used with either time or freq.
    percent is a percentage level cut (100 would be twice the 11-bin average).
    Needs to be applied to masked data.
    """
    new_mask = np.zeros(len(data))
    new_array = ma.array(data,mask=masked)
    new_comp = ma.compressed(new_array)
    freq_array = ma.array(freq,mask=masked)
    new_freq = ma.compressed(freq_array)
    for i in range(0,len(data)):
        if masked[i]==1.0:
            new_mask[i] = 1.0

    for i in range(5,len(new_comp)-5):
        group = new_comp[i-5]+new_comp[i-4]+new_comp[i-3]+new_comp[i-2]+new_comp[i-1]+new_comp[i]+new_comp[i+1]+new_comp[i+2]+new_comp[i+3]+new_comp[i+4]+new_comp[i+5]
        mean_group = group/11.
        if new_comp[i]/mean_group>=(1+percent/100.):
            comp_freq = new_freq[i]
            for j in range(0,len(freq)):
                if freq[j]==comp_freq:
                    index=j
            new_mask[index]= 1.0
        elif new_comp[i]/mean_group<=1/(1+percent/100.):
            comp_freq = new_freq[i]
            for j in range(0,len(freq)):
                if freq[j]==comp_freq:
                    index=j
            new_mask[index]= 1.0
   
    return new_mask
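A minimal usage sketch for spike_flag; the spectrum, mask, and frequency grid below are made up for illustration:

import numpy as np

data = np.ones(100)
data[50] = 3.0                      # inject a spike well above the local mean
masked = np.zeros(100)              # nothing pre-flagged
freq = np.linspace(40., 90., 100)
new_mask = spike_flag(data, masked, freq, percent=50)
# new_mask[50] is now 1.0: the spike exceeds the 11-bin average by more than 50%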
Example #10
    def set_gen3data(self, Apath, Ppath, Xdet, Zdet, 
            omega, mask_cutoff, toprowsmask=0, polygonmaskfile=None):

        self.omega = omega
        self.wbyv = omega/self.v

        # read the data
        self.Amp = bp.io.read_frame(Apath)
        self.Pha = bp.io.read_frame(Ppath)
        self.amshape = self.Amp.shape

        # set the mask
        self.mask = np.ones_like(self.Amp, dtype='bool')
        self.mask[self.Amp > mask_cutoff*np.max(self.Amp)] = False
        # mask top rows
        if toprowsmask > 0:
            self.mask[0:toprowsmask, :] = True  # mask the top `toprowsmask` rows (the previous decrement masked one row too few)

        # mask all data arrays
        self.Amp = ma.compressed(ma.masked_array(self.Amp, self.mask))
        self.Pha = ma.compressed(ma.masked_array(self.Pha, self.mask))

        # masked detector positions
        self.dIdx = np.arange(len(Xdet))
        self.dIdx = ma.compressed(ma.masked_array(self.dIdx.reshape(self.amshape), self.mask))
        self.Xdet = ma.compressed(ma.masked_array(Xdet.reshape(self.amshape), self.mask))
        self.Zdet = ma.compressed(ma.masked_array(Zdet.reshape(self.amshape), self.mask))
Example #11
def plot_scatterplot(prefix, feature='asymmetry1', vmin=0, vmax=1, resolution=512, rows=4, cols=4,
                     dotsize=10, figsize=(12, 10), upload=True, remote_folder = "01_18_Experiment",
                     bucket='ccurtis.data'):
    """
    Plot scatterplot of trajectories in video with colors corresponding to features.

    Parameters
    ----------
    prefix: string
        Prefix of file name to be plotted e.g. features_P1.csv prefix is P1.
    feature: string
        Feature to be plotted.  See features_analysis.py
    vmin: float64
        Lower intensity bound for heatmap.
    vmax: float64
        Upper intensity bound for heatmap.
    resolution: int
        Resolution of base image.  Only needed to calculate bounds of image.
    rows: int
        Rows of base images used to build tiled image.
    cols: int
        Columns of base images used to build tiled images.
    dotsize: int
        Size of the plotted points.
    figsize: tuple of int
        Size of the output figure.
    upload: boolean
        True if you want to upload to s3.
    remote_folder: string
        Folder in the s3 bucket to upload to.
    bucket: string
        Name of the s3 bucket.

    """
    # Inputs
    # ----------
    merged_ft = pd.read_csv('features_{}.csv'.format(prefix))
    string = feature
    leveler = merged_ft[string]
    t_min = vmin
    t_max = vmax
    ires = resolution

    norm = mpl.colors.Normalize(t_min, t_max, clip=True)
    mapper = cm.ScalarMappable(norm=norm, cmap=cm.viridis)

    zs = ma.masked_invalid(merged_ft[string])
    zs = ma.masked_where(zs <= t_min, zs)
    zs = ma.masked_where(zs >= t_max, zs)
    to_mask = ma.getmaskarray(zs)  # getmaskarray always returns a full boolean array
    zs = ma.compressed(zs)
    xs = ma.compressed(ma.masked_where(to_mask, merged_ft['X'].astype(int)))
    ys = ma.compressed(ma.masked_where(to_mask, merged_ft['Y'].astype(int)))

    fig = plt.figure(figsize=figsize)
    plt.scatter(xs, ys, c=zs, s=dotsize)
    mapper.set_array(10)
    plt.colorbar(mapper)
    plt.xlim(0, ires*cols)
    plt.ylim(0, ires*rows)
    plt.axis('off')

    print('Plotted {} scatterplot successfully.'.format(prefix))
    outfile = 'scatter_{}_{}.png'.format(feature, prefix)
    fig.savefig(outfile, bbox_inches='tight')
    if upload:
        aws.upload_s3(outfile, remote_folder+'/'+outfile, bucket_name=bucket)
Example #12
    def calculateMeans(self):
        self.synHist = ma.masked_values(self.synHist, -9999.)
        self.synHistMean = ma.mean(self.synHist, axis=0)
        self.medSynHist = ma.median(self.synHist, axis=0)

        self.synHistUpper = percentile(ma.compressed(self.synHist),
                                       per=95, axis=0)
        self.synHistLower = percentile(ma.compressed(self.synHist),
                                       per=5, axis=0)
Example #13
 def Fill2ThetaMap(data,TA,image):
     import numpy.ma as ma
     Zmin = data['Zmin']
     Zmax = data['Zmax']
     tax,tay = TA    # 2-theta & yaxis
     taz = ma.masked_outside(image.flatten()-Zmin,0,Zmax-Zmin)
     tam = ma.getmask(taz)
     tax = ma.compressed(ma.array(tax.flatten(),mask=tam))
     tay = ma.compressed(ma.array(tay.flatten(),mask=tam))
     taz = ma.compressed(ma.array(taz.flatten(),mask=tam))
     del(tam)
     return tax,tay,taz
Example #14
 def Fill2ThetaMap(data, TA, image):
     import numpy.ma as ma
     Zmin = data['Zmin']
     Zmax = data['Zmax']
     tax, tay = TA  # 2-theta & yaxis
     taz = ma.masked_outside(image.flatten() - Zmin, 0, Zmax - Zmin)
     tam = ma.getmask(taz)
     tax = ma.compressed(ma.array(tax.flatten(), mask=tam))
     tay = ma.compressed(ma.array(tay.flatten(), mask=tam))
     taz = ma.compressed(ma.array(taz.flatten(), mask=tam))
     del (tam)
     return tax, tay, taz
Example #15
def aboveThreshold(data, threshold):
    from numpy.ma import masked_where, compressed  # the bare names below come from numpy.ma
    if isinstance(data, list):
        new_data = []
        for i in range(len(data)):
            # mask every series wherever the *first* series is below threshold
            dat = masked_where(abs(data[0]) < threshold, data[i])
            dat = compressed(dat)
            new_data.append(dat)
        return new_data
    else:
        data = masked_where(abs(data) < threshold, data)
        data = compressed(data)
        return data
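A short usage sketch with synthetic data (relying on the numpy.ma imports noted inside the function):

import numpy as np

signal = np.array([0.1, 2.0, -0.05, 3.5])
print(aboveThreshold(signal, threshold=1.0))        # [2.  3.5]

# list form: the first array decides which samples survive in all of them
times = np.arange(4.)
print(aboveThreshold([signal, times], threshold=1.0))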
Example #16
def azimuthalAverage(image, center=None, maskval=0):
    """
    calculate the azimuthally averaged radial profile.

    image - 2D image
    center - [x,y] pixel coordinates used as the center. the default is
             None which then uses the center of the image (including
             fractional pixels).
    maskval - threshold value for including data in the profile
    """

    # calculate the indices from the image
    y, x = np.indices(image.shape)

    # default to image center if no center given
    if center is None:
        center = np.array([(x.max() - x.min()) / 2.0,
                           (y.max() - y.min()) / 2.0])

    r = np.hypot(x - center[0], y - center[1])

    # get sorted radii and sort image accordingly
    ind = np.argsort(r.flat)
    i_sorted = image.flat[ind]

    # for FP data we need to at least mask out data at
    # 0 or less so the gaps get ignored.
    # also want to mask out area outside of aperture
    # so use given maskval to do that.
    i_ma = ma.masked_less_equal(i_sorted, maskval)
    mask = ma.getmask(i_ma)

    # remove masked data points from further analysis
    r_sorted = ma.compressed(ma.array(r.flat[ind], mask=mask))
    i_mask = ma.compressed(i_ma)

    # get the integer part of the radii (bin size = 1)
    r_int = r_sorted.astype(int)

    # find all pixels that fall within each radial bin.
    deltar = r_int[1:] - r_int[:-1]  # assumes all radii represented
    rind = np.where(deltar)[0]       # location of changed radius
    nr_tot = rind[1:] - rind[:-1]    # total number of points in radius bin

    # cumulative sum to figure out sums for each radius bin
    csim = ma.cumsum(i_mask, dtype=float)
    tbin = csim[rind[1:]] - csim[rind[:-1]]

    # calculate and return profile of mean within each bin
    radial_prof = tbin / nr_tot

    return radial_prof
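A quick sanity check on synthetic data (the ramp image below is made up; a pure radial ramp should give a roughly linear profile):

import numpy as np

y, x = np.indices((64, 64))
img = np.hypot(x - 32, y - 32) + 1.0      # radial ramp, strictly positive
prof = azimuthalAverage(img, center=[32, 32], maskval=0)
# prof[k] is the mean intensity in radius bin [k, k+1), so it grows ~linearly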
Example #17
def loessmin_diff(args):
    # single tuple argument; Python 2 tuple unpacking in signatures is gone in Python 3
    file1, file2, filex1, filex2, cutoff, toprowsmask, span = args
    import numpy as np
    import numpy.ma as ma
    from bopy.io import read_frame
    from bopy.utils import mask2_maxcutoff 
    from bopy.utils.rfuncs import loess2d
    d1 = read_frame(file1)
    d2 = read_frame(file2)
    dmask = mask2_maxcutoff(read_frame(filex1),
            read_frame(filex2),
            cutoff, toprowsmask=toprowsmask)
    d1 = loess2d(d1, span=span)
    d1 = np.min(ma.compressed(ma.masked_array(d1, mask=dmask)))
    d2 = loess2d(d2, span=span)
    d2 = np.min(ma.compressed(ma.masked_array(d2, mask=dmask)))
    return d1 - d2
Example #18
def band2txt(band, outfile):
    '''Accepts a numpy raster and writes it to the specified text file on disk.'''
    if ma.isMaskedArray(band):
        outraster = ma.compressed(band)
    else:
        outraster = band
    np.savetxt(outfile, outraster, fmt='%f')
Example #19
 def apply_new_mask(ifgs, mask_old, mask_new):
     """Apply a new mask to a collection of ifgs (or sources) that are stored as row vectors with an accompanying mask.  
     Inputs:
         ifgs | r2 array | ifgs as row vectors
         mask_old | r2 array | mask to convert a row of ifg into a rank 2 masked array
         mask_new | r2 array | the new mask to be applied.  Note that it must not unmask any pixels that are already masked.  
     Returns:
         ifgs_new_mask | r2 array | as per ifgs, but with a new mask.  
     History:
         2020/06/26 | MEG | Written
     """
     n_pixs_new = len(np.argwhere(mask_new == False))
     ifgs_new_mask = np.zeros(
         (ifgs.shape[0], n_pixs_new)
     )  # initiate an array to store the modified sources as row vectors
     for ifg_n, ifg in enumerate(ifgs):  # Loop through each source
         ifg_r2 = col_to_ma(
             ifg, mask_old
         )  # turn it from a row vector into a rank 2 masked array
         ifg_r2_new_mask = ma.array(ifg_r2,
                                    mask=mask_new)  # apply the new mask
         ifgs_new_mask[ifg_n, :] = ma.compressed(
             ifg_r2_new_mask
         )  # convert to row vector and places in rank 2 array of modified sources
     return ifgs_new_mask
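apply_new_mask relies on a col_to_ma helper that is not shown here; a plausible stand-in, reconstructed from the docstring (row vector plus mask back to a rank 2 masked array), would be:

import numpy as np
import numpy.ma as ma

def col_to_ma(col, pixel_mask):
    """Rebuild a rank 2 masked array from a row vector of unmasked pixel values."""
    r2 = np.zeros(pixel_mask.shape)
    r2[pixel_mask == False] = col          # fill only the unmasked pixels
    return ma.array(r2, mask=pixel_mask)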
Example #20
def bin_spike(x, l):
    """

        l is the number of points used for comparison, thus l=2 means that each
          point will be compared only against the previous and following
          measurements. l=2 is probably not a good choice, too small.

        Maybe use pstsd instead?

        Dummy way to avoid warnings when x[ini:fin] are all masked.
        Improve this in the future.
    """
    assert x.ndim == 1, "I'm not ready to deal with multidimensional x"

    assert l%2 == 0, "l must be an even integer"

    N = len(x)
    bin = ma.masked_all(N)
    # bin_std = ma.masked_all(N)
    half_window = int(l/2)
    idx = (i for i in range(half_window, N - half_window) if np.isfinite(x[i]))
    for i in idx:
        ini = max(0, i - half_window)
        fin = min(N, i + half_window)
        # At least 3 valid points
        if ma.compressed(x[ini:fin]).size >= 3:
            bin[i] = x[i] - ma.median(x[ini:fin])
            # bin_std[i] = (np.append(x[ini:i], x[i+1:fin+1])).std()
            bin[i] /= (np.append(x[ini:i], x[i+1:fin+1])).std()

    return bin
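A minimal call, with a made-up series containing one obvious spike (masked_invalid would also absorb any NaN gaps):

import numpy as np
import numpy.ma as ma

x = ma.masked_invalid(np.array([1.0, 1.1, 9.0, 0.9, 1.0, 1.2, 1.0, 1.1]))
spikes = bin_spike(x, l=4)    # large |value| at index 2 flags the spike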
Example #21
 def umask_value_transform(self, params_dict):
     """Retrieves a netcdf value, checking for masking and retrieves the value as a float
     Args:
         params_dict (dict): named function parameters
     Returns:
         float: the corresponding value
     """
     # Probably need more here....
     try:
         key = None
         rec_num = params_dict["recNum"]
         for key in params_dict.keys():
             if key != "recNum":
                 break
         nc_value = self.ncdf_data_set[key][rec_num]
         if not ma.getmask(nc_value):
             value = ma.compressed(nc_value)[0]
             return float(value)
         else:
             return None
     except Exception as _e:  # pylint:disable=broad-except
         logging.error(
             "%s umask_value_transform: Exception in named function umask_value_transform for key %s:  error: %s",
             self.__class__.__name__,
             key,
             str(_e),
         )
         return None
Example #22
def band2txt(band, outfile):
    """Accepts numpy rasterand writes to specified text file on disk."""
    if ma.isMaskedArray(band) is True:
        outraster = ma.compressed(band)
    else:
        outraster = band
    np.savetxt(outfile, outraster, fmt="%f")
Example #23
 def interpolate_time_iso(self, params_dict):
     """
     Rounds to nearest hour by adding a timedelta hour if minute >= delta_minutes
     """
     try:
         _time = None
         time_obs = params_dict["timeObs"]
         delta_minutes = self.delta / 60
         if not ma.getmask(time_obs):
             _time = int(ma.compressed(time_obs)[0])
         else:
             return ""
         _time = datetime.utcfromtimestamp(_time)
         _time = _time.replace(
             second=0, microsecond=0, minute=0,
             hour=_time.hour) + timedelta(hours=_time.minute //
                                          delta_minutes)
         # convert this iso
         return str(_time.isoformat())
     except Exception as _e:  # pylint:disable=broad-except
         logging.error(
             "%s handle_data: Exception in named function interpolate_time_iso:  error: %s",
             self.__class__.__name__,
             str(_e),
         )
Example #24
    def interpolate_time(self, params_dict):
        """
        Rounds to nearest hour by adding a timedelta hour if minute >= delta (from the template)
        """
        try:
            _thistime = None
            _time_obs = params_dict["timeObs"]
            if not ma.getmask(_time_obs):
                _thistime = int(ma.compressed(_time_obs)[0])
            else:
                return ""
            # if I get here process the _thistime
            delta_minutes = self.delta / 60
            _ret_time = datetime.utcfromtimestamp(_thistime)
            _ret_time = _ret_time.replace(
                second=0, microsecond=0, minute=0,
                hour=_ret_time.hour) + timedelta(hours=_ret_time.minute //
                                                 delta_minutes)
            return calendar.timegm(_ret_time.timetuple())

        except Exception as _e:  # pylint:disable=broad-except
            logging.error(
                "%s handle_data: Exception in named function interpolate_time:  error: %s",
                self.__class__.__name__,
                str(_e),
            )
Example #25
def EdgeFinder(image,data):
    '''this makes list of all x,y where I>edgeMin suitable for an ellipse search?
    Not currently used but might be useful in future?
    '''
    import numpy.ma as ma
    Nx,Ny = data['size']
    pixelSize = data['pixelSize']
    edgemin = data['edgemin']
    scalex = pixelSize[0]/1000.
    scaley = pixelSize[1]/1000.    
    tay,tax = np.mgrid[0:Nx,0:Ny]
    tax = np.asfarray(tax*scalex,dtype=np.float32)
    tay = np.asfarray(tay*scaley,dtype=np.float32)
    tam = ma.getmask(ma.masked_less(image.flatten(),edgemin))
    tax = ma.compressed(ma.array(tax.flatten(),mask=tam))
    tay = ma.compressed(ma.array(tay.flatten(),mask=tam))
    return list(zip(tax,tay))  # list() so the result matches the docstring in Python 3
Example #26
def EdgeFinder(image, data):
    '''this makes list of all x,y where I>edgeMin suitable for an ellipse search?
    Not currently used but might be useful in future?
    '''
    import numpy.ma as ma
    Nx, Ny = data['size']
    pixelSize = data['pixelSize']
    edgemin = data['edgemin']
    scalex = pixelSize[0] / 1000.
    scaley = pixelSize[1] / 1000.
    tay, tax = np.mgrid[0:Nx, 0:Ny]
    tax = np.asfarray(tax * scalex, dtype=np.float32)
    tay = np.asfarray(tay * scaley, dtype=np.float32)
    tam = ma.getmask(ma.masked_less(image.flatten(), edgemin))
    tax = ma.compressed(ma.array(tax.flatten(), mask=tam))
    tay = ma.compressed(ma.array(tay.flatten(), mask=tam))
    return list(zip(tax, tay))  # list() so the result matches the docstring in Python 3
Example #27
    def set_features(self):
        cluster_size = constant_cluster_size(self.data[self.varname])
        N = ma.compressed(self.data[self.varname]).size
        cluster_fraction = cluster_size / N

        self.features = {'constant_cluster_size': cluster_size,
                         'constant_cluster_fraction': cluster_fraction,
                         }
Example #28
def check_def_visible(ph_def,
                      mask_def,
                      ph_topo,
                      ph_turb,
                      snr_threshold=2.0,
                      debugging_plot=False):
    """A function to check if a (synthetic) deformation pattern is still visible
    over synthetic topo correlated and turbulent atmospheres.  
    
    Inputs:
        ph_def | r2 array | deformation phase
        mask_def | rank 2 array of ints | mask showing where the deformation is - 1s where deforming
        ph_topo | r2 array | topo correlated APS
        ph_turb | r2 array | turbulent APS
        snr_threshold | float | sets the level at which deformation is not considered visible over topo and turb APS
                                bigger = more likely to accept, smaller = less (if 0, will never accept)
        debugging_plot | boolean | if True, a figure is produced to help interpret the correct SNR.  Could give problems with dependencies.
    Returns:
        viable | boolean | True if SNR value is acceptable.  
        snr | float | SNR
    History:
        2019/MM/DD | MEG | Written as part of f01_real_image_dem_data_vXX.py
        2019/11/06 | MEG | Extracted from script and placed in synth_ts.py
        2020/08/19 | MEG | WIP
    
    """
    import numpy as np
    import numpy.ma as ma

    ph_def = ma.array(ph_def, mask=(1 - mask_def))
    ph_atm = ma.array((ph_turb + ph_topo), mask=(1 - mask_def))
    snr = np.var(ma.compressed(ph_def)) / np.var(ma.compressed(ph_atm))

    viable = snr > snr_threshold  # True if the deformation stands out above the atmosphere

    # import matplotlib.pyplot as plt
    # from small_plot_functions import create_universal_cmap
    # cmap, vmin, vmax = create_universal_cmap([ph_def, ph_atm])
    # f, axes = plt.subplots(1,2)
    # f.suptitle(f"SNR: {snr}")
    # axes[0].imshow(ph_def, vmin = vmin, vmax = vmax, cmap = cmap)
    # axes[1].imshow(ph_atm, vmin = vmin, vmax = vmax, cmap = cmap)
    return viable, snr
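A synthetic sanity check (all arrays below are made up): a noisy deformation patch over weak atmospheres should pass the default threshold.

import numpy as np

ph_def = np.zeros((50, 50))
ph_def[20:30, 20:30] = np.random.randn(10, 10)       # deformation signal
mask_def = np.zeros((50, 50), dtype=int)
mask_def[20:30, 20:30] = 1                           # 1s where deforming
ph_topo = 0.1 * np.random.randn(50, 50)
ph_turb = 0.1 * np.random.randn(50, 50)
viable, snr = check_def_visible(ph_def, mask_def, ph_topo, ph_turb)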
Example #29
def trim_loop(loop, N=20, Verbose=True, Use_Dip=True):
    f = f1 = ma.array(loop.freq)
    z = z1 = ma.array(loop.z)

    #Estimate Resonance frequency using minimum Dip or max adjacent distance
    if Use_Dip:
        zr_mag_est = np.abs(z).min()
        zr_est_index = np.where(np.abs(z) == zr_mag_est)[0][0]
    else:
        z_adjacent_distance = np.abs(z[:-1] - z[1:])
        zr_est_index = np.argmax(z_adjacent_distance)
        zr_mag_est = np.abs(z[zr_est_index])  # take the magnitude, matching the Use_Dip branch

    # estimate max transmission mag using max value of abs(z)
    z_max_mag = np.abs(z).max()

    #Depth of resonance in dB
    depth_est = 20.0 * np.log10(zr_mag_est / z_max_mag)

    #Magnitude of resonance dip at half max
    res_half_max_mag = (z_max_mag + zr_mag_est) / 2

    #find the indices of the closest points to this magnitude along the loop, one below zr_mag_est and one above zr_mag_est
    a = np.square(np.abs(z[:zr_est_index + 1]) - res_half_max_mag)
    lower_index = np.argmin(a)
    a = np.square(np.abs(z[zr_est_index:]) - res_half_max_mag)
    upper_index = np.argmin(a) + zr_est_index

    #estimate the FWHM bandwidth of the resonance
    f_upper_FWHM = f[upper_index]
    f_lower_FWHM = f[lower_index]
    FWHM_est = np.abs(f_upper_FWHM - f_lower_FWHM)
    fr_est = f[zr_est_index]

    #Bandwidth Cut: cut data that is more than N * FWHM_est away from zr_mag_est
    z = z2 = ma.masked_where(
        (f > fr_est + N * FWHM_est) | (fr_est - N * FWHM_est > f), z)
    f = f2 = ma.array(f, mask=z.mask)

    loop.z = ma.compressed(z)
    loop.freq = ma.compressed(f)

    if Verbose:
        print(
            'Bandwidth cut:\n\t{1} points outside of fr_est +/- {0}*FWHM_est removed, {2} remaining data points'
            .format(N, *_points_removed(z1, z2)))
Example #30
def gain_calc(data,masked,gsm,K0):
    """
    Calculates the gain using least squares for a single frequency.
    Inputs:
    data - single freq array of data
    gsm  - similar single freq array of gsm data
    masked - mask for data
    K0 - preliminary guess for gain
    """
    fK = lambda K,d,g: K*d-g

    d0_array = ma.array(data,mask=masked)
    d0 = ma.compressed(d0_array)
    g0_array = ma.array(gsm,mask=masked)
    g0 = ma.compressed(g0_array)
    Kf = opt.leastsq(fK,K0,args=(d0,g0),maxfev=100000)
    
    return Kf[0]
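A toy check with made-up arrays (gain_calc's leastsq call assumes scipy.optimize is imported as opt in the defining module): with gsm exactly twice the data, the recovered gain is ~2.

import numpy as np
import scipy.optimize as opt   # alias assumed by gain_calc

data = np.array([1., 2., 3., 4.])
gsm = 2.0 * data
masked = np.zeros(4)
K = gain_calc(data, masked, gsm, K0=1.0)
print(K)   # ~[2.]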
Example #31
def peak_finder(arr, smoothing=5, ddy_thresh=-300, dy0_thresh=5):
    array        = medfilt(arr, smoothing) 
    x            = arange(len(array))
    kernel       = [4, 0, -4]
    dY           = convolve(array, kernel, 'same') 
    ddy          = convolve(dY, kernel, 'same')
    falloff      = -15000*exp(-0.003*x) #This has to be worked on
    masked_array = ma.masked_where(logical_or(ddy>falloff+ddy_thresh, abs(dY) > dy0_thresh) , arr) 
    x_masked     = ma.array(x, mask=masked_array.mask)
    return ma.compressed(x_masked)
Example #32
def custom_mad_func(array_like, criteria=2.5, **kwargs):
    """This function performs a mad calculation on array of data
    Returns the median of MAD data"""
    # fall back to the defaults for anything not passed explicitly
    criteria = kwargs.get('criteria', criteria)
    ul = kwargs.get('ul', 58)
    ll = kwargs.get('ll', 0.001)
    # prescreen burst data for outliers
    array_like = ma.compressed(ma.masked_outside(array_like, ll, ul))
    MAD = smc.mad(array_like)
    k = (MAD * criteria)
    M = np.nanmedian(array_like)
    high = M + k
    low = M - k
    b = ma.masked_outside(array_like, high, low)
    c = ma.compressed(b)
    return np.nanmedian(c)
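A short usage sketch (made-up burst values; assumes smc points at a module exposing mad, e.g. statsmodels' robust scale functions):

import numpy as np

burst = np.array([0.50, 0.55, 0.52, 60.0, 0.54, 0.51])   # 60.0 exceeds the default ul of 58
print(custom_mad_func(burst))                            # outlier screened out before the MAD cut
print(custom_mad_func(burst, criteria=2.5, ul=58, ll=0.001))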
Example #33
    def test(self):
        self.flags = {}

        x = ma.compressed(self.data[self.varname])
        flag = np.zeros(self.data[self.varname].shape, dtype='i1')
        if (x.size > 1) and (np.allclose(x, np.ones_like(x) * x[0])):
            flag[:] = self.flag_bad
        else:
            flag[:] = self.flag_good
        flag[ma.getmaskarray(self.data[self.varname])] = 9
        self.flags['stuck_value'] = flag
Example #34
    def predict(self, X):
        # Handle missing values
        X = ma.masked_invalid(X)
        mask = ma.getmaskarray(X)  # full boolean array, even when nothing is masked
        dX = ma.compressed(X).reshape(-1, 1)
        dZ = self.model.predict(dX)
        Z = np.array([np.nan for i in range(X.shape[0])])
        Z[~mask] = dZ
        Z = ma.masked_invalid(Z)

        return Z * self.step
Example #35
File: cnv.py Project: xuanblo/jcvi
    def predict(self, X):
        # Handle missing values
        X = ma.masked_invalid(X)
        mask = ma.getmaskarray(X)  # full boolean array, even when nothing is masked
        dX = ma.compressed(X).reshape(-1, 1)
        dZ = self.model.predict(dX)
        Z = np.array([np.nan for i in range(X.shape[0])])
        Z[~mask] = dZ
        Z = ma.masked_invalid(Z)

        return Z * self.step
Example #36
def gfit3(xi, yi):
    """
    relies on masking , use pyspeckit
    """
    # not sure if we need this, or try x = xi
    x = ma.compressed(xi)
    y = ma.compressed(yi)
    sp = pyspeckit.Spectrum(
        data=y,
        xarr=x,
        error=None,
        header=None,  # the original referenced an undefined `h`; no FITS header is needed here
    )
    sp.plotter()
    sp.specfit(fittype='gaussian')
    sp.specfit.plot_fit()
    # sp.baseline()
    print(x)
    print(y)
    # fake a return array
    return yi
Example #37
def peak_finder(arr, smoothing=5, ddy_thresh=-300, dy0_thresh=5):
    array = medfilt(arr, smoothing)
    x = arange(len(array))
    kernel = [4, 0, -4]
    dY = convolve(array, kernel, 'same')
    ddy = convolve(dY, kernel, 'same')
    falloff = -15000 * exp(-0.003 * x)  #This has to be worked on
    masked_array = ma.masked_where(
        logical_or(ddy > falloff + ddy_thresh,
                   abs(dY) > dy0_thresh), arr)
    x_masked = ma.array(x, mask=masked_array.mask)
    return ma.compressed(x_masked)
Example #38
def Intensity(L, graph=False):
    """
    Finds the intensity value for a masked array L. 
    graph plots the results of the watershed segmentation on top of
    the raw image. 
    Need to do work to generate a zero-gradient analysis instead of watershed
    segmentation.
    """
    Ny, Nx = L.shape
    threshold = abs(min(ma.compressed(L))) / 2.
    RawData = ma.filled(L, 0)
    # plt.imshow(RawData)
    # plt.show()
    Filtered = gaussian_filter(RawData, 1)
    # plt.imshow(-Filtered, cmap = plt.cm.jet)
    # plt.show()
    data_max = peak_local_max(Filtered,
                              indices=False,
                              min_distance=2,
                              threshold_abs=threshold)
    DataIndices = peak_local_max(Filtered,
                                 indices=True,
                                 min_distance=2,
                                 threshold_abs=threshold)
    markers = ndi.label(data_max)[0]
    # plt.imshow(markers)
    # plt.show()
    Regions = watershed(-Filtered, markers, mask=np.logical_not(L.mask))
    Intensities = []
    if len(np.unique(Regions)) == 2:
        return DataIndices[0], 0
    if graph:
        plt.figure()
        plt.imshow(L.data, cmap='gray_r', interpolation='nearest')
        plt.imshow(ma.MaskedArray(Regions, mask=L.mask),
                   cmap='cubehelix_r',
                   interpolation='nearest',
                   alpha=0.3,
                   vmin=0)
        plt.xticks([])
        plt.yticks([])
        plt.xlim(20, Nx - 20)
        plt.ylim(20, Ny - 20)
        plt.title('Intensity')
        plt.savefig('IntensityExample.png', format='png', dpi=300)
        plt.close()
    for J in np.unique(Regions)[1:]:
        Med = ma.masked_array(L, Regions != J)
        Intensities.append(np.sum(Med))
    Intensity, Index = zip(
        *sorted(zip(Intensities, range(1, len(np.unique(Regions))))))

    return DataIndices[Index[-1] - 1], Intensity[-2] / Intensity[-1]
Example #39
def Fill2ThetaAzimuthMap(masks,TA,tam,image):
    'Needs a doc string'
    Zlim = masks['Thresholds'][1]
    rings = masks['Rings']
    arcs = masks['Arcs']
    TA = np.dstack((ma.getdata(TA[1]),ma.getdata(TA[0]),ma.getdata(TA[2])))    #azimuth, 2-theta, dist
    tax,tay,tad = np.dsplit(TA,3)    #azimuth, 2-theta, dist**2/d0**2
    for tth,thick in rings:
        tam = ma.mask_or(tam.flatten(),ma.getmask(ma.masked_inside(tay.flatten(),max(0.01,tth-thick/2.),tth+thick/2.)))
    for tth,azm,thick in arcs:
        tamt = ma.getmask(ma.masked_inside(tay.flatten(),max(0.01,tth-thick/2.),tth+thick/2.))
        tama = ma.getmask(ma.masked_inside(tax.flatten(),azm[0],azm[1]))
        tam = ma.mask_or(tam.flatten(),tamt*tama)
    taz = ma.masked_outside(image.flatten(),int(Zlim[0]),Zlim[1])
    tabs = np.ones_like(taz)
    tam = ma.mask_or(tam.flatten(),ma.getmask(taz))
    tax = ma.compressed(ma.array(tax.flatten(),mask=tam))   #azimuth
    tay = ma.compressed(ma.array(tay.flatten(),mask=tam))   #2-theta
    taz = ma.compressed(ma.array(taz.flatten(),mask=tam))   #intensity
    tad = ma.compressed(ma.array(tad.flatten(),mask=tam))   #dist**2/d0**2
    tabs = ma.compressed(ma.array(tabs.flatten(),mask=tam)) #ones - later used for absorption corr.
    return tax,tay,taz,tad,tabs
Example #40
    def calcTBScore(self, attempt=-1):
        field1 = 'BottomThick'
        field2 = 'TBVpp'
        tm = self.ClasData[attempt]
        loBTthresh = self.MeasSettings['MinBotThickNs'][0] / 1000
        hiBTthresh = self.MeasSettings['MaxBotThickNs'][0] / 1000
        loVppthresh = self.MeasSettings['MinTBVppInclusionThreshold'][0]
        hiVppthresh = self.MeasSettings['MaxTBVppInclusionThreshold'][0]
        botthick = []
        #    alld = []
        for r in range(0, len(self.meas_rows)):
            bt = tm[tm['Row'] == r][field1].values
            bt_ma = ma.masked_outside(bt, loBTthresh, hiBTthresh)
            tbvpp = tm[tm['Row'] == r][field2].values
            tbvpp_ma = ma.masked_outside(tbvpp, loVppthresh, hiVppthresh)
            thismask = ma.mask_or(bt_ma.mask, tbvpp_ma.mask)
            bt = ma.masked_array(bt, mask=thismask)
            #         thisd = [bt_ma,tbvpp_ma]
            #       alld.append(thisd)
            botthick.append(bt)
#       return botthick,alld
        average = ma.masked_array(botthick).mean(axis=0)
        thickness = ma.compressed(average)
        self.nTB = len(thickness)
        #     if self.nTB == 0 :
        #          return
        mask = average.mask
        scores = []
        for c in self.Candidates:
            field = c + '_tofdelta'
            k = ma.compressed(
                ma.masked_array(self.tof_kernels[field][0:len(average)],
                                mask=mask))
            #           print('len(bb): ', len(bb), ' len(k): ', len(k))
            score = np.corrcoef(thickness, k)[0, 1]
            scores.append(score)
        #self.TBScores = scores
        #self.nTB  = ma.count(average)
        return botthick, average, scores
Example #41
def r3_to_r2(phUnw):
    """ Given a rank3 of ifgs, convert it to rank2 and a mask.  Works with either masked arrays or just arrays.  
    Inputs:
        phUnw | rank 3 array | n_ifgs x height x width
    returns:
        r2_data['ifgs'] | rank 2 array | ifgs as row vectors
        r2_data['mask'] | rank 2 array 
    History:
        2020/06/09 | MEG  | Written
    """
    import numpy as np
    import numpy.ma as ma

    if ma.isMaskedArray(phUnw):
        n_pixels = len(
            ma.compressed(phUnw[0, ])
        )  # if it's a masked array, get the number of non-masked pixels
        mask = ma.getmask(phUnw)[
            0, ]  # get the mask, which is assumed to be constant through time
    else:
        n_pixels = len(np.ravel(phUnw[
            0, ]))  # or if a normal numpy array, just get the number of pixels
        mask = np.zeros(phUnw[0, ].shape)  # or make a blank mask

    r2_ifgs = np.zeros(
        (phUnw.shape[0], n_pixels))  # initiate to store ifgs as rows in
    for ifg_n, ifg in enumerate(phUnw):
        if ma.isMaskedArray(phUnw):
            r2_ifgs[ifg_n, ] = ma.compressed(
                ifg)  # non masked pixels into row vectors
        else:
            r2_ifgs[ifg_n, ] = np.ravel(
                ifg)  # or all just pixles into row vectors

    r2_data = {
        'ifgs': r2_ifgs,  # make into a dictionary.  
        'mask': mask
    }
    return r2_data
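A small example of the conversion on a synthetic stack:

import numpy as np
import numpy.ma as ma

stack = ma.array(np.random.randn(3, 4, 5))     # 3 ifgs of 4 x 5 pixels
stack[:, 0, 0] = ma.masked                     # mask one pixel in every ifg
r2 = r3_to_r2(stack)
print(r2['ifgs'].shape)                        # (3, 19): masked pixel dropped
print(r2['mask'].shape)                        # (4, 5)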
Example #42
    def calculateMeans(self, synMean, synMin, synMed, synMax, synMinCP):
        synMean = ma.masked_values(synMean, -9999.)
        synMin = ma.masked_values(synMin, -9999.)
        synMed = ma.masked_values(synMed, -9999.)
        synMax = ma.masked_values(synMax, -9999.)

        self.synMean = ma.mean(synMean, axis=0)
        self.synMed = ma.mean(synMed, axis=0)
        self.synMin = ma.mean(synMin, axis=0)
        self.synMax = ma.mean(synMax, axis=0)

        self.synMeanUpper = percentile(ma.compressed(synMean), per=95, axis=0)
        self.synMeanLower = percentile(ma.compressed(synMean), per=5, axis=0)
        self.synMinUpper = percentile(ma.compressed(synMin), per=95, axis=0)
        self.synMinLower = percentile(ma.compressed(synMin), per=5, axis=0)

        self.synMinCPDist = np.mean(synMinCP, axis=0)
        self.synMinCPLower = percentile(synMinCP, per=5, axis=0)
        self.synMinCPUpper = percentile(synMinCP, per=95, axis=0)
        
        r = list(np.random.uniform(high=synMean.shape[0], size=3).astype(int))
        self.synRandomMinima = synMean[r, :, :]
Example #43
def unmask_track(track):
    """Removes empty frames from inpute trajectory datset.

    Parameters
    ----------
    track : pandas.core.frame.DataFrame
        At a minimum, must contain a Frame, Track_ID, X, Y, MSDs, and
        Gauss column.

    Returns
    -------
    comp_track : pandas.core.frame.DataFrame
        Similar to track, but has all masked components removed.

    """
    xpos = ma.masked_invalid(track['X'])
    msds = ma.masked_invalid(track['MSDs'])
    x_mask = ma.getmask(xpos)
    msd_mask = ma.getmask(msds)
    comp_frame = ma.compressed(ma.masked_where(msd_mask, track['Frame']))
    compid = ma.compressed(ma.masked_where(msd_mask, track['Track_ID']))
    comp_x = ma.compressed(ma.masked_where(x_mask, track['X']))
    comp_y = ma.compressed(ma.masked_where(x_mask, track['Y']))
    comp_msd = ma.compressed(ma.masked_where(msd_mask, track['MSDs']))
    comp_gauss = ma.compressed(ma.masked_where(msd_mask, track['Gauss']))
    comp_qual = ma.compressed(ma.masked_where(x_mask, track['Quality']))
    comp_snr = ma.compressed(ma.masked_where(x_mask, track['SN_Ratio']))
    comp_meani = ma.compressed(ma.masked_where(x_mask,
                                               track['Mean_Intensity']))

    data1 = {
        'Frame': comp_frame,
        'Track_ID': compid,
        'X': comp_x,
        'Y': comp_y,
        'MSDs': comp_msd,
        'Gauss': comp_gauss,
        'Quality': comp_qual,
        'SN_Ratio': comp_snr,
        'Mean_Intensity': comp_meani
    }
    comp_track = pd.DataFrame(data=data1)
    return comp_track
Example #44
def rebin(data,masked,freq,binscale):
    """
    Rebins data to coarser frequency resolution.
    Assumes that the input is the data after flagging, mask,
    corresponding freq array and a binscale (number of bins to merge).
    Output is rebinned data with corresponding freq, mask arrays.
    """

    if binscale > 1:
        new_data = np.zeros(len(data)//binscale)
        new_mask = np.zeros(len(data)//binscale)
        new_freq = np.zeros(len(data)//binscale)
        f=0
        for f in range(0, len(new_data)-1):
            if len(masked[f*binscale:(f+1)*binscale])==sum(masked[f*binscale:(f+1)*binscale]):
                new_data[f] = 1.0
            else: 
                test_data = ma.array(data[f*binscale:(f+1)*binscale],mask=masked[f*binscale:(f+1)*binscale])
                test_data_con = ma.compressed(test_data)
                new_data[f] = ma.mean(test_data_con)
            if sum(masked[f*binscale:(f+1)*binscale])>=binscale/2.:
                new_mask[f] = 1.0
            new_freq[f] = ma.mean(freq[f*binscale:(f+1)*binscale])
        if len(masked[(f+1)*binscale:-1])==sum(masked[(f+1)*binscale:-1]):
            new_data[-1] = 1.0
        else:
            test_data = ma.array(data[(f+1)*binscale:-1],mask=masked[(f+1)*binscale:-1])
            test_data_con = ma.compressed(test_data) 
            new_data[-1] = ma.mean(test_data_con)
        if sum(masked[(f+1)*binscale:-1])>=1.:
            new_mask[-1] = 1.0
        new_freq[-1] = ma.mean(freq[(f+1)*binscale:-1])
    elif binscale == 1:
        new_data = data
        new_mask = masked
        new_freq = freq
    
    return new_data,new_mask,new_freq
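A quick call on made-up inputs, halving the frequency resolution:

import numpy as np

data = np.arange(8.)
masked = np.zeros(8)
freq = np.linspace(50., 57., 8)
new_data, new_mask, new_freq = rebin(data, masked, freq, binscale=2)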
Example #45
def rat_fore(data,masked,freq,minf,maxf,n,m):
    """
    Calculates a rational function fit for the data.
    Inputs:
    data - single frequency dependent spectrum
    masked - corresponding mask
    freq - corresponding frequency array
    minf, maxf - min and max freq if want to truncate range
    n - index of numerator polynomial fitting
    m - index of denominator polynomial fitting
    (4>= n,m >=1)
    Output:
    dfit - polynomial fit spectrum
    fit_params - parameters for the fit
    """ 
    data_array = ma.array(data,mask=masked)
    data_comp = ma.compressed(data_array)
    freq_array = ma.array(freq,mask=masked)
    freq_comp = ma.compressed(freq_array)
 
    min_ind = 0
    max_ind = -1
    if minf>freq_comp[0]:
        min_ind = np.where(freq_comp<=minf)[0][-1]
    if maxf<freq_comp[-1]:
        max_ind = np.where(freq_comp<=maxf)[0][-1]
    mid_ind = min_ind+(max_ind-min_ind)//2  # integer index of the normalization frequency
 
    log_data = np.log10(data_comp[min_ind:max_ind])
    log_freq = np.log10(freq_comp[min_ind:max_ind]/freq_comp[mid_ind])
    
    p0 = np.ones(n+m+2)
    fitfunc = rational_fit(n,m)
    errfunc = lambda p,x,y: fitfunc(p,x)-y
    pf,success = opt.leastsq(errfunc,p0[:],args=(log_freq,log_data))
    dfit = 10**(fitfunc(pf,np.log10(freq/freq_comp[mid_ind])))

    return dfit,pf
Example #46
0
    def _get_bounds(item_list, wrapped_coords=False):
        """
        Return a tuple containing the first and second bound in the list.
            * For 0 values it returns (None, None).
            * For 1 value it returns that value twice
            * For 2 values it returns those (low, high) for unwrapped
              co-ordinates or in the order given for wrapped
            * For Multiple values unwrapped, this returns min and max
            * For Multiple values wrapped co-ordinates, this
              returns the value around the largest gap in values

        :param list item_list: List of comparable data items
        :param wrapped_coords: is this a coordinate which wraps at 360 back to
                               0, e.g. longitude
        :return: Tuple of (first bound, second bound for values in the list)
        """
        items = ma.compressed(item_list)
        if len(items) < 1:
            return None, None

        if len(items) == 1:
            return items[0], items[0]

        if wrapped_coords:
            if len(items) == 2:
                first_bound_index = 0
                second_bound_index = 1
            else:
                # find the largest angle between closest points and exclude
                # this from the bounding box ensuring that this includes going
                # across the zero line
                items = sorted(items)
                first_bound_index = 0
                second_bound_index = len(items) - 1
                max_diff = (items[first_bound_index] -
                            items[second_bound_index]) % 360
                for i in range(1, len(items)):
                    diff = (items[i] - items[i-1]) % 360
                    if diff > max_diff:
                        max_diff = diff
                        first_bound_index = i
                        second_bound_index = i-1

            first_bound = items[first_bound_index]
            second_bound = items[second_bound_index]
        else:
            first_bound = min(items)
            second_bound = max(items)

        return float(first_bound), float(second_bound)
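For wrapped longitudes the bounds are chosen around the largest gap, e.g. points clustered across the dateline (treating _get_bounds as the static helper it appears to be):

print(_get_bounds([350., 10., 5.], wrapped_coords=True))   # (350.0, 10.0)
print(_get_bounds([350., 10., 5.]))                        # (5.0, 350.0)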
Example #47
    def set_gen3data(self, Apath, Ppath, Xdet, Zdet, 
            omega, mask=None):

        self.omega = omega
        self.wbyv = omega/self.v

        # read the data
        self.Amp = bp.io.read_frame(Apath)
        self.Pha = bp.io.read_frame(Ppath)
        self.amshape = self.Amp.shape

        # set the mask
        self.mask = mask

        # mask all data arrays
        self.Amp = ma.compressed(ma.masked_array(self.Amp, self.mask))
        self.Pha = ma.compressed(ma.masked_array(self.Pha, self.mask))

        # masked detector positions
        self.dIdx = np.arange(len(Xdet))
        self.dIdx = ma.compressed(ma.masked_array(self.dIdx.reshape(self.amshape), self.mask))
        self.Xdet = ma.compressed(ma.masked_array(Xdet.reshape(self.amshape), self.mask))
        self.Zdet = ma.compressed(ma.masked_array(Zdet.reshape(self.amshape), self.mask))
Example #48
def poly_fore(data,masked,freq,minf,maxf,n,std):
    """
    Calculates a polynomial fit for data.
    Inputs:
    data - single frequency dependent spectrum
    masked - corresponding mask
    freq - corresponding frequency array
    minf, maxf - min and max freq if want to truncate range 
    n - index of polynomial fitting
    std - 1/weights, set to ones if not needed.
    Output:
    dfit - polynomial fit spectrum
    fit_params - parameters for the fit
    """
    
    data_array = ma.array(data,mask=masked)
    data_comp = ma.compressed(data_array)
    freq_array = ma.array(freq,mask=masked)
    freq_comp = ma.compressed(freq_array)
    std_comp = ma.compressed(ma.array(std, mask=masked))

    min_ind = 0
    max_ind = -1
    if minf>freq_comp[0]:
        min_ind = np.where(freq_comp<=minf)[0][-1]
    if maxf<freq_comp[-1]:
        max_ind = np.where(freq_comp<=maxf)[0][-1]
    mid_ind = min_ind+(max_ind-min_ind)//2  # integer index of the normalization frequency

    log_data = np.log10(data_comp[min_ind:max_ind])
    log_freq = np.log10(freq_comp[min_ind:max_ind]/freq_comp[mid_ind])
    weights = 1/std_comp[min_ind:max_ind]
    
    fit_params = poly.polyfit(log_freq,log_data,n,w=weights)
    dfit = 10**(poly.polyval(np.log10(freq/freq_comp[mid_ind]),fit_params))

    return dfit, fit_params
Example #49
def rasterHistogram(raster_matrix):
    '''Accepts matrix and generates histogram'''
     
    # Line above is function docstring
    # Flatten 2d-matrix
    flat_raster = ma.compressed(raster_matrix)
     
    # Setup the plot (see matplotlib.sourceforge.net)
    fig = plt.figure(figsize=(8,11))
    ax = fig.add_subplot(1,1,1)
     
    # Plot histogram (matplotlib's old `normed` flag is now `density`)
    ax.hist(flat_raster, 10, density=False, histtype='bar',
            align='mid')
    # Show the plot on screen
    plt.show()
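A minimal call on a synthetic masked raster (assuming the module-level numpy/numpy.ma/matplotlib imports the function relies on):

import numpy as np
import numpy.ma as ma

band = ma.masked_less(np.random.rand(10, 10), 0.2)   # mask the low values
rasterHistogram(band)                                # histogram of the unmasked pixels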
Example #50
 def correct(self, y):
     """Correct the state distribution, given the measurement vector."""
     # Get the y-mask
     mask = ma.getmaskarray(y)
     self.active = active = ~mask
     if np.all(mask):
         return self.x, self.Px
     
     # Remove inactive outputs
     y = ma.compressed(y)
     R = self.model.R()[np.ix_(active, active)]
     def h_fun(x):
         return self.model.h(self.k, x)[..., active]
     
     # Perform unscented transform
     h, Ph = self.__ut.transform(self.x, self.Px, h_fun)
     Pxh = self.__ut.crosscov()
     
     # Factorize covariance
     self.__chol = DifferentiableCholesky()
     Py = Ph + R
     PyC = self.__chol(Py)
     PyCI = np.linalg.inv(PyC)
     PyI = np.einsum('...ik,...jk', PyCI, PyCI)
     
     # Perform correction
     e = y - h
     K = np.einsum('...ik,...kj', Pxh, PyI)
     x_corr = self.x + np.einsum('...ij,...j', K, e)
     Px_corr = self.Px - np.einsum('...ik,...jl,...kl', K, K, Py)
     
     # Save and return the correction data
     self.prev_x = self.x
     self.prev_Px = self.Px
     self.e = e
     self.x = x_corr
     self.Px = Px_corr
     self.Pxh = Pxh
     self.Py = Py
     self.PyI = PyI
     self.PyC = PyC
     self.PyCI = PyCI
     self.K = K
     return x_corr, Px_corr
Example #51
	def pixel_value_list(self, image):
		"""
		Produce two arrays, the first is a list of the binned pixel values
		and the second is the number of pixels at that value

		Input: masked numpy array of the image
		Output: Two lists of histogram data on the image. ([bins],[values])
		"""

		# Compress the array into a list of pixel values
		compressed = ma.compressed(image)

		# Find highest pixel value
		max_val = np.amax(compressed)

		# Make a histogram of the compressed list (the old `normed` flag is now `density`)
		result = np.histogram(compressed, bins=max_val, density=True)
		#print result[0]
		#print result[1]
		return result
Example #52
def get_toast_phi_loesssmooth(args):
    # single tuple argument; Python 2 tuple unpacking in signatures is gone in Python 3
    dfile, mfile, span, rpath, rfile = args
    """ Reads phi files, smooths and then returns the unmasked pixels
        In addition, performs normalization on the target files, if rfile is 
        not None
        
        Args:
        dfile - data file
        mfile - mask file
        span - span parameter for LOESS
        rpath - the path to ref files (None if not for target phase normalization)
        rfile - reference data file
    """
    import os
    import numpy as np
    import numpy.ma as ma
    from bopy.utils.rfuncs import loess2d
    d = np.load(dfile)
    m = np.load(mfile)
    if rfile is not None:
        from bopy.gen3pp.gen3pp_utils import phase_normalization
        r = np.load(os.path.join(rpath, rfile))
        d = phase_normalization(d, r, m)
    d = loess2d(d, span)
    d = ma.compressed(ma.masked_array(d, mask=m))
    return -d   # -d because toast phase decreases with r
Example #53
def match_binning(gsm_time,freq,time,data,mask):
    """
    Process for matching the time binning of two datasets.
    Initially designed to match signal data to gsm datasets.
    Assumes that GSM data has coarser resolution.
    Inputs:
    gsm_time - time that is being matched to.
    freq - frequency array (only need length)
    time - original time array of data
    data - original data array
    mask - original mask array

    outputs are new data and mask array
    """
    stack_time = gsm_time
    stack_data = np.zeros((len(stack_time),len(freq)))
    stack_mask = np.zeros((len(stack_time),len(freq)))
    for i in range(0,len(stack_time)):
        sub_data = np.zeros((len(time),len(freq)))
        sub_mask = np.zeros((len(time),len(freq)))
        num_mean = 0  # must be an int: used as an index below
        for j in range(0,len(time)):
            if abs(stack_time[i]-time[j])<=(stack_time[1]-stack_time[0])/2.:
                sub_data[num_mean] = data[j]
                sub_mask[num_mean] = mask[j]
                num_mean = num_mean+1
        sub_data = sub_data[0:num_mean]
        sub_mask = sub_mask[0:num_mean]
        if num_mean>=1:
            for f in range(0,len(freq)):
                if sum(sub_mask[:,f])==len(sub_mask[:,f]):
                    stack_data[i,f]=0.0
                    stack_mask[i,f]=1.0
                else:
                    single_data = ma.array(sub_data[:,f],mask=sub_mask[:,f])
                    single_comp = ma.compressed(single_data)
                    stack_data[i,f] = ma.mean(single_comp)
    

    return stack_data, stack_mask
Example #54
    def correct(self, y):
        """Correct the state distribution, given the measurement vector."""
        # Get the y-mask
        mask = ma.getmaskarray(y)
        self.active = active = ~mask
        if np.all(mask):
            return self.x, self.Px
        
        # Remove inactive outputs
        y = ma.compressed(y)
        R = self.model.R()[np.ix_(active, active)]

        # Evaluate the model functions
        h = self.model.h(self.k, self.x)[..., active]
        dh_dx = self.model.dh_dx(self.k, self.x)[..., active]

        # Calculate the covariances and gain
        Pxh = np.einsum('...ij,...jz', self.Px, dh_dx)
        Ph = np.einsum('...iy,...iz', dh_dx, Pxh)
        Py = Ph + R
        PyI = np.linalg.inv(Py)
        K = np.einsum('...ik,...kj', Pxh, PyI)
        
        # Perform correction
        e = y - h
        x_corr = self.x + np.einsum('...ij,...j', K, e)
        Px_corr = self.Px - np.einsum('...ik,...jl,...kl', K, K, Py)
        
        # Save and return the correction data
        self.prev_x = self.x
        self.prev_Px = self.Px
        self.e = e
        self.x = x_corr
        self.Px = Px_corr
        self.Pxh = Pxh
        self.Py = Py
        self.PyI = PyI
        self.K = K
        return x_corr, Px_corr
Example #55
def mask(x, xm, x0, x1):
    if numpy.ndim(xm) != 1:
        print("utility.mask(): array used to derive mask must be 1D")
        return(numpy.array([]))
    xmask = ma.masked_outside(xm, x0, x1)
    tmask = ma.getmask(xmask)
    if numpy.ndim(x) == 1:
        xnew = ma.array(x, mask=tmask)
        return(xnew.compressed())
    if numpy.ndim(x) == 2:
        for i in range(0, numpy.shape(x)[0]):
            xnew= ma.array(x[i,:], mask=tmask)
            xcmp = ma.compressed(xnew)
            if i == 0:
                print(ma.shape(xcmp)[0])
                print(numpy.shape(x)[0])
                xout = numpy.zeros((numpy.shape(x)[0], ma.shape(xcmp)[0]))
            xout[i,:] = xcmp
        return(xout)
    else:
        print "Utility.Mask: dimensions of input arrays are not acceptable"
        return(numpy.array([]))
Example #56
def timerebin(data,masked):
    """
    Rebins chunk of time data to single dataset
    Assumes that the input is a two dimensional array with corresponding mask
    Output is single dataset with corresponding mask
    """
    new_data = np.zeros(len(data[0]))
    new_mask = np.zeros(len(data[0]))
    data = ma.array(data)
    masked = ma.array(masked)
    
    for f in range(0,len(data[0])):
        if sum(masked[:,f])==len(masked[:,f]):
            new_data[f] = 1.0
        else:
            masked_data = ma.array(data[:,f],mask=masked[:,f])
            compressed_data = ma.compressed(masked_data)
            new_data[f] = ma.mean(compressed_data)
        if sum(masked[:,f])>=len(data[0])/2.:
            new_mask[f] = 1.0
        
    return new_data,new_mask
Example #57
def time_mean(data,mask):
    """
    Calculates time mean for a 2 d array (1st ind time, 2nd ind freq).
    """
    mean_data = np.zeros(len(data[0]))
    mean_mask = np.zeros(len(data[0]))
    data = ma.array(data)
    mask = ma.array(mask)
    num_time = len(mask)
    num_freq = len(mask[0])
    mod_mask = np.zeros((num_time,num_freq))
    idx = 0  # renamed from `int`, which shadowed the builtin

    for i in range(0,num_time):
        single_mask = mask[i]
        if sum(single_mask)>=len(single_mask)/2.:
            new_mask = np.ones(len(single_mask))
        else:
            new_mask = np.zeros(len(single_mask))
        mod_mask[idx] = new_mask
        idx += 1

    mod_mask = ma.array(mod_mask)
    idx2 = 0
    for i in range(0,num_freq):
        single_mask = mod_mask[:,i]
        bad_num = sum(single_mask)
        if num_time<=bad_num:
            mean_data[idx2] = 0.0
            mean_mask[idx2] = 1.0
        else:
            single = ma.array(data[:,i],mask=single_mask)
            single_comp = ma.compressed(single)
            mean_data[idx2] = ma.mean(single_comp)
            mean_mask[idx2] = 0.0
        idx2 += 1
    
    return mean_data,mean_mask
Example #58
for i in range(0,len(freq)):
    new_maskb = fc.timeflag(sortdatab[:,i],sortmaskb[:,i],sorttimeb,2.5,timescale)
    sortmaskb[:,i] = new_maskb

percent_masked = 100.*sum(sortmask)/(len(sortmask)*len(sortmask[0]))
print('Percentage of Masked Data from Frequency and Time Masking',percent_masked)

percent_maskedb = 100.*sum(sortmaskb)/(len(sortmaskb)*len(sortmask[0]))
print('Percentage of Masked Data from Frequency and Time Masking part2',percent_maskedb)

mean_data =[]
for i in range(0,len(freq)):
    if sum(sortmask[:,i])==len(sortmask[:,i]):
        mean_data.append(0.0)
    else:
        single_data = ma.array(sortdata[:,i],mask=sortmask[:,i])
        single_comp = ma.compressed(single_data)
        single_mean = ma.mean(single_comp)
        mean_data.append(single_mean)

mean_datab =[]
for i in range(0,len(freq)): 
    if sum(sortmaskb[:,i])==len(sortmaskb[:,i]): 
        mean_datab.append(0.0) 
    else: 
        single_data = ma.array(sortdatab[:,i],mask=sortmaskb[:,i])
        single_comp = ma.compressed(single_data) 
        single_mean = ma.mean(single_comp) 
        mean_datab.append(single_mean)

mean_data = array(mean_data)
mean_mask = zeros(len(mean_data))
Example #59
for i in range(fmin,fmax+1):
    single_data = lim_stack[:,i]
    Kt_cal[:,i-fmin] = Kt[i]*single_data
    Kdgsm_cal[:,i-fmin] = K_dgsm[i]*single_data
    single_mask = lim_mask[:,i]
    data_mask[:,i-fmin] = single_mask

savetxt(outdir+'June_'+date_ind+'_Kdgsm_full_time_series.txt',Kdgsm_cal,delimiter=' ')
savetxt(outdir+'June_'+date_ind+'_Kt_full_time_series.txt',Kt_cal,delimiter=' ')
savetxt(outdir+'June_'+date_ind+'_mask_full_time_series.txt',data_mask,delimiter=' ')

f70 = where(freq<=70.)[0][-1]
single_data = lim_stack[:,f70]
single_mask = lim_mask[:,f70]
 
pylab.scatter(ma.compressed(ma.array(lim_time,mask=single_mask)),
              ma.compressed(ma.array(single_data*K_dgsm[f70],mask=single_mask)),
              c='g',edgecolor='g',s=5,label='Calibrated time series')
pylab.xlim(0,24)
pylab.ylim(0,6000)
pylab.xlabel('Local Sidereal Time (Hours)')
pylab.ylabel('Temperature (Kelvin)')
pylab.grid()
pylab.title('Time Dependence at 70 MHz')
pylab.savefig(outdir+'June_'+date_ind+'_Kdgsm_time_series',dpi=300)
pylab.clf()

pylab.scatter(ma.compressed(ma.array(lim_time,mask=single_mask)),
              ma.compressed(ma.array(single_data*Kt[f70],mask=single_mask)),
              c='g',edgecolor='g',s=5)
pylab.xlim(0,24) 
pylab.ylim(0,6000) 
pylab.xlabel('Local Sidereal Time (Hours)') 
pylab.ylabel('Temperature (Kelvin)') 
pylab.grid()