Example #1
    def condition1(self, t0):
        print(self.FOV_ARRAY.shape)
        print((self.FOV_ARRAY < 0).shape)
        print(self.SC_TSTOP.shape)
        print((self.SC_TSTOP > t0).shape)

        print(sp.logical_and(self.FOV_ARRAY < 0, self.SC_TSTOP > t0)[0])
Example #2
    def getRegion(self,
                  size=3e4,
                  min_nSNPs=1,
                  chrom_i=None,
                  pos_min=None,
                  pos_max=None):
        """
        Sample a region from the piece of genotype X, chrom, pos
        minSNPnum:  minimum number of SNPs contained in the region
        Ichrom:  restrict X to chromosome Ichrom before taking the region
        cis:        bool vector that marks the sorted region
        region:  vector that contains chrom and init and final position of the region
        """
        bim = plink_reader.readBIM(self.bfile, usecols=(0, 1, 2, 3))

        chrom = SP.array(bim[:, 0], dtype=int)
        pos = SP.array(bim[:, 3], dtype=int)

        if chrom_i is None:
            n_chroms = chrom.max()
            chrom_i = int(SP.ceil(SP.rand() * n_chroms))

        pos = pos[chrom == chrom_i]
        chrom = chrom[chrom == chrom_i]

        ipos = SP.ones(len(pos), dtype=bool)
        if pos_min is not None:
            ipos = SP.logical_and(ipos, pos_min < pos)

        if pos_max is not None:
            ipos = SP.logical_and(ipos, pos < pos_max)

        pos = pos[ipos]
        chrom = chrom[ipos]

        if size == 1:
            # select a single SNP; floor keeps the random index in range
            idx = int(SP.floor(pos.shape[0] * SP.rand()))
            cis = SP.arange(pos.shape[0]) == idx
            region = SP.array([chrom_i, pos[idx], pos[idx]])
        else:
            while 1:
                idx = int(SP.floor(pos.shape[0] * SP.rand()))
                posT1 = pos[idx]
                posT2 = pos[idx] + size
                if posT2 <= pos.max():
                    cis = chrom == chrom_i
                    cis *= (pos > posT1) * (pos < posT2)
                    if cis.sum() > min_nSNPs: break
            region = SP.array([chrom_i, posT1, posT2])

        start = SP.nonzero(cis)[0].min()
        nSNPs = cis.sum()
        rv = plink_reader.readBED(self.bfile,
                                  useMAFencoding=True,
                                  start=start,
                                  nSNPs=nSNPs,
                                  bim=bim)
        Xr = rv['snps']
        return Xr, region
Example #3
 def eliminatePercentileTails(self,
                              mskDds,
                              loPercentile=10.0,
                              hiPercentile=90.0):
     """
     Trims lower and/or upper image histogram tails by replacing :samp:`mskDds`
     voxel values with :samp:`mskDds.mtype.maskValue()`. 
     """
     rootLogger.info("Eliminating percentile tails...")
     rootLogger.info("Calculating element frequencies...")
     elems, counts = elemfreq(mskDds)
     rootLogger.info("elems:\n%s" % (elems, ))
     rootLogger.info("counts:\n%s" % (counts, ))
     cumSumCounts = sp.cumsum(counts, dtype="float64")
     percentiles = 100.0 * (cumSumCounts / float(cumSumCounts[-1]))
     percentileElems = elems[sp.where(
         sp.logical_and(percentiles > loPercentile,
                        percentiles < hiPercentile))]
     loThresh = percentileElems[0]
     hiThresh = percentileElems[-1]
     rootLogger.info("Masking percentiles range (%s,%s) = (%s,%s)" %
                     (loPercentile, hiPercentile, loThresh, hiThresh))
     mskDds.asarray()[...] = \
         sp.where(
             sp.logical_and(
                 sp.logical_and(mskDds.asarray() >= loThresh, mskDds.asarray() <= hiThresh),
                 mskDds.asarray() != mskDds.mtype.maskValue()
             ),
             mskDds.asarray(),
             mskDds.mtype.maskValue()
         )
     rootLogger.info("Done eliminating percentile tails.")
Example #4
 def reduction_T_3(self, I):
     A = logical_or(I[0:-1:2, :], I[1::2, :])
     A = logical_and(A[:, 0:-1:2], A[:, 1::2])
     B = logical_and(I[0:-1:2, :], I[1::2, :])
     B = logical_or(B[:, 0:-1:2], B[:, 1::2])
     C = logical_and(A, B)
     return C
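The reduction halves each image dimension, collapsing every 2x2 block of the boolean image into one output pixel. A minimal sketch of that behavior, written as a free function and assuming plain numpy (the original presumably picks up logical_and/logical_or from scipy's old numpy re-exports):

import numpy as np
from numpy import logical_and, logical_or

def reduction_T_3(I):
    # same body as the method above, minus self
    A = logical_or(I[0:-1:2, :], I[1::2, :])
    A = logical_and(A[:, 0:-1:2], A[:, 1::2])
    B = logical_and(I[0:-1:2, :], I[1::2, :])
    B = logical_or(B[:, 0:-1:2], B[:, 1::2])
    return logical_and(A, B)

I = np.array([[1, 1, 0, 0],
              [1, 0, 0, 0],
              [1, 1, 1, 1],
              [1, 1, 1, 0]], dtype=bool)
print(reduction_T_3(I))  # 2x2 result: one boolean per 2x2 input block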
Example #5
    def __init__(self, kind, dic_init):

        self.kind = kind
        if (self.kind == 'auto'):
            fname = dic_init['data_auto']
        elif (self.kind == 'cross'):
            fname = dic_init['data_cross']
        elif (self.kind == 'autoQSO'):
            fname = dic_init['data_autoQSO']
        else:
            raise ValueError("unknown correlation kind: %s" % self.kind)

        rmin = dic_init['rmin']
        rmax = dic_init['rmax']
        mumin = dic_init['mumin']
        mumax = dic_init['mumax']
        bin_size = dic_init['bin_size']

        h = pyfits.open(fname)
        da = h[1].data.DA
        co = h[1].data.CO

        self.dm = h[1].data.DM
        self.rt = h[1].data.RT
        self.rp = h[1].data.RP
        self.z = h[1].data.Z
        self.da_all = copy.deepcopy(da)
        self.co_all = copy.deepcopy(co)

        ### Get the center of the bins from the regular grid
        bin_center_rt = np.zeros(self.rt.size)
        bin_center_rp = np.zeros(self.rp.size)
        for i in np.arange(-self.rt.size - 1, self.rt.size + 1,
                           1).astype('int'):
            bin_center_rt[sp.logical_and(self.rt >= bin_size * i,
                                         self.rt < bin_size *
                                         (i + 1.))] = bin_size * (i + 0.5)
            bin_center_rp[sp.logical_and(self.rp >= bin_size * i,
                                         self.rp < bin_size *
                                         (i + 1.))] = bin_size * (i + 0.5)

        r = np.sqrt(bin_center_rt**2 + bin_center_rp**2)
        mu = bin_center_rp / r

        cuts = (r > rmin) & (r < rmax) & (mu >= mumin) & (mu <= mumax)
        if sp.isfinite(dic_init['r_per_min']):
            cuts = cuts & (bin_center_rt > dic_init['r_per_min'])
        if sp.isfinite(dic_init['r_per_max']):
            cuts = cuts & (bin_center_rt < dic_init['r_per_max'])
        if sp.isfinite(dic_init['r_par_min']):
            cuts = cuts & (bin_center_rp > dic_init['r_par_min'])
        if sp.isfinite(dic_init['r_par_max']):
            cuts = cuts & (bin_center_rp < dic_init['r_par_max'])

        co = co[:, cuts]
        co = co[cuts, :]
        da = da[cuts]

        self.cuts = cuts
        self.da = da
        self.co = co
        self.ico = sp.linalg.inv(co)
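The bin-center loop above walks every candidate index i and masks with sp.logical_and; since each value of rt (or rp) lands in exactly one bin, the same centers can be computed without the loop. A loop-free sketch with hypothetical stand-in values:

import numpy as np

rt = np.array([-7.5, -0.1, 3.2, 11.9])  # stand-in for self.rt
bin_size = 4.0

# any rt in [i*bin_size, (i+1)*bin_size) maps to (i + 0.5)*bin_size,
# which is exactly what the logical_and loop assigns
bin_center_rt = bin_size * (np.floor(rt / bin_size) + 0.5)
print(bin_center_rt)  # [-6. -2.  2. 10.]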
Example #6
    def coordreduce(self,coorddict):
        assert isinstance(coorddict, dict), "coorddict needs to be a dictionary"
        ncoords = self.Cart_Coords.shape[0]
        coordlist = ['x','y','z','r','theta','phi']

        coordkeysorg = coorddict.keys()
        coordkeys = [ic for ic in coordkeysorg if ic in coordlist]

        ckeep = sp.ones(ncoords,dtype=bool)

        for ic in coordkeys:
            currlims = coorddict[ic]
            if ic=='x':
                tempcoords = self.Cart_Coords[:,0]
            elif ic=='y':
                tempcoords = self.Cart_Coords[:,1]
            elif ic=='z':
                tempcoords = self.Cart_Coords[:,2]
            elif ic=='r':
                tempcoords = self.Sphere_Coords[:,0]
            elif ic=='theta':
                tempcoords = self.Sphere_Coords[:,1]
            elif ic=='phi':
                tempcoords = self.Sphere_Coords[:,2]
            keeptemp = sp.logical_and(tempcoords>=currlims[0],tempcoords<currlims[1])
            ckeep = sp.logical_and(ckeep,keeptemp)
        # prune the arrays
        self.Cart_Coords=self.Cart_Coords[ckeep]
        self.Sphere_Coords=self.Sphere_Coords[ckeep]
        self.Param_List=self.Param_List[ckeep]
        self.Velocity=self.Velocity[ckeep]
Example #7
def control_step(t, u0, u1, t0, t1, t2, t3):
    """
    Control function, has value u0 for t < t0, linearly transitions from u0 
    to u1 when t0 < t < t1, has value u1 when t1 < t < t2, linearly transistions
    from u1 to u0 when t2 < t < t3, and has value u0 when t > t3.
    """
    f = scipy.zeros(t.shape)
    # Masks for selecting time sections
    mask0 = t < t0
    mask1 = scipy.logical_and(t >= t0, t < t1)
    mask2 = scipy.logical_and(t >= t1, t < t2)
    mask3 = scipy.logical_and(t >= t2, t < t3)
    mask4 = t >= t3
    # Constants for linear transition regions
    a_01 = (u0 - u1) / (t0 - t1)
    a_23 = (u1 - u0) / (t2 - t3)
    b_01 = u0 - a_01 * t0
    b_23 = u1 - a_23 * t2
    # Assign function values
    f[mask0] = u0 * scipy.ones(t[mask0].shape)
    f[mask1] = a_01 * t[mask1] + b_01
    f[mask2] = u1 * scipy.ones(t[mask2].shape)
    f[mask3] = a_23 * t[mask3] + b_23
    f[mask4] = u0 * scipy.ones(t[mask4].shape)
    return f
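A quick usage sketch for control_step, assuming a scipy old enough to re-export numpy's zeros/ones/logical_and (on a modern stack, replace the scipy.* calls inside the function with numpy.*):

import numpy as np

t = np.linspace(0.0, 10.0, 11)
f = control_step(t, u0=0.0, u1=1.0, t0=2.0, t1=4.0, t2=6.0, t3=8.0)
print(f)  # 0 before t=2, ramps to 1 over [2, 4], holds, ramps back over [6, 8]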
Example #8
def main():
    args = getArguments(getParser())

    # prepare logger
    logger = Logger.getInstance()
    if args.debug: logger.setLevel(logging.DEBUG)
    elif args.verbose: logger.setLevel(logging.INFO)

    # constants
    colours = {'i': 10, 'o': 11}

    # load volumes
    marker_data, _ = load(args.marker)
    contour_data, _ = load(args.contour)

    # perform check
    contour_data = contour_data == colours[args.type]
    marker_data_fg = marker_data == 1
    marker_data_bg = marker_data == 2
    if scipy.logical_and(contour_data, marker_data_fg).any():
        logger.warning(
            'Intersection between {} and {} (type {}) in foreground.'.format(
                args.marker, args.contour, args.type))
    elif scipy.logical_and(contour_data, marker_data_bg).any():
        logger.warning(
            'Intersection between {} and {} (type {}) in background.'.format(
                args.marker, args.contour, args.type))
    else:
        print "No intersection."
Example #11
def sqr_wave(t, amp, T, epsilon):
    """
    Generates a square wave with the given frequency and amplitude
    """
    f = scipy.zeros(t.shape)
    t_curr = 0.5 * T
    cnt = 0
    while (t_curr - T < t[-1]):
        if cnt % 2 == 0:
            val = amp
        else:
            val = -amp

        t0 = t_curr - 0.5 * T
        t1 = t_curr - 0.5 * T + 0.5 * epsilon
        mask0 = scipy.logical_and(t >= t0, t < t1)
        interp_func = scipy.interpolate.interp1d([t0, t1], [0, val])
        f[mask0] = interp_func(t[mask0])

        t0 = t_curr - 0.5 * T + 0.5 * epsilon
        t1 = t_curr - 0.5 * epsilon
        mask1 = scipy.logical_and(t >= t0, t < t1)
        f[mask1] = val

        t0 = t_curr - 0.5 * epsilon
        t1 = t_curr
        mask2 = scipy.logical_and(t >= t0, t < t1)
        interp_func = scipy.interpolate.interp1d([t0, t1], [val, 0])
        f[mask2] = interp_func(t[mask2])

        t_curr += 0.5 * T
        cnt += 1
    return f
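Each segment of sqr_wave follows the same pattern: mask a time window with logical_and, then fill it either with a constant or with an interp1d ramp across the window's endpoints. The ramp step in isolation, as a sketch:

import numpy as np
from scipy import interpolate

t = np.linspace(0.0, 1.0, 101)
f = np.zeros(t.shape)

# ramp from 0 to 1 over [0.2, 0.4), exactly as each transition above does
t0, t1, val = 0.2, 0.4, 1.0
mask = np.logical_and(t >= t0, t < t1)
ramp = interpolate.interp1d([t0, t1], [0.0, val])
f[mask] = ramp(t[mask])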
Example #12
    def getEntryExitTime(self,ra,dec,t0):
        idx0=self.getIndex(t0)        
        infov=self.inFovTime(ra,dec)
        theta=self.getThetaTime(ra,dec)
        zenith=self.getZenithTime(ra,dec)
        
        #ra_scz  = self.RA_SCZ[idx0]
        #dec_scz  = self.DEC_SCZ[idx0]
        #ra_zenith  = self.RA_ZENITH[idx0]
        #dec_zenith  = self.DEC_ZENITH[idx0]
        #point   = coord.SkyCoord(ra=ra*u.degree, dec=dec*u.degree, frame='icrs')
        ##scz     = coord.SkyCoord(ra=ra_scz*u.degree, dec=dec_scz*u.degree, frame='icrs')
        #zenith2 = coord.SkyCoord(ra=ra_zenith*u.degree, dec=dec_zenith*u.degree, frame='icrs')

        #        print ra,dec,theta[idx0],zenith[idx0],infov[idx0],scz.separation(point).degree,zenith2.separation(point).degree

        #print len(infov), len(infov[infov==1]),len(infov[infov==0])
        
        start=self.SC_TSTART-t0
        stop=self.SC_TSTOP-t0
        #print infov.shape
        #print start.shape
        #print stop.shape
        infov_t0=infov[idx0]
        
        if infov_t0:
            t_0 = stop[sp.logical_and(infov==0,stop<0)][-1]
            t_1 = stop[sp.logical_and(infov==0,stop>0)][0]
        else:
            t_0 = stop[sp.logical_and(infov==1,stop>0)][0]
            t_1 = stop[sp.logical_and(infov==0,stop>t_0)][0]
        return t_0,t_1 
Example #13
    def getRegion(self,size=3e4,min_nSNPs=1,chrom_i=None,pos_min=None,pos_max=None):
        """
        Sample a region from the piece of genotype X, chrom, pos
        minSNPnum:  minimum number of SNPs contained in the region
        Ichrom:  restrict X to chromosome Ichrom before taking the region
        cis:        bool vector that marks the sorted region
        region:  vector that contains chrom and init and final position of the region
        """
        if (self.chrom is None) or (self.pos is None):
            bim = plink_reader.readBIM(self.bfile,usecols=(0,1,2,3))
            chrom = SP.array(bim[:,0],dtype=int)
            pos   = SP.array(bim[:,3],dtype=int)
        else:
            chrom = self.chrom
            pos   = self.pos

        if chrom_i is None:
            n_chroms = chrom.max()
            chrom_i  = int(SP.ceil(SP.rand()*n_chroms))

        pos   = pos[chrom==chrom_i]
        chrom = chrom[chrom==chrom_i]

        ipos = SP.ones(len(pos),dtype=bool)
        if pos_min is not None:
            ipos = SP.logical_and(ipos,pos_min<pos)

        if pos_max is not None:
            ipos = SP.logical_and(ipos,pos<pos_max)

        pos = pos[ipos]
        chrom = chrom[ipos]

        if size==1:
            # select a single SNP; floor keeps the random index in range
            idx = int(SP.floor(pos.shape[0]*SP.rand()))
            cis  = SP.arange(pos.shape[0])==idx
            region = SP.array([chrom_i,pos[idx],pos[idx]])
        else:
            while 1:
                idx = int(SP.floor(pos.shape[0]*SP.rand()))
                posT1 = pos[idx]
                posT2 = pos[idx]+size
                if posT2<=pos.max():
                    cis = chrom==chrom_i
                    cis*= (pos>posT1)*(pos<posT2)
                    if cis.sum()>min_nSNPs: break
            region = SP.array([chrom_i,posT1,posT2])

        start = SP.nonzero(cis)[0].min()
        nSNPs  = cis.sum()

        if self.X is None:
            rv = plink_reader.readBED(self.bfile,useMAFencoding=True,start = start, nSNPs = nSNPs,bim=bim)
            Xr = rv['snps']
        else:
            Xr = self.X[:,start:start+nSNPs]

        return Xr, region
Example #14
def get_prf_data(pixel_values,
                 pixel_stddev,
                 pixel_offsets,
                 error_threshold=0.1):
    """
    Return the PRF measurements for (a subset of) the image.

    Args:
        pixel_values(2-D float array):    The calibrated pixel responses from
            the image to include in the plot.

        pixel_stddev(2-D float array):    The estimated standard deviation of
            `pixel_values`.

        pixel_offsets:    The slice of the return value of find_pixel_offsets()
            corresponding to `pixel_values`.

        error_threshold(float):    PRF measurements with estimated errors
            above this value are excluded from the result.

    Returns:
        (2-D float array, 2-D float array, 2-D float array, 2-D float array):

            * The x-offsets of the points at which PRF measurements are
              available.

            * The y-offsets of the points at which PRF measurements are
              available.

            * The measured normalized PRF at the available offsets

            * The estimated errors of the PRF measurements.
    """

    prf_measurements = ((pixel_values - pixel_offsets['zero_point'])
                        / pixel_offsets['norm'])

    prf_errors = pixel_stddev / pixel_offsets['norm']

    #False positive
    #pylint: disable=assignment-from-no-return
    include = scipy.logical_and(scipy.isfinite(prf_measurements),
                                scipy.isfinite(prf_errors))
    include = scipy.logical_and(include, prf_errors < error_threshold)
    #pylint: enable=assignment-from-no-return
    return scipy.stack((
        pixel_offsets['x_off'][include],
        pixel_offsets['y_off'][include],
        prf_measurements[include],
        prf_errors[include]
    ))
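The include mask reduces to two chained conditions: keep only finite measurement/error pairs, then apply the error cut. A toy sketch:

import numpy as np

prf = np.array([0.2, np.nan, 0.5, 0.9])    # stand-in for prf_measurements
err = np.array([0.05, 0.02, np.inf, 0.3])  # stand-in for prf_errors

include = np.logical_and(np.isfinite(prf), np.isfinite(err))
include = np.logical_and(include, err < 0.1)  # error_threshold = 0.1
print(include)  # [ True False False False]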
Example #15
def main(data_dir='../data/'):
    url = "https://data.cityofchicago.org/api/views/ijzp-q8t2/rows.csv?accessType=DOWNLOAD"
    fname = 'AllChicagoCrimes.csv'
    target = data_dir + fname
    print("Downloading raw Chicago crime data...")
    download_file(url, target)

    ProgressBar().register()  # Setup Dask progress bar
    chicago = dd.read_csv(target, assume_missing=True, parse_dates=['Date'])
    chicago = chicago[sp.logical_and(2012 <= chicago['Year'],
                                     chicago['Year'] <= 2017)]

    chicago = chicago.loc[chicago["Primary Type"] == "HOMICIDE"]
    chicago = chicago.dropna(subset=[
        'X Coordinate', 'Y Coordinate', 'Community Area', 'Latitude',
        'Longitude', 'Date'
    ])

    chicago['x'] = chicago['X Coordinate']
    chicago['y'] = chicago['Y Coordinate']
    chicago['t'] = pd.to_numeric(chicago.Date)

    # 0-1 normalize t to prevent overflow issues in PredPol
    chicago.t -= chicago.t.min()
    chicago.t /= chicago.t.max()

    print("Processing raw Chicago crime data...")
    chicago = chicago.compute()
    print("Processing complete.")

    # Compute distances in meters from longitude and latitude.
    x_min = chicago.x.min()
    y_min = chicago.y.min()
    x_max = chicago.x.max()
    y_max = chicago.y.max()
    MIN_LON = chicago.loc[chicago.x == x_min, 'Longitude'].min()
    MAX_LON = chicago.loc[chicago.x == x_max, 'Longitude'].max()
    MIN_LAT = chicago.loc[chicago.y == y_min, 'Latitude'].min()
    MAX_LAT = chicago.loc[chicago.y == y_max, 'Latitude'].max()
    meters_height = geopy.distance.distance((MIN_LAT, MIN_LON),
                                            (MAX_LAT, MIN_LON)).m
    meters_width = geopy.distance.distance((MIN_LAT, MIN_LON),
                                           (MIN_LAT, MAX_LON)).m
    # print(meters_height, meters_width)

    # Normalize by these values to obtain roughly 150m by 150m grid cells
    chicago.x -= chicago.x.min()
    chicago.y -= chicago.y.min()
    chicago.x /= chicago.x.max()
    chicago.y /= chicago.y.max()
    chicago.x *= round(meters_width / 150 / 100, 2)
    chicago.y *= round(meters_height / 150 / 100, 2)

    chicago.to_csv(data_dir + 'ChicagoHomicides2012to2017.csv')
    print("Successfully wrote cleaned Chicago crime data to file.")

    chicago_small = chicago[sp.logical_and(2014 <= chicago['Year'],
                                           chicago['Year'] <= 2015)]
    chicago_small.to_csv(data_dir + 'ChicagoHomicides2014to2015.csv')
Example #16
def separate_cal(data, n_bins_cal, cal_mask=None) :
    """Function separates data into cal_on and cal_off.

    No guarantee that the data argument remains unchanged."""

    # Allocate memory for output
    ntime, npol, nfreq = data.shape
    n_bins_after_cal = ntime//n_bins_cal
    out_data = sp.zeros((n_bins_after_cal, npol, 2, nfreq), dtype=sp.float32)
    
    # Get the phase offset of the cal.
    try :
        if cal_mask is None:
            first_on, n_blank = get_cal_mask(data, n_bins_cal)
        else :
            first_on, n_blank = cal_mask
    except ce.DataError :
        print "Discarded record due to bad profile. "
        out_data[:] = float('nan')
    else :
        # How many samples for each cal state.
        n_cal_state = n_bins_cal//2 - n_blank
        first_off = (first_on + n_bins_cal//2) % n_bins_cal

        # Reshape data to add an index to average over.
        data.shape = (n_bins_after_cal, n_bins_cal) + data.shape[1:]

        # Get the masks for the on and off data.
        inds = sp.arange(n_bins_cal)
        if first_on == min((sp.arange(n_cal_state) +
                        first_on)% n_bins_cal) :
            on_mask = sp.logical_and(inds >= first_on, inds < 
                                     first_on+n_cal_state)
        else :
            on_mask = sp.logical_or(inds >= first_on, inds < 
                                (first_on + n_cal_state) % n_bins_cal)
        if first_off == min((sp.arange(n_cal_state) +
                        first_off)% n_bins_cal) :
            off_mask = sp.logical_and(inds >= first_off, inds < 
                                  first_off + n_cal_state)
        else :
            off_mask = sp.logical_or(inds >= first_off, inds < 
                                 (first_off + n_cal_state) % n_bins_cal)

        # Find cal on and cal off averages.  Always use mean not median due to
        # discretization noise.
        # This loop is much faster than the built in numpy mean() for some
        # reason.
        for ii in range(n_bins_cal) :
            if on_mask[ii]:
                out_data[:,:,0,:] += data[:,ii,:,:]
            elif off_mask[ii]:
                out_data[:,:,1,:] += data[:,ii,:,:]
        out_data[:,:,0,:] /= sp.sum(on_mask)
        out_data[:,:,1,:] /= sp.sum(off_mask)


    return out_data
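The logical_and/logical_or branching above handles cal windows that wrap past the end of the cycle: a contiguous window is the AND of two one-sided conditions, while a wrapped one is their OR. A sketch of both cases:

import numpy as np

n_bins_cal = 8
inds = np.arange(n_bins_cal)

# contiguous window [2, 5): AND of the two bounds
on_mask = np.logical_and(inds >= 2, inds < 5)
print(on_mask.astype(int))    # [0 0 1 1 1 0 0 0]

# window of length 3 starting at 6 wraps to {6, 7, 0}: OR instead of AND
first_on, n_state = 6, 3
wrap_mask = np.logical_or(inds >= first_on,
                          inds < (first_on + n_state) % n_bins_cal)
print(wrap_mask.astype(int))  # [1 0 0 0 0 0 1 1]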
Example #17
def spk_phase(t, V1, V2, spike_thresh=0):
    #phase_mean, isi_mean = spk_phase(t, V1, V2, spike_thresh=0)
    t = t[1:]
    time1 = t[sp.logical_and(V1[:-1] < spike_thresh, V1[1:] >= spike_thresh)]
    time2 = t[sp.logical_and(V2[:-1] < spike_thresh, V2[1:] >= spike_thresh)]
    l = sp.amin([len(time1), len(time2)])
    isi_mean = sp.mean(sp.diff(time1))
    phase_mean = sp.mean((time1[0:l] - time2[0:l]) / isi_mean * 2 * sp.pi)
    return phase_mean, isi_mean
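The indexing trick in spk_phase (and in isi further below) finds upward threshold crossings by comparing each sample against its successor. A self-contained sketch on a sine wave:

import numpy as np

t = np.linspace(0.0, 1.0, 1001)
V = np.sin(2 * np.pi * 5 * t)  # 5 Hz test signal

# rising crossings: previous sample below threshold, next sample at/above
t_cross = t[1:][np.logical_and(V[:-1] < 0, V[1:] >= 0)]
print(t_cross)           # one crossing per period, ~0.2 s apart
print(np.diff(t_cross))  # the inter-spike intervals these functions average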
Example #18
 def interpolate(self, canvas, status=None):
     # Clear the interpolated canvas
     canvas.interpolated = sp.zeros_like(canvas.fringes_image) - 1024.0
     if status is not None:
         status.set("Performing the interpolation", 70)
     else:
         print("Performing the interpolation")
     # Iterate over all the triangles in the triangulation
     for triangle in self.triangles:
         # Create a shortcut to the triangle's vertices
         co = triangle.vert_coordinates
         # Calculate a few constants for the Barycentric Coordinates
         # More info: https://codeplea.com/triangular-interpolation
         div = (co[1, 0] - co[2, 0]) * (co[0, 1] - co[2, 1]) + (
             co[2, 1] - co[1, 1]) * (co[0, 0] - co[2, 0])
         a0 = (co[1, 0] - co[2, 0])
         a1 = (co[2, 1] - co[1, 1])
         a2 = (co[2, 0] - co[0, 0])
         a3 = (co[0, 1] - co[2, 1])
         # Calculate the bounds of a rectangle that fully encloses
         # the current triangle
         xmin = int(sp.amin(triangle.vert_coordinates[:, 1]))
         xmax = int(sp.amax(triangle.vert_coordinates[:, 1])) + 1
         ymin = int(sp.amin(triangle.vert_coordinates[:, 0]))
         ymax = int(sp.amax(triangle.vert_coordinates[:, 0])) + 1
         # Take out slices of the x and y arrays,
         # containing the points' coordinates
         x_slice = canvas.x[ymin:ymax, xmin:xmax]
         y_slice = canvas.y[ymin:ymax, xmin:xmax]
         # Use Barycentric Coordinates and the magic of numpy (scipy in this
         # case) to perform the calculations with the C backend, instead
         # of iterating on pixels with Python loops.
          # If you have not worked with numpy arrays before, dear reader,
         # the idea is that if x = [[0 1]
         #                          [2 3]],
         # then x*3+1 is a completely valid operation, returning
         # x = [[1 4]
         #      [7 10]]
         # Basically, we can do maths on arrays as if they were variables.
         # Convenient, and really fast!
         w0 = (a0 * (x_slice - co[2, 1]) + a1 * (y_slice - co[2, 0])) / div
         w1 = (a2 * (x_slice - co[2, 1]) + a3 * (y_slice - co[2, 0])) / div
         w2 = sp.round_(1 - w0 - w1, 10)
          # Calculate the values for a rectangle enclosing our triangle
          # (named values_rect rather than slice, to avoid shadowing the builtin)
          values_rect = (self.values[triangle.vertices[0]] * w0 +
                         self.values[triangle.vertices[1]] * w1 +
                         self.values[triangle.vertices[2]] * w2)
         # Make a mask (so that we only touch the points
         # inside of the triangle).
         # In Barycentric Coordinates the points outside of the triangle
         # have at least one of the coefficients negative, so we use that
         mask = sp.logical_and(sp.logical_and(w0 >= 0, w1 >= 0), w2 >= 0)
         # Change the points in the actual canvas
          canvas.interpolated[ymin:ymax, xmin:xmax][mask] = values_rect[mask]
     canvas.interpolation_done = True
Example #19
    def __call__(self, X, alpha=1.0, bytes=False):

        rgba = sp.array(self.cmap(X, alpha=1.0, bytes=False))
        # fill the RGB channels with 1.0 wherever X lies inside fill_range
        fill = sp.logical_and(X >= self.fill_range[0], X <= self.fill_range[1])
        for channel in range(3):
            rgba[..., channel] = sp.where(fill, 1.0, rgba[..., channel])
        return rgba
Example #20
    def _commonx(self, other, res='coarsest', source='linspace'):
        """Merge x-axis discretizations of this object and another.

        If method is "linspace", make a new uniform spacing.
        If method is "original", use one of the original discretizations.

        If res (resolution) is "self" or "this", use the resolution of this
        object.
        If res is "other", use the resolution of the other object.
        If res is "coarsest", use the coarsest discretization of the two
        objects.
        If res is "finest", use the finest discretization of the two objects.
        If res is "medium", use a medium discretization
        (implies method "linspace")."""
        # 2012-06-27 - 2013-06-24

        # if an empty function object is given
        if len(self) == 0 or len(other) == 0:
            return scipy.empty(shape=(0,))

        # determine extremal values
        min1, max1 = min(self.x),  max(self.x)  # use self.box()[:2]
        min2, max2 = min(other.x), max(other.x)  # use other.box()[:2]
        newmin = max(min1, min2)
        newmax = min(max1, max2)

        # choose coarsest discretization
        ### maybe offer option to use "coarse", "fine", "medium" discretization
        cand1 = self.x[scipy.logical_and(self.x >= newmin, self.x <= newmax)]
        cand2 = other.x[scipy.logical_and(other.x >= newmin,
                                          other.x <= newmax)]
        if res is not None and 'other'.startswith(res):
            winner = cand2
        elif res is not None and \
                ('self'.startswith(res) or 'this'.startswith(res)):
            winner = cand1
        elif res is not None and 'finest'.startswith(res):
            winner = cand1 if len(cand1) > len(cand2) else cand2
        elif res is not None and 'medium'.startswith(res):
            source = 'linspace'
            # mean over both lengths (scipy.mean(a, b) would treat b as the
            # axis argument); cast to int so list repetition works
            winner = [0]*int(scipy.ceil(scipy.mean([len(cand1), len(cand2)])))
        else:
            winner = cand1 if len(cand1) < len(cand2) else cand2

        if source is not None and 'linspace'.startswith(source):
            newx = scipy.linspace(newmin, newmax, len(winner))
        else:
            # res may not be "medium" here!
            newx = winner

        return newx
Example #22
 def accumulator(acc, curr):
     predictions = sp.double(p_val < curr)
     tp = sp.sum(
         sp.logical_and(predictions == 1,
                        sp.asarray(Y_val == 1).ravel()))
     fp = sp.sum(
         sp.logical_and(predictions == 1,
                        sp.asarray(Y_val == 0).ravel()))
     fn = sp.sum(
         sp.logical_and(predictions == 0,
                        sp.asarray(Y_val == 1).ravel()))
     prec = tp / (tp + fp)
     rec = tp / (tp + fn)
     F1 = 2 * prec * rec / (prec + rec)
     return {'epsilon': curr, 'F1': F1} if F1 > acc['F1'] else acc
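The accumulator's shape ({'epsilon', 'F1'}) suggests it is meant to be folded over candidate thresholds with functools.reduce. A self-contained sketch with toy stand-ins for the snippet's free variables p_val and Y_val:

import numpy as np
from functools import reduce

p_val = np.array([0.01, 0.20, 0.03, 0.90, 0.04])  # toy anomaly scores
Y_val = np.array([1, 0, 1, 0, 1])                 # toy labels

def accumulator(acc, curr):
    predictions = (p_val < curr).astype(float)
    tp = np.sum(np.logical_and(predictions == 1, Y_val == 1))
    fp = np.sum(np.logical_and(predictions == 1, Y_val == 0))
    fn = np.sum(np.logical_and(predictions == 0, Y_val == 1))
    prec = tp / (tp + fp)
    rec = tp / (tp + fn)
    F1 = 2 * prec * rec / (prec + rec)
    return {'epsilon': curr, 'F1': F1} if F1 > acc['F1'] else acc

best = reduce(accumulator, [0.02, 0.05, 0.5], {'epsilon': None, 'F1': 0.0})
print(best)  # {'epsilon': 0.05, 'F1': 1.0} on this toy data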
Example #23
def conditionVal(name='val7', nozero=False, conc='c_w_l'):
    #this is a program which extracts the useful data from the CSXR data

    val = loadData(name)
    idx = loadData('id')
    c_w_l = loadData(conc)
    shot = loadData('shot')

    base = loadData('baseline')
    width = loadData('width'+name[-1])

    fiducial = (base+val/scipy.sqrt(scipy.pi)/width)*123+1000

  
    #c_w_l must have a value below 1e-2 and nonzero
    good = c_w_l > 0 #this had to be done based on how the M matrix was generated (for all non-zero c_w_l values)

    idx = idx[good]
    val = val[good]
    shot = shot[good]
    c_w_l = c_w_l[good]


    #c_w_l must have a value below 1e-2 and nonzero
    good = scipy.logical_and(scipy.logical_and(fiducial[good] < 6e4, c_w_l < 1e-2), val*disp > 1e0)

    idx = idx[good]
    val = val[good]
    shot = shot[good]
    c_w_l = c_w_l[good]


    #val[val < 1.] = 0.

    #normalize to time
    #all valid data with the new computer has 8ms exposure time
    #older shots had 5ms exposure times

    val[shot <= 32858] /= 5e-3
    val[shot > 32858] /= 8e-3

    if nozero:
        good = val != 0
        idx = idx[good]
        val = val[good]
        c_w_l = c_w_l[good]
    
    return val, c_w_l, idx
Example #24
    def getX(self,standardized=True,maf=None):
        """
        return SNPs, if neccessary standardize them
        """
        X = SP.copy(self.X)

        # test for missing values
        isnan = SP.isnan(X)
        for i in isnan.sum(0).nonzero()[0]:
            # set to mean 
            X[isnan[:,i],i] = X[~isnan[:,i],i].mean()
                
        if maf is not None:
            LG.debug('filter SNPs')
            LG.debug('... number of SNPs(before filtering): %d'%X.shape[1])
            idx_snps = SP.logical_and(X[self.idx_samples].mean(0)>0.1,X[self.idx_samples].mean(0)<0.9)
            LG.debug('... number of SNPs(after filtering) : %d'%idx_snps.sum())
        else:
            idx_snps = SP.ones(self.n_f,dtype=bool)
        
        if standardized:
            LG.debug('standardize SNPs')
            X = X[self.idx_samples][:,idx_snps]
            X-= X.mean(0)
            X /= X.std(0,dtype=NP.float32)
            X /= SP.sqrt(X.shape[1])
            return X
      
        return X[self.idx_samples][:,idx_snps]
Example #25
    def timereduce(self, timebounds):
        """This method will remove any data points out side of the time limits.
        Inputs
            timebounds - A list of length 2 of posix times."""

        lowerbnd = self.times[:, 0] >= timebounds[0]
        upperbnd = self.times[:, 1] <= timebounds[1]
        keep = sp.logical_and(lowerbnd, upperbnd)
        if self.issatellite():
            self.times = self.times[keep]
            self.dataloc = self.dataloc[keep]
            for idata in self.datanames():
                if isinstance(self.data[idata], DataFrame):
                    self.data[idata] = self.data[idata][
                        self.times]  #data is a vector
                else:
                    self.data[idata] = self.data[idata][keep]
        else:
            self.times = self.times[keep]
            for idata in self.datanames():
                #XXX Probably want to check this with a data frame
                if isinstance(self.data[idata], DataFrame):
                    self.data[idata] = self.data[
                        idata][:, self.times]  #data is a vector
                else:
                    self.data[idata] = self.data[idata][:, keep]
Example #26
    def get_width_upper(max, width, upper, array_in):

        from copy import copy
        array = copy(array_in)
        ''' now pick objects somewhat larger than star column '''
        mask = array.field(radius_var) > max + width
        array = array[mask]
        rads = array.field(radius_var)  #[mask]
        mask = rads < max + width + 0.6
        array = array[mask]
        mags = array.field(ap_type)
        mags.sort()
        ''' take 20% percentile and subtract 0.5 mag '''
        if len(mags) == 0:
            upper = 99
        else:
            upper = mags[int(len(mags) * 0.2)]  #+ 0.5

        array = copy(array_in)
        maskA = array.field(ap_type) < upper  #+ 0.5
        maskB = array.field(radius_var) < max + width
        maskC = array.field(radius_var) > max - width
        # logical_and takes only two input arrays (a third would be the "out"
        # argument), so chain the calls to combine all three masks
        mask = scipy.logical_and(maskA, scipy.logical_and(maskB, maskC))
        array = array[mask]
        rads = array.field(radius_var)

        pylab.clf()
        a, b, varp = pylab.hist(array.field(radius_var),
                                bins=scipy.arange(1.0, 8, 0.04))
        z = sorted(zip(a, b))
        max = z[-1][1]
        width = 1.0 * scipy.std(rads)
        print('width', width, 'max', max, 'upper', upper, 'rads', rads)
        return max, width, upper
Example #27
def Dirac(x, sigma):
    from scipy import logical_and, pi, cos

    f = (1.0 / 2.0 / sigma) * (1 + cos(pi * x / sigma))
    b = logical_and(x <= sigma, x >= -sigma)
    f = f * b
    return f
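This is the standard smoothed delta from level-set methods; on its support [-sigma, sigma] it integrates to one, which is easy to verify numerically. A sketch using numpy in place of the scipy re-exports:

import numpy as np

def Dirac(x, sigma):
    # same smoothed delta as above, numpy names instead of scipy's
    f = (1.0 / 2.0 / sigma) * (1 + np.cos(np.pi * x / sigma))
    b = np.logical_and(x <= sigma, x >= -sigma)
    return f * b

x = np.linspace(-2.0, 2.0, 4001)
print(np.trapz(Dirac(x, 0.5), x))  # ~1.0: the kernel integrates to unity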
Example #28
def adaptive_threshold(SpikeTrain, adaptive_thresh_lower,
                       adaptive_thresh_upper):
    """
    removes all spikes from a SpikeTrain whose amplitudes do not fall within
    the specified variable bounds.

    Args:
        SpikeTrain (neo.core.SpikeTrain): The SpikeTrain
        adaptive_thresh_lower (neo.core.AnalogSignal): the variable lower bound
        adaptive_thresh_upper (neo.core.AnalogSignal): the variable upper bound

    Returns:
        neo.core.SpikeTrain: the resulting SpikeTrain
    """
    SpikeTrain = copy.deepcopy(SpikeTrain)

    spike_inds = get_spike_inds(SpikeTrain)
    spike_amps = SpikeTrain.waveforms.max(axis=1)

    cond_1 = adaptive_thresh_lower.magnitude[spike_inds] < spike_amps.magnitude
    cond_2 = adaptive_thresh_upper.magnitude[spike_inds] > spike_amps.magnitude

    good_inds = sp.logical_and(cond_1, cond_2)

    SpikeTrain = SpikeTrain[good_inds.flatten()]

    return SpikeTrain
Example #29
    def cut(self, range=None, lower=None, upper=None):
        """Cut away all data points whose x-value is outside of the given
        "range", or those that are smaller than "lower" or greater than
        "upper"."""
        # 2012-07-12

        # get range
        if range is None:
            # default range captures all values
            range = self.box()[:2]
        else:
            range = list(range)
            if scipy.array(range).shape != (2, ):
                raise ValueError('range must be 2-tuple')

        # overwrite range with lower and upper value
        range = list(range)
        if lower is not None:
            range[0] = lower
        if upper is not None:
            range[1] = upper

        #if range[0] >= range[1]:
        #raise ValueError, 'lower bound must be smaller than upper bound'
        ### so then, nothing is kept, just fine

        # cut away data points
        keep = scipy.logical_and(self.x >= range[0], self.x <= range[1])
        self.x = self.x[keep]
        self.y = self.y[keep]
Example #30
 def getEntryExitTime(self, ra, dec, t0):
     idx0 = self.getIndex(t0)
     infov = self.inFovTime(ra, dec)
     theta = self.getThetaTime(ra, dec)
     zenith = self.getZenithTime(ra, dec)
     start = self.SC_TSTART - t0
     stop = self.SC_TSTOP - t0
     infov_t0 = infov[idx0]
      print(t0, idx0, infov_t0)
     if infov_t0: t_0 = stop[sp.logical_and(infov == 0, stop < 0)][-1]
     else: t_0 = start[sp.logical_and(infov == 1, start > 0)][0]
     t1 = start[sp.logical_and(infov == 0, stop > t_0)][0]
     t_1 = stop[self.getIndex(t0 + t1)]
     #if stop[ii_1+1] > stop[ii_1]+60: t_1 = stop[ii_1]
     #print '%10.3f %10.3f %5d %10.3f %10.3f %10.3f %10.3f ' %(ra,dec,ii_1,stop[ii_1],stop[ii_1+1],t_0,t_1)
     return t_0, t_1
Example #31
def gridSearch(maxT, sumM, name, loc='svmcrossvalid.p', reduction=50, k=2.2e35,
               gamma=[-3, 3], C=[0, 6], nozero=False, conc='c_w_l',
               lims=[2.2e3, 9e3]):

    val, c_w_l, idx = conditionVal(name=name, nozero=nozero, conc=conc)
    idx1 = sample(val, len(val)//reduction)
    idx2 = sample(val, len(val)//reduction) #very small subsamples started for testing algos
    index0 = scipy.logspace(gamma[0],gamma[1],int(abs(gamma[0]-gamma[1])+1))
    index1 = scipy.logspace(C[0],C[1],int(abs(C[0]-C[1])+1))

    data = scipy.io.readsav('W_Abundances_grid_puestu_adpak_fitscaling_74_0.00000_5.00000_1000_idlsave')
    te = data['en']
    idx2 = scipy.logical_and(te > lims[0], te < lims[1])


    temp = val[idx2]/c_w_l[idx2]/sumM[idx2]*k


    output = scipy.zeros((len(index0),len(index1)))
    output2 = scipy.zeros((len(index0),len(index1),len(te[idx2])))



    for i in range(len(index0)):
        for j in range(len(index1)):
            print(i, j)
            pipe = SVMtest(maxT, sumM, val, c_w_l, reduced=idx1, gamma=index0[i], C=index1[j],k=k)
            output[i,j] = pipe.score(scipy.log(scipy.atleast_2d(maxT[idx2]).T),scipy.log(temp))
            output2[i,j] = scipy.exp(pipe.predict(scipy.log(scipy.atleast_2d(te[idx2]).T)))

    pickle.dump([output,output2],open(loc,'wb'))
    return output,output2
Example #32
def showVectorDisplacements():

    global testImage, croppedRefImage, u, v, valid, q1, umean, vmean, x, y, sxyVar, wxyVar, goodvectorsVar
    from scipy import where, compress, logical_and, median, logical_or, nan
    from pylab import resize, transpose, quiver, title, show, find, imshow, hist, figure, clf, draw, save, load, xlabel, ylabel, flipud

    mxy = 3
    wxy = int(wxyVar.get())
    sxy = int(sxyVar.get())
    goodvectors = float(goodvectorsVar.get())
    #process to find PIV-style displacements
    x, y, u, v, q1, valid = simplepiv(croppedRefImage, testImage, wxy, mxy,
                                      sxy)
    good = where(logical_and(q1 > goodvectors, valid > 0), True, False)
    umean = median(compress(good.flat, u.flat))
    vmean = median(compress(good.flat, v.flat))
    u = where(logical_or(q1 < goodvectors, valid < 0), 0, u)
    v = where(logical_or(q1 < goodvectors, valid < 0), 0, v)
    u = u - umean
    v = v - vmean
    save('vecx.out', x)
    save('vecy.out', y)
    save('vecu.out', u)
    save('vecv.out', v)
    save('vecq1.out', q1)
    save('vecvalid.out', valid)
    u = flipud(u)
    v = -flipud(v)
    quiver(x, y, u, v)
    title('Vector displacements')
    xlabel('Pixels')
    ylabel('Pixels')
    show()
    return
Example #34
    def assess_calibration(self):
        """Assess if PredPol is calibrated by conditioning on predicted intensity
        and checking the correlation between number of crimes and demographics.

        Returns: a 2D array where the first dimension is the number of days in
        the test set and the second dimension is the number of bins for the
        range of predicted intensities, as computed by `sp.histogram_bin_edges`.
        The entry in the ith row and jth column is the Pearson correlation
        coefficient between race and actual number of crimes in the jth bin of
        predicted intensity for the ith day.
        """
        black = self.pred_obj.grid_cells.black
        not_nan = sp.logical_not(sp.isnan(black.values))

        bins = sp.histogram_bin_edges(self.get_predicted_intensities(), bins='auto')
        correlations = sp.empty((len(self.lambda_columns), len(bins)))
        correlations[:] = sp.nan
        for i, (lambda_col, actual_col) in self._iterator():
            idx_bins = sp.digitize(self.results[lambda_col], bins)
            for j in range(len(bins)):
                idx_selected = sp.logical_and(idx_bins == j, not_nan)
                if sp.sum(idx_selected) > 2:
                    actual = self.results.loc[idx_selected, actual_col]
                    demographics = black.loc[idx_selected]
                    correlations[i, j] = sp.stats.pearsonr(actual, demographics)[0]
        return correlations
Example #35
def isi(t, V, spike_thresh=0):
    #isi_mean, isi_dev, dt = isi(t, V, spike_thresh=0)
    t = t[1:]
    time = t[sp.logical_and(V[:-1] < spike_thresh, V[1:] >= spike_thresh)]
    dt = sp.diff(time)
    #     print(time)
    return sp.mean(dt), sp.std(dt), dt
Example #37
    def __init__(self, config, forward):
        """Initialization specifies retrieval subwindows for calculating
        measurement cost distributions"""
        self.lasttime = time.time()
        self.fm = forward
        self.wl = self.fm.wl
        self.ht = OrderedDict()  # Hash table
        self.max_table_size = 500
        self.windows = config['windows']
        if 'verbose' in config:
            self.verbose = config['verbose']
        else:
            self.verbose = True
        if 'Cressie_MAP_confidence' in config:
            self.state_indep_S_hat = config['Cressie_MAP_confidence']
        else:
            self.state_indep_S_hat = False

        self.winidx = s.array((), dtype=int)  # indices of retrieval windows
        inds = range(len(self.wl))
        for lo, hi in self.windows:
            idx = s.where(s.logical_and(self.wl > lo, self.wl < hi))[0]
            self.winidx = s.concatenate((self.winidx, idx), axis=0)
        self.counts = 0
        self.inversions = 0
Example #38
def DFT_PSD(data,movingwin=[0.201, 0.051], Fs = 1000, pad=0, fpass=[1,100]):
	'''Discrete Fourier Transform power spectral density.
		Input:
			data: np.array of shape time_window x samples
		'''
	num_trials = data.shape[1]
	N = data.shape[0] #ms of trials
	Nwin=int(round(Fs*movingwin[0]))
	Nstep=int(round(Fs*movingwin[1]))
	
	winstart=np.arange(0,N-Nwin,Nstep)
	nw=len(winstart)

	f = np.fft.fftfreq(int(movingwin[0]*Fs))
	f = Fs*f[f>=0]
	f_ind = scipy.logical_and(f>=fpass[0], f<=fpass[1])

	#set(f[f>=fpass[0]] ) & set(f[f<=fpass[1]])
	#f_ind = np.array(list(f_ind))

	S = np.zeros(( num_trials, nw, sum(f_ind) ))

	for n in range(nw):
		datawin=data[winstart[n]:winstart[n]+Nwin,:]
		sp = np.fft.rfft(datawin.T)
		psd_est = abs(sp)**2
		S[:,n,:] = psd_est[:,f_ind] 

	t=(winstart+round(Nwin/2))/float(Fs)
	return S, f[f_ind], t
Example #39
    def __init__(self, ft2file, tmin, tmax):
        hdulist = fits.open(ft2file)
        SC_data = hdulist['SC_DATA'].data
        # TIME
        TIME_ALL = SC_data.field('START')
        FILTER = sp.logical_and(TIME_ALL > tmin, TIME_ALL < tmax)
        self.SC_TSTART = SC_data.field('START')[FILTER]
        self.SC_TSTOP = SC_data.field('STOP')[FILTER]
        self.NENTRIES = len(self.SC_TSTART)
        # BORESIGHT:
        self.RA_SCZ = SC_data.field('RA_SCZ')[FILTER]
        self.DEC_SCZ = SC_data.field('DEC_SCZ')[FILTER]
        # ZENITH
        self.RA_ZENITH = SC_data.field('RA_ZENITH')[FILTER]
        self.DEC_ZENITH = SC_data.field('DEC_ZENITH')[FILTER]
        # SAA
        self.IN_SAA = SC_data.field('IN_SAA')[FILTER]

        self.IDX_ARRAY = sp.arange(self.NENTRIES)
        self.FOV_ARRAY = sp.zeros(self.NENTRIES)
        self.theta_max = 65
        self.zenith_max = 90
        print('Summary FT2 file: %s ' % ft2file)
        print('NENTRIES........:%d' % (self.NENTRIES))
        print('TMIN............:%s' % (self.SC_TSTART[0]))
        print('TMAX............:%s' % (self.SC_TSTOP[-1]))
        print('DELTA T.........:%s' % (self.SC_TSTOP[-1] - self.SC_TSTART[0]))
Example #40
def learning_curve_metrics(hdf_list, epoch_size=56, n_factors=5):
    #hdf_list = [3822, 3834, 3835, 3840]
    #obstacle learning: hdf_list = [4098, 4100, 4102, 4104, 4114, 4116, 4118, 4119]
    rew_ix_list = []
    te_refs = []
    rpm_list = []
    hdf_dict = {}
    perc_succ = []
    time_list = []
    offs = 0

    #f, ax = plt.subplots()
    for te in hdf_list:
        hdf_t = dbfn.TaskEntry(te)
        hdf = hdf_t.hdf
        hdf_dict[te] = hdf

        rew_ix, rpm = pa.get_trials_per_min(hdf, nmin=2,rew_per_min_cutoff=0, 
            ignore_assist=True, return_rpm=True)
        ix = 0
        #ax.plot(rpm)

        trial_ix = np.array([i for i in hdf.root.task_msgs[:] if 
            i['msg'] in ['reward','timeout_penalty','hold_penalty','obstacle_penalty'] ], dtype=hdf.root.task_msgs.dtype)


        while (ix+epoch_size) < len(rew_ix):
            start_rew_ix = rew_ix[ix]
            end_rew_ix = rew_ix[ix+epoch_size]
            msg_ix_mod = np.nonzero(scipy.logical_and(trial_ix['time']<=end_rew_ix, trial_ix['time']>start_rew_ix))[0]
            all_msg = trial_ix[msg_ix_mod]
            perc_succ.append(len(np.nonzero(all_msg['msg']=='reward')[0]) / float(len(all_msg)))

            rew_ix_list.append(rew_ix[ix:ix+epoch_size])
            rpm_list.append(np.mean(rpm[ix:ix+epoch_size]))
            te_refs.append(te)
            time_list.append((0.5*(start_rew_ix+end_rew_ix))+offs)

            ix += epoch_size
        offs = offs+len(hdf.root.task)

    #For each epoch, fit FA model (stick w/ 5 factors for now):
    ratio = []
    for te, r_ix in zip(te_refs, rew_ix_list):
        print(te, len(r_ix))

        # use this entry's hdf file, not the handle left over from the first loop
        update_bmi_ix = np.nonzero(np.diff(np.squeeze(hdf_dict[te].root.task[:]['internal_decoder_state'][:, 3, 0])))[0] + 1
        bin_spk, targ_pos, targ_ix, z, zz = pa.extract_trials_all(hdf_dict[te], r_ix, time_cutoff=1000, update_bmi_ix=update_bmi_ix)
        zscore_X, mu = pa.zscore_spks(bin_spk)
        FA = skdecomp.FactorAnalysis(n_components=n_factors)
        FA.fit(zscore_X)

        #SOT Variance Ratio by target
        #Priv var / mean
        Cov_Priv = np.sum(FA.noise_variance_)
        U = np.mat(FA.components_).T
        Cov_Shar = np.trace(U*U.T)

        ratio.append(Cov_Shar/(Cov_Shar+Cov_Priv))
Example #41
def getBeamFluxSpline(beam, plasma, t, lim1, lim2, points=1000):
    """ generates a spline off of the beampath.  Assumes
    that the change in flux is MONOTONIC"""

    lim = beam.norm.s
    beam.norm.s = scipy.linspace(0, lim[-1], points)
    h = time.time()
    psi = plasma.eq.rz2rmid(beam.r()[0],
                            beam.r()[2], t)  #evaluates all psi's at once
    print(time.time() - h)
    outspline = len(t) * [0]
    inspline = len(t) * [0]
    for i in range(t.size):
        temp = lim1
        mask = scipy.logical_and(scipy.isfinite(psi[i]), psi[i] < lim2 + .02)

        try:
            minpos = scipy.argmin(psi[i][mask])
            test = psi[i][mask][minpos]
        except ValueError:
            test = lim2 + .03

        #plt.plot(beam.x()[0][mask],psi[i][mask])
        #plt.show()
        sizer = psi[i][mask].size
        if not test > lim2:

            #plt.plot(beam.x()[0][mask][0:minpos],psi[i][mask][0:minpos],beam.x()[0][mask][minpos:],psi[i][mask][minpos:])
            #plt.show()
            #limout = scipy.insert(lim,(2,2),(beam.norm.s[mask][minpos],beam.norm.s[mask][minpos]))  # add minimum flux s for bound testing
            if lim1 < test:
                temp = test

            try:
                temp1 = scipy.clip(
                    scipy.digitize((lim1, lim2), psi[i][mask][minpos::-1]), 0,
                    minpos)
                outspline[i] = beam.norm.s[mask][minpos::-1][temp1]

            except ValueError:
                tempmask = (psi[i][mask] < lim2)[0]
                outspline[i] = scipy.array(
                    [beam.norm.s[mask][minpos], beam.norm.s[mask][tempmask]])

            try:
                temp2 = scipy.clip(
                    scipy.digitize((lim1, lim2), psi[i][mask][minpos:]), 0,
                    sizer - minpos - 1)
                inspline[i] = beam.norm.s[mask][minpos:][temp2]

            except ValueError:
                inspline[i] = scipy.array(
                    [beam.norm.s[mask][minpos], beam.norm.s[mask][-1]])

        else:
            outspline[i] = scipy.array([[], []])
            inspline[i] = scipy.array([[], []])

    return (outspline, inspline)
Example #42
def get_kin_sig_shenoy(kin_sig, bins=np.linspace(0,3000,3000), start_bin=1200,first_local_max_method=False,
    after_start_est=300+300, kin_est = 1000, anim='seba'):

    kin_feat = np.zeros((kin_sig.shape[0], 5))

    for r in range(kin_sig.shape[0]):   
        spd_after_go = kin_sig[r,start_bin:]

        if first_local_max_method: #Done only on BMI 3d, assuming Fs = 60 Hz. 
            d_spd = np.diff(spd_after_go)

            #Est. number of bins RT should come after: 
            aft = after_start_est/float(1000)*60 #Aft is in iteration for bmi3d
            rch = kin_est/float(1000)*60 #rch is in iteration for bmi3d
            #Find first cross from + --> -
            
            max_ind = np.array([i for i, s in enumerate(d_spd[:-1]) if scipy.logical_and(s>0, d_spd[i+1]<0)]) #derivative crosses zero w/ negative slope
            z = np.nonzero(scipy.logical_and(max_ind>aft, max_ind<(rch+aft)))[0] #local maxima that fit estimate of rxn time --> rch time

            #How to choose: 
            if len(z)>0:
                z_ind = np.argmax(spd_after_go[max_ind[z]]) #choose the biggest
                kin_feat[r,1] = bins[max_ind[z[z_ind]]+start_bin] #write down the time
                maxbin = max_ind[z[z_ind]]+start_bin
            else:
                print(' no local maxima found within range :/ ')
                kin_feat[r,1] = bins[int(start_bin+aft+rch)]
                maxbin = start_bin+aft+rch
        else:
            kin_feat[r,1] = bins[ start_bin + np.argmax(spd_after_go) ]
            maxbin = start_bin + np.argmax(spd_after_go)

        kin_feat[r,0] = kin_sig[r,int(maxbin)]

        perc = [0.2, 0.5, 0.1]
        for p, per in enumerate(perc):
            percent0 = kin_feat[r,0]*per #Bottom Threshold
            indz = range(0, int(maxbin-start_bin)) #0 - argmax_index
            indzz = indz[-1:0:-1] #Reverse
            datz = spd_after_go[indzz]
            try:
                x = np.nonzero(datz<percent0)[0][0]
            except:
                x = len(datz)
            kin_feat[r,2+p] = bins[int(maxbin-x)]
    return kin_feat
Example #43
def bias(a,b):
    '''
    bias
    '''
    a,b = sp.array(a),sp.array(b)
    mask = sp.logical_and(sp.isfinite(a),sp.isfinite(b))
    a, b = a[mask], b[mask]
    return a.mean()-b.mean()
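Usage sketch: the shared finite-mask keeps the two arrays aligned, so NaN or inf in either input drops that pair from both means (assuming the sp alias above re-exports numpy's array/isfinite/logical_and, as the older scipy this code targets did):

import numpy as np

a = [1.0, 2.0, np.nan, 4.0]
b = [0.5, np.inf, 2.5, 3.0]
# pairs 0 and 3 survive the mask: mean([1, 4]) - mean([0.5, 3]) = 0.75
print(bias(a, b))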
Example #44
def get_trials_per_min(hdf,nmin=2, rew_per_min_cutoff=0, ignore_assist=False, return_rpm=False, 
    return_per_succ=False, plot=False):
    '''
    Summary: Getting trials per minute from hdf file
    Input param: hdf: hdf file to use
    Input param: nmin: number of min to use a rectangular window
    Input param: rew_per_min_cutoff: ignore rew_ix after a 
        certain rew_per_min low threshold is passed
    Output param: rew_ix = rewarded indices in hdf file
    '''

    rew_ix = np.array([t[1] for it, t in enumerate(hdf.root.task_msgs[:]) if t[0]=='reward'])
    tm = np.zeros((np.max(rew_ix)+1))
    tm[rew_ix] += 1
    
    if hasattr(hdf.root.task, 'assist_level'):
        assist_ix = np.nonzero(hdf.root.task[:]['assist_level']==0)[0]
    else:
        assist_ix = np.zeros((len(hdf.root.task)))

    #Each row occurs ~1/60 sec, so: 
    minute = 60 * 60
    min_wind = np.ones((nmin*minute))/float(nmin)
    rew_per_min_tmp = np.convolve(min_wind, tm, mode='same')

    #Now smooth please: 
    smooth_wind = np.ones((3*minute))/float(3*minute)
    rew_per_min = pk_convolve(smooth_wind, rew_per_min_tmp)

    if rew_per_min_cutoff > 0:
        ix = np.nonzero(rew_per_min < rew_per_min_cutoff)[0]
        if len(ix)>0:
            cutoff_ix = ix[0]
        else:
            cutoff_ix = rew_ix[-1]
    
    else:
        cutoff_ix = rew_ix[-1]

    if ignore_assist:
        try:
            beg_zer_assist_ix = assist_ix[0]
        except IndexError:
            print('No values w/o assist for filename: ', hdf.filename)
            beg_zer_assist_ix = rew_ix[-1]+1
    else:
        beg_zer_assist_ix = 0

    if plot:
        plt.plot(np.arange(len(tm))/float(minute), rew_per_min)
        plt.show()
    ix_final = scipy.logical_and(rew_ix <= cutoff_ix, rew_ix >= beg_zer_assist_ix)
    if return_rpm:
        return rew_ix[ix_final], rew_per_min[rew_ix[ix_final]]
    else:
        return rew_ix[ix_final]
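
The heart of get_trials_per_min is a rectangular-window convolution over a ~60 Hz event train; a standalone sketch of just that step, with made-up reward indices:

import numpy as np

minute = 60 * 60                       # rows per minute at ~60 Hz
rew_ix = np.array([1000, 4000, 7500, 12000])
tm = np.zeros(rew_ix.max() + 1)
tm[rew_ix] += 1

nmin = 2
min_wind = np.ones(nmin * minute) / float(nmin)
rew_per_min = np.convolve(min_wind, tm, mode='same')
print(rew_per_min[rew_ix])             # local rewards/min around each reward
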
Example #45
0
def filter_M_T(gmr_name, gmr_characteristics):

    if gmr_characteristics[0] == 'None':
        # gmr_characteristics has not been defined because filtering of aftershocks is not of interest
        return 'Inf'
    else:
        Mw_multiplier = gmr_characteristics[-1]
        [M_m, Tg_m, Mw, Tg] = find_M_T(gmr_name, gmr_characteristics)
        available_aftershocks = len(Mw[scipy.logical_and(Mw<M_m*Mw_multiplier, Tg<Tg_m)])
        return available_aftershocks
Example #46
0
def makehist(testpath, npulses):
    """
        This function creates histograms from the data made in the testpath.
        Inputs
            testpath - The path where the data is located.
            npulses - The number of pulses in the sim.
    """
    sns.set_style("whitegrid")
    sns.set_context("notebook")
    params = ['Ne', 'Te', 'Ti', 'Vi']
    histlims = [[1e10, 3e11], [1000., 3000.], [100., 2500.], [-400., 400.]]
    erlims = [[-2e11, 2e11], [-1000., 1000.], [-800., 800], [-400., 400.]]
    erperlims = [[-100., 100.]]*4
    lims_list = [histlims, erlims, erperlims]
    errdict = makehistdata(params, testpath)[:4]
    ernames = ['Data', 'Error', 'Error Percent']


    # Two dimensional histograms
    pcombos = [i for i in itertools.combinations(params, 2)]
    c_rows = int(math.ceil(float(len(pcombos))/2.))
    (figmplf, axmat) = plt.subplots(c_rows, 2, figsize=(12, c_rows*6), facecolor='w')
    axvec = axmat.flatten()
    for icomn, icom in enumerate(pcombos):
        curax = axvec[icomn]
        str1, str2 = icom
        _, _, _ = make2dhist(testpath, PARAMDICT[str1], PARAMDICT[str2], figmplf, curax)
    filetemplate = str(Path(testpath).joinpath('AnalysisPlots', 'TwoDDist'))
    plt.tight_layout()
    plt.subplots_adjust(top=0.95)
    figmplf.suptitle('Pulses: {0}'.format(npulses), fontsize=20)
    fname = filetemplate+'_{0:0>5}Pulses.png'.format(npulses)
    plt.savefig(fname)
    plt.close(figmplf)
    # One dimensional histograms
    for ierr, iername in enumerate(ernames):
        filetemplate = str(Path(testpath).joinpath('AnalysisPlots', iername))
        (figmplf, axmat) = plt.subplots(2, 2, figsize=(20, 15), facecolor='w')
        axvec = axmat.flatten()
        for ipn, iparam in enumerate(params):
            plt.sca(axvec[ipn])
            if sp.any(sp.isinf(errdict[ierr][iparam])):
                continue
            binlims = lims_list[ierr][ipn]
            bins = sp.linspace(binlims[0], binlims[1], 100)
            xdata = errdict[ierr][iparam]
            xlog = sp.logical_and(xdata >= binlims[0], xdata < binlims[1])

            histhand = sns.distplot(xdata[xlog], bins=bins, kde=True, rug=False)

            axvec[ipn].set_title(iparam)
        figmplf.suptitle(iername +' Pulses: {0}'.format(npulses), fontsize=20)
        fname = filetemplate+'_{0:0>5}Pulses.png'.format(npulses)
        plt.savefig(fname)
        plt.close(figmplf)
Example #47
0
def nonna_select_data(data, outlier_threshold, level='high'):
    """
    This function returns an index of selected (good) data points after identifying
    the main outliers. It applies a cut on the data to remove exactly a fraction
    (1 - outlier_threshold) of all data points. By default the cut is applied only
    at the higher end of the data values, but the parameter level can be used to
    change this.

    Input arguments:
    data              = vector containing all data points
    outlier_threshold = remove outliers until we are left with exactly this
                        fraction of the original data
    level             = 'high|low|both' determines if the outliers are removed
                        only from the high-value end, the low-value end, or
                        both ends

    Output:
    idx               = boolean index of selected (good) data
    """

    # histogram all the data values
    n, x = scipy.histogram(data, len(data) // 10)
    # compute the cumulative distribution and normalize
    nn = scipy.cumsum(n)
    nn = nn / float(max(nn))

    if level == 'high':
        # select the value such that a fraction outlier_threshold of the data lies below it
        if outlier_threshold < 1:
            val = x[scipy.nonzero(nn >= outlier_threshold)[0][0]]
        else:
            val = max(data)
        # use that fraction of data only
        idx = data <= val
    elif level == 'low':
        # select the value such that a fraction outlier_threshold of the data lies above it
        if outlier_threshold < 1:
            val = x[scipy.nonzero(nn <= (1 - outlier_threshold))[0][-1]]
        else:
            val = min(data)
        # use that fraction of data only
        idx = data >= val
    elif level == 'both':
        # select the value such that a fraction outlier_threshold/2 of the data lies below it
        if outlier_threshold < 1:
            Hval = x[scipy.nonzero(nn >= 1 - (1 - outlier_threshold) / 2)[0][0]]
        else:
            Hval = max(data)
        # select the value such that a fraction outlier_threshold/2 of the data lies above it
        if outlier_threshold < 1:
            Lval = x[scipy.nonzero(nn <= (1 - outlier_threshold) / 2)[0][-1]]
        else:
            Lval = min(data)
        # use that fraction of data only
        idx = scipy.logical_and(data >= Lval, data <= Hval)

    return idx
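
Usage sketch for nonna_select_data (synthetic data and the 0.95 threshold are my assumptions): keep the lowest 95% of values by trimming only the high tail. This assumes a scipy old enough to still carry the NumPy aliases (scipy.histogram, scipy.cumsum, scipy.nonzero).

import numpy as np

data = np.concatenate([np.random.randn(1000), [50., 60., 70.]])  # a few gross outliers
idx = nonna_select_data(data, 0.95, level='high')
print(data[idx].max())                # roughly the 95th-percentile value
print(idx.sum() / float(len(data)))   # ~0.95 of the points survive
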
Example #48
0
    def get_scan_IF_inds(self, scan_ind, IF_ind) :
        """Gets the record indices of the fits file that correspond to the
        given scan and IF.

        Note that the scans are numbered with 0 corresponding to the first scan
        in the file, i.e., it is not the session scan number."""

        # TODO: Should check valid scan IF, and raise value errors as appropriate
        thescan = self.scan_set[scan_ind]
        theIF = self.IF_set[IF_ind]
        
        # Find all the records that correspond to this IF and this scan.
        # These indices *should* now be ordered in time, cal (on/off),
        # and in polarization, once the IF is isolated.
        (inds_sif,) = sp.where(sp.logical_and(self._IFs_all==theIF, 
                                        self._scans_all==thescan))
        ncal = len(sp.unique(self.fitsdata.field('CAL')[inds_sif]))
        npol = len(sp.unique(self.fitsdata.field('CRVAL4')[inds_sif]))
        
        # Reform to organize by pol, cal, etc.
        ntimes = len(inds_sif)//npol//ncal
        inds_sif = sp.reshape(inds_sif, (ntimes, npol, ncal))

        if self.verify_ordering > 0:
            # We expect noise cal to be on for every second record.
            for thecal in range(ncal) :
                tmp = sp.unique(self.fitsdata.field('CAL')[inds_sif[:,:,thecal]])
                if len(tmp) > 1 :
                    raise ce.DataError("Calibration (ON/OFF) not in "
                                    "perfect order in file: "+self.fname)
            # Polarization should cycle through 4 modes (-5,-7,-8,-6)
            for thepol in range(npol) :
                tmp = sp.unique(self.fitsdata.field('CRVAL4')
                            [inds_sif[:,thepol,:]])
                if len(tmp) > 1 :
                    raise ce.DataError("Polarizations not in perfect order in "
                                    "file: "+self.fname)
            # We expect the entries to be sorted in time and for time to not
            # change across pol and cal.
            lastLST = 0
            for ii in range(ntimes) :
                # Sometimes won't have the LST.
                try :
                    thisLST = self.fitsdata.field('LST')[inds_sif[ii,0,0]]
                # If 'LST' is missing raises a KeyError in later versions of
                # pyfits, and a NameError in earlier ones.
                except (KeyError, NameError) :
                    break
                if not (sp.allclose(self.fitsdata.field('LST')
                        [inds_sif[ii,:,:]] - thisLST, 0)) :
                    raise ce.DataError("LST change across cal or pol in "
                                       "file: " + self.fname)

        return inds_sif
Example #49
0
    def evaluate(self, rotMatrix):
        """
        Evaluate the correlation for the given orientation (rotation).

        :rtype: :obj:`float`
        :return: Correlation of normalised bin counts.
        """
        trnsCoords = rotMatrix.dot(self.trnsCoords.T).T
        fixdDensity = self.fixdSphHist.getBinCounts(trnsCoords)
        msk = sp.where(sp.logical_and(fixdDensity > self.fixdLoThrsh,
                                      fixdDensity <= self.fixdHiThrsh))

        return sp.stats.pearsonr(self.trnsDensity[msk], fixdDensity[msk])[0]
Example #50
0
    def timereduce(self, timelims=None,timesselected=None):
        assert (timelims is not None) or (timesselected is not None), "Need a set of limits or selected set of times"

        if timelims is not None:
            tkeep = sp.logical_and(self.Time_Vector>=timelims[0],self.Time_Vector<timelims[1])
        if timesselected is not None:
            tkeep = sp.in1d(self.Time_Vector,timesselected)
        # prune the arrays
        self.Time_Vector=self.Time_Vector[tkeep]
        self.Param_List=self.Param_List[:,tkeep]
        self.Velocity=self.Velocity[:,tkeep]
Example #51
0
def mask_frequency_range(Data, limits):

    if len(limits) != 2:
        raise ValueError("Limits must be length 2 sequence.")
    lower = min(limits)
    upper = max(limits)

    Data.calc_freq()
    freq = Data.freq
    delta_f = abs(sp.mean(sp.diff(freq)))
    mask = sp.logical_and(freq + delta_f/2 <= upper, freq - delta_f/2 >= lower)
    Data.data[...,mask] = ma.masked
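
The in-band test keeps a channel only when its full width [f - Δf/2, f + Δf/2] lies inside the limits; a standalone sketch of the same mask on a hypothetical channel grid:

import numpy as np

freq = np.linspace(700e6, 900e6, 256)            # hypothetical channel centres (Hz)
lower, upper = 750e6, 850e6
delta_f = abs(np.mean(np.diff(freq)))
mask = np.logical_and(freq + delta_f/2 <= upper, freq - delta_f/2 >= lower)
print(freq[mask][0], freq[mask][-1])             # first/last fully contained channel
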
Example #52
0
from numpy import roll, logical_and, logical_or, cumsum, logical_not, int64

def velocity_dof(domain, ax):
    # Calculate velocity dof numbers for each cell
    rm = roll( domain, 1, axis=ax )
    type_3 = logical_and( domain, rm )
    type_2 = logical_or(  domain, rm )
    
    dof = cumsum( logical_not( logical_or( type_3, type_2 ) ) ).reshape( domain.shape ) - 1
    # Do logic to figure out type 2 and 3
    dof[type_2 == 1] = -2
    dof[type_3 == 1] = -3

    return dof.astype(int64)
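
A tiny check of velocity_dof (my example): a single row of cells where 1 marks a solid cell. Free faces are numbered consecutively; a face touching one solid cell gets -2 and a face between two solid cells gets -3. Note that roll wraps around, so the first face pairs the first and last cells.

import numpy as np

domain = np.array([[0, 0, 1, 1, 0, 0]])   # 1 = solid cell, 0 = fluid cell
print(velocity_dof(domain, ax=1))         # [[ 0  1 -2 -3 -2  2]]
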
Example #53
0
def getBeamFluxSpline(beam,plasma,t,lim1,lim2,points = 1000):
    """ generates a spline off of the beampath.  Assumes
    that the change in flux is MONOTONIC"""

    lim = beam.norm.s
    beam.norm.s = scipy.linspace(0,lim[-1],points)
    h = time.time()
    psi = plasma.eq.rz2rmid(beam.r()[0],beam.r()[2],t) #evaluates all psi's at once
    print(time.time()-h)
    outspline = len(t)*[0]
    inspline = len(t)*[0]
    for i in range(t.size):
        temp = lim1
        mask = scipy.logical_and(scipy.isfinite(psi[i]),psi[i] < lim2+.02)

        try:
            minpos = scipy.argmin(psi[i][mask])
            test = psi[i][mask][minpos]
        except ValueError:
            test = lim2+.03
            
        #plt.plot(beam.x()[0][mask],psi[i][mask])
        #plt.show()
        sizer = psi[i][mask].size
        if not test > lim2:

        #plt.plot(beam.x()[0][mask][0:minpos],psi[i][mask][0:minpos],beam.x()[0][mask][minpos:],psi[i][mask][minpos:])
        #plt.show()
        #limout = scipy.insert(lim,(2,2),(beam.norm.s[mask][minpos],beam.norm.s[mask][minpos]))  # add minimum flux s for bound testing
            if lim1 < test:
                temp = test

            try:
                temp1 = scipy.clip(scipy.digitize((lim1,lim2),psi[i][mask][minpos::-1]),0,minpos)
                outspline[i] = beam.norm.s[mask][minpos::-1][temp1]
            
            except ValueError:
                tempmask = (psi[i][mask] < lim2)[0]
                outspline[i] = scipy.array([beam.norm.s[mask][minpos],beam.norm.s[mask][tempmask]])

            try:
                temp2 = scipy.clip(scipy.digitize((lim1,lim2),psi[i][mask][minpos:]),0,sizer-minpos-1)
                inspline[i] = beam.norm.s[mask][minpos:][temp2]
                
            except ValueError:
                inspline[i] = scipy.array([beam.norm.s[mask][minpos],beam.norm.s[mask][-1]])

        else:
            outspline[i] = scipy.array([[],[]])
            inspline[i] = scipy.array([[],[]])

    return (outspline,inspline)
Example #54
0
def estimate_rate_func(t, T, N, plot_flag=False, method='central diff'):

    t_est_pts = scipy.linspace(t.min(), t.max(), N+2) 
    interp_func = scipy.interpolate.interp1d(t,T,'linear')
    T_est_pts = interp_func(t_est_pts)
 
    if plot_flag:
        pylab.figure()
        pylab.subplot(211)
        pylab.plot(t_est_pts, T_est_pts,'or')

    # Estimate slopes
    slope_pts = scipy.zeros((N,)) 
    T_slope_pts = scipy.zeros((N,))  

    if method == 'local fit':
        for i in range(1,(N+1)):
            mask0 = t > 0.5*(t_est_pts[i-1] + t_est_pts[i])
            mask1 = t < 0.5*(t_est_pts[i+1] + t_est_pts[i])
            mask = scipy.logical_and(mask0, mask1)
            t_slope_est = t[mask]
            T_slope_est = T[mask]
            local_fit = scipy.polyfit(t_slope_est,T_slope_est,2)
            dlocal_fit = scipy.polyder(local_fit)
            slope_pts[i-1] = scipy.polyval(dlocal_fit,t_est_pts[i]) 
            T_slope_pts[i-1] = scipy.polyval(local_fit,t_est_pts[i])
            if plot_flag:
                t_slope_fit = scipy.linspace(t_slope_est[0], t_slope_est[-1], 100)
                T_slope_fit = scipy.polyval(local_fit,t_slope_fit)
                pylab.plot(t_slope_fit, T_slope_fit,'g')
    elif method == 'central diff':
        dt = t_est_pts[1] - t_est_pts[0]
        slope_pts = (T_est_pts[2:] - T_est_pts[:-2])/(2.0*dt)
        T_slope_pts = T_est_pts[1:-1]
    else:
        raise ValueError('unknown method %s' % (method,))
        
    # Fit line to slope estimates
    fit = scipy.polyfit(T_slope_pts, slope_pts,1)

    if plot_flag:
        T_slope_fit = scipy.linspace(T_slope_pts.min(), T_slope_pts.max(),100)
        slope_fit = scipy.polyval(fit,T_slope_fit)
        pylab.subplot(212)
        pylab.plot(T_slope_fit, slope_fit,'r')
        pylab.plot(T_slope_pts, slope_pts,'o')
        pylab.show()

    rate_slope = fit[0]
    rate_offset = fit[1]
    return rate_slope, rate_offset 
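
A sanity check of estimate_rate_func against Newton's law of cooling, dT/dt = -k*T + k*T_env, so the linear fit should recover rate_slope ≈ -k and rate_offset ≈ k*T_env. The constants are mine, and this assumes the legacy scipy aliases used by the snippet (scipy.linspace, scipy.zeros, scipy.polyfit) are available.

import numpy as np
import scipy
import scipy.interpolate

k, T_env, T0 = 0.1, 20.0, 100.0
t = np.linspace(0., 50., 500)
T = T_env + (T0 - T_env) * np.exp(-k * t)

rate_slope, rate_offset = estimate_rate_func(t, T, N=20)
print(rate_slope, rate_offset)   # ~ -0.1 and ~ 2.0
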
Example #55
0
    def coordreduce(self,coorddict):
        """
        Given a dictionary of coordinates, the location points in the IonoContainer
        object will be reduced.
        Inputs
            coorddict - A dictionary with keys 'x','y','z','r','theta','phi'. The
                values in the dictionary are coordinate values that will be kept.
        """
        assert isinstance(coorddict, dict), "coorddict needs to be a dictionary"
        ncoords = self.Cart_Coords.shape[0]
        coordlist = ['x','y','z','r','theta','phi']

        coordkeysorg = coorddict.keys()
        coordkeys = [ic for ic in coordkeysorg if ic in coordlist]

        ckeep = sp.ones(ncoords,dtype=bool)

        for ic in coordkeys:
            currlims = coorddict[ic]
            if ic=='x':
                tempcoords = self.Cart_Coords[:,0]
            elif ic=='y':
                tempcoords = self.Cart_Coords[:,1]
            elif ic=='z':
                tempcoords = self.Cart_Coords[:,2]
            elif ic=='r':
                tempcoords = self.Sphere_Coords[:,0]
            elif ic=='theta':
                tempcoords = self.Sphere_Coords[:,1]
            elif ic=='phi':
                tempcoords = self.Sphere_Coords[:,2]
            keeptemp = sp.logical_and(tempcoords>=currlims[0],tempcoords<currlims[1])
            ckeep = sp.logical_and(ckeep,keeptemp)
        # prune the arrays
        self.Cart_Coords=self.Cart_Coords[ckeep]
        self.Sphere_Coords=self.Sphere_Coords[ckeep]
        self.Param_List=self.Param_List[ckeep]
        self.Velocity=self.Velocity[ckeep]
Example #56
0
    def doTestMcrWithHalo(self, haloSz=0, filecache=False):
        if (isinstance(haloSz, int) or ((sys.version_info.major < 3) and isinstance(haloSz, long))):
            if (haloSz < 0):
                haloSz = 0
            haloSz = sp.array((haloSz,)*3)
        
        subDirName = "doTestMcrWithHalo_%s_%s" % ("x".join(map(str, haloSz)), str(filecache))
        outDir = self.createTmpDir(subDirName)
        # outDir = subDirName
        if (mpi.world is not None):
            if ((mpi.world.Get_rank() == 0) and (not os.path.exists(outDir))):
                os.makedirs(outDir)
            mpi.world.barrier()

        segDds = mango.zeros(self.imgShape, mtype="segmented", halo=haloSz)
        segDds.setAllToValue(segDds.mtype.maskValue())
        mango.data.fill_ellipsoid(segDds, centre=self.centre, radius=(self.radius*1.05,)*3, fill=0)
        mango.data.fill_ellipsoid(segDds, centre=self.centre, radius=(self.radius,)*3, fill=1)
        mango.io.writeDds(os.path.join(outDir,"segmentedSphere.nc"), segDds)
        dtDds = mango.image.distance_transform_edt(segDds, 1)
        mango.io.writeDds(os.path.join(outDir,"distance_mapSphereEdt.nc"), dtDds)
        mcrDds = mango.image.max_covering_radius(dtDds, maxdist=self.radius*1.5, filecache=filecache)
        mango.io.writeDds(os.path.join(outDir,"distance_mapSphereEdtMcr.nc"), mcrDds)
        
        slc = []
        for d in range(len(haloSz)):
            slc.append(slice(haloSz[d], segDds.asarray().shape[d]-haloSz[d]))
        
        slc = tuple(slc)

        self.assertTrue(
            sp.all(
                (
                    self.radius
                    -
                    sp.where(
                        sp.logical_and(mcrDds.asarray() > 0, mcrDds.asarray() != mcrDds.mtype.maskValue()),
                        sp.ceil(mcrDds.asarray()),
                        self.radius
                    )
                )
                <=
                0.1*self.radius # Test is true in the masked areas.
            )
        )
        
        self.assertTrue(sp.all(segDds.halo == mcrDds.halo))
        self.assertTrue(sp.all(segDds.shape == mcrDds.shape))
        self.assertTrue(sp.all(segDds.origin == mcrDds.origin), "%s != %s" % (segDds.origin, mcrDds.origin))
        self.assertTrue(sp.all(segDds.mpi.shape == mcrDds.mpi.shape))
Example #57
0
def spk_phase(t, V1, V2, spike_thresh=0):
    """phase_mean, isi_mean = spk_phase(t, V1, V2, spike_thresh=0)
    
    Given two voltage vectors (V1 and V2) and time vector (t), phase calculates
    the mean phase of the spikes in radians (phase_mean) and the mean
    interspike interval (isi_mean).

    You can optionally specify the spike threshold (defaults to 0).

    This uses an assumption that every time it spikes, the voltage increases
    above the given spike threshold.

    The method used here is not robust. If the data is noisy then there will
    likely be false positives. With a model however, it should work very
    well.
    """
    
    time1 = t[:-1][sp.logical_and(V1[:-1] < spike_thresh, V1[1:] >= spike_thresh)]
    time2 = t[:-1][sp.logical_and(V2[:-1] < spike_thresh, V2[1:] >= spike_thresh)]

    l = sp.amin([len(time1), len(time2)])
    isi_mean = sp.mean(sp.diff(time1))
    phase_mean = sp.mean((time1[0:l]-time2[0:l]) / isi_mean * 2 * sp.pi)
    return phase_mean, isi_mean
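
A usage sketch for spk_phase with sinusoidal "voltages" (my construction): V2 leads V1 by a quarter cycle, so the mean phase should come out near pi/2 and the mean ISI near 1.0. Assumes sp is a scipy with the legacy NumPy aliases (or simply numpy).

import numpy as np

t = np.linspace(0., 10., 10000)
V1 = np.sin(2 * np.pi * t)
V2 = np.sin(2 * np.pi * (t + 0.25))    # V2 leads V1 by a quarter cycle

phase_mean, isi_mean = spk_phase(t, V1, V2)
print(phase_mean, isi_mean)            # ~ pi/2, ~ 1.0
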
Example #58
0
File: pdf.py Project: ayr0/StatLab
from numpy import isscalar, log, exp, logical_and
from scipy.special import gammaln

def betapdf(X, a, b):
    #operate only on x in (0,1)
    if isscalar(X):
        if X<=0 or X>=1:
            raise ValueError("X must be in the interval (0,1)")
        else:
            x=X
    else:
        goodx = logical_and(X>0, X<1)
        x = X[goodx].copy()

    loga = (a-1.0)*log(x)
    logb = (b-1.0)*log(1.0-x)

    return exp(gammaln(a+b)-gammaln(a)-gammaln(b)+loga+logb)
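
A quick cross-check of betapdf against scipy.stats.beta (my example): for a=2, b=3 the density is 12*x*(1-x)**2, and the two should agree.

import numpy as np
from scipy.stats import beta as beta_dist

x = np.array([0.1, 0.5, 0.9])
print(betapdf(x, 2.0, 3.0))          # [0.972 1.5   0.108]
print(beta_dist.pdf(x, 2.0, 3.0))    # same values
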
Example #59
0
def spikes2states(spikes):
    """Convert a sequence of binarized spikes to a sequence of state numbers.

    Input arguments:
    spikes -- spikes trains: 2D binary array, each column a different unit,
              each row a time point
    """

    # check that the incoming array is binary
    if not scipy.all(scipy.logical_and(spikes>=0, spikes<=1)):
        raise ValueError('Input array must be binary')

    nchannels = spikes.shape[1]
    # convert binary sequence to decimal numbers
    pow2 = scipy.array([2**i for i in range(nchannels-1, -1, -1)])
    return (spikes*pow2).sum(axis=1)
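
Usage sketch for spikes2states (my example): three units, one row per time point; each row is read as a binary number with the leftmost unit as the most significant bit. Assumes a scipy that still exposes the NumPy aliases (scipy.all, scipy.array, scipy.logical_and).

import numpy as np

spikes = np.array([[0, 0, 0],
                   [1, 0, 0],
                   [0, 1, 1],
                   [1, 1, 1]])
print(spikes2states(spikes))   # [0 4 3 7]
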
Example #60
0
    def combine(self, *others, **kwargs):
        """Combine the x-y pairs of this continuous function object with those
        of the given others. Returns a new continuous function instance. If cut
        is True, only the intersection of the x-axes is used, the rest of the
        input functions is cut away. Raise an exception if the x-axes do not
        intersect."""
        # 2012-07-11

        # get keyword arguments
        cut = kwargs.pop('cut', False)
        if len(kwargs) > 0:
            raise TypeError('%s() got an unexpected keyword argument "%s"'
                            % (__name__, list(kwargs.keys())[0]))

        new = self.copy()
        for other in others:
            # check type
            if type(other) is not type(self):
                raise TypeError('expected instance of %s'
                                % type(self).__name__)

            # combine x-y value pairs
            x = scipy.r_[new.x, other.x]
            y = scipy.r_[new.y, other.y]
            #x = self._filter_double(x)

            if cut:
                # determine extremal values
                min1, max1 = new.box()[:2]  # min(new.x), max(new.x)
                min2, max2 = other.box()[:2]  # min(other.x), max(other.x)
                newmin = max(min1, min2)
                newmax = min(max1, max2)

                # find indices to cut away
                keep = scipy.logical_and(x >= newmin, x <= newmax)
                if not keep.any():
                    raise ValueError('x-axes do not intersect')

                # cut away
                x = x[keep]
                y = y[keep]

        # make new object, merge attributes
        new = type(self)(x, y, attrs=new.attrs.intersection(other.attrs))

        # return new object
        return new