Example No. 1
    def load_file(self,repnum):
        global options
        global basepath
        # if repnum<10:
        #     upf = basepath + options.upfile + '/00' + str(repnum) + '/Camera/frames.ma'
        # else:
        #     upf = basepath + options.upfile + '/0' + str(repnum) + '/Camera/frames.ma'

        upf = basepath + options.upfile  + '/Camera/frames.ma'
        im=[]
        self.imageData = []
        print "loading data from ", upf
        try:
            im = MetaArray(file = upf,  subset=(slice(0,2), slice(64,128), slice(64,128)))
        except:
            print "Error loading upfile: %s\n" % upf
            return
        print "data loaded"
 
        self.times = im.axisValues('Time').astype('float32')
        self.imageData = im.view(np.ndarray).astype('float32')
        #pg.image(self.imageData, title=str(repnum))
        #self.ProcessImage()
        print 'imageData shape:', np.shape(self.imageData)
        #self.imageData = self.imageData[np.where(self.times>1)]
        # back  = self.imageData[np.where(np.logical_and(self.times>2, self.times<3))]
        # print 'size of back', np.shape(back)
        
        return
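
For context, the following is a minimal, self-contained sketch of the loading pattern shared by these examples. The import location of MetaArray and the file path are assumptions (in the source project they come from acq4/pyqtgraph); the subset slices are the same ones used above.

import numpy as np
from MetaArray import MetaArray  # import path is an assumption; may be pyqtgraph.metaarray

upf = '/data/experiment/001/Camera/frames.ma'  # hypothetical frames.ma path

try:
    # 'subset' restricts what is read from disk, using the same slices as above
    im = MetaArray(file=upf, subset=(slice(0, 2), slice(64, 128), slice(64, 128)))
except Exception as exc:
    print "Error loading upfile: %s (%s)" % (upf, exc)
    im = None

if im is not None:
    times = im.axisValues('Time').astype('float32')    # per-frame timestamps in seconds
    imageData = im.view(np.ndarray).astype('float32')  # plain ndarray view of the movie
    print 'imageData shape:', np.shape(imageData)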
Example No. 2
    def load_file(self,repnum):
        global options
        global basepath
        # if repnum<10:
        #     upf = basepath + options.upfile + '/00' + str(repnum) + '/Camera/frames.ma'
        # else:
        #     upf = basepath + options.upfile + '/0' + str(repnum) + '/Camera/frames.ma'

        upf = basepath + options.upfile  + '/Camera/frames.ma'
        im=[]
        self.imageData = []
        print "loading data from ", upf
        try:
            im = MetaArray(file = upf,  subset=(slice(0,2), slice(64,128), slice(64,128)))
        except:
            print "Error loading upfile: %s\n" % upf
            return
        print "data loaded"
 
        self.times = im.axisValues('Time').astype('float32')
        self.imageData = im.view(np.ndarray).astype('float32')
        #pg.image(self.imageData, title=str(repnum))
        #self.ProcessImage()
        print 'imageData shape:', np.shape(self.imageData)
        #self.imageData = self.imageData[np.where(self.times>1)]
        # back  = self.imageData[np.where(np.logical_and(self.times>2, self.times<3))]
        # print 'size of back', np.shape(back)
        
        return
Example No. 3
    def load_file(self, repnum):
        global options
        global basepath
        if repnum < 10:
            upf = basepath + options.upfile + '/00' + str(
                repnum) + '/Camera/frames.ma'
        else:
            upf = basepath + options.upfile + '/0' + str(
                repnum) + '/Camera/frames.ma'

        # upf = basepath + options.upfile  + '/Camera/frames.ma'
        im = []
        self.imageData = []
        print "loading data from ", upf
        try:
            im = MetaArray(file=upf,
                           subset=(slice(0, 2), slice(64, 128), slice(64,
                                                                      128)))
        except:
            print "Error loading upfile: %s\n" % upf
            return
        print "data loaded"

        self.times = im.axisValues('Time').astype('float32')
        self.imageData = im.view(np.ndarray).astype('float32')
        pg.image(self.imageData, title=str(repnum))
        #self.ProcessImage()
        print 'imageData shape:', np.shape(self.imageData)
        #self.imageData = self.imageData[np.where(self.times>1)]
        back = self.imageData[np.where(
            np.logical_and(self.times > 2, self.times < 3))]
        print 'size of back', np.shape(back)
        self.background = np.mean(back[5:], axis=0)
        #self.times= self.times-1

        # interval1=self.imageData[np.where(np.logical_and(self.times>=1, self.times<=1.25))]
        # print 'interval1 shape:', np.shape(interval1)
        # interval2=self.imageData[np.where(np.logical_and(self.times>=2, self.times<=2.25))]
        # print 'interval2 shape:', np.shape(interval2)
        # interval3=self.imageData[np.where(np.logical_and(self.times>=3, self.times<=3.25))]
        # print 'interval3 shape:', np.shape(interval3)
        # interval4=self.imageData[np.where(np.logical_and(self.times>=4, self.times<=4.25))]
        # print 'interval4 shape:', np.shape(interval4)
        # interval5=self.imageData[np.where(np.logical_and(self.times>=5, self.times<=5.25))]
        # print 'interval5 shape:', np.shape(interval5)
        # back=self.imageData[np.where(np.logical_and(self.times>0, self.times<1))]
        # background=np.mean(back,axis=0)
        # meanimg=np.mean(self.imageData, axis=0)
        # pg.image(meanimg, title='mean image')
        #self.imageData = ((interval1+interval2+interval3+interval4+interval5)/5-background)/background
        print 'imageData shape:', np.shape(self.imageData)
        #pg.image(background, title='background')
        #self.imageData=scipy.ndimage.gaussian_filter(self.imageData, sigma=[1,3,3], order=0,mode='reflect',truncate=4.0)

        return
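
The commented-out block above sketches the intended normalization: average several post-stimulus intervals, subtract the pre-stimulus background, and divide by that background. Below is a hedged, stand-alone version of that arithmetic; the function name and window arguments are hypothetical, and it assumes the background frames contain no zeros.

import numpy as np

def delta_f_over_f(imageData, times, stim_windows, bck_window=(2.0, 3.0)):
    """Sketch of the normalization hinted at in the comments: mean of the
    stimulus windows minus the background, divided by the background."""
    t = np.asarray(times)
    back = imageData[np.logical_and(t > bck_window[0], t < bck_window[1])]
    background = np.mean(back, axis=0)
    intervals = [imageData[np.logical_and(t >= t0, t <= t1)].mean(axis=0)
                 for (t0, t1) in stim_windows]
    stim = np.mean(intervals, axis=0)
    return (stim - background) / background

Called, for example, with stim_windows=[(1, 1.25), (2, 2.25), (3, 3.25), (4, 4.25), (5, 5.25)] to match the five commented intervals.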
Example No. 4
    def subtract_Background(self, diffup=0.005):
        #loading background data
        print 'running subtractBackground'

        bckfile = videobasepath + '008.ma'
        bckaudio = audiobasepath + '008/DaqDevice.ma'

        # bckfile = videobasepath + '011.ma'
        # bckaudio = audiobasepath + '011/DaqDevice.ma'
        
        try:
            im = MetaArray(file = bckfile,  subset=(slice(0,2), slice(64,128), slice(64,128)))
        except:
            print 'no background file!'
        #correct for timing differences
        audio = []
        audio = MetaArray(file = bckaudio, subset=(slice(0,2), slice(64,128), slice(64,128)))
        audiotime = audio.axisValues('Time').astype('float32')

        audiomin = np.min(audiotime) + diffup
       
        audiomax = np.max(audiotime) + diffup
        rawtimes = im.axisValues('Time').astype('float32')
        adjustedtime = rawtimes[np.logical_and(rawtimes <= audiomax+.5, rawtimes >= audiomin)]
        bckimagedata = im[np.logical_and(rawtimes <= audiomax, rawtimes >= audiomin)]
        raw=self.imageData
        #check that the background image and the data are the same shape and average
        #then subtract the average from the stimulated data
        getout = fac_lift(bckimagedata,adjustedtime)
        bckimagedata=getout
        getout2 = fac_lift(raw, adjustedtime)
        self.imageData=getout2
        print 'shape of background', bckimagedata.shape
        print 'shape of imageData', self.imageData.shape
        if bckimagedata.shape[0] <= self.imageData.shape[0]:
            print 'image is longer'
            stop = bckimagedata.shape[0]
            print 'stop'
            self.imageData=self.imageData[: stop,:,:,:]
            print 'stop2'
            subtractor = np.zeros(bckimagedata.shape, float)
            diffimage = np.zeros(bckimagedata.shape, float)
            subtractor = np.mean(np.array([self.imageData,bckimagedata]), axis=0)
            #diffimage=sub_func(self.imageData, subtractor)
            diffimage = self.imageData - subtractor
            print 'stop 3'     
        else:
            print 'error! image is shorter, fix this code!'
        diffimage = scipy.signal.detrend(diffimage, axis=0)    
        self.imageData = diffimage    
        return
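
Stripped of the fac_lift reshaping (whose definition is not shown in this listing), the core of subtract_Background is: trim both movies to a common number of frames, average them frame-wise, subtract that average from the data, and detrend each pixel along time. A compact sketch of just that arithmetic, with a hypothetical function name:

import numpy as np
import scipy.signal

def subtract_mean_background(imageData, bckimagedata):
    """Align lengths, subtract the frame-wise average of data and background,
    then linearly detrend every pixel along the time axis."""
    stop = min(imageData.shape[0], bckimagedata.shape[0])
    imageData = imageData[:stop]
    bckimagedata = bckimagedata[:stop]
    subtractor = np.mean(np.array([imageData, bckimagedata]), axis=0)
    diffimage = imageData - subtractor
    return scipy.signal.detrend(diffimage, axis=0)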
Example No. 5
    def load_file(self,repnum):
        global options
        global basepath
        if repnum ==0:
            upf = basepath + options.upfile  + '/Camera/frames.ma'
        else:
            if repnum<10:
                upf = basepath + options.upfile + '/00' + str(repnum) + '/Camera/frames.ma'
            else:
                upf = basepath + options.upfile + '/0' + str(repnum) + '/Camera/frames.ma'
        
        #upf = basepath + options.upfile  + '/Camera/frames.ma'
        im=[]
        self.imageData = []
        print "loading data from ", upf
        try:
            im = MetaArray(file = upf,  subset=(slice(0,2), slice(64,128), slice(64,128)))
        except:
            print "Error loading upfile: %s\n" % upf
            return
        print "data loaded"
 
        self.times = im.axisValues('Time').astype('float32')
        self.imageData = im.view(np.ndarray).astype('float32')
        print 'imageData shape:', np.shape(self.imageData)
        #self.imageData = self.imageData[np.where(self.times>1)]
        back  = self.imageData[np.where(self.times<1)]

        self.background = np.mean(back[5:],axis=0)
        #self.times= self.times-1

        # interval1=self.imageData[np.where(np.logical_and(self.times>=1, self.times<=1.25))]
        # print 'interval1 shape:', np.shape(interval1)
        # interval2=self.imageData[np.where(np.logical_and(self.times>=2, self.times<=2.25))]
        # print 'interval2 shape:', np.shape(interval2)
        # interval3=self.imageData[np.where(np.logical_and(self.times>=3, self.times<=3.25))]
        # print 'interval3 shape:', np.shape(interval3)
        # interval4=self.imageData[np.where(np.logical_and(self.times>=4, self.times<=4.25))]
        # print 'interval4 shape:', np.shape(interval4)
        # interval5=self.imageData[np.where(np.logical_and(self.times>=5, self.times<=5.25))]
        # print 'interval5 shape:', np.shape(interval5)
        # back=self.imageData[np.where(np.logical_and(self.times>0, self.times<1))]
        # background=np.mean(back,axis=0)
        # meanimg=np.mean(self.imageData, axis=0)
        # pg.image(meanimg, title='mean image')
        #self.imageData = ((interval1+interval2+interval3+interval4+interval5)/5-background)/background
        print 'imageData shape:', np.shape(self.imageData)
        #pg.image(background, title='background')
        #self.imageData=scipy.ndimage.gaussian_filter(self.imageData, sigma=[1,3,3], order=0,mode='reflect',truncate=4.0)
   
        return
Example No. 6
def denoise(data, radius=2, threshold=4):
    """Very simple noise removal function. Compares a point to surrounding points,
    replaces with nearby values if the difference is too large."""

    r2 = radius * 2
    d1 = data.view(np.ndarray)
    d2 = d1[radius:] - d1[:-radius]  #a derivative
    #d3 = data[r2:] - data[:-r2]
    #d4 = d2 - d3
    stdev = d2.std()
    #print "denoise: stdev of derivative:", stdev
    mask1 = d2 > stdev * threshold  #where derivative is large and positive
    mask2 = d2 < -stdev * threshold  #where derivative is large and negative
    maskpos = mask1[:-radius] * mask2[radius:]  #both need to be true
    maskneg = mask1[radius:] * mask2[:-radius]
    mask = maskpos + maskneg
    d5 = np.where(
        mask, d1[:-r2], d1[radius:-radius]
    )  #where both are true replace the value with the value from 2 points before
    d6 = np.empty(d1.shape, dtype=d1.dtype)  #add points back to the ends
    d6[radius:-radius] = d5
    d6[:radius] = d1[:radius]
    d6[-radius:] = d1[-radius:]

    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        return MetaArray(d6, info=data.infoCopy())
    return d6
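
A usage sketch for denoise: a noisy trace with one isolated glitch, which the function should replace with a nearby value. The test data are made up; a plain ndarray works because the function only relies on .view(np.ndarray).

import numpy as np

trace = np.random.normal(0, 1, 1000).astype('float32')  # hypothetical noisy trace
trace[500] += 50.0                                       # one large isolated glitch

cleaned = denoise(trace, radius=2, threshold=4)
print 'glitch before: %.1f  after: %.1f' % (trace[500], cleaned[500])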
Example No. 7
def histogramDetrend(data,
                     window=500,
                     bins=50,
                     threshold=3.0,
                     offsetOnly=False):
    """Linear detrend. Works by finding the most common value at the beginning and end of a trace, excluding outliers.
    If offsetOnly is True, then only the offset from the beginning of the trace is subtracted.
    """

    d1 = data.view(np.ndarray)
    d2 = [d1[:window], d1[-window:]]
    v = [0, 0]
    for i in [0, 1]:
        d3 = d2[i]
        stdev = d3.std()
        mask = abs(d3 - np.median(d3)) < stdev * threshold
        d4 = d3[mask]
        y, x = np.histogram(d4, bins=bins)
        ind = np.argmax(y)
        v[i] = 0.5 * (x[ind] + x[ind + 1])

    if offsetOnly:
        d3 = data.view(np.ndarray) - v[0]
    else:
        base = np.linspace(v[0], v[1], len(data))
        d3 = data.view(np.ndarray) - base

    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        return MetaArray(d3, info=data.infoCopy())
    return d3
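
A usage sketch for histogramDetrend: a slowly drifting trace with a few large outliers; the histogram mode at each end should recover the baseline despite the outliers. The test data and parameters are made up.

import numpy as np

n = 5000
trace = np.linspace(0.0, 2.0, n) + np.random.normal(0, 0.05, n)  # linear drift plus noise
trace[100] += 30.0                                               # outliers the mode should ignore
trace[-100] -= 30.0

flat = histogramDetrend(trace, window=500, bins=50, threshold=3.0)
print 'residual drift ~', flat[-500:].mean() - flat[:500].mean()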
Example No. 8
    def load_file(self,loadstr, repnum):
        global options

        upf = basepath + loadstr + '/00' + str(repnum) + '/Camera/frames.ma'
        im=[]
        self.imageData = []
        print "loading data from ", upf
        try:
            im = MetaArray(file = upf,  subset=(slice(0,2), slice(64,128), slice(64,128)))
        except:
            print "Error loading upfile: %s\n" % upf
            return
        print "data loaded"
 
        self.times = im.axisValues('Time').astype('float32')
        self.imageData = im.view(np.ndarray).astype('float32')
   
        return
Example No. 9
    def load_file(self, loadstr, repnum):
        global options

        upf = basepath + loadstr + '/00' + str(repnum) + '/Camera/frames.ma'
        im = []
        self.imageData = []
        print "loading data from ", upf
        try:
            im = MetaArray(file=upf,
                           subset=(slice(0, 2), slice(64, 128), slice(64,
                                                                      128)))
        except:
            print "Error loading upfile: %s\n" % upf
            return
        print "data loaded"

        self.times = im.axisValues('Time').astype('float32')
        self.imageData = im.view(np.ndarray).astype('float32')

        return
Example No. 10
    def subtract_Background(self, diffup=0.005):
        #loading background data
        print 'running subtractBackground'

        bckfile = videobasepath + '008.ma'
        bckaudio = audiobasepath + '008/DaqDevice.ma'

        # bckfile = videobasepath + '011.ma'
        # bckaudio = audiobasepath + '011/DaqDevice.ma'

        try:
            im = MetaArray(file=bckfile,
                           subset=(slice(0, 2), slice(64, 128), slice(64,
                                                                      128)))
        except:
            print 'no background file!'
        #correct for timing differences
        audio = []
        audio = MetaArray(file=bckaudio,
                          subset=(slice(0, 2), slice(64, 128), slice(64, 128)))
        audiotime = audio.axisValues('Time').astype('float32')

        audiomin = np.min(audiotime) + diffup

        audiomax = np.max(audiotime) + diffup
        rawtimes = im.axisValues('Time').astype('float32')
        adjustedtime = rawtimes[np.logical_and(rawtimes <= audiomax + .5,
                                               rawtimes >= audiomin)]
        bckimagedata = im[np.logical_and(rawtimes <= audiomax,
                                         rawtimes >= audiomin)]
        raw = self.imageData
        #check that the background image and the data are the same shape and average
        #then subtract the average from the stimulated data
        getout = fac_lift(bckimagedata, adjustedtime)
        bckimagedata = getout
        getout2 = fac_lift(raw, adjustedtime)
        self.imageData = getout2
        print 'shape of background', bckimagedata.shape
        print 'shape of imageData', self.imageData.shape
        if bckimagedata.shape[0] <= self.imageData.shape[0]:
            print 'image is longer'
            stop = bckimagedata.shape[0]
            print 'stop'
            self.imageData = self.imageData[:stop, :, :, :]
            print 'stop2'
            subtractor = np.zeros(bckimagedata.shape, float)
            diffimage = np.zeros(bckimagedata.shape, float)
            subtractor = np.mean(np.array([self.imageData, bckimagedata]),
                                 axis=0)
            #diffimage=sub_func(self.imageData, subtractor)
            diffimage = self.imageData - subtractor
            print 'stop 3'
        else:
            print 'error! image is shorter, fix this code!'
        diffimage = scipy.signal.detrend(diffimage, axis=0)
        self.imageData = diffimage
        return
Example No. 11
def histogramDetrend(data, window=500, bins=50, threshold=3.0):
    """Linear detrend. Works by finding the most common value at the beginning and end of a trace, excluding outliers."""

    d1 = data.view(np.ndarray)
    d2 = [d1[:window], d1[-window:]]
    v = [0, 0]
    for i in [0, 1]:
        d3 = d2[i]
        stdev = d3.std()
        mask = abs(d3 - np.median(d3)) < stdev * threshold
        d4 = d3[mask]
        y, x = np.histogram(d4, bins=bins)
        ind = np.argmax(y)
        v[i] = 0.5 * (x[ind] + x[ind + 1])

    base = np.linspace(v[0], v[1], len(data))
    d3 = data.view(np.ndarray) - base

    if isinstance(data, MetaArray):
        return MetaArray(d3, info=data.infoCopy())
    return d3
Example No. 12
def applyFilter(data, b, a, padding=100, bidir=True):
    """Apply a linear filter with coefficients a, b. Optionally pad the data before filtering
    and/or run the filter in both directions."""
    d1 = data.view(np.ndarray)

    if padding > 0:
        d1 = np.hstack([d1[:padding], d1, d1[-padding:]])

    if bidir:
        d1 = scipy.signal.lfilter(b, a,
                                  scipy.signal.lfilter(b, a, d1)[::-1])[::-1]
    else:
        d1 = scipy.signal.lfilter(b, a, d1)

    if padding > 0:
        d1 = d1[padding:-padding]

    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        return MetaArray(d1, info=data.infoCopy())
    else:
        return d1
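
applyFilter takes standard IIR coefficients; a usage sketch with a Butterworth low-pass designed by scipy.signal.butter (the sample rate and cutoff are made-up values):

import numpy as np
import scipy.signal

fs = 1000.0                                           # hypothetical sample rate, Hz
b, a = scipy.signal.butter(4, 50.0 / (fs / 2.0))      # 4th-order low-pass at 50 Hz

t = np.arange(0, 1, 1 / fs)
noisy = np.sin(2 * np.pi * 5 * t) + 0.3 * np.random.normal(size=t.shape)
smooth = applyFilter(noisy, b, a, padding=100, bidir=True)  # forward-backward filtering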
Example No. 13
def adaptiveDetrend(data, x=None, threshold=3.0):
    """Return the signal with baseline removed. Discards outliers from baseline measurement."""
    if x is None:
        x = data.xvals(0)

    d = data.view(np.ndarray)

    d2 = scipy.signal.detrend(d)

    stdev = d2.std()
    mask = abs(d2) < stdev * threshold
    #d3 = where(mask, 0, d2)
    #d4 = d2 - lowPass(d3, cutoffs[1], dt=dt)

    lr = stats.linregress(x[mask], d[mask])
    base = lr[1] + lr[0] * x
    d4 = d - base

    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        return MetaArray(d4, info=data.infoCopy())
    return d4
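
A usage sketch for adaptiveDetrend. With a plain ndarray the x values must be passed explicitly (data.xvals only exists on MetaArray), and the function assumes scipy.stats is imported as stats in its module; the test trace is made up.

import numpy as np

n = 2000
x = np.linspace(0, 2, n)
trace = 0.5 * x + np.random.normal(0, 0.05, n)  # linear baseline plus noise
trace[1000:1010] += 5.0                         # a transient the regression should ignore

flat = adaptiveDetrend(trace, x=x, threshold=3.0)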
Example No. 14
def downsample(data, n, axis=0, xvals='subsample'):
    """Downsample by averaging points together across axis.
    If multiple axes are specified, runs once per axis.
    If a metaArray is given, then the axis values can be either subsampled
    or downsampled to match.
    """
    ma = None
    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        ma = data
        data = data.view(np.ndarray)

    if hasattr(axis, '__len__'):
        if not hasattr(n, '__len__'):
            n = [n] * len(axis)
        for i in range(len(axis)):
            data = downsample(data, n[i], axis[i])
        return data

    nPts = int(data.shape[axis] / n)
    s = list(data.shape)
    s[axis] = nPts
    s.insert(axis + 1, n)
    sl = [slice(None)] * data.ndim
    sl[axis] = slice(0, nPts * n)
    d1 = data[tuple(sl)]
    #print d1.shape, s
    d1.shape = tuple(s)
    d2 = d1.mean(axis + 1)

    if ma is None:
        return d2
    else:
        info = ma.infoCopy()
        if 'values' in info[axis]:
            if xvals == 'subsample':
                info[axis]['values'] = info[axis]['values'][::n][:nPts]
            elif xvals == 'downsample':
                info[axis]['values'] = downsample(info[axis]['values'], n)
        return MetaArray(d2, info=info)
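
A usage sketch for downsample: averaging every 4 samples along axis 0, and binning the two spatial axes of an image stack in one call (shapes are hypothetical and chosen to divide evenly):

import numpy as np

trace = np.arange(100, dtype=float)
print downsample(trace, 4).shape                  # (25,): every 4 points averaged

stack = np.random.normal(size=(50, 128, 128))
small = downsample(stack, (2, 2), axis=(1, 2))    # bin both spatial axes by 2
print small.shape                                 # (50, 64, 64)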
Example No. 15
def modeFilter(data, window=500, step=None, bins=None):
    """Filter based on histogram-based mode function"""
    d1 = data.view(np.ndarray)
    vals = []
    l2 = int(window / 2.)
    if step is None:
        step = l2
    i = 0
    while True:
        if i > len(data) - step:
            break
        vals.append(mode(d1[i:i + window], bins))
        i += step

    chunks = [np.linspace(vals[0], vals[0], l2)]
    for i in range(len(vals) - 1):
        chunks.append(np.linspace(vals[i], vals[i + 1], step))
    remain = len(data) - step * (len(vals) - 1) - l2
    chunks.append(np.linspace(vals[-1], vals[-1], remain))
    d2 = np.hstack(chunks)

    if (hasattr(data, 'implements') and data.implements('MetaArray')):
        return MetaArray(d2, info=data.infoCopy())
    return d2
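
modeFilter depends on a mode(data, bins) helper that is not part of this listing. A plausible histogram-based version (an assumption, not necessarily the project's implementation) and a usage sketch:

import numpy as np

def mode(data, bins=None):
    """Assumed behavior: center of the most populated histogram bin."""
    if bins is None:
        bins = max(int(len(data) / 20), 1)
    y, x = np.histogram(data, bins=bins)
    ind = np.argmax(y)
    return 0.5 * (x[ind] + x[ind + 1])

trace = np.random.normal(0, 1, 5000) + np.linspace(0, 3, 5000)  # drifting baseline
baseline = modeFilter(trace, window=500)
corrected = trace - baseline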
Example No. 16
    def parse_and_go(self, argsin = None):
        global period
        global binsize
        parser=OptionParser() # command line options
        ##### parses all of the options inputted at the command line TFR 11/13/2015
        parser.add_option("-u", "--upfile", dest="upfile", metavar='FILE',
                          help="load the up-file")
        parser.add_option("-d", "--downfile", dest="downfile", metavar='FILE',
                          help="load the down-file")
        parser.add_option("-D", "--directory", dest="directory", metavar='FILE',
                          help="Use directory for data")
        parser.add_option("-t", "--test", dest="test", action='store_true',
                          help="Test mode to check calculations", default=False)
        parser.add_option("-p", '--period', dest = "period", default=4.25, type="float",
                          help = "Stimulus cycle period")
        parser.add_option("-c", '--cycles', dest = "cycles", default=0, type="int",
                          help = "# cycles to analyze")
        parser.add_option("-b", '--binning', dest = "binsize", default=0, type="int",
                          help = "bin reduction x,y")
        parser.add_option("-g", '--gfilter', dest = "gfilt", default=0, type="float",
                          help = "gaussian filter width")
        parser.add_option("-f", '--fdict', dest = "fdict", default=0, type="int",
                          help = "Use dictionary entry")
        
        if argsin is not None:
            (options, args) = parser.parse_args(argsin)
        else:
            (options, args) = parser.parse_args()

        if options.test is True:
            print "Running Test Sample"
            period = 8.0 # period and frame sample rate can be different
            framerate = 8.0
            nper = 1
            d = 10.0*numpy.random.normal(size=(2500,128,128)).astype('float32')
            ds = d.shape
            self.nFrames = d.shape[0]
            self.nPhases = 10
            maxdel = 50
            self.phasex = []
            self.phasey = []
            for i in range(0,self.nPhases):
                dx = i*ds[1]/self.nPhases # each phase is assigned to a region
                baseline = 0.0
                self.resp = numpy.zeros((self.nFrames,))
                phaseDelay = 0.25*period+period*(float(i)/self.nPhases) # phase delay for this region from 0 to nearly the stimulus repeat period
               # print '********phase delay: ', phaseDelay
                for j in range(0, nper): # for each period 
                    tdelay = (float(j) * period) + phaseDelay # time to phase delay point
                    idelay = int(numpy.floor(tdelay*framerate)) # convert to frame position in frame space
               #     print '     tdel: ', tdelay, '    idel: ', idelay
                #    if idelay < self.nFrames-maxdel:
                #        self.resp[idelay:idelay+maxdel] = (i+1)*numpy.exp(-numpy.linspace(0, 2, maxdel)) # marks amplitudes as well
                self.resp = 1000.0*numpy.sin(
                         numpy.linspace(0, 2.0*numpy.pi*self.nFrames/(period*framerate), self.nFrames)+i*numpy.pi/8.0 - numpy.pi/2.0)
                d[:, dx:dx+int(ds[1]/self.nPhases), 5:int(ds[2]/2)] += self.resp[:, numpy.newaxis, numpy.newaxis]
                self.phasex.append( (2+(dx+int(ds[1]/self.nPhases))/2))
                self.phasey.append((6+int(ds[2]/2)/2)) # make the signal equivalent of digitized one (baseline 3000, signal at 1e-4 of baseline)
            d = (d*3000.0*1e-4)+3000.0 # scale and offset to match data scaling coming in
            self.imageData = d.astype('int16') # reduce to a 16-bit map to match camera data type
            self.times = numpy.arange(0, self.nFrames/framerate, 1.0/framerate)
            print "Test Image Created"
            getout2 = fac_lift(self.imageData, self.times)
            self.imageData=getout2
            self.Analysis_FourierMap_TFR(period=period, target = 1, mode=1, bins=binsize)
            print "Completed Analysis FourierMap"
            self.plotmaps_pg(mode = 2, gfilter = 0)
            print "Completed plot maps"

        if options.period is not None:
            measuredPeriod = options.period
        if options.cycles is not None:
            self.nCycles = options.cycles
        if options.binsize is not None:
            binsize = options.binsize
        if options.gfilt is not None:
            gfilt = options.gfilt
        if options.upfile is not None:
            self.upfile = options.upfile
            target = 1
        
        if options.downfile is not None:
            self.downfile = options.downfile
            target = 2

        target = 0
        videoupf = None
        videodwnf = None
        audioupf = None
        audiodwnf = None

        
        print 'DB keys', DB.keys()
        if options.fdict is not None:
            if options.fdict in DB.keys(): # populate options 
                options.upfile = DB[options.fdict][0]
                options.downfile = DB[options.fdict][1]
                options.period = DB[options.fdict][4]
            else:
               print "File %d NOT in DBase\n" % options.fdict
               return
        if options.directory is not None:
            self.directory = options.directory

        if options.upfile is not None:
            videoupf = videobasepath + options.upfile + '.ma'
            audioupf = audiobasepath + options.upfile + '/DaqDevice.ma'
        if options.downfile is not None:
            videodwnf = videobasepath + options.downfile + '.ma'
            audiodwnf = audiobasepath + options.downfile + '/DaqDevice.ma'

        
        im=[]
        self.imageData = []
        print "loading data from ", videoupf
        try:
            im = MetaArray(file = videoupf,  subset=(slice(0,2), slice(64,128), slice(64,128)))
        except:
            print "Error loading upfile: %s\n" % videoupf
            return
        print "data loaded"
        
         
        rawtimes=[]
        rawimageData=[]
        rawtimes = im.axisValues('Time').astype('float32')

        rawimageData = im.view(np.ndarray).astype('float32')
#  

        #reads the timestamps from the files
        indexFile = configfile.readConfigFile(basepath+'.index') 
        timestampup = indexFile.__getitem__('video_'+DB[options.fdict][0]+'.ma')[u'__timestamp__']
        audioupindex = configfile.readConfigFile(audiobasepath+DB[options.fdict][0]+'/.index')
        audioupstamp = audioupindex.__getitem__(u'.')[u'__timestamp__'] 
 
       
        diffup = audioupstamp - timestampup

        
        
        audio = MetaArray(file = audioupf, subset=(slice(0,2), slice(64,128), slice(64,128)))
        audiotime = audio.axisValues('Time').astype('float32')
        audiomin = np.min(audiotime) + diffup
        audiomax = np.max(audiotime) + diffup
        
        print 'audiomin', audiomin
        print 'audiomax', audiomax

        adjustedtime = rawtimes[np.logical_and(rawtimes <= audiomax+.5, rawtimes >= audiomin)]
        frame_start=np.amin(np.where(rawtimes >= audiomin))
        frame_end=np.amax(np.where(rawtimes <= audiomax+0.5))
        adjustedimagedata = rawimageData[frame_start:frame_end]
        #adjustedimagedata = rawimageData[np.logical_and(rawtimes <= audiomax+.5, rawtimes >= audiomin)]
 
        self.times = [x-np.min(adjustedtime) for x in adjustedtime]
        #self.imageData = adjustedimagedata
        # self.imageData=np.mean(self.imageData, axis=0)
        #self.imageData=np.mean(self.imageData)
        outofbounds = 2*adjustedimagedata.std()

        for i in range(adjustedimagedata.shape[0]):
            for j in range(adjustedimagedata.shape[1]):
                for k in range(adjustedimagedata.shape[2]):
                    
                    if adjustedimagedata[i,j,k]< outofbounds:
                        adjustedimagedata[i,j,k]=1
        pg.image(np.mean(adjustedimagedata,axis=0),title='remove extremes')
        self.imageData = adjustedimagedata
        print 'newmean', adjustedimagedata.mean()
        #############loaded file #1###################
        
        # for background file
        im2=[]
        self.imageData2 = []
        adjustedimagedata = []

        print "loading data from ", videodwnf
        try:
            im2 = MetaArray(file = videodwnf,  subset=(slice(0,2), slice(64,128), slice(64,128)))
        except:
            print "Error loading downfile: %s\n" % videodwnf
            return
        print "data loaded"
        
         
        rawtimes=[]
        rawimageData=[]
        rawtimes = im2.axisValues('Time').astype('float32')

        rawimageData = im2.view(np.ndarray).astype('float32')
#  

        #reads the timestamps from the files
        indexFile = configfile.readConfigFile(basepath+'.index') 
        timestampdwn = indexFile.__getitem__('video_'+DB[options.fdict][1]+'.ma')[u'__timestamp__']
        audiodwnindex = configfile.readConfigFile(audiobasepath+DB[options.fdict][1]+'/.index')
        audiodwnstamp = audiodwnindex.__getitem__(u'.')[u'__timestamp__'] 
 
       
        diffdwn = audiodwnstamp - timestampdwn

        
        
        audio = MetaArray(file = audiodwnf, subset=(slice(0,2), slice(64,128), slice(64,128)))
        audiotime = audio.axisValues('Time').astype('float32')
        audiomin = np.min(audiotime) + diffdwn
        audiomax = np.max(audiotime) + diffdwn
        
        print 'audiomin', audiomin
        print 'audiomax', audiomax

        adjustedtime = rawtimes[np.logical_and(rawtimes <= audiomax+.5, rawtimes >= audiomin)]
        frame_start=np.amin(np.where(rawtimes >= audiomin))
        frame_end=np.amax(np.where(rawtimes <= audiomax+0.5))
        adjustedimagedata = rawimageData[frame_start:frame_end]
 
        self.times = [x-np.min(adjustedtime) for x in adjustedtime]
        outofbounds = 2*adjustedimagedata.std()

        for i in range(adjustedimagedata.shape[0]):
            for j in range(adjustedimagedata.shape[1]):
                for k in range(adjustedimagedata.shape[2]):
                    
                    if adjustedimagedata[i,j,k]< outofbounds:
                        adjustedimagedata[i,j,k]=1
        pg.image(np.mean(adjustedimagedata,axis=0),title='remove extremes')
        self.imageData2 = adjustedimagedata
        print 'newmean', adjustedimagedata.mean()

        # self.imageData2 = adjustedimagedata
        # self.imageData2=np.mean(self.imageData2, axis=0)
        print 'size of imagedata', self.imageData2.shape
        ##############loaded file #2##############

        diffframes = np.mean(self.imageData, axis=0)/np.mean(self.imageData2, axis=0)  # per-pixel ratio of frame averages
        print 'mean:', diffframes.mean()
        print 'std:', diffframes.std()
        procimage=diffframes/diffframes.mean()/diffframes.std()

        self.avgframes = pg.image(diffframes, title='Average across frames')
        self.diff_frames = pg.image(procimage, title='Normalized Average across frames')
        
        #self.avgframes = pg.image(procimage, title='Average across frames')
        return
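
The nested i/j/k loops above (used twice in this example) can be collapsed into one vectorized assignment; a self-contained equivalent with a stand-in movie array:

import numpy as np

movie = np.random.normal(3000, 10, size=(100, 64, 64))  # stand-in for adjustedimagedata
outofbounds = 2 * movie.std()
movie[movie < outofbounds] = 1   # same effect as the triple loop: clamp low values to 1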
Example No. 17
    def parse_and_go(self, argsin=None):
        global period
        global binsize
        parser = OptionParser()  # command line options
        ##### parses all of the options inputted at the command line TFR 11/13/2015
        parser.add_option("-u",
                          "--upfile",
                          dest="upfile",
                          metavar='FILE',
                          help="load the up-file")
        parser.add_option("-d",
                          "--downfile",
                          dest="downfile",
                          metavar='FILE',
                          help="load the down-file")
        parser.add_option("-D",
                          "--directory",
                          dest="directory",
                          metavar='FILE',
                          help="Use directory for data")
        parser.add_option("-t",
                          "--test",
                          dest="test",
                          action='store_true',
                          help="Test mode to check calculations",
                          default=False)
        parser.add_option("-p",
                          '--period',
                          dest="period",
                          default=4.25,
                          type="float",
                          help="Stimulus cycle period")
        parser.add_option("-c",
                          '--cycles',
                          dest="cycles",
                          default=0,
                          type="int",
                          help="# cycles to analyze")
        parser.add_option("-b",
                          '--binning',
                          dest="binsize",
                          default=0,
                          type="int",
                          help="bin reduction x,y")
        parser.add_option("-g",
                          '--gfilter',
                          dest="gfilt",
                          default=0,
                          type="float",
                          help="gaussian filter width")
        parser.add_option("-f",
                          '--fdict',
                          dest="fdict",
                          default=0,
                          type="int",
                          help="Use dictionary entry")

        if argsin is not None:
            (options, args) = parser.parse_args(argsin)
        else:
            (options, args) = parser.parse_args()

        if options.period is not None:
            measuredPeriod = options.period
        if options.cycles is not None:
            self.nCycles = options.cycles
        if options.binsize is not None:
            binsize = options.binsize
        if options.gfilt is not None:
            gfilt = options.gfilt

        if options.fdict is not None:
            if options.fdict in DB.keys():  # populate options
                options.upfile = DB[options.fdict][0]
                options.downfile = DB[options.fdict][1]
                options.period = DB[options.fdict][4]
            else:
                print "File %d NOT in DBase\n" % options.fdict
                return
        if options.directory is not None:
            self.directory = options.directory

        if options.upfile is not None:
            self.upfile = options.upfile
            target = 1

        if options.downfile is not None:
            self.downfile = options.downfile
            target = 2

        target = 0
        videoupf = None
        videodwnf = None
        audioupf = None
        audiodwnf = None

        if options.upfile is not None:
            videoupf = videobasepath + options.upfile + '.ma'
            audioupf = audiobasepath + options.upfile + '/DaqDevice.ma'
        if options.downfile is not None:
            videodwnf = videobasepath + options.downfile + '.ma'
            audiodwnf = audiobasepath + options.downfile + '/DaqDevice.ma'

        for file in (videoupf, videodwnf):
            #if options.upfile is not None and options.downfile is not None:
            if file is None:
                break
            im = []
            self.imageData = []
            print "loading data from ", file
            try:
                im = MetaArray(file=file,
                               subset=(slice(0, 2), slice(64,
                                                          128), slice(64,
                                                                      128)))

            except:
                print "Error loading upfile: %s\n" % file
                return
            print "data loaded"
            self.times = im.axisValues('Time')
            self.nFrames = numpy.shape(im)[0]
            self.imageData = numpy.array(im).astype(numpy.float32, copy=False)
            print 'min diff', np.amin(np.diff(self.times))
            print 'max diff', np.amax(np.diff(self.times))
            dt = numpy.mean(numpy.diff(self.times))
            print 'dt:', dt
            target = target + 1
            rawtimes = []
            rawimageData = []
            rawtimes = im.axisValues('Time').astype('float32')
            rawimageData = im.view(np.ndarray).astype('float32')
            print 'size of im', np.shape(rawimageData)
            indexFile = configfile.readConfigFile(basepath + '.index')
            timestampup = indexFile.__getitem__('video_' +
                                                DB[options.fdict][0] +
                                                '.ma')[u'__timestamp__']
            timestampdown = indexFile.__getitem__('video_' +
                                                  DB[options.fdict][1] +
                                                  '.ma')[u'__timestamp__']
            audioupindex = configfile.readConfigFile(audiobasepath +
                                                     DB[options.fdict][0] +
                                                     '/.index')
            audioupstamp = audioupindex.__getitem__(u'.')[u'__timestamp__']
            audiodownindex = configfile.readConfigFile(audiobasepath +
                                                       DB[options.fdict][1] +
                                                       '/.index')
            audiodownstamp = audiodownindex.__getitem__(u'.')[u'__timestamp__']

            diffup = audioupstamp - timestampup
            diffdown = audiodownstamp - timestampdown

            if file is videoupf:
                audio = MetaArray(file=audioupf,
                                  subset=(slice(0, 2), slice(64, 128),
                                          slice(64, 128)))
                audiotime = audio.axisValues('Time').astype('float32')
                audiomin = np.min(audiotime) + diffup
                audiomax = np.max(audiotime) + diffup
            elif file is videodwnf:
                audio = MetaArray(file=audiodwnf,
                                  subset=(slice(0, 2), slice(64, 128),
                                          slice(64, 128)))
                audiotime = audio.axisValues('Time').astype('float32')
                audiomin = np.min(audiotime) + diffdown
                audiomax = np.max(audiotime) + diffdown
            else:
                print 'ERROR!  Unable to load audio file'
            print 'audiomin', audiomin
            print 'audiomax', audiomax

            adjustedtime = rawtimes[np.logical_and(rawtimes <= audiomax + .5,
                                                   rawtimes >= audiomin)]
            frame_start = np.amin(np.where(rawtimes >= audiomin))
            frame_end = np.amax(np.where(rawtimes <= audiomax))

            background = np.mean(rawimageData[10:25, :, :], axis=0)
            print 'size of background', np.shape(background)
            adjustedimagedata = rawimageData[frame_start:frame_end, :, :]
            # print 'adjtime', adjustedtime
            self.times = [x - np.min(adjustedtime) for x in adjustedtime]
            self.imageData = adjustedimagedata
            #print 'self.times:', self.times
            # print 'length of self.times', np.shape(self.times)
            # print 'shape of image data', np.shape(self.imageData)
            print 'size of im', np.shape(self.imageData)
            im = []
            if file is videoupf:
                upflag = 1
            else:
                upflag = 0
            #print 'target:', target

            framerate = 1 / dt
            xsize = self.imageData.shape[1]
            ysize = self.imageData.shape[2]
            nframes = self.imageData.shape[0]
            freq = 4
            frames = self.imageData.reshape(nframes, xsize * ysize)
            print 'size of frames', np.shape(frames)
            # one time point per frame (the original step of 'nframes' produced a
            # single sample, and indexing into the empty list raised IndexError)
            const = dt * np.arange(1, nframes + 1)
            fourvec = np.zeros(const.shape[0], dtype=complex)
            for ii in range(const.shape[0]):
                fourvec[ii] = numpy.exp(2 * np.pi * np.complex(0, 1) *
                                        const[ii] * freq)
            print 'size of fourvec', np.shape(fourvec)
            # broadcast the per-frame complex reference across all pixels
            data = frames * fourvec[:, np.newaxis]
            print 'size of data', np.shape(data)
            data = np.mean(data, axis=0)
            data = data.reshape(xsize, ysize)

            pg.image(data)

            # pg.image(background)
            # diffimage=np.zeros(np.shape(self.imageData))

            # print 'single:', np.shape(self.imageData[4])
            # for i in range(self.imageData.shape[0]):
            #     diffimage[i]=self.imageData[i]/background
            #     #self.imageData[i]/background
            # print 'size of diffimage', np.shape(diffimage)
            # refimage=self.imageData[frame_start]/self.imageData[10]
            # conddiff=np.mean(diffimage,axis=0)-refimage
            # graphme=conddiff/conddiff.mean()/conddiff.std()
            # pg.image(graphme,title='graphme')
            #self.subtract_Background(diffup=diffup)
            #self.Analysis_FourierMap_TFR(period=measuredPeriod, target = target,  bins=binsize, up=upflag)
        print 'target:', target
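
For reference, a stand-alone sketch of the per-pixel Fourier projection that the fourvec block is building: each pixel's time course is projected onto a complex exponential at the stimulus frequency and averaged over frames. The frame rate, frequency, and array shape below are assumptions.

import numpy as np

def fourier_map(movie, framerate, freq):
    """Complex amplitude of each pixel at 'freq', by projection onto exp(2*pi*i*freq*t)."""
    nframes = movie.shape[0]
    t = np.arange(nframes) / float(framerate)             # one time point per frame
    ref = np.exp(2.0 * np.pi * 1j * freq * t)             # complex reference waveform
    frames = movie.reshape(nframes, -1)                   # (time, pixels)
    proj = np.mean(frames * ref[:, np.newaxis], axis=0)   # average projection per pixel
    return proj.reshape(movie.shape[1:])

# hypothetical use: 500 frames at 50 Hz, stimulus repeating at 4 Hz
cmap = fourier_map(np.random.normal(size=(500, 64, 64)), framerate=50.0, freq=4.0)
amplitude, phase = np.abs(cmap), np.angle(cmap)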
Example No. 18
    def parse_and_go(self, argsin = None):
        global period
        parser=OptionParser() # command line options
        ##### parses all of the options inputted at the command line TFR 11/13/2015
        parser.add_option("-u", "--upfile", dest="upfile", metavar='FILE',
                          help="load the up-file")
        parser.add_option("-d", "--downfile", dest="downfile", metavar='FILE',
                          help="load the down-file")
        parser.add_option("-D", "--directory", dest="directory", metavar='FILE',
                          help="Use directory for data")
        parser.add_option("-t", "--test", dest="test", action='store_true',
                          help="Test mode to check calculations", default=False)
        parser.add_option("-p", '--period', dest = "period", default=8.0, type="float",
                          help = "Stimulus cycle period")
        parser.add_option("-c", '--cycles', dest = "cycles", default=0, type="int",
                          help = "# cycles to analyze")
        parser.add_option("-b", '--binning', dest = "binsize", default=0, type="int",
                          help = "bin reduction x,y")
        parser.add_option("-g", '--gfilter', dest = "gfilt", default=0, type="float",
                          help = "gaussian filter width")
        parser.add_option("-f", '--fdict', dest = "fdict", default=0, type="int",
                          help = "Use dictionary entry")
        if argsin is not None:
            (options, args) = parser.parse_args(argsin)
        else:
            (options, args) = parser.parse_args()

        if options.period is not None:
            measuredPeriod = options.period
        if options.cycles is not None:
            self.nCycles = options.cycles
        if options.binsize is not None:
            binsize = options.binsize
        if options.gfilt is not None:
            gfilt = options.gfilt

            #TFR- this code generates a test signal for running a test analysis sequence
        print options.test
        if options.test is True:
            print "Running Test Sample"
            period = 8.0 # period and frame sample rate can be different
            framerate = 8.0
            nper = 1
            d = 10.0*numpy.random.normal(size=(2500,128,128)).astype('float32')
            ds = d.shape
            self.nFrames = d.shape[0]
            self.nPhases = 10
            maxdel = 50
            self.phasex = []
            self.phasey = []
            for i in range(0,self.nPhases):
                dx = i*ds[1]/self.nPhases # each phase is assigned to a region
                baseline = 0.0
                self.resp = numpy.zeros((self.nFrames,))
                phaseDelay = 0.25*period+period*(float(i)/self.nPhases) # phase delay for this region from 0 to nearly the stimulus repeat period
               # print '********phase delay: ', phaseDelay
                for j in range(0, nper): # for each period 
                    tdelay = (float(j) * period) + phaseDelay # time to phase delay point
                    idelay = int(numpy.floor(tdelay*framerate)) # convert to frame position in frame space
               #     print '     tdel: ', tdelay, '    idel: ', idelay
                #    if idelay < self.nFrames-maxdel:
                #        self.resp[idelay:idelay+maxdel] = (i+1)*numpy.exp(-numpy.linspace(0, 2, maxdel)) # marks amplitudes as well
                self.resp = numpy.sin(
                         numpy.linspace(0, 2.0*numpy.pi*self.nFrames/(period*framerate), self.nFrames)+i*numpy.pi/8 - numpy.pi/2.0)
                d[:, dx:dx+int(ds[1]/self.nPhases), 5:int(ds[2]/2)] += self.resp[:, numpy.newaxis, numpy.newaxis]
                self.phasex.append( (2+(dx+int(ds[1]/self.nPhases))/2))
                self.phasey.append((6+int(ds[2]/2)/2)) # make the signal equivalent of digitized one (baseline 3000, signal at 1e-4 of baseline)
            d = (d*3000.0*1e-4)+3000.0 # scale and offset to match data scaling coming in
            self.imageData = d.astype('int16') # reduce to a 16-bit map to match camera data type
            self.times = numpy.arange(0, self.nFrames/framerate, 1.0/framerate)
            print "Test Image Created"
            self.Analysis_FourierMap(period = period, target = 1, mode=1, bins=binsize)
            print "Completed Analysis FourierMap"
            self.plotmaps_pg(mode = 2, gfilter = gfilt)
            print "Completed plot maps"

            return

        if options.fdict is not None:
            if options.fdict in DB.keys(): # populate options 
                options.upfile = DB[options.fdict][0]
                options.downfile = DB[options.fdict][1]
                options.period = DB[options.fdict][4]
            else:
               print "File %d NOT in DBase\n" % options.fdict
               return
        if options.directory is not None:
            self.directory = options.directory

        if options.upfile is not None:
            self.upfile = options.upfile
            target = 1
        
        if options.downfile is not None:
            self.downfile = options.downfile
            target = 2

        target = 0
        upf = None
        dwnf = None
        if options.upfile is not None:
            upf = basepath + options.upfile + '.ma'
        if options.downfile is not None:
            dwnf = basepath + options.downfile + '.ma'

        for file in (upf, dwnf):
#if options.upfile is not None and options.downfile is not None:
            if file is None:
               break
            im=[]
            self.imageData = []
            print "loading data from ", file
            try:
                im = MetaArray(file = file,  subset=(slice(0,2), slice(64,128), slice(64,128)))
            except:
                print "Error loading upfile: %s\n" % file
                return
            print "data loaded"
            target = target + 1
            self.times = im.axisValues('Time').astype('float32')
            self.imageData = im.view(ndarray).astype('float32')
            im=[]
            if file is upf:
               upflag = 1
            else:
               upflag = 0
            self.Analysis_FourierMap(period=measuredPeriod, target = target,  bins=binsize, up=upflag)
        if target > 0:
            self.plotmaps(mode = 1, target = target, gfilter = gfilt)
Example No. 19
    def parse_and_go(self, argsin = None):
        global period
        global binsize
        parser=OptionParser() # command line options
        ##### parses all of the options inputted at the command line TFR 11/13/2015
        parser.add_option("-u", "--upfile", dest="upfile", metavar='FILE',
                          help="load the up-file")
        parser.add_option("-d", "--downfile", dest="downfile", metavar='FILE',
                          help="load the down-file")
        parser.add_option("-D", "--directory", dest="directory", metavar='FILE',
                          help="Use directory for data")
        parser.add_option("-t", "--test", dest="test", action='store_true',
                          help="Test mode to check calculations", default=False)
        parser.add_option("-p", '--period', dest = "period", default=4.25, type="float",
                          help = "Stimulus cycle period")
        parser.add_option("-c", '--cycles', dest = "cycles", default=0, type="int",
                          help = "# cycles to analyze")
        parser.add_option("-b", '--binning', dest = "binsize", default=0, type="int",
                          help = "bin reduction x,y")
        parser.add_option("-g", '--gfilter', dest = "gfilt", default=0, type="float",
                          help = "gaussian filter width")
        parser.add_option("-f", '--fdict', dest = "fdict", default=0, type="int",
                          help = "Use dictionary entry")
        
        if argsin is not None:
            (options, args) = parser.parse_args(argsin)
        else:
            (options, args) = parser.parse_args()

        if options.period is not None:
            measuredPeriod = options.period
        if options.cycles is not None:
            self.nCycles = options.cycles
        if options.binsize is not None:
            binsize = options.binsize
        if options.gfilt is not None:
            gfilt = options.gfilt

        if options.fdict is not None:
            if options.fdict in DB.keys(): # populate options 
                options.upfile = DB[options.fdict][0]
                options.downfile = DB[options.fdict][1]
                options.period = DB[options.fdict][4]
            else:
               print "File %d NOT in DBase\n" % options.fdict
               return
        if options.directory is not None:
            self.directory = options.directory

        if options.upfile is not None:
            self.upfile = options.upfile
            target = 1
        
        if options.downfile is not None:
            self.downfile = options.downfile
            target = 2

        target = 0
        videoupf = None
        videodwnf = None
        audioupf = None
        audiodwnf = None

        if options.upfile is not None:
            videoupf = videobasepath + options.upfile + '.ma'
            audioupf = audiobasepath + options.upfile + '/DaqDevice.ma'
        if options.downfile is not None:
            videodwnf = videobasepath + options.downfile + '.ma'
            audiodwnf = audiobasepath + options.downfile + '/DaqDevice.ma'

        for file in (videoupf, videodwnf):
#if options.upfile is not None and options.downfile is not None:
            if file is None:
               break
            im=[]
            self.imageData = []
            print "loading data from ", file
            try:
                im = MetaArray(file = file,  subset=(slice(0,2), slice(64,128), slice(64,128)))
                
            except:
                print "Error loading upfile: %s\n" % file
                return
            print "data loaded"
            self.times = im.axisValues('Time')
            self.nFrames = numpy.shape(im)[0]
            self.imageData = numpy.array(im).astype(numpy.float32, copy=False)
            print 'min diff', np.amin(np.diff(self.times))
            print 'max diff', np.amax(np.diff(self.times))
            dt = numpy.mean(numpy.diff(self.times))
            print 'dt:',dt
            target = target + 1
            rawtimes=[]
            rawimageData=[]
            rawtimes = im.axisValues('Time').astype('float32')
            rawimageData = im.view(np.ndarray).astype('float32')
            print 'size of im', np.shape(rawimageData)
            indexFile = configfile.readConfigFile(basepath+'.index') 
            timestampup = indexFile.__getitem__('video_'+DB[options.fdict][0]+'.ma')[u'__timestamp__']
            timestampdown = indexFile.__getitem__('video_'+DB[options.fdict][1]+'.ma')[u'__timestamp__']
            audioupindex = configfile.readConfigFile(audiobasepath+DB[options.fdict][0]+'/.index')
            audioupstamp = audioupindex.__getitem__(u'.')[u'__timestamp__'] 
            audiodownindex = configfile.readConfigFile(audiobasepath+DB[options.fdict][1]+'/.index')
            audiodownstamp = audiodownindex.__getitem__(u'.')[u'__timestamp__'] 
           
            diffup = audioupstamp - timestampup
            diffdown = audiodownstamp - timestampdown 

            
            if file is videoupf:
                audio = MetaArray(file = audioupf, subset=(slice(0,2), slice(64,128), slice(64,128)))
                audiotime = audio.axisValues('Time').astype('float32')
                audiomin = np.min(audiotime) + diffup
                audiomax = np.max(audiotime) + diffup
            elif file is videodwnf:
                audio = MetaArray(file = audiodwnf, subset=(slice(0,2), slice(64,128), slice(64,128)))
                audiotime = audio.axisValues('Time').astype('float32')
                audiomin = np.min(audiotime) + diffdown
                audiomax = np.max(audiotime) + diffdown
            else:
                print 'ERROR!  Unable to load audio file'
            print 'audiomin', audiomin
            print 'audiomax', audiomax

            adjustedtime = rawtimes[np.logical_and(rawtimes <= audiomax+.5, rawtimes >= audiomin)]
            frame_start=np.amin(np.where(rawtimes >= audiomin))
            frame_end=np.amax(np.where(rawtimes <= audiomax))

            background = np.mean(rawimageData[10:25,:,:], axis=0)
            print 'size of background', np.shape(background)
            adjustedimagedata = rawimageData[frame_start:frame_end,:,:]
            # print 'adjtime', adjustedtime
            self.times = [x-np.min(adjustedtime) for x in adjustedtime]
            self.imageData = adjustedimagedata
            #print 'self.times:', self.times
            # print 'length of self.times', np.shape(self.times)
            # print 'shape of image data', np.shape(self.imageData)
            print 'size of im', np.shape(self.imageData)
            im=[]
            if file is videoupf:
               upflag = 1
            else:
               upflag = 0
            #print 'target:', target

            framerate=1/dt
            xsize=self.imageData.shape[1]
            ysize=self.imageData.shape[2]
            nframes=self.imageData.shape[0]
            freq=4
            frames=self.imageData.reshape(nframes,xsize*ysize)
            print 'size of frames', np.shape(frames)
            # one time point per frame (the original step of 'nframes' produced a
            # single sample, and indexing into the empty list raised IndexError)
            const = dt*np.arange(1, nframes+1)
            fourvec = np.zeros(const.shape[0], dtype=complex)
            for ii in range(const.shape[0]):
                fourvec[ii] = numpy.exp(2*np.pi*np.complex(0,1)*const[ii]*freq)
            print 'size of fourvec', np.shape(fourvec)
            # broadcast the per-frame complex reference across all pixels
            data = frames*fourvec[:, np.newaxis]
            print 'size of data', np.shape(data)
            data=np.mean(data,axis=0)
            data=data.reshape(xsize,ysize)
            
            pg.image(data)
            
            # pg.image(background)
            # diffimage=np.zeros(np.shape(self.imageData))

            # print 'single:', np.shape(self.imageData[4])
            # for i in range(self.imageData.shape[0]):
            #     diffimage[i]=self.imageData[i]/background
            #     #self.imageData[i]/background
            # print 'size of diffimage', np.shape(diffimage)
            # refimage=self.imageData[frame_start]/self.imageData[10]
            # conddiff=np.mean(diffimage,axis=0)-refimage
            # graphme=conddiff/conddiff.mean()/conddiff.std()
            # pg.image(graphme,title='graphme')
            #self.subtract_Background(diffup=diffup)
            #self.Analysis_FourierMap_TFR(period=measuredPeriod, target = target,  bins=binsize, up=upflag)
        print 'target:', target
Example No. 20
    def parse_and_go(self, argsin = None):
        global period
        global binsize
        parser=OptionParser() # command line options
        ##### parses all of the options inputted at the command line TFR 11/13/2015
        parser.add_option("-u", "--upfile", dest="upfile", metavar='FILE',
                          help="load the up-file")
        parser.add_option("-d", "--downfile", dest="downfile", metavar='FILE',
                          help="load the down-file")
        parser.add_option("-D", "--directory", dest="directory", metavar='FILE',
                          help="Use directory for data")
        parser.add_option("-t", "--test", dest="test", action='store_true',
                          help="Test mode to check calculations", default=False)
        parser.add_option("-p", '--period', dest = "period", default=4.25, type="float",
                          help = "Stimulus cycle period")
        parser.add_option("-c", '--cycles', dest = "cycles", default=0, type="int",
                          help = "# cycles to analyze")
        parser.add_option("-b", '--binning', dest = "binsize", default=0, type="int",
                          help = "bin reduction x,y")
        parser.add_option("-g", '--gfilter', dest = "gfilt", default=0, type="float",
                          help = "gaussian filter width")
        parser.add_option("-f", '--fdict', dest = "fdict", default=0, type="int",
                          help = "Use dictionary entry")
        
        if argsin is not None:
            (options, args) = parser.parse_args(argsin)
        else:
            (options, args) = parser.parse_args()

        if options.test is True:
            print "Running Test Sample"
            period = 8.0 # period and frame sample rate can be different
            framerate = 8.0
            nper = 1
            d = 10.0*numpy.random.normal(size=(2500,128,128)).astype('float32')
            ds = d.shape
            self.nFrames = d.shape[0]
            self.nPhases = 10
            maxdel = 50
            self.phasex = []
            self.phasey = []
            for i in range(0,self.nPhases):
                dx = i*ds[1]/self.nPhases # each phase is assigned to a region
                baseline = 0.0
                self.resp = numpy.zeros((self.nFrames,))
                phaseDelay = 0.25*period+period*(float(i)/self.nPhases) # phase delay for this region from 0 to nearly the stimulus repeat period
               # print '********phase delay: ', phaseDelay
                for j in range(0, nper): # for each period 
                    tdelay = (float(j) * period) + phaseDelay # time to phase delay point
                    idelay = int(numpy.floor(tdelay*framerate)) # convert to frame position in frame space
               #     print '     tdel: ', tdelay, '    idel: ', idelay
                #    if idelay < self.nFrames-maxdel:
                #        self.resp[idelay:idelay+maxdel] = (i+1)*numpy.exp(-numpy.linspace(0, 2, maxdel)) # marks amplitudes as well
                self.resp = 1000.0*numpy.sin(
                         numpy.linspace(0, 2.0*numpy.pi*self.nFrames/(period*framerate), self.nFrames)+i*numpy.pi/8.0 - numpy.pi/2.0)
                d[:, dx:dx+int(ds[1]/self.nPhases), 5:int(ds[2]/2)] += self.resp[:, numpy.newaxis, numpy.newaxis]
                self.phasex.append( (2+(dx+int(ds[1]/self.nPhases))/2))
                self.phasey.append((6+int(ds[2]/2)/2)) # make the signal equivalent of digitized one (baseline 3000, signal at 1e-4 of baseline)
            d = (d*3000.0*1e-4)+3000.0 # scale and offset to match data scaling coming in
            self.imageData = d.astype('int16') # reduce to a 16-bit map to match camera data type
            self.times = numpy.arange(0, self.nFrames/framerate, 1.0/framerate)
            print "Test Image Created"
            getout2 = fac_lift(self.imageData, self.times)
            self.imageData=getout2
            self.Analysis_FourierMap_TFR(period=period, target = 1, mode=1, bins=binsize)
            print "Completed Analysis FourierMap"
            self.plotmaps_pg(mode = 2, gfilter = 0)
            print "Completed plot maps"

        if options.period is not None:
            measuredPeriod = options.period
        if options.cycles is not None:
            self.nCycles = options.cycles
        if options.binsize is not None:
            binsize = options.binsize
        if options.gfilt is not None:
            gfilt = options.gfilt
        if options.upfile is not None:
            self.upfile = options.upfile
            target = 1
        
        if options.downfile is not None:
            self.downfile = options.downfile
            target = 2

        target = 0
        videoupf = None
        videodwnf = None
        audioupf = None
        audiodwnf = None

        
        print 'DB keys', DB.keys()
        if options.fdict is not None:
            if options.fdict in DB.keys(): # populate options 
                options.upfile = DB[options.fdict][0]
                options.downfile = DB[options.fdict][1]
                options.period = DB[options.fdict][4]
            else:
               print "File %d NOT in DBase\n" % options.fdict
               return
        if options.directory is not None:
            self.directory = options.directory

        if options.upfile is not None:
            upf = basepath + options.upfile + '/Camera/frames.ma'
        if options.downfile is not None:
            dwnf = basepath + options.downfile + '/Camera/frames.ma'
        # if options.upfile is not None:
        #     videoupf = videobasepath + options.upfile + '.ma'
        #     audioupf = audiobasepath + options.upfile + '/DaqDevice.ma'
        # if options.downfile is not None:
        #     videodwnf = videobasepath + options.downfile + '.ma'
        #     audiodwnf = audiobasepath + options.downfile + '/DaqDevice.ma'

        
        im=[]
        self.imageData = []
        print "loading data from ", videoupf
        try:
            im = MetaArray(file = videoupf,  subset=(slice(0,2), slice(64,128), slice(64,128)))
        except:
            print "Error loading upfile: %s\n" % videoupf
            return
        print "data loaded"
        
         
        # rawtimes=[]
        # rawimageData=[]
        # rawtimes = im.axisValues('Time').astype('float32')

        # rawimageData = im.view(np.ndarray).astype('float32')
#   
        self.times = im.axisValues('Time').astype('float32')
        self.imageData = im.view(np.ndarray).astype('float32')
        
        #reads the timestamps from the files
        # indexFile = configfile.readConfigFile(basepath+'.index') 
        # timestampup = indexFile.__getitem__('video_'+DB[options.fdict][0]+'.ma')[u'__timestamp__']
        # audioupindex = configfile.readConfigFile(audiobasepath+DB[options.fdict][0]+'/.index')
        # audioupstamp = audioupindex.__getitem__(u'.')[u'__timestamp__'] 
 
       
        # diffup = audioupstamp - timestampup

        
        
        # audio = MetaArray(file = audioupf, subset=(slice(0,2), slice(64,128), slice(64,128)))
        # audiotime = audio.axisValues('Time').astype('float32')
        # audiomin = np.min(audiotime) + diffup
        # audiomax = np.max(audiotime) + diffup
        
        # print 'audiomin', audiomin
        # print 'audiomax', audiomax

        # adjustedtime = rawtimes[np.logical_and(rawtimes <= audiomax+5, rawtimes >= audiomin)]
        # frame_start=np.amin(np.where(rawtimes >= audiomin))
        # frame_end=np.amax(np.where(rawtimes <= audiomax+4))
        # adjustedimagedata = rawimageData[frame_start:frame_end]
        #adjustedimagedata = rawimageData[np.logical_and(rawtimes <= audiomax+.5, rawtimes >= audiomin)]
 
        # self.times = [x-np.min(adjustedtime) for x in adjustedtime]
        # self.imageData = adjustedimagedata
        # self.imageData=np.mean(self.imageData, axis=0)
        

        #background image
        background = self.imageData[5:25]  # baseline frames; rawimageData is only defined in the commented block above
        pg.image(background[0], title='first background slice')

        background = np.mean(background,axis=0)
        print 'dimensions of background', np.shape(background)
        pg.image(background, title='mean background')
        #subtract background from image files

        print 'dimensions of imagedata', np.shape(self.imageData)
        subtracted = np.zeros(np.shape(self.imageData), float)
        divided = np.zeros(np.shape(self.imageData), float)
        for i in range(self.imageData.shape[0]):
            subtracted[i,:,:] = (self.imageData[i,:,:]-background)
            divided[i,:,:] = self.imageData[i,:,:]/background
            #subtracted = self.imageData-background
        subtracted=subtracted/subtracted.mean()
        divided=divided/divided.mean()
        print 'dimensions of subtracted', np.shape(subtracted)
        print 'dimensions of divided', np.shape(divided)
        subtracted = np.mean(subtracted, axis=0)
        divided = np.mean(divided,axis=0)
        pg.image(subtracted, title='subtracted')
        pg.image(divided,title='divided')

        self.imageData = np.mean(self.imageData, axis=0)
        print 'dimensions of imagedata', np.shape(self.imageData)
        pg.image(self.imageData,title='mean raw image')
        edges=feature.canny(self.imageData, sigma=3)
        pg.image(edges,title='edges')
        # for background file
#         im2=[]
#         self.imageData2 = []
#         adjustedimagedata = []

#         print "loading data from ", videodwnf
#         try:
#             im2 = MetaArray(file = videodwnf,  subset=(slice(0,2), slice(64,128), slice(64,128)))
#         except:
#             print "Error loading upfile: %s\n" % videodwnf
#             return
#         print "data loaded"
        
         
#         rawtimes=[]
#         rawimageData=[]
#         rawtimes = im2.axisValues('Time').astype('float32')

#         rawimageData = im2.view(np.ndarray).astype('float32')
# #  

#         #reads the timestamps from the files
#         indexFile = configfile.readConfigFile(basepath+'.index') 
#         timestampdwn = indexFile.__getitem__('video_'+DB[options.fdict][1]+'.ma')[u'__timestamp__']
#         audiodwnindex = configfile.readConfigFile(audiobasepath+DB[options.fdict][1]+'/.index')
#         audiodwnstamp = audiodwnindex.__getitem__(u'.')[u'__timestamp__'] 
 
       
#         diffdwn = audiodwnstamp - timestampdwn

        
        
#         audio = MetaArray(file = audiodwnf, subset=(slice(0,2), slice(64,128), slice(64,128)))
#         audiotime = audio.axisValues('Time').astype('float32')
#         audiomin = np.min(audiotime) + diffdwn
#         audiomax = np.max(audiotime) + diffdwn
        
#         print 'audiomin', audiomin
#         print 'audiomax', audiomax

#         adjustedtime = rawtimes[np.logical_and(rawtimes <= audiomax+.5, rawtimes >= audiomin)]
#         frame_start=np.amin(np.where(rawtimes >= audiomin))
#         frame_end=np.amax(np.where(rawtimes <= audiomax+0.5))
#         adjustedimagedata = rawimageData[frame_start:frame_end]
 
#         self.times = [x-np.min(adjustedtime) for x in adjustedtime]
#         self.imageData2 = adjustedimagedata
#         self.imageData2=np.mean(self.imageData2, axis=0)
#         print 'size of imagedata', self.imageData2.shape
#         diffframes = self.imageData/self.imageData2
#         print 'mean:', diffframes.mean()
#         print 'std:', diffframes.std()
        # procimage=diffframes/diffframes.mean()/diffframes.std()

        # self.avgframes = pg.image(diffframes, title='Average across frames')
        # self.diff_frames = pg.image(procimage, title='Normalized Average across frames')
        # self.imagebck=pg.image(rawimageData[10])
        #self.avgframes = pg.image(procimage, title='Average across frames')
        return
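
# A compact sketch of the baseline normalization performed above: estimate a
# background from a few early frames, then express the stack as a difference and
# as a ratio relative to it. The helper and the default frame range are
# illustrative assumptions that simply mirror the code above.
import numpy as np

def normalize_to_baseline(stack, baseline=slice(5, 25)):
    """stack: (nframes, x, y) image stack; returns (subtracted, divided)."""
    background = stack[baseline].mean(axis=0)
    subtracted = stack - background[np.newaxis, :, :]
    divided = stack/background[np.newaxis, :, :]
    return subtracted/subtracted.mean(), divided/divided.mean()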
Ejemplo n.º 21
0
class MakeClamps():
    """
    Make non-acq4 data usable in our ephysanalysis routines by converting into an acq4-type structure
    Usage:
    Create the MakeClamps instance
    Use set_clamps to bring the data into the structure
    The instance of "MakeClamps" will have all the variables needed
    
    self.plot provides a simple plot of the data and stimulus waveform to check the import
    
    self.read_pfile(filename) reads a simple dictionary-based pickled file and converts the data into
        a Clamps format.
    
    """
    
    def __init__(self, timeunits='ms'):
        self.timeunits = timeunits
        pass
    
    def set_clamps(self, dmode='CC', time=None, data=None, cmdwave=None, cmdvalues=None, tstep=[0.01, 0.100]):
        """
        Set up the clamp data from any source
        
        Parameters
        ----------
        dmode : str (default: 'CC')
            data mode. Should be one of 'CC', 'VC', or maybe 'I=0'
        
        time : np.array (default: None)
            time array. May be an array of arrays, one for each of the data traces
            organized by [trace, timevalues]
        
        data : np.array (default: None)
            data array. May be an array of arrays, one for each trace
            organized by [trace, datavalues]
        
        cmdwave : np.array or list (default: None)
            command waveform
            organized by [trace, command waveform]
        
        cmdvalues: np.array or list (default: None)
            a list of the command values (not the waveform)
        
        tstep : list or np.array, 2 element (default: [0.01, 0.1])
            command step on time and duration
        
        """
        
        self.data = data*1e-3
        if self.timeunits in ['ms']:
            tfactor = 1e-3
        elif self.timeunits in ['s', 'sec']:
            tfactor = 1.0
        if time.ndim > 1:  # a 2-D array holds one time vector per trace
            self.rate = np.diff(time[0,:])*tfactor
            self.time = time[0,:]*tfactor
        else:
            self.rate = np.diff(time)*tfactor
            self.time = time*tfactor
        self.cmd_wave = cmdwave*1e-9
        self.cmd_values = [c*1e9 for c in cmdvalues]
        self.tstep = [t*tfactor for t in tstep]
        self.dmode = dmode
        self.getClampData()
    
    def read_pfile(self, filename):
        """
        Reads a file written from model_run in VCN models - simple dict structure
        ]"""
        fh = open(filename, 'rb')
        df = pickle.load(fh)
        r = df['Results'][0]
    
        vdat = []
        idat = []
        icmd = []
        time = []
        for trial in range(len(df['Results'])):
            icmd.append(trial)
            ds = df['Results'][trial]
            k0 = list(df['Results'][trial].keys())[0]
            dx = ds[k0]['monitor']
            vdat.append(dx['postsynapticV'])
            idat.append(dx['postsynapticI'])
            time.append(dx['time'])
            
        vdat = np.array(vdat)
        idat = np.array(idat)
        time = np.array(time)
        print(vdat.shape, idat.shape)
        self.set_clamps(time=time, data=vdat, cmdwave=idat, cmdvalues=icmd,
                        tstep=[df['runInfo']['stimDelay'], df['runInfo']['stimDur']])

        
        
    def plot(self):
        P = PH.Plotter((2, 1), figsize=(6, 4))
        cell_ax = list(P.axdict.keys())[0]
        iax = list(P.axdict.keys())[1]
        for i in range(self.traces.shape[0]):
            P.axdict[cell_ax].plot(self.time, self.traces.view(np.ndarray)[i], linewidth=1.0)
            P.axdict[iax].plot(self.time, self.cmd_wave.view(np.ndarray)[i], linewidth=1.0)
        P.axdict[cell_ax].set_xlim(0., 150.)
        P.axdict[cell_ax].set_ylim(-200., 50.)
        PH.calbar(P.axdict[cell_ax], calbar=[120., -95., 25., 20.], axesoff=True, orient='left', 
                unitNames={'x': 'ms', 'y': 'mV'}, font='Arial', fontsize=8)

        # mpl.savefig(outfile)
        mpl.show()
    

    def getClampData(self, verbose=False):
        """
        Translates fields as best as we can from the original DATAC structure
        create a Clamp structure for use in SpikeAnalysis and RMTauAnalysis.
        Fills in the fields that are returned by PatchEPhys getClamps:
        clampInfo['dirs']
        clampInfo['missingData']
        self.time_base
        self.values
        self.traceStartTimes = np.zeros(0)
        self.sequence
        self.clampValues (sequence)
        self.nclamp = len(self.clampValues)
        self.repc
        self.nrepc
        self.data_mode
        self.model_mode = False
        self.command_scale_factor
        self.command_units
        self.devicesUsed
        self.clampDevices
        self.holding
        self.clampState
        self.sample_interval
        self.RSeriesUncomp
        self.amplifierSettings['WCCompValid', 'WCEnabled', 'CompEnabled', 'WCSeriesResistance']
        self.cmd_wave
        self.commandLevels (np.array(self.values))
        self.traces = MetaArray(traces, info=info)
        self.tstart
        self.tdur
        self.tend
        self.spikecount = np.zeros(len...) if in vcmode.
        
        Info from an example data file:
        [{'name': 'Channel', 'cols': [{'units': 'A', 'name': 'Command'}, {'units': 'V', 'name': 'primary'}, {'units': 'A', 'name': 'secondary'}]},
        {'units': 's', 'values': array([ 0.00000000e+00, 2.50000000e-05, 5.00000000e-05, ..., 6.99925000e-01, 6.99950000e-01, 6.99975000e-01]),
        'name': 'Time'}, {'ClampState': {'primaryGain': 10.0, 'ClampParams': {'OutputZeroEnable': 0, 'PipetteOffset': 0.05197399854660034,
        'Holding': -1.525747063413352e-11, 'PrimarySignalHPF': 0.0, 'BridgeBalResist': 20757020.0, 'PrimarySignalLPF': 20000.0, 'RsCompBandwidth':
        8.413395979806202e-42, 'WholeCellCompResist': 8.413395979806202e-42, 'WholeCellCompEnable': 6004, 'LeakSubResist': 8.413395979806202e-42,
        'HoldingEnable': 1, 'FastCompTau': 8.413395979806202e-42, 'SlowCompCap': 8.413395979806202e-42, 'WholeCellCompCap': 8.413395979806202e-42,
        'LeakSubEnable': 6004, 'NeutralizationCap': 1.9578947837994853e-12, 'BridgeBalEnable': 1, 'RsCompCorrection': 8.413395979806202e-42,
        'NeutralizationEnable': 1, 'RsCompEnable': 6004, 'OutputZeroAmplitude': -0.0009990156395360827, 'FastCompCap': 8.413395979806202e-42,
        'SlowCompTau': 8.413395979806202e-42}, 'secondarySignal': 'Command Current', 'secondaryGain': 1.0, 'secondaryScaleFactor': 2e-09,
        'primarySignal': 'Membrane Potential', 'extCmdScale': 4e-10, 'mode': 'IC', 'holding': 0.0, 'primaryUnits': 'V', 'LPFCutoff': 20000.0,
        'secondaryUnits': 'A', 'primaryScaleFactor': 0.1, 'membraneCapacitance': 0.0}, 'Protocol': {'recordState': True, 'secondary': None,
        'primary': None, 'mode': 'IC'}, 'DAQ': {'command': {'numPts': 28000, 'rate': 40000.0, 'type': 'ao', 'startTime': 1296241556.7347913},
        'primary': {'numPts': 28000, 'rate': 40000.0, 'type': 'ai', 'startTime': 1296241556.7347913}, 'secondary': {'numPts': 28000, 'rate':
        40000.0, 'type': 'ai', 'startTime': 1296241556.7347913}}, 'startTime': 1296241556.7347913}]

        )
        """
        if self.data is None:
            raise ValueError('No data has been set')
        protocol = ''

        points = self.data.shape[1]
        recs = range(self.data.shape[0])
        self.sample_interval = self.rate[0]# *1e-6  # express in seconds
        self.sample_rate = [1./self.sample_interval for r in recs]
        self.traces = np.array(self.data)
        dt = self.sample_interval  # make assumption that rate is constant in a block
        self.time_base = self.time # in seconds

        if self.dmode == 'CC':  # use first channel
            mainch = 0
            cmdch = 1
        else:  # assumption is swapped - for this data, that means voltage clamp mode.
            mainch = 1
            cmdch = 0

        self.tstart = self.tstep[0]  # could be pulled from protocol/stimulus information
        self.tdur = self.tstep[1]
        self.tend = self.tstart + self.tdur
        t0 = int(self.tstart/dt)
        t1 = int(self.tend/dt)
        if self.cmd_wave is not None:
            self.values = np.nanmean(self.cmd_wave[:, t0:t1], axis=1)  # express values in amps
        else:
            self.values = np.array(self.cmd_values)  # just given the values?
        self.commandLevels = self.values        
        
        info = [{'units': 'A', 'values': self.values, 'name': 'Command'},
                    {'name': 'Time', 'units': 's', 'values': self.time_base},
                    {'ClampState':  # note that many of these values are just defaults and cannot be relied upon
                            {'primaryGain': 1.0, 'ClampParams': 
                                {'OutputZeroEnable': 0, 'PipetteOffset': 0.0,
                                'Holding': 0, 'PrimarySignalHPF': 0.0, 'BridgeBalResist': 0.0, 
                                'PrimarySignalLPF': 20000.0, 'RsCompBandwidth': 0.0, 
                                'WholeCellCompResist': 0.0, 'WholeCellCompEnable': 6004, 'LeakSubResist': 0.0,
                                'HoldingEnable': 1, 'FastCompTau': 0.0, 'SlowCompCap': 0.0, 
                                'WholeCellCompCap': 0.,
                                'LeakSubEnable': 6004, 'NeutralizationCap': 0.,
                                'BridgeBalEnable': 0, 'RsCompCorrection': 0.0,
                                'NeutralizationEnable': 1, 'RsCompEnable': 6004,
                                'OutputZeroAmplitude': 0., 'FastCompCap': 0.,
                                'SlowCompTau': 0.0}, 'secondarySignal': 
                                'Command Current', 'secondaryGain': 1.0,
                                'secondaryScaleFactor': 2e-09,
                                'primarySignal': 'Membrane Potential', 'extCmdScale': 4e-10,
                                'mode': self.dmode, 'holding': 0.0, 'primaryUnits': 'V', 
                                'LPFCutoff': 10000.,
                                'secondaryUnits': 'A', 'primaryScaleFactor': 0.1,
                                'membraneCapacitance': 0.0}, 
                            'Protocol': {'recordState': True, 'secondary': None,
                                    'primary': None, 'mode': 'IC'}, 
                            'DAQ': {'command': {'numPts': points, 'rate': self.sample_interval,
                                    'type': 'ao', 'startTime': 0.},
                            'primary': {'numPts': points, 'rate': self.sample_interval,
                                    'type': 'ai', 'startTime': 0.}, 
                                    'secondary': {'numPts': points, 'rate': self.sample_interval,
                                    'type': 'ai', 'startTime': 0.}
                             },
                    'startTime': 0.}
                ]

        # filled, automatically with default values
        self.repc = 1
        self.nrepc = 1
        self.model_mode = False
        self.command_scale_factor = 1
        self.command_units = 'A'
        self.devicesUsed = None
        self.clampDevices = None
        self.holding = 0.
        self.amplfierSettings = {'WCCompValid': False, 'WCEnabled': False, 
                'CompEnabled': False, 'WCSeriesResistance': 0.}
        self.WCComp = 0.
        self.CCComp = 0.
        self.clampState = None
        self.RSeriesUncomp = 0.
            
        self.tend = self.tstart + self.tdur

        # if self.traces.shape[0] > 1:
        #     # dependiung on the mode, select which channel goes to traces
        #     self.traces = self.traces[:,mainch,:]
        # else:
        #     self.traces[0,mainch,:] = self.traces[0,mainch,:]

        self.traces = MetaArray(self.traces, info=info)
        self.cmd_wave = MetaArray(self.cmd_wave,
             info=[{'name': 'Command', 'units': 'nA',
              'values': np.array(self.values)},
              self.traces.infoCopy('Time'), self.traces.infoCopy(-1)])
        
        self.spikecount = np.zeros(len(recs))
        self.rgnrmp = [0, 0.005]
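
# A minimal usage sketch for MakeClamps, following the recipe in the class
# docstring (create the instance, then call set_clamps). The synthetic traces,
# step times, and command values below are made-up illustrative numbers; numpy
# and MetaArray are assumed to be imported as in the example above.
import numpy as np

nrep, npts, dt_ms = 5, 1000, 0.1                      # 5 traces, 100 ms at 10 kHz
time = np.tile(np.arange(npts)*dt_ms, (nrep, 1))      # ms, one row per trace
data = np.full((nrep, npts), -65.0)                   # membrane potential, mV
cmd = np.zeros((nrep, npts))                          # command waveform, nA
cmdvalues = [-0.1, -0.05, 0.0, 0.05, 0.1]
for i, c in enumerate(cmdvalues):
    cmd[i, 100:600] = c                               # current step from 10 to 60 ms
    data[i, 100:600] += 10.0*c                        # crude passive response

clamps = MakeClamps(timeunits='ms')
clamps.set_clamps(dmode='CC', time=time, data=data, cmdwave=cmd,
                  cmdvalues=cmdvalues, tstep=[10.0, 50.0])
# clamps.plot()  # optional check; requires the plotting helpers imported above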
Ejemplo n.º 22
0
    def parse_and_go(self, argsin=None):
        global period
        parser = OptionParser()  # command line options
        ##### parses all of the options inputted at the command line TFR 11/13/2015
        parser.add_option("-u",
                          "--upfile",
                          dest="upfile",
                          metavar='FILE',
                          help="load the up-file")
        parser.add_option("-d",
                          "--downfile",
                          dest="downfile",
                          metavar='FILE',
                          help="load the down-file")
        parser.add_option("-D",
                          "--directory",
                          dest="directory",
                          metavar='FILE',
                          help="Use directory for data")
        parser.add_option("-t",
                          "--test",
                          dest="test",
                          action='store_true',
                          help="Test mode to check calculations",
                          default=False)
        parser.add_option("-p",
                          '--period',
                          dest="period",
                          default=8.0,
                          type="float",
                          help="Stimulus cycle period")
        parser.add_option("-c",
                          '--cycles',
                          dest="cycles",
                          default=0,
                          type="int",
                          help="# cycles to analyze")
        parser.add_option("-b",
                          '--binning',
                          dest="binsize",
                          default=0,
                          type="int",
                          help="bin reduction x,y")
        parser.add_option("-g",
                          '--gfilter',
                          dest="gfilt",
                          default=0,
                          type="float",
                          help="gaussian filter width")
        parser.add_option("-f",
                          '--fdict',
                          dest="fdict",
                          default=0,
                          type="int",
                          help="Use dictionary entry")
        if argsin is not None:
            (options, args) = parser.parse_args(argsin)
        else:
            (options, args) = parser.parse_args()

        if options.period is not None:
            measuredPeriod = options.period
        if options.cycles is not None:
            self.nCycles = options.cycles
        if options.binsize is not None:
            binsize = options.binsize
        if options.gfilt is not None:
            gfilt = options.gfilt

            #TFR- this code generates a test signal for running a test analysis sequence
        print options.test
        if options.test is True:
            print "Running Test Sample"
            period = 8.0  # period and frame sample rate can be different
            framerate = 8.0
            nper = 1
            d = 10.0 * numpy.random.normal(size=(2500, 128,
                                                 128)).astype('float32')
            ds = d.shape
            self.nFrames = d.shape[0]
            self.nPhases = 10
            maxdel = 50
            self.phasex = []
            self.phasey = []
            for i in range(0, self.nPhases):
                dx = i * ds[
                    1] / self.nPhases  # each phase is assigned to a region
                baseline = 0.0
                self.resp = numpy.zeros((self.nFrames, ))
                phaseDelay = 0.25 * period + period * (
                    float(i) / self.nPhases
                )  # phase delay for this region from 0 to nearly the stimulus repeat period
                # print '********phase delay: ', phaseDelay
                for j in range(0, nper):  # for each period
                    tdelay = (float(j) *
                              period) + phaseDelay  # time to phase delay point
                    idelay = int(numpy.floor(
                        tdelay *
                        framerate))  # convert to frame position in frame space
            #     print '     tdel: ', tdelay, '    idel: ', idelay
            #    if idelay < self.nFrames-maxdel:
            #        self.resp[idelay:idelay+maxdel] = (i+1)*numpy.exp(-numpy.linspace(0, 2, maxdel)) # marks amplitudes as well
                self.resp = numpy.sin(
                    numpy.linspace(
                        0, 2.0 * numpy.pi * self.nFrames /
                        (period * framerate), self.nFrames) +
                    i * numpy.pi / 8 - numpy.pi / 2.0)
                d[:, dx:dx + int(ds[1] / self.nPhases),
                  5:int(ds[2] / 2)] += self.resp[:, numpy.newaxis,
                                                 numpy.newaxis]
                self.phasex.append((2 + (dx + int(ds[1] / self.nPhases)) / 2))
                self.phasey.append(
                    (6 + int(ds[2] / 2) / 2)
                )  # make the signal equivalent of digitized one (baseline 3000, signal at 1e-4 of baseline)
            d = (d * 3000.0 * 1e-4
                 ) + 3000.0  # scale and offset to match data scaling coming in
            self.imageData = d.astype(
                'int16')  # reduce to a 16-bit map to match camera data type
            self.times = numpy.arange(0, self.nFrames / framerate,
                                      1.0 / framerate)
            print "Test Image Created"
            self.Analysis_FourierMap(period=period,
                                     target=1,
                                     mode=1,
                                     bins=binsize)
            print "Completed Analysis FourierMap"
            self.plotmaps_pg(mode=2, gfilter=gfilt)
            print "Completed plot maps"

            return

        if options.fdict is not None:
            if options.fdict in DB.keys():  # populate options
                options.upfile = DB[options.fdict][0]
                options.downfile = DB[options.fdict][1]
                options.period = DB[options.fdict][4]
            else:
                print "File %d NOT in DBase\n" % options.fdict
                return
        if options.directory is not None:
            self.directory = options.directory

        if options.upfile is not None:
            self.upfile = options.upfile
            target = 1

        if options.downfile is not None:
            self.downfile = options.downfile
            target = 2

        target = 0
        upf = None
        dwnf = None
        if options.upfile is not None:
            upf = basepath + options.upfile + '/Camera/frames.ma'
        if options.downfile is not None:
            dwnf = basepath + options.downfile + '/Camera/frames.ma'

        for file in (upf, dwnf):
            #if options.upfile is not None and options.downfile is not None:
            if file is None:
                break
            im = []
            self.imageData = []
            print "loading data from ", file
            try:
                im = MetaArray(file=file,
                               subset=(slice(0, 2), slice(64,
                                                          128), slice(64,
                                                                      128)))
            except:
                print "Error loading upfile: %s\n" % file
                return
            print "data loaded"
            target = target + 1
            self.times = im.axisValues('Time').astype('float32')
            self.imageData = im.view(np.ndarray).astype('float32')
            im = []
            if file is upf:
                upflag = 1
            else:
                upflag = 0
            measuredPeriod = 1.25
            self.Analysis_FourierMap(period=measuredPeriod,
                                     target=target,
                                     bins=binsize,
                                     up=upflag)
        if target > 0:
            self.plotmaps_pg(mode=1, target=target, gfilter=gfilt)
            self.firstframe = pg.image(self.imageData[0],
                                       title='first frame of image')
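
# A condensed sketch of the synthetic test stack built by the test branch above:
# each horizontal band of pixels carries a sinusoid at the stimulus period with a
# band-specific phase, added to noise and scaled to camera-like counts. The small
# array sizes and the helper name are illustrative assumptions.
import numpy as np

def make_phase_test_stack(nframes=200, nx=32, ny=32, nphases=10,
                          period=8.0, framerate=8.0):
    d = 10.0*np.random.normal(size=(nframes, nx, ny)).astype('float32')
    t = np.arange(nframes)/framerate
    for i in range(nphases):
        x0 = i*nx//nphases                            # band of rows for this phase
        x1 = (i + 1)*nx//nphases
        phase = i*np.pi/8.0 - np.pi/2.0               # per-band phase offset
        resp = np.sin(2.0*np.pi*t/period + phase)
        d[:, x0:x1, :] += 1000.0*resp[:, np.newaxis, np.newaxis]
    return (d*3000.0*1e-4) + 3000.0                   # scale/offset like camera data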
Ejemplo n.º 23
0
    def getClampData(self, block, verbose=False, tstart_tdur=[0.01, 0.100]):
        """
        Translates fields as best as we can from the original DATAC structure
        create a Clamp structure for use in SpikeAnalysis and RMTauAnalysis.
        Fills in the fields that are returned by PatchEPhys getClamps:
        clampInfo['dirs']
        clampInfo['missingData']
        self.time_base
        self.values
        self.traceStartTimes = np.zeros(0)
        self.sequence
        self.clampValues (sequence)
        self.nclamp = len(self.clampValues)
        self.repc
        self.nrepc
        self.data_mode
        self.model_mode = False
        self.command_scale_factor
        self.command_units
        self.devicesUsed
        self.clampDevices
        self.holding
        self.clampState
        self.sample_interval
        self.RSeriesUncomp
        self.amplifierSettings['WCCompValid', 'WCEnabled', 'CompEnabled', 'WCSeriesResistance']
        self.cmd_wave
        self.commandLevels (np.array(self.values))
        self.traces = MetaArray(traces, info=info)
        self.tstart
        self.tdur
        self.tend
        self.spikecount = np.zeros(len...) if in vcmode.
        
        Info from an example data file:
        [{'name': 'Channel', 'cols': [{'units': 'A', 'name': 'Command'}, {'units': 'V', 'name': 'primary'}, {'units': 'A', 'name': 'secondary'}]},
        {'units': 's', 'values': array([ 0.00000000e+00, 2.50000000e-05, 5.00000000e-05, ..., 6.99925000e-01, 6.99950000e-01, 6.99975000e-01]),
        'name': 'Time'}, {'ClampState': {'primaryGain': 10.0, 'ClampParams': {'OutputZeroEnable': 0, 'PipetteOffset': 0.05197399854660034,
        'Holding': -1.525747063413352e-11, 'PrimarySignalHPF': 0.0, 'BridgeBalResist': 20757020.0, 'PrimarySignalLPF': 20000.0, 'RsCompBandwidth':
        8.413395979806202e-42, 'WholeCellCompResist': 8.413395979806202e-42, 'WholeCellCompEnable': 6004, 'LeakSubResist': 8.413395979806202e-42,
        'HoldingEnable': 1, 'FastCompTau': 8.413395979806202e-42, 'SlowCompCap': 8.413395979806202e-42, 'WholeCellCompCap': 8.413395979806202e-42,
        'LeakSubEnable': 6004, 'NeutralizationCap': 1.9578947837994853e-12, 'BridgeBalEnable': 1, 'RsCompCorrection': 8.413395979806202e-42,
        'NeutralizationEnable': 1, 'RsCompEnable': 6004, 'OutputZeroAmplitude': -0.0009990156395360827, 'FastCompCap': 8.413395979806202e-42,
        'SlowCompTau': 8.413395979806202e-42}, 'secondarySignal': 'Command Current', 'secondaryGain': 1.0, 'secondaryScaleFactor': 2e-09,
        'primarySignal': 'Membrane Potential', 'extCmdScale': 4e-10, 'mode': 'IC', 'holding': 0.0, 'primaryUnits': 'V', 'LPFCutoff': 20000.0,
        'secondaryUnits': 'A', 'primaryScaleFactor': 0.1, 'membraneCapacitance': 0.0}, 'Protocol': {'recordState': True, 'secondary': None,
        'primary': None, 'mode': 'IC'}, 'DAQ': {'command': {'numPts': 28000, 'rate': 40000.0, 'type': 'ao', 'startTime': 1296241556.7347913},
        'primary': {'numPts': 28000, 'rate': 40000.0, 'type': 'ai', 'startTime': 1296241556.7347913}, 'secondary': {'numPts': 28000, 'rate':
        40000.0, 'type': 'ai', 'startTime': 1296241556.7347913}}, 'startTime': 1296241556.7347913}]

        )
        """
        if self.datac.data is None:
            raise ValueError('No data has been read from the file %s' %
                             self.datac.fullfile)
        protocol = ''

        self.sample_interval = self.datac.rate[0] * 1e-6  # express in seconds
        self.traces = np.array(self.datac.data)
        self.datac.data.shape
        points = self.datac.nr_points
        nchannels = self.datac.nr_channel
        recs = self.datac.record
        dt = 1e-3 * nchannels / self.datac.rate[
            0]  # make assumption that rate is constant in a block
        self.time_base = 1e-3 * np.arange(
            0., self.datac.nr_points / self.datac.rate[0],
            1. / self.datac.rate[0])  # in seconds

        if self.datac.dmode == 'CC':  # use first channel
            mainch = 0
            cmdch = 1
        else:  # assumption is swapped - for this data, that means voltage clamp mode.
            mainch = 1
            cmdch = 0

        cmds = self.traces[:, cmdch, :]
        self.tstart = tstart_tdur[
            0]  # could be pulled from protocol/stimulus information
        self.tdur = tstart_tdur[1]
        self.tend = self.tstart + self.tdur
        t0 = int(self.tstart / dt)
        t1 = int(self.tend / dt)
        self.cmd_wave = np.squeeze(self.traces[:, cmdch, :])
        if cmds.shape[0] > 1:
            self.values = np.nanmean(self.cmd_wave[:, t0:t1],
                                     axis=1)  # express values in amps
        else:
            self.values = np.zeros_like(self.traces.shape[1:2])
        self.commandLevels = self.values

        info = [
            {
                'units': 'A',
                'values': self.values,
                'name': 'Command'
            },
            {
                'name': 'Time',
                'units': 's',
                'values': self.time_base
            },
            {
                'ClampState':  # note that many of these values are just defaults and cannot be relied upon
                {
                    'primaryGain': self.datac.gain,
                    'ClampParams': {
                        'OutputZeroEnable': 0,
                        'PipetteOffset': 0.0,
                        'Holding': 0,
                        'PrimarySignalHPF': 0.0,
                        'BridgeBalResist': 0.0,
                        'PrimarySignalLPF': 20000.0,
                        'RsCompBandwidth': 0.0,
                        'WholeCellCompResist': 0.0,
                        'WholeCellCompEnable': 6004,
                        'LeakSubResist': 0.0,
                        'HoldingEnable': 1,
                        'FastCompTau': 0.0,
                        'SlowCompCap': 0.0,
                        'WholeCellCompCap': 0.,
                        'LeakSubEnable': 6004,
                        'NeutralizationCap': 0.,
                        'BridgeBalEnable': 0,
                        'RsCompCorrection': 0.0,
                        'NeutralizationEnable': 1,
                        'RsCompEnable': 6004,
                        'OutputZeroAmplitude': 0.,
                        'FastCompCap': 0.,
                        'SlowCompTau': 0.0
                    },
                    'secondarySignal': 'Command Current',
                    'secondaryGain': 1.0,
                    'secondaryScaleFactor': 2e-09,
                    'primarySignal': 'Membrane Potential',
                    'extCmdScale': 4e-10,
                    'mode': self.datac.dmode,
                    'holding': 0.0,
                    'primaryUnits': 'V',
                    'LPFCutoff': self.datac.low_pass,
                    'secondaryUnits': 'A',
                    'primaryScaleFactor': 0.1,
                    'membraneCapacitance': 0.0
                },
                'Protocol': {
                    'recordState': True,
                    'secondary': None,
                    'primary': None,
                    'mode': 'IC'
                },
                'DAQ': {
                    'command': {
                        'numPts': points,
                        'rate': self.sample_interval,
                        'type': 'ao',
                        'startTime': 0.
                    },
                    'primary': {
                        'numPts': points,
                        'rate': self.sample_interval,
                        'type': 'ai',
                        'startTime': 0.
                    },
                    'secondary': {
                        'numPts': points,
                        'rate': self.sample_interval,
                        'type': 'ai',
                        'startTime': 0.
                    }
                },
                'startTime': 0.
            }
        ]

        # filled, automatically with default values
        self.repc = 1
        self.nrepc = 1
        self.model_mode = False
        self.command_scale_factor = 1
        self.command_units = 'A'
        self.devicesUsed = None
        self.clampDevices = None
        self.holding = 0.
        self.amplfierSettings = {
            'WCCompValid': False,
            'WCEnabled': False,
            'CompEnabled': False,
            'WCSeriesResistance': 0.
        }
        self.clampState = None
        self.RSeriesUncomp = 0.

        self.protoTimes = {'drugtestiv': [0.21, 0.51], 'ap-iv2': [0.01, 0.5]}
        if protocol in self.protoTimes:
            self.tstart = self.protoTimes[protocol][0]
            self.tdur = self.protoTimes[protocol][1]

        self.tend = self.tstart + self.tdur

        if self.traces.shape[0] > 1:
            # depending on the mode, select which channel goes to traces
            self.traces = self.traces[:, mainch, :]
        else:
            self.traces[0, mainch, :] = self.traces[0, mainch, :]

        self.traces = MetaArray(self.traces, info=info)
        self.spikecount = np.zeros(len(recs))
        self.rgnrmp = [0, 0.005]
Ejemplo n.º 24
0
    def parse_and_go(self, argsin = None):
        global period
        global binsize
        parser=OptionParser() # command line options
        ##### parses all of the options inputted at the command line TFR 11/13/2015
        parser.add_option("-u", "--upfile", dest="upfile", metavar='FILE',
                          help="load the up-file")
        parser.add_option("-d", "--downfile", dest="downfile", metavar='FILE',
                          help="load the down-file")
        parser.add_option("-D", "--directory", dest="directory", metavar='FILE',
                          help="Use directory for data")
        parser.add_option("-t", "--test", dest="test", action='store_true',
                          help="Test mode to check calculations", default=False)
        parser.add_option("-p", '--period', dest = "period", default=4.25, type="float",
                          help = "Stimulus cycle period")
        parser.add_option("-c", '--cycles', dest = "cycles", default=0, type="int",
                          help = "# cycles to analyze")
        parser.add_option("-b", '--binning', dest = "binsize", default=0, type="int",
                          help = "bin reduction x,y")
        parser.add_option("-g", '--gfilter', dest = "gfilt", default=0, type="float",
                          help = "gaussian filter width")
        parser.add_option("-f", '--fdict', dest = "fdict", default=0, type="int",
                          help = "Use dictionary entry")
        
        if argsin is not None:
            (options, args) = parser.parse_args(argsin)
        else:
            (options, args) = parser.parse_args()

        if options.test is True:
            print "Running Test Sample"
            period = 8.0 # period and frame sample rate can be different
            framerate = 8.0
            nper = 1
            d = 10.0*numpy.random.normal(size=(2500,128,128)).astype('float32')
            ds = d.shape
            self.nFrames = d.shape[0]
            self.nPhases = 10
            maxdel = 50
            self.phasex = []
            self.phasey = []
            for i in range(0,self.nPhases):
                dx = i*ds[1]/self.nPhases # each phase is assigned to a region
                baseline = 0.0
                self.resp = numpy.zeros((self.nFrames,))
                phaseDelay = 0.25*period+period*(float(i)/self.nPhases) # phase delay for this region from 0 to nearly the stimulus repeat period
               # print '********phase delay: ', phaseDelay
                for j in range(0, nper): # for each period 
                    tdelay = (float(j) * period) + phaseDelay # time to phase delay point
                    idelay = int(numpy.floor(tdelay*framerate)) # convert to frame position in frame space
               #     print '     tdel: ', tdelay, '    idel: ', idelay
                #    if idelay < self.nFrames-maxdel:
                #        self.resp[idelay:idelay+maxdel] = (i+1)*numpy.exp(-numpy.linspace(0, 2, maxdel)) # marks amplitudes as well
                self.resp = 1000.0*numpy.sin(
                         numpy.linspace(0, 2.0*numpy.pi*self.nFrames/(period*framerate), self.nFrames)+i*numpy.pi/8.0 - numpy.pi/2.0)
                d[:, dx:dx+int(ds[1]/self.nPhases), 5:int(ds[2]/2)] += self.resp[:, numpy.newaxis, numpy.newaxis]
                self.phasex.append( (2+(dx+int(ds[1]/self.nPhases))/2))
                self.phasey.append((6+int(ds[2]/2)/2)) # make the signal equivalent of digitized one (baseline 3000, signal at 1e-4 of baseline)
            d = (d*3000.0*1e-4)+3000.0 # scale and offset to match data scaling coming in
            self.imageData = d.astype('int16') # reduce to a 16-bit map to match camera data type
            self.times = numpy.arange(0, self.nFrames/framerate, 1.0/framerate)
            print "Test Image Created"
            getout2 = fac_lift(self.imageData, self.times)
            self.imageData=getout2
            self.Analysis_FourierMap_TFR(period=period, target = 1, mode=1, bins=binsize)
            print "Completed Analysis FourierMap"
            self.plotmaps_pg(mode = 2, gfilter = 0)
            print "Completed plot maps"

        if options.period is not None:
            measuredPeriod = options.period
        if options.cycles is not None:
            self.nCycles = options.cycles
        if options.binsize is not None:
            binsize = options.binsize
        if options.gfilt is not None:
            gfilt = options.gfilt

        print 'DB keys', DB.keys()
        if options.fdict is not None:
            if options.fdict in DB.keys(): # populate options 
                options.upfile = DB[options.fdict][0]
                options.downfile = DB[options.fdict][1]
                options.period = DB[options.fdict][4]
            else:
               print "File %d NOT in DBase\n" % options.fdict
               return
        if options.directory is not None:
            self.directory = options.directory

        if options.upfile is not None:
            self.upfile = options.upfile
            target = 1
        
        if options.downfile is not None:
            self.downfile = options.downfile
            target = 2

        target = 0
        videoupf = None
        videodwnf = None
        audioupf = None
        audiodwnf = None

        if options.upfile is not None:
            videoupf = videobasepath + options.upfile + '.ma'
            audioupf = audiobasepath + options.upfile + '/DaqDevice.ma'
        if options.downfile is not None:
            videodwnf = videobasepath + options.downfile + '.ma'
            audiodwnf = audiobasepath + options.downfile + '/DaqDevice.ma'

        # indexFile = configfile.readConfigFile(basepath+'.index') 
        # time = indexFile.__getitem__('video_019.ma')[u'__timestamp__'] 

        #indexFile = configfile.readConfigFile(basepath+'.index') 
        #print 'indexfile', indexfile
        for file in (videoupf, videodwnf):
#if options.upfile is not None and options.downfile is not None:
            if file is None:
               break
            im=[]
            self.imageData = []
            print "loading data from ", file
            try:
                im = MetaArray(file = file,  subset=(slice(0,2), slice(64,128), slice(64,128)))
            except:
                print "Error loading upfile: %s\n" % file
                return
            print "data loaded"
            target = target + 1
            # dir = acq4.util.DataManager.getHandle(basepath)
            # time = dir.info()['__timestamp__']
            # print 'time:', time
           #print 'im:', im
            # dir(im)
            rawtimes=[]
            rawimageData=[]
            rawtimes = im.axisValues('Time').astype('float32')
#            print 'time', rawtimes
            rawimageData = im.view(np.ndarray).astype('float32')
#            print 'shape of ra image data:', rawimageData.shape
            ## videobasepath = /......./2016.10.08_000/Intrinsic_Mapping/video_'
            ## indexFile = configFile.readConfigFile('/...../2016.10.08_000/Intrinsic_Mapping/.index') -> a dictionary

            # dir = acq4.util.DataManager.getHandle(videoupf)
            # time = dir.info()['__timestamp__']
            
            # #timestampup = timestamp[options.fdict][0]
            # audioupstamp = timestamp[options.fdict][1]
            # #timestampdown = timestamp[options.fdict][2]
            # audiodownstamp = timestamp[options.fdict][3]
            # #print 'optioins.dict', options.fdict[0]

            #reads the timestamps from the files
            indexFile = configfile.readConfigFile(basepath+'.index') 
            timestampup = indexFile.__getitem__('video_'+DB[options.fdict][0]+'.ma')[u'__timestamp__']
            timestampdown = indexFile.__getitem__('video_'+DB[options.fdict][1]+'.ma')[u'__timestamp__']
            audioupindex = configfile.readConfigFile(audiobasepath+DB[options.fdict][0]+'/.index')
            # audioupstamp = audioupindex.__getitem__(u'.')[u'__timestamp__'] 
            audioupstamp = audioupindex.__getitem__('DaqDevice.ma')[u'__timestamp__'] - 13.5
            audiodownindex = configfile.readConfigFile(audiobasepath+DB[options.fdict][1]+'/.index')
            #audiodownstamp = audiodownindex.__getitem__(u'.')[u'__timestamp__'] 
            audiodownstamp = audiodownindex.__getitem__('DaqDevice.ma')[u'__timestamp__'] -13.5

            diffup = audioupstamp - timestampup
            diffdown = audiodownstamp - timestampdown 

            
            if file is videoupf:
                audio = MetaArray(file = audioupf, subset=(slice(0,2), slice(64,128), slice(64,128)))
                audiotime = audio.axisValues('Time').astype('float32')
                audiomin = np.min(audiotime) + diffup
                audiomax = np.max(audiotime) + diffup
            elif file is videodwnf:
                audio = MetaArray(file = audiodwnf, subset=(slice(0,2), slice(64,128), slice(64,128)))
                audiotime = audio.axisValues('Time').astype('float32')
                audiomin = np.min(audiotime) + diffdown
                audiomax = np.max(audiotime) + diffdown
            else:
                print 'ERROR!  Unable to load audio file'
            print 'audiomin', audiomin
            print 'audiomax', audiomax

            adjustedtime = rawtimes[np.logical_and(rawtimes <= audiomax+4, rawtimes >= audiomin)]
            
            adjustedimagedata = rawimageData[np.logical_and(rawtimes <= audiomax+4, rawtimes >= audiomin)]
            # print 'adjtime', adjustedtime
            self.times = [x-np.min(adjustedtime) for x in adjustedtime]
            self.imageData = adjustedimagedata
            background = rawimageData[5:25]
            background = np.mean(background,axis=0)
            print 'dimensions of background', np.shape(background)
            pg.image(background, title='mean background')
            subtracted = np.zeros(np.shape(self.imageData), float)
            for i in range(self.imageData.shape[0]):
                subtracted[i,:,:] = (self.imageData[i,:,:]-background)
            subtracted=subtracted/subtracted.mean()
            self.imageData = subtracted
            #print 'self.times:', self.times
            # print 'length of self.times', np.shape(self.times)
            # print 'shape of image data', np.shape(self.imageData)

            #analyze a quarter of the image
            #xcut = (self.imageData.shape[1]+1)/8
            #ycut = (self.imageData.shape[2]+1)/8
            #self.imageData=self.imageData[:,3*xcut-1:7*xcut-1,ycut-1:7*ycut-1]
            im=[]
            if file is videoupf:
               upflag = 1
            else:
               upflag = 0
            #print 'target:', target
            measuredPeriod=4.25
            #self.subtract_Background(diffup=diffup)
            self.Analysis_FourierMap(period=measuredPeriod, target = target,  bins=binsize, up=upflag)
        print 'target:', target
        if target > 0:
            self.plotmaps_pg(mode = 1, target = target, gfilter = gfilt)

        return
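
# A small sketch of the timestamp alignment done in the loop above: the camera
# file and the DaqDevice (audio) file carry independent start timestamps, so the
# difference between them shifts the audio epoch onto the camera clock, and only
# the frames inside that window are kept. Names and the padding default are
# illustrative assumptions mirroring the code above.
import numpy as np

def frames_in_stimulus_window(frame_times, image_stack,
                              audio_times, audio_start, video_start, pad=4.0):
    """Return (zeroed times, frames) restricted to the audio epoch."""
    offset = audio_start - video_start                # clock offset between files
    tmin = audio_times.min() + offset
    tmax = audio_times.max() + offset + pad
    keep = np.logical_and(frame_times >= tmin, frame_times <= tmax)
    return frame_times[keep] - frame_times[keep].min(), image_stack[keep]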
Ejemplo n.º 25
0
    def parse_and_go(self, argsin=None):
        global period
        global binsize
        parser = OptionParser()  # command line options
        ##### parses all of the options inputted at the command line TFR 11/13/2015
        parser.add_option("-u",
                          "--upfile",
                          dest="upfile",
                          metavar='FILE',
                          help="load the up-file")
        parser.add_option("-d",
                          "--downfile",
                          dest="downfile",
                          metavar='FILE',
                          help="load the down-file")
        parser.add_option("-D",
                          "--directory",
                          dest="directory",
                          metavar='FILE',
                          help="Use directory for data")
        parser.add_option("-t",
                          "--test",
                          dest="test",
                          action='store_true',
                          help="Test mode to check calculations",
                          default=False)
        parser.add_option("-p",
                          '--period',
                          dest="period",
                          default=4.25,
                          type="float",
                          help="Stimulus cycle period")
        parser.add_option("-c",
                          '--cycles',
                          dest="cycles",
                          default=0,
                          type="int",
                          help="# cycles to analyze")
        parser.add_option("-b",
                          '--binning',
                          dest="binsize",
                          default=0,
                          type="int",
                          help="bin reduction x,y")
        parser.add_option("-g",
                          '--gfilter',
                          dest="gfilt",
                          default=0,
                          type="float",
                          help="gaussian filter width")
        parser.add_option("-f",
                          '--fdict',
                          dest="fdict",
                          default=0,
                          type="int",
                          help="Use dictionary entry")

        if argsin is not None:
            (options, args) = parser.parse_args(argsin)
        else:
            (options, args) = parser.parse_args()

        if options.test is True:
            print "Running Test Sample"
            period = 8.0  # period and frame sample rate can be different
            framerate = 8.0
            nper = 1
            d = 10.0 * numpy.random.normal(size=(2500, 128,
                                                 128)).astype('float32')
            ds = d.shape
            self.nFrames = d.shape[0]
            self.nPhases = 10
            maxdel = 50
            self.phasex = []
            self.phasey = []
            for i in range(0, self.nPhases):
                dx = i * ds[
                    1] / self.nPhases  # each phase is assigned to a region
                baseline = 0.0
                self.resp = numpy.zeros((self.nFrames, ))
                phaseDelay = 0.25 * period + period * (
                    float(i) / self.nPhases
                )  # phase delay for this region from 0 to nearly the stimulus repeat period
                # print '********phase delay: ', phaseDelay
                for j in range(0, nper):  # for each period
                    tdelay = (float(j) *
                              period) + phaseDelay  # time to phase delay point
                    idelay = int(numpy.floor(
                        tdelay *
                        framerate))  # convert to frame position in frame space
            #     print '     tdel: ', tdelay, '    idel: ', idelay
            #    if idelay < self.nFrames-maxdel:
            #        self.resp[idelay:idelay+maxdel] = (i+1)*numpy.exp(-numpy.linspace(0, 2, maxdel)) # marks amplitudes as well
                self.resp = 1000.0 * numpy.sin(
                    numpy.linspace(
                        0, 2.0 * numpy.pi * self.nFrames /
                        (period * framerate), self.nFrames) +
                    i * numpy.pi / 8.0 - numpy.pi / 2.0)
                d[:, dx:dx + int(ds[1] / self.nPhases),
                  5:int(ds[2] / 2)] += self.resp[:, numpy.newaxis,
                                                 numpy.newaxis]
                self.phasex.append((2 + (dx + int(ds[1] / self.nPhases)) / 2))
                self.phasey.append(
                    (6 + int(ds[2] / 2) / 2)
                )  # make the signal equivalent of digitized one (baseline 3000, signal at 1e-4 of baseline)
            d = (d * 3000.0 * 1e-4
                 ) + 3000.0  # scale and offset to match data scaling coming in
            self.imageData = d.astype('int16')  # reduce to a 16-bit map to match camera data type
            self.times = numpy.arange(0, self.nFrames / framerate, 1.0 / framerate)
            print "Test Image Created"
            getout2 = fac_lift(self.imageData, self.times)
            self.imageData = getout2
            self.Analysis_FourierMap_TFR(period=period,
                                         target=1,
                                         mode=1,
                                         bins=binsize)
            print "Completed Analysis FourierMap"
            self.plotmaps_pg(mode=2, gfilter=0)
            print "Completed plot maps"

        if options.period is not None:
            measuredPeriod = options.period
        if options.cycles is not None:
            self.nCycles = options.cycles
        if options.binsize is not None:
            binsize = options.binsize
        if options.gfilt is not None:
            gfilt = options.gfilt

        print 'DB keys', DB.keys()
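        # resolve the requested dictionary entry to its up/down file names and stimulus period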
        if options.fdict is not None:
            if options.fdict in DB.keys():  # populate options
                options.upfile = DB[options.fdict][0]
                options.downfile = DB[options.fdict][1]
                options.period = DB[options.fdict][4]
            else:
                print "File %d NOT in DBase\n" % options.fdict
                return
        if options.directory is not None:
            self.directory = options.directory

        if options.upfile is not None:
            self.upfile = options.upfile
            target = 1

        if options.downfile is not None:
            self.downfile = options.downfile
            target = 2

        target = 0
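        # target counts the video files that are actually loaded in the loop below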
        videoupf = None
        videodwnf = None
        audioupf = None
        audiodwnf = None

        if options.upfile is not None:
            videoupf = videobasepath + options.upfile + '.ma'
            audioupf = audiobasepath + options.upfile + '/DaqDevice.ma'
        if options.downfile is not None:
            videodwnf = videobasepath + options.downfile + '.ma'
            audiodwnf = audiobasepath + options.downfile + '/DaqDevice.ma'

        # indexFile = configfile.readConfigFile(basepath+'.index')
        # time = indexFile.__getitem__('video_019.ma')[u'__timestamp__']

        #indexFile = configfile.readConfigFile(basepath+'.index')
        #print 'indexfile', indexfile
        for file in (videoupf, videodwnf):
            #if options.upfile is not None and options.downfile is not None:
            if file is None:
                # note: a missing file ends the loop entirely rather than skipping to the next entry
                break
            im = []
            self.imageData = []
            print "loading data from ", file
            try:
                im = MetaArray(file=file,
                               subset=(slice(0, 2), slice(64, 128), slice(64, 128)))
            except:
                print "Error loading file: %s\n" % file
                return
            print "data loaded"
            target = target + 1
            # dir = acq4.util.DataManager.getHandle(basepath)
            # time = dir.info()['__timestamp__']
            # print 'time:', time
            #print 'im:', im
            # dir(im)
            rawtimes = []
            rawimageData = []
            rawtimes = im.axisValues('Time').astype('float32')
            #            print 'time', rawtimes
            rawimageData = im.view(np.ndarray).astype('float32')
            #            print 'shape of raw image data:', rawimageData.shape
            ## videobasepath = /......./2016.10.08_000/Intrinsic_Mapping/video_'
            ## indexFile = configFile.readConfigFile('/...../2016.10.08_000/Intrinsic_Mapping/.index') -> a dictionary

            # dir = acq4.util.DataManager.getHandle(videoupf)
            # time = dir.info()['__timestamp__']

            # #timestampup = timestamp[options.fdict][0]
            # audioupstamp = timestamp[options.fdict][1]
            # #timestampdown = timestamp[options.fdict][2]
            # audiodownstamp = timestamp[options.fdict][3]
            # #print 'optioins.dict', options.fdict[0]

            # read the video and audio timestamps from the acquisition index files
            indexFile = configfile.readConfigFile(basepath + '.index')
            timestampup = indexFile['video_' + DB[options.fdict][0] + '.ma'][u'__timestamp__']
            timestampdown = indexFile['video_' + DB[options.fdict][1] + '.ma'][u'__timestamp__']
            audioupindex = configfile.readConfigFile(audiobasepath + DB[options.fdict][0] + '/.index')
            # audioupstamp = audioupindex[u'.'][u'__timestamp__']
            # the fixed 13.5 s subtraction below appears to be an empirical timing correction for the DAQ record
            audioupstamp = audioupindex['DaqDevice.ma'][u'__timestamp__'] - 13.5
            audiodownindex = configfile.readConfigFile(audiobasepath + DB[options.fdict][1] + '/.index')
            # audiodownstamp = audiodownindex[u'.'][u'__timestamp__']
            audiodownstamp = audiodownindex['DaqDevice.ma'][u'__timestamp__'] - 13.5

            diffup = audioupstamp - timestampup
            diffdown = audiodownstamp - timestampdown
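            # diffup/diffdown shift the audio time axis into the video frame time base below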

            if file is videoupf:
                audio = MetaArray(file=audioupf,
                                  subset=(slice(0, 2), slice(64, 128),
                                          slice(64, 128)))
                audiotime = audio.axisValues('Time').astype('float32')
                audiomin = np.min(audiotime) + diffup
                audiomax = np.max(audiotime) + diffup
            elif file is videodwnf:
                audio = MetaArray(file=audiodwnf,
                                  subset=(slice(0, 2), slice(64, 128),
                                          slice(64, 128)))
                audiotime = audio.axisValues('Time').astype('float32')
                audiomin = np.min(audiotime) + diffdown
                audiomax = np.max(audiotime) + diffdown
            else:
                print 'ERROR!  Unable to load audio file'
                return
            print 'audiomin', audiomin
            print 'audiomax', audiomax

            adjustedtime = rawtimes[np.logical_and(rawtimes <= audiomax + 4,
                                                   rawtimes >= audiomin)]

            adjustedimagedata = rawimageData[np.logical_and(
                rawtimes <= audiomax + 4, rawtimes >= audiomin)]
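            # only frames inside the (shifted) audio window, padded by 4 s at the end, are kept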
            # print 'adjtime', adjustedtime
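            # re-zero the time axis at the start of the audio-aligned window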
            self.times = [x - np.min(adjustedtime) for x in adjustedtime]
            self.imageData = adjustedimagedata
            background = rawimageData[5:25]
            background = np.mean(background, axis=0)
            print 'dimensions of background', np.shape(background)
            pg.image(background, title='mean background')
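            # subtract the mean background frame from every frame, then normalize the
            # stack by its overall mean so the result is a relative (fractional) signal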
            subtracted = np.zeros(np.shape(self.imageData), float)
            for i in range(self.imageData.shape[0]):
                subtracted[i, :, :] = (self.imageData[i, :, :] - background)
            subtracted = subtracted / subtracted.mean()
            self.imageData = subtracted
            #print 'self.times:', self.times
            # print 'length of self.times', np.shape(self.times)
            # print 'shape of image data', np.shape(self.imageData)

            #analyze a quarter of the image
            #xcut = (self.imageData.shape[1]+1)/8
            #ycut = (self.imageData.shape[2]+1)/8
            #self.imageData=self.imageData[:,3*xcut-1:7*xcut-1,ycut-1:7*ycut-1]
            im = []
            if file is videoupf:
                upflag = 1
            else:
                upflag = 0
            #print 'target:', target
            measuredPeriod = 4.25
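            # note: this hard-coded period overrides any value supplied with --period above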
            #self.subtract_Background(diffup=diffup)
            self.Analysis_FourierMap(period=measuredPeriod,
                                     target=target,
                                     bins=binsize,
                                     up=upflag)
        print 'target:', target
        if target > 0:
            self.plotmaps_pg(mode=1, target=target, gfilter=gfilt)

        return