def DelFile(root, interface):
    poolname = _GetPoolName(root, interface)
    if not poolname:
        return
    while 1:
        # get Protocol
        proto = clients.SelectProtocol(
            root, text='Select protocol class to remove file', canx='Done')
        if not proto:
            return
        # get list of files
        lst = glob.glob(poolname + '\\' + proto + '\\*.raw')
        lst.sort()
        # show files and allow a deletion only if at least 5 files would remain
        clist = []
        for f in lst:
            try:
                fd = open(f, 'rb')
            except IOError:
                continue
            cname = rawlist.rawheaderfield(fd, 'CLIENTNAME')
            fd.close()
            if not cname:
                org = "unk"
            else:
                spl = cname.split('\\')
                org = spl[-1]
                if len(spl) > 1:
                    org = spl[-2] + '/' + org
                if len(spl) > 2:
                    org = spl[-3] + '/' + org

            base = os.path.basename(f)
            clist.append(proto + '/' + base + '    from  ' + org)
        if len(clist) < 6:
            tkMessageBox.showerror('Pool Files',
                                   'There must be 5 or more defined')
            continue  # refuse the deletion so at least 5 files remain
        md = FileDlg(root,
                     'Selecting files to delete',
                     clist,
                     buttons=['Delete File', 'Cancel'])
        md.Activate()
        if md.val == 'Delete File':
            fext = md.ans[0].split()
            pname = poolname + '\\' + fext[0]
            try:
                os.unlink(pname)
            except OSError:
                tkMessageBox.showerror('Pool Files',
                                       'Failed to delete ' + pname)
        del md
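
# A minimal usage sketch for DelFile (this helper is an assumption for
# illustration, not part of the original tool): DelFile drives Tk dialogs,
# so it needs a Tk root; 'interface' is whatever _GetPoolName() expects.
def _demo_delfile(interface):
    import Tkinter
    root = Tkinter.Tk()
    root.withdraw()  # dialogs only; hide the empty main window
    DelFile(root, interface)
    root.destroy()
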
def update_eyes(fobj):
    try:
        fdraw = open(fobj.fname[:-3] + 'raw', 'rb')
    except IOError:
        return {}
    frq = rawlist.rawheaderfield(fdraw, 'CLOCKRATE')
    cbxx = rawlist.rawcb('', rawfd=fdraw)

    #must fix error in saving summary data status of eyes
    eoec = {}
    for cb in cbxx[0]:
        g, a, s = rawlist.unpack_action(cb[1])
        if g == 14 and a == 0:
            vv = struct.unpack('ff', cb[3])
            if vv[0] == 5.0:
                cls = int(vv[1])
                sec = cb[2] / frq
                eoec[sec] = cls
    fdraw.close()
    return eoec
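
# Hedged sketch (an assumption, not part of the original module) of how the
# map returned by update_eyes() might be consumed: keys are times in seconds,
# values are eyes-open/closed class codes, so the state at an arbitrary
# second is the most recent entry at or before it.
def _eyes_state_at(eoec, sec, default=0):
    state = default
    for t in sorted(eoec.keys()):
        if t > sec:
            break
        state = eoec[t]
    return state
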
def get_sum_file_fd(fd,
                    details=1,
                    prt=0,
                    filename='',
                    baseposition=0,
                    layout=''):
    "External entry point to 'load' a summary file using file descriptor"
    global allsitelist, headerversion, formatstring, BLK

    # builds a 'sumfile' object

    posit = fd.tell()
    fd.seek(0, 2)
    fsize = fd.tell()
    fd.seek(baseposition, 0)  # in case reading from inside raw file
    try:
        hdr = fd.read(struct.calcsize(HDR))
    except IOError:
        if not baseposition:
            fd.close()
        if prt:
            print '*****Error reading file header', filename, posit
        return None
    try:
        header = struct.unpack(HDR, hdr)
    except struct.error:
        if not baseposition:
            fd.close()
        if prt:
            print '*****Error unpacking header', filename, posit
        return None
    ftd, ffmt, kind, streams, lowpass, datecode, timecode = header[:7]
    fmtstring, gameid, lcode, spare, cid, cfull = header[-6:]
    if ftd != 'SUMD':
        if not baseposition:
            fd.close()
        if prt:
            print '*****Not a SUM file', filename
        return None
    headerversion = int(ffmt)
    if headerversion not in validversions:
        if not baseposition:
            fd.close()
        if prt:
            print '*****incorrect format', filename
        return None
    if prt:
        print 'version ', headerversion
    if headerversion >= 114:  # for different header formats
        BLK = NEWBLK
    else:
        BLK = OLDBLK
    if headerversion >= 118:
        try:
            xxhdr = fd.read(struct.calcsize(HDR118))
        except IOError:
            if not baseposition:
                fd.close()
            if prt:
                print '*****Error reading xtra file header', filename, posit
            return None

        try:
            hdr118 = struct.unpack(HDR118, xxhdr)
        except struct.error:
            if not baseposition:
                fd.close()
            if prt:
                print '*****Error unpacking xtra header', filename, posit
            return None
    fileminutes = fsize / (streams * 120 * 3)  # approx file minutes
    mxdt = mx.DateTime.DateTimeFromAbsDateTime(datecode, float(timecode))
    fobj = SumFile()
    fobj.datestring = mx.DateTime.ISO.str(mxdt)[0:10]
    fobj.timestring = mx.DateTime.ISO.str(mxdt)[11:16]
    fobj.datecode = datecode
    fobj.timecode = timecode
    fobj.version = headerversion
    sd = mx.DateTime.ARPA.str(mxdt)
    try:
        k = sd.index(',')
        k = k + 2
    except ValueError:
        k = 0
    fobj.simpledate = sd[k:k + 7] + sd[k + 9:k + 11]
    fobj.formatstring = nonull(fmtstring)
    formatstring = fobj.formatstring
    if layout == '':
        try:
            fraw = open(filename[:-4] + '.raw', 'rb')
            val = rawlist.rawheaderfield(fraw, 'FILLER')
            pos = val.find('|')
            if pos > 0:
                layout = val[pos + 1:]
            fraw.close()
        except Exception:  # best effort; leave layout empty
            pass
    fobj.layout = layout
    fobj.uname = nonull(cfull)
    fobj.ucode = nonull(cid)
    fobj.fname = filename
    fobj.lcode = lcode
    fobj.gameid = nonull(gameid)
    hdr_size = struct.calcsize(BLK)
    fobj.number_streams = streams
    fobj.number_lowpass = lowpass
    if headerversion >= 118:
        fobj.number_periph = hdr118[0]
        fobj.multiplier = hdr118[1:]
    else:
        fobj.number_periph = 1

    allsitelist = {}
    if headerversion >= 112:
        fobj.has_total_in_percent = 1

    if prt:
        print fobj.__dict__
    dtm = []
    pct = []
    avg = []
    periph = []
    event = []
    prev_mode = 0
    prev_site = ''
    prev_freq = []
    newcl = 0
    prev_score = 0
    prev_time = 0
    total_per = 0
    fp = None  # for the data
    if prt:
        print 'fileminues,headerversion', fileminutes, headerversion
    while 1:
        posit = fd.tell()
        if prt > 2:
            print 'at ', posit
        try:
            blk = fd.read(hdr_size)
        except IOError:
            if prt:
                print '*****Error reading a sub block header', posit
            if not baseposition:
                fd.close()
            return None
        if blk == '':
            if prt > 2:
                print 'blk length = 0'
            break
        try:
            bsize, fmt, tstamp = struct.unpack(BLK, blk)
        except struct.error:
            if not baseposition:
                fd.close()
            if prt:
                print '*****Partial block header', filename, posit
            return None

        if prt:
            print 'bsize,fmt,offset', bsize, fmt, '%x' % posit
        if bsize == 0:
            if prt > 2:
                print 'block length = 0'
            break  #end of all
        if headerversion < 114:
            if (fileminutes > 45 and headerversion < 108) or (fileminutes > 95
                                                              and fmt == 3):
                # here to try to fix up the values
                # before 108, there was no clamp on sessions so the block size
                # was only modulo 65536 (16 bit saved value!!!)
                # after 108, I allowed 100 minutes but the overall pct value still
                # overflowed the 16 bits.  I now clamp at 90 minutes
                # but need to handle the bad files out there...
                # fix up bad headers
                if bsize < 0:
                    bsize = 65536 + bsize
                if fmt <= 3 and fmt != 1:
                    while 1:
                        mins = (bsize - 6) / 600
                        if fileminutes < mins:
                            if headerversion < 108:
                                bsize -= 65536
                            break
                        bsize += 65536

        rest = bsize - hdr_size
        posit = fd.tell()
        try:
            part = fd.read(rest)
        except IOError:
            if prt:
                print '*****Block read error', filename, '%x' % posit
            if not baseposition:
                fd.close()
            return None
        if len(part) != rest:
            if prt:
                print '*****Incomplete block read', filename, '%x' % posit
            if not baseposition:
                fd.close()
            return None
        try:

            if prt:
                try:
                    ftype = NAMES[fmt].strip()
                    if ftype == 'DATA':
                        ftype = 'DATA+++++++++++'
                except Exception:
                    ftype = '????'
                print 'Block->', fmt, '(%s)' % ftype, streams, len(
                    part), '%x' % posit
            if fmt == SUM_DATA:
                sg = data_part(streams, lowpass, part, tstamp, prt)
                dtm.append(sg)
            elif fmt == SUM_THRESH:  # thresh
                thr = thresh_part(streams, lowpass, part, tstamp, prt)
                event.append(thr)
            elif fmt == SUM_AVERAGE:  # long term avg
                if details:
                    sg = data_part(streams, lowpass, part, tstamp, prt)
                    sg.type = SUM_AVERAGE
                    fobj.avg.append(sg)
            elif fmt == SUM_PERCENT:  # percent
                if headerversion >= 112:
                    n = streams + 1  # account for total %
                else:
                    n = streams
                sg = data_part(n, lowpass, part, tstamp, prt)
                sg.type = SUM_PERCENT
                fobj.pct.append(sg)
            elif fmt == SUM_FREQ:  # freq
                fp = freq_part(streams, lowpass, part, tstamp, prt)
                event.append(fp)
            elif fmt == SUM_SCALE:  # scale
                sg = scale_part(streams, lowpass, part, tstamp, prt)
                event.append(sg)
            elif fmt == SUM_SITE:  # channel/site
                s, m = site_part(streams, lowpass, part, tstamp, prt)
                if s:
                    event.append(s)
                event.append(m)  #LATER we worry about changes

            elif fmt == SUM_PERIOD:  # period end
                pe = period_part(streams, lowpass, part, tstamp, prt)
                if pe is not None and total_per < 128:
                    if total_per:
                        prevstart = fobj.per[-1].start
                        if pe.start == prevstart:
                            # aha must be the bogus one
                            continue
                    total_per += 1
                    pe.period_number = total_per
                    fobj.per.append(pe)
                    if pe.end > 5398:
                        if prt:
                            print 'Readjust pe.end ', pe.period_number, pe.end
                        pe.end = 5398
                    if pe.start > 5396:
                        if prt:
                            print 'Readjust pe.start ', pe.period_number, pe.start
                        pe.start = 5396
                    ev = EventSeg(
                        SUM_PERIOD)  # add two events also, begin/end period
                    ev.start = pe.start
                    ev.typename = 'BEGIN'
                    ev.period = pe
                    event.append(ev)
                    ev = EventSeg(SUM_PERIOD)
                    ev.start = pe.end
                    ev.typename = 'END'
                    ev.period = pe
                    ev.overall_reward = pe.overall_reward
                    event.append(ev)
            elif fmt == SUM_MODE:
                ev = mode_part(streams, lowpass, part, tstamp, prt)
                event.append(ev)
            elif fmt == SUM_MARK:
                ev = mark_part(streams, lowpass, part, tstamp, prt)
                event.append(ev)
            elif fmt == SUM_PERIPH:
                if fobj.number_periph:
                    sg = data_part(fobj.number_periph, lowpass, part, tstamp,
                                   prt)  # ?? streams stream in periph data
                    sg.type = SUM_PERIPH
                    fobj.periph.append(sg)
            elif fmt == SUM_ZSCORE:
                sg, zsckind = zscore_part(part, tstamp, prt)
                if sg:
                    ##print 'zsc',len(sg.data)
                    sg.type = SUM_ZSCORE
                    fobj.zscore.append(sg)
                    fobj.zscore_kind = zsckind
            elif fmt == (SUM_MARK | 0x100):
                # here for special recordings that have rawsub formatted data
                sg = misc_part(part, tstamp, prt)  #action code,frame,data
                fobj.misc.append(sg)
            else:
                if prt:
                    print '*****Invalid format code', filename, posit, fmt
##                fd.close()
##                return None
                continue
        except Exception:
            if prt:
                print '******* error processing segment', filename, posit, fmt
                if prt > 2:
                    raise
            if not baseposition:
                fd.close()
            ##raise
            return None

    if not baseposition:
        fd.close()
    # now 'merge' the data lists
    if len(dtm) == 0:
        if prt:
            print 'data length = 0'
        return None
    fobj.seg = combine_possible_segments(dtm[0].start, dtm)
    if len(fobj.seg) == 0:
        return None
    if len(fobj.seg[0].data) < 3:
        return None  # sorry, I need 3 seconds of data
    if fobj.pct:
        fobj.pct = combine_possible_segments(fobj.pct[0].start, fobj.pct)
    if fobj.avg:
        fobj.avg = combine_possible_segments(fobj.avg[0].start, fobj.avg)
    if fobj.periph:
        fobj.periph = combine_possible_segments(fobj.periph[0].start,
                                                fobj.periph)

    # filter the combined event list
    #   things looked for:
    #       time a "change" begins for each kind of thing
    #       when last change occurs for each kind of thing
    #       value of last change!!
    #       initial value
    #   filtering  freq changes, thresh changes, site changes, reward dir changes, fback mode changes
    holds = [None] * 16
    lasts = [None] * 16
    fev = []
    for ev in event:
        t = ev.type
        if ev.type == SUM_MARK:
            fev.append(ev)


##        elif ev.type == SUM_MODE:
##            if holds[t] == None:
##                lasts[t]=holds[t]=ev
##                continue
##            if ev.start - fr_hold.start > HOLD_TIME:
##                fr_last.start=fr_hold.start
##                fev.append(fr_last)
##                fr_hold=ev

        elif ev.type == SUM_PERIOD:
            fev.append(ev)
        else:
            if holds[t] is None:
                holds[t] = lasts[t] = ev
                continue
            if ev.start - holds[t].start > HOLD_TIME:
                lasts[t].start = holds[t].start
                fev.append(lasts[t])
                lasts[t] = holds[t] = ev
        lasts[t] = ev

    # now add in the guys left
    for k in range(len(holds)):
        if holds[k]:
            if holds[k] == lasts[k]:
                # if same, must be just a FIRST instance
                fev.append(holds[k])
            else:
                # different, use time of first
                lasts[k].start = holds[k].start
                fev.append(lasts[k])

    # after all this, NOW we have to sort back into time order
    fev.sort(evtime_sort)

    # aggregate the site list (collected into allsitelist while parsing blocks)
    fobj.events = fev
    fobj.number_periods = total_per
    fobj.number_seconds = len(fobj.seg[0].data)  # ending seconds
    keys = allsitelist.keys()
    ##keys.sort()
    fobj.sitelist = keys
    if len(keys) == 0:
        fobj.sitelist = [
            'Unknown',
        ]
    return fobj
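
# raw2edf() below refers (in commented-out code) to a filename-based entry
# point, sumfile.get_sum_file(). A minimal sketch of such a wrapper, assuming
# it only needs to open the file and delegate; get_sum_file_fd() closes the
# descriptor itself when baseposition is 0.
def get_sum_file(filename, details=1, prt=0):
    try:
        fd = open(filename, 'rb')
    except IOError:
        return None
    return get_sum_file_fd(fd, details, prt, filename=filename)
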
def raw2edf(rawfile, destination_dir, debug=0):
    ''' Convert RAW file to EDF '''

    global SAMPLING_RATE
    global MAX_CHAN_LENGTH

    rawFD = open(rawfile, 'rb')  # open RAW file for reading
    rawfile_info = rawlist.rawinfo(rawfile)
    rawfile_subset = rawlist.rawheaderfield(rawFD, 'SUBSET')
    rawfile_formatstring = rawlist.rawheaderfield(rawFD, 'FORMATSTRING')
    # get base name for use with GetClientFile
    basename = "\\".join(rawfile.split("\\")[:-2])
    client = clients.GetClientFile(basename)

    # get patient identification, date and time from raw file
    datetime = rawlist.rawdate(rawfile)
    if debug > 1:
        print 'debugging'

    try:
        pid = client['client.xguid']
    except KeyError:
        # for old version 4.0.3h and before
        pid = client['client.fullname']

    patient = client['client.clientid']
    rec_date = datetime[8:10] + '-' + MONTHS[int(datetime[5:7]) -
                                             1] + '-' + datetime[0:4]
    startdate = datetime[8:10] + '.' + datetime[5:7] + '.' + datetime[2:4]
    starttime = datetime[11:13] + '.' + datetime[14:16] + '.00'

    # build the EDF output path: destination dir + activity name + category letter
    rsplit = os.path.split(rawfile)
    rdir = os.path.split(rsplit[0])
    actname = rsplit[1].split('.')
    output_file = destination_dir + '\\' + actname[0] + rdir[1][
        0]  # add E,B,A for category

    c_scales = rawlist.rawparams(rawFD)[1:]  # list of channel scales
    c_channels = []  # list of channel data
    peripheral = []  # peripheral data

    # there will be up to 3 scales (2 for channel a and b, 1 for peripheral)
    c_channels.append(rawlist.rawread(rawFD, CHAN_A)[0])
    c_channels.append(rawlist.rawread(rawFD, CHAN_B)[0])
    if rawfile_formatstring[2] == 'C':
        # must be 4 chan
        if debug > 1:
            print 'aha- 4 channels'
        c_channels.append(rawlist.rawread(rawFD, CHAN_C)[0])
        c_channels.append(rawlist.rawread(rawFD, CHAN_D)[0])

    # check for peripheral data (NOT INCORPORATED INTO EDF FILE YET)
    #if len(c_scales) > 2:
    #    try:
    #        peripheral.append( rawlist.rawread(rawFD, PERIPH)[0] )
    #    except:
    #        pass

    # set 1) Sampling Rate, 2) Max channel length
    SAMPLING_RATE = rawfile_info[1]
    MAX_CHAN_LENGTH = (max(len(c) for c in c_channels) / 2) / 256  # longest channel, not lexicographic max
    if debug > 1:
        print SAMPLING_RATE, MAX_CHAN_LENGTH

    # get annotations
    annotations = []
    initial_site = []
    initial_site.append(rawfile_info[len(rawfile_info) - 1][1:])
    numchannels = 0
    if rawfile_subset >= 2:
        #sumfilepath = rawfile.split(".")[0] + ".SUM"
        #if os.path.exists(sumfilepath):
        #    summary = sumfile.get_sum_file(sumfilepath, 0, 0)
        #    if summary:
        #        annotations = get_annotation_list(summary)
        #        numchannels=summary.number_streams
        #if numchannels == 0:
        #    # do it the hard way
        #    while numchannels < 16:
        #        dta=rawlist.readrawamplvalues(fd,numchannels,countonly=1)
        #        if len(dta):
        #            numchannels += 1
        #        else:
        #            break
        for _ in range(len(rawfile_formatstring)):
            dta = rawlist.rawreadampl(rawFD, numchannels, prt=0)
            if len(dta):
                c_channels.append(dta[0][:])
                if debug > 1:
                    print 'achan ', len(c_channels[-1]), numchannels
                c_scales.append(0.01)
                numchannels += 1
    startinfo = (pid, patient, rec_date, startdate, starttime, initial_site,
                 numchannels, rawfile_formatstring)

    rawevents, ans = rawlist.rawcb(rawfile)
    if ans == 0:
        # now merge in the desired raw event data
        annotations = merge_rawsum(rawevents, annotations)

    #if numchannels:
    #    for ch in range(numchannels):
    #        c_scales.append(0.0)

    if debug > 1:
        print 'write2file', len(c_channels)
        for i in range(len(c_channels)):
            print i, len(c_channels[i])
    write2file(startinfo, c_channels, c_scales, annotations, output_file,
               debug)
    rawFD.close()

    return 1
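
# Usage sketch for raw2edf (the helper and directory layout here are
# assumptions for illustration): convert every .raw file in one directory.
def _convert_all(session_dir, out_dir, debug=0):
    import glob
    import os
    for raw in glob.glob(os.path.join(session_dir, '*.raw')):
        try:
            raw2edf(raw, out_dir, debug)
        except Exception:
            if debug:
                print 'failed to convert', raw
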
def compute_data(fobj, prt=0):

    try:
        fdraw = open(fobj.fname[:-3] + 'raw', 'rb')
    except IOError:
        return []
    if prt:
        print fobj.fname
    rslt = csv_dump_out.csv_dump_out(None, fobj)
    seg = []
    ampoffset = [0, 0, 0, 0]
    throffset = [0, 0, 0, 0]
    siteoffset = [0, 0, 0, 0]

    frq = rawlist.rawheaderfield(fdraw, 'CLOCKRATE')
    #cbxx = rawlist.rawcb('', rawfd=fdraw)
    #eoec = _update_eyes(cbxx[0], frq)
    if prt:
        print len(rslt)
    per = 0
    # figure out which columns have the data I want:
    #   period, eyes, ampl-0, ampl-1, thresh-0, thresh-1
    eyes = ''
    for num, ln in enumerate(rslt):
        fld = ln.split(',')
        if not num:
            # first row holds the CSV column headers
            numraw = fobj.number_lowpass
            perslot = fld.index('"Period"')
            eyeslot = fld.index('"Eyes"')
            for i in range(numraw):
                try:
                    ampoffset[i] = fld.index('"Ampl-%d"' % i)
                except ValueError:
                    pass
                try:
                    throffset[i] = fld.index('"Thresh-%d"' % i)
                except ValueError:
                    pass
                try:
                    siteoffset[i] = fld.index('"Chan-%c"' % chr(ord('A') + i))
                except ValueError:
                    pass
            continue
                
        #find the beginning/end of each period
        #if eoec.has_key(int(fld[0])):
        #    eyes = eoec[int(fld[0])]
        if int(fld[perslot]) == 0:
            if per == 0:
                continue  # still before the end of the first period
            if prt:
                print 'found end of period ', per, num
            # process the period that just ended
            p = fobj.per[per - 1]
            for i in range(numraw):
                for s in checksites:
                    look = s + '-a'
                    if fld[siteoffset[i]][1:-1].lower()[:4] == look:
                        # aha - this is a valid site
                        seg.append((p.start, p.end, perstart, num - 1, i, per,
                                    fld[eyeslot], fld[siteoffset[i]][1:-1]))
            per = 0
            continue
        if per == 0:
            perstart = num
            per = int(fld[perslot])
            if prt:
                print 'start of period ', per, num
        
    # get here with possibles in seg
    if prt:
        print 'found possible ', len(seg)
    ans = []
    for start, end, pstart, pend, ch, per, eyes, site in seg:
        if prt:
            print "s,e,ps,pe,ch,per", start, end, pstart, pend, ch, per
        if end - start < 30:
            if prt:
                print ' less than 30 seconds'
            continue  # too short
        for i in range(numraw):
            if i != ch:
                continue  # only the matching channel
            ans.append([start, end, pstart, pend, ch + 100, per, eyes, site])

            thrv = []
            amplv = []
            for k in range(pstart, pend):
                fld = rslt[k].split(',')
                # convert to float so the comparison below is numeric,
                # not a lexicographic string comparison
                thrv.append(float(fld[throffset[i]]))
                amplv.append(float(fld[ampoffset[i]]))
            beg = pstart
            org = None
            lstart = lend = 0
            poss = []
            while beg < pend:
                if thrv[beg - pstart] > amplv[beg - pstart]:
                    if org:
                        if beg - org >= 30:
                            poss.append((beg - org, org, beg - 1))
                    org = None
                else:
                    if org is None:
                        org = beg
                beg += 1
            if org:
                if pend - org >= 30:
                    poss.append((pend - org, org, pend))
            else:
                if len(poss) == 0:
                    poss.append((pend - pstart, pstart, pend))
                
            # now find the longest below-threshold segment
            if prt:
                print poss
            beg = 0
            for l, s, e in poss:
                if l > beg:
                    lstart = s
                    lend = e
                    beg = l

            # now have the start/end of the longest run
            if beg:
                ans.append([start, end, lstart, lend, ch, per, eyes, site])
            if prt:
                print 'longest ', ch, per, start, end, lstart, lend
            # if not found, it was a good sample throughout
    # now do the fft stuff
    if prt:
        print 'found valid ', len(ans), ' chans=', numraw
    final = []
    dtaa = []
    for i in range(numraw):
        dtaa.append(rawlist.rawreadvalues(fdraw, i))
    fdraw.close()
    extfft.initfft(frq)
    prevch = None
    prevper = 0
    grp = []
    for start, end, valid, pend, ch, per, eyes, site in ans:
        if prt:
            print ch, valid, pend, per, site
        if (ch % 100) != prevch or prevper != per:
            if prevch is None:
                prevch = ch % 100
                prevper = per
            else:
                final.append((prevch % 100, site, prevper, eyes[1:-1], grp))
                grp = []
                prevch = ch % 100
                prevper = per
        dta = dtaa[ch % 100]
        # whole-period rows carry ch+100; only segment rows are clamped to 60 s
        span = pend - valid
        if ch < 100:
            if span > 60:
                span = 60
        b = pend - span - 10 * frq
        if b < start:
            b = start
        sumr = [0.0] * 640
        cntr = 0
        for d in range((b + 1) * frq, pend * frq, frq):
            # take the last 60 or fewer seconds of data and
            # compute the FFT in 0.1 Hz bins, averaging the bins
            x = extfft.compute_fft_array(dta[d - frq * 10:d], frq * 10, 640)
            for i in range(640):
                sumr[i] += x[i]
            cntr += 1
        biggie = 0
        if cntr:
            for i in range(640):
                sumr[i] = sumr[i] / cntr
                if sumr[i] > biggie:
                    biggie = sumr[i]
        else:
            if prt:
                print 'no fft samples!', b + 1, pend
        sumr.append(biggie)
        grp.append(sumr)
            
    if grp:
        final.append((ch % 100, site, per, eyes[1:-1], grp))

    extfft.clean()

    if prt:
        for item in final:
            print item[0], item[1], item[2], len(item[4]), item[4][0][-1],
            if len(item[4]) > 1:
                print item[4][1][-1],
            else:
                print 'None',
            print

    return final  # array of (chan, site, per, eyes, rows of 640 averaged 0.1 Hz bins + max)
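
# Hedged example (assumptions: bins are 0.1 Hz wide starting at 0 Hz, so
# 8-12 Hz spans bins 80..119; the band choice is purely illustrative) of
# consuming compute_data() output rows of (chan, site, per, eyes, grp).
def _band_average(final, lo_hz=8.0, hi_hz=12.0):
    out = []
    lo, hi = int(lo_hz * 10), int(hi_hz * 10)
    for chan, site, per, eyes, grp in final:
        for sumr in grp:
            bins = sumr[:-1]  # the appended last entry is the max, not a bin
            band = bins[lo:hi]
            out.append((chan, site, per, eyes, sum(band) / len(band)))
    return out
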
def smdumpfile(fname, fmtcheck=0):
    # get # channels of raw
    svals = []
    try:
        fd = open(fname, 'rb')
    except IOError:
        return None, None
    # make sure new format
    hasextradata = 0
    subset = rawlist.rawheaderfield(fd, 'SUBSET')
    if subset > 1:
        hasextradata = subset
    if fmtcheck:
        if hasextradata == 0:
            fd.close()
            return None, None
    delta = 0
    if hasextradata >= 3:
        delta = 1

    fstr = rawlist.rawheaderfield(fd, 'FORMATSTRING')
    maxchan = 0
    for c in fstr:
        if c == 'C':
            maxchan += 1
    # get data list for each channel
    datalist = []
    for ch in range(maxchan):
        dta = rawlist.rawreadvalues(fd, ch)
        if dta[0] is None:
            break
        datalist.append(dta)
    # get filtered data
    filtlist = []
    if hasextradata:
        for ch in range(14 + delta):
            dta = rawlist.rawreadamplvalues(fd, ch)
            if dta == []:
                break
            filtlist.append(dta)
    # get reward list
    rwddict = _rawrewards(fd)
    datecode = rawlist.rawheaderfield(fd, 'DATECODE')
    timecode = rawlist.rawheaderfield(fd, 'TIMECODE')
    dt = mx.DateTime.DateTimeFromAbsDateTime(int(datecode), int(timecode))
    datetime = (dt.year, dt.month, dt.day, dt.hour, dt.minute, int(dt.second))
    threshdict = None
    if hasextradata:
        pos = rawlist.rawheaderfield(fd, 'SUMMARYOFFSET')
        if pos:
            threshdict = _getthresh(fd, pos)
    fd.close()
    if len(datalist) == 0:
        return None, None

    fmftext = None
    for frm in range(len(datalist[0])):
        data = []
        for dta in datalist:
            try:
                data.append(dta[frm])
            except IndexError:
                data.append(0.0)
        filt = []
        if hasextradata:
            for flst in filtlist:
                try:
                    filt.append(flst[frm])
                except IndexError:
                    filt.append(0.0)
        events = rwddict.get(frm, ())
        other = ''
        rwd = 0
        if len(events):
            for t, reason, x in events:
                if reason.find('REWARD') >= 0:
                    rwd = 1
                # every event reason gets recorded in 'other'
                if len(other):
                    other = other + ','
                other = other + reason
        if not fmftext:
            fmftext = 'Time'
            fmf = '%.4f'
            for i in range(len(data)):
                fmftext = fmftext + ',Chan%s' % 'ABCD'[i]
                fmf = fmf + ",%.3f"
            fmftext = fmftext + ',Reward'
            fmf = fmf + ',%d'
            if hasextradata:
                delta = 0
                if hasextradata >= 3:
                    delta = 1
                for i in range(len(filt) - delta):
                    fmftext = fmftext + ',%d' % i
                    fmf = fmf + ',%.2f'
                if delta:
                    fmftext = fmftext + ',OT'
                    fmf = fmf + ',%x'
                    filt[-1] = 100 * filt[-1]
            fmftext = fmftext + ',other'
            fmf = fmf + ',"%s"'
            if threshdict:
                tx = threshdict.keys()
                tx.sort()
                thr = threshdict[tx[0]]
                for i in range(len(thr)):
                    fmf = fmf + ',%.3f'
                    fmftext = fmftext + ',Thrsh' + '%d' % i
                currthresh = thr
            svals.append(fmftext)
        dx = [frm / 256.0]
        dx.extend(data)
        dx.append(rwd)
        if len(filt):
            dx.extend(filt)
        dx.append(other)
        if threshdict:
            currthresh = threshdict.get(frm, currthresh)
            dx.extend(currthresh)
        try:
            line = fmf % tuple(dx)
        except:
            print fmf
            print dx
            print fmftext
            raise

        svals.append(line)

    return '\n'.join(svals), datetime
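
# Usage sketch for smdumpfile (this wrapper is an assumption, not part of the
# original module): dump one raw file to a CSV file alongside it.
def _dump_to_csv(rawname):
    text, datetime = smdumpfile(rawname)
    if text is None:
        return 0
    out = open(rawname[:-4] + '.csv', 'w')
    out.write(text)
    out.close()
    return 1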