Example #1
1
 def IgorLoad(self,Source):
     from igor import binarywave,igorpy
     from PyQt4 import QtGui  # Qt4-era API, matching msgBox.exec_() below
     import numpy
     Waves={}
     Variables={}

     if Source[-3:] == 'ibw':
         # load the file once and reuse the parsed dict instead of parsing it twice
         wave=binarywave.load(Source)['wave']
         Waves[wave['wave_header']['bname']]=wave['wData']
         Variables['SampleInterval']=1
     elif Source[-3:] == 'pxp':
         b=igorpy.load(Source)
         for i in b:
             if isinstance(i, igorpy.Wave):
                 Waves[str(i.name)]=i.data
             elif isinstance(i, igorpy.Variables):
                 Variables=i.uservar
     elif Source[-3:] == 'txt':
         b=numpy.loadtxt(Source)
         Waves[Source.split("/")[-1].replace('.txt','').replace('.','_')]=b
         Variables=None
     elif Source[-3:] == 'csv':
         # comma-separated values need an explicit delimiter
         b=numpy.loadtxt(Source,delimiter=',')
         Waves[Source.split("/")[-1].replace('.csv','').replace('.','_')]=b
         Variables=None
     elif Source[-3:] == 'wcp':
         print("not supported yet, but you can import an igor file")
         b,c=self.read_block(Source)
         for i,j in enumerate(b):
             Waves[str(j[0])+str(i)]=numpy.array(j[1])
         for i in c:
             Variables[i]=c[i]
     else:
         # Waves and Variables are always initialised above, so returning
         # them can never raise UnboundLocalError; unsupported extensions
         # must be reported explicitly here
         msgBox = QtGui.QMessageBox()
         msgBox.setText(
         """
         <b>Filetype not supported</b>
         <p>Only txt, csv, ibw, pxp and wcp files are supported
         """)
         msgBox.exec_()
         return None, None
     return Waves,Variables
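# Usage sketch (hypothetical loader instance and path): returns a dict of
# named waves plus a dict of Igor variables, or (None, None) for an
# unrecognised extension.
# Waves, Variables = loader.IgorLoad('/data/recordings/cell01.pxp')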
Example #2
0
def read_single_pxt(reference_path: typing.Union[Path, str], byte_order=None):
    """
    Uses igor.igorpy to load a single .PXT or .PXP file
    :return:
    """
    import igor.igorpy as igor

    if isinstance(reference_path, Path):
        reference_path = str(reference_path.absolute())

    loaded = None
    if byte_order is None:
        for try_byte_order in ['>', '=', '<']:
            try:
                loaded = igor.load(reference_path, initial_byte_order=try_byte_order)
                break
            except Exception: # pylint: disable=broad-except
                # bad byte ordering, probably; try the next one
                pass
        if loaded is None:
            raise ValueError('Could not load {} with any byte order.'.format(reference_path))
    else:
        loaded = igor.load(reference_path, initial_byte_order=byte_order)

    children = [c for c in loaded.children if isinstance(c, igor.Wave)]

    if len(children) > 1:
        warnings.warn('Igor PXT file contained {} waves. Ignoring all but first.'.format(len(children)))

    return wave_to_xarray(children[0])
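# Usage sketch (hypothetical path): cycling '>', '=', '<' lets the loader
# recover files written with either endianness.
# arr = read_single_pxt('scans/example.pxt')                  # auto-detect
# arr = read_single_pxt('scans/example.pxt', byte_order='<')  # force little-endian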
Example #3
0
def read_single_ibw(reference_path: typing.Union[Path, str]):
    """
    Currently igorpy does not support this though
    Uses igor.igorpy to load an .ibw file
    :param reference_path:
    :return:
    """
    import igor.igorpy as igor

    if isinstance(reference_path, Path):
        reference_path = str(reference_path.absolute())
    return igor.load(reference_path)
Example #4
0
def read_experiment(reference_path: typing.Union[Path, str], **kwargs):
    """
    Reads a whole experiment and translates all contained waves into xr.Dataset instances as appropriate

    :param reference_path:
    :return:
    """
    import igor.igorpy as igor

    if isinstance(reference_path, Path):
        reference_path = str(reference_path.absolute())

    return igor.load(reference_path, **kwargs)
Example #5
0
import numpy as sc  # only numpy-style calls are used (arange, int32, array)
import igor.igorpy as igor


def extractDebleachedData(filePath):
    allData = igor.load(filePath)
    bbb = allData.children[0].userstr[b"S_waveNames"]
    aaa = bbb.decode("UTF-8")
    dataNames = aaa.split(";")[:-1]
    waves = list()
    for m in sc.arange(len(dataNames)):
        # numeric suffix after the final 'e' in the wave name
        waveNum = sc.int32(dataNames[m][1 + str.rfind(dataNames[0], "e"):])
        # getattr is clearer and safer than building a statement for exec()
        wave = getattr(allData, "%s_%d" % (dataNames[m], waveNum - 1))
        waves.append(wave.data)
    return sc.array(waves)
Example #6
0
def extractDebleachedFminData(filePath):
    """
    extractDebleachedFminData takes a path to a pxp data file as only argument and extracts all the wave data found there, 
    provided the data has the suffix F_min
    
    Example:
    dataDir="./microcircuitsNetworks/"
    fileName= "cort76dp1c.pxp"
    waveData, timeStamps=extractDebleachedFminData(dataDir+fileName)
    """
    allData = igor.load(filePath)
    bbb = allData.children[0].userstr[b"S_waveNames"]
    aaa = bbb.decode("UTF-8")
    dataNames = aaa.split(";")[:-1]
    waves = list()
    for m in sc.arange(len(dataNames)):
        # numeric suffix after the final 'e' in the wave name
        waveNum = sc.int32(dataNames[m][1 + str.rfind(dataNames[0], "e"):])
        # getattr is clearer and safer than building a statement for exec()
        wave = getattr(allData, "%s_%dF_min" % (dataNames[m], waveNum - 1))
        waves.append(wave.data)
    return sc.array(waves), sc.array(allData.sec.data)
Example #7
0
def extractPXPData(fName):
    """
    extractPXPData
    Example:
    allData=extractPXPData("microcircuitsNetworks/cort76dp1c.pxp")
    """
    allData = igor.load(fName)
    # the userstr entry S_waveNames lists the recorded wave names, ';'-separated
    bbb = allData.children[0].userstr[b"S_waveNames"]
    aaa = bbb.decode("UTF-8")
    dataNames = aaa.split(";")[:-1]
    extractedData2 = list()
    for nam in dataNames:
        if len(nam) > 3:
            # getattr fetches the wave by name; exec() of an assignment
            # cannot create a function-local variable in Python 3
            w2 = getattr(allData, nam).data
            extractedData2.append(w2)
        else:
            print("Found empty string")

    rawData = sc.array(extractedData2)
    return rawData
Example #8
0
    def Load_SFG(self,Parameters) :
        
        FolderPath = Parameters['FolderPath']
        FileName = Parameters['FileName']

        if FileName.endswith('.ibw') :
            d = binarywave.load(FolderPath + '/' + FileName)
            y = np.transpose(d['wave']['wData'])
            Start = d['wave']['wave_header']['sfB']
            Delta = d['wave']['wave_header']['sfA']
            x = np.arange(Start[0],Start[0]+y.shape[1]*Delta[0]-Delta[0]/2,Delta[0])
            z = np.arange(Start[1],Start[1]+y.shape[0]*Delta[1]-Delta[1]/2,Delta[1])
            print('Igor binary data loaded')
        elif FileName.endswith('.itx') :
            y = np.loadtxt(FolderPath + '/' + FileName,comments =list(string.ascii_uppercase))
            y = np.transpose(y)
            with open(FolderPath + '/' + FileName) as f:
                reader = csv.reader(f, delimiter="\t")
                for row in reader:
                    if 'SetScale/P' in row[0]:
                        SetScale = row[0]
            xScale = re.findall(r' x (.*?),"",', SetScale)
            xScale = xScale[0].split(',')
            zScale = re.findall(r' y (.*?),"",', SetScale)
            zScale = zScale[0].split(',')
            Start = [float(xScale[0]),float(zScale[0])]
            Delta = [float(xScale[1]),float(zScale[1])]
            x = np.arange(Start[0],Start[0]+y.shape[1]*Delta[0]-Delta[0]/2,Delta[0])
            z = np.arange(Start[1],Start[1]+y.shape[0]*Delta[1]-Delta[1]/2,Delta[1])
            print('Igor text data loaded')
        elif FileName.endswith('.pxp') :
            DataName = Parameters['DataName']
            igor.ENCODING = 'UTF-8'
            d = igor.load(FolderPath + '/' + FileName)
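            # waves are promoted to module globals so they can be fetched by
            # name via eval(DataName) below; the x/y axis arrays are rebuilt
            # point by point from the igorpy axis records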
            for i in range(len(d.children)) :
                if 'data' in str(d[i]) and len(d[i].data) < 10000 :
                    globals()[d[i].name] = np.array(d[i].data)
                    if len(d[i].axis[0]) > 0 :
                        Name = d[i].name+'_x'
                        globals()[Name] = np.array([])
                        for j in range(len(d[i].axis[0])) :
                            globals()[Name] = np.append(globals()[Name], d[i].axis[0][-1] + d[i].axis[0][0] * j)
                    if len(d[i].axis[1]) > 0 :
                        globals()[d[i].name] = np.transpose(globals()[d[i].name])
                        Name = d[i].name+'_y'
                        globals()[Name] = np.array([])
                        for j in range(len(d[i].axis[1])) :
                            globals()[Name] = np.append(globals()[Name], d[i].axis[1][-1] + d[i].axis[1][0] * j)
            x = eval(DataName+'_x')
            y = eval(DataName)
            z = eval(DataName+'_y')
            z = np.round(z,decimals=1)

        elif FileName.endswith('sif') :
            FileData = sif_reader.xr_open(FolderPath + '/' + FileName)
            y = FileData.values[:,0,:]
            x = [i for i in range(len(np.transpose(y)))]
            z = [i+1 for i in range(len(y))]
            
            try :
                FileData.attrs['WavelengthCalibration0']
                FileData.attrs['WavelengthCalibration1']
                FileData.attrs['WavelengthCalibration2']
                FileData.attrs['WavelengthCalibration3']
            except KeyError :
                print('Warning: Wavelength calibration not found')
            else :
                c0 = FileData.attrs['WavelengthCalibration0']
                c1 = FileData.attrs['WavelengthCalibration1']
                c2 = FileData.attrs['WavelengthCalibration2']
                c3 = FileData.attrs['WavelengthCalibration3']
                for i in x :
                    x[i] = c0 + c1*i + c2*i**2 + c3*i**3
                x = np.array(x)
                x = 1e7 / x - 12500
            
            try :
                Frame = Parameters['Heating']['Frame']
                Temperature = Parameters['Heating']['Temperature']
            except KeyError :
                print('Warning: Temperature data not found')
            else :
                FitModel = QuadraticModel()
                ModelParameters = FitModel.make_params()
                FitResults = FitModel.fit(Temperature, ModelParameters,x=Frame)
                idx = np.array(z)
                z = FitResults.eval(x=idx)
                z = np.round(z,1)
        
        Data = df(np.transpose(y),index=x,columns=z)
            
        return Data
Example #9
0
    def read_block(self, Filter='RecordA'):
        """
        Return a Block.
        **Arguments**
            no arguments
        """
        #Valid up to 26 channels
        alph = list(string.ascii_uppercase)

        Array = {}  #each key corresponds to a wave
        Var = {}  #each key corresponds to a variable

        b = igorpy.load(self.filename)

        #Igor names channels RecordA, RecordB etc...
        #the first 7 characters are thus specific to a given channel
        ChannelNames = []

        for i in b:
            if isinstance(i, igorpy.Wave):
                if 'Record' in str(i.name):
                    ChannelNames.append(str(i.name)[0:7])
                    Array[str(i.name)] = i.data
            elif isinstance(i, igorpy.Variables):
                Var = i.uservar

        ChannelNames = list(set(ChannelNames))
        ChannelNames.sort()

        # Array and Var start as dicts, so test for emptiness rather than None
        if not Array and not Var:
            return

        AllWaveNames = list(Array)

        AllWaveNames = sorted(AllWaveNames, key=self.splitgroups)

        #for i in Var:
        #    exec("Analysis."+i+"= Var[i]")

        Waves = []
        # loop for record number
        counter = 0
        for i in AllWaveNames:
            if Filter in i:
                counter += 1
        #Listing suffixes
        Suffixes = list(AllWaveNames)
        for i, j in enumerate(Suffixes):
            for k in ChannelNames:
                Suffixes[i] = Suffixes[i].replace(k, '')
        Suffixes = sorted(set(Suffixes), key=lambda x: Suffixes.index(x))

        #Identify date of creation from filename
        #TODO, use variable instead
        filename = self.filename.split('/')[-1]
        blck = Block()
        convertiontable = [
            'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
            'Oct', 'Nov', 'Dec'
        ]
        Day = int(filename[2:4])
        Month = int(convertiontable.index(filename[4:7])) + 1
        Year = int(filename[7:11])
        date = datetime.date(Year, Month, Day)
        blck.datetime = date

        for n in Suffixes:
            seg = Segment()
            for i in AllWaveNames:
                for k, j in enumerate(ChannelNames):
                    if i == j + n:
                        seg.name = j
                        anaSig = AnalogSignal()
                        #ADCMAX = header['ADCMAX']
                        #VMax = analysisHeader['VMax'][c]
                        #YG = float(header['YG%d'%c].replace(',','.'))
                        anaSig.signal = Array[i]
                        anaSig.sampling_rate = 1000. / Var["SampleInterval"]
                        anaSig.t_start = 0.
                        anaSig.name = j
                        anaSig.channel = k
                        #anaSig.unit = header['YU%d'%c]
                        seg._analogsignals.append(anaSig)
            blck._segments.append(seg)

        return blck
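# Usage sketch (hypothetical reader instance; Block/Segment/AnalogSignal are
# the neo-style container classes used above):
# blck = reader.read_block(Filter='RecordA')
# print(blck.datetime, len(blck._segments))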
Example #10
0
    def import_pxp_file(self,
                        filename=None,
                        protein_column_title=None,
                        group_dictionary=None,
                        units=None,
                        create_group_dictionary_interactively=False):
        """ group_dictionary is a set of keys correspond to lists;
        each list contains the column headers in the pxp file for that group."""
        ##########
        if filename:
            self.filename = filename
        elif not self.filename:
            raise Exception(
                "No file name provided or set in attributes of object.")

        f = igor.load(self.filename)
        self.zmin = f.zaxis.data[0]
        self.zmax = f.zaxis.data[-1]
        self.zstep = f.zaxis.data[1] - f.zaxis.data[0]
        ##########
        if create_group_dictionary_interactively:
            group_dictionary = {}
            toplevel = True
            while toplevel:
                print(
                    """Interactive Group Definition:\n1. Add A Group To The Dictionary\n2. List Columns In The File\n3. Exit"""
                )
                answer = input(
                    """Please Enter The Integer Of Your Choice - """)
                if answer == "1":
                    secondlevel = True
                    answer = input("""Enter The Group Key - """)
                    group_dictionary.update({answer: []})
                    while secondlevel:
                        group = input(
                            """Enter Column Title Or EXIT To Leave This Group - """
                        )
                        if group == "EXIT":
                            secondlevel = False
                        elif not hasattr(f, group):
                            print(
                                """\nThat Is Not One Of The Columns, Try Again\n"""
                            )
                        else:
                            group_dictionary[answer].append(group)
                elif answer == "2":
                    key_list = f.__dict__.keys()
                    key_string = ''
                    for key in key_list:
                        key_string += key
                        a = 25 - len(key)
                        if a < 1:
                            a = 1
                        for i in range(a):
                            key_string += ' '
                    key_string += '\n'
                    print(key_string)
                elif answer == "3":
                    toplevel = False
                else:
                    print("\nNot One Of The Options, Try Again\n")
        if group_dictionary:
            self.group_dictionary = group_dictionary

        if len(self.group_dictionary.keys()) == 0:
            self.group_dictionary = {
                'headgroups': [
                    'headgroup1', 'headgroup1_2', 'headgroup1_3', 'headgroup2',
                    'headgroup2_2', 'headgroup2_3'
                ],
                'tethers': ['bME', 'tetherg', 'tether'],
                'tails': ['lipid1', 'methyl1', 'lipid2', 'methyl2'],
                'substrate': ['substrate']
            }
        if list(self.group_dictionary.keys()).count('tails') != 1:
            raise Exception(
                """A group named 'tails' (tail distribution) must be included for purposes of centering the bilayer."""
            )
        ##########
        # Get the protein density
        if protein_column_title:
            self.protein_column_title = protein_column_title
        try:
            protein_density = np.array(
                getattr(f, self.protein_column_title).data)
        except AttributeError:
            raise Exception(self.protein_column_title + ' was not found in ' +
                            self.filename +
                            '. I do need it for relative normalizing.')
        ##########
        # Check the z-axis variables and the array lengths
        if round((self.zmax - self.zmin) / self.zstep, 8) % 1.0 != 0.0:
            raise Exception(
                'zmin and zmax are not separated by an integral multiple of zsteps, problem with the file?'
            )
        elif round((((self.zmax - self.zmin) / self.zstep) + 1),
                   8) != np.shape(protein_density)[0]:
            raise Exception(
                'The number of points in the first dimension of the density array does not match the z-dimensions supplied.\n    This is a weird error for neutron data.'
            )
        ##########
        # Units
        if units:
            self.units = units
        ##########
        # Setting the dictionary of various densities in the file
        temp_dict = {}
        for i in self.group_dictionary.keys():
            temp_dict.update({i: 0 * protein_density})
            for j in self.group_dictionary[i]:
                try:
                    temp_dict.update(
                        {i: temp_dict[i] + getattr(getattr(f, j), 'data')})
                except AttributeError:
                    raise Exception(
                        j + ' was not found in ' + self.filename +
                        '. Please check your dictionary of component groups for the correct column headings.'
                    )
        self.density_dictionary = temp_dict
        ##########
        # Get the norm of the protein density (this is used for relative normalizing in plots)
        self.protein_norm = sum(protein_density) * self.zstep
        ##########
        # Calculate the center of the lipidic distribution
        norm = sum(self.density_dictionary['tails']) * self.zstep
        self.bilayer_center = sum([
            self.density_dictionary['tails'][i] * (self.zmin + i * self.zstep)
            for i in range(len(self.density_dictionary['tails']))
        ]) * self.zstep / norm
Example #11
0
    def import_pxp_file(self,
                        filename=None,
                        data_column_title=None,
                        msigma_column_title=None,
                        psigma_column_title=None,
                        units='A',
                        include_confidence=True):
        if ((not filename) and (not self.filename)):
            raise Exception('No filename given to this object or function.')
        elif (filename):
            self.filename = filename

        if (data_column_title):
            self.data_column_title = data_column_title
        if (msigma_column_title):
            self.msigma_column_title = msigma_column_title
        if (psigma_column_title):
            self.psigma_column_title = psigma_column_title

        f = igor.load(self.filename)

        # Find the z-axis data
        self.zmin = f.zaxis.data[0]
        self.zmax = f.zaxis.data[-1]
        self.zstep = f.zaxis.data[1] - f.zaxis.data[0]

        # Locate protein median data
        try:
            self.density = np.array(
                getattr(f, self.data_column_title).data)
        except AttributeError:
            raise Exception('Data for ' + self.data_column_title +
                            ' was not found in ' + self.filename)

        # Locate confidence intervals
        if include_confidence:
            if not (hasattr(f, self.msigma_column_title)
                    and hasattr(f, self.psigma_column_title)):
                raise Exception("Could not find one or both of '" +
                                self.msigma_column_title + "' and '" +
                                self.psigma_column_title + "' in '" +
                                self.filename + "'")
            self.msigma = np.array(
                getattr(f, self.msigma_column_title).data)
            self.psigma = np.array(
                getattr(f, self.psigma_column_title).data)

        # Check the array lengths for inconsistencies
        if round((self.zmax - self.zmin) / self.zstep, 8) % 1.0 != 0.0:
            raise Exception(
                'zmin and zmax are not separated by an integral multiple of equal zsteps, problem with the file?'
            )
        elif round((((self.zmax - self.zmin) / self.zstep) + 1),
                   8) != np.shape(self.density)[0]:
            raise Exception(
                'The number of points in the first dimension of the density array does not match the z-dimensions supplied.\n    This is a weird error for neutron data.'
            )

        self.units = units
        print('\n\n\nUnits in the pxp are assumed to be ' + self.units +
              """. Please convert the units if this is incorrect.""")
        print(
            """ Try 'instance_name'.convert_units(1.0, 'desired_units')\n\n\n"""
        )

        # Normalize the density provided
        norm = sum(self.density) * self.zstep
        self.norm = norm
        if norm != 1.0:
            print(
                '\n\n\nThe profile has area = {:}'.format(norm) +
                """, changing that to be 1.\n The normalization is stored in 'instance_name'.norm\n\n\n"""
            )
            self.density = self.density / norm
            if include_confidence:
                self.msigma = self.msigma / norm
                self.psigma = self.psigma / norm

        # Calculate the mean and width of the profile
        self.mean = sum([
            self.density[i] * (self.zmin + i * self.zstep)
            for i in range(len(self.density))
        ]) * self.zstep
        square_sum = sum([
            self.density[i] * (self.zmin + i * self.zstep - self.mean)**2.0
            for i in range(len(self.density))
        ]) * self.zstep
        self.second_moment = np.sqrt(square_sum)
Example #12
0
from scipy import fftpack
import numpy as np
import igor.igorpy as igor


def deconvolve(star, psf):
    star_fft = fftpack.fftshift(fftpack.fftn(star))
    psf_fft = fftpack.fftshift(fftpack.fftn(psf))
    return fftpack.fftshift(
        fftpack.ifftn(fftpack.ifftshift(star_fft / psf_fft)))
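
# Note: this naive Fourier deconvolution divides the two spectra pointwise,
# so it blows up wherever psf_fft is close to zero; adding a small constant
# to the denominator (a crude Wiener-style regularizer) is a common remedy.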


def fwhm(sigma):
    # Computes the FWHM from sigma for a Gaussian
    return 2.0 * np.sqrt(2.0 * np.log(2.0)) * sigma
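# e.g. a Gaussian with sigma = 1.0 has FWHM = 2*sqrt(2*ln 2) ≈ 2.3548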


# load data
fName = 'psf_8um_bead.pxp'
igorData = igor.load(fName)
xProfile = igorData.x_profile.data
yProfile = igorData.y_profile.data
zProfile = igorData.z_profile.data

#yProfile = yProfile[200:400]
#yProfile = smoothThroughAveragingNeighbors(yProfile,9)

# set parameters
dX = 7.09814e-08  # in m
dY = 7.09584e-08
dZ = 1e-06
beadsLength = 8.0e-6  # beads are on average 8 um wide

# x, y and z axes
x = np.linspace(dX, len(xProfile) * dX, len(xProfile))
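
# A minimal sketch of how these pieces might combine (assumed workflow, not
# part of the original script): fit a Gaussian to one profile and convert
# its sigma to a FWHM in metres.
# from scipy.optimize import curve_fit
# gauss = lambda t, a, mu, s: a * np.exp(-(t - mu)**2 / (2 * s**2))
# p, _ = curve_fit(gauss, x, xProfile, p0=[xProfile.max(), x.mean(), 10 * dX])
# print('x FWHM = %.3g m' % fwhm(p[2]))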
Example #13
0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 13 19:30:36 2018

@author: javier
"""

import igor.igorpy as igor
import numpy as np
import matplotlib.pyplot as plt
import Amperometry as amp

filePackage = '../EGFPv EXP8.pxp'
recording = igor.load(filePackage)
recordingDuration = 100.2 #duration in seconds
dt = recordingDuration/len(recording.Data.data)  # sampling interval in seconds
Fs = 1./dt #1kHz

wave = recording.Data.data/1e-12
datos = wave-wave.mean()
time = np.linspace(0, recordingDuration, len(wave), endpoint=True)

avgPoints = 20
# the helper functions below presumably come from the Amperometry module imported above
workingData = amp.movingAvg(datos,avgPoints) #smooth the signal with a moving average
peakThr = 10
H = amp.getPeaks(workingData,time, peakThr) #get peaks of the filtered signal

index = amp.getIndex(time,H[:,0]) #get the indices of the times where peaks occurred
baseline = 0
extremos = amp.getExtremes(index,datos,baseline) #get the start and end of each spike event based on the peak occurrence
Example #14
0
view7 = grid.add_view(row=2, col=3, col_span=1, bgcolor=(1,1,0,alpha),
                      border_color=(1,1,0),
                      margin=10)

##################### Data and Meta-data #####################
import igor.igorpy as igor
igor.ENCODING = 'UTF-8'
datum = {}
exp_path = './data/2015-09-30/'
igo_path = './data/2015-09-30/CA1c2-1.pxp'
img_path = './data/2015-09-30/CA1c2/'
print('Experiment path @ %s' % exp_path)

# igor file (electrophysiology)
print('Loading igor file @ %s' % igo_path)
ig = igor.load(igo_path)
_id_igor = dir(ig)

# imaging tiff
import os
print('loading imaging @ %s' % img_path)
for filename in os.listdir(img_path):
    if filename.endswith(".tiff"):
        datum[filename.split('_')[0]] = [filename]

# datum is a dictionary containing the tiff and igor waveforms
print('integrating imaging and electrophysiology')
for _id in datum.keys():
    if _id in _id_igor:
        datum[_id].append(ig[_id])
    else: