Example #1
0
    def read_block(
            self,
            # the 2 first keyword arguments are imposed by neo.io API
            lazy=False,
            cascade=True):
        """
        Read the file and return a neo Block.

        Parameters
        ----------
        lazy : bool
            If True, AnalogSignals are created empty, carrying only a
            ``lazy_shape`` attribute with the number of samples.
        cascade : bool
            If False, only the main file header is read and the returned
            Block contains no segments.

        Returns
        -------
        Block
            When cascade is True, the Block holds one Segment with one
            AnalogSignal per channel for which data are available.
        """
        def count_samples(m_length):
            """
            Count the number of signal samples available in a type 5 data
            block of length m_length.
            """
            # for information about type 5 data block, see [1]
            count = int((m_length - 6) / 2 - 2)
            # -6 corresponds to the header of block 5, and the -2 take into
            # account the fact that last 2 values are not available as the 4
            # corresponding bytes are coding the time stamp of the beginning
            # of the block
            return count

        # create the neo Block that will be returned at the end
        blck = Block(file_origin=os.path.basename(self.filename))
        blck.file_origin = os.path.basename(self.filename)

        # NOTE: in the following, the word "block" is used in the sense used in
        # the alpha-omega specifications (ie a data chunk in the file), rather
        # than in the sense of the usual Block object in neo

        # the context manager guarantees the file handle is released even if
        # parsing raises (the original called fid.close() manually and could
        # leak the handle on error)
        with open(self.filename, 'rb') as fid:

            # step 1: read the headers of all the data blocks to load the file
            # structure

            pos_block = 0  # position of the current block in the file
            file_blocks = []  # list of data blocks available in the file

            if not cascade:
                # we read only the main header

                m_length, m_TypeBlock = struct.unpack('Hcx', fid.read(4))
                # m_TypeBlock should be 'h', as we read the first block
                # NOTE(review): under Python 3, struct.unpack returns bytes
                # for the 'c' format code (b'h'); this code appears to target
                # Python 2 -- confirm before porting
                block = HeaderReader(
                    fid,
                    dict_header_type.get(m_TypeBlock, Type_Unknown)).read_f()
                block.update({
                    'm_length': m_length,
                    'm_TypeBlock': m_TypeBlock,
                    'pos': pos_block
                })
                file_blocks.append(block)

            else:  # cascade == True

                seg = Segment(file_origin=os.path.basename(self.filename))
                seg.file_origin = os.path.basename(self.filename)
                blck.segments.append(seg)

                while True:
                    first_4_bytes = fid.read(4)
                    if len(first_4_bytes) < 4:
                        # we have reached the end of the file
                        break
                    else:
                        m_length, m_TypeBlock = struct.unpack('Hcx',
                                                              first_4_bytes)

                    block = HeaderReader(
                        fid, dict_header_type.get(m_TypeBlock,
                                                  Type_Unknown)).read_f()
                    block.update({
                        'm_length': m_length,
                        'm_TypeBlock': m_TypeBlock,
                        'pos': pos_block
                    })

                    if m_TypeBlock == '2':
                        # The beginning of the block of type '2' is identical
                        # for all types of channels, but the following part
                        # depends on the type of channel. So we need a
                        # special case here.

                        # WARNING: How to check the type of channel is not
                        # described in the documentation. So here I use what
                        # is proposed in the C code [2].
                        # According to this C code, it seems that the
                        # 'm_isAnalog' is used to distinguish analog and
                        # digital channels, and 'm_Mode' encodes the type of
                        # analog channel: 0 for continuous, 1 for level,
                        # 2 for external trigger. But in some files, I found
                        # channels that seemed to be continuous channels with
                        # 'm_Modes' = 128 or 192. So I decided to consider
                        # every channel with 'm_Modes' different from 1 or 2
                        # as continuous. I also couldn't check that values of
                        # 1 and 2 are really for level and external trigger
                        # as I had no test files containing data of this
                        # types.

                        type_subblock = 'unknown_channel_type(m_Mode=' \
                                        + str(block['m_Mode']) + ')'
                        description = Type2_SubBlockUnknownChannels
                        block.update({'m_Name': 'unknown_name'})
                        if block['m_isAnalog'] == 0:
                            # digital channel
                            type_subblock = 'digital'
                            description = Type2_SubBlockDigitalChannels
                        elif block['m_isAnalog'] == 1:
                            # analog channel
                            if block['m_Mode'] == 1:
                                # level channel
                                type_subblock = 'level'
                                description = Type2_SubBlockLevelChannels
                            elif block['m_Mode'] == 2:
                                # external trigger channel
                                type_subblock = 'external_trigger'
                                description = Type2_SubBlockExtTriggerChannels
                            else:
                                # continuous channel
                                type_subblock = 'continuous(Mode' \
                                                + str(block['m_Mode']) + ')'
                                description = Type2_SubBlockContinuousChannels

                        subblock = HeaderReader(fid, description).read_f()

                        block.update(subblock)
                        block.update({'type_subblock': type_subblock})

                    file_blocks.append(block)
                    pos_block += m_length
                    fid.seek(pos_block)

                # step 2: find the available channels
                list_chan = []  # list containing indexes of channel blocks
                for ind_block, block in enumerate(file_blocks):
                    if block['m_TypeBlock'] == '2':
                        list_chan.append(ind_block)

                # step 3: find blocks containing data for the available
                # channels
                list_data = []  # list of lists of indexes of data blocks
                # corresponding to each channel
                for ind_chan, chan in enumerate(list_chan):
                    list_data.append([])
                    num_chan = file_blocks[chan]['m_numChannel']
                    for ind_block, block in enumerate(file_blocks):
                        if block['m_TypeBlock'] == '5':
                            if block['m_numChannel'] == num_chan:
                                list_data[ind_chan].append(ind_block)

                # step 4: compute the length (number of samples) of the
                # channels
                # NOTE: the np.int alias was removed in NumPy 1.24; the
                # builtin int is the documented drop-in replacement
                chan_len = np.zeros(len(list_data), dtype=int)
                for ind_chan, list_blocks in enumerate(list_data):
                    for ind_block in list_blocks:
                        chan_len[ind_chan] += count_samples(
                            file_blocks[ind_block]['m_length'])

                # step 5: find channels for which data are available
                ind_valid_chan = np.nonzero(chan_len)[0]

                # step 6: load the data
                # TODO give the possibility to load data as
                # AnalogSignalArrays
                for ind_chan in ind_valid_chan:
                    list_blocks = list_data[ind_chan]
                    ind = 0  # index in the data vector

                    # read time stamp for the beginning of the signal
                    form = '<l'  # reading format
                    ind_block = list_blocks[0]
                    count = count_samples(file_blocks[ind_block]['m_length'])
                    fid.seek(file_blocks[ind_block]['pos'] + 6 + count * 2)
                    buf = fid.read(struct.calcsize(form))
                    val = struct.unpack(form, buf)
                    start_index = val[0]

                    # WARNING: in the following, blocks are read supposing
                    # that they are all contiguous and sorted in time. I
                    # don't know if it's always the case. Maybe we should
                    # use the time stamp of each data block to choose where
                    # to put the read data in the array.
                    if not lazy:
                        temp_array = np.empty(chan_len[ind_chan],
                                              dtype=np.int16)
                        # NOTE: we could directly create an empty
                        # AnalogSignal and load the data in it, but it is
                        # much faster to load data in a temporary numpy
                        # array and create the AnalogSignals from this
                        # temporary array
                        for ind_block in list_blocks:
                            count = count_samples(
                                file_blocks[ind_block]['m_length'])
                            fid.seek(file_blocks[ind_block]['pos'] + 6)
                            temp_array[ind:ind + count] = \
                                np.fromfile(fid, dtype=np.int16, count=count)
                            ind += count

                    sampling_rate = \
                        file_blocks[list_chan[ind_chan]]['m_SampleRate'] \
                        * pq.kHz
                    t_start = (start_index / sampling_rate).simplified
                    if lazy:
                        ana_sig = AnalogSignal(
                            [],
                            sampling_rate=sampling_rate,
                            t_start=t_start,
                            name=file_blocks[list_chan[ind_chan]]['m_Name'],
                            file_origin=os.path.basename(self.filename),
                            units=pq.dimensionless)
                        ana_sig.lazy_shape = chan_len[ind_chan]
                    else:
                        ana_sig = AnalogSignal(
                            temp_array,
                            sampling_rate=sampling_rate,
                            t_start=t_start,
                            name=file_blocks[list_chan[ind_chan]]['m_Name'],
                            file_origin=os.path.basename(self.filename),
                            units=pq.dimensionless)

                    ana_sig.channel_index = \
                        file_blocks[list_chan[ind_chan]]['m_numChannel']
                    ana_sig.annotate(
                        channel_name=file_blocks[
                            list_chan[ind_chan]]['m_Name'])
                    ana_sig.annotate(
                        channel_type=file_blocks[
                            list_chan[ind_chan]]['type_subblock'])
                    seg.analogsignals.append(ana_sig)

        if file_blocks[0]['m_TypeBlock'] == 'h':  # this should always be true
            blck.rec_datetime = datetime.datetime(
                file_blocks[0]['m_date_year'],
                file_blocks[0]['m_date_month'],
                file_blocks[0]['m_date_day'],
                file_blocks[0]['m_time_hour'],
                file_blocks[0]['m_time_minute'],
                file_blocks[0]['m_time_second'],
                10000 * file_blocks[0]['m_time_hsecond'])
            # the 10000 is here to convert m_time_hsecond from centisecond
            # to microsecond
            version = file_blocks[0]['m_version']
            blck.annotate(alphamap_version=version)
            if cascade:
                seg.rec_datetime = blck.rec_datetime.replace()
                # datetime.replace() without arguments is a simple way to
                # make a copy of a datetime object
                seg.annotate(alphamap_version=version)
        if cascade:
            populate_RecordingChannel(blck, remove_from_annotation=True)
            blck.create_many_to_one_relationship()

        return blck
Example #2
0
    def read_block(self,
                   # the 2 first keyword arguments are imposed by neo.io API
                   lazy=False,
                   cascade=True):
        """
        Read the file and return a neo Block.

        Parameters
        ----------
        lazy : bool
            If True, AnalogSignals are created empty, carrying only a
            ``lazy_shape`` attribute with the number of samples.
        cascade : bool
            If False, only the main file header is read and the returned
            Block contains no segments.

        Returns
        -------
        Block
            When cascade is True, the Block holds one Segment with one
            AnalogSignal per channel for which data are available.
        """
        def count_samples(m_length):
            """
            Count the number of signal samples available in a type 5 data
            block of length m_length.
            """
            # for information about type 5 data block, see [1]
            count = int((m_length - 6) / 2 - 2)
            # -6 corresponds to the header of block 5, and the -2 take into
            # account the fact that last 2 values are not available as the 4
            # corresponding bytes are coding the time stamp of the beginning
            # of the block
            return count

        # create the neo Block that will be returned at the end
        blck = Block(file_origin=os.path.basename(self.filename))
        blck.file_origin = os.path.basename(self.filename)

        # NOTE: in the following, the word "block" is used in the sense used in
        # the alpha-omega specifications (ie a data chunk in the file), rather
        # than in the sense of the usual Block object in neo

        # the context manager guarantees the file handle is released even if
        # parsing raises (the original called fid.close() manually and could
        # leak the handle on error)
        with open(self.filename, 'rb') as fid:

            # step 1: read the headers of all the data blocks to load the file
            # structure

            pos_block = 0  # position of the current block in the file
            file_blocks = []  # list of data blocks available in the file

            if not cascade:
                # we read only the main header

                m_length, m_TypeBlock = struct.unpack('Hcx', fid.read(4))
                # m_TypeBlock should be 'h', as we read the first block
                # NOTE(review): under Python 3, struct.unpack returns bytes
                # for the 'c' format code (b'h'); this code appears to target
                # Python 2 -- confirm before porting
                block = HeaderReader(
                    fid,
                    dict_header_type.get(m_TypeBlock, Type_Unknown)).read_f()
                block.update({
                    'm_length': m_length,
                    'm_TypeBlock': m_TypeBlock,
                    'pos': pos_block
                })
                file_blocks.append(block)

            else:  # cascade == True

                seg = Segment(file_origin=os.path.basename(self.filename))
                seg.file_origin = os.path.basename(self.filename)
                blck.segments.append(seg)

                while True:
                    first_4_bytes = fid.read(4)
                    if len(first_4_bytes) < 4:
                        # we have reached the end of the file
                        break
                    else:
                        m_length, m_TypeBlock = struct.unpack('Hcx',
                                                              first_4_bytes)

                    block = HeaderReader(
                        fid, dict_header_type.get(m_TypeBlock,
                                                  Type_Unknown)).read_f()
                    block.update({
                        'm_length': m_length,
                        'm_TypeBlock': m_TypeBlock,
                        'pos': pos_block
                    })

                    if m_TypeBlock == '2':
                        # The beginning of the block of type '2' is identical
                        # for all types of channels, but the following part
                        # depends on the type of channel. So we need a
                        # special case here.

                        # WARNING: How to check the type of channel is not
                        # described in the documentation. So here I use what
                        # is proposed in the C code [2].
                        # According to this C code, it seems that the
                        # 'm_isAnalog' is used to distinguish analog and
                        # digital channels, and 'm_Mode' encodes the type of
                        # analog channel: 0 for continuous, 1 for level,
                        # 2 for external trigger. But in some files, I found
                        # channels that seemed to be continuous channels with
                        # 'm_Modes' = 128 or 192. So I decided to consider
                        # every channel with 'm_Modes' different from 1 or 2
                        # as continuous. I also couldn't check that values of
                        # 1 and 2 are really for level and external trigger
                        # as I had no test files containing data of this
                        # types.

                        type_subblock = 'unknown_channel_type(m_Mode=' \
                                        + str(block['m_Mode']) + ')'
                        description = Type2_SubBlockUnknownChannels
                        block.update({'m_Name': 'unknown_name'})
                        if block['m_isAnalog'] == 0:
                            # digital channel
                            type_subblock = 'digital'
                            description = Type2_SubBlockDigitalChannels
                        elif block['m_isAnalog'] == 1:
                            # analog channel
                            if block['m_Mode'] == 1:
                                # level channel
                                type_subblock = 'level'
                                description = Type2_SubBlockLevelChannels
                            elif block['m_Mode'] == 2:
                                # external trigger channel
                                type_subblock = 'external_trigger'
                                description = Type2_SubBlockExtTriggerChannels
                            else:
                                # continuous channel
                                type_subblock = 'continuous(Mode' \
                                                + str(block['m_Mode']) + ')'
                                description = Type2_SubBlockContinuousChannels

                        subblock = HeaderReader(fid, description).read_f()

                        block.update(subblock)
                        block.update({'type_subblock': type_subblock})

                    file_blocks.append(block)
                    pos_block += m_length
                    fid.seek(pos_block)

                # step 2: find the available channels
                list_chan = []  # list containing indexes of channel blocks
                for ind_block, block in enumerate(file_blocks):
                    if block['m_TypeBlock'] == '2':
                        list_chan.append(ind_block)

                # step 3: find blocks containing data for the available
                # channels
                list_data = []  # list of lists of indexes of data blocks
                # corresponding to each channel
                for ind_chan, chan in enumerate(list_chan):
                    list_data.append([])
                    num_chan = file_blocks[chan]['m_numChannel']
                    for ind_block, block in enumerate(file_blocks):
                        if block['m_TypeBlock'] == '5':
                            if block['m_numChannel'] == num_chan:
                                list_data[ind_chan].append(ind_block)

                # step 4: compute the length (number of samples) of the
                # channels
                # NOTE: the np.int alias was removed in NumPy 1.24; the
                # builtin int is the documented drop-in replacement
                chan_len = np.zeros(len(list_data), dtype=int)
                for ind_chan, list_blocks in enumerate(list_data):
                    for ind_block in list_blocks:
                        chan_len[ind_chan] += count_samples(
                            file_blocks[ind_block]['m_length'])

                # step 5: find channels for which data are available
                ind_valid_chan = np.nonzero(chan_len)[0]

                # step 6: load the data
                # TODO give the possibility to load data as
                # AnalogSignalArrays
                for ind_chan in ind_valid_chan:
                    list_blocks = list_data[ind_chan]
                    ind = 0  # index in the data vector

                    # read time stamp for the beginning of the signal
                    form = '<l'  # reading format
                    ind_block = list_blocks[0]
                    count = count_samples(file_blocks[ind_block]['m_length'])
                    fid.seek(file_blocks[ind_block]['pos'] + 6 + count * 2)
                    buf = fid.read(struct.calcsize(form))
                    val = struct.unpack(form, buf)
                    start_index = val[0]

                    # WARNING: in the following, blocks are read supposing
                    # that they are all contiguous and sorted in time. I
                    # don't know if it's always the case. Maybe we should
                    # use the time stamp of each data block to choose where
                    # to put the read data in the array.
                    if not lazy:
                        temp_array = np.empty(chan_len[ind_chan],
                                              dtype=np.int16)
                        # NOTE: we could directly create an empty
                        # AnalogSignal and load the data in it, but it is
                        # much faster to load data in a temporary numpy
                        # array and create the AnalogSignals from this
                        # temporary array
                        for ind_block in list_blocks:
                            count = count_samples(
                                file_blocks[ind_block]['m_length'])
                            fid.seek(file_blocks[ind_block]['pos'] + 6)
                            temp_array[ind:ind + count] = \
                                np.fromfile(fid, dtype=np.int16, count=count)
                            ind += count

                    sampling_rate = \
                        file_blocks[list_chan[ind_chan]]['m_SampleRate'] \
                        * pq.kHz
                    t_start = (start_index / sampling_rate).simplified
                    if lazy:
                        ana_sig = AnalogSignal(
                            [],
                            sampling_rate=sampling_rate,
                            t_start=t_start,
                            name=file_blocks[list_chan[ind_chan]]['m_Name'],
                            file_origin=os.path.basename(self.filename),
                            units=pq.dimensionless)
                        ana_sig.lazy_shape = chan_len[ind_chan]
                    else:
                        ana_sig = AnalogSignal(
                            temp_array,
                            sampling_rate=sampling_rate,
                            t_start=t_start,
                            name=file_blocks[list_chan[ind_chan]]['m_Name'],
                            file_origin=os.path.basename(self.filename),
                            units=pq.dimensionless)

                    ana_sig.channel_index = \
                        file_blocks[list_chan[ind_chan]]['m_numChannel']
                    ana_sig.annotate(
                        channel_name=file_blocks[
                            list_chan[ind_chan]]['m_Name'])
                    ana_sig.annotate(
                        channel_type=file_blocks[
                            list_chan[ind_chan]]['type_subblock'])
                    seg.analogsignals.append(ana_sig)

        if file_blocks[0]['m_TypeBlock'] == 'h':  # this should always be true
            blck.rec_datetime = datetime.datetime(
                file_blocks[0]['m_date_year'],
                file_blocks[0]['m_date_month'],
                file_blocks[0]['m_date_day'],
                file_blocks[0]['m_time_hour'],
                file_blocks[0]['m_time_minute'],
                file_blocks[0]['m_time_second'],
                10000 * file_blocks[0]['m_time_hsecond'])
            # the 10000 is here to convert m_time_hsecond from centisecond
            # to microsecond
            version = file_blocks[0]['m_version']
            blck.annotate(alphamap_version=version)
            if cascade:
                seg.rec_datetime = blck.rec_datetime.replace()
                # datetime.replace() without arguments is a simple way to
                # make a copy of a datetime object
                seg.annotate(alphamap_version=version)
        if cascade:
            populate_RecordingChannel(blck, remove_from_annotation=True)
            blck.create_many_to_one_relationship()

        return blck
Example #3
0
# Build a Block with one Segment and a 64-channel AnalogSignal linked
# through a ChannelIndex, write it with PickleIO, then load it back.
blk = Block()
seg = Segment(name='segment foo')
blk.segments.append(seg)

source_ids = np.arange(64)
channel_ids = source_ids + 42
chx = ChannelIndex(name='Array probe', index=np.arange(64),
                   channel_ids=channel_ids,
                   channel_names=['Channel %i' % chid for chid in channel_ids])
blk.channel_indexes.append(chx)

a = AnalogSignal(np.random.randn(10000, 64)*nA, sampling_rate=10*kHz)

# link AnalogSignal and ID providing channel_index
a.channel_index = chx
chx.analogsignals.append(a)
seg.analogsignals.append(a)

seg1 = blk.segments[0]
a1 = seg1.analogsignals[0]
chx1 = a1.channel_index
print(chx1)
print(chx1.index)

io = neo.io.PickleIO(filename="test.pickle")
io.write(blk)

# pickle files are binary: they must be opened in 'rb' mode (text mode
# raises under Python 3 and corrupts data on Windows)
with open("test.pickle", "rb") as pickle_file:
    blk2 = pickle.load(pickle_file)
Example #4
0
File: helpers.py  Project: Erycite/neuromod
def analyse(params, folder='results', addon='', removeDataFile=False):
    """
    Analyse the recorded data of each population and save summary figures.

    For every population (except 'ext') the pickled neo data file is
    loaded and, depending on the recorders available, membrane potential,
    adaptation variable, phase-space, synaptic conductance, injected
    current, spike raster, firing-rate and LFP figures are written into
    *folder*.

    Parameters
    ----------
    params : dict
        Simulation parameters; must provide 'Populations', 'Recorders',
        'Injections', 'run_time' and 'dt'.
    folder : str
        Folder holding the .pkl result files; the default 'results' is
        extended with a date-stamped subfolder.
    addon : str
        Suffix appended to the result file names.
    removeDataFile : bool
        If True, delete each population's .pkl file after analysis.

    Returns
    -------
    dict or list
        {} when no population has a 'spikes' recorder; otherwise the score
        list [spike count, mean ISI, CV, adaptation tag] of the last
        population processed.
    """
    print("analysing data")
    # populations key-recorders match
    populations = {}
    for popKey, popVal in params['Populations'].items():
        if popKey != 'ext':
            populations[popKey] = list(params['Recorders'][popKey].keys())

    scores = {}

    # default results name folder
    if folder == 'results':
        dt = datetime.now()
        date = dt.strftime("%d-%m-%I-%M")
        folder = folder + '/' + date

    # iteration over populations and selective plotting based on available
    # recorders
    for key, rec in populations.items():
        # the with-statement closes the data file; the original
        # pickle.load(open(...)) leaked one file descriptor per population
        with open(folder + '/' + key + addon + '.pkl', "rb") as datafile:
            # local renamed from 'neo' so it cannot shadow the neo package
            neo_data = pickle.load(datafile)
        data = neo_data.segments[0]

        panels = []

        if 'v' in rec:
            vm = data.filter(name='v')[0]
            panels.append(
                Panel(vm,
                      ylabel="Membrane potential (mV)",
                      xlabel="Time (ms)",
                      xticks=True,
                      yticks=True,
                      legend=None))

        if 'w' in rec:
            w = data.filter(name='w')[0]
            # workaround: plot the adaptation variable directly with
            # matplotlib instead of a Panel
            fig = plot.figure()
            plot.plot(w, linewidth=2)
            fig.savefig(folder + '/w_' + key + addon + '.svg')
            fig.clf()
            plot.close()

        if 'w' in rec and 'v' in rec:
            # phase-space portrait: trajectory plus V and w nullclines at
            # rest and at the injected amplitude
            vm = data.filter(name='v')[0]
            w = data.filter(name='w')[0]
            I = 0  # at rest
            xn1, xn2 = nullcline(v_nullcline, params, I, (-100, 0), 100)
            I = params['Injections']['cell']['amplitude'][0]
            xI1, xI2 = nullcline(v_nullcline, params, I, (-100, 0), 100)
            yn1, yn2 = nullcline(w_nullcline, params, I, (-100, 0), 100)
            fig = plot.figure()
            plot.plot(vm, w, linewidth=2, color="red")
            plot.plot(xn1, xn2, '--', color="black")
            plot.plot(xI1, xI2, color="black")
            plot.plot(yn1, np.array(yn2) / 1000, color="blue")
            plot.axis([-100, -30, -.4, .6])
            plot.xlabel("V (mV)")
            plot.ylabel("w (nA)")
            plot.title("Phase space")
            fig.savefig(folder + '/phase_' + key + addon + '.svg')
            fig.clf()
            plot.close()

        if 'gsyn_exc' in rec:
            gsyn_exc = data.filter(name="gsyn_exc")[0]
            panels.append(
                Panel(gsyn_exc,
                      ylabel="Exc Synaptic conductance (uS)",
                      xlabel="Time (ms)",
                      xticks=True,
                      legend=None))

        if 'gsyn_inh' in rec:
            gsyn_inh = data.filter(name="gsyn_inh")[0]
            panels.append(
                Panel(gsyn_inh,
                      ylabel="Inh Synaptic conductance (uS)",
                      xlabel="Time (ms)",
                      xticks=True,
                      legend=None))

        if params['Injections']:
            # rebuild the injected current as a step signal bracketed by
            # zero amplitude, e.g. [0., -.25, 0.0, .25, 0.0, 0.]
            amplitude = np.array([0.] +
                                 params['Injections']['cell']['amplitude'] +
                                 [0.])
            start = np.array([0.] + params['Injections']['cell']['start'] +
                             [params['run_time']]) / params['dt']
            start_int = start.astype(int)
            current = np.array([])

            for i in range(1, len(amplitude)):
                if current.shape == (0, ):
                    current = np.ones((start_int[i] - start_int[i - 1] + 1,
                                       1)) * amplitude[i - 1]
                else:
                    current = np.concatenate(
                        (current, np.ones(
                            (start_int[i] - start_int[i - 1], 1)) *
                         amplitude[i - 1]), 0)
            current = AnalogSignal(current,
                                   units='mA',
                                   sampling_rate=params['dt'] * pq.Hz)
            current.channel_index = np.array([0])
            panels.append(
                Panel(current,
                      ylabel="Current injection (mA)",
                      xlabel="Time (ms)",
                      xticks=True,
                      legend=None))

        if 'spikes' in rec:
            panels.append(
                Panel(data.spiketrains,
                      xlabel="Time (ms)",
                      xticks=True,
                      markersize=1))

            # NOTE(review): scores is rebound here from the dict initialised
            # above to a list, so the function returns the list of the last
            # population that has a 'spikes' recorder -- confirm intended
            scores = []
            scores.append(0)  # Spike count
            scores.append(0.0)  # Inter-Spike Interval
            scores.append(0.0)  # Coefficient of Variation
            scores.append(0)  # Spike Interval

            # Spike Count
            if hasattr(data.spiketrains[0], "__len__"):
                scores[0] = len(data.spiketrains[0])
            # ISI
            isitot = isi([data.spiketrains[0]])

            if hasattr(isitot, "__len__"):
                # NOTE(review): mean/len is an unusual "mean ISI" statistic;
                # preserved as-is, confirm the intended formula
                scores[1] = np.mean(isitot) / len(isitot)  # mean ISI
                scores[2] = cv([data.spiketrains[0]])  # CV

            if isinstance(isitot, np.ndarray):
                if len(isitot[0]) > 0:
                    # if strictly increasing, then spiking is adapting
                    # but check that there are no spikes beyond stimulation
                    if data.spiketrains[0][-1] < params['run_time']:
                        if all(x < y for x, y in zip(isitot, isitot[1:])):
                            scores[3] = 'adapting'
                            # ISIs plotted against spike interval position
                            fig = plot.figure()
                            plot.plot(isitot[0], linewidth=2)
                            plot.title("CV:" + str(scores[2]) + " " +
                                       str(addon))
                            fig.savefig(folder + '/ISI_interval_' + key +
                                        addon + '.svg')
                            fig.clf()
                            plot.close()

            # firing rate
            fr = rate(params, data.spiketrains, bin_size=10)  # ms
            fig = plot.figure(56)
            plot.plot(fr, linewidth=2)
            plot.title(str(scores))
            plot.ylim([.0, 1.])
            fig.savefig(folder + '/firingrate_' + key + addon + '.svg')
            fig.clf()
            plot.close()

        # LFP
        if 'v' in rec and 'gsyn_exc' in rec:
            lfp = LFP(data)
            vm = data.filter(name='v')[0]
            fig = plot.figure()
            plot.plot(lfp)
            fig.savefig(folder + '/LFP_' + key + addon + '.png')
            fig.clear()
            # Vm histogram
            fig = plot.figure()
            ylabel = key
            n, bins, patches = plot.hist(np.mean(vm, 1), 50)
            fig.savefig(folder + '/Vm_histogram_' + key + addon + '.png')
            fig.clear()

            fig, axes = plot.subplots(nrows=1, ncols=2, figsize=(7, 4))
            Fs = 1 / params['dt']  # sampling frequency
            # plot different spectrum types:
            axes[0].set_title("Log. Magnitude Spectrum")
            axes[0].magnitude_spectrum(lfp, Fs=Fs, scale='dB', color='red')
            axes[1].set_title("Phase Spectrum ")
            axes[1].phase_spectrum(lfp, Fs=Fs, color='red')
            fig.tight_layout()
            fig.savefig(folder + '/Spectrum_' + key + addon + '.png')
            fig.clear()

        # for systems with low memory :)
        if removeDataFile:
            os.remove(folder + '/' + key + addon + '.pkl')

    return scores