Example No. 1
def nsxload(
    fn_mwk,
    fn_nev,
    fn_nsx,
    override_delay_us=OVERRIDE_DELAY_US,
    verbose=False,
    extinfo=False,
    c_success=C_SUCCESS,
    data_only=True,
    list_form=False,
):
    mf = MWKFile(fn_mwk)
    mf.open()
    br = BRReader(fn_nev, fn_nsx)
    br.open()

    # read TOC info from the "merged" mwk file
    toc = mf.get_events(codes=[Merge.C_MAGIC])[0].value
    c_spikes = toc[Merge.K_SPIKE]  # get the code name from the toc

    # at which times was a stimulus presentation marked successful?
    t_success = [ev.time for ev in mf.get_events(codes=[c_success])]
    t_success = np.array(t_success)

    img_onset, img_id = get_stim_info(mf, extinfo=extinfo)
    n_stim = len(img_onset)

    # MAC-NSP time translation
    if override_delay_us is not None:
        t_delay = toc["align_info"]["delay"]
        t_adjust = int(np.round(override_delay_us - t_delay))
    else:
        t_adjust = 0
    a, b = toc["align_info"]["params"]
    f = lambda t_mwk: float(t_mwk - b) / a
    t_start = T_START - t_adjust
    t_stop = T_STOP - t_adjust

    # actual calculation -------------------------------
    for i in range(n_stim):
        t0 = img_onset[i]
        iid = img_id[i]
        # check whether this presentation was successful; if not, skip it.
        if np.sum((t_success > t0) & (t_success < (t0 + T_SUCCESS))) < 1:
            continue

        if verbose:
            print "At", (i + 1), "out of", n_stim

        t_nsx_start = f(t0 + t_start)
        t_nsx_stop = f(t0 + t_stop)
        yield [
            data for data in br.nsx_read_range_ts(t_nsx_start, t_nsx_stop, data_only=data_only, list_form=list_form)
        ], br.nsx_chn_order
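
A minimal consumer sketch for the generator above; the file names are placeholders, and the module-level constants it relies on (OVERRIDE_DELAY_US, C_SUCCESS, T_START, T_STOP, T_SUCCESS) are assumed to be defined alongside nsxload.

# Hypothetical usage: iterate over the per-presentation NSX chunks yielded
# by nsxload().  'session.mwk', 'session.nev' and 'session.ns5' are
# placeholder paths, not files referenced by the original code.
for nsx_data, chn_order in nsxload('session.mwk', 'session.nev', 'session.ns5'):
    # nsx_data is the list collected from br.nsx_read_range_ts() for one
    # valid stimulus presentation; chn_order is the reader's channel order.
    print(len(nsx_data), 'NSX records; channels:', chn_order)
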
Example No. 2
def selection_counts(filename):
    with MWKFile(filename) as f:
        r_codec = f.reverse_codec
        red_code = r_codec['red_selected']
        green_code = r_codec['green_selected']
        blue_code = r_codec['blue_selected']

        red_count = 0
        green_count = 0
        blue_count = 0

        for evt in f.get_events_iter(codes=[red_code, green_code, blue_code]):
            if evt.data:
                if evt.code == red_code:
                    red_count += 1
                elif evt.code == green_code:
                    green_count += 1
                else:
                    assert evt.code == blue_code
                    blue_count += 1

        index = numpy.arange(3)
        pyplot.bar(index, [red_count, green_count, blue_count],
                   0.5,
                   color=['r', 'g', 'b'],
                   align='center')
        pyplot.xticks(index, ['Red', 'Green', 'Blue'])
        pyplot.title('Selection Counts')
        pyplot.show()
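
The same tally can be written more compactly with collections.Counter; a sketch that keeps the variable names used above and assumes filename is already defined.

# Sketch: Counter-based variant of the counting loop above.
from collections import Counter

with MWKFile(filename) as f:
    r_codec = f.reverse_codec
    codes = [r_codec[name] for name in
             ('red_selected', 'green_selected', 'blue_selected')]
    counts = Counter(evt.code for evt in f.get_events_iter(codes=codes)
                     if evt.data)
    red_count, green_count, blue_count = (counts[c] for c in codes)
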
Example No. 3
    def setUpClass(cls):
        cls.event_counts = defaultdict(lambda: 0)
        cls.event_times = []

        for evt in cls.file_reader(cls.filename):
            cls.event_counts[evt[0]] += 1
            cls.event_times.append(evt[1])

        cls.event_counts = dict(cls.event_counts)
        cls.event_times = numpy.array(cls.event_times)

        cls.fp = MWKFile(cls.filename)
Example No. 4
    def setUpClass(cls):
        cls.event_counts = defaultdict(lambda: 0)
        cls.event_times = []

        with MWKStream.open_file(cls.filename) as stream:
            for evt in stream:
                cls.event_counts[evt.code] += 1
                cls.event_times.append(evt.time)

        cls.event_counts = dict(cls.event_counts)
        cls.event_times = numpy.array(cls.event_times)

        cls.fp = MWKFile(cls.filename)
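
A hypothetical sibling test method for the setUpClass above (not part of the original suite): it re-counts events through MWKFile and compares them with the streamed counts, assuming get_events_iter() with no code filter yields every event.

    # Hypothetical cross-check; relies only on the attributes built in setUpClass.
    def test_stream_counts_match_file(self):
        counts = defaultdict(lambda: 0)
        self.fp.open()
        try:
            for evt in self.fp.get_events_iter():
                counts[evt.code] += 1
        finally:
            self.fp.close()
        self.assertEqual(self.event_counts, dict(counts))
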
Example No. 5
def validate_dots_data(filename):
    with MWKFile(filename) as f:
        num_events = 0
        for e in f.get_events_iter(codes=['#announceStimulus']):
            value = e.value
            if (isinstance(value, dict) and (value['type'] == 'moving_dots')
                    and ('dots' in value)):
                data = numpy.fromstring(value['dots'], numpy.float32)
                assert len(data) == (2 * value['num_dots'])

                x = data[::2]
                y = data[1::2]
                assert (x * x + y * y).max() <= 1.0

                num_events += 1

        assert num_events > 0
        print('Processed %d events' % num_events)
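
An equivalent way to unpack the interleaved dot coordinates, sketched with numpy.frombuffer and a reshape instead of strided slicing (numpy.fromstring is deprecated in recent numpy releases); value stands for one '#announceStimulus' dict as in the loop above.

# Sketch: same validity check for a single moving_dots announcement.
data = numpy.frombuffer(value['dots'], dtype=numpy.float32)
xy = data.reshape(-1, 2)               # row i holds (x_i, y_i)
assert xy.shape[0] == value['num_dots']
assert (xy ** 2).sum(axis=1).max() <= 1.0
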
Example No. 6
def receive(self):
    try:
        with MWKFile(self.filename) as fp:
            evts = fp.get_events()
            self.assertEqual(1, len(evts))
            e = evts[0]
            self.assertEqual(1, e.code)
            self.assertEqual(2, e.time)
            try:
                return e.data
            except Exception as e:
                return e
    finally:
        try:
            fp.unindex()
        except mworks.data.IndexingException:
            pass
        os.remove(self.filename)
Example No. 7
class FileTestMixin(object):

    def setUp(self):
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)
            self.filename = tempfile.mktemp(suffix=self.file_extension)
        self.fp = MWKFile(self.filename)

    def tearDown(self):
        self.fp.close()  # OK to close even if not opened
        if os.path.exists(self.filename):
            try:
                self.fp.unindex()
            except mworks.data.IndexingException:
                pass
            os.remove(self.filename)

    def create_file(self, events=()):
        fp = self.file_writer(self.filename)
        for code, time, data in events:
            fp.write_event(code, time, data)

    def open_file(self):
        self.fp.open()

    def assertEvent(self, evt, code, time, value):
        self.assertIsInstance(evt, EventWrapper)
        self.assertFalse(evt.empty)
        self.assertIsInstance(evt.code, int)
        self.assertEqual(code, evt.code)
        self.assertIsInstance(evt.time, (int, long))
        self.assertEqual(time, evt.time)
        self.assertIsInstance(evt.value, type(value))
        self.assertEqual(value, evt.value)

        # Alternative name for value
        self.assertIsInstance(evt.data, type(evt.value))
        self.assertEqual(evt.value, evt.data)
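
A hypothetical wiring sketch for this mixin: the concrete TestCase is expected to supply file_extension and file_writer. The stub writer below is invented so the sketch stands alone; the real suite plugs in an actual event-writer class.

# Hypothetical subclass; _StubWriter is a stand-in, not an MWorks class.
import unittest

class _StubWriter(object):
    def __init__(self, filename):
        self.events = []
    def write_event(self, code, time, data):
        self.events.append((code, time, data))

class StubFileTest(FileTestMixin, unittest.TestCase):
    file_extension = '.stub'
    file_writer = _StubWriter
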
Example No. 8
class MWKFileTestMixin(DataTestMixin):
    def setUp(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            self.filename = os.tempnam() + ".mwk"
        self.fp = MWKFile(self.filename)

    def tearDown(self):
        self.fp.close()  # OK to close even if not opened
        if os.path.exists(self.filename):
            try:
                self.fp.unindex()
            except mworks.data.IndexingException:
                pass
            os.remove(self.filename)

    def create_file(self, events=()):
        with MWKStream._create_file(self.filename) as fp:
            for evt in events:
                fp._write(evt)

    def open_file(self):
        self.fp.open()
Example No. 9
class MWKFileTestMixin(DataTestMixin):
    def setUp(self):
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)
            self.filename = os.tempnam() + '.mwk'
        self.fp = MWKFile(self.filename)

    def tearDown(self):
        self.fp.close()  # OK to close even if not opened
        if os.path.exists(self.filename):
            try:
                self.fp.unindex()
            except mworks.data.IndexingException:
                pass
            os.remove(self.filename)

    def create_file(self, events=()):
        with MWKStream._create_file(self.filename) as fp:
            for evt in events:
                fp._write(evt)

    def open_file(self):
        self.fp.open()
Example No. 10
class FileTestMixin(object):
    def setUp(self):
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)
            self.filename = tempfile.mktemp(suffix=self.file_extension)
        self.fp = MWKFile(self.filename)

    def tearDown(self):
        self.fp.close()  # OK to close even if not opened
        if os.path.exists(self.filename):
            try:
                self.fp.unindex()
            except mworks.data.IndexingException:
                pass
            os.remove(self.filename)

    def create_file(self, events=()):
        fp = self.file_writer(self.filename)
        for code, time, data in events:
            fp.write_event(code, time, data)

    def open_file(self):
        self.fp.open()

    def assertEvent(self, evt, code, time, value):
        self.assertIsInstance(evt, EventWrapper)
        self.assertFalse(evt.empty)
        self.assertIsInstance(evt.code, int)
        self.assertEqual(code, evt.code)
        self.assertIsInstance(evt.time, (int, long))
        self.assertEqual(time, evt.time)
        self.assertIsInstance(evt.value, type(value))
        self.assertEqual(value, evt.value)

        # Alternative name for value
        self.assertIsInstance(evt.data, type(evt.value))
        self.assertEqual(evt.value, evt.data)
Example No. 11
def setUp(self):
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', RuntimeWarning)
        self.filename = tempfile.mktemp(suffix=self.file_extension)
    self.fp = MWKFile(self.filename)
Example No. 12
def setUp(self):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", RuntimeWarning)
        self.filename = os.tempnam() + ".mwk"
    self.fp = MWKFile(self.filename)
Example No. 13
def setUp(self):
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', RuntimeWarning)
        self.filename = tempfile.mktemp(suffix=self.file_extension)
    self.fp = MWKFile(self.filename)
Example No. 14
def get_data(
    fn_mwk,
    fn_nev,
    clus_info,
    override_delay_us=None,
    override_elecs=None,
    verbose=True,
    c_success=C_SUCCESS,
    t_success_lim=T_SUCCESS,
    max_clus=MAX_CLUS,
    extinfo=False,
    imgmax=IMG_MAX,
    exclude_img=EXCLUDE_IMG,
):
    mf = MWKFile(fn_mwk)
    mf.open()
    nf = load_spike_data(fn_nev)
    nf.open()
    cnt = defaultdict(int)

    # read TOC info from the "merged" mwk file
    toc = xget_events(mf, codes=[Merge.C_MAGIC])[0].value
    c_spikes = toc[Merge.K_SPIKE]  # get the code name from the toc

    # at which times was a stimulus presentation marked successful?
    t_success = [ev.time for ev in xget_events(mf, codes=[c_success])]
    t_success = np.array(t_success)

    # get the active electrodes
    if override_elecs is None:
        actvelecs = toc[Merge.K_SPIKEWAV].keys()
    else:
        actvelecs = override_elecs  # e.g, range(1, 97)

    img_onset, img_id = get_stim_info(mf, extinfo=extinfo, exclude_img=exclude_img)
    n_stim = len(img_onset)

    # MAC-NSP time translation
    if override_delay_us is not None:
        t_delay = toc["align_info"]["delay"]
        t_adjust = int(np.round(override_delay_us - t_delay))
    else:
        t_adjust = 0
    t_start = T_START - t_adjust
    t_stop = T_STOP - t_adjust

    # actual calculation -------------------------------
    # collect raw spike waveforms into clus_info[(channel, cluster_id)]
    for i in range(n_stim):
        if i > imgmax:
            break
        t0 = img_onset[i]
        iid = img_id[i]
        # -- check whether this presentation was successful; if not, skip it.
        if np.sum((t_success > t0) & (t_success < (t0 + t_success_lim))) < 1:
            continue

        if verbose:
            print "At", (i + 1), "out of", n_stim, "         \r",
            sys.stdout.flush()

        spikes = xget_events(mf, codes=[c_spikes], time_range=[t0 + t_start, t0 + t_stop])
        for s in spikes:
            ch = s.value["id"]
            cid = s.value["cluster_id"]
            pos = s.value["foffset"]
            key = (ch, cid)

            if cnt[key] >= MAX_SPK:
                continue
            cnt[key] += 1

            dat = nf.read_once(pos=pos, proc_wav=True)
            wav = dat["waveform"]
            clus_info[key].append(np.array(wav))
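
A hypothetical driver for get_data(): clus_info maps (channel, cluster_id) keys to the raw waveforms collected for them, so a defaultdict(list) is a natural container. The file names are placeholders, and the module-level constants the function defaults to are assumed to be defined.

# Hypothetical call site; 'session.mwk' and 'session.nev' are placeholder paths.
from collections import defaultdict

clus_info = defaultdict(list)
get_data('session.mwk', 'session.nev', clus_info)
for (ch, cid), waveforms in sorted(clus_info.items()):
    print('ch %d, cluster %d: %d waveforms' % (ch, cid, len(waveforms)))
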
Example No. 15
def dump_events(filename, photodiode_file, sample_on_file):
    print(filename)

    event_file = MWKFile(filename)
    event_file.open()

    # Variables we'd like to fetch data for
    names = ['trial_start_line',
             'correct_fixation',
             'stimulus_presented',
             'stim_on_time',
             'stim_off_time',
             'stim_on_delay',
             'stimulus_size',
             'fixation_window_size',
             'fixation_point_size_min']
    data = get_events(event_file=event_file, name=names)
    # event_file.close()

    ###########################################################################
    # Create a dict to store output information
    ###########################################################################
    output = {
        'stim_on_time_ms': data[data.name == 'stim_on_time']['data'].values[-1] / 1000.,
        'stim_off_time_ms': data[data.name == 'stim_off_time']['data'].values[-1] / 1000.,
        'stim_on_delay_ms': data[data.name == 'stim_on_delay']['data'].values[-1] / 1000.,
        'stimulus_size_degrees': data[data.name == 'stimulus_size']['data'].values[-1],
        'fixation_window_size_degrees': data[data.name == 'fixation_window_size']['data'].values[-1],
        'fixation_point_size_degrees': data[data.name == 'fixation_point_size_min']['data'].values[-1],
    }

    ###########################################################################
    # Add column in data to indicate whether stimulus was first in trial or not
    ###########################################################################
    data['first_in_trial'] = False
    # Filter data to only get `trial_start_line` and `stimulus_presented` information
    df = data[(data.name == 'trial_start_line') | ((data.name == 'stimulus_presented') & (data.data != -1))]
    # Extract `time` for the first `stimulus_presented` (which is right after `trial_start_line` has been pulsed)
    first_in_trial_times = [df.time.values[i] for i in range(1, len(df))
                            if ((df.name.values[i - 1] == 'trial_start_line') and
                                (df.name.values[i] == 'stimulus_presented'))]
    data['first_in_trial'] = data['time'].apply(lambda x: True if x in first_in_trial_times else False)

    ###########################################################################
    # Extract stimulus presentation order and fixation information
    ###########################################################################
    stimulus_presented_df = data[data.name == 'stimulus_presented'].reset_index(drop=True)
    correct_fixation_df = data[data.name == 'correct_fixation'].reset_index(drop=True)
    # stimulus_presented_df = stimulus_presented_df[:len(correct_fixation_df)]  # If you have one extra stimulus event but not fixation, use this
    assert len(stimulus_presented_df) == len(correct_fixation_df)
    # Drop `empty` data (i.e. -1) before the experiment actually began and after it had already ended
    correct_fixation_df = correct_fixation_df[stimulus_presented_df.data != -1].reset_index(drop=True)
    stimulus_presented_df = stimulus_presented_df[stimulus_presented_df.data != -1].reset_index(drop=True)
    # Add `first_in_trial` info to other data frame too
    correct_fixation_df['first_in_trial'] = stimulus_presented_df['first_in_trial']

    ###########################################################################
    # Add column to indicate order in trial (1 2 3 1 2 3 etc.)
    ###########################################################################
    assert stimulus_presented_df.iloc[0].first_in_trial
    stimulus_presented_df['stimulus_order_in_trial'] = ''
    counter = 1
    for index, row in stimulus_presented_df.iterrows():
        if row['first_in_trial']:
            counter = 1
        stimulus_presented_df.at[index, 'stimulus_order_in_trial'] = counter
        counter += 1
    correct_fixation_df['stimulus_order_in_trial'] = stimulus_presented_df['stimulus_order_in_trial']

    ###########################################################################
    # Read sample on file
    ###########################################################################
    fid = open(sample_on_file, 'rb')
    filesize = os.path.getsize(sample_on_file)  # in bytes
    num_samples = filesize // 2  # uint16 = 2 bytes
    digital_in = np.fromfile(fid, 'uint16', num_samples)
    fid.close()

    samp_on, = np.nonzero(digital_in[:-1] < digital_in[1:])  # Look for 0->1 transitions
    samp_on = samp_on + 1  # nonzero() gives the index of the last low sample; shift by one to index the first high sample

    if len(stimulus_presented_df) > len(samp_on):
        print(f'Warning: Trimming MWorks files as ({len(stimulus_presented_df)} > {len(samp_on)})')
        stimulus_presented_df = stimulus_presented_df[:len(samp_on)]
        correct_fixation_df = correct_fixation_df[:len(samp_on)]

    # samp_on = samp_on[:len(correct_fixation_df)]   # If you have one extra stimulus event but not fixation, use this
    assert len(samp_on) == len(stimulus_presented_df)

    ###########################################################################
    # Read photodiode file
    ###########################################################################
    fid = open(photodiode_file, 'rb')
    filesize = os.path.getsize(photodiode_file)  # in bytes
    num_samples = filesize // 2  # uint16 = 2 bytes
    v = np.fromfile(fid, 'uint16', num_samples)
    fid.close()

    # Convert to volts (use this if the data file was generated by Recording Controller)
    v = (v - 32768) * 0.0003125

    # Detect rises in the oscillating photodiode signal
    peaks, _ = find_peaks(v, height=0)  # Find all peaks
    peaks = np.asarray([p for p in peaks if v[p] > THRESHOLD])  # Apply threshold
    photodiode_on = np.asarray([min(peaks[(peaks >= s) & (peaks < (s + 100_000))]) for s in samp_on])
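
The 0 -> 1 transition detection used above, pulled out as a small helper and run on a toy trace (a sketch, not part of the original script).

# Sketch: rising-edge detection on a 1-D array of digital samples.
import numpy as np

def rising_edges(digital):
    """Return the index of the first high sample of each low -> high step."""
    idx, = np.nonzero(digital[:-1] < digital[1:])
    return idx + 1

print(rising_edges(np.array([0, 0, 1, 1, 0, 1], dtype=np.uint16)))  # -> [2 5]
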
Example No. 16
def setUp(self):
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', RuntimeWarning)
        self.filename = os.tempnam() + '.mwk'
    self.fp = MWKFile(self.filename)
Example No. 17
def getspk(fn_mwk, fn_nev=None, override_elecs=None, \
        ch_shift=None, ign_unregistered=False, \
        override_delay_us=None, verbose=False, \
        extinfo=False, exclude_img=None, \
        c_success=C_SUCCESS, t_success_lim=T_SUCCESS, \
        t_start0=T_START, t_stop0=T_STOP, \
        new_thr=None, \
        only_new_t=False, reject_sloppy=REJECT_SLOPPY, \
        c_stim=C_STIM, c_msg=C_MSG, \
        err_utime_msg=ERR_UTIME_MSG, err_utime_type=ERR_UTIME_TYPE):
    """Get all valid spiking info"""

    mf = MWKFile(fn_mwk)
    mf.open()

    if fn_nev is not None:
        br = load_spike_data(fn_nev)
        assert br.open()
    else:
        br = None

    # read TOC info from the "merged" mwk file
    toc = xget_events(mf, codes=[Merge.C_MAGIC])[0].value
    c_spikes = toc[Merge.K_SPIKE]   # get the code name from the toc

    # at which times was a stimulus presentation marked successful?
    t_success = [ev.time for ev in xget_events(mf, codes=[c_success])]
    t_success = np.array(t_success)

    # get the active electrodes
    if override_elecs is None:
        actvelecs = toc[Merge.K_SPIKEWAV].keys()
    else:
        actvelecs = override_elecs    # e.g, range(1, 97)

    # -- preps
    img_onset, img_id = get_stim_info(mf, extinfo=extinfo, \
            exclude_img=exclude_img)

    # if requested, remove all sloppy
    # (time spent during update main window > 2 frames)
    if reject_sloppy:
        # since get_stim_info ignores fixation point,
        # all stimulus info must be retrieved.
        all_stims = xget_events(mf, codes=[c_stim])
        all_times = np.array([s.time for s in all_stims])
        msgs = xget_events(mf, codes=[c_msg])
        errs = [m for m in msgs if \
                m.value['type'] == err_utime_type and \
                err_utime_msg in m.value['message']]

        for e in errs:
            t0 = e.time
            rel_t = all_times - t0
            # index to the closest prior stimulus
            ci = int(np.argsort(rel_t[rel_t < 0])[-1])
            # ...and its presented MWK time
            tc = all_stims[ci].time
            # get all affected sloppy stimuli
            ss = list(np.nonzero(np.array(img_onset) == tc)[0])

            new_img_onset = []
            new_img_id = []

            # I know this is kinda O(n^2), but since ss is short,
            # it's essentially O(n)
            for i, (io, ii) in enumerate(zip(img_onset, img_id)):
                if i in ss:
                    if verbose > 1:
                        print '** Removing sloppy:', img_id[i]
                    continue  # if stimulus i is sloppy, drop it.
                new_img_onset.append(io)
                new_img_id.append(ii)

            # keep only the non-sloppy presentations
            img_onset = new_img_onset
            img_id = new_img_id
        assert len(img_onset) == len(img_id)

    n_stim = len(img_onset)

    # MAC-NSP time translation
    if override_delay_us is not None:
        t_delay = toc['align_info']['delay']
        t_adjust = int(np.round(override_delay_us - t_delay))
    else:
        t_adjust = 0
    t_start = t_start0 - t_adjust
    t_stop = t_stop0 - t_adjust

    # yield metadata
    infometa = {'type': 'preamble', 'actvelecs': actvelecs,
            't_adjust': t_adjust}
    if fn_nev is not None:
        infometa['n_packets'] = br._n_packets
        infometa['chn_info'] = br.chn_info
    yield infometa

    # actual calculation -------------------------------
    t0_valid = []
    iid_valid = []
    for i in xrange(n_stim):
        t0 = img_onset[i]
        iid = img_id[i]
        # -- check whether this presentation was successful; if not, skip it.
        if np.sum((t_success > t0) & \
                (t_success < (t0 + t_success_lim))) < 1:
            continue
        t0_valid.append(t0)
        iid_valid.append(iid)
    n_stim_valid = len(t0_valid)

    # -- deal with readaheads
    t_diff = t_stop - t_start + 1  # 1 should be there as t_stop is inclusive.
    readaheads = np.zeros(n_stim_valid, 'int')
    i_cnkbegin = 0            # beginning of the chunk
    for i in xrange(1, n_stim_valid):
        t0 = t0_valid[i]
        t0p = t0_valid[i - 1]
        t0b = t0_valid[i_cnkbegin]

        if (t0 - t0p > DIFF_CUTOFF) or (t0 - t0b > MAX_READAHEAD):
            readaheads[i_cnkbegin:i] = t0p - t0b + t_diff
            i_cnkbegin = i
            continue
    readaheads[i_cnkbegin:] = t0 - t0b + t_diff

    # -- iter over all valid stims
    te_prev = -1
    for i in xrange(n_stim_valid):
        iid = iid_valid[i]
        readahead = int(readaheads[i])

        t0 = t0_valid[i]
        tb = t0 + t_start
        te = t0 + t_stop    # this is inclusive!!
        if only_new_t and tb <= te_prev:
            tb = te_prev + 1
        te_prev = te

        if verbose > 0:
            print 'At', (i + 1), 'out of', n_stim_valid, '         \r',
            sys.stdout.flush()

        # -- yield new image onset info
        infoimg = {'t_imgonset': t0, 'imgid': iid, 'i_img': i, 'type': 'begin'}
        yield infoimg

        spikes, spk_last = xget_events_readahead(mf, c_spikes, \
                (tb, te), readahead=readahead, peeklast=True)
        try:
            if br is not None:
                readahead_br = (spk_last.value['foffset'] - \
                        spikes[0].value['foffset']) / br._l_packet + 10
                readahead_br = int(readahead_br)
        except:
            # sometimes "spk_last" becomes None
            readahead_br = 1024  # some default value

        # -- yield actual spike info
        for i_spk, spk in enumerate(spikes):
            infospk = infoimg.copy()
            ch = ch0 = spk.value['id']
            pos = spk.value['foffset']
            t_abs = spk.time

            if ch_shift is not None:   # if mapping is requested
                if ch0 not in ch_shift:
                    continue
                ch = ch_shift[ch0]

            if ign_unregistered and ch not in actvelecs:
                continue

            # prepare yield fields
            t_rel = int(t_abs + t_adjust - t0)
            infospk['t_abs'] = t_abs
            infospk['t_rel'] = t_rel
            infospk['ch'] = ch
            infospk['ch_orig'] = ch0
            infospk['i_spk_per_img'] = i_spk
            infospk['pos'] = pos
            infospk['type'] = 'spike'
            # cluster info support for the old merged-mwk format
            infospk['cluster_id'] = None
            infospk['cluster_value'] = None
            if 'cluster_id' in spk.value:
                infospk['cluster_id'] = spk.value['cluster_id']
                infospk['cluster_value'] = spk.value

            # -- if no BRReader is specified, stop here
            if br is None:
                yield infospk
                continue

            # -- or... get the waveform
            try:
                wavinfo = br.read_once(pos=pos, proc_wav=True, \
                        readahead=readahead_br)
            except Exception, e:
                print '*** Exception:', e
                continue

            # apply new threshold if requested
            if new_thr is not None:
                if 'mult' in new_thr:
                    lthr = br.chn_info[ch0]['low_thr']
                    hthr = br.chn_info[ch0]['high_thr']
                    if lthr == 0:
                        thr0 = hthr
                    else:
                        thr0 = lthr
                    thr = thr0 * new_thr['mult']
                elif 'abs' in new_thr:
                    thr = new_thr['abs']

                wf0 = wavinfo['waveform']
                wf = set_new_threshold(wf0, thr)

                if wf is None:
                    continue   # `wf0` never crosses `thr`
                wavinfo['waveform'] = wf

            # done.
            infospk['wavinfo'] = wavinfo
            yield infospk

        # -- finished sweeping all spikes. yield end of image info
        infoimg['type'] = 'end'
        yield infoimg
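
A hypothetical consumer of getspk(): the generator yields one 'preamble' record, then 'begin', zero or more 'spike', and 'end' records per valid presentation, so dispatching on the 'type' field is enough to build per-image spike counts. The file names are placeholders.

# Hypothetical usage sketch; paths are placeholders.
spike_counts = {}
for info in getspk('session.mwk', fn_nev='session.nev'):
    if info['type'] == 'preamble':
        print('active electrodes: %s' % (info['actvelecs'],))
    elif info['type'] == 'begin':
        spike_counts.setdefault(info['imgid'], 0)
    elif info['type'] == 'spike':
        spike_counts[info['imgid']] += 1
print(spike_counts)
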
Example No. 18
def main(c_success=C_SUCCESS):
    #mf = MWKFile('../analysis/data_merged/Chabo_20110426_MovieGallant110413_S110204_RefAA_001.mwk')
    mf = MWKFile('../analysis/data_merged/Chabo_20110331_RSVPNicole305_S110204_RefAA_001.mwk')
    mf.open()
    #br = BRReader('../analysis/data_nev/Chabo_20110426_MovieGallant110413_S110204_RefAA_001.nev')
    br = BRReader('../analysis/data_nev/Chabo_20110331_RSVPNicole305_S110204_RefAA_001.nev')
    br.open()

    if PLOT_ONLY_ABV:
        adj_reject = 5./3.
        new_thr = {}
        for ch in br.chn_info:
            lthr = br.chn_info[ch]['low_thr']; hthr = br.chn_info[ch]['high_thr']
            if lthr == 0: thr0 = hthr
            else: thr0 = lthr
            new_thr[ch] = thr0 * adj_reject * 0.249
            print '*', ch, new_thr[ch]

    toc = xget_events(mf, codes=['#merged_data_toc'])[0].value
    # MAC-NSP time translation
    if OVERRIDE_DELAY_US != None: 
        t_delay = toc['align_info']['delay']
        t_adjust = int(np.round(OVERRIDE_DELAY_US - t_delay))
    else: 
        t_adjust = 0

    # at which times was a stimulus presentation marked successful?
    t_success = [ev.time for ev in mf.get_events(codes=[c_success])]
    t_success = np.array(t_success)

    img_onset, img_id = get_stim_info(mf)
    print 'Len =', len(img_onset)

    i_plot_ac = 0
    i_spk = 0
    i_spk2 = 0
    i_spk_nvis = 0
    i_spk_vis = 0
    M = np.zeros((MAX_SPK, DIM))
    M2 = np.zeros((MAX_SPK, DIM))
    Mnvis = np.zeros((MAX_SPK, DIM))
    Mvis  = np.zeros((MAX_SPK, DIM))
    t = (np.arange(DIM) - 11) * 1000. / 30000.   # in ms
    #t = np.arange(len(DIM))    # in index

    for t0 in img_onset:
        if i_spk >= MAX_SPK: break
        if i_spk_nvis >= MAX_SPK: break
        if i_spk_vis >= MAX_SPK: break
        if np.sum((t_success > t0) & (t_success < (t0 + T_SUCCESS))) < 1: continue

        spks = mf.get_events(codes=['merged_spikes'], time_range=[t0 +START, t0 + END])

        for spk in spks:
            ch = spk.value['id']
            ts = spk.time
            if ch != CH: continue

            offset = spk.value['foffset']
            wav = br.read_once(pos=offset, proc_wav=True)['waveform']
            y = np.array(wav) * 0.249  # in uV

            if PLOT_ONLY_DOWN:
                if y[12] > y[11]: continue
            if PLOT_ONLY_ABV:
                wav = set_new_threshold(wav, new_thr[ch], rng=(11, 13), i_chg=32) 
                if wav is None: continue
            if PLOT_ONLY_EXCHG:
                if np.max(y[:32]) < 0: continue

            t_rel = ts + t_adjust - t0
            # print t_rel
            # -- monitor noise?
            if (t_rel/1000.) % NPERIOD < 1. or (t_rel/1000.) % NPERIOD > (NPERIOD - 1):
                if t_rel > -50000 and t_rel < 50000:
                    M[i_spk] = y
                    i_spk += 1
                    pl.figure(ch)
                    pl.plot(t, y, 'k-')
                    pl.title('Noise responses (OFF region)')
                    pl.xlabel('Time/ms')
                    pl.ylabel(r'Response/$\mu$V')
                elif t_rel > 70000 and t_rel < 170000:
                    M2[i_spk2] = y
                    i_spk2 += 1
                    pl.figure(1000 + ch)
                    pl.plot(t, y, 'k-')
                    pl.title('Noise responses (ON region)')
                    pl.xlabel('Time/ms')
                    pl.ylabel(r'Response/$\mu$V')

            elif (t_rel/1000.) % NPERIOD > 2. and (t_rel/1000.) % NPERIOD < (NPERIOD - 2):
                if t_rel > -50000 and t_rel < 50000:
                    Mnvis[i_spk_nvis] = y
                    i_spk_nvis += 1
                    pl.figure(2000 + ch)
                    pl.plot(t, y, 'k-')
                    pl.title('Non-noise blank responses')
                    pl.xlabel('Time/ms')
                    pl.ylabel(r'Response/$\mu$V')
                elif t_rel > 70000 and t_rel < 170000:
                    Mvis[i_spk_vis] = y
                    i_spk_vis += 1
                    pl.figure(3000 + ch)
                    pl.plot(t, y, 'k-')
                    pl.title('Non-noise visual responses')
                    pl.xlabel('Time/ms')
                    pl.ylabel(r'Response/$\mu$V')

            i_plot_ac += 1

    M = M[:i_spk]
    # pl.figure()
    # pl.hist(np.ravel(M[:,12] - M[:,11]), bins=20)
    print 'i_spk =', i_spk
    print 'i_spk2 =', i_spk2
    print 'i_spk_nvis =', i_spk_nvis
    print 'i_spk_vis =', i_spk_vis
    print 'i_plot_ac =', i_plot_ac
    M = M[:i_spk,:]
    M2 = M2[:i_spk2,:]
    Mnvis = Mnvis[:i_spk_nvis,:]
    Mvis  = Mvis[:i_spk_vis,:] 

    pl.figure()
    xb, yb = myhist(np.min(M,axis=1), norm='peak')
    xb2, yb2 = myhist(np.min(M2,axis=1), norm='peak')
    xg, yg = myhist(np.min(Mnvis,axis=1), norm='peak')
    xv, yv = myhist(np.min(Mvis,axis=1), norm='peak')
    pl.plot(xb, yb, 'r-', label='Noise responses (OFF)')
    pl.plot(xb2, yb2, 'm-', label='Noise responses (ON)')
    pl.plot(xg, yg, 'g-', label='Non-noise responses (OFF)')
    pl.plot(xv, yv, 'b-', label='Non-noise responses (ON)')
    #pl.axvline(ptbad.mean(), color='r',alpha=0.3)
    #pl.axvline(ptgood.mean(), color='b',alpha=0.3)
    #pl.axvline(ptvis.mean(), color='g',alpha=0.3)
    pl.xlabel(r'Peak response/$\mu$V')
    pl.ylabel('Normalized probability')
    pl.legend(loc='upper left')

    pl.figure()
    # --
    m = M.mean(axis=0); s = M.std(axis=0, ddof=1)
    pl.plot(t, m, 'r-', label='Noise responses (OFF)')
    pl.fill_between(t, m-s, m+s, color='r', alpha=0.2)
    # --
    m = Mnvis.mean(axis=0); s = Mnvis.std(axis=0, ddof=1)
    pl.plot(t, m, 'g-', label='Non-noise responses (OFF)')
    pl.fill_between(t, m-s, m+s, color='g', alpha=0.2)
    # -- 
    pl.xlabel('Time/ms')
    pl.ylabel(r'Response/$\mu$V')
    pl.legend(loc='lower right')

    pl.figure()
    # --
    m = M2.mean(axis=0); s = M2.std(axis=0, ddof=1)
    pl.plot(t, m, 'm-', label='Noise responses (ON)')
    pl.fill_between(t, m-s, m+s, color='m', alpha=0.2)
    # --
    m = Mvis.mean(axis=0); s = Mvis.std(axis=0, ddof=1)
    pl.plot(t, m, 'b-', label='Non-noise responses (ON)')
    pl.fill_between(t, m-s, m+s, color='b', alpha=0.2)
    # --
    pl.xlabel('Time/ms')
    pl.ylabel(r'Response/$\mu$V')
    pl.legend(loc='lower right')

    pl.show()
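
The noise-period gating used in the loop above, isolated for clarity: a spike at relative time t_rel (in microseconds) is treated as noise-locked when it falls within 1 ms of a multiple of NPERIOD (in milliseconds). The NPERIOD value below is a placeholder, not taken from the original module.

# Sketch of the noise-window test used above.
NPERIOD = 1000. / 120.   # placeholder period in ms (e.g. a hypothetical 120 Hz source)

def in_noise_window(t_rel_us, nperiod_ms=NPERIOD):
    phase = (t_rel_us / 1000.) % nperiod_ms
    return phase < 1. or phase > (nperiod_ms - 1.)
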
Example No. 19
def firrate(fn_mwk, fn_out, override_delay_us=None, override_elecs=None, verbose=2, \
        extinfo=False, c_success=C_SUCCESS, t_success_lim=T_SUCCESS, proc_cluster=PROC_CLUSTER, max_clus=MAX_CLUS, \
        t_start0=T_START, t_stop0=T_STOP, c_msg=C_MSG, c_stim=C_STIM, exclude_img=EXCLUDE_IMG, \
        reject_sloppy=REJECT_SLOPPY, err_utime_msg=ERR_UTIME_MSG, err_utime_type=ERR_UTIME_TYPE, \
        movie_begin_fname=None, ign_unregistered=False, ch_shift=None):
    """TODO: merge with get_spk() in common_fn.py"""

    mf = MWKFile(fn_mwk)
    mf.open()

    # read TOC info from the "merged" mwk file
    toc = xget_events(mf, codes=[Merge.C_MAGIC])[0].value
    c_spikes = toc[Merge.K_SPIKE]                  # get the code name from the toc

    # at which times was a stimulus presentation marked successful?
    t_success = [ev.time for ev in xget_events(mf, codes=[c_success])]
    t_success = np.array(t_success)

    # get the active electrodes
    if override_elecs is None: actvelecs = toc[Merge.K_SPIKEWAV].keys() 
    else: actvelecs = override_elecs               # e.g, range(1, 97)
    n_actvelec = len(actvelecs)                    # number of active spike electrodes

    img_onset, img_id = get_stim_info(mf, extinfo=extinfo, exclude_img=exclude_img)

    # if requested, remove all sloppy (time spent during update main window > 2 frames)
    if reject_sloppy:
        # since get_stim_info ignores fixation point, all stimulus info must be retrieved.
        all_stims = xget_events(mf, codes=[c_stim])
        all_times = np.array([s.time for s in all_stims])
        msgs = xget_events(mf, codes=[c_msg])
        errs = [m for m in msgs if m.value['type'] == err_utime_type and \
                err_utime_msg in m.value['message']]

        for e in errs:
            t0 = e.time
            rel_t = all_times - t0
            # index to the closest prior stimulus
            ci = int(np.argsort(rel_t[rel_t < 0])[-1])
            # ...and its presented MWK time
            tc = all_stims[ci].time
            # get all affected sloppy stimuli
            ss = list(np.nonzero(np.array(img_onset) == tc)[0])

            new_img_onset = []
            new_img_id = []

            # I know this is kinda O(n^2), but since ss is short, it's essentially O(n)
            for i, (io, ii) in enumerate(zip(img_onset, img_id)):
                if i in ss:
                    if verbose > 1:
                        print '** Removing sloppy:', img_id[i]
                    continue      # if stimulus i is sloppy, drop it.
                new_img_onset.append(io)
                new_img_id.append(ii)

            # keep only the non-sloppy presentations
            img_onset = new_img_onset
            img_id = new_img_id
        assert len(img_onset) == len(img_id)

    n_stim = len(img_onset)

    # MAC-NSP time translation
    if override_delay_us != None: 
        t_delay = toc['align_info']['delay']
        t_adjust = int(np.round(override_delay_us - t_delay))
    else: 
        t_adjust = 0
    t_start = t_start0 - t_adjust
    t_stop = t_stop0 - t_adjust

    # actual calculation -------------------------------
    # all_spike[unit][img_id]: spike times relative to stimulus onset, per unit and image
    all_spike = {}
    all_foffset = {}
    clus_info = {}

    frame_onset = {}
    movie_iid = None
    movie_onsets = []
    movie_onset0 = 0

    t0_valid = []
    iid_valid = []
    for i in xrange(n_stim):
        t0 = img_onset[i]; iid = img_id[i]
        # -- check whether this presentation was successful; if not, skip it.
        if np.sum((t_success > t0) & (t_success < (t0 + t_success_lim))) < 1: continue
        t0_valid.append(t0)
        iid_valid.append(iid)
    n_stim_valid = len(t0_valid)

    t_slack = t_stop - t_start
    readaheads = np.zeros(n_stim_valid, 'int')
    i_cnkbegin = 0            # beginning of the chunk
    for i in xrange(1, n_stim_valid):
        t0 = t0_valid[i]
        t0p = t0_valid[i - 1]
        t0b = t0_valid[i_cnkbegin]

        if (t0 - t0p > DIFF_CUTOFF) or (t0 - t0b > MAX_READAHEAD):
            readaheads[i_cnkbegin:i] = t0p - t0b + t_slack
            i_cnkbegin = i
            continue
    readaheads[i_cnkbegin:] = t0 - t0b + t_slack

    for i in xrange(n_stim_valid):
        t0 = t0_valid[i]; iid = iid_valid[i]; readahead=int(readaheads[i])
        # -- process movie?
        if movie_begin_fname != None:
            # begin new clip?
            if movie_begin_fname in iid:
                # was there previous clip?
                if movie_iid != None:
                    if movie_iid not in frame_onset: frame_onset[movie_iid] = []
                    frame_onset[movie_iid].append(movie_onsets)
                # init for new clip
                movie_onsets = []
                iid = movie_iid = iid.replace(movie_begin_fname, '')
                movie_onset0 = t0
                movie_onsets.append(0)
            elif movie_iid != None:
                movie_onsets.append(t0 - movie_onset0)
                continue

        if verbose > 0: 
            print 'At', (i + 1), 'out of', n_stim_valid, '         \r',
            sys.stdout.flush()
   
        #spikes = xget_events(mf, codes=[c_spikes], time_range=[t0 + t_start, t0 + t_stop])
        spikes = xget_events_readahead(mf, c_spikes, (t0 + t_start, t0 + t_stop), readahead=readahead)
        actvunits = {}
        t_rel = {}
        foffset = {}
        # -- prepare the t_rel & foffset
        for ch in actvelecs:
            # if no clustering info is used...
            if not proc_cluster:
                t_rel[ch] = []
                foffset[ch] = []
                continue
            # if clustering info is used...
            cids = range(max_clus)
            actvunits[ch] = cids
            for cid in cids:
                t_rel[(ch,cid)] = []
                foffset[(ch,cid)] = []

        # -- put actual spiking info
        for s in spikes:
            ch = s.value['id']
            if ch_shift != None:   # if mapping is requested
                if ch not in ch_shift: continue
                ch = ch_shift[ch]

            if proc_cluster: 
                cid = s.value['cluster_id']
                key = (ch, cid)
            else:
                key = ch
          
            # put the relative time
            if ign_unregistered and key not in t_rel: continue
            t_rel[key].append(int(s.time + t_adjust - t0))
            foffset[key].append(int(s.value['foffset']))
            # update the clus_info and n_cluster
            if proc_cluster and key not in clus_info:
                clus_info[key] = s.value
                if s.value['nclusters'] > max_clus:
                    raise ValueError, '*** Unit %s: max_cluster(=%d) is smaller than the actual number of clusters(=%d)!' \
                            % (str(key), max_clus, s.value['nclusters'])

        # -- combine all
        for el in actvelecs:
            if proc_cluster: cids = actvunits[el]
            else: cids = [-1]
            for cid in cids:
                if proc_cluster: key = (el, cid)
                else: key = el
                if key not in all_spike: 
                    # not using defaultdict here:
                    # all_spike[key] = defaultdict(list)
                    all_spike[key] = {}
                    all_foffset[key] = {}
                if iid not in all_spike[key]:
                    all_spike[key][iid] = []
                    all_foffset[key][iid] = []
                all_spike[key][iid].append(t_rel[key])
                all_foffset[key][iid].append(foffset[key])

    # flush movie data
    if movie_iid != None:
        if movie_iid not in frame_onset: frame_onset[movie_iid] = []
        frame_onset[movie_iid].append(movie_onsets)

    # finished calculation....
    f = open(fn_out, 'w')
    out =  {'all_spike': all_spike, 
            't_start': t_start0,
            't_stop': t_stop0,
            't_adjust': t_adjust,
            'actvelecs': actvelecs}
    if proc_cluster:
        out['clus_info'] = clus_info
        out['max_clus'] = max_clus
    if movie_begin_fname != None:
        out['frame_onset'] = frame_onset
    pk.dump(out, f)

    # put all_foffset into the 2nd half to speed up reading
    out2 = {'all_foffset': all_foffset,}
    pk.dump(out2, f)

    f.close()
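
firrate() writes two pickled payloads back to back (the main results, then the file offsets), so reading the output takes two successive load() calls; a sketch assuming the file is read with the same Python/pickle setup that wrote it, with a placeholder path.

# Sketch: read back the two-part pickle written by firrate().
import pickle

with open('out.pk', 'rb') as fin:
    results = pickle.load(fin)    # all_spike, t_start, t_stop, t_adjust, ...
    offsets = pickle.load(fin)    # {'all_foffset': ...}
print(sorted(results.keys()))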