Code example #1
File: current_tools.py  Project: miketrumpis/ecogdata
def smooth_transfer_functions(bb, aa, mask_poles=-1, mask_zeros=-1):
    g = aa.shape[:2]
    cm = ChannelMap(np.arange(g[0] * g[1]), g, col_major=False)

    # going to punch out the corners of all maps, as well as ...
    # * poles that are < 1 Hz
    # * zeros that are < 1 Hz

    nzr = bb.shape[-1] - 1
    npl = aa.shape[-1] - 1

    aa = aa.reshape(len(cm), -1)
    bb = bb.reshape(len(cm), -1)

    k = np.zeros(len(aa))
    z = np.zeros((len(aa), nzr))
    p = np.zeros((len(aa), npl))

    m = np.ones(len(aa), dtype='?')
    for n in range(len(aa)):
        try:
            z[n], p[n], k[n] = signal.tf2zpk(bb[n], aa[n])
        except Exception:
            # a dimension error here means the transfer function is null:
            # mark this channel as masked
            m[n] = False

    # corners (indices hard-coded for an 8 x 8, 64-channel grid)
    m[[0, 7, 64 - 8, 64 - 1]] = False
    if mask_poles > 0:
        m[p.max(1) > -2 * np.pi * mask_poles] = False
    if mask_zeros > 0:
        m[z.min(1) < -2 * np.pi * mask_zeros] = False

    z, p, k = [x[m] for x in (z, p, k)]

    cm = cm.subset(m.nonzero()[0])

    # then the order-of-magnitude maps will be smoothed with a median filter
    z_ = cm.embed(np.log10(-z), axis=0, fill='median')
    p_ = cm.embed(np.log10(-p), axis=0, fill='median')
    k_ = cm.embed(np.log10(k), axis=0, fill='median')

    z = -np.power(10, z_)
    p = -np.power(10, p_)
    k = np.power(10, k_)

    aa_sm = np.zeros(g + (npl + 1, ))
    bb_sm = np.zeros(g + (nzr + 1, ))
    for ij in itertools.product(range(g[0]), range(g[1])):
        bb_sm[ij], aa_sm[ij] = signal.zpk2tf(z[ij], p[ij], k[ij])

    return bb_sm, aa_sm
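
A minimal usage sketch (not from the project): it builds a hypothetical 8 x 8 grid of identical first-order transfer functions and smooths them. The coefficients and grid size are assumptions for illustration, and the sketch presumes `smooth_transfer_functions` is importable from the module above.

import numpy as np
from scipy import signal

# hypothetical per-site coefficients: the same first-order transfer function
# (zero at -2*pi*300 rad/s, pole at -2*pi*0.5 rad/s) at every site of an 8 x 8 grid
b, a = signal.zpk2tf([-2 * np.pi * 300], [-2 * np.pi * 0.5], 1.0)
bb = np.tile(b, (8, 8, 1))   # numerator coefficients, shape (8, 8, 2)
aa = np.tile(a, (8, 8, 1))   # denominator coefficients, shape (8, 8, 2)

bb_sm, aa_sm = smooth_transfer_functions(bb, aa)
print(bb_sm.shape, aa_sm.shape)   # -> (8, 8, 2) (8, 8, 2)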
Code example #2
File: matlab_data.py  Project: miketrumpis/ecogdata
def load_preproc(f, load=True, sharedmem=True):
    shared_paths = ('/data',) if sharedmem else ()
    if load:
        with closing(tables.open_file(f)) as h5:
            pre = traverse_table(h5, load=True, shared_paths=shared_paths)
        # convert a few arrays
        for key in ('trig_coding', 'emap', 'egeo', 'orig_condition'):
            if key in pre and pre[key] is not None:
                arr = pre[key]
                pre[key] = arr.astype('i')
                if key == 'egeo':
                    pre[key] = tuple(pre[key])
        # transpose
        if pre.trig_coding is not None:
            pre.trig_coding = pre.trig_coding.T
            # convert indexing
            pre.trig_coding[0] -= 1
        if pre.emap is not None:
            pre.emap -= 1
            pre.chan_map = ChannelMap(pre.emap, pre.egeo, col_major=True)
    else:
        # note: with load=False the HDF5 handle is left open so nodes can be read lazily
        h5 = tables.open_file(f)
        pre = traverse_table(h5, load=False)
    return pre
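
A hedged usage sketch for the loader above; the file path is hypothetical, and only the attributes touched in the function (trig_coding, emap, egeo, chan_map) are assumed to be present in the returned record.

# hypothetical preprocessed HDF5 file; load=True reads everything into memory
pre = load_preproc('session01_preproc.h5', load=True, sharedmem=True)

# fields populated by the loader above (present when emap / trig_coding exist in the file)
print(pre.chan_map.geometry)     # electrode grid rebuilt from emap / egeo
print(pre.trig_coding.shape)     # trigger codes, transposed and 0-indexed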

Code example #3
    def make_channel_map(self):
        unmix = get_daq_unmix(self.daq, self.headstage, self.electrode)
        with h5py.File(self.file, 'r') as f:
            #ncol_full = f['numChan'][()]
            #ncol_data = f['numCol'][()]
            nrow = int(f['numRow'][()])

        pitch = pitch_lookup.get(self.electrode, 1.0)
        # go through channels,
        # if channel is data, put down the array matrix location
        # else, put down a disconnected channel
        data_rows = list(unmix.row)
        data_cols = list(unmix.col)
        chan_map = []
        no_con = []
        for c in self.data_channels:
            col = c // nrow
            row = c % nrow
            if col in data_cols:
                arow = data_rows.index(row)
                acol = data_cols.index(col)
                chan_map.append(arow * len(data_cols) + acol)
            else:
                no_con.append(c)
        nr = len(unmix.row)
        nc = len(unmix.col)
        cm = ChannelMap(chan_map, (nr, nc), pitch=pitch, col_major=False)
        return cm, no_con
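
The unflattening in the loop above (`col = c // nrow`, `row = c % nrow`) assumes column-major channel numbering; a tiny stand-alone illustration with a hypothetical 4-row layout:

nrow = 4
for c in (0, 1, 4, 9):
    col, row = c // nrow, c % nrow
    print(c, '->', (row, col))
# 0 -> (0, 0), 1 -> (1, 0), 4 -> (0, 1), 9 -> (1, 2)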
Code example #4
    def make_channel_map(self):
        unmix = get_daq_unmix(self.daq_type, self.headstage_type,
                              self.electrode)
        with h5py.File(self.data_file, 'r') as h5file:
            nrow = int(h5file['numRow'][()])
            ncol = int(h5file['numCol'][()])

        pitch = pitch_lookup.get(self.electrode, 1.0)
        # go through channels,
        # if channel is data, put down the array matrix location
        # else, put down a disconnected channel
        data_rows = list(unmix.row)
        data_cols = list(unmix.col)
        # data_chans = np.array(data_cols) * nrow + np.array(data_rows)
        electrode_chans = []
        chan_map = []
        other_chans = []
        for c in range(nrow * ncol):
            col = c // nrow
            row = c % nrow
            if col in data_cols:
                arow = data_rows.index(row)
                acol = data_cols.index(col)
                chan_map.append(arow * len(data_cols) + acol)
                electrode_chans.append(c)
            else:
                other_chans.append(c)
        nr = len(unmix.row)
        nc = len(unmix.col)
        cm = ChannelMap(chan_map, (nr, nc), pitch=pitch, col_major=False)
        return cm, electrode_chans, other_chans, []
Code example #5
def _load_cooked(pth, test, half=False, avg=False):
    # august 21 test -- now using a common test-name prefix
    # with different recording channels appended
    test_pfx = osp.join(pth, test)

    chans = sio.loadmat(test_pfx + '.ndata.mat')['raw_data'].T
    try:
        trigs = sio.loadmat(test_pfx + '.ndatastim.mat')['qw'].T
        trigs = trigs.squeeze()
    except IOError:
        trigs = np.zeros(10)

    _columns = np.roll(columns, 2)
    _rows = np.roll(rows, 2)

    electrode_chans = _rows >= 0

    chan_flat = mat_to_flat((8, 8),
                            _rows[electrode_chans],
                            7 - _columns[electrode_chans],
                            col_major=False)
    chan_map = ChannelMap(chan_flat, (8, 8), col_major=False, pitch=0.406)

    # don't need to convert dynamic range
    #chans = np.zeros(chans_int.shape, dtype='d')
    #dr_lo, dr_hi = _dyn_range_lookup[dyn_range]
    #chans = convert_dyn_range(chans_int, 2**20, (dr_lo, dr_hi))

    if avg:
        chans = 0.5 * (chans[:, 1::2] + chans[:, 0::2])
        trigs = trigs[:, 1::2]
    if half:
        chans = chans[:, 1::2]
        trigs = trigs[:, 1::2]

    data = shm.shared_copy(chans[electrode_chans])
    disconnected = chans[~electrode_chans]

    binary_trig = (np.any(trigs == 1, axis=0)).astype('i')
    if binary_trig.any():
        pos_edge = np.where(np.diff(binary_trig) > 0)[0] + 1
    else:
        pos_edge = ()

    return data, disconnected, trigs, pos_edge, chan_map
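
The rising-edge detection used here (and again in the next two examples) is a plain NumPy idiom; a minimal self-contained illustration:

import numpy as np

binary_trig = np.array([0, 0, 1, 1, 0, 1, 1, 1, 0], dtype='i')
# diff > 0 marks each 0 -> 1 transition; +1 shifts the index to the first high sample
pos_edge = np.where(np.diff(binary_trig) > 0)[0] + 1
print(pos_edge)   # -> [2 5]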
Code example #6
def _load_cooked_pre_august_2014(pth, test, dyn_range, Fs):
    test_dir = osp.join(pth, test)

    chans_int = sio.loadmat(osp.join(test_dir, 'recs.mat'))['adcreads_sort']
    trigs = sio.loadmat(osp.join(test_dir, 'trigs.mat'))['stim_trig_sort']
    order = sio.loadmat(osp.join(test_dir,
                                 'channels.mat'))['channel_numbers_sort']

    # this tells me how to reorder the row/column vectors above to match
    # the channel order in the file
    order = order[:, -1]

    _columns = columns[order]
    _rows = rows[order]

    electrode_chans = _rows >= 0

    chan_flat = mat_to_flat((8, 8),
                            _rows[electrode_chans],
                            7 - _columns[electrode_chans],
                            col_major=False)
    chan_map = ChannelMap(chan_flat, (8, 8), col_major=False, pitch=0.406)

    chans = np.zeros(chans_int.shape, dtype='d')
    dr_lo, dr_hi = _dyn_range_lookup[dyn_range]
    #chans = (chans_int * ( Fs * (dr_hi - dr_lo) * 2**-20 )) + dr_lo*Fs
    chans = convert_dyn_range(chans_int, 2**20, (dr_lo, dr_hi))

    data = shm.shared_copy(chans[electrode_chans])
    disconnected = chans[~electrode_chans]

    binary_trig = (np.any(trigs == 1, axis=0)).astype('i')
    if binary_trig.any():
        pos_edge = np.where(np.diff(binary_trig) > 0)[0] + 1
    else:
        pos_edge = ()

    return data, disconnected, trigs, pos_edge, chan_map
Code example #7
def load_openephys_ddc(exp_path,
                       test,
                       electrode,
                       drange,
                       trigger_idx,
                       rec_num='auto',
                       bandpass=(),
                       notches=(),
                       save=False,
                       snip_transient=True,
                       units='nA',
                       **extra):

    rawload = load_open_ephys_channels(exp_path, test, rec_num=rec_num)
    all_chans = rawload.chdata
    Fs = rawload.Fs

    d_chans = len(rows)
    ch_data = all_chans[:d_chans]
    if np.iterable(trigger_idx):
        trigger = all_chans[int(trigger_idx[0])]
    else:
        trigger = all_chans[int(trigger_idx)]

    electrode_chans = rows >= 0
    chan_flat = mat_to_flat((8, 8),
                            rows[electrode_chans],
                            7 - columns[electrode_chans],
                            col_major=False)
    chan_map = ChannelMap(chan_flat, (8, 8), col_major=False, pitch=0.406)

    dr_lo, dr_hi = _dyn_range_lookup[drange]  # drange 0 3 or 7
    ch_data = convert_dyn_range(ch_data, (-2**15, 2**15), (dr_lo, dr_hi))

    data = shm.shared_copy(ch_data[electrode_chans])
    disconnected = ch_data[~electrode_chans]

    trigger -= trigger.mean()
    binary_trig = (trigger > 100).astype('i')
    if binary_trig.any():
        pos_edge = np.where(np.diff(binary_trig) > 0)[0] + 1
    else:
        pos_edge = ()

    # change units if not nA
    if 'a' in units.lower():
        # this puts it as picoamps
        data *= Fs
        data = convert_scale(data, 'pa', units)
    elif 'c' in units.lower():
        data = convert_scale(data, 'pc', units)

    if bandpass:
        # bandpass is a (lo, hi) tuple; an empty tuple skips this block entirely
        (b, a) = ft.butter_bp(lo=bandpass[0], hi=bandpass[1], Fs=Fs)
        # the result is not reassigned, so this filtfilt presumably filters in place
        filtfilt(data, b, a)

    if notches:
        ft.notch_all(data, Fs, lines=notches, inplace=True, filtfilt=True)

    if snip_transient:
        snip_len = min(10000, pos_edge[0]) if len(pos_edge) else 10000
        data = data[..., snip_len:].copy()
        if len(disconnected):
            disconnected = disconnected[..., snip_len:].copy()
        if len(pos_edge):
            trigger = trigger[..., snip_len:]
            pos_edge -= snip_len

    dset = Bunch()
    dset.data = data
    dset.pos_edge = pos_edge
    dset.trigs = trigger
    dset.ground_chans = disconnected
    dset.Fs = Fs
    dset.chan_map = chan_map
    dset.bandpass = bandpass
    dset.transient_snipped = snip_transient
    dset.units = units
    dset.notches = notches
    return dset
Code example #8
    def add_to_tiles(self,
                     xdata: np.ndarray,
                     ydata: np.ndarray,
                     channel_map: ChannelMap,
                     initialize: bool = False):
        x_gap = 1.05 * (self.x_limits[1] - self.x_limits[0])
        y_gap = self.y_limits[1] - self.y_limits[0]

        # Store offsets as (x_off, y_off) and stack rows from top to bottom
        offsets = list()
        rows = channel_map.geometry[0]
        for i, j in zip(*channel_map.to_mat()):
            offsets.append((j * x_gap, (rows - i - 1) * y_gap))

        lines = list()
        # Assume xdata matches ydata, or can be tiled to do so
        if xdata.ndim == 1:
            xdata = np.tile(xdata, (len(ydata), 1))
        for n in range(len(ydata)):
            x0, y0 = offsets[n]
            lines.append(np.c_[xdata[n] + x0, ydata[n] + y0])
        lines = mpl.collections.LineCollection(
            lines, linewidths=0.5, colors=self.colors[self._line_count])
        self.ax.add_collection(lines)
        self._line_count += 1
        if initialize:
            self.ax.axis('off')
            # y_scale = y_gap / 3
            # y_scale = np.round(y_scale, decimals=-int(np.floor(np.log10(y_scale))))
            self.fig.tight_layout()
            self.fig.subplots_adjust(left=0.05)
            self.ax.autoscale_view(True, True, True)
            self.fig.canvas.draw()
            # convert a data-coordinate anchor point to axes (0-1) coordinates
            data_loc = self.ax.transData.transform(
                [xdata.min() - 0.1 * x_gap,
                 ydata.min() - 0.05 * y_gap])
            scale_loc = self.ax.transAxes.inverted().transform(data_loc)

            y_size, y_text = self._bar_info['y']
            ybar = AnchoredScaleBar(scale_loc,
                                    self.ax,
                                    size=y_size,
                                    label=y_text,
                                    pad=0,
                                    borderpad=0,
                                    a_loc='lower right',
                                    vertical=True,
                                    linekw=dict(color='k', lw=1.5))
            x_size, x_text = self._bar_info['x']
            xbar = AnchoredScaleBar(scale_loc,
                                    self.ax,
                                    size=x_size,
                                    label=x_text,
                                    pad=0,
                                    borderpad=0,
                                    a_loc='upper left',
                                    vertical=False,
                                    linekw=dict(color='k', lw=1.5))
            self.ax.add_artist(ybar)
            self.ax.add_artist(xbar)
Code example #9
    def launch(self):
        if not os.path.exists(self.file_data.file):
            return

        # Logic to normalize channel mapping
        # TODO: handle reference channels correctly
        if self.chan_map == 'unknown':
            try:
                nc = np.array(list(map(int, self.skip_chan.split(','))))
            except ValueError:
                # empty or malformed skip_chan string: no channels skipped
                nc = []
            geo = list(map(int, self.elec_geometry.split(',')))
            n_sig_chan = self.n_chan - len(nc)
            chan_map = ChannelMap(np.arange(n_sig_chan), geo)
        elif self.chan_map == 'active':
            chan_map, nc = self.file_data.make_channel_map()
        elif self.chan_map in _subset_chan_maps:
            cnx = self.chan_map_connectors.split(',')
            cnx = [c.strip() for c in cnx]
            chan_map, nc, rf = get_electrode_map(self.chan_map, connectors=cnx)
            nc = list(set(nc).union(rf))
        elif self.chan_map in _subset_shortcuts:
            cnx = _subset_shortcuts[self.chan_map]
            map_name, shortcut = self.chan_map.split('/')
            chan_map, nc, rf = get_electrode_map(map_name, connectors=cnx)
            nc = list(set(nc).union(rf))
        elif self.chan_map == 'settable':
            chan_map = self.set_chan_map
            nc = []
        elif self.chan_map == 'pickled':
            try:
                chan_map = find_pickled_map(self.file_data.file)
                nc = []
            except NoPickleError:
                MessageDialog(message='No pickled ChannelMap').open()
                return
        else:
            chan_map, nc, rf = get_electrode_map(self.chan_map)
            nc = list(set(nc).union(rf))

        # Check for transposed active data (coming from matlab)
        if isinstance(self.file_data,
                      ActiveArrayFileData) and self.file_data.is_transpose:
            self.file_data.create_transposed()
            print(self.file_data.file)

        with h5py.File(self.file_data.file, 'r') as h5:
            #x_scale = h5[self.file_data.fs_field].value ** -1.0
            x_scale = self.file_data.Fs**-1.0
            array_size = h5[self.file_data.data_field].shape[0]
        num_vectors = len(chan_map) + len(nc)

        data_channels = [
            self.file_data.data_channels[i] for i in range(num_vectors)
            if i not in nc
        ]

        # permute channels to stack rows
        chan_idx = list(zip(*chan_map.to_mat()))
        chan_order = chan_map.lookup(*list(zip(*sorted(chan_idx)[::-1])))
        data_channels = [data_channels[i] for i in chan_order]
        cls = type(chan_map)
        chan_map = cls([chan_map[i] for i in chan_order],
                       chan_map.geometry,
                       pitch=chan_map.pitch,
                       col_major=chan_map.col_major)

        filters = self.filters.make_pipeline(x_scale**-1.0)
        array = self.file_data._compose_arrays(filters)
        if self.screen_channels:
            data_channels, chan_map = \
              self._get_screen(array, data_channels, chan_map, x_scale**-1.0)

        rm = np.zeros((array_size, ), dtype='?')
        rm[data_channels] = True

        nav = h5mean(array.file_array, 0, rowmask=rm)
        nav *= self.file_data.y_scale

        modules = [ana_modules[k] for k in self.module_set]
        new_vis = FastScroller(array,
                               self.file_data.y_scale,
                               self.offset * 1e-6,
                               chan_map,
                               nav,
                               x_scale=x_scale,
                               load_channels=data_channels,
                               max_zoom=self.max_window_width)
        file_name = os.path.split(self.file_data.file)[1]
        file_name = os.path.splitext(file_name)[0]
        v_win = VisWrapper(new_vis,
                           x_scale=x_scale,
                           chan_map=chan_map,
                           y_spacing=self.offset,
                           modules=modules,
                           recording=file_name)
        view = v_win.default_traits_view()
        # TODO: it would be nice to be able to directly call launch() without first showing *this* object's panel
        view.kind = 'live'
        ui = v_win.edit_traits(view=view)
        return v_win
Code example #10
def get_chan_map(geometry, scrambled=False, col_major=False, pitch=1.0):
    idx = list(range(geometry[0] * geometry[1]))
    if scrambled:
        idx = np.random.permutation(idx)
    return ChannelMap(idx, geometry, col_major=col_major, pitch=pitch)
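
A short usage sketch for the helper above (the geometry and pitch values are illustrative):

# 4 x 4 grid in row-major channel order, 0.4 mm pitch
cm = get_chan_map((4, 4), pitch=0.4)
print(len(cm), cm.geometry)      # number of sites and grid shape

# scrambled variant, e.g. for testing order-independence of mapping code
cm_rand = get_chan_map((4, 4), scrambled=True, pitch=0.4)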
Code example #11
    def map_curves(self, channel_map: ChannelMap):
        return channel_map.subset(self._active_channels)