Example #1
    def test_sync(self):
        """
        This test just documents current usage in libraries, in case of refactoring.
        """
        sd = core.Bunch({'label': 'toto', 'ap': None, 'lf': 8})
        # Item access and attribute access resolve to the same object.
        self.assertTrue(sd['label'] is sd.label)
        self.assertTrue(sd['ap'] is sd.ap)
        self.assertTrue(sd['lf'] is sd.lf)
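These assertions rely on Bunch behaving like a dict whose keys are also exposed as attributes, with both access paths resolving to the same object. A minimal sketch of that pattern, using a hypothetical AttrDict rather than the actual core.Bunch implementation:

class AttrDict(dict):
    """Minimal dict with attribute access, for illustration only (not core.Bunch)."""
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError as exc:
            raise AttributeError(name) from exc

    def __setattr__(self, name, value):
        self[name] = value

sd = AttrDict({'label': 'toto', 'ap': None, 'lf': 8})
assert sd['label'] is sd.label  # item and attribute access return the same object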
Example #2
    def test_sync(self):
        """
        This test just documents current usage in libraries, in case of refactoring.
        """
        sd = core.Bunch({'label': 'toto', 'ap': None, 'lf': 8})
        # Item access and attribute access resolve to the same object.
        self.assertTrue(sd['label'] is sd.label)
        self.assertTrue(sd['ap'] is sd.ap)
        self.assertTrue(sd['lf'] is sd.lf)
        # With 0-d array values, to_df() returns the bunch itself unchanged.
        sda = core.Bunch({'label': np.array('toto'), 'ap': np.array(None), 'lf': np.array(8)})
        dfa = sda.to_df()
        self.assertTrue(sda is dfa)
        # With 1-d array values, each key becomes a column of the resulting frame.
        sdb = core.Bunch({'label': np.array(['toto', 'tata']),
                          'ap': np.array([None, 1]),
                          'lf': np.array([10, 8])})
        dfb = sdb.to_df()
        for k in sdb:
            self.assertTrue(np.all(sdb[k] == dfb[k].values))
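The second half of the test exercises Bunch.to_df(): with 0-d array values the bunch itself is returned, and with 1-d arrays of equal length each key behaves like a column of a pandas DataFrame. Assuming pandas is available, the equivalent frame can be built directly (pd.DataFrame is standard pandas API, not part of core.Bunch):

import numpy as np
import pandas as pd

data = {'label': np.array(['toto', 'tata']),
        'ap': np.array([None, 1]),
        'lf': np.array([10, 8])}
df = pd.DataFrame(data)  # one column per key, aligned on a default integer index
assert all(np.all(data[k] == df[k].values) for k in data)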
Example #3
    def test_bunch_io(self):
        a = np.random.rand(50, 1)
        b = np.random.rand(50, 1)
        abunch = core.Bunch({'a': a, 'b': b})

        with tempfile.TemporaryDirectory() as td:
            npz_file = Path(td).joinpath('test_bunch.npz')
            abunch.save(npz_file)
            another_bunch = core.Bunch.load(npz_file)
            for k in abunch:
                self.assertTrue(np.all(abunch[k] == another_bunch[k]))
            npz_filec = Path(td).joinpath('test_bunch_comp.npz')
            abunch.save(npz_filec, compress=True)
            another_bunch = core.Bunch.load(npz_filec)
            for k in abunch:
                self.assertTrue(np.all(abunch[k] == another_bunch[k]))
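The save/load calls above round-trip the bunch through an NPZ file, optionally compressed. A hedged sketch of the same round trip with plain numpy (np.savez_compressed and np.load are standard numpy API; the internals of core.Bunch.save/load may differ):

import tempfile
from pathlib import Path
import numpy as np

arrays = {'a': np.random.rand(50, 1), 'b': np.random.rand(50, 1)}

with tempfile.TemporaryDirectory() as td:
    npz_file = Path(td).joinpath('round_trip.npz')
    np.savez_compressed(npz_file, **arrays)  # analogue of save(..., compress=True)
    with np.load(npz_file) as npz:
        restored = {k: npz[k] for k in npz.files}

assert all(np.array_equal(arrays[k], restored[k]) for k in arrays)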
Example #4
def get_units_bunch(spks_b, *args):
    '''
    Returns a bunch whose keys are the spike-information labels from `spks_b` (e.g. unit IDs,
    times, features, etc.). The value for each key is itself a bunch, keyed by unit id (as a
    string), holding that unit's values for that label.

    Parameters
    ----------
    spks_b : bunch
        A spikes bunch containing fields with spike information (e.g. unit IDs, times, features,
        etc.) for all spikes.
    features : list of strings (optional positional arg)
        A list of labels of spike information (which must be keys in `spks_b`) specifying which
        labels to return as keys in `units_b`. If not provided, all keys in `spks_b` are returned
        as keys in `units_b`.

    Returns
    -------
    units_b : bunch
        A bunch keyed by labels of spike information (e.g. cluster IDs, times, features, etc.).
        Each value is a bunch keyed by unit id (as a string), holding the array of that unit's
        values for the corresponding label.

    Examples
    --------
    1) Create a units bunch given a spikes bunch, and get the amps for unit #4 from the units
    bunch.
        >>> import brainbox as bb
        >>> import alf.io as aio
        >>> import ibllib.ephys.spikes as e_spks
        (Note: if there is no 'alf' directory, create one from the 'ks2' output directory):
        >>> e_spks.ks2_to_alf(path_to_ks_out, path_to_alf_out)
        >>> spks_b = aio.load_object(path_to_alf_out, 'spikes')
        >>> units_b = bb.processing.get_units_bunch(spks_b)
        # Get amplitudes for unit 4.
        >>> amps = units_b['amps']['4']

    TODO add computation time estimate?
    '''

    # Initialize `units`
    units_b = core.Bunch()
    # Get the keys to return for `units_b`:
    if not args:
        feat_keys = list(spks_b.keys())
    else:
        feat_keys = args[0]
    # Get unit id for each spike and number of units. *Note: `n_units` might not equal `len(units)`
    # because some clusters may be empty (due to a "wontfix" bug in ks2).
    spks_unit_id = spks_b['clusters']
    n_units = np.max(spks_unit_id)
    units = np.unique(spks_b['clusters'])
    # For each label in `feat_keys`, collect each unit's values into a bunch, `feat_bunch`.
    # After iterating through all units, add `feat_bunch` under that label in `units_b`:
    for feat in feat_keys:
        # Initialize `feat_bunch` with a key for each unit.
        feat_bunch = core.Bunch(
            (str(unit), np.array([])) for unit in np.arange(n_units))
        for unit in units:
            unit_idxs = np.where(spks_unit_id == unit)[0]
            feat_bunch[str(unit)] = spks_b[feat][unit_idxs]
        units_b[feat] = feat_bunch
    return units_b
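To make the returned structure concrete, here is a toy, made-up spikes mapping run through the same grouping logic, with plain dicts standing in for core.Bunch (field names mirror the ALF spikes object, but the data are invented):

import numpy as np

# Toy data: six spikes assigned to clusters 0, 1 and 3 (cluster 2 is empty).
spks = {'clusters': np.array([0, 1, 0, 3, 1, 0]),
        'amps': np.array([0.1, 0.5, 0.2, 0.9, 0.4, 0.3])}

units = {}
for feat in ('amps',):
    units[feat] = {str(u): spks[feat][spks['clusters'] == u]
                   for u in np.unique(spks['clusters'])}

print(units['amps']['0'])  # -> [0.1 0.2 0.3], amplitudes of unit 0's spikes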