def raw_dataset(request):
    """Yield a Bunch describing a raw extracellular dataset.

    Parametrized on ``request.param``:

    - ``'real'``: 45000 samples of a downloaded 32-channel int16 recording,
      with the 1x32 Buzsaki probe and per-channel thresholds.
    - ``'null'``: 25000 samples of all-zero traces on a small 4-channel probe.
    - ``'artificial'``: 25000 samples of synthetic traces with two amplified
      segments (channels 1 and 3) so that spikes are detectable.

    The yielded Bunch carries the traces, the probe, the spikedetekt
    parameters, and the derived waveform length.
    """
    sample_rate = 20000
    # Load the default spikedetekt parameters shipped next to this test file.
    settings_path = op.join(op.realpath(op.dirname(__file__)),
                            'traces/default_settings.py')
    params = _read_python(settings_path)['spikedetekt']

    kind = request.param
    if kind == 'real':
        data_path = download_test_data('test-32ch-10s.dat')
        traces = np.fromfile(data_path, dtype=np.int16).reshape((200000, 32))
        # Keep only the first 45000 samples to keep the test fast.
        traces = traces[:45000]
        n_samples, n_channels = traces.shape
        params['use_single_threshold'] = False
        probe = load_probe('1x32_buzsaki')
    else:
        # Small synthetic probe: one triangle group plus a lone channel.
        probe = {
            'channel_groups': {
                0: {
                    'channels': [0, 1, 2],
                    'graph': [[0, 1], [0, 2], [1, 2]],
                },
                1: {
                    'channels': [3],
                    'graph': [],
                    'geometry': {3: [0., 0.]},
                },
            }
        }
        n_samples, n_channels = 25000, 4
        if kind == 'null':
            traces = np.zeros((n_samples, n_channels))
        elif kind == 'artificial':
            traces = artificial_traces(n_samples, n_channels)
            # Boost two short windows so the detector has something to find.
            traces[5000:5010, 1] *= 5
            traces[15000:15010, 3] *= 5

    n_samples_w = params['extract_s_before'] + params['extract_s_after']
    yield Bunch(n_channels=n_channels,
                n_samples=n_samples,
                sample_rate=sample_rate,
                n_samples_waveforms=n_samples_w,
                traces=traces,
                params=params,
                probe=probe,
                )
def raw_dataset(request):
    """Yield a Bunch with raw traces, probe, and detection parameters.

    ``request.param`` selects the dataset flavor: ``'real'`` (downloaded
    32-channel int16 recording, truncated to 45000 samples), ``'null'``
    (zeros on a 4-channel toy probe), or ``'artificial'`` (synthetic traces
    with two amplified windows).
    """
    sample_rate = 20000
    here = op.realpath(op.dirname(__file__))
    params = _read_python(
        op.join(here, 'traces/default_settings.py'))['spikedetekt']
    data_type = request.param

    if data_type == 'real':
        traces_path = download_test_data('test-32ch-10s.dat')
        all_traces = np.fromfile(traces_path, dtype=np.int16)
        traces = all_traces.reshape((200000, 32))[:45000]  # speed: short cut
        n_samples, n_channels = traces.shape
        params['use_single_threshold'] = False
        probe = load_probe('1x32_buzsaki')
    else:
        # Toy probe: a fully-connected 3-channel group and an isolated
        # channel with explicit geometry.
        group_0 = {'channels': [0, 1, 2],
                   'graph': [[0, 1], [0, 2], [1, 2]]}
        group_1 = {'channels': [3],
                   'graph': [],
                   'geometry': {3: [0., 0.]}}
        probe = {'channel_groups': {0: group_0, 1: group_1}}
        n_samples, n_channels = 25000, 4
        if data_type == 'null':
            traces = np.zeros((n_samples, n_channels))
        elif data_type == 'artificial':
            traces = artificial_traces(n_samples, n_channels)
            # Make two short high-amplitude events for the detector.
            traces[5000:5010, 1] *= 5
            traces[15000:15010, 3] *= 5

    # Waveform length is derived from the extraction window in the params.
    n_samples_w = params['extract_s_before'] + params['extract_s_after']
    yield Bunch(n_channels=n_channels,
                n_samples=n_samples,
                sample_rate=sample_rate,
                n_samples_waveforms=n_samples_w,
                traces=traces,
                params=params,
                probe=probe,
                )
def model(tempdir):
    """Return a mock model (Bunch) populated with artificial data.

    Builds 51 spikes across 3 clusters on an 11-channel probe, with
    waveforms, masks, traces, features, and the stacked features/masks
    array used by the KWIK-style stores.

    NOTE: the ``artificial_*`` helpers consume the global NumPy RNG, so
    their call order below is significant and must not be changed.
    """
    n_spikes, n_clusters = 51, 3
    n_samples_w, n_samples_t = 31, 20000
    n_channels, n_features = 11, 4

    mod = Bunch()
    mod.path = op.join(tempdir, 'test')
    mod.n_channels = n_channels
    # TODO: test with permutation and dead channels
    mod.channel_order = None
    mod.n_spikes = n_spikes
    mod.sample_rate = 20000.
    mod.duration = n_samples_t / float(mod.sample_rate)

    # Monotonic spike times normalized to [0, 1].
    mod.spike_times = artificial_spike_samples(n_spikes) * 1.
    mod.spike_times /= mod.spike_times[-1]
    mod.spike_clusters = artificial_spike_clusters(n_spikes, n_clusters)
    mod.cluster_ids = np.unique(mod.spike_clusters)
    mod.channel_positions = staggered_positions(n_channels)
    mod.all_waveforms = artificial_waveforms(n_spikes, n_samples_w, n_channels)
    mod.all_masks = artificial_masks(n_spikes, n_channels)
    mod.all_traces = artificial_traces(n_samples_t, n_channels)
    mod.all_features = artificial_features(n_spikes, n_channels, n_features)

    # Stack flattened features with per-feature-repeated masks, giving the
    # (n_spikes, n_channels * n_features, 2) features_masks array.
    flat_features = mod.all_features.reshape((n_spikes, -1))
    repeated_masks = np.repeat(mod.all_masks, n_features, axis=1)
    mod.all_features_masks = np.dstack((flat_features, repeated_masks))

    mod.spikes_per_cluster = _spikes_per_cluster(mod.spike_clusters)
    mod.n_features_per_channel = n_features
    mod.n_samples_waveforms = n_samples_w
    # All clusters start unsorted (no group assigned).
    mod.cluster_groups = {c: None for c in range(n_clusters)}
    return mod