Example #1
# Example 1: decode a smoothed stimulus from clustered spike data with a
# Poisson naive-Bayes decoder. Loads the restaurant-row dataset, smooths the
# stimulus, bins spikes into per-neuron counts, and builds the regressor.
data = load_restaurant_row()

# Clean up stimulus data: smooth the raw stimulus trace over time.
stimulus_times = data['full_stimulus_times']
stimulus_data = data['full_stimulus']
stimulus_data = smooth_stimulus(stimulus_times, stimulus_data)
# Optional mean-centering of each stimulus dimension (currently disabled).
# stimulus_data -= np.mean(stimulus_data, axis=0)[np.newaxis, :]

# Convert to a single signal
# Ensure unique cell ids
# Bin time, get firing rates with history in previous bins
# Notice firing rates are unnormalized which means its just spike counts
# NOTE(review): with flatten_history=False, X presumably has shape
# (n_bins, n_history_bins, n_neurons) — confirm against process_clustered_signal_data.
T, X = process_clustered_signal_data(data['signal_times'],
                                     data['signal_cellids'],
                                     temporal_bin_size=RESOLUTION,
                                     bins_before=5,
                                     bins_after=5,
                                     flatten_history=False,
                                     normalize_by_max_rate=False,
                                     normalize_by_bin_size=False)

# Sum over the history to get a per neuron spike count over that whole time range
# (collapses the history axis, leaving one count per neuron per time bin).
X = np.sum(X, axis=1)

# Discard neurons with a mean firing rate outside bounds (currently disabled).
# spikes_second =  X.sum(axis=0) / (T.max() - T.min()) / 6
# X = X[:, spikes_second < 200]

# Poisson Bayesian decoder over STIMULUS_BINS discretized stimulus values;
# n_jobs=-1 uses all cores, use_prior=False assumes a flat stimulus prior.
pipeline = PoissonBayesianRegressor(ybins=STIMULUS_BINS,
                                    n_jobs=-1,
                                    use_prior=False)
Example #2
# Example 2: compare several estimators on decoding the stimulus from binned
# spike counts, using cross-validation and a shared preprocessing pipeline.
# Load data
from mlneuro.datasets import load_restaurant_row
data = load_restaurant_row()

# Clean up stimulus data: smooth the raw stimulus trace over time.
stimulus_times = data['full_stimulus_times']
stimulus_data = data['full_stimulus']
stimulus_data = smooth_stimulus(stimulus_times, stimulus_data)
# Mask selecting samples whose stimulus gradient (movement speed) is within
# [min_g, max_g].
# NOTE(review): y_train_mask is computed here but training_mask=None is passed
# to generate_crossvalidator below — confirm whether the mask was meant to be
# used there.
y_train_mask = stimulus_gradient_mask(stimulus_times, stimulus_data, min_g=8, max_g=500)

# Convert to a single signal
# Ensure unique cell ids
# Bin time, get firing rates with history in previous bins
# flatten_history=True concatenates the history bins into one feature vector.
T, X = process_clustered_signal_data(data['signal_times'], data['signal_cellids'],
                                        temporal_bin_size=0.5,
                                        bins_before=2,
                                        bins_after=2,
                                        flatten_history=True)
# Get stimulus values at spike times
y = stimulus_at_times(stimulus_times, stimulus_data, T)

# Get scores and display for each estimator
results = {}
for name, estimator in ESTIMATORS.items():
    print('[{}] Starting...'.format(name))
    # Standardize features before fitting each candidate estimator.
    pipeline = make_pipeline(StandardScaler(), estimator)

    cv = generate_crossvalidator(pipeline, X, y, training_mask=None, n_splits=N_FOLDS)

    # Use the convenient cross_validate function to score
    results[name] = cross_validate(pipeline, X, y, scoring=SCORERS, cv=cv, return_train_score=True)
Example #3
                      [4040, 4050],
                      [4060, 4070],
                      [4200, 4300],
                      [4400, 4550]
                     ]

# Non-sequential, explicitly specified time-bin edges (list of [start, stop]
# pairs defined above) converted to an array for the binning helper.
temporal_bin_edges = np.array(temporal_bin_edges)

# Convert to a single signal
# Ensure unique cell ids
# Bin time, get firing rates
# Make sure there are no bins before and after since they are non-sequential
# Make sure that the firing rate is normalized by bin size (default=True) to account for different length bins
T, X = process_clustered_signal_data(data['signal_times'], data['signal_cellids'],
                                    temporal_bin_size=temporal_bin_edges,
                                    bins_before=0,
                                    bins_after=0,
                                    normalize_by_bin_size=True)

# Get the stimulus value at the spike times
y = stimulus_at_times(data['full_stimulus_times'], data['full_stimulus'], T)

# Split the data, not shuffling so that the displayed plot will be over a small range
X_train, X_test, T_train, T_test, y_train, y_test = train_test_split(X, T, y, test_size=0.25, shuffle=False)

# Build a basic pipeline
# Notice, the SGDRegressor only supports single dimensional outputs so it is wrapped
# in a `MultiOutputRegressor` meta-class which fits an `SGDRegressor` per output dimension
pipeline = make_pipeline(StandardScaler(), MultiOutputRegressor(SGDRegressor()))

# Fit and predict on the pipeline