Example #1
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_validate

# smooth_stimulus, stimulus_gradient_mask, process_clustered_signal_data,
# stimulus_at_times and generate_crossvalidator are assumed to come from the
# project's decoding utilities; `data`, ESTIMATORS, SCORERS and N_FOLDS are
# defined earlier in the original script.

# Clean up stimulus data
stimulus_times = data['full_stimulus_times']
stimulus_data = data['full_stimulus']
stimulus_data = smooth_stimulus(stimulus_times, stimulus_data)
y_train_mask = stimulus_gradient_mask(stimulus_times, stimulus_data, min_g=8, max_g=500)
# Note: this mask is computed but not passed to generate_crossvalidator below
# (training_mask=None); it could be supplied as training_mask=y_train_mask

# Convert to a single signal
# Ensure unique cell ids
# Bin time and compute firing rates, including spike-count history from the
# surrounding bins (bins_before/bins_after)
T, X = process_clustered_signal_data(data['signal_times'], data['signal_cellids'],
                                        temporal_bin_size=0.5,
                                        bins_before=2,
                                        bins_after=2,
                                        flatten_history=True)
# Get stimulus values at the sample times in T
y = stimulus_at_times(stimulus_times, stimulus_data, T)

# Get scores and display for each estimator
results = {}
for name, estimator in ESTIMATORS.items():
    print('[{}] Starting...'.format(name))
    pipeline = make_pipeline(StandardScaler(), estimator)

    cv = generate_crossvalidator(pipeline, X, y, training_mask=None, n_splits=N_FOLDS)

    # Use the convenient cross_validate function to score
    results[name] = cross_validate(pipeline, X, y, scoring=SCORERS, cv=cv, return_train_score=True)

    fit_time = results[name]['fit_time']
    score_time = results[name]['score_time']
    print('[{}] Done (mean fit time: {:.3f}s, mean score time: {:.3f}s)'.format(
        name, np.mean(fit_time), np.mean(score_time)))
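
The ESTIMATORS, SCORERS, and N_FOLDS constants are defined elsewhere in the original script. A minimal sketch of plausible definitions, together with a summary of the dictionaries returned by cross_validate, might look like the following (the specific estimators and scorers here are illustrative assumptions, not the original values):

from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsRegressor

# Illustrative stand-ins for the constants used above (assumptions)
ESTIMATORS = {
    'ridge': Ridge(alpha=1.0),
    'knn': KNeighborsRegressor(n_neighbors=10),
}
SCORERS = {'r2': 'r2', 'mse': 'neg_mean_squared_error'}
N_FOLDS = 5

# cross_validate returns one array per metric, with one entry per fold
for name, res in results.items():
    for key in ('test_r2', 'train_r2'):
        print('[{}] {}: mean {:.3f}, std {:.3f}'.format(
            name, key, np.mean(res[key]), np.std(res[key])))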
Example #2
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import SGDRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.model_selection import train_test_split

# process_clustered_signal_data, stimulus_at_times and multi_to_single_signal
# are assumed to come from the project's decoding utilities; `data` and
# `temporal_bin_edges` are defined earlier in the original script.
temporal_bin_edges = np.array(temporal_bin_edges)

# Convert to a single signal
# Ensure unique cell ids
# Bin time and compute firing rates
# Use no history bins (bins_before=0, bins_after=0) since the bins are non-sequential
# Normalize the firing rate by bin size (default=True) to account for different bin lengths
T, X = process_clustered_signal_data(data['signal_times'], data['signal_cellids'],
                                    temporal_bin_size=temporal_bin_edges,
                                    bins_before=0,
                                    bins_after=0,
                                    normalize_by_bin_size=True)

# Get the stimulus values at the sample times in T
y = stimulus_at_times(data['full_stimulus_times'], data['full_stimulus'], T)

# Split the data, not shuffling so that the displayed plot will be over a small range
X_train, X_test, T_train, T_test, y_train, y_test = train_test_split(X, T, y, test_size=0.25, shuffle=False)

# Build a basic pipeline
# Note: SGDRegressor only supports one-dimensional targets, so it is wrapped
# in a `MultiOutputRegressor` meta-estimator that fits one `SGDRegressor` per output dimension
pipeline = make_pipeline(StandardScaler(), MultiOutputRegressor(SGDRegressor()))

# Fit and predict on the pipeline
pipeline.fit(X_train, y_train)
y_predicted = pipeline.predict(X_test)

# Already a single signal, but this also sorts the arrays by time
T_test, (y_predicted, y_test) = multi_to_single_signal([T_test], [y_predicted], [y_test])
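
To gauge how well the pipeline recovers the stimulus, the sorted predictions can be scored and plotted against the true values over the held-out range. A minimal sketch, assuming matplotlib is available (the scoring and plotting code below is not part of the original example):

import matplotlib.pyplot as plt
from sklearn.metrics import r2_score

# Score per output dimension on the held-out split
print('R^2 per output dimension:', r2_score(y_test, y_predicted, multioutput='raw_values'))

# Plot predicted vs. true stimulus over the (unshuffled) test range
fig, ax = plt.subplots()
ax.plot(T_test, y_test, label='true stimulus')
ax.plot(T_test, y_predicted, '--', label='predicted stimulus')
ax.set_xlabel('time')
ax.set_ylabel('stimulus value')
ax.legend()
plt.show()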