def start(self, current, selections):
    """Run a cross-validated grid search over tau and C on the child
    process and report the results.

    Spike trains from each selection are aligned on the first event of
    each segment, labeled with the selection index, and handed to a
    worker process (``self._run``) over a pipe.  Progress messages
    (strings) streamed back from the worker update the progress bar;
    the first non-string payload is the grid of scores, followed by the
    best score, the best parameter dict, a classification report and a
    confusion matrix.  Finally the scores are plotted and the figure is
    saved to ``self.output_file``.

    :param current: plugin context providing a ``progress`` reporter.
    :param selections: sequence of data selections; each must provide
        ``labeled_events()`` and ``spike_trains()``.  One class label
        per selection is used as the classification target.
    """
    current.progress.begin(self.get_name())
    param_grid = self.get_param_grid()
    # One tick per (fold, tau, C) combination evaluated by the search.
    current.progress.set_ticks(int(
        self.n_folds * len(param_grid[self.tau_key]) *
        len(param_grid[self.c_key])))
    print("Align spike trains")

    # FIXME: This needs to be done completely different as we need the
    # spike trains per unit which is a dict and not a list. Then the lists
    # in the dict have to be split into the individual sets for training,
    # test, cross-validation and so on. But because the lists are values in
    # a dict it cannot be done automatically by scikit learn.
    trains = []
    targets = []
    for i, s in enumerate(selections):
        events = s.labeled_events(self.align)
        for seg in events:
            # Align on first event in each segment
            events[seg] = events[seg][0]
        # Hoisted: s.spike_trains() was previously called twice per
        # selection (once for alignment, once for the label count).
        selection_trains = s.spike_trains()
        aligned_trains = rate_estimation.aligned_spike_trains(
            selection_trains, events)
        trains.extend(aligned_trains)
        targets.extend([i] * len(selection_trains))

    parent_conn, child_conn = multiprocessing.Pipe()
    p = multiprocessing.Process(
        target=self._run, args=(child_conn, trains, targets))
    print("Starting grid search")
    p.start()
    try:
        # Worker protocol: progress messages (str) until the grid of
        # scores arrives, then best_score, best_params, report, confmat
        # in that exact order.
        while True:
            data = parent_conn.recv()
            if isinstance(data, str):
                self._process_gridsearch_msg(current.progress, data)
            else:
                grid_scores = data
                break
        best_score = parent_conn.recv()
        best_params = parent_conn.recv()
        report = parent_conn.recv()
        confmat = parent_conn.recv()
    finally:
        # Always close our pipe end and reap the worker, even if a
        # recv fails — otherwise the child process is leaked.
        parent_conn.close()
        p.join()

    print("Best score %.3f (tau=%s, C=%f)." %
          (best_score, str(best_params[self.tau_key]),
           best_params[self.c_key]))
    print(report)
    print("Confusion matrix:")
    print(confmat)
    self.plot_gridsearch_scores(grid_scores)
    current.progress.done()
    plt.savefig(self.output_file)
def start(self, current, selections):
    """Run a cross-validated grid search over metric, tau and C in a
    worker process and report the results.

    Spike trains of every selection are aligned on the first event per
    segment and labeled with the selection index.  The worker
    (``self._run``) streams progress strings over a pipe, then the grid
    scores, best score, best parameters, classification report and
    confusion matrix.  The per-metric score plot is saved to
    ``self.output_file``.
    """
    current.progress.begin(self.get_name())
    param_grid = self.get_param_grid()
    # One tick for every (fold, metric, tau, C) combination.
    total_ticks = (self.n_folds * len(param_grid[self.metric_key]) *
                   len(param_grid[self.tau_key]) *
                   len(param_grid[self.c_key]))
    current.progress.set_ticks(int(total_ticks))
    print("Align spike trains")

    all_trains = []
    labels = []
    for class_idx, sel in enumerate(selections):
        events = sel.labeled_events(self.align)
        for seg in events:
            # Align on first event in each segment
            events[seg] = events[seg][0]
        all_trains.extend(rate_estimation.aligned_spike_trains(
            sel.spike_trains(), events))
        labels.extend([class_idx] * len(sel.spike_trains()))

    recv_end, send_end = multiprocessing.Pipe()
    worker = multiprocessing.Process(
        target=self._run, args=(send_end, all_trains, labels))
    print("Starting grid search")
    worker.start()

    # Strings from the pipe are progress messages; the first
    # non-string payload is the grid of scores.
    data = recv_end.recv()
    while isinstance(data, str):
        self._process_gridsearch_msg(current.progress, data)
        data = recv_end.recv()
    grid_scores = data

    best_score = recv_end.recv()
    best_params = recv_end.recv()
    report = recv_end.recv()
    confmat = recv_end.recv()
    recv_end.close()
    worker.join()

    print("Best score %.3f with %s (tau=%s, C=%f)." %
          (best_score, metric_defs[best_params[self.metric_key]][0],
           str(best_params[self.tau_key]), best_params[self.c_key]))
    print(report)
    print("Confusion matrix:")
    print(confmat)
    self.plot_gridsearch_scores_per_metric(grid_scores)
    current.progress.done()
    plt.savefig(self.output_file)