Example #1
0
class NeuCube():
    """
    This class integrates all stages of the NeuCube model: EEG spike
    encoding, the spiking reservoir, and the deSNN output classifier.
    """

    def __init__(self, input_electrodes, number_of_training_samples,
                 signal_duration, signal_timestep, simulation_timestep,
                 subject):
        """Create the encoder, reservoir and classifier for one subject.

        All file I/O of the sub-components is rooted at the current
        working directory.
        """
        self.path = os.getcwd()
        self.input_electrodes = input_electrodes
        self.number_of_training_samples = number_of_training_samples
        self.encoder = Encoder(self.path, len(input_electrodes),
                               number_of_training_samples, signal_duration,
                               signal_timestep, subject)
        self.reservoir = NeuCubeReservoir(self.path, simulation_timestep)
        self.classifier = Classifier()

    def _read_class_labels(self):
        """Read the target class labels from input_stage_3/tar_class_labels.txt.

        Returns a list of rows (each a list of strings) as produced by
        csv.reader with a space delimiter.  Uses a context manager so the
        file handle is closed deterministically (the original code leaked
        the handle returned by open()).
        """
        labels_path = os.path.join(self.path, 'input_stage_3',
                                   'tar_class_labels.txt')
        with open(labels_path, 'r') as labels_file:
            return list(csv.reader(labels_file, delimiter=' '))

    def _reservoir_size(self):
        """Number of neurons in the current reservoir structure."""
        return len(self.reservoir.reservoir_structure.get_positions())

    def _add_output_neuron(self, sample_spikes, alpha, mod, drift_up,
                           drift_down, label, save_neurons, sample_index):
        """Create one deSNN output neuron for a training sample, optionally
        persist it, and register it with the classifier."""
        neuron = Output_Neuron(sample_spikes, self._reservoir_size(), alpha,
                               mod, drift_up, drift_down, label)
        if save_neurons:
            neuron.save_whole_neuron(self.path, sample_index)
        self.classifier.add_neuron(neuron)

    def encode_eeg_input(self, encoding_method, save_data, plot_data, subject):
        """Encode the EEG data in the input_stage_1 folder into spike trains.

        Uses the given encoding_method.  When save_data or plot_data is
        set, the spike trains are also decoded back to signals so the
        reconstruction (and its error) can be saved/plotted for quality
        inspection.  Returns the encoded spike trains.
        """
        spike_trains = self.encoder.encode(encoding_method, subject)
        if save_data or plot_data:
            # Decode/compute error only when the results will actually be
            # saved or plotted; the return values themselves are not needed.
            self.encoder.decode(encoding_method)
            self.encoder.calc_error()
            if save_data:
                self.encoder.save_output()
                self.encoder.save_rec_sig()
            if plot_data:
                self.encoder.plot_output(encoding_method)
                self.encoder.plot_rec_sig()
        return spike_trains

    def create_reservoir(self, new_reservoir, plot_stability, input_electrodes,
                         inhibitory_split, connection_probability,
                         small_world_conn_factor, max_syn_len, w_dist_ex_mean,
                         w_dist_inh_mean, save_structure):
        """Build a new reservoir structure or load a previously saved one.

        When new_reservoir is True the structure is (re-)initialized from
        the given connectivity parameters (and optionally its stability is
        plotted); otherwise a stored structure is loaded and all structure
        parameters are ignored.
        """
        if not new_reservoir:
            self.reservoir.load_reservoir_structure()
            return
        self.reservoir.initialize_reservoir_structure(
            input_electrodes, inhibitory_split, connection_probability,
            small_world_conn_factor, max_syn_len, w_dist_ex_mean,
            w_dist_inh_mean, save_structure)
        if plot_stability:
            self.reservoir.reservoir_structure.calculate_stability(
                inhibitory_split, w_dist_ex_mean, w_dist_inh_mean)

    def train_reservoir_STDP(self, use_STDP, encoding_method, simulation_time,
                             number_of_neurons_per_core,
                             number_of_training_samples, spike_train_data,
                             tau_plus, tau_minus, A_plus, A_minus, w_min,
                             w_max, save_training_result, plot_spikes,
                             plot_voltage):
        """Train the reservoir with STDP learning; a no-op when use_STDP
        is False."""
        if use_STDP:
            self.reservoir.train_network_STDP(
                encoding_method, simulation_time, number_of_neurons_per_core,
                number_of_training_samples, spike_train_data, tau_plus,
                tau_minus, A_plus, A_minus, w_min, w_max, save_training_result,
                plot_spikes, plot_voltage)

    def train_deSNN(self, load_spikes, save_reservoir_spikes, save_neurons,
                    encoding_method, simulation_time,
                    number_of_neurons_per_core, number_of_training_samples,
                    spike_train_data, tau_plus, tau_minus, A_plus, A_minus,
                    w_min, w_max, alpha, mod, drift_up, drift_down,
                    number_of_classes, plot_spikes, plot_voltage):
        """Train the deSNN output layer, one output neuron per sample.

        Reservoir spikes are either loaded from storage (load_spikes=True)
        or produced by running the reservoir on spike_train_data.  Returns
        the classifier's separation measure.
        """
        print('Training the deSNN network...')
        tar_class_labels = self._read_class_labels()
        if len(tar_class_labels) < number_of_training_samples:
            # NOTE(review): original behavior is kept -- even on this
            # error we still fall through to the separation call below.
            print('Error: Not enough class labels for number of samples!')
        else:
            tar_class_labels = tar_class_labels[:number_of_training_samples]
            if load_spikes:  # load spikes from storage
                for s in range(number_of_training_samples):
                    sample_spikes = self.classifier.load_reservoir_spikes(
                        os.path.join(
                            self.path, 'input_stage_3',
                            'reservoir_spikes_sam_' + str(s + 1) + '.txt'))
                    self._add_output_neuron(sample_spikes, alpha, mod,
                                            drift_up, drift_down,
                                            tar_class_labels[s],
                                            save_neurons, s)
            else:  # create spikes from reservoir
                STDP = False  # enable/disable STDP during deSNN training
                reservoir_spikes = self.reservoir.train_network_deSNN(
                    encoding_method, simulation_time,
                    number_of_neurons_per_core, number_of_training_samples,
                    spike_train_data, tau_plus, tau_minus, A_plus, A_minus,
                    w_min, w_max, STDP, plot_spikes, plot_voltage,
                    save_reservoir_spikes)
                for s in range(number_of_training_samples):
                    # Samples are simulated back-to-back, each followed by a
                    # gap of 0.5 * simulation_time; select the spikes that
                    # fall into this sample's time window.
                    window_start = s * 1.5 * simulation_time
                    window_end = (s * 1.5 + 1) * simulation_time
                    sample_spikes = [
                        spike for spike in reservoir_spikes
                        if window_start <= spike[1] <= window_end
                    ]
                    self._add_output_neuron(sample_spikes, alpha, mod,
                                            drift_up, drift_down,
                                            tar_class_labels[s],
                                            save_neurons, s)
            print('Added all samples/neurons to the deSNN classifier!')
        return self.classifier.separation(
            self.path, number_of_training_samples, number_of_classes,
            self._reservoir_size())

    def classify(self, subject, save_reservoir_spikes, first_test_sample_index,
                 number_of_test_samples, encoding_method, simulation_time,
                 number_of_neurons_per_core, number_of_training_samples, alpha,
                 mod, drift_up, drift_down, feature, k_neighbors):
        """Classify test samples with the trained deSNN and return accuracy.

        first_test_sample_index is 1-based.  BUGFIX: accuracy is now
        computed against the labels at first_test_sample_index - 1 + i --
        the same labels that are printed -- instead of
        number_of_training_samples + i, which was only correct when the
        test set started immediately after the training set.
        (number_of_training_samples is kept in the signature for
        backward compatibility.)
        """
        labels = []
        for test_sample_index in range(
                first_test_sample_index,
                first_test_sample_index + number_of_test_samples):
            sample_EEG = self.encoder.load_sample(test_sample_index, subject)
            sample_SSA = self.encoder.encode_sample(sample_EEG,
                                                    encoding_method)
            sample_reservoir_spikes = self.reservoir.filter_sample(
                encoding_method, test_sample_index, sample_SSA,
                simulation_time, number_of_neurons_per_core,
                save_reservoir_spikes)
            test_neuron = Output_Neuron(
                sample_reservoir_spikes, self._reservoir_size(), alpha,
                mod, drift_up, drift_down)
            # 'normal' fits to all neurons; 'COM' would fit to only the
            # center-of-mass vectors.
            fitting_type = 'normal'
            class_label = self.classifier.classify(test_neuron, feature,
                                                   k_neighbors, fitting_type)
            labels.append(class_label[0])
        print('Predicted labels for all samples: ' + str(labels))
        # Calculate accuracy; the first column of each row is the label.
        tar_class_labels = [row[0] for row in self._read_class_labels()]
        first = first_test_sample_index - 1  # convert to 0-based index
        count = 0
        for i in range(number_of_test_samples):
            if labels[i] == tar_class_labels[first + i]:
                count += 1
        print('Real labels for all samples:      ' +
              str(tar_class_labels[first:first + number_of_test_samples]))
        accuracy = count / float(number_of_test_samples)
        print('Accuracy: ' + str(accuracy))
        return accuracy