Code example #1
File: test_size.py Project: sagacitysite/SORN
def runAll(i):
    j = 0
    # Transitions
    for transitions in c.source.transitions_array:

        k = 0
        # Number of excitatory neurons
        for num_neurons in c.N_e_array:

            l = 0
            # Number of input neurons
            for num_input in c.N_u_e_coverage_array:
                # Set number of excitatory neurons and calculate inhibitory and overall number of neurons
                c.N_e = num_neurons
                c.N_i = int(np.floor(0.2 * c.N_e))
                c.N = c.N_e + c.N_i

                # Set number of input neurons
                c.N_u_e = np.floor(num_input * c.N_e)

                # Set transitions and source
                c.source.transitions = transitions
                source = CountingSource(states, transitions, c.N_u_e, c.N_u_i,
                                        c.source.avoid)

                # Calculate some more stuff, depending on c.N_e, c.N_i, c.N, etc.
                h_ip_mean = float(2 * c.N_u_e) / float(c.N_e)
                h_ip_range = 0.01
                c.h_ip = np.random.rand(
                    c.N_e) * h_ip_range * 2 + h_ip_mean - h_ip_range

                # Correct connection matrix lamb value
                c.W_ee.lamb = 0.1 * c.N_e

                # Print where we are
                print(datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S") +
                      ": run " + str(i + 1) + " / model " + str(j + 1) +
                      " / neurons " + str(num_neurons) + " / input " +
                      str(int(np.floor(num_input * c.N_e))))

                # Name of folder for results in this step
                #c.multi_name = "run" + str(i) + "_model" + str(j) + "_neurons" + str(k) + "_input" + str(l)
                c.file_name = "run" + str(i)
                c.state.index = (j, k, l)  # model, num_neurons, num_input
                runSORN(c, source)

                # Free memory
                gc.collect()

                # Increase number of inputs counter
                l += 1

            # Increase number of excitatory counter
            k += 1

        # Increase number of transitions matrices counter
        j += 1
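
Note on the h_ip line above: rand(N_e) * h_ip_range * 2 + h_ip_mean - h_ip_range draws one intrinsic-plasticity target per neuron, uniformly from [h_ip_mean - h_ip_range, h_ip_mean + h_ip_range]. A minimal self-contained check, with concrete numbers assumed purely for illustration:

import numpy as np

N_e = 200                                # assumed network size
h_ip_mean = float(2 * 10) / float(N_e)   # e.g. N_u_e = 10, as computed in the loop above
h_ip_range = 0.01
h_ip = np.random.rand(N_e) * h_ip_range * 2 + h_ip_mean - h_ip_range
assert h_ip.min() >= h_ip_mean - h_ip_range
assert h_ip.max() <= h_ip_mean + h_ip_range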
Code example #2
    def start(self):
        super().start()
        c = self.params.c

        word = 'ABCDEFGHIJ'
        m_trans = np.ones((1,1))

        self.inputsource = CountingSource([word],m_trans,
                                          c.N_u_e,c.N_u_i,avoid=False)
        return self.inputsource
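
Across these examples, CountingSource pairs a list of words with a row-stochastic transition matrix: entry [i][j] is the probability that word j follows word i, so each row must sum to 1 (a single word gets the 1x1 matrix ones((1,1)), as above). A toy sampler sketching that contract — an illustration of the interpretation assumed by these calls, not the library's implementation:

import numpy as np

words = ['ABCD', 'EFGH']
m_trans = np.array([[0.5, 0.5],
                    [0.5, 0.5]])              # rows sum to 1
assert np.allclose(m_trans.sum(axis=1), 1.0)

rng = np.random.default_rng(0)
w, stream = 0, []
for _ in range(5):                            # emit five words from the chain
    stream.append(words[w])
    w = rng.choice(len(words), p=m_trans[w])
print(''.join(stream))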
Code example #3
    def start(self):
        super().start()
        c = self.params.c

        words = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmn'
        word1 = words[:n_middle]
        word2 = word1[::-1]
        m_trans = np.ones((2,2)) * 0.5
        self.inputsource = CountingSource([word1,word2],m_trans,
                                          c.N_u_e,c.N_u_i,avoid=False)
        return self.inputsource
Code example #4
    def start(self):
        super(Experiment_sequence, self).start()
        c = self.params.c
        # Create paper-specific sources
        self.test_words = c.source.test_words
        if not c.source.control:
            source = CountingSource(['ABCD'], np.array([[1.]]), c.N_u_e,
                                    c.N_u_i, c.source.avoid)
        else:
            from itertools import permutations
            source = CountingSource.init_simple(
                24,
                4, [4, 4],
                1,
                c.N_u_e,
                c.N_u_i,
                c.source.avoid,
                words=[''.join(x) for x in (permutations('ABCD'))])
        # Already add letters for later
        source.alphabet = unique("".join(source.words) + 'E_')
        source.N_a = len(source.alphabet)
        source.lookup = dict(zip(source.alphabet, range(source.N_a)))

        source = TrialSource(source, c.wait_min_plastic, c.wait_var_plastic,
                             zeros(source.N_a), 'reset')
        self.source_archived = copy.deepcopy(source)

        inputtrainsteps = c.steps_plastic + c.steps_noplastic_train

        stats_single = [
            ActivityStat(),
            InputIndexStat(),
            SpikesStat(),
            ISIsStat(interval=[0, c.steps_plastic]),
            ConnectionFractionStat(),
        ]
        stats_all = [
            ParamTrackerStat(),
            EndWeightStat(),
            InputUnitsStat(),
            MeanActivityStat(start=inputtrainsteps,
                             stop=c.N_steps,
                             N_indices=len(''.join((self.test_words))) + 1,
                             LFP=False),
            MeanPatternStat(start=c.steps_plastic,
                            stop=c.N_steps,
                            N_indices=len(''.join((self.test_words))) + 1)
        ]

        return (source, stats_single + stats_all, stats_all)
Code example #5
File: experiment_RMTask.py Project: Xingfush/RM-SORN
    def start(self):
        super().start()
        c = self.params.c

        word1 = 'A'
        word2 = 'D'
        for i in range(n_middle):
            word1 += 'B'
            word2 += 'E'
        word1 += 'C'
        word2 += 'F'
        m_trans = np.ones((2, 2)) * 0.5
        self.inputsource = CountingSource([word1, word2],
                                          m_trans,
                                          c.N_u_e,
                                          c.N_u_i,
                                          avoid=False)
        return self.inputsource
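
The character-by-character loop above builds words of the form A B…B C and D E…E F; Python's string repetition expresses the same construction in one line (equivalent sketch):

n_middle = 3
word1 = 'A' + 'B' * n_middle + 'C'   # 'ABBBC'
word2 = 'D' + 'E' * n_middle + 'F'   # 'DEEEF'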
Code example #6
File: experiment_sequence.py Project: chrhartm/SORN
    def start(self):
        super(Experiment_sequence,self).start()   
        c = self.params.c
        # Create paper-specific sources
        self.test_words = c.source.test_words
        if not c.source.control:
            source = CountingSource(['ABCD'],np.array([[1.]]),
                           c.N_u_e,c.N_u_i,c.source.avoid)
        else:
            from itertools import permutations
            source = CountingSource.init_simple(24,4,[4,4],1,
                           c.N_u_e,c.N_u_i,c.source.avoid, words = 
                           [''.join(x) for x in (permutations('ABCD'))])
        # Already add letters for later
        source.alphabet = unique("".join(source.words)+'E_')
        source.N_a = len(source.alphabet)
        source.lookup = dict(zip(source.alphabet, range(source.N_a)))

        source = TrialSource(source, c.wait_min_plastic, 
                       c.wait_var_plastic,zeros(source.N_a),'reset')
        self.source_archived = copy.deepcopy(source)

        inputtrainsteps = c.steps_plastic + c.steps_noplastic_train

        stats_single = [
                         ActivityStat(),
                         InputIndexStat(),
                         SpikesStat(),
                         ISIsStat(interval=[0, c.steps_plastic]),
                         ConnectionFractionStat(),
                        ]
        stats_all = [
                     ParamTrackerStat(),
                     EndWeightStat(),
                     InputUnitsStat(),
                     MeanActivityStat(start=inputtrainsteps,
                      stop=c.N_steps,
                      N_indices=len(''.join((self.test_words)))+1,
                                    LFP=False),
                     MeanPatternStat(start=c.steps_plastic,
                      stop=c.N_steps,
                      N_indices=len(''.join((self.test_words)))+1)
                    ]
        
        return (source,stats_single+stats_all,stats_all)
Code example #7
File: experiment_RMTask.py Project: Xingfush/RM-SORN
    def run(self, sorn):
        super().run(sorn)
        c = self.params.c
        accs1 = []
        accs2 = []
        filepath = os.path.abspath(os.path.join(os.getcwd(), "..",
                                                "rm_models"))

        # --------- Training Phase One ----------
        print('\nTraining Phase One: %d models are generated...' %
              int(round(c.steps_train / c.interval_train)))
        # Compute the expectation and std deviation
        filename1 = os.path.abspath(
            os.path.join("..", "art_models/overall-fixed.csv"))
        filename2 = os.path.abspath(
            os.path.join("..", "art_models/onlylast-fixed.csv"))
        filename3 = os.path.abspath(
            os.path.join("..", "art_models/fraction.csv"))
        for n_middle in n_middles:
            best1 = []
            best2 = []
            frac = []
            # Create the input Source
            word1 = 'A'
            word2 = 'D'
            for i in range(n_middle):
                word1 += 'B'
                word2 += 'E'
            word1 += 'C'
            word2 += 'F'
            m_trans = np.ones((2, 2)) * 0.5
            self.inputsource = CountingSource([word1, word2],
                                              m_trans,
                                              c.N_u_e,
                                              c.N_u_i,
                                              avoid=False)
            for _ in range(10):  # repetitions; '_' avoids shadowing the inner loop's i
                nums = int(round(c.steps_train / c.interval_train))
                accs1 = []
                accs2 = []
                fraction = []
                self.reset(sorn)
                for i in range(nums):
                    _1, _2 = sorn.simulation(c.interval_train)
                    sorn.update = False
                    sorn.display = True
                    acc1, acc2 = sorn.simulation(c.interval_test)
                    accs1.append(acc1)
                    accs2.append(acc2)
                    fraction.append(get_ConnFraction(sorn.W_ee.M))
                    sorn.update = True
                    sorn.display = False
                best1.append(max(accs1))
                best2.append(max(accs2))
                frac.append(fraction[np.argmax(accs1)])
            with open(filename1, 'a+', newline='') as f:
                csv.writer(f).writerow(best1)
            with open(filename2, 'a+', newline='') as f:
                csv.writer(f).writerow(best2)
            with open(filename3, 'a+', newline='') as f:
                csv.writer(f).writerow(frac)
            print("The mean expectation of RM-SORN is %.4f and %.4f.\n" %
                  (np.mean(best1), np.mean(best2)))
            print("The std deviation of RM-SORN is %.4f and %.4f.\n" %
                  (np.std(best1), np.std(best2)))
Code example #8
    def run(self, sorn):
        super().run(sorn)
        c = self.params.c
        # Compute the expectation and std deviation
        filename1 = os.path.abspath(
            os.path.join("..", "art_models/overall-sorn.csv"))
        filename2 = os.path.abspath(
            os.path.join("..", "art_models/onlylast-sorn.csv"))
        for n_middle in n_middles:
            acc1 = []
            acc2 = []
            # Create the input Source
            word1 = 'A'
            word2 = 'D'
            for i in range(n_middle):
                word1 += 'B'
                word2 += 'E'
            word1 += 'C'
            word2 += 'F'
            m_trans = np.ones((2, 2)) * 0.5
            self.inputsource = CountingSource([word1, word2],
                                              m_trans,
                                              c.N_u_e,
                                              c.N_u_i,
                                              avoid=False)

            for _ in range(100):  # repetitions; '_' avoids shadowing the enumerate i below
                self.reset(sorn)
                #----- Input with plasticity
                print("\nInput plasticity period:")
                ans1 = sorn.simulation(c.steps_plastic)
                #----- Input without plasticity - train
                print("\nInput training period:")
                # Turn off plasticity
                sorn.W_ee.c.eta_stdp = 0
                sorn.W_ei.c.eta_istdp = 0
                sorn.W_ee.c.sp_prob = 0
                c.eta_ip = 0
                # turn off noise
                c.noise_sig = 0
                ans2 = sorn.simulation(c.steps_readouttrain)
                #----- Input without plasticity - test performance
                print('\nInput test period:')
                ans3 = sorn.simulation(c.steps_readouttest)
                # Compute the accuracy of SORN.
                y_read_train = np.zeros((6, len(ans2['C']) + 1))
                y_read_test = np.zeros((6, len(ans3['C']) + 1))
                for i, y in enumerate(ans2['C']):
                    y_read_train[y, i] = 1
                for i, y in enumerate(ans3['C']):
                    y_read_test[y, i] = 1
                y_read_train = y_read_train[:, :5000]
                y_read_test = y_read_test[:, :5000]
                target = np.argmax(y_read_test, axis=0)
                # States variables
                X_train = (ans2['R_x'] >= 0) + 0.
                X_test = (ans3['R_x'] >= 0) + 0.
                # Typical calculation of a reservoir computing readout
                X_train_pinv = np.linalg.pinv(X_train)
                W_trained = np.dot(y_read_train, X_train_pinv.T)

                y_predicted = np.dot(W_trained, X_test.T)
                prediction = np.argmax(y_predicted, axis=0)
                except_first = np.where((target != 0) & (target != 3))[0]
                only_last = np.where((target == 2) | (target == 5))[0]
                # Reduced performance (i.e. drop the random initial char of each word)
                y_test_red = target[except_first]
                y_pred_red = prediction[except_first]
                y_test_only = target[only_last]
                y_pred_only = prediction[only_last]
                perf_red = (y_test_red == y_pred_red).sum() / float(
                    len(y_pred_red))
                perf_only = (y_test_only == y_pred_only
                             ).sum() / float(len(y_pred_only) + 1)
                print(
                    "The testing accuracy of Counting Task is: %0.2f%% and %0.2f%%\n"
                    % (perf_red * 100, perf_only * 100))
                acc1.append(perf_red)
                acc2.append(perf_only)
            with open(filename1, 'a+', newline='') as f:
                csv.writer(f).writerow(acc1)
            with open(filename2, 'a+', newline='') as f:
                csv.writer(f).writerow(acc2)
            print("The mean expectation of SORN is %.4f and %.4f.\n" %
                  (np.mean(acc1), np.mean(acc2)))
            print("The std deviation of SORN is %.4f and %.4f.\n" %
                  (np.std(acc1), np.std(acc2)))
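
The readout above is plain linear regression on the reservoir states: with states X (steps x neurons) and one-hot targets Y (classes x steps), W = Y @ pinv(X).T solves W @ X.T ≈ Y in the least-squares sense, and argmax over classes decodes each step. A self-contained sketch with random stand-in data (all sizes assumed):

import numpy as np

T, N, C = 200, 50, 6                          # steps, neurons, classes (assumed)
rng = np.random.default_rng(0)
X = (rng.random((T, N)) > 0.5).astype(float)  # binary states, like (R_x >= 0) + 0.
labels = rng.integers(0, C, T)
Y = np.zeros((C, T))
Y[labels, np.arange(T)] = 1.0                 # one-hot targets

W = Y @ np.linalg.pinv(X).T                   # least-squares readout, C x N
prediction = np.argmax(W @ X.T, axis=0)
print("train accuracy: %.2f" % (prediction == labels).mean())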
Code example #9
File: experiment_mcmc.py Project: sagacitysite/SORN
    def start(self, src, testCountSource=None):
        super(Experiment_mcmc, self).start()
        c = self.params.c

        if not c.has_key('display'):
            c.showPrint = True

        # When run as a cluster job, the varied parameter is the source probability
        if self.cluster_param == 'source.prob':
            prob = c.source.prob
            assert (prob >= 0 and prob <= 1)
            self.params.source = CountingSource(
                self.params.source.words,
                np.array([[prob, 1.0 - prob], [prob, 1.0 - prob]]),
                c.N_u_e, c.N_u_i,  # c.N_u_i presumably intended here, as in init_simple below
                c.source.avoid)

        # Random source? Parameter is false, so actually not used
        if c.source.use_randsource:
            self.params.source = CountingSource.init_simple(
                c.source.N_words, c.source.N_letters, c.source.word_length,
                c.source.max_fold_prob, c.N_u_e, c.N_u_i, c.source.avoid)

        # Make inputsource out of source by using TrialSource object from sources.py
        # It now has some nice methods to deal with the network, like generate connections, etc.
        self.inputsource = TrialSource(src,
                                       c.wait_min_plastic, c.wait_var_plastic,
                                       zeros(src.N_a), 'reset')

        self.testsource = None
        if testCountSource is not None:
            self.testsource = TrialSource(testCountSource, c.wait_min_plastic,
                                          c.wait_var_plastic,
                                          zeros(testCountSource.N_a), 'reset')

        # Stats
        inputtrainsteps = c.steps_plastic + c.steps_noplastic_train  # steps during self-organization + steps for first phase w/o plasticity

        # Begin: For PatternProbabilityStat
        # Set burn-in phase to half of steps for 2nd phase w/o plasticity, but maximum 3000
        if c.steps_noplastic_test > 6000:
            burnin = 3000
        else:
            burnin = c.steps_noplastic_test // 2

        # Shuffle indices of excitatory neurons
        shuffled_indices = arange(c.N_e)
        np.random.shuffle(shuffled_indices)

        N_subset = 8
        start_train = c.steps_plastic + burnin  # step when training begins
        half_train = start_train + (inputtrainsteps - start_train
                                    ) // 2  # step when network is half trained
        start_test = inputtrainsteps + burnin  # step when testing begins
        half_test = start_test + (
            c.N_steps - start_test) // 2  # step when network is half tested
        # End: For PatternProbabilityStat

        # Initialize statistics
        stats_all = [
            InputIndexStat(),
            SpikesStat(),
            ThresholdsStat(),
            InputUnitsStat(),
            NormLastStat(),
            SpontPatternStat(),
            ParamTrackerStat(),
            WeightStat()
        ]

        if c.double_synapses:  # if we have two excitatory synapses for each connection
            stats_all += [WeightHistoryStat('W_ee_2', record_every_nth=100)]  # appended to stats_all; this method builds no stats_single list

        # Return inputsource and statistics (testsource is kept in self.testsource)
        return (self.inputsource, stats_all)
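
The start_train / half_train / start_test / half_test arithmetic above slices the run into burned-in halves of the training and testing phases. A worked example with assumed step counts:

steps_plastic = 50000            # assumed step counts, for illustration only
steps_noplastic_train = 20000
steps_noplastic_test = 10000
N_steps = steps_plastic + steps_noplastic_train + steps_noplastic_test

inputtrainsteps = steps_plastic + steps_noplastic_train          # 70000
burnin = 3000 if steps_noplastic_test > 6000 else steps_noplastic_test // 2
start_train = steps_plastic + burnin                             # 53000
half_train = start_train + (inputtrainsteps - start_train) // 2  # 61500
start_test = inputtrainsteps + burnin                            # 73000
half_test = start_test + (N_steps - start_test) // 2             # 76500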
Code example #10
# For cluster: same randsource for entire simulation
#~ from common.sources import CountingSource
#~ source = CountingSource.init_simple(
#~ c.source.N_words,
#~ c.source.N_letters,c.source.word_length,
#~ c.source.max_fold_prob,c.N_u_e,c.N_u_i,
#~ c.source.avoid, seed=42)
#~ import random
#~ print source.words, np.random.randint(0,500), random.random()

#~ from common.sources import RandomLetterSource
#~ source = RandomLetterSource(c.source.N_letters,c.N_u_e,c.N_u_i,
#~ c.source.avoid)
from common.sources import CountingSource
source = CountingSource(['ABCD', 'EFGH'],
                        np.array([[2. / 3., 1. / 3.], [2. / 3., 1. / 3.]]),
                        c.N_u_e, c.N_u_i, c.source.avoid)

c.wait_min_plastic = 0
c.wait_var_plastic = 0
c.wait_min_train = 0
c.wait_var_train = 0

# Cluster
c.cluster.vary_param = 'source.prob'  #'with_plasticity'#
c.cluster.params = np.linspace(0.1, 0.9, 11)  #[False,True]#
if c.imported_mpi:
    c.cluster.NUMBER_OF_SIMS = len(c.cluster.params)
    c.cluster.NUMBER_OF_CORES = MPI.COMM_WORLD.size
    c.cluster.NUMBER_LOCAL = c.cluster.NUMBER_OF_SIMS\
                             // c.cluster.NUMBER_OF_CORES
Code example #11
    def run(self,sorn):
        super(Experiment_hesselmann,self).run(sorn)
        c = self.params.c
        
        sorn.simulation(c.steps_plastic)
        sorn.update = False
        
        ## Generate Training and test data
        # Andreea also did this
        sorn.x = np.zeros(sorn.c.N_e)
        sorn.y = np.zeros(sorn.c.N_i)
        sorn.source = self.trainsource
        # Generate Training Data
        sorn.simulation(c.steps_noplastic_train)
        sorn.x = np.zeros(sorn.c.N_e)
        sorn.y = np.zeros(sorn.c.N_i)

        # Generate new source with mixed letters
        # Save old mappings
        old_W_eu = sorn.W_eu.W
        source = self.source.source
        A_neurons = where(sorn.W_eu.W[:,source.lookup['A']]==1)[0]
        B_neurons = where(sorn.W_eu.W[:,source.lookup['B']]==1)[0]
        X_neurons = where(sorn.W_eu.W[:,source.lookup['X']]==1)[0]
        # First generate source with len(frac_A) words of equal prob
        N_words = len(c.frac_A)
        letters = "CDEFGHIJKL" 
        Xes = (sorn.c.source.N_x*'X')
        word_list = ['A%s_'%Xes,'B%s_'%Xes]                               
        frac_A_letters = ['B']
        for i in range(N_words-2):
            word_list.append('%s%s_'%(letters[i],Xes))                     
            frac_A_letters.append(letters[i])
        frac_A_letters.append('A')
        
        probs = np.ones((N_words,N_words))*(1.0/N_words)
        # c.N_u_i restored below; the original call passed True in its place
        source_new = CountingSource(word_list, probs, c.N_u_e, c.N_u_i,
                                    avoid=True,
                                    permute_ambiguous=c.source.permute_ambiguous)
        # Then take weight matrices of these guys and set them according
        # to frac_A
        new_W_eu = source_new.generate_connection_e(c.N_e)
        new_W_eu.W *= 0
        new_W_eu.W[A_neurons,source_new.lookup['A']] = 1
        new_W_eu.W[B_neurons,source_new.lookup['B']] = 1
        new_W_eu.W[X_neurons,source_new.lookup['X']] = 1
        # from 1 to len-1 because 0 and 1 already included in fracs
        for i in range(1,len(frac_A_letters)-1):
            new_neurons = hstack(
                          (A_neurons[:int(len(A_neurons)*c.frac_A[i])],
                           B_neurons[int(len(A_neurons)*c.frac_A[i]):]))
            new_W_eu.W[new_neurons,
                       source_new.lookup[frac_A_letters[i]]] = 1
            
        # Assign the source and new Matrix to SORN
        sorn.W_eu = new_W_eu
        sorn.W_iu = source_new.generate_connection_i(c.N_i)
        trialsource_new = TrialSource(source_new, c.wait_min_test,
                                c.wait_var_test, zeros(source_new.N_a), 
                                'reset')
        sorn.source = trialsource_new
        
        sorn.simulation(c.steps_noplastic_test)
        
        return {'source_plastic':self.source,
                'source_train':self.trainsource,
                'source_test':trialsource_new}
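
The hstack construction above interpolates between the 'A' and 'B' input pools: for a fraction f it takes the first f-portion of the A units and the complementary portion of the B units, so every ambiguous letter drives the same number of units. A toy illustration (unit indices assumed):

import numpy as np

A_neurons = np.arange(0, 10)     # units coding 'A' (assumed indices)
B_neurons = np.arange(10, 20)    # units coding 'B'
frac_A = 0.3                     # one intermediate ambiguity level

mixed = np.hstack((A_neurons[:int(len(A_neurons) * frac_A)],
                   B_neurons[int(len(A_neurons) * frac_A):]))
print(mixed)                     # [ 0  1  2 13 14 15 16 17 18 19] -- still 10 units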
Code example #12
    def start(self):
        super(Experiment_spont, self).start()
        c = self.params.c

        if self.cluster_param == 'source.prob':
            prob = c.source.prob
            assert (prob >= 0 and prob <= 1)
            self.params.source = CountingSource(
                self.params.source.words,
                np.array([[prob, 1.0 - prob], [prob, 1.0 - prob]]),
                c.N_u_e, c.N_u_i,  # c.N_u_i presumably intended here, as in init_simple below
                c.source.avoid)

        if c.source.use_randsource:
            self.params.source = CountingSource.init_simple(
                c.source.N_words, c.source.N_letters, c.source.word_length,
                c.source.max_fold_prob, c.N_u_e, c.N_u_i, c.source.avoid)

        self.inputsource = TrialSource(self.params.source, c.wait_min_plastic,
                                       c.wait_var_plastic,
                                       zeros(self.params.source.N_a), 'reset')

        # Stats
        inputtrainsteps = c.steps_plastic + c.steps_noplastic_train
        # For PatternProbabilityStat
        if c.steps_noplastic_test > 6000:
            burnin = 3000
        else:
            burnin = c.steps_noplastic_test // 2
        shuffled_indices = arange(c.N_e)
        np.random.shuffle(shuffled_indices)
        N_subset = 8
        start_train = c.steps_plastic + burnin
        half_train = start_train + (inputtrainsteps - start_train) // 2
        start_test = inputtrainsteps + burnin
        half_test = start_test + (c.N_steps - start_test) // 2

        stats_all = [
            InputIndexStat(),
            SpikesStat(),
            InputUnitsStat(),
            NormLastStat(),
            SpontPatternStat(),
            ParamTrackerStat(),
            EvokedPredStat(traintimes=[
                c.steps_plastic, c.steps_plastic + c.steps_noplastic_train // 2
            ],
                           testtimes=[
                               c.steps_plastic + c.steps_noplastic_train // 2,
                               c.steps_plastic + c.steps_noplastic_train
                           ],
                           traintest=c.stats.quenching),
        ]
        stats_single = [
            ActivityStat(),
            SpikesStat(inhibitory=True),
            ISIsStat(interval=[start_test, c.N_steps]),
            ConnectionFractionStat(),
            EndWeightStat(),
            #~ BalancedStat(), # takes lots of time and mem
            CondProbStat(),
            SpontIndexStat(),
            SVDStat(),
            SVDStat_U(),
            SVDStat_V(),
            SpontTransitionStat(),
            InputUnitsStat(),
            PatternProbabilityStat(
                [[start_train, half_train], [half_train, inputtrainsteps],
                 [start_test, half_test], [half_test, c.N_steps]],
                shuffled_indices[:N_subset]),
            WeightHistoryStat('W_ee', record_every_nth=100),
            WeightHistoryStat('W_eu', record_every_nth=9999999)
        ]
        if c.double_synapses:
            stats_single += [WeightHistoryStat('W_ee_2', record_every_nth=100)]
        return (self.inputsource, stats_all + stats_single, stats_all)
Code example #13
def run_all(i):

    j = 0
    # Transitions (Models)
    for transitions in c.source.transitions_array:

        h = 0
        # Hamming thresholds
        for hamming_threshold in c.stats.hamming_threshold_array:

            g = 0
            # Average number of EE-connections
            for connections_density in c.connections_density_array:

                # IP
                #for l in range(np.shape(c.h_ip_array)[1]):  # for h_ip
                l = 0  # for eta_ip / range
                #for eta_ip in c.eta_ip_array:
                for h_ip_range in c.h_ip_range_array:

                    k = 0
                    # Training steps
                    for steps in c.steps_plastic_array:

                        # Set H_IP
                        #c.h_ip = c.h_ip_array[:,l]  # for h_ip
                        #c.eta_ip = eta_ip  # for eta_ip
                        c.h_ip = np.random.rand(c.N_e)*h_ip_range*2 + c.h_ip_mean - h_ip_range  # for h_ip_range

                        # Print where we are
                        #ip_str = str(np.round(np.mean(c.h_ip), 3))  # for h_ip
                        #ip_str = str(np.round(eta_ip, 4))  # for eta_ip
                        ip_str = str(h_ip_range)  # for h_ip_range

                        print(datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S") +
                              ": run " + str(i + 1) + " / model " + str(j + 1) +
                              " / threshold " + str(hamming_threshold) +
                              " / ip " + ip_str +
                              " / connections " + str(connections_density * c.N_e) +
                              " / " + str(steps))

                        testSource = None
                        if c.source.testing:
                            # If testing input is given, train network with given training matrix
                            c.source.transitions = c.source.training
                            source = CountingSource(states, c.source.training, c.N_u_e, c.N_u_i, c.source.avoid)
                            # If testing is varied, add another Counting Source for testing phase, which is given by current loop
                            testSource = CountingSource(states, transitions, c.N_u_e, c.N_u_i, c.source.avoid)
                        else:
                            # If input is only given while training, vary transitions while training (as usual)
                            c.source.transitions = transitions
                            source = CountingSource(states, transitions, c.N_u_e, c.N_u_i, c.source.avoid)

                        # Set steps_plastic and correct N_steps
                        c.steps_plastic = steps
                        c.N_steps = c.steps_plastic + c.steps_noplastic_train + c.steps_noplastic_test

                        # Set hamming threshold
                        c.stats.hamming_threshold = hamming_threshold

                        # Set number of average EE-connections
                        c.W_ee.lamb = connections_density*c.N_e

                        # Name of folder for results in this step
                        c.file_name = "run"+str(i)
                        c.state.index = (j, k, h, l, g)  # models, training steps, threshold, h_ip, #EE-connections
                        runSORN(c, source, testSource)

                        # Free memory
                        gc.collect()

                        # Increase training steps counter
                        k += 1

                    # Increase IP counter (needed for the eta_ip / h_ip_range variants,
                    # not when h_ip itself is varied)
                    l += 1

                # Increase average # EE-connections counter
                g += 1

            # Increase hamming threshold counter
            h += 1

        # Increase models counter
        j += 1
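
The manual j/h/g/l/k counters here (and the similar sweep in code example #1) track loop indices by hand; enumerate() carries index and value together. A condensed sketch of the same pattern, with the loop bodies elided:

for j, transitions in enumerate(c.source.transitions_array):
    for h, hamming_threshold in enumerate(c.stats.hamming_threshold_array):
        for g, connections_density in enumerate(c.connections_density_array):
            for l, h_ip_range in enumerate(c.h_ip_range_array):
                for k, steps in enumerate(c.steps_plastic_array):
                    c.state.index = (j, k, h, l, g)
                    # ... configure c and call runSORN(c, source, testSource)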
Code example #14
File: experiment_spont.py Project: chrhartm/SORN
 def start(self):
     super(Experiment_spont,self).start()   
     c = self.params.c
     
     if self.cluster_param == 'source.prob':
         prob = c.source.prob
         assert(prob>=0 and prob<=1)
         self.params.source = CountingSource(
                                       self.params.source.words,
                                       np.array([[prob,1.0-prob],
                                                 [prob,1.0-prob]]),
                                       c.N_u_e,c.N_u_i,  # c.N_u_i presumably intended
                                       c.source.avoid)
     
     if c.source.use_randsource:
         self.params.source = CountingSource.init_simple(
                 c.source.N_words,
                 c.source.N_letters,c.source.word_length,
                 c.source.max_fold_prob,c.N_u_e,c.N_u_i,
                 c.source.avoid)
                 
     self.inputsource = TrialSource(self.params.source, 
                      c.wait_min_plastic, c.wait_var_plastic, 
                      zeros(self.params.source.N_a), 'reset')
     
     # Stats
     inputtrainsteps = c.steps_plastic + c.steps_noplastic_train
     # For PatternProbabilityStat
     if c.steps_noplastic_test > 6000:
         burnin = 3000
     else:
         burnin = c.steps_noplastic_test//2
     shuffled_indices = arange(c.N_e)
     np.random.shuffle(shuffled_indices)
     N_subset = 8
     start_train = c.steps_plastic+burnin
     half_train = start_train+(inputtrainsteps-start_train)//2
     start_test = inputtrainsteps+burnin
     half_test = start_test+(c.N_steps-start_test)//2
     
     stats_all = [
                  InputIndexStat(),
                  SpikesStat(),
                  InputUnitsStat(),
                  NormLastStat(),
                  SpontPatternStat(),
                  ParamTrackerStat(),
                  EvokedPredStat(
                         traintimes=[c.steps_plastic,
                                     c.steps_plastic+
                                     c.steps_noplastic_train//2],
                         testtimes =[c.steps_plastic+
                                     c.steps_noplastic_train//2,
                                     c.steps_plastic+
                                     c.steps_noplastic_train], 
                                     traintest=c.stats.quenching),
                 ]
     stats_single = [
                      ActivityStat(),
                      SpikesStat(inhibitory=True),
                      ISIsStat(interval=[start_test,c.N_steps]),
                      ConnectionFractionStat(),
                      EndWeightStat(),
                      #~ BalancedStat(), # takes lots of time and mem
                      CondProbStat(),
                      SpontIndexStat(),
                      SVDStat(),
                      SVDStat_U(),
                      SVDStat_V(),
                      SpontTransitionStat(),
                      InputUnitsStat(),
                      PatternProbabilityStat(
                                     [[start_train,half_train],
                                      [half_train,inputtrainsteps],
                                      [start_test,half_test],
                                      [half_test,c.N_steps]],
                                       shuffled_indices[:N_subset]),
                      WeightHistoryStat('W_ee',record_every_nth=100),
                      WeightHistoryStat('W_eu',
                                        record_every_nth=9999999)
                     ]
     if c.double_synapses:
         stats_single += [WeightHistoryStat('W_ee_2',
                                        record_every_nth=100)]
     return (self.inputsource,stats_all+stats_single,stats_all)
Code example #15
c.source.prob = 0.75  # This is only here to be changed by cluster
c.source.avoid = False
c.source.control = False  # For sequence_test

# For cluster: same randsource for entire simulation
#~ from common.sources import CountingSource
#~ source = CountingSource.init_simple(
#~ c.source.N_words,
#~ c.source.N_letters,c.source.word_length,
#~ c.source.max_fold_prob,c.N_u_e,c.N_u_i,
#~ c.source.avoid, seed=42)
#~ import random
#~ print source.words, np.random.randint(0,500), random.random()

#~ from common.sources import RandomLetterSource
#~ source = RandomLetterSource(c.source.N_letters,c.N_u_e,c.N_u_i,
#~ c.source.avoid)
from common.sources import CountingSource
states = ['A', 'B', 'C', 'D']
c.source.transitions = np.array([[0, 1, 0, 0], [0.5, 0, 0.5, 0],
                                 [0, 0.5, 0, 0.5], [0.5, 0, 0.5, 0]])
source = CountingSource(states, c.source.transitions, c.N_u_e, c.N_u_i,
                        c.source.avoid)
c.src = source

c.wait_min_plastic = 0
c.wait_var_plastic = 0
c.wait_min_train = 0
c.wait_var_train = 0
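
The transition matrix above defines a 4-state Markov chain over the words A-D: each row is the outgoing distribution of one state and must sum to 1. A quick sanity check plus a sampled sequence (illustration only):

import numpy as np

T = np.array([[0, 1, 0, 0], [0.5, 0, 0.5, 0],
              [0, 0.5, 0, 0.5], [0.5, 0, 0.5, 0]])
assert np.allclose(T.sum(axis=1), 1.0)     # rows are probability distributions

states = ['A', 'B', 'C', 'D']
rng = np.random.default_rng(0)
s, seq = 0, []
for _ in range(12):
    seq.append(states[s])
    s = rng.choice(4, p=T[s])
print(''.join(seq))                        # e.g. 'ABAB...' depending on the draw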
Code example #16
    def start(self):
        super(Experiment_fiser,self).start()   
        c = self.params.c

        if self.cluster_param == 'source.prob':
            prob = c.source.prob
            assert(prob>=0 and prob<=1)
            self.params.source = CountingSource(
                                          self.params.source.words,
                                          np.array([[prob,1.0-prob],
                                                    [prob,1.0-prob]]),
                                          c.N_u_e,c.N_u_i,  # c.N_u_i presumably intended
                                          c.source.avoid)
        
        if c.source.use_randsource:
            self.params.source = CountingSource.init_simple(
                    c.source.N_words,
                    c.source.N_letters,c.source.word_length,
                    c.source.max_fold_prob,c.N_u_e,c.N_u_i,
                    c.source.avoid)
 
        controlsource = copy.deepcopy(self.params.source)
        controlsource.words = [x[::-1] for x in 
                               self.params.source.words]
                               
        # Control with single letters
        #~ controlsource.words = controlsource.alphabet
        #~ N_words = len(controlsource.words)
        #~ probs = array([ones(N_words)]*N_words)
        #~ controlsource.probs = probs/sum(probs,1)
        #~ controlsource.glob_ind = [0]
        #~ controlsource.glob_ind.extend(cumsum(map(len,
                                            #~ controlsource.words)))
        
        # Control with different words
        # Check if ABCD source
        if controlsource.words == ['DCBA','HGFE']:
            controlsource.words = ['EDCBA','HGF']
                    
        self.plasticsource = TrialSource(self.params.source, 
                                c.wait_min_plastic, c.wait_var_plastic, 
                                zeros(self.params.source.N_a), 'reset')
        self.controlsource = TrialSource(controlsource,
                                c.wait_min_train, c.wait_var_train,
                                zeros(controlsource.N_a), 'reset')
        self.spontsource = NoSource(controlsource.N_a)
                
        #Stats
        inputtrainsteps = c.steps_plastic + c.steps_noplastic_train
        # For PatternProbabilityStat
        if c.steps_noplastic_test > 10000:
            burnin = 5000
        else:
            burnin = c.steps_noplastic_test//2
        shuffled_indices = arange(c.N_e)
        np.random.shuffle(shuffled_indices)
        N_subset = 16
        start_train = c.steps_plastic+burnin
        half_train = start_train+(inputtrainsteps-start_train)//2
        start_test = inputtrainsteps+burnin
        half_test = start_test+(c.N_steps-start_test)//2
        # The output dimensions of these stats have to be independent
        # of the number of steps!
        stats_all = [                         
                     ParamTrackerStat(),
                     PatternProbabilityStat(
                                    [[start_train,half_train],
                                     [half_train+burnin,inputtrainsteps],
                                     [start_test,half_test],
                                     [half_test,c.N_steps]],
                                      shuffled_indices[:N_subset],
                                      zero_correction=True)
                    ]
        stats_single = [      
                         InputIndexStat(),
                         SpikesStat(),
                         InputUnitsStat(),
                         ActivityStat(),
                         SpikesStat(inhibitory=True),
                         ISIsStat(interval=[c.steps_plastic,
                                            inputtrainsteps]),
                         ConnectionFractionStat(),
                         InputUnitsStat(),
                        ]
                        
        return (self.plasticsource,stats_single+stats_all,stats_all)
Code example #17
 def start(self):
     super(Experiment_hesselmann,self).start()   
     c = self.params.c
     
     if self.cluster_param == 'source.prob' and c.imported_mpi:
         prob = c.source.prob
         assert(prob>=0 and prob<=1)
         self.params.source = CountingSource(
                                       self.params.source.words,
                                       np.array([[prob,1.0-prob],
                                                 [prob,1.0-prob]]),
                                       c.N_u_e,c.N_u_i,  # c.N_u_i presumably intended
                                       c.source.avoid)
     
     self.source = TrialSource(self.params.source, 
                      c.wait_min_plastic, c.wait_var_plastic, 
                      zeros(self.params.source.N_a), 'reset')
                      
     self.trainsource = copy.deepcopy(self.source)
     self.trainsource.blank_min_length = c.wait_min_train
     self.trainsource.blank_var_length = c.wait_var_train
     
     
     # Stats
     inputtrainsteps = c.steps_plastic + c.steps_noplastic_train
     if c.steps_noplastic_test < 100000:
         pred_test = c.steps_noplastic_test // 2
     else:
         pred_test = 50000
     if c.stats.quenching == 'train':
         evoked_train = [c.steps_plastic,
                         c.steps_plastic+c.steps_noplastic_train//2]
         evoked_test = [c.steps_plastic+c.steps_noplastic_train//2,
                        c.steps_plastic+c.steps_noplastic_train]
     elif c.stats.quenching == 'test':
         evoked_train = [inputtrainsteps,-pred_test]
         evoked_test = [-pred_test,-1]
         
     stats_all = [
                  InputIndexStat(),
                  SpikesStat(),
                  ParamTrackerStat(),
                  BayesStat(pred_pos=0),
                  #~ AttractorDynamicsStat(),
                  HistoryStat(var='W_eu.W',collection='gather',
                              record_every_nth=100000000),
                  TrialBayesStat(),
                  SpontBayesStat(),
                  OutputDistStat(),
                  EvokedPredStat(traintimes = evoked_train,
                                 testtimes = evoked_test,
                                 traintest = c.stats.quenching),
                  InputUnitsStat(),
                  ]
     
     stats_single = [
                  ActivityStat(),
                  WeightHistoryStat('W_ee',record_every_nth=100),
                  WeightHistoryStat('W_eu',
                                        record_every_nth=9999999),
                  ConnectionFractionStat(),
                  ISIsStat(),
                  SpikesStat(inhibitory=True),
                  CondProbStat(),
                  EndWeightStat(),
                  #~ BalancedStat(), # Takes forever
                  #~ RateStat(),
                  NormLastStat(),
                  #~ SVDStat(),
                  #~ SVDStat_U(),
                  #~ SVDStat_V(),
                  ]
              
     return (self.source,stats_all+stats_single,stats_all)