def start(self):
    super(Experiment_sequence, self).start()
    c = self.params.c

    # Create paper-specific sources
    self.test_words = c.source.test_words
    if not c.source.control:
        source = CountingSource(['ABCD'], np.array([[1.]]),
                                c.N_u_e, c.N_u_i, c.source.avoid)
    else:
        from itertools import permutations
        source = CountingSource.init_simple(
            24, 4, [4, 4], 1, c.N_u_e, c.N_u_i, c.source.avoid,
            words=[''.join(x) for x in permutations('ABCD')])

    # Already add letters for later
    source.alphabet = unique("".join(source.words) + 'E_')
    source.N_a = len(source.alphabet)
    source.lookup = dict(zip(source.alphabet, range(source.N_a)))
    source = TrialSource(source, c.wait_min_plastic, c.wait_var_plastic,
                         zeros(source.N_a), 'reset')
    self.source_archived = copy.deepcopy(source)

    inputtrainsteps = c.steps_plastic + c.steps_noplastic_train
    stats_single = [
        ActivityStat(),
        InputIndexStat(),
        SpikesStat(),
        ISIsStat(interval=[0, c.steps_plastic]),
        ConnectionFractionStat(),
    ]
    stats_all = [
        ParamTrackerStat(),
        EndWeightStat(),
        InputUnitsStat(),
        MeanActivityStat(start=inputtrainsteps, stop=c.N_steps,
                         N_indices=len(''.join(self.test_words)) + 1,
                         LFP=False),
        MeanPatternStat(start=c.steps_plastic, stop=c.N_steps,
                        N_indices=len(''.join(self.test_words)) + 1)
    ]
    return (source, stats_single + stats_all, stats_all)
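
# A hedged illustration of the alphabet construction above: the control
# source draws its words from all 24 orderings of 'ABCD', and the alphabet
# is then extended with the test letter 'E' and the silence symbol '_'.
# sorted(set(...)) stands in for unique() here, assuming unique() returns
# the sorted distinct characters:
#
#     >>> from itertools import permutations
#     >>> words = [''.join(p) for p in permutations('ABCD')]
#     >>> len(words)
#     24
#     >>> alphabet = ''.join(sorted(set(''.join(words) + 'E_')))
#     >>> alphabet
#     'ABCDE_'
#     >>> dict(zip(alphabet, range(len(alphabet))))['_']
#     5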

def start(self):
    super(Experiment_fiser, self).start()
    c = self.params.c
    if self.cluster_param == 'source.prob':
        prob = c.source.prob
        assert 0 <= prob <= 1
        self.params.source = CountingSource(
            self.params.source.words,
            np.array([[prob, 1.0 - prob],
                      [prob, 1.0 - prob]]),
            c.N_u_e, c.N_u_i, c.source.avoid)
    if c.source.use_randsource:
        self.params.source = CountingSource.init_simple(
            c.source.N_words, c.source.N_letters,
            c.source.word_length, c.source.max_fold_prob,
            c.N_u_e, c.N_u_i, c.source.avoid)

    # Control: same letters, each word reversed
    controlsource = copy.deepcopy(self.params.source)
    controlsource.words = [x[::-1] for x in self.params.source.words]

    # Control with single letters
    #~ controlsource.words = controlsource.alphabet
    #~ N_words = len(controlsource.words)
    #~ probs = array([ones(N_words)]*N_words)
    #~ controlsource.probs = probs/sum(probs,1)
    #~ controlsource.glob_ind = [0]
    #~ controlsource.glob_ind.extend(cumsum(map(len,
    #~                                          controlsource.words)))

    # Control with different words
    # Check if ABCD source (see the sketch after this method)
    if controlsource.words == ['DCBA', 'HGFE']:
        controlsource.words = ['EDCBA', 'HGF']

    self.plasticsource = TrialSource(self.params.source,
                                     c.wait_min_plastic,
                                     c.wait_var_plastic,
                                     zeros(self.params.source.N_a),
                                     'reset')
    self.controlsource = TrialSource(controlsource, c.wait_min_train,
                                     c.wait_var_train,
                                     zeros(controlsource.N_a), 'reset')
    self.spontsource = NoSource(controlsource.N_a)

    # Stats
    inputtrainsteps = c.steps_plastic + c.steps_noplastic_train
    # For PatternProbabilityStat
    if c.steps_noplastic_test > 10000:
        burnin = 5000
    else:
        burnin = c.steps_noplastic_test // 2
    shuffled_indices = arange(c.N_e)
    np.random.shuffle(shuffled_indices)
    N_subset = 16
    start_train = c.steps_plastic + burnin
    half_train = start_train + (inputtrainsteps - start_train) // 2
    start_test = inputtrainsteps + burnin
    half_test = start_test + (c.N_steps - start_test) // 2

    # The output dimensions of these stats have to be independent
    # of the number of steps!
    stats_all = [
        ParamTrackerStat(),
        PatternProbabilityStat(
            [[start_train, half_train],
             [half_train + burnin, inputtrainsteps],
             [start_test, half_test],
             [half_test, c.N_steps]],
            shuffled_indices[:N_subset],
            zero_correction=True)
    ]
    stats_single = [
        InputIndexStat(),
        SpikesStat(),
        InputUnitsStat(),
        ActivityStat(),
        SpikesStat(inhibitory=True),
        ISIsStat(interval=[c.steps_plastic, inputtrainsteps]),
        ConnectionFractionStat(),
        InputUnitsStat(),
    ]
    return (self.plasticsource, stats_single + stats_all, stats_all)
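
# A hedged sketch of the control-word logic above: the control source
# reverses each training word, and the hard-coded ABCD/EFGH case is then
# remapped to words with shifted boundaries but the same total letter
# count (the word values here are illustrative):
#
#     >>> words = ['ABCD', 'EFGH']
#     >>> control = [w[::-1] for w in words]
#     >>> control
#     ['DCBA', 'HGFE']
#     >>> sum(map(len, ['EDCBA', 'HGF'])) == sum(map(len, control))
#     True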

def start(self, src, testCountSource=None):
    super(Experiment_mcmc, self).start()
    c = self.params.c
    # Default to printing when no display is configured
    # (e.g. for cluster calculations)
    if not c.has_key('display'):
        c.showPrint = True
    if self.cluster_param == 'source.prob':
        prob = c.source.prob
        assert 0 <= prob <= 1
        self.params.source = CountingSource(
            self.params.source.words,
            np.array([[prob, 1.0 - prob],
                      [prob, 1.0 - prob]]),
            c.N_u_e, c.N_u_i, c.source.avoid)
    # Random source? The parameter is False here, so this branch is
    # not actually used
    if c.source.use_randsource:
        self.params.source = CountingSource.init_simple(
            c.source.N_words, c.source.N_letters,
            c.source.word_length, c.source.max_fold_prob,
            c.N_u_e, c.N_u_i, c.source.avoid)

    # Wrap the source in a TrialSource object (sources.py); it provides
    # the methods the network needs (generating connections, etc.)
    self.inputsource = TrialSource(src, c.wait_min_plastic,
                                   c.wait_var_plastic, zeros(src.N_a),
                                   'reset')
    self.testsource = None
    if testCountSource is not None:
        self.testsource = TrialSource(testCountSource,
                                      c.wait_min_plastic,
                                      c.wait_var_plastic,
                                      zeros(testCountSource.N_a), 'reset')

    # Stats
    # Steps of self-organization plus the first phase without plasticity
    inputtrainsteps = c.steps_plastic + c.steps_noplastic_train

    # Begin: For PatternProbabilityStat
    # Burn-in is half of the second no-plasticity phase, capped at 3000
    if c.steps_noplastic_test > 6000:
        burnin = 3000
    else:
        burnin = c.steps_noplastic_test // 2
    # Shuffle the indices of the excitatory neurons
    shuffled_indices = arange(c.N_e)
    np.random.shuffle(shuffled_indices)
    N_subset = 8
    start_train = c.steps_plastic + burnin  # step at which training begins
    half_train = start_train + (inputtrainsteps
                                - start_train) // 2  # network half trained
    start_test = inputtrainsteps + burnin  # step at which testing begins
    half_test = start_test + (c.N_steps
                              - start_test) // 2  # network half tested
    # End: For PatternProbabilityStat

    # Initialize statistics
    stats_all = [
        InputIndexStat(),
        SpikesStat(),
        ThresholdsStat(),
        InputUnitsStat(),
        NormLastStat(),
        SpontPatternStat(),
        ParamTrackerStat(),
        WeightStat()
    ]
    # Two excitatory synapses per connection: also track the second one
    if c.double_synapses:
        stats_all += [WeightHistoryStat('W_ee_2', record_every_nth=100)]

    # Return the input source and the statistics
    return (self.inputsource, stats_all)
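
# Worked example of the interval arithmetic above with hypothetical step
# counts (the real values come from the parameter file):
#
#     >>> steps_plastic, nop_train, nop_test = 50000, 10000, 20000
#     >>> N_steps = steps_plastic + nop_train + nop_test
#     >>> inputtrainsteps = steps_plastic + nop_train
#     >>> burnin = 3000 if nop_test > 6000 else nop_test // 2
#     >>> start_train = steps_plastic + burnin
#     >>> half_train = start_train + (inputtrainsteps - start_train) // 2
#     >>> start_test = inputtrainsteps + burnin
#     >>> half_test = start_test + (N_steps - start_test) // 2
#     >>> (start_train, half_train, start_test, half_test)
#     (53000, 56500, 63000, 71500)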

def start(self):
    super(Experiment_spont, self).start()
    c = self.params.c
    if self.cluster_param == 'source.prob':
        prob = c.source.prob
        assert 0 <= prob <= 1
        self.params.source = CountingSource(
            self.params.source.words,
            np.array([[prob, 1.0 - prob],
                      [prob, 1.0 - prob]]),
            c.N_u_e, c.N_u_i, c.source.avoid)
    if c.source.use_randsource:
        self.params.source = CountingSource.init_simple(
            c.source.N_words, c.source.N_letters,
            c.source.word_length, c.source.max_fold_prob,
            c.N_u_e, c.N_u_i, c.source.avoid)
    self.inputsource = TrialSource(self.params.source,
                                   c.wait_min_plastic, c.wait_var_plastic,
                                   zeros(self.params.source.N_a), 'reset')

    # Stats
    inputtrainsteps = c.steps_plastic + c.steps_noplastic_train
    # For PatternProbabilityStat
    if c.steps_noplastic_test > 6000:
        burnin = 3000
    else:
        burnin = c.steps_noplastic_test // 2
    shuffled_indices = arange(c.N_e)
    np.random.shuffle(shuffled_indices)
    N_subset = 8
    start_train = c.steps_plastic + burnin
    half_train = start_train + (inputtrainsteps - start_train) // 2
    start_test = inputtrainsteps + burnin
    half_test = start_test + (c.N_steps - start_test) // 2

    stats_all = [
        InputIndexStat(),
        SpikesStat(),
        InputUnitsStat(),
        NormLastStat(),
        SpontPatternStat(),
        ParamTrackerStat(),
        EvokedPredStat(
            traintimes=[c.steps_plastic,
                        c.steps_plastic + c.steps_noplastic_train // 2],
            testtimes=[c.steps_plastic + c.steps_noplastic_train // 2,
                       c.steps_plastic + c.steps_noplastic_train],
            traintest=c.stats.quenching),
    ]
    stats_single = [
        ActivityStat(),
        SpikesStat(inhibitory=True),
        ISIsStat(interval=[start_test, c.N_steps]),
        ConnectionFractionStat(),
        EndWeightStat(),
        #~ BalancedStat(),  # takes lots of time and mem
        CondProbStat(),
        SpontIndexStat(),
        SVDStat(),
        SVDStat_U(),
        SVDStat_V(),
        SpontTransitionStat(),
        InputUnitsStat(),
        PatternProbabilityStat(
            [[start_train, half_train],
             [half_train, inputtrainsteps],
             [start_test, half_test],
             [half_test, c.N_steps]],
            shuffled_indices[:N_subset]),
        WeightHistoryStat('W_ee', record_every_nth=100),
        WeightHistoryStat('W_eu', record_every_nth=9999999)
    ]
    if c.double_synapses:
        stats_single += [WeightHistoryStat('W_ee_2', record_every_nth=100)]
    return (self.inputsource, stats_all + stats_single, stats_all)
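
# PatternProbabilityStat presumably estimates the empirical distribution
# over the 2**N_subset binary activity patterns of a fixed random subset
# of excitatory units within each of the four intervals. A minimal sketch
# of the subset selection, assuming a hypothetical c.N_e of 200:
#
#     >>> import numpy as np
#     >>> shuffled = np.arange(200)
#     >>> np.random.shuffle(shuffled)
#     >>> subset = shuffled[:8]          # N_subset = 8 -> 256 patterns
#     >>> len(set(subset.tolist()))
#     8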