def clear(self):
    self.recorders = set([])
    self.id_counter = 0
    self.segment_counter = -1
    if self.network:
        for item in self.network.groups + self.network._all_operations:
            del item
    self.network = brian.Network()
    self.network.clock = brian.Clock()
    self.reset()
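# Note on clear(): `del item` in the loop above only unbinds the loop
# variable; it does not remove anything from the old network. What actually
# discards the old state is rebinding self.network to a fresh Network, as
# clear() does afterwards. A tiny standalone illustration of that Python
# behavior:
xs = [object(), object()]
for item in xs:
    del item          # removes only the local name `item`
assert len(xs) == 2   # the list and its objects are untouched
xs = []               # rebinding/clearing the container is what drops them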
def __init__(self, timestep, min_delay, max_delay):
    """Initialize the simulator."""
    self.network = brian.Network()
    self.network.clock = brian.Clock(t=0 * ms, dt=timestep * ms)
    self.initialized = True
    self.num_processes = 1
    self.mpi_rank = 0
    self.min_delay = min_delay
    self.max_delay = max_delay
    self.gid = 0
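# A runnable sketch of what __init__ wires up, isolated from the class
# (Brian 1; the 0.1 ms timestep is an illustrative value, not from the source):
import brian
from brian import ms

network = brian.Network()
network.clock = brian.Clock(t=0 * ms, dt=0.1 * ms)
print(float(network.clock.dt))  # 0.0001 -- Brian 1 times are plain floats in seconds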
        'normalize_inputs', 'save_weights', 'save_best_model', 'test_no_inhibition',
        'save_spikes', 'weights_noise', 'test_adaptive_threshold', 'interactive_mode']:
    if locals()[var] == 'True':
        locals()[var] = True
    elif locals()[var] == 'False':
        locals()[var] = False
    else:
        raise Exception('Expecting True or False-valued command line argument "' + var + '".')

num_examples = num_test
data_size = 10000

# set Brian global preferences
b.set_global_preferences(defaultclock=b.Clock(dt=0.5 * b.ms), useweave=True,
                         gcc_options=['-ffast-math -march=native'], usecodegen=True,
                         usecodegenweave=True, usecodegenstateupdate=True,
                         usecodegenthreshold=False, usenewpropagate=True, usecstdp=True,
                         openmp=False, magic_useframes=False, useweave_linear_diffeq=True)

# for reproducibility's sake
np.random.seed(random_seed)
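# Caveat on the loop above: writing into locals() only works here because the
# code runs at module scope (where locals() is globals()); inside a function,
# CPython ignores such writes. A scope-safe sketch of the same True/False
# parsing (`parse_bool` is a name introduced for illustration, not from the
# source):
def parse_bool(value, var_name):
    if value == 'True':
        return True
    elif value == 'False':
        return False
    raise Exception('Expecting True or False-valued command line argument "' + var_name + '".')

# e.g.: normalize_inputs = parse_bool(normalize_inputs, 'normalize_inputs')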
    end = time.time()
    print 'time needed to load training set:', end - start
else:
    start = time.time()
    testing = get_labeled_data(MNIST_data_path + 'testing', bTrain=False)
    end = time.time()
    print 'time needed to load test set:', end - start

#------------------------------------------------------------------------------
# set parameters and equations
#------------------------------------------------------------------------------
b.set_global_preferences(
    defaultclock=b.Clock(dt=0.5 * b.ms),  # the default clock to use if none is provided or defined in any enclosing scope
    useweave=True,  # whether functions should use inlined compiled C code where defined
    gcc_options=['-ffast-math -march=native'],  # compiler switches passed to gcc; for gcc 4.2+ use -march=native; -ffast-math is turned on by default
    usecodegen=True,  # whether to use experimental code generation support
    usecodegenweave=True,  # whether to use C with experimental code generation support
    usecodegenstateupdate=True,  # whether to use experimental code generation support on state updaters
    usecodegenthreshold=False,  # whether to use experimental code generation support on thresholds
def run_sim(ffExcInputMult=None, ffInhInputMult=None):
    """Run the cond-based LIF neuron simulation.  Takes a few minutes to
    construct the network and run.

    Parameters
    ----------
    ffExcInputMult: scalar: FF input magnitude to E cells; ffInputV is
        multiplied by this value and connected to E cells
    ffInhInputMult: scalar: FF input magnitude to I cells.

    Returns
    -------
    outDict - spike times, records of continuous values from simulation
    """
    # use helper to get input timecourses
    (ffInputV, condAddV) = create_input_vectors(doDebugPlot=False)  # multiplied by scalars below

    # setup initial state
    stT = time.time()
    brian.set_global_preferences(usecodegen=True)
    brian.set_global_preferences(useweave=True)
    brian.set_global_preferences(usecodegenweave=True)
    brian.clear(erase=True, all=True)
    brian.reinit_default_clock()
    clk = brian.Clock(dt=0.05 * ms)

    ################
    # create neurons, define connections
    neurNetwork = brian.NeuronGroup(nNet, model=eqs, threshold=vthresh,
                                    reset=vrest, refractory=absRefractoryMs * msecond,
                                    order=1, compile=True, freeze=False, clock=clk)

    # create neuron pools
    neurCE = neurNetwork.subgroup(nExc)
    neurCI = neurNetwork.subgroup(nInh)
    connCE = brian.Connection(neurCE, neurNetwork, 'ge')
    connCI = brian.Connection(neurCI, neurNetwork, 'gi')
    print('n cells: %d, nE,I %d,%d, %s, absRefractoryMs: %d'
          % (nNet, nExc, nInh, repr(clk), absRefractoryMs))

    # connect the network to itself
    connCE.connect_random(neurCE, neurNetwork, internalSparseness, weight=connENetWeight)
    connCI.connect_random(neurCI, neurNetwork, internalSparseness, weight=connINetWeight)

    # connect inputs that change spont rate
    assert spontAddRate <= 0, 'Spont add rate should be negative - convention: neg, excite inhibitory cells'
    spontAddNInpSyn = 100
    nTotalSpontNeurons = spontAddNInpSyn * nInh * 0.02
    neurSpont = brian.PoissonGroup(nTotalSpontNeurons, -1.0 * spontAddRate * Hz)
    connCSpont = brian.Connection(neurSpont, neurCI, 'ge')
    connCSpont.connect_random(p=spontAddNInpSyn * 1.0 / nTotalSpontNeurons,
                              weight=connENetWeight,  # match internal excitatory strengths
                              fixed=True)

    # connect the feedforward visual (poisson) inputs to excitatory cells (ff E)
    ffExcInputNInpSyn = 100
    nTotalFfNeurons = ffExcInputNInpSyn * ffExcInputNTargs * 0.02  # one pop of input cells for both E and I FF
    _ffExcInputV = ffExcInputMult * np.abs(a_(ffInputV).copy())
    assert np.all(_ffExcInputV >= 0), 'Negative FF rates are rectified to zero'
    neurFfExcInput = brian.PoissonGroup(nTotalFfNeurons,
                                        lambda t: _ffExcInputV[int(t * 1000)] * Hz)
    connCFfExcInput = brian.Connection(neurFfExcInput, neurNetwork, 'ge')
    connCFfExcInput.connect_random(neurFfExcInput, neurCE[0:ffExcInputNTargs],
                                   ffExcInputNInpSyn * 1.0 / nTotalFfNeurons,
                                   weight=connENetWeight, fixed=True)

    # connect the feedforward visual (poisson) inputs to inhibitory cells (ff I)
    ffInhInputNInpSyn = 100
    _ffInhInputV = ffInhInputMult * np.abs(ffInputV.copy())
    assert np.all(_ffInhInputV >= 0), 'Negative FF rates are rectified to zero'
    neurFfInhInput = brian.PoissonGroup(nTotalFfNeurons,
                                        lambda t: _ffInhInputV[int(t * 1000)] * Hz)
    connCFfInhInput = brian.Connection(neurFfInhInput, neurNetwork, 'ge')
    connCFfInhInput.connect_random(neurFfInhInput, neurCI[0:ffInhInputNTargs],
                                   ffInhInputNInpSyn * 1.0 / nTotalFfNeurons,  # sparseness
                                   weight=connENetWeight, fixed=True)

    # connect added step (ChR2) conductance to excitatory cells
    condAddAmp = 4.0
    gAdd = brian.TimedArray(condAddAmp * condAddV, dt=1 * ms)
    print('Adding conductance for %d cells (can be slow): ' % len(condAddNeurNs), end=' ')
    for (iN, tN) in enumerate(condAddNeurNs):
        neurCE[tN].gAdd = gAdd
    print('done')

    # Initialize using some randomness so all neurons don't start in same state.
    # Alternative: initialize with constant values, give net extra 100-300ms to
    # evolve from initial state.
    neurNetwork.v = (brian.randn(nNet) * 5.0 - 65) * mvolt
    neurNetwork.ge = brian.randn(nNet) * 1.5 + 4
    neurNetwork.gi = brian.randn(nNet) * 12 + 20

    # Record continuous variables and spikes
    monSTarg = brian.SpikeMonitor(neurNetwork)
    if contRecNs is not None:
        contRecClock = brian.Clock(dt=contRecStepMs * ms)
        monVTarg = brian.StateMonitor(neurNetwork, 'v', record=contRecNs, clock=contRecClock)
        monGETarg = brian.StateMonitor(neurNetwork, 'ge', record=contRecNs, clock=contRecClock)
        monGAddTarg = brian.StateMonitor(neurNetwork, 'gAdd', record=contRecNs, clock=contRecClock)
        monGITarg = brian.StateMonitor(neurNetwork, 'gi', record=contRecNs, clock=contRecClock)

    # construct brian.Network before running (so brian explicitly knows what to
    # update during run)
    netL = [neurNetwork, connCE, connCI, monSTarg,
            neurFfExcInput, connCFfExcInput, neurFfInhInput, connCFfInhInput,
            neurSpont, connCSpont]
    if contRecNs is not None:
        # noinspection PyUnboundLocalVariable
        netL.append([monVTarg, monGETarg, monGAddTarg, monGITarg])  # cont monitors
    net = brian.Network(netL)
    print("Network construction time: %3.1f seconds" % (time.time() - stT))

    # run
    print("Simulation running...")
    sys.stdout.flush()
    start_time = time.time()
    net.run(simRunTimeS * second, report='text', report_period=30.0 * second)
    durationS = time.time() - start_time
    print("Simulation time: %3.1f seconds" % durationS)

    outNTC = collections.namedtuple(
        'outNTC',
        'vm ge gadd gi clockDtS clockStartS clockEndS spiketimes contRecNs')
    outNTC.__new__.__defaults__ = (None,) * len(outNTC._fields)  # default all fields to None
    outNT = outNTC(clockDtS=float(monSTarg.clock.dt),
                   clockStartS=float(monSTarg.clock.start),
                   clockEndS=float(monSTarg.clock.end),
                   spiketimes=a_(monSTarg.spiketimes.values(), dtype='O'),
                   contRecNs=contRecNs)
    if contRecNs is not None:
        outNT = outNT._replace(vm=monVTarg.values,
                               ge=monGETarg.values,
                               gadd=monGAddTarg.values,
                               gi=monGITarg.values)
    return outNT
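# A minimal usage sketch for run_sim(), based only on its docstring and the
# namedtuple it returns (the multiplier values are illustrative, not from the
# source):
out = run_sim(ffExcInputMult=1.0, ffInhInputMult=1.0)
print('dt %g s, run %g-%g s' % (out.clockDtS, out.clockStartS, out.clockEndS))
total_spikes = sum(len(st) for st in out.spiketimes)  # one spike-time array per cell
print('total spikes: %d' % total_spikes)
if out.contRecNs is not None:
    vm0 = out.vm[0]  # continuous Vm trace of the first recorded cell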
def _set_dt(self, timestep):
    if self.simclock is None or timestep != self._get_dt():
        self.simclock = brian.Clock(dt=timestep * ms)
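# The guard above re-creates the Clock only when the timestep actually
# changes, so repeated set-to-same-value calls leave everything bound to the
# existing clock. A sketch of the companion getter that _set_dt assumes; this
# body is an assumption, not taken from the source:
def _get_dt(self):
    if self.simclock is None:
        raise Exception("Simulation timestep not yet set.")
    return float(self.simclock.dt / ms)  # Brian 1 stores dt in seconds; report ms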
parser.add_argument('--top_percent', type=int, default=10)
args = parser.parse_args()

mode, connectivity, weight_dependence, post_pre, conv_size, conv_stride, conv_features, \
    weight_sharing, lattice_structure, random_inhibition_prob, top_percent = \
    args.mode, args.connectivity, args.weight_dependence, args.post_pre, args.conv_size, \
    args.conv_stride, args.conv_features, args.weight_sharing, args.lattice_structure, \
    args.random_inhibition_prob, args.top_percent

print '\n'
print args.mode, args.connectivity, args.weight_dependence, args.post_pre, args.conv_size, \
    args.conv_stride, args.conv_features, args.weight_sharing, args.lattice_structure, \
    args.random_inhibition_prob, args.top_percent
print '\n'

# set global preferences
b.set_global_preferences(defaultclock=b.Clock(dt=0.5 * b.ms), useweave=True,
                         gcc_options=['-ffast-math -march=native'], usecodegen=True,
                         usecodegenweave=True, usecodegenstateupdate=True,
                         usecodegenthreshold=False, usenewpropagate=True, usecstdp=True,
                         openmp=False, magic_useframes=False, useweave_linear_diffeq=True)

# for reproducibility's sake
np.random.seed(0)

# STDP rule
stdp_input = weight_dependence + '_' + post_pre

if weight_dependence == 'weight_dependence':
    use_weight_dependence = True
else:
    use_weight_dependence = False

if post_pre == 'post_pre':
    use_post_pre = True
else:
import sys

import numpy as np

import brian_no_units
import brian as b
from brian import ms

experiment_number = 0
experiment_path = './results/'

if len(sys.argv) == 2:
    experiment_number = int(sys.argv[1])

print 'EXPERIMENT', experiment_number

b.set_global_preferences(
    defaultclock=b.Clock(dt=0.1 * b.ms),  # the default clock to use if none is provided
    useweave=True,  # whether functions should use inlined compiled C code where defined
    gcc_options=['-march=native'],  # compiler switches passed to the gcc compiler
    usecodegen=True,  # whether to use experimental code generation support
    usecodegenweave=True,  # whether to use C with experimental code generation support
    usecodegenstateupdate=True,  # whether to use experimental code generation support on state updaters
    usecodegenthreshold=False,  # whether to use experimental code generation support on thresholds
    usenewpropagate=True,  # whether to use experimental new C propagation functions
    training = pickle.load(input)

with open('data/testing.o', "rb") as input:
    testing = pickle.load(input)

num_train = len(training['y'])
num_test = len(testing['y'])

# ------------------------------------------------------------------------------
# set parameters and equations
# ------------------------------------------------------------------------------
test_mode = True

b.set_global_preferences(
    openmp_threads=10,
    defaultclock=b.Clock(dt=0.5 * b.ms),  # the default clock to use if none is provided or defined in any enclosing scope
    useweave=True,  # whether functions should use inlined compiled C code where defined
    gcc_options=['-ffast-math -march=native'],  # compiler switches passed to gcc; for gcc 4.2+ use -march=native; -ffast-math is turned on by default
    usecodegen=True,  # whether to use experimental code generation support
    usecodegenweave=True,  # whether to use C with experimental code generation support
    usecodegenstateupdate=True,  # whether to use experimental code generation support on state updaters
    usecodegenthreshold=False,  # whether to use experimental code generation support on thresholds