def _run_microcircuit(plot_filename, conf):
    import plotting
    import logging

    simulator = conf['simulator']
    # only NEST is needed as simulator here (simulator = 'nest')
    import pyNN.nest as sim

    # prepare simulation
    logging.basicConfig()

    # extract parameters from config file
    master_seed = conf['params_dict']['nest']['master_seed']
    layers = conf['layers']
    pops = conf['pops']
    plot_spiking_activity = conf['plot_spiking_activity']
    raster_t_min = conf['raster_t_min']
    raster_t_max = conf['raster_t_max']
    frac_to_plot = conf['frac_to_plot']
    record_corr = conf['params_dict']['nest']['record_corr']
    tau_max = conf['tau_max']

    # Numbers of neurons from which to record spikes
    n_rec = helper_functions.get_n_rec(conf)

    sim.setup(**conf['simulator_params'][simulator])

    if simulator == 'nest':
        n_vp = sim.nest.GetKernelStatus('total_num_virtual_procs')
        if sim.rank() == 0:
            print 'n_vp: ', n_vp
            print 'master_seed: ', master_seed
        sim.nest.SetKernelStatus({'print_time': False,
                                  'dict_miss_is_error': False,
                                  'grng_seed': master_seed,
                                  'rng_seeds': range(master_seed + 1,
                                                     master_seed + n_vp + 1),
                                  'data_path': conf['system_params'] \
                                                   ['output_path']})
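        # each virtual process gets its own seed: e.g., with master_seed = 1000
        # and n_vp = 4, this sets grng_seed = 1000 and
        # rng_seeds = [1001, 1002, 1003, 1004]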

    import network

    # list of (filename, filetype) tuples for the exported files
    results = []

    # create network
    start_netw = time.time()
    n = network.Network(sim)

    # contains the GIDs of the spike detectors and voltmeters needed for
    # retrieving filenames later
    device_list = n.setup(sim, conf)

    end_netw = time.time()
    if sim.rank() == 0:
        print 'Creating the network took ', end_netw - start_netw, ' s'

    # simulate
    if sim.rank() == 0:
        print "Simulating..."
    start_sim = time.time()
    sim.run(conf['simulator_params'][simulator]['sim_duration'])
    end_sim = time.time()
    if sim.rank() == 0:
        print 'Simulation took ', end_sim - start_sim, ' s'

    # For each recording device (spike detector / voltmeter), reconstruct the
    # output filenames from the device label, GID, and thread, and merge the
    # per-thread outputs into a single file which is then added to the task
    # output.
    for dev in device_list:
        label = sim.nest.GetStatus(dev)[0]['label']
        gid = sim.nest.GetStatus(dev)[0]['global_id']
        # use the file extension to distinguish between spike and voltage
        # output
        extension = sim.nest.GetStatus(dev)[0]['file_extension']
        if extension == 'gdf':  # spikes
            data = np.empty((0, 2))
        elif extension == 'dat':  # voltages
            data = np.empty((0, 3))
        for thread in xrange(conf['simulator_params']['nest']['threads']):
            filenames = glob.glob(conf['system_params']['output_path']
                                  + '%s-*%d-%d.%s' % (label, gid, thread, extension))
            assert len(filenames) == 1, \
                'Expected exactly one output file per device and thread. ' \
                'Use a clean output directory.'
            data = np.vstack([data, np.loadtxt(filenames[0])])
            # delete original files
            os.remove(filenames[0])
        order = np.argsort(data[:, 1])
        data = data[order]
        outputfile_name = 'collected_%s-%d.%s' % (label, gid, extension)
        outputfile = open(outputfile_name, 'w')
        # the output file should have the same format as the output from NEST,
        # i.e., [int, float] for spikes and [int, float, float] for voltages,
        # hence we write it line by line and assign the corresponding filetype
        if extension == 'gdf':  # spikes
            for line in data:
                outputfile.write('%d\t%.3f\n' % (line[0], line[1]))
            outputfile.close()
            filetype = 'application/vnd.juelich.nest.spike_times'

        elif extension == 'dat':  # voltages
            for line in data:
                outputfile.write(
                    '%d\t%.3f\t%.3f\n' % (line[0], line[1], line[2]))
            outputfile.close()
            filetype = 'application/vnd.juelich.nest.analogue_signal'

        res = (outputfile_name, filetype)
        results.append(res)

    if record_corr and simulator == 'nest':
        start_corr = time.time()
        if sim.nest.GetStatus(n.corr_detector, 'local')[0]:
            print 'getting count_covariance on rank ', sim.rank()
            cov_all = sim.nest.GetStatus(
                n.corr_detector, 'count_covariance')[0]
            delta_tau = sim.nest.GetStatus(n.corr_detector, 'delta_tau')[0]

            cov = {}
            for target_layer in np.sort(layers.keys()):
                for target_pop in pops:
                    target_index = conf['structure'][target_layer][target_pop]
                    cov[target_index] = {}
                    for source_layer in np.sort(layers.keys()):
                        for source_pop in pops:
                            source_index = conf['structure'][
                                source_layer][source_pop]
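                            # the full covariance function over lags
                            # -tau_max .. +tau_max is assembled below: the
                            # reversed (target, source) entry covers zero and
                            # the negative lags, and the (source, target)
                            # entry without its zero-lag bin covers the
                            # positive lags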
                            cov[target_index][source_index] = \
                                np.array(list(
                                    cov_all[target_index][source_index][::-1])
                                + list(cov_all[source_index][target_index][1:]))

            f = open(conf['system_params'][
                     'output_path'] + '/covariances.dat', 'w')
            print >>f, 'tau_max: ', tau_max
            print >>f, 'delta_tau: ', delta_tau
            print >>f, 'simtime: ', conf['simulator_params'][
                simulator]['sim_duration'], '\n'

            for target_layer in np.sort(layers.keys()):
                for target_pop in pops:
                    target_index = conf['structure'][target_layer][target_pop]
                    for source_layer in np.sort(layers.keys()):
                        for source_pop in pops:
                            source_index = conf['structure'][
                                source_layer][source_pop]
                            print >>f, target_layer, target_pop, '-', source_layer, source_pop
                            print >>f, 'n_events_target: ', sim.nest.GetStatus(
                                n.corr_detector, 'n_events')[0][target_index]
                            print >>f, 'n_events_source: ', sim.nest.GetStatus(
                                n.corr_detector, 'n_events')[0][source_index]
                            for i in xrange(len(cov[target_index][source_index])):
                                print >>f, cov[target_index][source_index][i]
                            print >>f, ''
            f.close()

            # add file covariances.dat into bundle
            res_cov = ('covariances.dat',
                       'text/plain')
            results.append(res_cov)

        end_corr = time.time()
        print "Writing covariances took ", end_corr - start_corr, " s"

    if plot_spiking_activity and sim.rank() == 0:
        plotting.plot_raster_bars(raster_t_min, raster_t_max, n_rec,
                                  frac_to_plot, n.pops,
                                  conf['system_params']['output_path'],
                                  plot_filename, conf)
        res_plot = (plot_filename, 'image/png')
        results.append(res_plot)

    sim.end()

    return results
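For orientation, here is a minimal sketch of the conf dictionary that _run_microcircuit and the setup() examples below read. Only keys looked up in these examples are shown, all values are illustrative placeholders rather than the parameters of the original model, and further keys required by helper_functions.get_n_rec and network.Network.setup are omitted.

# minimal sketch of the expected conf structure (placeholder values only)
conf_sketch = {
    'simulator': 'nest',
    'simulator_params': {'nest': {'sim_duration': 1000.,   # ms
                                  'threads': 2,
                                  'min_delay': 0.1}},      # ms
    'params_dict': {'nest': {'master_seed': 123456,
                             'record_corr': False}},
    'layers': {'L23': 0, 'L4': 1, 'L5': 2, 'L6': 3},
    'pops': ['E', 'I'],
    'structure': {'L23': {'E': 0, 'I': 1}},  # one entry per layer in practice
    'system_params': {'output_path': 'results/'},
    'plot_spiking_activity': True,
    'raster_t_min': 0.,
    'raster_t_max': 1000.,
    'frac_to_plot': 0.1,
    'tau_max': 100.,
}
# results = _run_microcircuit('spiking_activity.png', conf_sketch)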
Example #2
    def setup(self, sim, conf):

        # extract parameters
        pyseed = conf['params_dict']['nest']['pyseed']
        parallel_safe = conf['params_dict']['nest']['parallel_safe']
        input_type = conf['params_dict']['nest']['input_type']
        layers = conf['layers']
        pops = conf['pops']
        bg_rate = conf['bg_rate']
        w_mean = conf['w_mean']
        K_scaling = conf['params_dict']['nest']['K_scaling']
        N_scaling = conf['params_dict']['nest']['N_scaling']
        n_record = conf['params_dict']['nest']['n_record']
        neuron_model = conf['neuron_model']
        tau_max = conf['tau_max']
        record_corr = conf['params_dict']['nest']['record_corr']
        n_layers = conf['n_layers']
        n_pops_per_layer = conf['n_pops_per_layer']
        V0_mean = conf['V0_mean']
        n_record_v = conf['params_dict']['nest']['n_record_v']
        record_v = conf['params_dict']['nest']['record_v']
        record_fraction = conf['params_dict']['nest']['record_fraction']
        thalamic_input = conf['thalamic_input']
        w_rel = conf['w_rel']
        w_rel_234 = conf['w_rel_234']
        simulator = conf['simulator']
        N_full = conf['N_full']
        K_ext = conf['K_ext']
        tau_syn_E = conf['neuron_params']['tau_syn_E']
        v_thresh = conf['neuron_params']['v_thresh']
        v_rest = conf['neuron_params']['v_rest']
        neuron_params = conf['neuron_params']
        thal_params = conf['thal_params']
        structure = conf['structure']
        d_mean = conf['d_mean']
        d_sd = conf['d_sd']
        frac_record_v = conf['params_dict']['nest']['frac_record_v']
        n_rec = helper_functions.get_n_rec(conf)

        # if parallel_safe=False, PyNN offsets the seeds by 1 for each rank
        script_rng = NumpyRNG(seed=pyseed,
                              parallel_safe=parallel_safe)

        # Compute DC input before scaling
        if input_type == 'DC':
            self.DC_amp = {}
            for target_layer in layers:
                self.DC_amp[target_layer] = {}
                for target_pop in pops:
                    self.DC_amp[target_layer][target_pop] = bg_rate * \
                        K_ext[target_layer][target_pop] * \
                        w_mean * tau_syn_E / 1000.
        else:
            self.DC_amp = {'L23': {'E': 0., 'I': 0.},
                           'L4': {'E': 0., 'I': 0.},
                           'L5': {'E': 0., 'I': 0.},
                           'L6': {'E': 0., 'I': 0.}}
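        # The DC amplitude approximates the mean current delivered by a
        # Poisson background of rate bg_rate through K_ext synapses of
        # amplitude w_mean and time constant tau_syn_E (assuming w_mean is a
        # current); the division by 1000 converts tau_syn_E from ms to s.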

        # In-degrees of the full-scale and scaled models
        K_full = Scaling.get_indegrees(conf)
        self.K = K_scaling * K_full

        self.K_ext = {}
        for layer in layers:
            self.K_ext[layer] = {}
            for pop in pops:
                self.K_ext[layer][pop] = K_scaling * K_ext[layer][pop]

        self.w = helper_functions.create_weight_matrix(conf)
        # Network scaling
        if K_scaling != 1:
            self.w, self.w_ext, self.DC_amp = Scaling.adjust_w_and_ext_to_K(
                K_full, K_scaling, self.w, self.DC_amp, conf)
        else:
            self.w_ext = w_mean

        Vthresh = {}
        for layer in layers:
            Vthresh[layer] = {}
            for pop in pops:
                Vthresh[layer][pop] = v_thresh

        # Initial membrane potential distributions
        # The original study used V0_mean = -58 mV, V0_sd = 5 mV. Here these
        # values are adjusted to the chosen v_rest and v_thresh and to any
        # scaling of V.
        V0_mean = {}
        V0_sd = {}
        for layer in layers:
            V0_mean[layer] = {}
            V0_sd[layer] = {}
            for pop in pops:
                V0_mean[layer][pop] = (v_rest + Vthresh[layer][pop]) / 2.
                V0_sd[layer][pop] = (Vthresh[layer][pop] -
                                     v_rest) / 3.
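        # e.g., with v_rest = -65 mV and v_thresh = -50 mV (illustrative
        # values only), this yields V0_mean = -57.5 mV and V0_sd = 5 mV,
        # close to the values of the original study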

        V_dist = {}
        for layer in layers:
            V_dist[layer] = {}
            for pop in pops:
                V_dist[layer][pop] = RandomDistribution('normal',
                                                        [V0_mean[layer][pop],
                                                         V0_sd[layer][pop]],
                                                        rng=script_rng)

        model = getattr(sim, neuron_model)

        if record_corr and simulator == 'nest':
            # Create correlation recording device
            sim.nest.SetDefaults('correlomatrix_detector', {'delta_tau': 0.5})
            self.corr_detector = sim.nest.Create('correlomatrix_detector')
            sim.nest.SetStatus(
                self.corr_detector, {'N_channels': n_layers * n_pops_per_layer,
                                     'tau_max': tau_max,
                                     'Tstart': tau_max,
                                     })

        if sim.rank() == 0:
            print 'neuron_params:', conf['neuron_params']
            print 'K: ', self.K
            print 'K_ext: ', self.K_ext
            print 'w: ', self.w
            print 'w_ext: ', self.w_ext
            print 'DC_amp: ', self.DC_amp
            print 'V0_mean: '
            for layer in layers:
                for pop in pops:
                    print layer, pop, V0_mean[layer][pop]
            print 'n_rec:'
            for layer in layers:
                for pop in pops:
                    print layer, pop, n_rec[layer][pop]
                    if not record_fraction and n_record > int(round(N_full[layer][pop] * N_scaling)):
                        print 'Note that the requested number of neurons to record exceeds the', \
                            layer, pop, 'population size'

        # Create cortical populations
        self.pops = {}
        global_neuron_id = 1
        self.base_neuron_ids = {}
        # list containing the GIDs of recording devices, needed for output
        # bundle
        device_list = []
        for layer in layers:
            self.pops[layer] = {}
            for pop in pops:
                cellparams = neuron_params

                self.pops[layer][pop] = sim.Population(
                    int(round(N_full[layer][pop] * N_scaling)),
                    model,
                    cellparams=cellparams,
                    label=layer + pop)
                this_pop = self.pops[layer][pop]

                # Provide DC input in the current-based case
                # DC input is assumed to be absent in the conductance-based
                # case
                this_pop.set('i_offset', self.DC_amp[layer][pop])

                self.base_neuron_ids[this_pop] = global_neuron_id
                global_neuron_id += len(this_pop) + 2

                this_pop.initialize('v', V_dist[layer][pop])

                # Spike recording
                # PYTHON2.6: SINCE SPIKES CANNOT BE RECORDED AT THE MOMENT
                # USING PYNN'S record(), WE CREATE AND CONNECT SPIKE DETECTORS
                # WITH PYNEST
                # this_pop[0:n_rec[layer][pop]].record()
                sd = sim.nest.Create('spike_detector',
                                     params={
                                         'label': 'spikes_{0}{1}'.format(layer, pop),
                                         'withtime': True,
                                         'withgid': True,
                                         'to_file': True})
                device_list.append(sd)
                sim.nest.Connect(
                    list(this_pop[0:n_rec[layer][pop]].all_cells), sd)

                # Membrane potential recording
                if record_v:
                    if record_fraction:
                        n_rec_v = round(this_pop.size * frac_record_v)
                    else:
                        n_rec_v = n_record_v
                    # PYTHON2.6: SINCE VOLTAGES CANNOT BE RECORDED AT THE MOMENT
                    # USING PYNN'S record_v(), WE CREATE AND CONNECT VOLTMETERS
                    # WITH PYNEST
                    # this_pop[0 : n_rec_v].record_v()
                    vm = sim.nest.Create('voltmeter',
                                         params={
                                             'label': 'voltages_{0}{1}'.format(layer, pop),
                                             'withtime': True,
                                             'withgid': True,
                                             'to_file': True})
                    device_list.append(vm)
                    sim.nest.Connect(vm, list(this_pop[0:n_rec_v]))

                # Correlation recording
                if record_corr and simulator == 'nest':
                    index = structure[layer][pop]
                    sim.nest.SetDefaults(
                        'static_synapse', {'receptor_type': index})
                    sim.nest.Connect(list(this_pop.all_cells),
                                     self.corr_detector)
                    # PYTHON2.6: reset receptor type because Connect is used
                    # also for the spike detector and the voltmeter
                    sim.nest.SetDefaults(
                        'static_synapse', {'receptor_type': 0})

        # if record_corr and simulator == 'nest':
        # reset receptor_type
        #     sim.nest.SetDefaults('static_synapse', {'receptor_type': 0})

        # Currently, we get an error if we try to generate a population with
        # sim.SpikeSourcePoisson elements. Therefore, thalamic neurons are
        # created via PyNEST in this version
        if thalamic_input:
            self.thalamic_population = sim.nest.Create('parrot_neuron',
                                                       thal_params['n_thal'])
            # create and connect a poisson generator for stimulating the
            # thalamic population
            thal_pg = sim.nest.Create('poisson_generator',
                                      params={'rate': thal_params['rate'],
                                              'start': thal_params['start'],
                                              'stop': thal_params['start'] + thal_params['duration']})
            sim.nest.Connect(thal_pg, self.thalamic_population)

        # Create thalamic population
        #     self.thalamic_population = sim.Population(thal_params['n_thal'],
        #                                               sim.SpikeSourcePoisson,
        #                                               {'rate': thal_params['rate'],
        #                                                'start': thal_params['start'],
        #                                                'duration': thal_params['duration']},
        #                                               label='thalamic_population')
        #     self.base_neuron_ids[self.thalamic_population] = global_neuron_id
        #     global_neuron_id += len(self.thalamic_population) + 2

        possible_targets_curr = ['inhibitory', 'excitatory']

        # Connect

        for target_layer in layers:
            for target_pop in pops:
                target_index = structure[target_layer][target_pop]
                this_target_pop = self.pops[target_layer][target_pop]
                w_ext = self.w_ext
                # External inputs
                if input_type == 'poisson':
                    rate = bg_rate * self.K_ext[target_layer][target_pop]
                    if simulator == 'nest':
                        # create only a single Poisson generator for each population,
                        # since the native NEST implementation sends
                        # independent spike trains to all targets
                        if sim.rank() == 0:
                            print 'connecting Poisson generator to', target_layer, target_pop

                        pg = sim.nest.Create(
                            'poisson_generator', params={'rate': rate})

                        conn_dict = {'rule': 'all_to_all'}
                        syn_dict = {'model': 'static_synapse',
                                    'weight': 1000. * w_ext,
                                    'delay': d_mean['E']}
                        sim.nest.Connect(
                            pg, list(this_target_pop.all_cells), conn_dict, syn_dict)

                if thalamic_input:
                    if sim.rank() == 0:
                        print 'creating thalamic connections to ' + target_layer + target_pop
                    C_thal = thal_params['C'][target_layer][target_pop]
                    n_target = N_full[target_layer][target_pop]
                    K_thal = round(np.log(1 - C_thal) / np.log((n_target * thal_params['n_thal'] - 1.) /
                                                               (n_target * thal_params['n_thal']))) / n_target * K_scaling
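                    # K_thal inverts the relation between the connection
                    # probability C_thal and the total number of synapses
                    # under a fixed-total-number rule allowing multapses,
                    # C_thal = 1 - ((n_target*n_thal - 1)/(n_target*n_thal))**K_tot,
                    # and converts it to an in-degree per target neuron,
                    # scaled by K_scaling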

                    # Currently, the thalamic population is created with
                    # PyNEST (it is not a PyNN Population) and therefore has
                    # to be connected to the cortical neurons analogously to
                    # Connectivity.FixedTotalNumberConnect()
                    target_neurons = list(this_target_pop.all_cells)
                    n_syn = int(round(K_thal * len(target_neurons)))
                    conn_dict = {'rule': 'fixed_total_number', 'N': n_syn}
                    syn_dict = {'model': 'static_synapse',
                                'weight': {'distribution': 'normal_clipped',
                                           'mu': 1000. * w_ext,
                                           'sigma': 1000. * w_rel * w_ext},
                                'delay': {'distribution': 'normal_clipped',
                                          'low': conf['simulator_params'][simulator]['min_delay'],
                                          'mu': d_mean['E'],
                                          'sigma': d_sd['E']}}
                    sim.nest.Connect(self.thalamic_population, target_neurons,
                                     conn_dict, syn_dict)

                    # Connectivity.FixedTotalNumberConnect(sim, self.thalamic_population,
                    #                                      this_target_pop,
                    #                                      K_thal, w_ext,
                    #                                      w_rel * w_ext,
                    #                                      d_mean['E'],
                    #                                      d_sd['E'], conf)

                # Recurrent inputs
                for source_layer in layers:
                    for source_pop in pops:
                        source_index = structure[source_layer][source_pop]
                        this_source_pop = self.pops[source_layer][source_pop]
                        weight = self.w[target_index][source_index]

                        possible_targets_curr[int((np.sign(weight) + 1) / 2)]
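                        # the lookup above selects 'inhibitory' (index 0) or
                        # 'excitatory' (index 1) from the sign of the weight;
                        # its result is currently unused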

                        if sim.rank() == 0:
                            print 'creating connections from ' + source_layer + \
                                source_pop + ' to ' + target_layer + target_pop

                        if source_pop == 'E' and source_layer == 'L4' and target_layer == 'L23' and target_pop == 'E':
                            w_sd = weight * w_rel_234
                        else:
                            w_sd = abs(weight * w_rel)

                        Connectivity.FixedTotalNumberConnect(sim,
                                                             this_source_pop,
                                                             this_target_pop,
                                                             self.K[target_index][
                                                                 source_index],
                                                             weight, w_sd,
                                                             d_mean[source_pop], d_sd[source_pop], conf)

        return device_list
Example #3
    def setup(self, sim, conf):

        # extract parameters
        pyseed = conf["params_dict"]["nest"]["pyseed"]
        parallel_safe = conf["params_dict"]["nest"]["parallel_safe"]
        input_type = conf["params_dict"]["nest"]["input_type"]
        layers = conf["layers"]
        pops = conf["pops"]
        bg_rate = conf["bg_rate"]
        w_mean = conf["w_mean"]
        K_scaling = conf["params_dict"]["nest"]["K_scaling"]
        N_scaling = conf["params_dict"]["nest"]["N_scaling"]
        n_record = conf["params_dict"]["nest"]["n_record"]
        neuron_model = conf["neuron_model"]
        tau_max = conf["tau_max"]
        record_corr = conf["params_dict"]["nest"]["record_corr"]
        n_layers = conf["n_layers"]
        n_pops_per_layer = conf["n_pops_per_layer"]
        V0_mean = conf["V0_mean"]
        n_record_v = conf["params_dict"]["nest"]["n_record_v"]
        record_v = conf["params_dict"]["nest"]["record_v"]
        record_fraction = conf["params_dict"]["nest"]["record_fraction"]
        thalamic_input = conf["thalamic_input"]
        w_rel = conf["w_rel"]
        w_rel_234 = conf["w_rel_234"]
        simulator = conf["simulator"]
        N_full = conf["N_full"]
        K_ext = conf["K_ext"]
        tau_syn_E = conf["neuron_params"]["tau_syn_E"]
        v_thresh = conf["neuron_params"]["v_thresh"]
        v_rest = conf["neuron_params"]["v_rest"]
        neuron_params = conf["neuron_params"]
        thal_params = conf["thal_params"]
        structure = conf["structure"]
        d_mean = conf["d_mean"]
        d_sd = conf["d_sd"]
        frac_record_v = conf["params_dict"]["nest"]["frac_record_v"]
        n_rec = helper_functions.get_n_rec(conf)

        # if parallel_safe=False, PyNN offsets the seeds by 1 for each rank
        script_rng = NumpyRNG(seed=pyseed, parallel_safe=parallel_safe)

        # Compute DC input before scaling
        if input_type == "DC":
            self.DC_amp = {}
            for target_layer in layers:
                self.DC_amp[target_layer] = {}
                for target_pop in pops:
                    self.DC_amp[target_layer][target_pop] = (
                        bg_rate * K_ext[target_layer][target_pop] * w_mean * tau_syn_E / 1000.0
                    )
        else:
            self.DC_amp = {
                "L23": {"E": 0.0, "I": 0.0},
                "L4": {"E": 0.0, "I": 0.0},
                "L5": {"E": 0.0, "I": 0.0},
                "L6": {"E": 0.0, "I": 0.0},
            }

        # In-degrees of the full-scale and scaled models
        K_full = scaling.get_indegrees(conf)
        self.K = K_scaling * K_full

        self.K_ext = {}
        for layer in layers:
            self.K_ext[layer] = {}
            for pop in pops:
                self.K_ext[layer][pop] = K_scaling * K_ext[layer][pop]

        self.w = helper_functions.create_weight_matrix(conf)
        # Network scaling
        if K_scaling != 1:
            self.w, self.w_ext, self.DC_amp = scaling.adjust_w_and_ext_to_K(
                K_full, K_scaling, self.w, self.DC_amp, conf
            )
        else:
            self.w_ext = w_mean

        Vthresh = {}
        for layer in layers:
            Vthresh[layer] = {}
            for pop in pops:
                Vthresh[layer][pop] = v_thresh

        # Initial membrane potential distributions
        # The original study used V0_mean = -58 mV, V0_sd = 5 mV. Here these
        # values are adjusted to the chosen v_rest and v_thresh and to any
        # scaling of V.
        V0_mean = {}
        V0_sd = {}
        for layer in layers:
            V0_mean[layer] = {}
            V0_sd[layer] = {}
            for pop in pops:
                V0_mean[layer][pop] = (v_rest + Vthresh[layer][pop]) / 2.0
                V0_sd[layer][pop] = (Vthresh[layer][pop] - v_rest) / 3.0

        V_dist = {}
        for layer in layers:
            V_dist[layer] = {}
            for pop in pops:
                V_dist[layer][pop] = RandomDistribution(
                    "normal", [V0_mean[layer][pop], V0_sd[layer][pop]], rng=script_rng
                )

        model = getattr(sim, neuron_model)

        if record_corr and simulator == "nest":
            # Create correlation recording device
            sim.nest.SetDefaults("correlomatrix_detector", {"delta_tau": 0.5})
            self.corr_detector = sim.nest.Create("correlomatrix_detector")
            sim.nest.SetStatus(
                self.corr_detector, {"N_channels": n_layers * n_pops_per_layer, "tau_max": tau_max, "Tstart": tau_max}
            )

        if sim.rank() == 0:
            print "neuron_params:", conf["neuron_params"]
            print "K: ", self.K
            print "K_ext: ", self.K_ext
            print "w: ", self.w
            print "w_ext: ", self.w_ext
            print "DC_amp: ", self.DC_amp
            print "V0_mean: "
            for layer in layers:
                for pop in pops:
                    print layer, pop, V0_mean[layer][pop]
            print "n_rec:"
            for layer in layers:
                for pop in pops:
                    print layer, pop, n_rec[layer][pop]
                    if not record_fraction and n_record > int(round(N_full[layer][pop] * N_scaling)):
                        print "Note that requested number of neurons to record",
                        print "exceeds ", layer, pop, " population size"

        # Create cortical populations
        self.pops = {}
        global_neuron_id = 1
        self.base_neuron_ids = {}
        # list containing the GIDs of recording devices, needed for output
        # bundle
        device_list = []
        for layer in layers:
            self.pops[layer] = {}
            for pop in pops:
                cellparams = neuron_params

                self.pops[layer][pop] = sim.Population(
                    int(round(N_full[layer][pop] * N_scaling)), model, cellparams=cellparams, label=layer + pop
                )
                this_pop = self.pops[layer][pop]

                # Provide DC input in the current-based case
                # DC input is assumed to be absent in the conductance-based
                # case
                this_pop.set("i_offset", self.DC_amp[layer][pop])

                self.base_neuron_ids[this_pop] = global_neuron_id
                global_neuron_id += len(this_pop) + 2

                this_pop.initialize("v", V_dist[layer][pop])

                # Spike recording
                sd = sim.nest.Create(
                    "spike_detector",
                    params={
                        "label": "spikes_{0}{1}".format(layer, pop),
                        "withtime": True,
                        "withgid": True,
                        "to_file": True,
                    },
                )
                device_list.append(sd)
                sim.nest.Connect(list(this_pop[0 : n_rec[layer][pop]].all_cells), sd)

                # Membrane potential recording
                if record_v:
                    if record_fraction:
                        n_rec_v = round(this_pop.size * frac_record_v)
                    else:
                        n_rec_v = n_record_v
                    vm = sim.nest.Create(
                        "voltmeter",
                        params={
                            "label": "voltages_{0}{1}".format(layer, pop),
                            "withtime": True,
                            "withgid": True,
                            "to_file": True,
                        },
                    )
                    device_list.append(vm)
                    sim.nest.Connect(vm, list(this_pop[0:n_rec_v]))

                # Correlation recording
                if record_corr and simulator == "nest":
                    index = structure[layer][pop]
                    sim.nest.SetDefaults("static_synapse", {"receptor_type": index})
                    sim.nest.Connect(list(this_pop.all_cells), self.corr_detector)
                    sim.nest.SetDefaults("static_synapse", {"receptor_type": 0})

        if thalamic_input:
            self.thalamic_population = sim.nest.Create("parrot_neuron", thal_params["n_thal"])
            # create and connect a poisson generator for stimulating the
            # thalamic population
            thal_pg = sim.nest.Create(
                "poisson_generator",
                params={
                    "rate": thal_params["rate"],
                    "start": thal_params["start"],
                    "stop": thal_params["start"] + thal_params["duration"],
                },
            )
            sim.nest.Connect(thal_pg, self.thalamic_population)

        possible_targets_curr = ["inhibitory", "excitatory"]

        # Connect
        for target_layer in layers:
            for target_pop in pops:
                target_index = structure[target_layer][target_pop]
                this_target_pop = self.pops[target_layer][target_pop]
                w_ext = self.w_ext
                # External inputs
                if input_type == "poisson":
                    rate = bg_rate * self.K_ext[target_layer][target_pop]
                    if simulator == "nest":
                        # create only a single Poisson generator for each
                        # population, since the native NEST implementation sends
                        # independent spike trains to all targets
                        if sim.rank() == 0:
                            print "connecting Poisson generator to",
                            print target_layer, target_pop

                        pg = sim.nest.Create("poisson_generator", params={"rate": rate})

                        conn_dict = {"rule": "all_to_all"}
                        syn_dict = {"model": "static_synapse", "weight": 1000.0 * w_ext, "delay": d_mean["E"]}
                        sim.nest.Connect(pg, list(this_target_pop.all_cells), conn_dict, syn_dict)

                if thalamic_input:
                    if sim.rank() == 0:
                        print "creating thalamic connections to ", target_layer,
                        print target_pop
                    C_thal = thal_params["C"][target_layer][target_pop]
                    n_target = N_full[target_layer][target_pop]
                    K_thal = (
                        round(
                            np.log(1 - C_thal)
                            / np.log((n_target * thal_params["n_thal"] - 1.0) / (n_target * thal_params["n_thal"]))
                        )
                        / n_target
                        * K_scaling
                    )

                    target_neurons = list(this_target_pop.all_cells)
                    n_syn = int(round(K_thal * len(target_neurons)))
                    conn_dict = {"rule": "fixed_total_number", "N": n_syn}
                    syn_dict = {
                        "model": "static_synapse",
                        "weight": {
                            "distribution": "normal_clipped",
                            "mu": 1000.0 * w_ext,
                            "sigma": 1000.0 * w_rel * w_ext,
                        },
                        "delay": {
                            "distribution": "normal_clipped",
                            "low": conf["simulator_params"][simulator]["min_delay"],
                            "mu": d_mean["E"],
                            "sigma": d_sd["E"],
                        },
                    }
                    sim.nest.Connect(self.thalamic_population, target_neurons, conn_dict, syn_dict)

                # Recurrent inputs
                for source_layer in layers:
                    for source_pop in pops:
                        source_index = structure[source_layer][source_pop]
                        this_source_pop = self.pops[source_layer][source_pop]
                        weight = self.w[target_index][source_index]

                        possible_targets_curr[int((np.sign(weight) + 1) / 2)]

                        if sim.rank() == 0:
                            print "creating connections from ", source_layer + source_pop + " to " + target_layer + target_pop

                        if source_pop == "E" and source_layer == "L4" and target_layer == "L23" and target_pop == "E":
                            w_sd = weight * w_rel_234
                        else:
                            w_sd = abs(weight * w_rel)

                        connectivity.FixedTotalNumberConnect(
                            sim,
                            this_source_pop,
                            this_target_pop,
                            self.K[target_index][source_index],
                            weight,
                            w_sd,
                            d_mean[source_pop],
                            d_sd[source_pop],
                            conf,
                        )

        return device_list
Example #4
    def setup(self, sim, conf):

        # extract parameters
        pyseed = conf['params_dict']['nest']['pyseed']
        parallel_safe = conf['params_dict']['nest']['parallel_safe']
        input_type = conf['params_dict']['nest']['input_type']
        layers = conf['layers']
        pops = conf['pops']
        bg_rate = conf['bg_rate']
        w_mean = conf['w_mean']
        K_scaling = conf['params_dict']['nest']['K_scaling']
        N_scaling = conf['params_dict']['nest']['N_scaling']
        n_record = conf['params_dict']['nest']['n_record']
        neuron_model = conf['neuron_model']
        tau_max = conf['tau_max']
        record_corr = conf['params_dict']['nest']['record_corr']
        n_layers = conf['n_layers']
        n_pops_per_layer = conf['n_pops_per_layer']
        V0_mean = conf['V0_mean']
        n_record_v = conf['params_dict']['nest']['n_record_v']
        record_v = conf['params_dict']['nest']['record_v']
        record_fraction = conf['params_dict']['nest']['record_fraction']
        thalamic_input = conf['thalamic_input']
        w_rel = conf['w_rel']
        w_rel_234 = conf['w_rel_234']
        simulator = conf['simulator']
        N_full = conf['N_full']
        K_ext = conf['K_ext']
        tau_syn_E = conf['neuron_params']['tau_syn_E']
        v_thresh = conf['neuron_params']['v_thresh']
        v_rest = conf['neuron_params']['v_rest']
        neuron_params = conf['neuron_params']
        thal_params = conf['thal_params']
        structure = conf['structure']
        d_mean = conf['d_mean']
        d_sd = conf['d_sd']
        frac_record_v = conf['params_dict']['nest']['frac_record_v']
        n_rec = helper_functions.get_n_rec(conf)

        # if parallel_safe=False, PyNN offsets the seeds by 1 for each rank
        script_rng = NumpyRNG(seed=pyseed, parallel_safe=parallel_safe)

        # Compute DC input before scaling
        if input_type == 'DC':
            self.DC_amp = {}
            for target_layer in layers:
                self.DC_amp[target_layer] = {}
                for target_pop in pops:
                    self.DC_amp[target_layer][target_pop] = bg_rate * \
                        K_ext[target_layer][target_pop] * \
                        w_mean * tau_syn_E / 1000.
        else:
            self.DC_amp = {
                'L23': {
                    'E': 0.,
                    'I': 0.
                },
                'L4': {
                    'E': 0.,
                    'I': 0.
                },
                'L5': {
                    'E': 0.,
                    'I': 0.
                },
                'L6': {
                    'E': 0.,
                    'I': 0.
                }
            }

        # In-degrees of the full-scale and scaled models
        K_full = scaling.get_indegrees(conf)
        self.K = K_scaling * K_full

        self.K_ext = {}
        for layer in layers:
            self.K_ext[layer] = {}
            for pop in pops:
                self.K_ext[layer][pop] = K_scaling * K_ext[layer][pop]

        self.w = helper_functions.create_weight_matrix(conf)
        # Network scaling
        if K_scaling != 1:
            self.w, self.w_ext, self.DC_amp = scaling.adjust_w_and_ext_to_K(
                K_full, K_scaling, self.w, self.DC_amp, conf)
        else:
            self.w_ext = w_mean

        Vthresh = {}
        for layer in layers:
            Vthresh[layer] = {}
            for pop in pops:
                Vthresh[layer][pop] = v_thresh

        # Initial membrane potential distributions
        # The original study used V0_mean = -58 mV, V0_sd = 5 mV. Here these
        # values are adjusted to the chosen v_rest and v_thresh and to any
        # scaling of V.
        V0_mean = {}
        V0_sd = {}
        for layer in layers:
            V0_mean[layer] = {}
            V0_sd[layer] = {}
            for pop in pops:
                V0_mean[layer][pop] = (v_rest + Vthresh[layer][pop]) / 2.
                V0_sd[layer][pop] = (Vthresh[layer][pop] - v_rest) / 3.

        V_dist = {}
        for layer in layers:
            V_dist[layer] = {}
            for pop in pops:
                V_dist[layer][pop] = RandomDistribution(
                    'normal', [V0_mean[layer][pop], V0_sd[layer][pop]],
                    rng=script_rng)

        model = getattr(sim, neuron_model)

        if record_corr and simulator == 'nest':
            # Create correlation recording device
            sim.nest.SetDefaults('correlomatrix_detector', {'delta_tau': 0.5})
            self.corr_detector = sim.nest.Create('correlomatrix_detector')
            sim.nest.SetStatus(
                self.corr_detector, {
                    'N_channels': n_layers * n_pops_per_layer,
                    'tau_max': tau_max,
                    'Tstart': tau_max,
                })

        if sim.rank() == 0:
            print 'neuron_params:', conf['neuron_params']
            print 'K: ', self.K
            print 'K_ext: ', self.K_ext
            print 'w: ', self.w
            print 'w_ext: ', self.w_ext
            print 'DC_amp: ', self.DC_amp
            print 'V0_mean: '
            for layer in layers:
                for pop in pops:
                    print layer, pop, V0_mean[layer][pop]
            print 'n_rec:'
            for layer in layers:
                for pop in pops:
                    print layer, pop, n_rec[layer][pop]
                    if not record_fraction and n_record > \
                       int(round(N_full[layer][pop] * N_scaling)):
                        print 'Note that the requested number of neurons to record',
                        print 'exceeds the', layer, pop, 'population size'

        # Create cortical populations
        self.pops = {}
        global_neuron_id = 1
        self.base_neuron_ids = {}
        # list containing the GIDs of recording devices, needed for output
        # bundle
        device_list = []
        for layer in layers:
            self.pops[layer] = {}
            for pop in pops:
                cellparams = neuron_params

                self.pops[layer][pop] = sim.Population(
                    int(round(N_full[layer][pop] * N_scaling)),
                    model,
                    cellparams=cellparams,
                    label=layer + pop)
                this_pop = self.pops[layer][pop]

                # Provide DC input in the current-based case
                # DC input is assumed to be absent in the conductance-based
                # case
                this_pop.set('i_offset', self.DC_amp[layer][pop])

                self.base_neuron_ids[this_pop] = global_neuron_id
                global_neuron_id += len(this_pop) + 2

                this_pop.initialize('v', V_dist[layer][pop])

                # Spike recording
                sd = sim.nest.Create('spike_detector',
                                     params={
                                         'label': 'spikes_{0}{1}'.format(layer, pop),
                                         'withtime': True,
                                         'withgid': True,
                                         'to_file': True
                                     })
                device_list.append(sd)
                sim.nest.Connect(list(this_pop[0:n_rec[layer][pop]].all_cells),
                                 sd)

                # Membrane potential recording
                if record_v:
                    if record_fraction:
                        n_rec_v = round(this_pop.size * frac_record_v)
                    else:
                        n_rec_v = n_record_v
                    vm = sim.nest.Create('voltmeter',
                                         params={
                                             'label': 'voltages_{0}{1}'.format(layer, pop),
                                             'withtime': True,
                                             'withgid': True,
                                             'to_file': True
                                         })
                    device_list.append(vm)
                    sim.nest.Connect(vm, list(this_pop[0:n_rec_v]))

                # Correlation recording
                if record_corr and simulator == 'nest':
                    index = structure[layer][pop]
                    sim.nest.SetDefaults('static_synapse',
                                         {'receptor_type': index})
                    sim.nest.Connect(list(this_pop.all_cells),
                                     self.corr_detector)
                    sim.nest.SetDefaults('static_synapse',
                                         {'receptor_type': 0})

        if thalamic_input:
            self.thalamic_population = sim.nest.Create('parrot_neuron',
                                                       thal_params['n_thal'])
            # create and connect a poisson generator for stimulating the
            # thalamic population
            thal_pg = sim.nest.Create('poisson_generator',
                                      params={'rate': thal_params['rate'],
                                              'start': thal_params['start'],
                                              'stop': thal_params['start'] \
                                              + thal_params['duration']})
            sim.nest.Connect(thal_pg, self.thalamic_population)

        possible_targets_curr = ['inhibitory', 'excitatory']

        # Connect
        for target_layer in layers:
            for target_pop in pops:
                target_index = structure[target_layer][target_pop]
                this_target_pop = self.pops[target_layer][target_pop]
                w_ext = self.w_ext
                # External inputs
                if input_type == 'poisson':
                    rate = bg_rate * self.K_ext[target_layer][target_pop]
                    if simulator == 'nest':
                        # create only a single Poisson generator for each
                        # population, since the native NEST implementation sends
                        # independent spike trains to all targets
                        if sim.rank() == 0:
                            print 'connecting Poisson generator to',
                            print target_layer, target_pop

                        pg = sim.nest.Create('poisson_generator',
                                             params={'rate': rate})

                        conn_dict = {'rule': 'all_to_all'}
                        syn_dict = {
                            'model': 'static_synapse',
                            'weight': 1000. * w_ext,
                            'delay': d_mean['E']
                        }
                        sim.nest.Connect(pg, list(this_target_pop.all_cells),
                                         conn_dict, syn_dict)

                if thalamic_input:
                    if sim.rank() == 0:
                        print 'creating thalamic connections to ', target_layer,
                        print target_pop
                    C_thal = thal_params['C'][target_layer][target_pop]
                    n_target = N_full[target_layer][target_pop]
                    K_thal = round(np.log(1 - C_thal) / \
                                   np.log(
                                       (n_target * thal_params['n_thal'] - 1.) /
                                       (n_target * thal_params['n_thal']))) / \
                             n_target * K_scaling

                    target_neurons = list(this_target_pop.all_cells)
                    n_syn = int(round(K_thal * len(target_neurons)))
                    conn_dict = {'rule': 'fixed_total_number', 'N': n_syn}
                    syn_dict = {'model': 'static_synapse',
                                'weight': {'distribution': 'normal_clipped',
                                           'mu': 1000. * w_ext,
                                           'sigma': 1000. * w_rel * w_ext},
                                'delay': {'distribution': 'normal_clipped',
                                          'low': conf['simulator_params'] \
                                                     [simulator]['min_delay'],
                                          'mu': d_mean['E'],
                                          'sigma': d_sd['E']}}
                    sim.nest.Connect(self.thalamic_population, target_neurons,
                                     conn_dict, syn_dict)

                # Recurrent inputs
                for source_layer in layers:
                    for source_pop in pops:
                        source_index = structure[source_layer][source_pop]
                        this_source_pop = self.pops[source_layer][source_pop]
                        weight = self.w[target_index][source_index]

                        possible_targets_curr[int((np.sign(weight) + 1) / 2)]

                        if sim.rank() == 0:
                            print 'creating connections from ', source_layer + \
                                source_pop + ' to ' + target_layer + target_pop

                        if source_pop == 'E' and source_layer == 'L4' and \
                           target_layer == 'L23' and target_pop == 'E':
                            w_sd = weight * w_rel_234
                        else:
                            w_sd = abs(weight * w_rel)

                        connectivity.FixedTotalNumberConnect(
                            sim, this_source_pop, this_target_pop,
                            self.K[target_index][source_index], weight, w_sd,
                            d_mean[source_pop], d_sd[source_pop], conf)

        return device_list
Example #5
def _run_microcircuit(plot_filename, conf):
    import plotting
    import logging

    simulator = conf['simulator']
    # only NEST is needed as simulator here (simulator = 'nest')
    import pyNN.nest as sim

    # prepare simulation
    logging.basicConfig()

    # extract parameters from config file
    master_seed = conf['params_dict']['nest']['master_seed']
    layers = conf['layers']
    pops = conf['pops']
    plot_spiking_activity = conf['plot_spiking_activity']
    raster_t_min = conf['raster_t_min']
    raster_t_max = conf['raster_t_max']
    frac_to_plot = conf['frac_to_plot']
    record_corr = conf['params_dict']['nest']['record_corr']
    tau_max = conf['tau_max']

    # Numbers of neurons from which to record spikes
    n_rec = helper_functions.get_n_rec(conf)

    sim.setup(**conf['simulator_params'][simulator])

    if simulator == 'nest':
        n_vp = sim.nest.GetKernelStatus('total_num_virtual_procs')
        if sim.rank() == 0:
            print 'n_vp: ', n_vp
            print 'master_seed: ', master_seed
        sim.nest.SetKernelStatus({'print_time': False,
                                  'dict_miss_is_error': False,
                                  'grng_seed': master_seed,
                                  'rng_seeds': range(master_seed + 1,
                                                     master_seed + n_vp + 1),
                                  # PYTHON2.6: FOR WRITING OUTPUT FROM
                                  # RECORDING DEVICES WITH PYNEST FUNCTIONS,
                                  # THE OUTPUT PATH IS NOT AUTOMATICALLY THE
                                  # CWD BUT HAS TO BE SET MANUALLY
                                  'data_path': conf['system_params']['output_path']})

    import network

    # list of (filename, filetype) tuples for the exported files
    results = []

    # create network
    start_netw = time.time()
    n = network.Network(sim)

    # PYTHON2.6: device_list CONTAINS THE GIDs OF THE SPIKE DETECTORS AND VOLTMETERS
    # NEEDED FOR RETRIEVING FILENAMES LATER
    device_list = n.setup(sim, conf)

    end_netw = time.time()
    if sim.rank() == 0:
        print 'Creating the network took ', end_netw - start_netw, ' s'

    # simulate
    if sim.rank() == 0:
        print "Simulating..."
    start_sim = time.time()
    sim.run(conf['simulator_params'][simulator]['sim_duration'])
    end_sim = time.time()
    if sim.rank() == 0:
        print 'Simulation took ', end_sim - start_sim, ' s'

    # For each recording device (spike detector / voltmeter), reconstruct the
    # output filenames from the device label, GID, and thread, and merge the
    # per-thread outputs into a single file which is then added to the task
    # output.
    # PYTHON2.6: NEEDS TO BE ADAPTED IF NOT RECORDED VIA PYNEST
    for dev in device_list:
        label = sim.nest.GetStatus(dev)[0]['label']
        gid = sim.nest.GetStatus(dev)[0]['global_id']
        # use the file extension to distinguish between spike and voltage output
        extension = sim.nest.GetStatus(dev)[0]['file_extension']
        if extension == 'gdf':  # spikes
            data = np.empty((0, 2))
        elif extension == 'dat':  # voltages
            data = np.empty((0, 3))
        for thread in xrange(conf['simulator_params']['nest']['threads']):
            filenames = glob.glob(conf['system_params']['output_path']
                                  + '%s-*%d-%d.%s' % (label, gid, thread, extension))
            assert len(filenames) == 1, 'Expected exactly one output file per device and thread. Use a clean output directory.'
            data = np.vstack([data, np.loadtxt(filenames[0])])
            # delete original files
            os.remove(filenames[0])
        order = np.argsort(data[:, 1])
        data = data[order]
        outputfile_name = 'collected_%s-%d.%s' % (label, gid, extension)
        outputfile = open(outputfile_name, 'w')
        # the output file should have the same format as the output from NEST,
        # i.e., [int, float] for spikes and [int, float, float] for voltages,
        # hence we write it line by line and assign the corresponding filetype
        if extension == 'gdf':  # spikes
            for line in data:
                outputfile.write('%d\t%.3f\n' % (line[0], line[1]))
            outputfile.close()
            filetype = 'application/vnd.juelich.nest.spike_times'

        elif extension == 'dat':  # voltages
            for line in data:
                outputfile.write('%d\t%.3f\t%.3f\n' % (line[0], line[1], line[2]))
            outputfile.close()
            filetype = 'application/vnd.juelich.nest.analogue_signal'

        res = (outputfile_name, filetype)
        results.append(res)

    # start_writing = time.time()

    # PYTHON2.6: SPIKE AND VOLTAGE FILES ARE CURRENTLY WRITTEN WHEN A SPIKE
    # DETECTOR OR A VOLTMETER IS CONNECTED WITH 'to_file': True

    # for layer in layers:
    #     for pop in pops:
    #         # filename = conf['system_params']['output_path'] + '/spikes_' + layer + pop + '.dat'
    #         filename = conf['system_params']['output_path'] + 'spikes_' + layer + pop + '.dat'
    #         n.pops[layer][pop].printSpikes(filename, gather=False)

    #         # add filename and filepath into results
    #         subres = (filename, 'application/vnd.juelich.bundle.nest.data')
    #         results.append(subres)

    # if record_v:
    #     for layer in layers:
    #         for pop in pops:
    #             filename = conf['system_params']['output_path'] + '/voltages_' + layer + pop + '.dat'
    #             n.pops[layer][pop].print_v(filename, gather=False)

    if record_corr and simulator == 'nest':
        start_corr = time.time()
        if sim.nest.GetStatus(n.corr_detector, 'local')[0]:
            print 'getting count_covariance on rank ', sim.rank()
            cov_all = sim.nest.GetStatus(n.corr_detector, 'count_covariance')[0]
            delta_tau = sim.nest.GetStatus(n.corr_detector, 'delta_tau')[0]

            cov = {}
            for target_layer in np.sort(layers.keys()):
                for target_pop in pops:
                    target_index = conf['structure'][target_layer][target_pop]
                    cov[target_index] = {}
                    for source_layer in np.sort(layers.keys()):
                        for source_pop in pops:
                            source_index = conf['structure'][source_layer][source_pop]
                            cov[target_index][source_index] = np.array(list(cov_all[target_index][source_index][::-1])
                                                                       + list(cov_all[source_index][target_index][1:]))

            f = open(conf['system_params']['output_path'] + '/covariances.dat', 'w')
            print >>f, 'tau_max: ', tau_max
            print >>f, 'delta_tau: ', delta_tau
            print >>f, 'simtime: ', conf['simulator_params'][simulator]['sim_duration'], '\n'

            for target_layer in np.sort(layers.keys()):
                for target_pop in pops:
                    target_index = conf['structure'][target_layer][target_pop]
                    for source_layer in np.sort(layers.keys()):
                        for source_pop in pops:
                            source_index = conf['structure'][source_layer][source_pop]
                            print >>f, target_layer, target_pop, '-', source_layer, source_pop
                            print >>f, 'n_events_target: ', sim.nest.GetStatus(n.corr_detector, 'n_events')[0][target_index]
                            print >>f, 'n_events_source: ', sim.nest.GetStatus(n.corr_detector, 'n_events')[0][source_index]
                            for i in xrange(len(cov[target_index][source_index])):
                                print >>f, cov[target_index][source_index][i]
                            print >>f, ''
            f.close()

            # add file covariances.dat into bundle
            res_cov = ('covariances.dat',
                       'text/plain')
            results.append(res_cov)

        end_corr = time.time()
        print "Writing covariances took ", end_corr - start_corr, " s"

    # end_writing = time.time()
    # print "Writing data took ", end_writing - start_writing, " s"

    if plot_spiking_activity and sim.rank() == 0:
        plotting.plot_raster_bars(raster_t_min, raster_t_max, n_rec,
                                  frac_to_plot, n.pops,
                                  conf['system_params']['output_path'],
                                  plot_filename, conf)
        res_plot = (plot_filename, 'image/png')
        results.append(res_plot)

    sim.end()

    return results