Example #1
def add_input_decoders(encoding_pars, input_decoder_pars, kernel_pars):
    """
    Updates encoding parameters, adding decoders to the encoding layer
    :param encoding_pars: original encoding parameters to update
    :param encoder_label: label of encoder to readout
    :param input_decoder_pars: parameters of decoder to attach
    :param kernel_pars: main system parameters
    :return: updates encoding ParameterSet
    """
    if not isinstance(input_decoder_pars, ParameterSet):
        input_decoder_pars = ParameterSet(input_decoder_pars)
    if isinstance(encoding_pars, ParameterSet):
        encoding_pars = encoding_pars.as_dict()
    if encoding_pars['input_decoder'] is not None:
        enc_label = encoding_pars['input_decoder'].pop('encoder_label')
    else:
        enc_label = 'parrots'
    encoding_pars['input_decoder'] = {}
    decoder_dict = copy_dict(
        input_decoder_pars.as_dict(), {
            'decoded_population':
            [enc_label for _ in range(len(input_decoder_pars.state_variable))]
        })
    resolution = decoder_dict.pop('output_resolution')

    input_decoder = set_decoding_defaults(output_resolution=resolution,
                                          kernel_pars=kernel_pars,
                                          **decoder_dict)
    encoding_pars.update({'input_decoder': input_decoder.as_dict()})

    return ParameterSet(encoding_pars)
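
A minimal usage sketch (the decoder keys below are illustrative; the exact entries expected by set_decoding_defaults depend on the surrounding framework):

input_decoder_pars = {
    'state_variable': ['spikes'],   # one decoded population entry is created per state variable
    'output_resolution': 1.,        # popped here and passed on to set_decoding_defaults
    # ... any further decoder parameters accepted by set_decoding_defaults
}
encoding_pars = add_input_decoders(encoding_pars, input_decoder_pars, kernel_pars)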
Example #2
def rec_device_defaults(start=0.,
                        stop=sys.float_info.max,
                        resolution=0.1,
                        record_to='memory',
                        device_type='spike_detector',
                        label=''):
    """
    Standard device parameters
    :param default_set:
    :return:
    """
    rec_devices = {
        'start': start,
        'stop': stop,
        'origin': 0.,
        'interval': resolution,
        'record_to': [record_to],
        'label': label,
        'model': device_type,
        'close_after_simulate': False,
        'flush_after_simulate': False,
        'flush_records': False,
        'close_on_reset': True,
        'withtime': True,
        'withgid': True,
        'withweight': False,
        'time_in_steps': False,
        'scientific': False,
        'precision': 3,
        'binary': False,
    }
    return ParameterSet(rec_devices)
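
For instance, a spike detector recording to memory could be configured as follows (all values here are illustrative):

spike_detector = rec_device_defaults(start=100., stop=2100., resolution=0.1,
                                     record_to='memory',
                                     device_type='spike_detector',
                                     label='E_spikes')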
Example #3
def set_kernel_defaults(run_type='local', data_label='', **system_pars):
    """
    Return pre-defined kernel parameters dictionary
    :param default_set:
    :return:
    """
    keys = [
        'nodes', 'ppn', 'mem', 'walltime', 'queue', 'sim_time',
        'transient_time'
    ]
    if set(system_pars.keys()) != set(keys):
        raise TypeError(
            "system parameters dictionary must contain the following keys {0}".
            format(str(keys)))

    N_vp = system_pars['nodes'] * system_pars['ppn']
    np_seed = np.random.randint(1000000000) + 1
    np.random.seed(np_seed)
    msd = np.random.randint(100000000000)

    kernel_pars = {
        'resolution': 0.1,
        'sim_time': system_pars['sim_time'],
        'transient_t': system_pars['transient_time'],
        'data_prefix': data_label,
        'data_path': paths[run_type]['data_path'],
        'mpl_path': paths[run_type]['matplotlib_rc'],
        'overwrite_files': True,
        'print_time': (run_type == 'local'),
        'rng_seeds': range(msd + N_vp + 1, msd + 2 * N_vp + 1),
        'grng_seed': msd + N_vp,
        'total_num_virtual_procs': N_vp,
        'local_num_threads': system_pars['ppn'],
        'np_seed': np_seed,
        'system': {
            'local': (run_type == 'local'),
            'system_label': run_type,
            'queueing_system': paths[run_type]['queueing_system'],
            'jdf_template': paths[run_type]['jdf_template'],
            'remote_directory': paths[run_type]['remote_directory'],
            'jdf_fields': {
                '{{ script_folder }}': '',
                '{{ nodes }}': str(system_pars['nodes']),
                '{{ ppn }}': str(system_pars['ppn']),
                '{{ mem }}': str(system_pars['mem']),
                '{{ walltime }}': system_pars['walltime'],
                '{{ queue }}': system_pars['queue'],
                '{{ computation_script }}': ''
            }
        }
    }
    return ParameterSet(kernel_pars)
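
A usage sketch, assuming the global paths dictionary has an entry for the chosen run_type (all values below are illustrative):

system_pars = dict(nodes=1, ppn=8, mem=32, walltime='01:00:00',
                   queue='batch', sim_time=2000., transient_time=1000.)
kernel_pars = set_kernel_defaults(run_type='local', data_label='example_run',
                                  **system_pars)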
Example #4
def run(parameter_set, plot=False, display=False, save=True):
    """

    :param parameter_set:
    :param plot:
    :param display:
    :param save:
    :return:
    """
    if not isinstance(parameter_set, ParameterSet):
        if isinstance(parameter_set, basestring) or isinstance(
                parameter_set, dict):
            parameter_set = ParameterSet(parameter_set)
        else:
            raise TypeError(
                "parameter_set must be ParameterSet, string with full path to parameter file or "
                "dictionary")

    # ######################################################################################################################
    # Setup extra variables and parameters
    # ======================================================================================================================
    if plot:
        set_global_rcParams(parameter_set.kernel_pars['mpl_path'])
    paths = set_storage_locations(parameter_set, save)

    np.random.seed(parameter_set.kernel_pars['np_seed'])
    results = dict()

    # ######################################################################################################################
    # Set kernel and simulation parameters
    # ======================================================================================================================
    print('\nRunning ParameterSet {0}'.format(parameter_set.label))
    nest.ResetKernel()
    nest.set_verbosity('M_WARNING')
    nest.SetKernelStatus(
        extract_nestvalid_dict(parameter_set.kernel_pars.as_dict(),
                               param_type='kernel'))

    # ######################################################################################################################
    # Build network
    # ======================================================================================================================
    net = Network(parameter_set.net_pars)

    # ######################################################################################################################
    # Randomize initial variable values
    # ======================================================================================================================
    for idx, n in enumerate(list(iterate_obj_list(net.populations))):
        if hasattr(parameter_set.net_pars, "randomize_neuron_pars"):
            randomize = parameter_set.net_pars.randomize_neuron_pars[idx]
            for k, v in randomize.items():
                n.randomize_initial_states(k,
                                           randomization_function=v[0],
                                           **v[1])

    # ######################################################################################################################
    # Build and connect input
    # ======================================================================================================================
    # Poisson input
    enc_layer = EncodingLayer(parameter_set.encoding_pars)
    enc_layer.connect(parameter_set.encoding_pars, net)

    # ######################################################################################################################
    # Set-up Analysis
    # ======================================================================================================================
    net.connect_devices()

    # ######################################################################################################################
    # Connect Network
    # ======================================================================================================================
    net.connect_populations(parameter_set.connection_pars, progress=True)

    # ######################################################################################################################
    # Simulate
    # ======================================================================================================================
    if parameter_set.kernel_pars.transient_t:
        net.simulate(parameter_set.kernel_pars.transient_t)
        net.flush_records()

    net.simulate(parameter_set.kernel_pars.sim_time)

    # ######################################################################################################################
    # Extract and store data
    # ======================================================================================================================
    net.extract_population_activity()
    net.extract_network_activity()
    net.flush_records()

    # ######################################################################################################################
    # Analyse / plot data
    # ======================================================================================================================
    analysis_interval = [
        parameter_set.kernel_pars.transient_t,
        parameter_set.kernel_pars.sim_time +
        parameter_set.kernel_pars.transient_t
    ]
    parameter_set.analysis_pars.pop('label')
    start_analysis = time.time()
    results.update(
        characterize_population_activity(
            net,
            parameter_set,
            analysis_interval,
            epochs=None,
            color_map='jet',
            plot=plot,
            display=display,
            save=paths['figures'] + paths['label'],
            color_subpop=True,
            analysis_pars=parameter_set.analysis_pars))
    print("\nElapsed time (state characterization): {0}".format(
        str(time.time() - start_analysis)))

    # ######################################################################################################################
    # Save data
    # ======================================================================================================================
    if save:
        with open(paths['results'] + 'Results_' + parameter_set.label,
                  'w') as f:
            pickle.dump(results, f)
        parameter_set.save(paths['parameters'] + 'Parameters_' +
                           parameter_set.label)
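
A typical invocation, mirroring the standalone scripts in the later examples (the parameter file path is illustrative):

params_file = '../parameters/noise_driven_dynamics.py'  # illustrative path
parameter_set = ParameterSpace(params_file)[0]
parameter_set = parameter_set.clean(termination='pars')
run(parameter_set, plot=True, display=True, save=True)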
Example #5
def run(parameter_set,
        plot=False,
        display=False,
        save=True,
        debug=False,
        online=True):
    """

    :param parameter_set:
    :param plot:
    :param display:
    :param save:
    :param debug:
    :param online:
    :return:
    """
    if not isinstance(parameter_set, ParameterSet):
        if isinstance(parameter_set, basestring) or isinstance(
                parameter_set, dict):
            parameter_set = ParameterSet(parameter_set)
        else:
            raise TypeError(
                "parameter_set must be ParameterSet, string with full path to parameter file or "
                "dictionary")

    # ##################################################################################################################
    # Setup extra variables and parameters
    # ==================================================================================================================
    if plot:
        set_global_rcParams(parameter_set.kernel_pars['mpl_path'])
    paths = io.set_storage_locations(parameter_set, save)

    np.random.seed(parameter_set.kernel_pars['np_seed'])

    # ##################################################################################################################
    # Set kernel and simulation parameters
    # ==================================================================================================================
    print('\nRunning ParameterSet {0}'.format(parameter_set.label))
    nest.ResetKernel()
    nest.set_verbosity('M_WARNING')
    nest.SetKernelStatus(
        extract_nestvalid_dict(parameter_set.kernel_pars.as_dict(),
                               param_type='kernel'))

    # ##################################################################################################################
    # Build network
    # ==================================================================================================================
    net = Network(parameter_set.net_pars)
    net.merge_subpopulations([net.populations[0], net.populations[1]],
                             name='EI')  # merge for EI case

    # ##################################################################################################################
    # Randomize initial variable values
    # ==================================================================================================================
    for idx, n in enumerate(list(iterate_obj_list(net.populations))):
        if hasattr(parameter_set.net_pars, "randomize_neuron_pars"):
            randomize = parameter_set.net_pars.randomize_neuron_pars[idx]
            for k, v in randomize.items():
                n.randomize_initial_states(k,
                                           randomization_function=v[0],
                                           **v[1])

    # ##################################################################################################################
    # Build Stimulus/Target datasets
    # ==================================================================================================================
    io.log_timer.start('stimulus_sets')

    stim_set = StimulusSet(parameter_set, unique_set=False)
    stim_set.generate_datasets(parameter_set.stim_pars)

    target_set = StimulusSet(parameter_set,
                             unique_set=False)  # for identity task.
    output_sequence = list(itertools.chain(*stim_set.full_set_labels))
    target_set.generate_datasets(parameter_set.stim_pars,
                                 external_sequence=output_sequence)

    # correct N for small sequences
    # parameter_set.input_pars.signal.N = len(np.unique(stim_set.full_set_labels))

    io.log_timer.stop('stimulus_sets')
    # ##################################################################################################################
    # Build Input Signal Sets
    # ==================================================================================================================
    io.log_timer.start('input_sets')

    inputs = InputSignalSet(parameter_set, stim_set, online=online)
    inputs.generate_datasets(stim_set)

    io.log_timer.stop('input_sets')

    parameter_set.kernel_pars.sim_time = inputs.train_stimulation_time + inputs.test_stimulation_time

    # Plot example signal
    if plot and debug and not online:
        plot_input_example(stim_set,
                           inputs,
                           set_name='test',
                           display=display,
                           save=paths['figures'] + paths['label'])
    if save:
        stim_set.save(paths['inputs'])
        if debug:
            inputs.save(paths['inputs'])

    # ##################################################################################################################
    # Encode Input
    # ==================================================================================================================
    io.log_timer.start('encoding_layer')

    enc_layer = EncodingLayer(parameter_set.encoding_pars,
                              signal=inputs.full_set_signal,
                              online=online)
    enc_layer.connect(parameter_set.encoding_pars, net)
    enc_layer.extract_connectivity(net, sub_set=True, progress=False)

    io.log_timer.stop('encoding_layer')
    # ##################################################################################################################
    # Connect Network
    # ==================================================================================================================
    io.log_timer.start('connection_setup')

    net.connect_populations(parameter_set.connection_pars)

    # ##################################################################################################################
    # Set-up Analysis
    # ==================================================================================================================
    net.connect_devices()
    if hasattr(parameter_set, "decoding_pars"):
        set_decoder_times(
            enc_layer, parameter_set.input_pars, parameter_set.encoding_pars,
            parameter_set.decoding_pars)  # iff using the fast sampling method!
        net.connect_decoders(parameter_set.decoding_pars)

    # Attach decoders to input encoding populations
    if not empty(enc_layer.encoders) and hasattr(parameter_set.encoding_pars, "input_decoder") and \
            parameter_set.encoding_pars.input_decoder is not None:
        enc_layer.connect_decoders(parameter_set.encoding_pars.input_decoder)

    io.log_timer.stop('connection_setup')
    # ##################################################################################################################
    # Run Simulation (full sequence)
    # ==================================================================================================================
    # fast state sampling
    io.log_timer.start('process_sequence')

    epochs, timing = process_input_sequence(parameter_set,
                                            net, [enc_layer], [stim_set],
                                            [inputs],
                                            set_name='full',
                                            record=True)

    io.log_timer.stop('process_sequence')
    # ##################################################################################################################
    # Process data
    # ==================================================================================================================
    io.log_timer.start('process_data')

    target_matrix = dict(EI=np.array(target_set.full_set.todense()))

    results = process_states(net,
                             target_matrix,
                             stim_set,
                             data_sets=None,
                             save=save,
                             accepted_idx=None,
                             plot=plot,
                             display=display,
                             save_paths=paths)

    results.update({'timing_info': timing, 'epochs': epochs})
    processed_results = dict()
    for ctr, n_pop in enumerate(
            list(
                itertools.chain(*[
                    net.merged_populations, net.populations, enc_layer.encoders
                ]))):
        if n_pop.decoding_layer is not None:
            processed_results.update({n_pop.name: {}})
            dec_layer = n_pop.decoding_layer
            for idx_var, var in enumerate(dec_layer.state_variables):
                processed_results[n_pop.name].update({
                    var:
                    compile_performance_results(dec_layer.readouts[idx_var],
                                                var)
                })

                # The labels are often not properly ordered
                readout_labels = processed_results[n_pop.name][var]['labels']
                all_indices = np.array([
                    int(''.join(c for c in x if c.isdigit()))
                    for x in readout_labels
                ])
                ordered_indices = np.argsort(all_indices)

                # Extract and plot example results
                if plot:
                    ordered_accuracy = processed_results[
                        n_pop.name][var]['accuracy'][ordered_indices]
                    fig, ax = pl.subplots()
                    ax.plot(all_indices[ordered_indices],
                            ordered_accuracy,
                            'o-',
                            lw=2)
                    ax.plot(
                        all_indices[ordered_indices],
                        np.ones_like(ordered_accuracy) * 1. /
                        parameter_set.stim_pars.n_stim, '--r')
                    ax.set_xlabel(r'$lag [n]$')
                    ax.set_ylabel(r'Accuracy')
                    if display:
                        pl.show(False)
                    if save:
                        fig.savefig(paths['figures'] + paths['label'])
    results.update({'processed_results': processed_results})

    io.log_timer.stop('process_data')
    # ##################################################################################################################
    # Save data
    # ==================================================================================================================
    if save:
        with open(paths['results'] + 'Results_' + parameter_set.label,
                  'w') as f:
            pickle.dump(results, f)
        parameter_set.save(paths['parameters'] + 'Parameters_' +
                           parameter_set.label)
Example #6
def run(parameter_set, plot=False, display=False, save=True):
    """
    Compute single neuron fI curves
    :param parameter_set: must be consistent with the computation
    :param plot: plot results - either show them or save to file
    :param display: show figures/reports
    :param save: save results
    :return results_dictionary:
    """
    if not isinstance(parameter_set, ParameterSet):
        if isinstance(parameter_set, basestring) or isinstance(
                parameter_set, dict):
            parameter_set = ParameterSet(parameter_set)
        else:
            raise TypeError(
                "parameter_set must be ParameterSet, string with full path to parameter file or "
                "dictionary")

    # ##################################################################################################################
    # Setup extra variables and parameters
    # ==================================================================================================================
    if plot:
        vis.set_global_rcParams(parameter_set.kernel_pars['mpl_path'])
    paths = set_storage_locations(parameter_set, save)

    np.random.seed(parameter_set.kernel_pars['np_seed'])

    # ##################################################################################################################
    # Set kernel and simulation parameters
    # ==================================================================================================================
    print('\nRunning ParameterSet {0}'.format(parameter_set.label))
    nest.ResetKernel()
    nest.set_verbosity('M_WARNING')
    nest.SetKernelStatus(
        extract_nestvalid_dict(parameter_set.kernel_pars.as_dict(),
                               param_type='kernel'))

    # ##################################################################################################################
    # Build network
    # ==================================================================================================================
    net = Network(parameter_set.net_pars)

    # ##################################################################################################################
    # Randomize initial variable values
    # ==================================================================================================================
    for idx, n in enumerate(list(iterate_obj_list(net.populations))):
        if hasattr(parameter_set.net_pars, "randomize_neuron_pars"):
            randomize = parameter_set.net_pars.randomize_neuron_pars[idx]
            for k, v in randomize.items():
                n.randomize_initial_states(k,
                                           randomization_function=v[0],
                                           **v[1])

    ####################################################################################################################
    # Build Input Signal Sets
    # ==================================================================================================================
    assert hasattr(parameter_set, "input_pars")

    total_stimulation_time = parameter_set.kernel_pars.sim_time + parameter_set.kernel_pars.transient_t

    # Current input (need to build 2 separate noise signals for the 2 input channels)
    # Generate input for channel 1
    input_noise_ch1 = InputNoise(parameter_set.input_pars.noise,
                                 rng=np.random,
                                 stop_time=total_stimulation_time)
    input_noise_ch1.generate()
    input_noise_ch1.re_seed(parameter_set.kernel_pars.np_seed)

    # Generate input for channel 2
    input_noise_ch2 = InputNoise(parameter_set.input_pars.noise,
                                 rng=np.random,
                                 stop_time=total_stimulation_time)
    input_noise_ch2.generate()
    input_noise_ch2.re_seed(parameter_set.kernel_pars.np_seed)

    if plot:
        inp_plot = vis.InputPlots(stim_obj=None,
                                  input_obj=None,
                                  noise_obj=input_noise_ch1)
        inp_plot.plot_noise_component(display=display,
                                      save=paths['figures'] +
                                      "/InputNoise_CH1")

        inp_plot = vis.InputPlots(stim_obj=None,
                                  input_obj=None,
                                  noise_obj=input_noise_ch2)
        inp_plot.plot_noise_component(display=display,
                                      save=paths['figures'] +
                                      "/InputNoise_CH2")

    # ##################################################################################################################
    # Build and connect input
    # ==================================================================================================================
    enc_layer_ch1 = EncodingLayer(parameter_set.encoding_ch1_pars,
                                  signal=input_noise_ch1)
    enc_layer_ch1.connect(parameter_set.encoding_ch1_pars, net)

    enc_layer_ch2 = EncodingLayer(parameter_set.encoding_ch2_pars,
                                  signal=input_noise_ch2)
    enc_layer_ch2.connect(parameter_set.encoding_ch2_pars, net)

    # ##################################################################################################################
    # Connect Devices
    # ==================================================================================================================
    net.connect_devices()

    # ##################################################################################################################
    # Simulate
    # ==================================================================================================================
    if parameter_set.kernel_pars.transient_t:
        net.simulate(parameter_set.kernel_pars.transient_t)
        net.flush_records()

    net.simulate(parameter_set.kernel_pars.sim_time +
                 nest.GetKernelStatus()['resolution'])

    # ##################################################################################################################
    # Extract and store data
    # ==================================================================================================================
    net.extract_population_activity(
        t_start=parameter_set.kernel_pars.transient_t,
        t_stop=parameter_set.kernel_pars.sim_time +
        parameter_set.kernel_pars.transient_t)
    net.extract_network_activity()

    # ##################################################################################################################
    # Analyse / plot data
    # ==================================================================================================================
    results = dict()

    analysis_interval = [
        parameter_set.kernel_pars.transient_t,
        parameter_set.kernel_pars.transient_t +
        parameter_set.kernel_pars.sim_time
    ]
    for idd, nam in enumerate(net.population_names):
        results.update({nam: {}})
        results[nam] = single_neuron_responses(net.populations[idd],
                                               parameter_set,
                                               pop_idx=idd,
                                               start=analysis_interval[0],
                                               stop=analysis_interval[1],
                                               plot=plot,
                                               display=display,
                                               save=paths['figures'] +
                                               paths['label'])
        if results[nam]['rate']:
            print('Output Rate [{0}] = {1} spikes/s'.format(
                str(nam), str(results[nam]['rate'])))

    # ##################################################################################################################
    # Save data
    # ==================================================================================================================
    if save:
        with open(paths['results'] + 'Results_' + parameter_set.label,
                  'w') as f:
            pickle.dump(results, f)
        parameter_set.save(paths['parameters'] + 'Parameters_' +
                           parameter_set.label)
Example #7
def __initialize_test_data(params_file_):
    plot = False
    display = True
    save = True

    # ##################################################################################################################
    # Extract parameters from file and build global ParameterSet
    # ==================================================================================================================
    parameter_set = ParameterSpace(params_file_)[0]
    parameter_set = parameter_set.clean(termination='pars')

    if not isinstance(parameter_set, ParameterSet):
        if isinstance(parameter_set, basestring) or isinstance(
                parameter_set, dict):
            parameter_set = ParameterSet(parameter_set)
        else:
            raise TypeError(
                "parameter_set must be ParameterSet, string with full path to parameter file or dictionary"
            )

    # ######################################################################################################################
    # Setup extra variables and parameters
    # ======================================================================================================================
    if plot:
        import modules.visualization as vis
        vis.set_global_rcParams(parameter_set.kernel_pars['mpl_path'])
    paths = set_storage_locations(parameter_set, save)

    np.random.seed(parameter_set.kernel_pars['np_seed'])
    results = dict()

    # ######################################################################################################################
    # Set kernel and simulation parameters
    # ======================================================================================================================
    print('\nRunning ParameterSet {0}'.format(parameter_set.label))
    nest.ResetKernel()
    nest.set_verbosity('M_WARNING')
    nest.SetKernelStatus(
        extract_nestvalid_dict(parameter_set.kernel_pars.as_dict(),
                               param_type='kernel'))

    # ######################################################################################################################
    # Build network
    # ======================================================================================================================
    net = Network(parameter_set.net_pars)

    # ######################################################################################################################
    # Randomize initial variable values
    # ======================================================================================================================
    for idx, n in enumerate(list(iterate_obj_list(net.populations))):
        if hasattr(parameter_set.net_pars, "randomize_neuron_pars"):
            randomize = parameter_set.net_pars.randomize_neuron_pars[idx]
            for k, v in randomize.items():
                n.randomize_initial_states(k,
                                           randomization_function=v[0],
                                           **v[1])

    # ######################################################################################################################
    # Build and connect input
    # ======================================================================================================================
    enc_layer = EncodingLayer(parameter_set.encoding_pars)
    enc_layer.connect(parameter_set.encoding_pars, net)

    # ######################################################################################################################
    # Set-up Analysis
    # ======================================================================================================================
    net.connect_devices()

    # ######################################################################################################################
    # Simulate
    # ======================================================================================================================
    if parameter_set.kernel_pars.transient_t:
        net.simulate(parameter_set.kernel_pars.transient_t)
        net.flush_records()

    net.simulate(parameter_set.kernel_pars.sim_time +
                 nest.GetKernelStatus()['resolution'])

    # ######################################################################################################################
    # Extract and store data
    # ======================================================================================================================
    net.extract_population_activity(
        t_start=parameter_set.kernel_pars.transient_t +
        nest.GetKernelStatus()['resolution'],
        t_stop=parameter_set.kernel_pars.sim_time +
        parameter_set.kernel_pars.transient_t)
    net.extract_network_activity()
    net.flush_records()

    # ######################################################################################################################
    # Analyse / plot data
    # ======================================================================================================================
    analysis_interval = [
        parameter_set.kernel_pars.transient_t +
        nest.GetKernelStatus()['resolution'],
        parameter_set.kernel_pars.sim_time +
        parameter_set.kernel_pars.transient_t
    ]

    for idd, nam in enumerate(net.population_names):
        results.update({nam: {}})
        results[nam] = single_neuron_dcresponse(net.populations[idd],
                                                parameter_set,
                                                start=analysis_interval[0],
                                                stop=analysis_interval[1],
                                                plot=plot,
                                                display=display,
                                                save=paths['figures'] +
                                                paths['label'])
        idx = np.min(np.where(results[nam]['output_rate']))

        print "Rate range for neuron {0} = [{1}, {2}] Hz".format(
            str(nam),
            str(
                np.min(results[nam]['output_rate'][
                    results[nam]['output_rate'] > 0.])),
            str(
                np.max(results[nam]['output_rate'][
                    results[nam]['output_rate'] > 0.])))
        results[nam].update({
            'min_rate':
            np.min(
                results[nam]['output_rate'][results[nam]['output_rate'] > 0.]),
            'max_rate':
            np.max(
                results[nam]['output_rate'][results[nam]['output_rate'] > 0.])
        })
        print "Rheobase Current for neuron {0} in [{1}, {2}]".format(
            str(nam), str(results[nam]['input_amplitudes'][idx - 1]),
            str(results[nam]['input_amplitudes'][idx]))

        x = np.array(results[nam]['input_amplitudes'])
        y = np.array(results[nam]['output_rate'])
        iddxs = np.where(y)
        slope, intercept, r_value, p_value, std_err = stats.linregress(
            x[iddxs], y[iddxs])
        print "fI Slope for neuron {0} = {1} Hz/nA [linreg method]".format(
            nam, str(slope * 1000.))

        results[nam].update({
            'fI_slope':
            slope * 1000.,
            'I_rh': [
                results[nam]['input_amplitudes'][idx - 1],
                results[nam]['input_amplitudes'][idx]
            ]
        })

    data = dict()

    data['connections_from'] = {
        pop.name: nest.GetConnections(source=pop.gids)
        for (idx, pop) in enumerate(net.populations)
    }
    data['connections_to'] = {
        pop.name: nest.GetConnections(target=pop.gids)
        for (idx, pop) in enumerate(net.populations)
    }
    data['results'] = results

    return data
Example #8
def __initialize_test_data(params_file_):
    plot = False
    display = True
    save = True

    # ##################################################################################################################
    # Extract parameters from file and build global ParameterSet
    # ==================================================================================================================
    parameter_set = ParameterSpace(params_file_)[0]
    parameter_set = parameter_set.clean(termination='pars')

    if not isinstance(parameter_set, ParameterSet):
        if isinstance(parameter_set, basestring) or isinstance(
                parameter_set, dict):
            parameter_set = ParameterSet(parameter_set)
        else:
            raise TypeError(
                "parameter_set must be ParameterSet, string with full path to parameter file or dictionary"
            )

    # ##################################################################################################################
    # Setup extra variables and parameters
    # ==================================================================================================================
    if plot:
        set_global_rcParams(parameter_set.kernel_pars['mpl_path'])
    paths = set_storage_locations(parameter_set, save)

    np.random.seed(parameter_set.kernel_pars['np_seed'])

    # ##################################################################################################################
    # Set kernel and simulation parameters
    # ==================================================================================================================
    print('\nRunning ParameterSet {0}'.format(parameter_set.label))
    nest.ResetKernel()
    nest.set_verbosity('M_WARNING')
    nest.SetKernelStatus(
        extract_nestvalid_dict(parameter_set.kernel_pars.as_dict(),
                               param_type='kernel'))

    # ##################################################################################################################
    # Build network
    # ==================================================================================================================
    net = Network(parameter_set.net_pars)

    # ##################################################################################################################
    # Randomize initial variable values
    # ==================================================================================================================
    for idx, n in enumerate(list(iterate_obj_list(net.populations))):
        if hasattr(parameter_set.net_pars, "randomize_neuron_pars"):
            randomize = parameter_set.net_pars.randomize_neuron_pars[idx]
            for k, v in randomize.items():
                n.randomize_initial_states(k,
                                           randomization_function=v[0],
                                           **v[1])

    ####################################################################################################################
    # Build Input Signal Sets
    # ==================================================================================================================
    assert hasattr(parameter_set, "input_pars")

    total_stimulation_time = parameter_set.kernel_pars.sim_time + parameter_set.kernel_pars.transient_t

    # Current input (need to build 2 separate noise signals for the 2 input channels)
    # Generate input for channel 1
    input_noise_ch1 = InputNoise(parameter_set.input_pars.noise,
                                 rng=np.random,
                                 stop_time=total_stimulation_time)
    input_noise_ch1.generate()
    input_noise_ch1.re_seed(parameter_set.kernel_pars.np_seed)

    # Generate input for channel 2
    input_noise_ch2 = InputNoise(parameter_set.input_pars.noise,
                                 rng=np.random,
                                 stop_time=total_stimulation_time)
    input_noise_ch2.generate()
    input_noise_ch2.re_seed(parameter_set.kernel_pars.np_seed)

    if plot:
        inp_plot = InputPlots(stim_obj=None,
                              input_obj=None,
                              noise_obj=input_noise_ch1)
        inp_plot.plot_noise_component(display=display,
                                      save=paths['figures'] +
                                      "/InputNoise_CH1")

        inp_plot = InputPlots(stim_obj=None,
                              input_obj=None,
                              noise_obj=input_noise_ch2)
        inp_plot.plot_noise_component(display=display,
                                      save=paths['figures'] +
                                      "/InputNoise_CH2")

    # ##################################################################################################################
    # Build and connect input
    # ==================================================================================================================
    enc_layer_ch1 = EncodingLayer(parameter_set.encoding_ch1_pars,
                                  signal=input_noise_ch1)
    enc_layer_ch1.connect(parameter_set.encoding_ch1_pars, net)

    enc_layer_ch2 = EncodingLayer(parameter_set.encoding_ch2_pars,
                                  signal=input_noise_ch2)
    enc_layer_ch2.connect(parameter_set.encoding_ch2_pars, net)

    # ##################################################################################################################
    # Connect Devices
    # ==================================================================================================================
    net.connect_devices()

    # ##################################################################################################################
    # Simulate
    # ==================================================================================================================
    if parameter_set.kernel_pars.transient_t:
        net.simulate(parameter_set.kernel_pars.transient_t)
        net.flush_records()

    net.simulate(parameter_set.kernel_pars.sim_time +
                 nest.GetKernelStatus()['resolution'])

    # ##################################################################################################################
    # Extract and store data
    # ==================================================================================================================
    net.extract_population_activity(
        t_start=parameter_set.kernel_pars.transient_t,
        t_stop=parameter_set.kernel_pars.sim_time +
        parameter_set.kernel_pars.transient_t)
    net.extract_network_activity()

    # ##################################################################################################################
    # Analyse / plot data
    # ==================================================================================================================
    analysis_interval = [
        parameter_set.kernel_pars.transient_t,
        parameter_set.kernel_pars.transient_t +
        parameter_set.kernel_pars.sim_time
    ]

    results = dict()

    for idd, nam in enumerate(net.population_names):
        results.update({nam: {}})
        results[nam] = single_neuron_responses(net.populations[idd],
                                               parameter_set,
                                               pop_idx=idd,
                                               start=analysis_interval[0],
                                               stop=analysis_interval[1],
                                               plot=plot,
                                               display=display,
                                               save=paths['figures'] +
                                               paths['label'])
        if results[nam]['rate']:
            print('Output Rate [{0}] = {1} spikes/s'.format(
                str(nam), str(results[nam]['rate'])))

    # ######################################################################################################################
    # Save data
    # ======================================================================================================================
    data = dict()

    data['connections_from'] = {
        pop.name: nest.GetConnections(source=pop.gids)
        for (idx, pop) in enumerate(net.populations)
    }
    data['connections_to'] = {
        pop.name: nest.GetConnections(target=pop.gids)
        for (idx, pop) in enumerate(net.populations)
    }
    data['results'] = results

    data['input'] = {
        'channel1': input_noise_ch1.noise_signal.analog_signals[.0].signal,
        'channel2': input_noise_ch2.noise_signal.analog_signals[.0].signal
    }

    data['network'] = {
        'populations': {
            net.populations[0].name: net.populations[0]
        }
    }

    return data
Example #9
plot = True  # assumed value; 'plot' is referenced below to gate plotting
display = True
save = True
debug = False

# ######################################################################################################################
# Extract parameters from file and build global ParameterSet
# ======================================================================================================================
params_file = '../parameters/noise_driven_dynamics.py'

parameter_set = ParameterSpace(params_file)[0]
parameter_set = parameter_set.clean(termination='pars')

if not isinstance(parameter_set, ParameterSet):
    if isinstance(parameter_set, basestring) or isinstance(
            parameter_set, dict):
        parameter_set = ParameterSet(parameter_set)
    else:
        raise TypeError(
            "parameter_set must be ParameterSet, string with full path to parameter file or dictionary"
        )

# ######################################################################################################################
# Setup extra variables and parameters
# ======================================================================================================================
if plot:
    set_global_rcParams(parameter_set.kernel_pars['mpl_path'])
paths = set_storage_locations(parameter_set, save)

np.random.seed(parameter_set.kernel_pars['np_seed'])
results = dict()
Example #10
def build_parameters(max_current):
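    # NOTE: data_label, system_name and the paths dictionary are referenced below
    # and are expected to be defined at module level before this function is used.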
    # ##################################################################################################################
    # DC input parameters
    # ==================================================================================================================
    sim_res = 0.1
    total_time = 10000. + sim_res  # total simulation time [ms]
    analysis_interval = 1000.  # duration of each current step [ms]
    min_current = 0.  # initial current amplitude [pA]
    # max_current = 600.            # final current amplitude [pA]

    # specify input times and input amplitudes
    times = list(np.arange(0.1, total_time, analysis_interval))
    amplitudes = list(np.linspace(min_current, max_current, len(times)))

    # ##################################################################################################################
    # System / Kernel Parameters
    # ##################################################################################################################
    # system-specific parameters (resource allocation, simulation times)
    system_pars = dict(nodes=1,
                       ppn=8,
                       mem=32,
                       walltime='00:20:00:00',
                       queue='singlenode',
                       transient_time=0.,
                       sim_time=total_time)

    # seeds for rngs
    N_vp = system_pars['nodes'] * system_pars['ppn']
    np_seed = np.random.randint(1000000000) + 1
    np.random.seed(np_seed)
    msd = np.random.randint(100000000000)

    # main kernel parameter set
    kernel_pars = ParameterSet({
        'resolution':
        sim_res,
        'sim_time':
        total_time,
        'transient_t':
        0.,
        'data_prefix':
        data_label,
        'data_path':
        paths[system_name]['data_path'],
        'mpl_path':
        paths[system_name]['matplotlib_rc'],
        'overwrite_files':
        True,
        'print_time': (system_name == 'local'),
        'rng_seeds':
        range(msd + N_vp + 1, msd + 2 * N_vp + 1),
        'grng_seed':
        msd + N_vp,
        'total_num_virtual_procs':
        N_vp,
        'local_num_threads':
        16,
        'np_seed':
        np_seed,
        'system': {
            'local': (system_name == 'local'),
            'system_label': system_name,
            'queueing_system': paths[system_name]['queueing_system'],
            'jdf_template': paths[system_name]['jdf_template'],
            'remote_directory': paths[system_name]['remote_directory'],
            'jdf_fields': {
                '{{ script_folder }}': '',
                '{{ nodes }}': str(system_pars['nodes']),
                '{{ ppn }}': str(system_pars['ppn']),
                '{{ mem }}': str(system_pars['mem']),
                '{{ walltime }}': system_pars['walltime'],
                '{{ queue }}': system_pars['queue'],
                '{{ computation_script }}': ''
            }
        }
    })
    # ##################################################################################################################
    # Recording devices
    # ##################################################################################################################
    multimeter = {
        'start': 0.,
        'stop': sys.float_info.max,
        'origin': 0.,
        'interval': 0.1,
        'record_to': ['memory'],
        'label': '',
        'model': 'multimeter',
        'close_after_simulate': False,
        'flush_after_simulate': False,
        'flush_records': False,
        'close_on_reset': True,
        'withtime': True,
        'withgid': True,
        'withweight': False,
        'time_in_steps': False,
        'scientific': False,
        'precision': 3,
        'binary': False,
    }

    # ##################################################################################################################
    # Neuron, Synapse and Network Parameters
    # ##################################################################################################################
    neuron_pars = {
        'AdEx': {
            'model': 'aeif_cond_exp',
            'C_m': 250.0,
            'Delta_T': 2.0,
            'E_L': -70.,
            'E_ex': 0.0,
            'E_in': -75.0,
            'I_e': 0.,
            'V_m': -70.,
            'V_th': -50.,
            'V_reset': -60.0,
            'V_peak': 0.0,
            'a': 4.0,
            'b': 80.5,
            'g_L': 16.7,
            'g_ex': 1.0,
            'g_in': 1.0,
            't_ref': 2.0,
            'tau_minus': 20.,
            'tau_minus_triplet': 200.,
            'tau_w': 144.0,
            'tau_syn_ex': 2.,
            'tau_syn_in': 6.0,
        }
    }

    multimeter.update({'record_from': ['V_m'], 'record_n': 1})
    pop_names = ['{0}'.format(str(n)) for n in neuron_pars.keys()]
    n_neurons = [1 for _ in neuron_pars.keys()]
    if len(neuron_pars.keys()) > 1:
        neuron_params = [neuron_pars[n] for n in neuron_pars.keys()]
    else:
        neuron_params = [neuron_pars[neuron_pars.keys()[0]]]

    net_pars = ParameterSet({
        'n_populations':
        len(neuron_pars.keys()),
        'pop_names':
        pop_names,
        'n_neurons':
        n_neurons,
        'neuron_pars':
        neuron_params,
        'randomize_neuron_pars': [{
            'V_m': (np.random.uniform, {
                'low': -70.,
                'high': -50.
            })
        }],
        'topology': [False for _ in neuron_pars.keys()],
        'topology_dict': [None for _ in neuron_pars.keys()],
        'record_spikes': [True for _ in neuron_pars.keys()],
        'spike_device_pars': [
            copy_dict(multimeter, {'model': 'spike_detector'})
            for _ in neuron_pars.keys()
        ],
        'record_analogs': [True for _ in neuron_pars.keys()],
        'analog_device_pars': [
            copy_dict(multimeter, {
                'record_from': ['V_m'],
                'record_n': 1
            }) for _ in neuron_pars.keys()
        ],
    })
    neuron_pars = ParameterSet(neuron_pars)

    # ##################################################################################################################
    # Input/Encoding Parameters
    # ##################################################################################################################
    connections = [('{0}'.format(str(n)), 'DC_Input')
                   for n in net_pars.pop_names]
    n_connections = len(connections)
    encoding_pars = ParameterSet({
        'generator': {
            'N':
            1,
            'labels': ['DC_Input'],
            'models': ['step_current_generator'],
            'model_pars': [{
                'start': 0.,
                'stop': kernel_pars['sim_time'],
                'origin': 0.,
                'amplitude_times': times,
                'amplitude_values': amplitudes
            }],
            'topology': [False],
            'topology_pars': [None]
        },
        'connectivity': {
            'connections': connections,
            'topology_dependent': [False for _ in range(n_connections)],
            'conn_specs': [{
                'rule': 'all_to_all'
            } for _ in range(n_connections)],
            'syn_specs': [{} for _ in range(n_connections)],
            'models': ['static_synapse' for _ in range(n_connections)],
            'model_pars': [{} for _ in range(n_connections)],
            'weight_dist': [1. for _ in range(n_connections)],
            'delay_dist': [0.1 for _ in range(n_connections)],
            'preset_W': [None for _ in range(n_connections)]
        },
    })

    # ##################################################################################################################
    # RETURN dictionary of Parameters dictionaries
    # ==================================================================================================================
    return dict([('kernel_pars', kernel_pars), ('neuron_pars', neuron_pars),
                 ('net_pars', net_pars), ('encoding_pars', encoding_pars)])
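
A usage sketch (this assumes data_label, system_name and paths are defined at module level, as noted above):

pars_dict = build_parameters(max_current=600.)
kernel_pars = pars_dict['kernel_pars']
encoding_pars = pars_dict['encoding_pars']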
Example #11
def run(parameter_set, plot=False, display=False, save=True):
    """
    Compute single neuron fI curves
    :param parameter_set: must be consistent with the computation
    :param plot: plot results - either show them or save to file
    :param display: show figures/reports
    :param save: save results
    :return results_dictionary:
    """
    if not isinstance(parameter_set, ParameterSet):
        if isinstance(parameter_set, basestring) or isinstance(
                parameter_set, dict):
            parameter_set = ParameterSet(parameter_set)
        else:
            raise TypeError(
                "parameter_set must be ParameterSet, string with full path to parameter file or "
                "dictionary")

    # ######################################################################################################################
    # Setup extra variables and parameters
    # ======================================================================================================================
    if plot:
        vis.set_global_rcParams(parameter_set.kernel_pars['mpl_path'])
    paths = set_storage_locations(parameter_set, save)

    np.random.seed(parameter_set.kernel_pars['np_seed'])
    results = dict()

    # ######################################################################################################################
    # Set kernel and simulation parameters
    # ======================================================================================================================
    print('\nRunning ParameterSet {0}'.format(parameter_set.label))
    nest.ResetKernel()
    nest.set_verbosity('M_WARNING')
    nest.SetKernelStatus(
        extract_nestvalid_dict(parameter_set.kernel_pars.as_dict(),
                               param_type='kernel'))

    # ######################################################################################################################
    # Build network
    # ======================================================================================================================
    net = Network(parameter_set.net_pars)

    # ######################################################################################################################
    # Randomize initial variable values
    # ======================================================================================================================
    for idx, n in enumerate(list(iterate_obj_list(net.populations))):
        if hasattr(parameter_set.net_pars, "randomize_neuron_pars"):
            randomize = parameter_set.net_pars.randomize_neuron_pars[idx]
            for k, v in randomize.items():
                n.randomize_initial_states(k,
                                           randomization_function=v[0],
                                           **v[1])

    # ######################################################################################################################
    # Build and connect input
    # ======================================================================================================================
    enc_layer = EncodingLayer(parameter_set.encoding_pars)
    enc_layer.connect(parameter_set.encoding_pars, net)

    # ######################################################################################################################
    # Set-up Analysis
    # ======================================================================================================================
    net.connect_devices()

    # ######################################################################################################################
    # Simulate
    # ======================================================================================================================
    if parameter_set.kernel_pars.transient_t:
        net.simulate(parameter_set.kernel_pars.transient_t)
        net.flush_records()

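    # simulate for one extra resolution step so that events occurring at t = sim_time
    # are still delivered to the recording devices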
    net.simulate(parameter_set.kernel_pars.sim_time +
                 nest.GetKernelStatus()['resolution'])

    # ######################################################################################################################
    # Extract and store data
    # ======================================================================================================================
    net.extract_population_activity(
        t_start=parameter_set.kernel_pars.transient_t +
        nest.GetKernelStatus()['resolution'],
        t_stop=parameter_set.kernel_pars.sim_time +
        parameter_set.kernel_pars.transient_t)
    net.extract_network_activity()
    net.flush_records()

    # ######################################################################################################################
    # Analyse / plot data
    # ======================================================================================================================
    analysis_interval = [
        parameter_set.kernel_pars.transient_t +
        nest.GetKernelStatus()['resolution'],
        parameter_set.kernel_pars.sim_time +
        parameter_set.kernel_pars.transient_t
    ]

    for idd, nam in enumerate(net.population_names):
        results.update({nam: {}})
        results[nam] = single_neuron_dcresponse(net.populations[idd],
                                                parameter_set,
                                                start=analysis_interval[0],
                                                stop=analysis_interval[1],
                                                plot=plot,
                                                display=display,
                                                save=paths['figures'] +
                                                paths['label'])
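        # index of the first input amplitude that elicits a non-zero output rate
        # (used below to bracket the rheobase current)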
        idx = np.min(np.where(results[nam]['output_rate']))

        print("Rate range for neuron {0} = [{1}, {2}] Hz".format(
            str(nam),
            str(
                np.min(results[nam]['output_rate'][
                    results[nam]['output_rate'] > 0.])),
            str(
                np.max(results[nam]['output_rate'][
                    results[nam]['output_rate'] > 0.]))))

        results[nam].update({
            'min_rate':
            np.min(
                results[nam]['output_rate'][results[nam]['output_rate'] > 0.]),
            'max_rate':
            np.max(
                results[nam]['output_rate'][results[nam]['output_rate'] > 0.])
        })
        print("Rheobase Current for neuron {0} in [{1}, {2}]".format(
            str(nam), str(results[nam]['input_amplitudes'][idx - 1]),
            str(results[nam]['input_amplitudes'][idx])))

        x = np.array(results[nam]['input_amplitudes'])
        y = np.array(results[nam]['output_rate'])
        iddxs = np.where(y)
        slope, intercept, r_value, p_value, std_err = stats.linregress(
            x[iddxs], y[iddxs])
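        # the regression slope is in Hz/pA (NEST current amplitudes are specified in pA);
        # multiplying by 1000 expresses it in Hz/nA, as reported below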
        print("fI Slope for neuron {0} = {1} Hz/nA [linreg method]".format(
            nam, str(slope * 1000.)))

        results[nam].update({
            'fI_slope':
            slope * 1000.,
            'I_rh': [
                results[nam]['input_amplitudes'][idx - 1],
                results[nam]['input_amplitudes'][idx]
            ]
        })

    # ######################################################################################################################
    # Save data
    # ======================================================================================================================
    if save:
        with open(paths['results'] + 'Results_' + parameter_set.label,
                  'w') as f:
            pickle.dump(results, f)
        parameter_set.save(paths['parameters'] + 'Parameters_' +
                           parameter_set.label)

    return results
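
# A minimal usage sketch (not part of the original source): run() accepts a ParameterSet,
# a plain dictionary, or a string with the full path to a parameter file. The path below
# is hypothetical.
run('./ParameterSets/single_neuron_fIcurve.py', plot=True, display=False, save=True)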
Exemplo n.º 12
0
def set_decoding_defaults(output_resolution=1.,
                          to_memory=True,
                          **decoder_pars):
    """

    :return:
    """
    keys = [
        'decoded_population', 'state_variable', 'filter_time', 'readouts',
        'readout_algorithms', 'sampling_times', 'reset_states',
        'average_states', 'standardize'
    ]
    if not all([n in decoder_pars.keys() for n in keys]) or len(decoder_pars['decoded_population']) != \
            len(decoder_pars['state_variable']):
        raise TypeError("Incorrect Decoder Parameters")

    dec_pars = ParameterSet(decoder_pars)
    n_decoders = len(dec_pars.decoded_population)
    if to_memory:
        rec_device = rec_device_defaults(start=0.,
                                         resolution=output_resolution)
    else:
        rec_device = rec_device_defaults(start=0.,
                                         resolution=output_resolution,
                                         record_to='file')
    state_specs = []
    for state_var in dec_pars.state_variable:
        if state_var == 'spikes':
            state_specs.append({
                'tau_m': dec_pars.filter_time,
                'interval': output_resolution
            })
        else:
            state_specs.append(
                copy_dict(
                    rec_device, {
                        'model': 'multimeter',
                        'record_n': None,
                        'record_from': [state_var]
                    }))

    if 'N' in decoder_pars.keys():
        N = decoder_pars['N']
    else:
        N = len(dec_pars.readouts)
    if len(dec_pars.readout_algorithms) == N:
        readouts = [{
            'N': N,
            'labels': dec_pars.readouts,
            'algorithm': dec_pars.readout_algorithms
        } for _ in range(n_decoders)]
    else:
        readouts = [{
            'N': N,
            'labels': dec_pars.readouts,
            'algorithm': [dec_pars.readout_algorithms[0]]
        } for _ in range(n_decoders)]

    decoding_pars = {
        'state_extractor': {
            'N': n_decoders,
            'filter_tau': dec_pars.filter_time,
            'source_population': dec_pars.decoded_population,
            'state_variable': dec_pars.state_variable,
            'state_specs': state_specs,
            'reset_states': dec_pars.reset_states,
            'average_states': dec_pars.average_states,
            'standardize': dec_pars.standardize
        },
        'readout': readouts,
        'sampling_times': dec_pars.sampling_times,
        'output_resolution': output_resolution
    }
    return ParameterSet(decoding_pars)
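
# A minimal usage sketch (illustrative values, not from the source): one spike-based
# state extractor decoding a population labelled 'E', read out by a single readout.
# The population label and the 'ridge' algorithm name are assumptions.
decoder_pars = {
    'decoded_population': ['E'],
    'state_variable': ['spikes'],
    'filter_time': 20.,
    'readouts': ['readout_0'],
    'readout_algorithms': ['ridge'],
    'sampling_times': None,
    'reset_states': [False],
    'average_states': [False],
    'standardize': [False]
}
input_decoder = set_decoding_defaults(output_resolution=0.1, to_memory=True, **decoder_pars)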
Exemplo n.º 13
0
def set_encoding_defaults(default_set=1,
                          input_dimensions=1,
                          n_encoding_neurons=0,
                          encoder_neuron_pars=None,
                          gen_label=None,
                          **synapse_pars):
    """

    :param default_set:
    :return:
    """
    if default_set == 0:
        print(
            "\nLoading Default Encoding Set 0 - Empty Settings (add background noise)"
        )
        encoding_pars = {
            'encoder': {
                'N': 0,
                'labels': [],
                'models': [],
                'model_pars': [],
                'n_neurons': [],
                'neuron_pars': [],
                'topology': [],
                'topology_dict': [],
                'record_spikes': [],
                'spike_device_pars': [],
                'record_analogs': [],
                'analog_device_pars': []
            },
            'generator': {
                'N': 0,
                'labels': [],
                'models': [],
                'model_pars': [],
                'topology': [],
                'topology_pars': []
            },
            'connectivity': {
                'synapse_name': [],
                'connections': [],
                'topology_dependent': [],
                'conn_specs': [],
                'syn_specs': [],
                'models': [],
                'model_pars': [],
                'weight_dist': [],
                'delay_dist': [],
                'preset_W': []
            },
            'input_decoder': None
        }

    elif default_set == 1:
        # ###################################################################
        # Encoding Type 1 - DC injection to target populations
        # ###################################################################
        if gen_label is None:
            gen_label = 'DC_input'
        keys = [
            'target_population_names', 'conn_specs', 'syn_specs', 'models',
            'model_pars', 'weight_dist', 'delay_dist', 'preset_W'
        ]
        if not all(n in synapse_pars for n in keys):
            raise TypeError("Incorrect Synapse Parameters")
        syn_pars = ParameterSet(synapse_pars)
        # n_connections = len(syn_pars.target_population_names)
        connections = [(n, gen_label)
                       for n in syn_pars.target_population_names]
        synapse_names = [
            gen_label + 'syn' for _ in syn_pars.target_population_names
        ]
        print("\nLoading Default Encoding Set 1 - DC input to {0}".format(
            str(syn_pars.target_population_names)))
        encoding_pars = {
            'encoder': {
                'N': 0,
                'labels': [],
                'models': [],
                'model_pars': [],
                'n_neurons': [],
                'neuron_pars': [],
                'topology': [],
                'topology_dict': [],
                'record_spikes': [],
                'spike_device_pars': [],
                'record_analogs': [],
                'analog_device_pars': []
            },
            'generator': {
                'N':
                input_dimensions,
                'labels': [gen_label],
                'models': ['step_current_generator'],
                'model_pars': [{
                    'start': 0.,
                    'stop': sys.float_info.max,
                    'origin': 0.
                }],
                'topology': [False for _ in range(input_dimensions)],
                'topology_pars': [None for _ in range(input_dimensions)]
            },
            'connectivity': {
                'synapse_name': synapse_names,
                'connections': connections,
                'topology_dependent': [False for _ in range(len(connections))],
                'conn_specs': syn_pars.conn_specs,
                'syn_specs': syn_pars.syn_specs,
                'models': syn_pars.models,
                'model_pars': syn_pars.model_pars,
                'weight_dist': syn_pars.weight_dist,
                'delay_dist': syn_pars.delay_dist,
                'preset_W': syn_pars.preset_W
            },
            'input_decoder': None
        }

    elif default_set == 2:
        # ###################################################################
        # Encoding Type 2 - Deterministic spike encoding layer
        # ###################################################################
        rec_devices = rec_device_defaults()
        enc_label = 'NEF'
        keys = [
            'target_population_names', 'conn_specs', 'syn_specs', 'models',
            'model_pars', 'weight_dist', 'delay_dist', 'preset_W'
        ]
        if not all(n in synapse_pars for n in keys):
            raise TypeError("Incorrect Synapse Parameters")
        syn_pars = ParameterSet(synapse_pars)
        # n_connections = len(syn_pars.target_population_names) + 1
        labels = [
            enc_label + '{0}'.format(str(n)) for n in range(input_dimensions)
        ]
        connections = [(n, enc_label)
                       for n in syn_pars.target_population_names]
        conn_specs = syn_pars.conn_specs
        conn_specs.insert(0, {'rule': 'all_to_all'})
        synapse_names = [
            enc_label + '_' + n for n in syn_pars.target_population_names
        ]
        connections.insert(0, ('NEF', 'StepGen'))
        synapse_names.insert(0, 'Gen_Enc')
        spike_device_pars = [
            copy_dict(rec_devices, {
                'model': 'spike_detector',
                'label': 'input_Spikes'
            }) for _ in range(input_dimensions)
        ]
        models = syn_pars.models
        models.insert(0, 'static_synapse')
        model_pars = syn_pars.model_pars
        model_pars.insert(0, {})
        weight_dist = syn_pars.weight_dist
        weight_dist.insert(0, 1.)
        delay_dist = syn_pars.delay_dist
        delay_dist.insert(0, 0.1)
        syn_specs = syn_pars.syn_specs
        syn_specs.insert(0, {})
        if hasattr(syn_pars, 'gen_to_enc_W'):
            preset_W = syn_pars.preset_W
            preset_W.insert(0, syn_pars.gen_to_enc_W)
        else:
            preset_W = syn_pars.preset_W

        print("\nLoading Default Encoding Set 2 - Deterministic spike encoding, {0} input populations of {1} [{2} " \
              "neurons] connected to {3}".format(
                str(input_dimensions), str(n_encoding_neurons), str(encoder_neuron_pars['model']), str(
                syn_pars.target_population_names)))

        encoding_pars = {
            'encoder': {
                'N':
                input_dimensions,
                'labels': [enc_label for _ in range(input_dimensions)],
                'models': [enc_label for _ in range(input_dimensions)],
                'model_pars': [None for _ in range(input_dimensions)],
                'n_neurons':
                [n_encoding_neurons for _ in range(input_dimensions)],
                'neuron_pars':
                [encoder_neuron_pars for _ in range(input_dimensions)],
                'topology': [False for _ in range(input_dimensions)],
                'topology_dict': [None for _ in range(input_dimensions)],
                'record_spikes': [True for _ in range(input_dimensions)],
                'spike_device_pars':
                spike_device_pars,
                'record_analogs': [False for _ in range(input_dimensions)],
                'analog_device_pars': [None for _ in range(input_dimensions)]
            },
            'generator': {
                'N':
                1,
                'labels': ['StepGen'],
                'models': ['step_current_generator'],
                'model_pars': [{
                    'start': 0.,
                    'stop': sys.float_info.max,
                    'origin': 0.
                }],
                'topology': [False],
                'topology_pars': [None]
            },
            'connectivity': {
                'connections': connections,
                'synapse_name': synapse_names,
                'topology_dependent':
                [False for _ in range(len(synapse_names))],
                'conn_specs': conn_specs,
                'models': models,
                'model_pars': model_pars,
                'weight_dist': weight_dist,
                'delay_dist': delay_dist,
                'preset_W': preset_W,
                'syn_specs': syn_specs
            },
            'input_decoder': {
                'encoder_label': enc_label
            }
        }
    elif default_set == 3:
        # ###################################################################
        # Encoding Type 3 - Stochastic spike encoding layer
        # ###################################################################
        gen_label = 'inh_poisson'
        keys = [
            'target_population_names', 'conn_specs', 'syn_specs', 'models',
            'model_pars', 'weight_dist', 'delay_dist', 'preset_W'
        ]
        if not all(n in synapse_pars for n in keys):
            raise TypeError("Incorrect Synapse Parameters")
        syn_pars = ParameterSet(synapse_pars)
        # n_connections = len(syn_pars.target_population_names)
        connections = [(n, gen_label)
                       for n in syn_pars.target_population_names]
        synapse_names = [
            gen_label + 'syn' for _ in syn_pars.target_population_names
        ]
        print(
            "\nLoading Default Encoding Set 3 - Stochastic spike encoding, independent realizations of "
            "inhomogeneous Poisson processes connected to {0}".format(
                str(syn_pars.target_population_names)))

        encoding_pars = {
            'encoder': {
                'N': 0,
                'labels': [],
                'models': [],
                'model_pars': [],
                'n_neurons': [],
                'neuron_pars': [],
                'topology': [],
                'topology_dict': [],
                'record_spikes': [],
                'spike_device_pars': [],
                'record_analogs': [],
                'analog_device_pars': []
            },
            'generator': {
                'N':
                input_dimensions,
                'labels': ['inh_poisson'],
                'models': ['inh_poisson_generator'],
                'model_pars': [{
                    'start': 0.,
                    'stop': sys.float_info.max,
                    'origin': 0.
                }],
                'topology': [False],
                'topology_pars': [None]
            },
            'connectivity': {
                'synapse_name': synapse_names,
                'connections': connections,
                'topology_dependent': [False for _ in range(len(connections))],
                'conn_specs': syn_pars.conn_specs,
                'syn_specs': syn_pars.syn_specs,
                'models': syn_pars.models,
                'model_pars': syn_pars.model_pars,
                'weight_dist': syn_pars.weight_dist,
                'delay_dist': syn_pars.delay_dist,
                'preset_W': syn_pars.preset_W
            }
        }
    elif default_set == 4:
        # ###################################################################
        # Encoding Type 4 - Precise Spatiotemporal spike encoding (Frozen noise)
        # ###################################################################
        gen_label = 'spike_pattern'
        keys = [
            'target_population_names', 'conn_specs', 'syn_specs', 'models',
            'model_pars', 'weight_dist', 'delay_dist', 'preset_W'
        ]
        if not all(n in synapse_pars for n in keys):
            raise TypeError("Incorrect Synapse Parameters")
        syn_pars = ParameterSet(synapse_pars)
        connections = [(n, gen_label)
                       for n in syn_pars.target_population_names]
        synapse_names = [
            gen_label + 'syn' for _ in syn_pars.target_population_names
        ]
        conn_tp = [False for _ in range(len(connections))]
        if hasattr(syn_pars, 'jitter'):
            jitter = syn_pars.jitter
        else:
            jitter = None

        print(
            "\nLoading Default Encoding Set 4 - Stochastic spike encoding, {0} fixed spike pattern templates "
            "composed of {1} independent spike trains connected to {2}".format(
                str(input_dimensions), str(n_encoding_neurons),
                str(syn_pars.target_population_names)))

        encoding_pars = {
            'encoder': {
                'N': 0,
                'labels': [],
                'models': [],
                'model_pars': [],
                'n_neurons': [],
                'neuron_pars': [],
                'topology': [],
                'topology_dict': [],
                'record_spikes': [],
                'spike_device_pars': [],
                'record_analogs': [],
                'analog_device_pars': []
            },
            'generator': {
                'N':
                1,
                'labels': ['spike_pattern'],
                'models': ['spike_generator'],
                'model_pars': [{
                    'start': 0.,
                    'stop': sys.float_info.max,
                    'origin': 0.,
                    'precise_times': True
                }],
                'jitter':
                jitter,
                'topology': [False],
                'topology_pars': [None],
                'gen_to_enc_W':
                syn_pars.gen_to_enc_W
            },
            'connectivity': {
                'synapse_name': synapse_names,
                'connections': connections,
                'topology_dependent': conn_tp,
                'conn_specs': syn_pars.conn_specs,
                'syn_specs': syn_pars.syn_specs,
                'models': syn_pars.models,
                'model_pars': syn_pars.model_pars,
                'weight_dist': syn_pars.weight_dist,
                'delay_dist': syn_pars.delay_dist,
                'preset_W': syn_pars.preset_W
            },
            'input_decoder': None
        }

    else:
        raise IOError("default_set not defined")

    return ParameterSet(encoding_pars)
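
# A minimal usage sketch (illustrative values, not from the source): Default Encoding
# Set 1, injecting DC input into two populations labelled 'E' and 'I' through static
# synapses. Population labels, weights and delays are assumptions.
synapse_pars = {
    'target_population_names': ['E', 'I'],
    'conn_specs': [{'rule': 'all_to_all'}, {'rule': 'all_to_all'}],
    'syn_specs': [{}, {}],
    'models': ['static_synapse', 'static_synapse'],
    'model_pars': [{}, {}],
    'weight_dist': [1., 1.],
    'delay_dist': [0.1, 0.1],
    'preset_W': [None, None]
}
encoding_pars = set_encoding_defaults(default_set=1, input_dimensions=1, **synapse_pars)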
Exemplo n.º 14
0
def set_network_defaults(neuron_set=0, N=1250, **synapse_pars):
    """
    Network default parameters
    :param neuron_set: index of the pre-defined neuron parameter set (passed to set_neuron_defaults)
    :param N: total number of neurons (split 80% excitatory / 20% inhibitory)
    :param synapse_pars: synapse parameters forwarded to set_connection_defaults
    :return: (neuron_pars, net_pars, connection_pars) as ParameterSets
    """
    print("\nLoading Default Network Set - Standard BRN, no topology")
    syn_pars = ParameterSet(synapse_pars)
    nE = 0.8 * N
    nI = 0.2 * N

    rec_devices = rec_device_defaults(start=0.)
    neuron_pars = set_neuron_defaults(default_set=neuron_set)

    #############################################################################################################
    # NETWORK Parameters
    # ===========================================================================================================
    net_pars = {
        'n_populations':
        2,
        'pop_names': ['E', 'I'],
        'n_neurons': [int(nE), int(nI)],
        'neuron_pars': [neuron_pars['E'], neuron_pars['I']],
        'randomize_neuron_pars': [{
            'V_m': (np.random.uniform, {
                'low': -70.,
                'high': -50.
            })
        }, {
            'V_m': (np.random.uniform, {
                'low': -70.,
                'high': -50.
            })
        }],
        'topology': [False, False],
        'topology_dict': [None, None],
        'record_spikes': [True, True],
        'spike_device_pars': [
            copy_dict(
                rec_devices, {
                    'model': 'spike_detector',
                    'record_to': ['memory'],
                    'interval': 0.1,
                    'label': ''
                }),
            copy_dict(
                rec_devices, {
                    'model': 'spike_detector',
                    'record_to': ['memory'],
                    'interval': 0.1,
                    'label': ''
                })
        ],
        'record_analogs': [False, False],
        'analog_device_pars': [None, None],
    }
    #############################################################################################################
    # SYNAPSE Parameters
    # ============================================================================================================
    connection_pars = set_connection_defaults(syn_pars=syn_pars)

    return ParameterSet(neuron_pars), ParameterSet(net_pars), ParameterSet(
        connection_pars)