def main():
    """Set up and run the Brian2 example (example 23).

    Explores the cartesian product of network size `N` and adaptation
    time constant `tauw`, then simulates every combination.
    """
    # Be very verbose during the run
    logging.basicConfig(level=logging.INFO)

    # Default multiprocessing with a lock is used this time
    storage_path = os.path.join('hdf5', 'example_23.hdf5')
    env = Environment(
        trajectory='Example_23_BRIAN2',
        filename=storage_path,
        file_title='Example_23_Brian2',
        comment='Go Brian2!',
        dynamically_imported_classes=[Brian2MonitorResult, Brian2Parameter])

    traj = env.trajectory

    # 1st a) register all simulation parameters on the trajectory
    add_params(traj)

    # 1st b) build the exploration over network sizes and tauw time scales
    exploration = cartesian_product({
        traj.f_get('N').v_full_name: [50, 60],
        traj.f_get('tauw').v_full_name: [30 * ms, 40 * ms]})
    traj.f_explore(exploration)

    # 2nd run the experiment for every parameter combination
    env.run(run_net)

    # Results can be inspected in the hdf5 file afterwards

    # Finally disable logging and close all log-files
    env.disable_logging()
def main():
    """Set up and run the Brian example (example 07) with multiprocessing.

    Uses queue wrapping on two cores and explores network size `N`
    together with the adaptation time constant `tauw`.
    """
    # Be very verbose during the run
    logging.basicConfig(level=logging.INFO)

    # Multiprocessing with queue wrapping on two cores
    storage_path = os.path.join('hdf5', 'example_07.hdf5')
    env = Environment(
        trajectory='Example_07_BRIAN',
        filename=storage_path,
        file_title='Example_07_Brian',
        comment='Go Brian!',
        dynamically_imported_classes=[BrianMonitorResult, BrianParameter],
        multiproc=True,
        wrap_mode='QUEUE',
        ncores=2)

    traj = env.trajectory

    # 1st a) register all simulation parameters on the trajectory
    add_params(traj)

    # 1st b) build the exploration over network sizes and tauw time scales
    exploration = cartesian_product(
        {traj.f_get('N').v_full_name: [50, 60],
         traj.f_get('tauw').v_full_name: [30 * ms, 40 * ms]})
    traj.f_explore(exploration)

    # 2nd run the experiment for every parameter combination
    env.run(run_net)

    # Results can be inspected in the hdf5 file afterwards

    # Finally disable logging and close all log-files
    env.disable_logging()
# Example 3
def main():
    """Build and simulate the clustered network, exploring `R_ee`.

    Creates the environment, wires up the network manager with its
    components and analysers, explores `R_ee`, and runs the simulation.
    """
    storage_path = os.path.join('hdf5', 'Clustered_Network.hdf5')
    env = Environment(
        trajectory='Clustered_Network',
        add_time=False,
        filename=storage_path,
        continuable=False,
        lazy_debug=False,
        multiproc=True,
        ncores=2,
        # A process pool is impossible: the network cannot be pickled
        use_pool=False,
        wrap_mode='QUEUE',
        overwrite_file=True)

    # The trajectory container
    traj = env.trajectory

    # A `meta` parameter to easily rescale the whole network
    scale = 0.5  # Use 1.0 to reproduce the results from the paper
    # Beware: the full-scale run needs a lot of memory!
    traj.f_add_parameter(
        'simulation.scale', scale,
        comment='Meta parameter that can scale default settings. '
                'Rescales number of neurons and connections strenghts, but '
                'not the clustersize.')

    # Hand every component to one manager.
    # Order matters: CNNeuronGroups are scheduled before CNConnections,
    # and the Fano Factor computation depends on the CNMonitorAnalysis
    manager = NetworkManager(
        network_runner=CNNetworkRunner(),
        component_list=(CNNeuronGroup(), CNConnections()),
        analyser_list=(CNMonitorAnalysis(), CNFanoFactorComputer()))

    # Add the original parameters (rescaled according to `scale`)
    manager.add_parameters(traj)

    # `tolist` is needed because the parameter is a python float,
    # not a numpy float
    r_ee_values = np.arange(1.0, 2.6, 0.2).tolist()
    # Explore different values of `R_ee`
    traj.f_explore({'R_ee': r_ee_values})

    # Pre-build the network components
    manager.pre_build(traj)

    # Store the parameters already before launching the simulation
    traj.f_store()
    env.run(manager.run_network)

    # Finally disable logging and close all log-files
    env.disable_logging()
# Example 4
def main():
    """Run the clustered-network experiment over a range of `R_ee` values.

    Configures a multiprocess environment, assembles the network manager,
    pre-builds components, and executes the simulation for each `R_ee`.
    """
    hdf5_file = os.path.join('hdf5', 'Clustered_Network.hdf5')
    env = Environment(trajectory='Clustered_Network',
                      add_time=False,
                      filename=hdf5_file,
                      continuable=False,
                      lazy_debug=False,
                      multiproc=True,
                      ncores=2,
                      use_pool=False,  # We cannot use a pool, our network cannot be pickled
                      wrap_mode='QUEUE',
                      overwrite_file=True)

    # Grab the trajectory container
    traj = env.trajectory

    # A `meta` parameter that rescales the whole network in one place
    scale = 0.5  # Set to 1.0 to obtain the results from the paper
    # Be aware that your machine will need a lot of memory then!
    traj.f_add_parameter(
        'simulation.scale', scale,
        comment='Meta parameter that can scale default settings. '
                'Rescales number of neurons and connections strenghts, but '
                'not the clustersize.')

    # The Manager owns all components.
    # Scheduling order matters: CNNeuronGroups before CNConnections,
    # and the Fano Factor computation depends on the CNMonitorAnalysis
    network_manager = NetworkManager(
        network_runner=CNNetworkRunner(),
        component_list=(CNNeuronGroup(), CNConnections()),
        analyser_list=(CNMonitorAnalysis(), CNFanoFactorComputer()))

    # Add the original parameters, scaled according to `scale`
    network_manager.add_parameters(traj)

    # `tolist` converts numpy floats to python floats, matching
    # the parameter's type
    exploration_values = np.arange(1.0, 2.6, 0.2).tolist()
    # Explore different values of `R_ee`
    traj.f_explore({'R_ee': exploration_values})

    # Pre-build the network components
    network_manager.pre_build(traj)

    # Persist the parameters before kicking off the run
    traj.f_store()
    env.run(network_manager.run_network)

    # Finally disable logging and close all log-files
    env.disable_logging()