コード例 #1
0
def add_LPU(config, manager):
    """Build the photoreceptor LPU from configuration and register it.

    Parameters
    ----------
    config : dict-like
        Configuration with 'Photoreceptor' and 'General' sections.
    manager : neurokernel manager
        Manager the LPU is added to for later initialization.
    """
    config_photor = config['Photoreceptor']
    gexf_file = config_photor['gexf_file']
    # NOTE(review): input_file is read but never used below -- confirm
    # whether a file-based input processor was intended here.
    input_file = config_photor['input_file']
    output_file = config_photor['output_file']

    G = generate_gexf(config_photor, gexf_file)

    # Bug fix: graph_to_dicts(G) was called twice in a row with the first
    # result discarded; a single call suffices.
    comp_dict, conns = LPU.graph_to_dicts(G)
    LPU_id = 'photoreceptor'
    debug = config_photor['debug']

    dt = config['General']['dt']
    extra_comps = [PhotoreceptorModel]

    # Constant photon step input applied to every node in the graph.
    input_processor = StepInputProcessor('photon', G.nodes(), 10000, 0.2, 1)
    output_processor = FileOutputProcessor([('V', G.nodes())],
                                           output_file,
                                           sample_interval=1)
    manager.add(LPU,
                LPU_id,
                dt,
                comp_dict,
                conns,
                device=0,
                input_processors=[input_processor],
                output_processors=[output_processor],
                debug=debug,
                time_sync=False,
                extra_comps=extra_comps)
コード例 #2
0
def add_worker_LPU(config, retina_index, retina, manager):
    """Register one retina worker LPU with the manager."""
    general_conf = config['General']
    retina_conf = config['Retina']

    dt = general_conf['dt']
    debug = retina_conf['debug']
    time_sync = retina_conf['time_sync']
    worker_num = retina_conf['worker_num']

    gexf_file = '{}{}_{}{}.gexf.gz'.format(retina_conf['gexf_file'], 0,
                                           retina_index,
                                           general_conf['file_suffix'])

    # Dump the worker subgraph to GEXF, then parse it back into LPU dicts.
    worker_graph = retina.get_worker_graph(retina_index + 1, worker_num)
    nx.write_gexf(worker_graph, gexf_file)

    comp_dict, conns = LPU.lpu_parser(gexf_file)

    manager.add(LPU,
                get_worker_id(retina_index),
                dt,
                comp_dict,
                conns,
                device=retina_index,
                debug=debug,
                time_sync=time_sync,
                extra_comps=[Photoreceptor])
コード例 #3
0
ファイル: retina_demo.py プロジェクト: ScartleRoy/neurokernel
def main():
    """Run the retina demo: optionally generate input and GEXF, then simulate."""
    import neurokernel.mpi_relaunch

    logger = setup_logger(file_name=None, screen=True)

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-l", "--layers", dest="num_layers", type=int, default=16, help="number of layers of ommatidia on circle"
    )
    arg_parser.add_argument("-i", "--input", action="store_true", help="generates input if set")
    arg_parser.add_argument("-g", "--gexf", action="store_true", help="generates gexf of LPU if set")
    arg_parser.add_argument("--steps", default=100, type=int, help="simulation steps")
    opts = arg_parser.parse_args()

    dt = 1e-4
    GEXF_FILE = "retina.gexf.gz"
    INPUT_FILE = "vision_input.h5"
    IMAGE_FILE = "image1.mat"
    OUTPUT_FILE = "retina_output.h5"

    if opts.input:
        print("Generating input of model from image file")
        generate_input(INPUT_FILE, IMAGE_FILE, opts.num_layers)
    if opts.gexf:
        print("Writing retina lpu")
        # 6 photoreceptors per ommatidium; 3n(n+1)+1 ommatidia for n rings.
        rings = opts.num_layers
        photoreceptor_num = 6 * (3 * rings * (rings + 1) + 1)
        generate_gexf(GEXF_FILE, photoreceptor_num)

    manager = core_gpu.Manager()

    print("Parsing lpu data")
    n_dict_ret, s_dict_ret = LPU.lpu_parser(GEXF_FILE)
    print("Initializing LPU")
    manager.add(
        LPU,
        "retina",
        dt,
        n_dict_ret,
        s_dict_ret,
        input_file=INPUT_FILE,
        output_file=OUTPUT_FILE,
        device=0,
        debug=True,
        time_sync=False,
    )
    manager.spawn()

    print("Starting simulation")
    start_time = time.time()
    manager.start(steps=opts.steps)
    manager.wait()
    print("Simulation complete: Duration {} seconds".format(time.time() - start_time))
コード例 #4
0
def simulation(dt, N, output_n):
    """Build an N-neuron graph, run it for 10 ms of simulated time, and
    report timings.

    Returns a tuple (compile_and_execute_time, execution_time) in seconds.
    """
    t0 = time.time()

    dur = 0.01
    steps = int(np.round(dur / dt))

    G = create_graph(N)
    print("Creating graph completed in {} seconds.".format(time.time() - t0))

    t0 = time.time()

    # Step current of 20.0 injected into every neuron for the full duration.
    fl_input_processor = StepInputProcessor(
        'I', ['neuron_{}'.format(i) for i in range(N)], 20.0, 0.0, dur)
    fl_output_processor = []  # temporarily suppress generating output

    lpu = LPU(dt,
              'obj',
              G,
              device=args.gpu_dev,
              id='ge',
              input_processors=[fl_input_processor],
              output_processors=fl_output_processor,
              debug=args.debug,
              manager=False,
              print_timing=False,
              time_sync=False,
              extra_comps=[])
    print("Instantiating LPU completed in {} seconds.".format(time.time() - t0))

    t1 = time.time()
    # LPU.run includes pre_run, run_steps and post_run
    lpu.run(steps=steps)
    execution_time = time.time() - t1
    compile_and_execute_time = time.time() - t0
    print("LPUs Compilation and Execution Completed in {} seconds.".format(
        compile_and_execute_time))
    return compile_and_execute_time, execution_time
コード例 #5
0
def add_retina_LPU(config, retina_index, retina, manager):
    '''
        Add a Retina LPU and its parameters to the manager so that it can
        be initialized later. Depending on configuration, input is either
        created in advance and read from file or generated during the
        simulation by a generator object.

        --
        config: configuration dictionary like object
        retina_index: identifier of eye in case more than one is used
        retina: retina array object required for the generation of graph
        manager: manager object to which LPU will be added
    '''
    retina_conf = config['Retina']
    general_conf = config['General']

    dt = general_conf['dt']
    debug = retina_conf['debug']
    time_sync = retina_conf['time_sync']

    # NOTE: input_file is read here but not used below, matching the
    # original behavior.
    input_filename = retina_conf['input_file']
    suffix = general_conf['file_suffix']
    output_file = '{}{}{}.h5'.format(retina_conf['output_file'],
                                     retina_index, suffix)
    gexf_file = '{}{}{}.gexf.gz'.format(retina_conf['gexf_file'],
                                        retina_index, suffix)

    if retina_conf['inputmethod'] == 'read':
        print('Generating input files')
        with Timer('input generation'):
            input_processor = RetinaFileInputProcessor(config, retina)
    else:
        print('Using input generating function')
        input_processor = RetinaInputProcessor(config, retina)

    output_processor = FileOutputProcessor(
        [('V', None), ('spike_state', None)], output_file, sample_interval=1)

    # retina also allows a subset of its graph to be taken in case it is
    # needed later to split the retina model across more GPUs
    G = retina.get_worker_nomaster_graph()
    nx.write_gexf(G, gexf_file)

    comp_dict, conns = LPU.graph_to_dicts(G)

    manager.add(LPU, get_retina_id(retina_index), dt, comp_dict, conns,
                device=retina_index,
                input_processors=[input_processor],
                output_processors=[output_processor],
                debug=debug,
                time_sync=time_sync,
                extra_comps=[LeakyIAF, BufferPhoton])
コード例 #6
0
def create_pattern(n_dict_1, n_dict_2, save_as=None):
    """
    Build the lamina<->medulla connectivity pattern from two LPU n_dicts.

    If `save_as` is not None, save the pattern in GEXF format as the
    specified file name.
    """
    def selectors_for(n_dict):
        # (in_gpot, out_gpot, in_spike, out_spike) selectors for one LPU.
        return (plsel.Selector(LPU.extract_in_gpot(n_dict)),
                plsel.Selector(LPU.extract_out_gpot(n_dict)),
                plsel.Selector(LPU.extract_in_spk(n_dict)),
                plsel.Selector(LPU.extract_out_spk(n_dict)))

    in_gpot_1, out_gpot_1, in_spk_1, out_spk_1 = selectors_for(n_dict_1)
    in_gpot_2, out_gpot_2, in_spk_2, out_spk_2 = selectors_for(n_dict_2)

    union = plsel.Selector.union
    lpu1_sel = union(union(out_gpot_1, out_spk_1), union(in_gpot_1, in_spk_1))
    lpu2_sel = union(union(out_gpot_2, out_spk_2), union(in_gpot_2, in_spk_2))

    gpot_sel = union(out_gpot_1, in_gpot_1, out_gpot_2, in_gpot_2)
    spike_sel = union(out_spk_1, in_spk_1, out_spk_2, in_spk_2)

    # L1-L5 and T1 project lamina -> medulla; C2/C3 feed back.
    neurons_12 = ["L1", "L2", "L3", "L4", "L5", "T1"]
    neurons_21 = ["C2", "C3"]

    cart_str = "[" + ",".join(["cart%i" % i for i in range(768)]) + "]"
    sel_12 = cart_str + "[" + ",".join(neurons_12) + "]"
    sel_21 = cart_str + "[" + ",".join(neurons_21) + "]"

    from_sel = "/lamina" + sel_12 + "," + "/medulla" + sel_21
    to_sel = "/medulla" + sel_12 + "," + "/lamina" + sel_21

    pat = Pattern.from_concat(
        lpu1_sel, lpu2_sel, from_sel=from_sel, to_sel=to_sel,
        gpot_sel=gpot_sel, spike_sel=spike_sel, data=1
    )

    if save_as:
        nx.write_gexf(pat.to_graph(), save_as, prettyprint=True)
    return pat
コード例 #7
0
def add_master_LPU(config, retina_index, retina, manager):
    """Add the retina master LPU to the manager for later initialization.

    Parameters
    ----------
    config : dict-like
        Configuration with 'General' and 'Retina' sections.
    retina_index : int
        Identifier of the eye; also used as the GPU device number.
    retina : retina array object
        Provides the master graph and the number of retina elements.
    manager : neurokernel manager
        Manager the LPU is added to.
    """
    dt = config['General']['dt']
    debug = config['Retina']['debug']
    time_sync = config['Retina']['time_sync']

    output_filename = config['Retina']['output_file']
    gexf_filename = config['Retina']['gexf_file']
    suffix = config['General']['file_suffix']

    output_file = '{}{}{}.h5'.format(output_filename, retina_index, suffix)
    gexf_file = '{}{}{}.gexf.gz'.format(gexf_filename, retina_index, suffix)

    # Bug fix: the original first built an input processor from the
    # 'inputmethod' setting and then unconditionally overwrote it with
    # get_input_gen(), so that whole if/else (including its prints and
    # Timer block) was dead code.  Only the get_input_gen() result was
    # ever used; the dead branch has been removed.
    input_processor = get_input_gen(config, retina)

    # Record the membrane potential of all R1-R6 photoreceptors.
    uids_to_record = [
        'ret_{}_{}'.format(name, i) for i in range(retina.num_elements)
        for name in ['R1', 'R2', 'R3', 'R4', 'R5', 'R6']
    ]
    output_processor = FileOutputProcessor([('V', uids_to_record)],
                                           output_file,
                                           sample_interval=1)

    # Persist the master graph, then parse it back into LPU dictionaries.
    G = retina.get_master_graph()
    nx.write_gexf(G, gexf_file)

    (comp_dict, conns) = LPU.lpu_parser(gexf_file)
    master_id = get_master_id(retina_index)

    extra_comps = [BufferPhoton, BufferVoltage]

    manager.add(LPU,
                master_id,
                dt,
                comp_dict,
                conns,
                device=retina_index,
                input_processors=[input_processor],
                output_processors=[output_processor],
                debug=debug,
                time_sync=time_sync,
                extra_comps=extra_comps)
コード例 #8
0
def create_pattern(n_dict_1, n_dict_2, save_as=None):
    """
    Build the lamina<->medulla Pattern and label its interface ports.

    If `save_as` is not None, save the pattern as the specified file name.
    """
    # Selector for every (lpu, direction, signal-type) combination.
    sel = {}
    for lpu, n_dict in (('1', n_dict_1), ('2', n_dict_2)):
        sel[lpu, 'in', 'gpot'] = plsel.Selector(LPU.extract_in_gpot(n_dict))
        sel[lpu, 'out', 'gpot'] = plsel.Selector(LPU.extract_out_gpot(n_dict))
        sel[lpu, 'in', 'spike'] = plsel.Selector(LPU.extract_in_spk(n_dict))
        sel[lpu, 'out', 'spike'] = plsel.Selector(LPU.extract_out_spk(n_dict))

    def lpu_selector(lpu):
        # Union of all four port selectors of one LPU (outputs first).
        return plsel.Selector.union(
            plsel.Selector.union(sel[lpu, 'out', 'gpot'],
                                 sel[lpu, 'out', 'spike']),
            plsel.Selector.union(sel[lpu, 'in', 'gpot'],
                                 sel[lpu, 'in', 'spike']))

    pat = Pattern(lpu_selector('1'), lpu_selector('2'))

    # Tag every port with its direction and signal type
    # (same assignment order as before: gpot ports first, then spike).
    for sig in ('gpot', 'spike'):
        for lpu in ('1', '2'):
            for io in ('in', 'out'):
                pat.interface[sel[lpu, io, sig], 'io', 'type'] = [io, sig]

    # L1-L5 and T1 project lamina -> medulla; C2/C3 feed back, with
    # like-named neurons connected within each of the 768 cartridges.
    forward = ['L1', 'L2', 'L3', 'L4', 'L5', 'T1']
    feedback = ['C2', 'C3']

    for i in range(768):
        for neuron in forward:
            pat['/lamina/cart{}/{}'.format(i, neuron),
                '/medulla/cart{}/{}'.format(i, neuron)] = 1
        for neuron in feedback:
            pat['/medulla/cart{}/{}'.format(i, neuron),
                '/lamina/cart{}/{}'.format(i, neuron)] = 1

    if save_as:
        with open(save_as, 'wb') as pat_file:
            pickle.dump(pat, pat_file)
    return pat
コード例 #9
0
def cx_component(graph):
	"""Extract Neurokernel-compatible LPU and Pattern data from a NeuroArch
	database graph handle.

	Returns a tuple (lpu_name_to_comp_dict, lpu_name_to_conn_list,
	pat_name_list, pat_name_to_pat).
	"""
	#lpu lists
	lpu_name_list = ['BU', 'bu', 'EB', 'FB', 'PB']
	lpu_name_to_node = {}      # LPU name -> pyorient LPU node
	lpu_name_to_g_na = {}      # LPU name -> NeuroArch-compatible graph
	lpu_name_to_g_nk_orig = {} # LPU name -> Neurokernel-compatible graph
	lpu_name_to_g_nk = {}      # LPU name -> Neurokernel-compatible graph with int IDs
	#lpu_name_to_n_dict = {}    # LPU name -> n_dict data struct
	#lpu_name_to_s_dict = {}    # LPU name -> s_dict data struct
	lpu_name_to_comp_dict = {} # LPU name -> comp_dict data struct
	lpu_name_to_conn_list = {} # LPU name -> conn_list data struct

	for name in lpu_name_list:
		# Fetch the LPU node and everything it owns (2 levels) as a graph.
		lpu_name_to_node[name] = graph.LPUs.query(name=name).one()
		lpu_name_to_g_na[name] = lpu_name_to_node[name].traverse_owns(max_levels = 2).get_as('nx')
		# Convert NeuroArch graph -> Neurokernel graph, relabel nodes with
		# sorted integer IDs, then re-sort LeakyIAF nodes by 'name'.
		lpu_name_to_g_nk_orig[name] = nk.na_lpu_to_nk_new(lpu_name_to_g_na[name])
		lpu_name_to_g_nk[name] = nx.convert_node_labels_to_integers(lpu_name_to_g_nk_orig[name], ordering = 'sorted')
		lpu_name_to_g_nk[name] = \
		partly_relabel_by_sorted_attr(lpu_name_to_g_nk[name], 'model', ['LeakyIAF'], 'name')
		#lpu_name_to_n_dict[name], lpu_name_to_s_dict[name] = LPU.graph_to_dicts(lpu_name_to_g_nk[name])
		lpu_name_to_comp_dict[name], lpu_name_to_conn_list[name] = LPU.graph_to_dicts(lpu_name_to_g_nk[name])

		# Persist each converted LPU graph for inspection/reuse.
		nx.write_gexf(lpu_name_to_g_nk[name], name+'.gexf.gz')



	# Patterns:
	pat_name_list = [n.name for n in graph.Patterns.query().all()]

	pat_name_to_node = {}     # LPU pair -> pyorient Pattern node
	pat_name_to_g_na = {}     # LPU pair -> NeuroArch-compatible graph
	pat_name_to_g_nk = {}     # LPU pair -> Neurokernel-compatible graph
	pat_name_to_pat = {}      # LPU pair -> Pattern class instance

	for name in pat_name_list:
	    pat_name_to_node[name] = graph.Patterns.query(name=name).one()
	    pat_name_to_g_na[name] = pat_name_to_node[name].traverse_owns(max_levels = 2).get_as('nx')
	    pat_name_to_g_nk[name] = nk.na_pat_to_nk(pat_name_to_g_na[name])
	    pat_name_to_pat[name] = Pattern.Pattern.from_graph(nx.DiGraph(pat_name_to_g_nk[name]))

	return lpu_name_to_comp_dict, lpu_name_to_conn_list, pat_name_list, pat_name_to_pat
コード例 #10
0
def add_lamina_LPU(config, lamina_index, lamina, manager):
    '''
        Add a Lamina LPU and its parameters to the manager so that it can
        be initialized later.

        --
        config: configuration dictionary like object
        lamina_index: identifier of eye in case more than one is used
        lamina: lamina array object required for the generation of graph
        manager: manager object to which LPU will be added
    '''
    lamina_conf = config['Lamina']
    general_conf = config['General']

    dt = general_conf['dt']
    debug = lamina_conf['debug']
    time_sync = lamina_conf['time_sync']

    suffix = general_conf['file_suffix']
    output_file = '{}{}{}.h5'.format(lamina_conf['output_file'],
                                     lamina_index, suffix)
    gexf_file = '{}{}{}.gexf.gz'.format(lamina_conf['gexf_file'],
                                        lamina_index, suffix)

    # Persist the lamina graph, then convert it into LPU dictionaries.
    G = lamina.get_graph()
    nx.write_gexf(G, gexf_file)
    comp_dict, conns = LPU.graph_to_dicts(G)

    output_processor = FileOutputProcessor([('V', None)], output_file,
                                           sample_interval=1)

    # NOTE(review): devices are offset by one -- presumably device 0 hosts
    # another LPU; confirm against the caller.
    manager.add(LPU, get_lamina_id(lamina_index), dt, comp_dict, conns,
                output_processors=[output_processor],
                device=lamina_index+1, debug=debug, time_sync=time_sync,
                extra_comps=[BufferVoltage])
コード例 #11
0
ファイル: multi_demo.py プロジェクト: Hanyu-Li/neurokernel
else:
    port_data = args.port_data
    port_ctrl = args.port_ctrl

# NOTE(review): Python 2 code -- dict.iteritems() does not exist in Python 3.
for i, neu_num in neu_dict.iteritems():
    lpu_entry = {}

    # Only LPU 0 receives external input from a file.
    if i == 0:
        in_file_name = in_file_name_0
    else:
        in_file_name = None
    lpu_file_name = 'generic_lpu_%s.gexf.gz' % i
    out_file_name = 'generic_output_%s.h5' % i

    # Generate the LPU definition and parse it into neuron/synapse dicts.
    g.create_lpu(lpu_file_name, *neu_num)
    (n_dict, s_dict) = LPU.lpu_parser(lpu_file_name)

    # NOTE: 'id' shadows the builtin of the same name.
    id = 'lpu_%s' % i
    # Legacy API: the LPU is instantiated directly with explicit ZMQ ports.
    lpu = LPU(dt, n_dict, s_dict, input_file=in_file_name,
              output_file=out_file_name,
              port_ctrl=port_ctrl, port_data=port_data,
              device=i, id=id,
              debug=args.debug)

    # Book-keeping for later stages of the demo.
    lpu_entry['lpu_file_name'] = lpu_file_name
    lpu_entry['in_file_name'] = in_file_name
    lpu_entry['out_file_name'] = out_file_name
    lpu_entry['lpu'] = lpu
    lpu_entry['id'] = id

    lpu_dict[i] = lpu_entry
コード例 #12
0
ファイル: intro_demo.py プロジェクト: Hanyu-Li/neurokernel
def run(connected):
    """Run two generic LPUs, optionally wired together, for args.steps steps.

    connected: if True, connect every spiking public neuron of LPU 0 to
        every spiking public neuron of LPU 1 via AlphaSynapse connections.

    NOTE(review): legacy Python 2 code (xrange, old Connectivity API).
    """
    # Pick random ZMQ ports unless both were given on the command line.
    if args.port_data is None and args.port_ctrl is None:
        port_data = get_random_port()
        port_ctrl = get_random_port()
    else:
        port_data = args.port_data
        port_ctrl = args.port_ctrl

    # Output files are suffixed 'un'/'co' for unconnected/connected runs.
    out_name = 'un' if not connected else 'co'
    man = core.Manager(port_data, port_ctrl)
    man.add_brok()

    # Parse both LPU definitions into neuron/synapse dictionaries.
    lpu_file_0 = './data/generic_lpu_0.gexf.gz'
    lpu_file_1 = './data/generic_lpu_1.gexf.gz'
    (n_dict_0, s_dict_0) = LPU.lpu_parser(lpu_file_0)
    (n_dict_1, s_dict_1) = LPU.lpu_parser(lpu_file_1)

    ge_0_id = 'ge_0'
    ge_0 = LPU(dt, n_dict_0, s_dict_0,
               input_file='./data/generic_input_0.h5',
               output_file='generic_output_0_%s.h5' % out_name,
               port_ctrl=port_ctrl, port_data=port_data,
               device=args.gpu_dev[0], id=ge_0_id,
               debug=args.debug)
    man.add_mod(ge_0)

    ge_1_id = 'ge_1'
    ge_1 = LPU(dt, n_dict_1, s_dict_1,
               input_file='./data/generic_input_1.h5',
               output_file='generic_output_1_%s.h5' % out_name,
               port_ctrl=port_ctrl, port_data=port_data,
               device=args.gpu_dev[1], id=ge_1_id,
               debug=args.debug)
    man.add_mod(ge_1)

    # Connect the public neurons in the two LPUs:
    df_neu_0, df_syn_0 = neurokernel.tools.graph.graph_to_df(nx.read_gexf(lpu_file_0))
    df_neu_1, df_syn_1 = neurokernel.tools.graph.graph_to_df(nx.read_gexf(lpu_file_1))

    # Number of public neurons in each LPU:
    N_spike_0 = len(df_neu_0[(df_neu_0['spiking']==True)&(df_neu_0['public']==True)])
    N_gpot_0 = len(df_neu_0[(df_neu_0['spiking']==False)&(df_neu_0['public']==True)])

    N_spike_1 = len(df_neu_1[(df_neu_1['spiking']==True)&(df_neu_1['public']==True)])
    N_gpot_1 = len(df_neu_1[(df_neu_1['spiking']==False)&(df_neu_1['public']==True)])

    # Alpha function synaptic parameters:
    alphasynapse_type_params = {'AlphaSynapse': ['ad', 'ar', 'gmax', 'id', 'class', 'conductance',
                                                 'reverse']}

    if connected:
        conn = core.Connectivity(N_gpot_0, N_spike_0, N_gpot_1, N_spike_1, 1,
                                 ge_0.id, ge_1.id, alphasynapse_type_params)
        # All-to-all spike connections from LPU 0 to LPU 1; enable each
        # connection, then set its AlphaSynapse parameters.
        for id, (i, j) in enumerate(itertools.product(xrange(N_spike_0), xrange(N_spike_1))):
            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j] = 1
            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j, 0, 'name'] = 'int_0to1_%s_%s' % (i, j)
            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j, 0, 'model'] = 'AlphaSynapse'

            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j, 0, 'ad'] = 0.19*1000
            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j, 0, 'ar'] = 1.1*100
            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j, 0, 'class'] = 0
            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j, 0, 'conductance'] = True
            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j, 0, 'gmax'] = 0.003
            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j, 0, 'id'] = id
            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j, 0, 'reverse'] = 0.065

        man.connect(ge_0, ge_1, conn)

    man.start(steps=args.steps)
    man.stop()
コード例 #13
0
ファイル: multi_demo.py プロジェクト: yluo42/neurokernel
# Set up several LPUs:
# NOTE(review): Python 2 code -- dict.iteritems() does not exist in Python 3.
man = core.Manager()
for i, neu_num in neu_dict.iteritems():
    lpu_entry = {}

    # Only LPU 0 receives external input from a file.
    if i == 0:
        in_file_name = in_file_name_0
    else:
        in_file_name = None
    lpu_file_name = 'generic_lpu_%s.gexf.gz' % i
    out_file_name = 'generic_lpu_%s_output.h5' % i

    # NOTE: 'id' shadows the builtin of the same name.
    id = 'lpu_%s' % i

    # Generate the LPU definition and parse it into neuron/synapse dicts.
    g.create_lpu(lpu_file_name, id, *neu_num)
    (n_dict, s_dict) = LPU.lpu_parser(lpu_file_name)

    man.add(LPU,
            id,
            dt,
            n_dict,
            s_dict,
            input_file=in_file_name,
            output_file=out_file_name,
            device=i,
            debug=args.debug,
            time_sync=args.time_sync)

    # Book-keeping for later stages of the demo.
    lpu_entry['lpu_file_name'] = lpu_file_name
    lpu_entry['in_file_name'] = in_file_name
    lpu_entry['out_file_name'] = out_file_name
コード例 #14
0
    file_name = 'neurokernel.log'
# Also log to the screen when requested via --log.
if args.log.lower() in ['screen', 'both']:
    screen = True
logger = base.setup_logger(file_name, screen)

# Use randomly chosen ports unless both were supplied on the command line.
if args.port_data is None and args.port_ctrl is None:
    port_data = get_random_port()
    port_ctrl = get_random_port()
else:
    port_data = args.port_data
    port_ctrl = args.port_ctrl

# Legacy manager API: explicit ports plus a broker process.
man = core.Manager(port_data, port_ctrl)
man.add_brok()

# Parse the generic LPU definition into neuron/synapse dictionaries.
(n_dict, s_dict) = LPU.lpu_parser('./data/generic_lpu.gexf.gz')

ge = LPU(dt,
         n_dict,
         s_dict,
         input_file='./data/generic_input.h5',
         output_file='generic_output.h5',
         port_ctrl=port_ctrl,
         port_data=port_data,
         device=args.gpu_dev,
         id='ge',
         debug=args.debug)
man.add_mod(ge)

# Run the simulation for the requested number of steps, then stop.
man.start(steps=args.steps)
man.stop()
コード例 #15
0
import numpy as np

from neurokernel.LPU.LPU import LPU
import vision_configuration as vc

# Fix the RNG seed so the generated circuits are reproducible.
np.random.seed(10000)

# Build the lamina circuit (24x32 cartridges) and export it to GEXF.
lamina = vc.Lamina(24, 32, 'neuron_types_lamina.csv', 'synapse_lamina.csv', None)
lamina.create_cartridges()
lamina.connect_cartridges()
lamina.create_non_columnar_neurons()
lamina.connect_composition_II()
lamina.connect_composition_I()
lamina.add_selectors()
g_lam = lamina.export_to_gexf('lamina.gexf.gz')
n_dict_lam, s_dict_lam = LPU.graph_to_dicts(g_lam)

# Build the medulla circuit the same way (with its extra synapse file).
medulla = vc.Medulla(24, 32, 'neuron_types_medulla.csv', 'synapse_medulla.csv', 'synapse_medulla_other.csv')
medulla.create_cartridges()
medulla.connect_cartridges()
medulla.create_non_columnar_neurons()
medulla.connect_composition_I()
medulla.connect_composition_II()
medulla.connect_composition_III()
medulla.add_selectors()
g_med = medulla.export_to_gexf('medulla.gexf.gz')
n_dict_med, s_dict_med = LPU.graph_to_dicts(g_med)

# Create the lamina<->medulla connectivity pattern and save it.
vc.create_pattern(n_dict_lam, n_dict_med, 'lam_med.gexf.gz')
コード例 #16
0
    man = core.Manager()

    # Single-neuron network: one LeakyIAF cell.
    G = nx.MultiDiGraph()

    # NOTE(review): passing the attribute dict positionally to add_node is
    # the NetworkX 1.x API; NetworkX 2.x requires keyword arguments.
    G.add_node(
        'neuron0',
        {
            'class': 'LeakyIAF',
            'name': 'LeakyIAF',
            'resting_potential': -70.0,
            'threshold': -45.0,
            'capacitance': 0.07,  # in mS
            'resistance': 0.2,  # in Ohm
        })

    comp_dict, conns = LPU.graph_to_dicts(G)

    # Current step of amplitude 40 applied to 'neuron0' with parameters
    # (0.2, 0.8) -- presumably start/stop times; confirm against
    # StepInputProcessor's signature.
    fl_input_processor = StepInputProcessor('I', ['neuron0'], 40, 0.2, 0.8)
    fl_output_processor = FileOutputProcessor([('spike_state', None),
                                               ('V', None)],
                                              'new_output.h5',
                                              sample_interval=1)

    man.add(LPU,
            'ge',
            dt,
            comp_dict,
            conns,
            device=args.gpu_dev,
            input_processors=[fl_input_processor],
            output_processors=[fl_output_processor],
コード例 #17
0
ファイル: generic_demo.py プロジェクト: chungheng/neurodriver
                    help='Number of steps [default: %s]' % steps)
parser.add_argument('-g', '--gpu_dev', default=0, type=int,
                    help='GPU device number [default: 0]')
args = parser.parse_args()

# Configure logging destination(s) from the --log option.
file_name = None
screen = False
if args.log.lower() in ['file', 'both']:
    file_name = 'neurokernel.log'
if args.log.lower() in ['screen', 'both']:
    screen = True
logger = setup_logger(file_name=file_name, screen=screen)

man = core.Manager()

# Parse the generic LPU definition into component/connection dictionaries.
(comp_dict, conns) = LPU.lpu_parser('./data/generic_lpu.gexf.gz')

#st_input_processor = StepInputProcessor('I', ['73','74','75','76','77'] , 10, 0.1,0.4)
fl_input_processor = FileInputProcessor('./data/generic_input.h5')
fl_output_processor = FileOutputProcessor([('V',None),('spike_state',None)], 'new_output.h5', sample_interval=1)
'''
a = LPU(dt, comp_dict, conns, device=args.gpu_dev, input_processors = [st_input_processor], id='ge')
'''
man.add(LPU, 'ge', dt, comp_dict, conns,
        device=args.gpu_dev, input_processors = [fl_input_processor],
        output_processors = [fl_output_processor], debug=args.debug)

# Spawn the LPU process, run for the requested steps, and wait for it.
man.spawn()
man.start(steps=args.steps)
man.wait()
コード例 #18
0
# Set up several LPUs:
# One generic LPU is instantiated per entry in neu_dict; only the first
# (i == 0) is driven by an external input file.  Per-LPU metadata is
# collected in lpu_dict for later use (e.g. pattern creation).
man = core.Manager()
for i, neu_num in neu_dict.items():  # .items() works on Python 2 and 3 (iteritems() is 2-only)
    lpu_entry = {}

    # Only LPU 0 receives the external stimulus file:
    in_file_name = in_file_name_0 if i == 0 else None
    lpu_file_name = 'generic_lpu_%s.gexf.gz' % i
    out_file_name = 'generic_lpu_%s_output.h5' % i

    lpu_id = 'lpu_%s' % i  # renamed from `id` to avoid shadowing the builtin

    # Generate the LPU's GEXF definition and parse it back into dicts:
    g.create_lpu(lpu_file_name, lpu_id, *neu_num)
    (n_dict, s_dict) = LPU.lpu_parser(lpu_file_name)

    # Each LPU runs on its own GPU (device=i):
    man.add(LPU, lpu_id, dt, n_dict, s_dict,
            input_file=in_file_name,
            output_file=out_file_name,
            device=i,
            debug=args.debug, time_sync=args.time_sync)

    lpu_entry['lpu_file_name'] = lpu_file_name
    lpu_entry['in_file_name'] = in_file_name
    lpu_entry['out_file_name'] = out_file_name
    lpu_entry['n_dict'] = n_dict
    lpu_entry['s_dict'] = s_dict

    lpu_dict[lpu_id] = lpu_entry
コード例 #19
0
ファイル: retinatest.py プロジェクト: noukernel/neurokernel
    # Overwrite node nn's attributes with a non-spiking Photoreceptor model
    # (NetworkX 1.x `G.node` attribute access):
    G.node[nn] = {
    'model': 'Photoreceptor',
    'name' : 'neuron_0' ,
    'extern' : True,
    'public' : False,
    'spiking' : False,
    'Vinit' : -0.07,
    'SA' : 0.6982,
    'SI' : 0.000066517,
    'DRA' : 0.2285,
    'DRI' : 0.00012048 }
    nx.write_gexf(G, 'simple_lpu.gexf.gz')



(n_dict, s_dict) = LPU.lpu_parser('simple_lpu.gexf.gz')

# Build a rectangular stimulus pulse of amplitude I_max between t=start and
# t=stop and write it to HDF5:
t = np.arange(0, dt*Nt, dt) 
I = np.zeros((Nt, 1), dtype=np.double) 
I[np.logical_and(t>start, t<stop)] = I_max 
with h5py.File('simple_input.h5', 'w') as f: 
        f.create_dataset('array', (Nt,1), dtype=np.double, data=I) 
 
port_data = get_random_port() 
port_ctrl = get_random_port() 
 
# NOTE(review): the stimulus above is written to 'simple_input.h5' but the
# LPU reads 'retina_inputs.h5' — confirm this mismatch is intentional.
lpu = LPU_retina(dt, n_dict, s_dict, input_file='retina_inputs.h5', output_file='retina_output.h5', port_ctrl=port_ctrl, port_data=port_data, device=0, id='simple', debug=False)
 
man = Manager(port_data, port_ctrl) 
man.add_brok() 
man.add_mod(lpu) 
コード例 #20
0
ファイル: ret_lam_demo.py プロジェクト: Hanyu-Li/neurokernel
        print('Writing lamina lpu')
        eyemodel.write_lamina(LAM_GEXF_FILE)

# Use caller-supplied broker ports if both were given; otherwise pick
# random free ports:
if args.port_data is None and args.port_ctrl is None:
    port_data = get_random_port()
    port_ctrl = get_random_port()
else:
    port_data = args.port_data
    port_ctrl = args.port_ctrl

# Unless execution is suppressed, build the retina (and optionally lamina)
# LPUs and register them with a broker-backed manager:
if not args.suppress:
    man = core.Manager(port_data, port_ctrl)
    man.add_brok()

    print('Parsing retina lpu data')
    n_dict_ret, s_dict_ret = LPU.lpu_parser(RET_GEXF_FILE)
    print('Initializing retina LPU')
    lpu_ret = LPU(dt, n_dict_ret, s_dict_ret,
                  input_file=INPUT_FILE,
                  output_file=RET_OUTPUT_FILE, port_ctrl=port_ctrl,
                  port_data=port_data, device=args.ret_dev, id='retina',
                  debug=True)
    man.add_mod(lpu_ret)

    if not args.retina_only:
        print('Parsing lamina lpu data')
        n_dict_lam, s_dict_lam = LPU.lpu_parser(LAM_GEXF_FILE)
        print('Initializing lamina LPU')
        # NOTE(review): this constructor call is truncated in this excerpt.
        lpu_lam = LPU(dt, n_dict_lam, s_dict_lam,
                      input_file=None,
                      output_file=LAM_OUTPUT_FILE, port_ctrl=port_ctrl,
コード例 #21
0
    def launch(self, user_id, task):
        """Build and run a user-requested Neurokernel simulation task.

        Constructs LPUs and inter-LPU connectivity patterns from the graph
        data in ``task['data']``, runs the simulation under a Neurokernel
        manager, and post-processes the recorded stimulus and membrane
        voltages into plottable dictionaries.

        Parameters
        ----------
        user_id : str
            Identifier used to namespace per-user files (log, outputs,
            input coordinate file).
        task : dict
            Task specification.  Must contain 'neuron_list' and 'data';
            may override 'dt', 'steps', 'ignored_steps' and supply
            'inputProcessors'.

        Returns
        -------
        (inputs, result) : tuple of dict
            Time series of the retina inputs and of the recorded membrane
            voltages, keyed by component uid.
        """
        neuron_uid_list = [str(a) for a in task['neuron_list']]

        conf_obj = get_config_obj()
        config = conf_obj.conf

        # Namespace the natural-input coordinate file per user so that
        # concurrent users do not clobber each other's files:
        if config['Retina']['intype'] == 'Natural':
            coord_file = config['InputType']['Natural']['coord_file']
            tmp = os.path.splitext(coord_file)
            config['InputType']['Natural']['coord_file'] = '{}_{}{}'.format(
                tmp[0], user_id, tmp[1])

        setup_logger(file_name='neurokernel_' + user_id + '.log', screen=True)

        manager = core.Manager()

        lpus = {}
        patterns = {}
        G = task['data']
        # Debug dump of the raw task graph — presumably left in for
        # troubleshooting; confirm whether it should stay:
        with open('G.pickle', 'wb') as f:
            pickle.dump(G, f, protocol=pickle.HIGHEST_PROTOCOL)
        print(G)
        print(G.keys())
        print(G['LPU'])
        print(G['LPU'].keys())

        # get graph and output_uid_list for each LPU
        for k, lpu in G['LPU'].items():
            lpus[k] = {}
            g_lpu_na = create_graph_from_database_returned(lpu)
            lpu_nk_graph = nk.na_lpu_to_nk_new(g_lpu_na)
            lpus[k]['graph'] = lpu_nk_graph
            # Record only the requested neurons that exist in this LPU:
            lpus[k]['output_uid_list'] = list(
                set(lpu_nk_graph.nodes()).intersection(set(neuron_uid_list)))
            lpus[k]['output_file'] = '{}_output_{}.h5'.format(k, user_id)

        # Flatten NetworkX 'attr_dict' wrappers into plain node/edge
        # attributes (artifact of converting between NetworkX versions):
        for kkey, lpu in lpus.items():
            graph = lpu['graph']

            for uid, comp in graph.node.items():
                if 'attr_dict' in comp:
                    print('Found attr_dict; fixing...')
                    nx.set_node_attributes(graph, {uid: comp['attr_dict']})
                    # print('changed',uid)
                    graph.nodes[uid].pop('attr_dict')
            for i, j, k, v in graph.edges(keys=True, data=True):
                if 'attr_dict' in v:
                    for key in v['attr_dict']:
                        nx.set_edge_attributes(
                            graph, {(i, j, k): {
                                        key: v['attr_dict'][key]
                                    }})
                    graph.edges[(i, j, k)].pop('attr_dict')
            lpus[kkey]['graph'] = graph

        # get graph for each Pattern
        for k, pat in G['Pattern'].items():
            l1, l2 = k.split('-')
            if l1 in lpus and l2 in lpus:
                g_pattern_na = create_graph_from_database_returned(pat)
                pattern_nk = nk.na_pat_to_nk(g_pattern_na)
                print(lpus[l1]['graph'].nodes(data=True))
                # Keep only the pattern ports that correspond to Port nodes
                # present in the two LPUs being connected:
                lpu_ports = [node[1]['selector'] \
                             for node in lpus[l1]['graph'].nodes(data=True) \
                             if node[1]['class']=='Port'] + \
                            [node[1]['selector'] \
                             for node in lpus[l2]['graph'].nodes(data=True) \
                             if node[1]['class']=='Port']
                pattern_ports = pattern_nk.nodes()
                patterns[k] = {}
                patterns[k]['graph'] = pattern_nk.subgraph(
                    list(set(lpu_ports).intersection(set(pattern_ports))))

        # Simulation time step; the task may override the configured value:
        dt = config['General']['dt']
        if 'dt' in task:
            dt = task['dt']
            print(dt)

        # add LPUs to manager
        for k, lpu in lpus.items():
            lpu_name = k
            graph = lpu['graph']

            # Repeat the attr_dict flattening (defensive; see loop above):
            for uid, comp in graph.node.items():
                if 'attr_dict' in comp:
                    nx.set_node_attributes(graph, {uid: comp['attr_dict']})
                    # print('changed',uid)
                    graph.nodes[uid].pop('attr_dict')
            for i, j, ko, v in graph.edges(keys=True, data=True):
                if 'attr_dict' in v:
                    for key in v['attr_dict']:
                        nx.set_edge_attributes(
                            graph, {(i, j, ko): {
                                        key: v['attr_dict'][key]
                                    }})
                    graph.edges[(i, j, ko)].pop('attr_dict')
            # Debug artifacts written per LPU — confirm whether needed:
            nx.write_gexf(graph, 'name.gexf')
            with open(lpu_name + '.pickle', 'wb') as f:
                pickle.dump(graph, f, protocol=pickle.HIGHEST_PROTOCOL)
            comps = graph.node.items()

            #for uid, comp in comps:
            #    if 'attr_dict' in comp:
            #        nx.set_node_attributes(graph, {uid: comp['attr_dict']})
            #        print('changed',uid)
            #    if 'class' in comp:

            # Choose per-LPU input processors and extra component classes:
            if k == 'retina':
                prs = [node for node in graph.nodes(data=True) \
                       if node[1]['class'] == 'PhotoreceptorModel']
                for pr in prs:
                    graph.node[pr[0]]['num_microvilli'] = 3000
                input_processors = [
                    RetinaInputIndividual(config, prs, user_id)
                ]
                extra_comps = [PhotoreceptorModel]
                retina_input_uids = [a[0] for a in prs]
            elif k == 'EB':
                # Drive every LeakyIAF neuron in the EB with a current step:
                input_processor = StepInputProcessor('I', [node[0] for node in graph.nodes(data=True) \
                       if node[1]['class'] == 'LeakyIAF'], 40.0, 0.0, 1.0)
                input_processors = [input_processor]
                extra_comps = [BufferVoltage]
            else:
                input_processors = []
                extra_comps = [BufferVoltage]
            # Task-supplied input processors override the defaults above:
            if 'inputProcessors' in task:
                input_processors = loadExperimentSettings(
                    task['inputProcessors'])
            output_processor = FileOutputProcessor(
                [('V', lpu['output_uid_list'])],
                lpu['output_file'],
                sample_interval=10)

            (comp_dict, conns) = LPU.graph_to_dicts(graph)

            # print(comp_dict)
            # print(conns)
            print(k)
            manager.add(LPU,
                        k,
                        dt,
                        comp_dict,
                        conns,
                        device=0,
                        input_processors=input_processors,
                        output_processors=[output_processor],
                        extra_comps=extra_comps,
                        debug=True)

        # connect LPUs by Patterns
        for k, pattern in patterns.items():
            l1, l2 = k.split('-')
            if l1 in lpus and l2 in lpus:
                print('Connecting {} and {}'.format(l1, l2))
                pat, key_order = Pattern.from_graph(nx.DiGraph(
                    pattern['graph']),
                                                    return_key_order=True)
                print(l1, l2)
                print(key_order)
                with Timer('update of connections in Manager'):
                    manager.connect(l1,
                                    l2,
                                    pat,
                                    int_0=key_order.index(l1),
                                    int_1=key_order.index(l2))

        # start simulation
        steps = config['General']['steps']
        ignored_steps = config['General']['ignored_steps']
        if 'steps' in task:
            steps = task['steps']
        if 'ignored_steps' in task:
            ignored_steps = task['ignored_steps']
        # ignored_steps = 0
        # steps = 100
        manager.spawn()
        manager.start(steps=steps)
        manager.wait()

        time.sleep(5)
        print(task)

        # post-processing inputs (hard coded, can be better organized)
        inputs = {
            u'ydomain': 1.0,
            u'xdomain': dt * (steps - ignored_steps),
            u'dt': dt * 10,
            u'data': {}
        }
        if 'retina' in lpus:
            # NOTE(review): retina_input_uids is only bound when a 'retina'
            # LPU was processed above, which matches this guard.
            input_array = si.read_array('{}_{}.h5'.format(
                config['Retina']['input_file'], user_id))
            inputs[u'ydomain'] = input_array.max()
            for i, item in enumerate(retina_input_uids):
                inputs['data'][item] = np.hstack(
                    (np.arange(int((steps - ignored_steps) / 10)).reshape(
                        (-1, 1)) * dt * 10, input_array[ignored_steps::10,
                                                        i:i + 1])).tolist()

            del input_array

        # post-processing outputs from all LPUs and combine them into one dictionary
        result = {
            u'ydomain': 1,
            u'xdomain': dt * (steps - ignored_steps),
            u'dt': dt * 10,
            u'data': {}
        }

        for k, lpu in lpus.items():
            # NOTE(review): h5py.File is opened without an explicit mode;
            # recent h5py defaults to read-only 'r' — confirm compatibility.
            with h5py.File(lpu['output_file']) as output_file:
                uids = output_file['V']['uids'][:]
                output_array = output_file['V']['data'][:]
                for i, item in enumerate(uids):
                    output = output_array[int(ignored_steps / 10):, i:i + 1]
                    # tmp = output.max()-output.min()
                    # if tmp <= 0.01: #mV
                    #     output = (output - output.min()) + 0.5
                    # else:
                    #     output = (output - output.min())/tmp*0.9+0.1
                    result['data'][item] = np.hstack(
                        (np.arange(int((steps - ignored_steps) / 10)).reshape(
                            (-1, 1)) * dt * 10, output)).tolist()

        return inputs, result
コード例 #22
0
ファイル: retina_demo.py プロジェクト: yluo42/neurokernel
def main():
    """Run the standalone retina LPU demo.

    Parses command-line options, optionally (re)generates the retina GEXF
    model and its input stimulus, then builds the LPU and runs the
    simulation, reporting the elapsed wall-clock time.
    """
    import neurokernel.mpi_relaunch

    logger = setup_logger(file_name=None, screen=True)

    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--layers', dest='num_layers', type=int,
                        default=16,
                        help='number of layers of ommatidia on circle')
    parser.add_argument('-i', '--input', action="store_true",
                        help='generates input if set')
    parser.add_argument('-g', '--gexf', action="store_true",
                        help='generates gexf of LPU if set')
    parser.add_argument('--steps', default=100, type=int,
                        help='simulation steps')
    args = parser.parse_args()

    dt = 1e-4
    GEXF_FILE = 'retina.gexf.gz'
    INPUT_FILE = 'vision_input.h5'
    IMAGE_FILE = 'image1.mat'
    OUTPUT_FILE = 'retina_output.h5'

    if args.input:
        print('Generating input of model from image file')
        generate_input(INPUT_FILE, IMAGE_FILE, args.num_layers)

    if args.gexf:
        print('Writing retina lpu')
        # Photoreceptor count for a hexagonal array with `layers` rings:
        layers = args.num_layers
        photoreceptor_num = 6 * (3 * layers * (layers + 1) + 1)
        generate_gexf(GEXF_FILE, photoreceptor_num)

    man = core_gpu.Manager()

    print('Parsing lpu data')
    n_dict_ret, s_dict_ret = LPU.lpu_parser(GEXF_FILE)
    print('Initializing LPU')
    man.add(LPU, 'retina', dt, n_dict_ret, s_dict_ret,
            input_file=INPUT_FILE, output_file=OUTPUT_FILE,
            device=0, debug=True, time_sync=False)
    man.spawn()

    print('Starting simulation')
    start_time = time.time()
    man.start(steps=args.steps)
    man.wait()
    print('Simulation complete: Duration {} seconds'.format(time.time() -
                                                            start_time))
コード例 #23
0
ファイル: test_lpu.py プロジェクト: CEPBEP/neurokernel
   def test_graph_to_dicts(self):
       """Check that LPU.graph_to_dicts converts a graph of two LeakyIAF and
       two MorrisLecar neurons plus two AlphaSynapse edges into the expected
       per-model column dicts (n_dict, s_dict)."""
       self.maxDiff = 2048
       g = nx.MultiDiGraph()
       # Two LeakyIAF and two MorrisLecar neurons (NetworkX 1.x positional
       # attribute dicts):
       g.add_node('0', {'model': 'LeakyIAF',
                        'spiking': True,
                        'extern': True,
                        'public': True,
                        'selector': '/lif0',
                        'C': 1.0,
                        'R': 1.0,
                        'V': -1.0,
                        'Vr': -0.1,
                        'Vt': -0.1,
                        'name': 'lif0'})
       g.add_node('1', {'model': 'LeakyIAF',
                        'spiking': True,
                        'extern': True,
                        'public': True,
                        'selector': '/lif1',
                        'C': 2.0,
                        'R': 2.0,
                        'V': -2.0,
                        'Vr': -0.2,
                        'Vt': -0.2,
                        'name': 'lif1'})
       g.add_node('2', {'model': 'MorrisLecar',
                        'spiking': False,
                        'extern': False,
                        'public': False,
                        'selector': '/ml0',
                        'V1': 0.03,
                        'V2': 0.3,
                        'V3': 0.2,
                        'V4': 0.1,
                        'initV': -0.1,
                        'initn': 0.1,
                        'offset': 0,
                        'phi': 0.01,
                        'name': 'ml0'})
       g.add_node('3', {'model': 'MorrisLecar',
                        'spiking': False,
                        'extern': False,
                        'public': False,
                        'selector': '/ml1',
                        'V1': 0.04,
                        'V2': 0.4,
                        'V3': 0.3,
                        'V4': 0.2,
                        'initV': -0.2,
                        'initn': 0.2,
                        'offset': 0,
                        'phi': 0.02,
                        'name': 'ml1'})
       # Reciprocal AlphaSynapse connections between the two LeakyIAF nodes:
       g.add_edge('0', '1', attr_dict={'class': 0,
                                       'model': 'AlphaSynapse',
                                       'conductance': True,
                                       'name': 'lif0-lif1',
                                       'reverse': 0.01,
                                       'ad': 0.01,
                                       'gr': 1.0,
                                       'gmax': 0.001})
       g.add_edge('1', '0', attr_dict={'class': 0,
                                       'model': 'AlphaSynapse',
                                       'conductance': True,
                                       'name': 'lif1-lif0',
                                       'reverse': 0.02,
                                       'ad': 0.02,
                                       'gr': 2.0,
                                       'gmax': 0.002})

       n_dict, s_dict = LPU.graph_to_dicts(g)
       # Node attributes grouped per model, one list entry per neuron:
       self.assertDictEqual(n_dict,
                            {'LeakyIAF': 
                             {'C': [1.0, 2.0], 
                              'name': ['lif0', 'lif1'], 
                              'id': [0, 1], 
                              'selector': ['/lif0', '/lif1'], 
                              'Vr': [-0.1, -0.2], 
                              'R': [1.0, 2.0], 
                              'Vt': [-0.1, -0.2], 
                              'V': [-1.0, -2.0], 
                              'extern': [True, True], 
                              'spiking': [True, True], 
                              'public': [True, True]},
                             'MorrisLecar': {
                                 'V1': [0.03,0.04],
                                 'V2': [0.3, 0.4],
                                 'V3': [0.2, 0.3],
                                 'V4': [0.1, 0.2],
                                 'initV': [-0.1, -0.2],
                                 'initn': [0.1, 0.2],
                                 'offset': [0, 0],
                                 'phi': [0.01, 0.02],
                                 'selector': ['/ml0','/ml1'],
                                 'name': ['ml0','ml1'],
                                 'id': [2, 3], 
                                 'extern': [False, False], 
                                 'spiking': [False, False], 
                                 'public': [False, False]}})
                                 
       # Synapse attributes grouped per model (note the '1'->'0' edge is
       # listed first in the expected column order):
       self.assertDictEqual(s_dict,
                            {'AlphaSynapse': 
                             {'pre': ['1', '0'], 
                              'reverse': [0.02, 0.01], 
                              'gmax': [0.002, 0.001], 
                              'post': ['0', '1'], 
                              'class': [0, 0], 
                              'conductance': [True, True], 
                              'ad': [0.02, 0.01], 
                              'gr': [2.0, 1.0],
                              'id': [0, 1], 
                              'name': ['lif1-lif0', 'lif0-lif1']}})
コード例 #24
0
# NOTE(review): the preceding parser.add_argument call is truncated in this
# excerpt; only its help string is visible below.
                    help='GPU for integration [default:3]')
args = parser.parse_args()

dt = 1e-4
dur = 1.4
Nt = args.steps or int(dur/dt)  # fall back to the full duration when --steps is falsy

# Route log output to file/screen/both depending on --log:
file_name = 'neurokernel.log' if args.log.lower() in ['file', 'both'] else None
screen = True if args.log.lower() in ['screen', 'both'] else False
logger = setup_logger(file_name=file_name, screen=screen)

man = core.Manager()

# Load configurations for lamina, medulla and antennal lobe models:
al_id = 'antennallobe'
(n_dict_al, s_dict_al) = LPU.lpu_parser( './data/antennallobe.gexf.gz')
man.add(LPU, al_id, dt, n_dict_al, s_dict_al,
        input_file='./data/olfactory_input.h5',
        output_file='antennallobe_output.h5',
        device=args.al_dev, time_sync=args.time_sync)

lam_id = 'lamina'
(n_dict_lam, s_dict_lam) = LPU.lpu_parser('./data/lamina.gexf.gz')
# NOTE(review): the lamina (and medulla below) reuse args.al_dev as the GPU
# device — possibly meant to use dedicated devices; confirm.
man.add(LPU, lam_id, dt, n_dict_lam, s_dict_lam,
        input_file='./data/vision_input.h5',
        output_file='lamina_output.h5',
        device=args.al_dev, time_sync=args.time_sync)

med_id = 'medulla'
(n_dict_med, s_dict_med) = LPU.lpu_parser('./data/medulla.gexf.gz')
# NOTE(review): this call is truncated in this excerpt.
man.add(LPU, med_id, dt, n_dict_med, s_dict_med,
コード例 #25
0
    def run(connected):
        """
        Set `connected` to True to connect the LPUs.

        Builds two generic LPUs from GEXF files, optionally creates random
        spike and graded-potential connections between their ports, and
        runs the simulation for `args.steps` steps.  Each LPU's output is
        written to an HDF5 file tagged 'co' (connected) or 'un'
        (unconnected).
        """

        import neurokernel.mpi_relaunch

        out_name = 'un' if not connected else 'co'
        man = core.Manager()

        # Parse both LPU definitions into component/connection dicts:
        lpu_file_0 = './data/generic_lpu_0.gexf.gz'
        lpu_file_1 = './data/generic_lpu_1.gexf.gz'
        comp_dict_0, conns_0 = LPU.lpu_parser(lpu_file_0)
        comp_dict_1, conns_1 = LPU.lpu_parser(lpu_file_1)

        fl_input_processor_0 = FileInputProcessor(
            './data/generic_lpu_0_input.h5')
        fl_output_processor_0 = FileOutputProcessor(
            [('V', None), ('spike_state', None)],
            'generic_lpu_0_%s_output.h5' % out_name,
            sample_interval=1)

        lpu_0_id = 'lpu_0'
        man.add(LPU,
                lpu_0_id,
                dt,
                comp_dict_0,
                conns_0,
                input_processors=[fl_input_processor_0],
                output_processors=[fl_output_processor_0],
                device=args.gpu_dev[0],
                debug=args.debug,
                time_sync=args.time_sync)

        fl_input_processor_1 = FileInputProcessor(
            './data/generic_lpu_1_input.h5')
        fl_output_processor_1 = FileOutputProcessor(
            [('V', None), ('spike_state', None)],
            'generic_lpu_1_%s_output.h5' % out_name,
            sample_interval=1)

        lpu_1_id = 'lpu_1'
        man.add(LPU,
                lpu_1_id,
                dt,
                comp_dict_1,
                conns_1,
                input_processors=[fl_input_processor_1],
                output_processors=[fl_output_processor_1],
                device=args.gpu_dev[1],
                debug=args.debug,
                time_sync=args.time_sync)

        # Create random connections between the input and output ports if the LPUs
        # are to be connected:
        if connected:

            # Find all output and input port selectors in each LPU:
            out_ports_spk_0 = plsel.Selector(','.join(
                LPU.extract_out_spk(comp_dict_0, 'id')[0]))
            out_ports_gpot_0 = plsel.Selector(','.join(
                LPU.extract_out_gpot(comp_dict_0, 'id')[0]))

            out_ports_spk_1 = plsel.Selector(','.join(
                LPU.extract_out_spk(comp_dict_1, 'id')[0]))
            out_ports_gpot_1 = plsel.Selector(','.join(
                LPU.extract_out_gpot(comp_dict_1, 'id')[0]))

            in_ports_spk_0 = plsel.Selector(','.join(
                LPU.extract_in_spk(comp_dict_0, 'id')[0]))
            in_ports_gpot_0 = plsel.Selector(','.join(
                LPU.extract_in_gpot(comp_dict_0, 'id')[0]))

            in_ports_spk_1 = plsel.Selector(','.join(
                LPU.extract_in_spk(comp_dict_1, 'id')[0]))
            in_ports_gpot_1 = plsel.Selector(','.join(
                LPU.extract_in_gpot(comp_dict_1, 'id')[0]))

            out_ports_0 = plsel.Selector.union(out_ports_spk_0,
                                               out_ports_gpot_0)
            out_ports_1 = plsel.Selector.union(out_ports_spk_1,
                                               out_ports_gpot_1)

            in_ports_0 = plsel.Selector.union(in_ports_spk_0, in_ports_gpot_0)
            in_ports_1 = plsel.Selector.union(in_ports_spk_1, in_ports_gpot_1)

            # Initialize a connectivity pattern between the two sets of port
            # selectors:
            pat = pattern.Pattern(
                plsel.Selector.union(out_ports_0, in_ports_0),
                plsel.Selector.union(out_ports_1, in_ports_1))

            # Create connections from the ports with identifiers matching the output
            # ports of one LPU to the ports with identifiers matching the input
            # ports of the other LPU:
            N_conn_spk_0_1 = min(len(out_ports_spk_0), len(in_ports_spk_1))
            N_conn_gpot_0_1 = min(len(out_ports_gpot_0), len(in_ports_gpot_1))
            for src, dest in zip(
                    random.sample(out_ports_spk_0.identifiers, N_conn_spk_0_1),
                    random.sample(in_ports_spk_1.identifiers, N_conn_spk_0_1)):
                pat[src, dest] = 1
                pat.interface[src, 'type'] = 'spike'
                pat.interface[dest, 'type'] = 'spike'
            for src, dest in zip(
                    random.sample(out_ports_gpot_0.identifiers,
                                  N_conn_gpot_0_1),
                    random.sample(in_ports_gpot_1.identifiers,
                                  N_conn_gpot_0_1)):
                pat[src, dest] = 1
                pat.interface[src, 'type'] = 'gpot'
                pat.interface[dest, 'type'] = 'gpot'

            man.connect(lpu_0_id, lpu_1_id, pat, 0, 1)

        # Run the simulation:
        man.spawn()
        man.start(steps=args.steps)
        man.wait()
コード例 #26
0
ファイル: olfaction_demo.py プロジェクト: yluo42/neurokernel
# NOTE(review): the enclosing parser.add_argument call is truncated in this
# excerpt; only its trailing arguments are visible below.
                    '--al_dev',
                    default=0,
                    type=int,
                    help='GPU for antennal lobe [default:0]')
args = parser.parse_args()

# Route log output to file/screen/both depending on --log:
file_name = None
screen = False
if args.log.lower() in ['file', 'both']:
    file_name = 'neurokernel.log'
if args.log.lower() in ['screen', 'both']:
    screen = True
logger = setup_logger(file_name=file_name, screen=screen)

man = core.Manager()

# Parse the antennal lobe LPU and run it with the olfactory stimulus file:
(n_dict, s_dict) = LPU.lpu_parser('./data/antennallobe.gexf.gz')

man.add(LPU,
        'al',
        dt,
        n_dict,
        s_dict,
        input_file='./data/olfactory_input.h5',
        output_file='olfactory_output.h5',
        debug=args.debug)

man.spawn()
man.start(steps=args.steps)
man.wait()
コード例 #27
0
args = parser.parse_args()

# Route log output to file/screen/both depending on --log:
file_name = None
screen = False
if args.log.lower() in ['file', 'both']:
    file_name = 'neurokernel.log'
if args.log.lower() in ['screen', 'both']:
    screen = True
logger = base.setup_logger(file_name=file_name, screen=screen)

# Use caller-supplied broker ports if both were given; otherwise pick
# random free ports:
if args.port_data is None and args.port_ctrl is None:
    port_data = get_random_port()
    port_ctrl = get_random_port()
else:
    port_data = args.port_data
    port_ctrl = args.port_ctrl

man = core.Manager(port_data, port_ctrl)
man.add_brok()

# Build and run the lamina LPU with the vision stimulus:
(n_dict_lam, s_dict_lam) = LPU.lpu_parser('./data/lamina.gexf.gz')
lpu_lam = LPU(dt, n_dict_lam, s_dict_lam,
              input_file='./data/vision_input.h5',
              output_file='lamina_output.h5', port_ctrl=port_ctrl,
              port_data=port_data, device=args.lam_dev, id='lamina')
man.add_mod(lpu_lam)

man.start(steps=args.steps)
man.stop()
コード例 #28
0
ファイル: vision_demo.py プロジェクト: yluo42/neurokernel
# NOTE(review): the preceding parser.add_argument call is truncated in this
# excerpt; only its help string is visible below.
                    help='GPU for medulla [default: 1]')

args = parser.parse_args()

# Route log output to file/screen/both depending on --log:
file_name = None
screen = False
if args.log.lower() in ['file', 'both']:
    file_name = 'neurokernel.log'
if args.log.lower() in ['screen', 'both']:
    screen = True
logger = setup_logger(file_name=file_name, screen=screen)

man = core.Manager()

# Lamina LPU, driven by the vision input file:
lam_id = 'lamina'
(n_dict_lam, s_dict_lam) = LPU.lpu_parser('./data/lamina.gexf.gz')
man.add(LPU,
        lam_id,
        dt,
        n_dict_lam,
        s_dict_lam,
        input_file='./data/vision_input.h5',
        output_file='lamina_output.h5',
        device=args.lam_dev,
        time_sync=args.time_sync)

# Medulla LPU.
# NOTE(review): this call is truncated in this excerpt.
med_id = 'medulla'
(n_dict_med, s_dict_med) = LPU.lpu_parser('./data/medulla.gexf.gz')
man.add(LPU,
        med_id,
        dt,
コード例 #29
0
ファイル: test_lpu.py プロジェクト: yiyin/neurokernel
    def test_graph_to_dicts(self):
        """Check that LPU.graph_to_dicts converts a graph of two LeakyIAF
        and two MorrisLecar neurons plus two AlphaSynapse edges into the
        expected per-model column dicts (n_dict, s_dict)."""
        self.maxDiff = 2048
        g = nx.MultiDiGraph()
        # Two LeakyIAF and two MorrisLecar neurons (NetworkX 1.x positional
        # attribute dicts):
        g.add_node(
            '0', {
                'model': 'LeakyIAF',
                'spiking': True,
                'extern': True,
                'public': True,
                'selector': '/lif0',
                'C': 1.0,
                'R': 1.0,
                'V': -1.0,
                'Vr': -0.1,
                'Vt': -0.1,
                'name': 'lif0'
            })
        g.add_node(
            '1', {
                'model': 'LeakyIAF',
                'spiking': True,
                'extern': True,
                'public': True,
                'selector': '/lif1',
                'C': 2.0,
                'R': 2.0,
                'V': -2.0,
                'Vr': -0.2,
                'Vt': -0.2,
                'name': 'lif1'
            })
        g.add_node(
            '2', {
                'model': 'MorrisLecar',
                'spiking': False,
                'extern': False,
                'public': False,
                'selector': '/ml0',
                'V1': 0.03,
                'V2': 0.3,
                'V3': 0.2,
                'V4': 0.1,
                'initV': -0.1,
                'initn': 0.1,
                'offset': 0,
                'phi': 0.01,
                'name': 'ml0'
            })
        g.add_node(
            '3', {
                'model': 'MorrisLecar',
                'spiking': False,
                'extern': False,
                'public': False,
                'selector': '/ml1',
                'V1': 0.04,
                'V2': 0.4,
                'V3': 0.3,
                'V4': 0.2,
                'initV': -0.2,
                'initn': 0.2,
                'offset': 0,
                'phi': 0.02,
                'name': 'ml1'
            })
        # Reciprocal AlphaSynapse connections between the LeakyIAF nodes:
        g.add_edge('0',
                   '1',
                   attr_dict={
                       'class': 0,
                       'model': 'AlphaSynapse',
                       'conductance': True,
                       'name': 'lif0-lif1',
                       'reverse': 0.01,
                       'ad': 0.01,
                       'gr': 1.0,
                       'gmax': 0.001
                   })
        g.add_edge('1',
                   '0',
                   attr_dict={
                       'class': 0,
                       'model': 'AlphaSynapse',
                       'conductance': True,
                       'name': 'lif1-lif0',
                       'reverse': 0.02,
                       'ad': 0.02,
                       'gr': 2.0,
                       'gmax': 0.002
                   })

        n_dict, s_dict = LPU.graph_to_dicts(g)
        # Node attributes grouped per model, one list entry per neuron:
        self.assertDictEqual(
            n_dict, {
                'LeakyIAF': {
                    'C': [1.0, 2.0],
                    'name': ['lif0', 'lif1'],
                    'id': [0, 1],
                    'selector': ['/lif0', '/lif1'],
                    'Vr': [-0.1, -0.2],
                    'R': [1.0, 2.0],
                    'Vt': [-0.1, -0.2],
                    'V': [-1.0, -2.0],
                    'extern': [True, True],
                    'spiking': [True, True],
                    'public': [True, True]
                },
                'MorrisLecar': {
                    'V1': [0.03, 0.04],
                    'V2': [0.3, 0.4],
                    'V3': [0.2, 0.3],
                    'V4': [0.1, 0.2],
                    'initV': [-0.1, -0.2],
                    'initn': [0.1, 0.2],
                    'offset': [0, 0],
                    'phi': [0.01, 0.02],
                    'selector': ['/ml0', '/ml1'],
                    'name': ['ml0', 'ml1'],
                    'id': [2, 3],
                    'extern': [False, False],
                    'spiking': [False, False],
                    'public': [False, False]
                }
            })

        # Synapse attributes grouped per model (the '1'->'0' edge comes
        # first in the expected column order):
        self.assertDictEqual(
            s_dict, {
                'AlphaSynapse': {
                    'pre': ['1', '0'],
                    'reverse': [0.02, 0.01],
                    'gmax': [0.002, 0.001],
                    'post': ['0', '1'],
                    'class': [0, 0],
                    'conductance': [True, True],
                    'ad': [0.02, 0.01],
                    'gr': [2.0, 1.0],
                    'id': [0, 1],
                    'name': ['lif1-lif0', 'lif0-lif1']
                }
            })
コード例 #30
0
    port_time = get_random_port()
else:
    port_time = args.port_time

# Choose a timing helper: with --timeit the stage label is passed through
# to print_time, otherwise it is dropped.
if args.timeit:
    ptime = lambda t, s: print_time(t, s)
else:
    ptime = lambda t, s: print_time(t)

# Emulation manager plus broker for inter-module communication.
man = core.Manager(port_data, port_ctrl, port_time)
man.add_brok()

tic = time()

# Load configurations for lamina, medulla and antennal lobe models:
(n_dict_al, s_dict_al) = LPU.lpu_parser( './data/antennallobe.gexf.gz')
lpu_al = LPU(dt, n_dict_al, s_dict_al,
             input_file='./data/olfactory_input.h5',
             output_file='antennallobe_output.h5',
             port_ctrl=port_ctrl, port_data=port_data, port_time=port_time,
             device=args.al_dev, id='antennallobe', time_sync=args.time_sync)
man.add_mod(lpu_al)
tic = ptime(tic, 'Load AL LPU time')

(n_dict_lam, s_dict_lam) = LPU.lpu_parser('./data/lamina.gexf.gz')
# NOTE(review): the lamina LPU reuses args.al_dev as its GPU device --
# looks like a copy-paste slip (a lamina-specific device flag seems
# intended); confirm before running both LPUs on separate GPUs.
lpu_lam = LPU(dt, n_dict_lam, s_dict_lam,
              input_file='./data/vision_input.h5',
              output_file='lamina_output.h5',
              port_ctrl=port_ctrl, port_data=port_data, port_time=port_time,
              device=args.al_dev, id='lamina', time_sync=args.time_sync)
man.add_mod(lpu_lam)
コード例 #31
0
    file_name = 'neurokernel.log'
if args.log.lower() in ['screen', 'both']:
    screen = True
# file_name is assumed to be initialized above (None or 'neurokernel.log').
logger = base.setup_logger(file_name, screen)

# Pick random ZMQ ports unless both were supplied on the command line.
if args.port_data is None and args.port_ctrl is None:
    port_data = get_random_port()
    port_ctrl = get_random_port()
else:
    port_data = args.port_data
    port_ctrl = args.port_ctrl

man = core.Manager(port_data, port_ctrl)
man.add_brok()

# Parse the antennal lobe model and wrap it in an LPU module.
(n_dict, s_dict) = LPU.lpu_parser('./data/antennallobe.gexf.gz')

al = LPU(dt,
         n_dict,
         s_dict,
         input_file='./data/olfactory_input.h5',
         output_file='olfactory_output.h5',
         port_ctrl=port_ctrl,
         port_data=port_data,
         device=args.al_dev,
         id='al',
         debug=args.debug)
man.add_mod(al)

# Run the emulation for the requested number of steps, then shut down.
man.start(steps=args.steps)
man.stop()
コード例 #32
0
File: vision_demo.py — Project: neurokernel/vision
                    help='GPU for medulla [default: 1]')

args = parser.parse_args()

# Route log output to file and/or screen according to --log.
file_name = None
screen = False
if args.log.lower() in ['file', 'both']:
    file_name = 'neurokernel.log'
if args.log.lower() in ['screen', 'both']:
    screen = True
logger = setup_logger(file_name=file_name, screen=screen)

man = core.Manager()

# Lamina LPU: driven by the precomputed vision input file.
lam_id = 'lamina'
(n_dict_lam, s_dict_lam) = LPU.lpu_parser('./data/lamina.gexf.gz')
man.add(LPU, lam_id, dt, n_dict_lam, s_dict_lam,
        input_file='./data/vision_input.h5',
        output_file='lamina_output.h5',
        device=args.lam_dev, time_sync=args.time_sync)

# Medulla LPU: no input file; it receives input from the lamina via the
# connectivity pattern below.
med_id = 'medulla'
(n_dict_med, s_dict_med) = LPU.lpu_parser('./data/medulla.gexf.gz')
man.add(LPU, med_id, dt, n_dict_med, s_dict_med,
        output_file='medulla_output.h5',
        device=args.med_dev, time_sync=args.time_sync)

# Connect lamina (interface 0) to medulla (interface 1) using the stored
# lamina-medulla pattern graph.
pat = pattern.Pattern.from_graph(nx.read_gexf('./data/lam_med.gexf.gz'))
man.connect(lam_id, med_id, pat, 0, 1)

man.spawn()
コード例 #33
0
    def launch(self, user_id, task):
        """Build and run a Neurokernel emulation for a user-submitted task.

        ``task['success']['data']`` holds the LPU and Pattern graphs returned
        by the NeuroArch database; ``task['neuron_list']`` names the neurons
        whose membrane voltage should be recorded.  Returns a pair of
        dictionaries ``(inputs, result)`` mapping recorded uids to
        ``[time, value]`` series suitable for visualization.

        NOTE(review): uses ``dict.iteritems()`` throughout, i.e. Python 2
        only.
        """
        neuron_uid_list = [str(a) for a in task['neuron_list']]

        conf_obj = get_config_obj()
        config = conf_obj.conf

        # Make the natural-input coordinate file per-user so concurrent
        # sessions do not clobber each other's files.
        if config['Retina']['intype'] == 'Natural':
            coord_file = config['InputType']['Natural']['coord_file']
            tmp = os.path.splitext(coord_file)
            config['InputType']['Natural']['coord_file'] = '{}_{}{}'.format(
                tmp[0], user_id, tmp[1])

        setup_logger(file_name='neurokernel_' + user_id + '.log', screen=False)

        manager = core.Manager()

        lpus = {}
        patterns = {}
        G = task['success']['data']

        # get graph and output_uid_list for each LPU
        for k, lpu in G['LPU'].iteritems():
            lpus[k] = {}
            g_lpu_na = create_graph_from_database_returned(lpu)
            lpu_nk_graph = nk.na_lpu_to_nk_new(g_lpu_na)
            lpus[k]['graph'] = lpu_nk_graph
            # Only record neurons that exist in this LPU's graph.
            lpus[k]['output_uid_list'] = list(
                set(lpu_nk_graph.nodes()).intersection(set(neuron_uid_list)))
            lpus[k]['output_file'] = '{}_output_{}.h5'.format(k, user_id)

        # get graph for each Pattern
        for k, pat in G['Pattern'].iteritems():
            # Pattern keys encode the two endpoints as 'lpu1-lpu2'.
            l1, l2 = k.split('-')
            if l1 in lpus and l2 in lpus:
                g_pattern_na = create_graph_from_database_returned(pat)
                pattern_nk = nk.na_pat_to_nk(g_pattern_na)
                # Keep only pattern ports that also appear as Port nodes in
                # one of the two connected LPU graphs.
                lpu_ports = [node[1]['selector'] \
                             for node in lpus[l1]['graph'].nodes(data=True) \
                             if node[1]['class']=='Port'] + \
                            [node[1]['selector'] \
                             for node in lpus[l2]['graph'].nodes(data=True) \
                             if node[1]['class']=='Port']
                pattern_ports = pattern_nk.nodes()
                patterns[k] = {}
                patterns[k]['graph'] = pattern_nk.subgraph(
                    list(set(lpu_ports).intersection(set(pattern_ports))))

        dt = config['General']['dt']

        # add LPUs to manager
        for k, lpu in lpus.iteritems():
            graph = lpu['graph']
            if k == 'retina':
                # Photoreceptors get a hard-coded microvilli count and a
                # per-user retina input processor.
                prs = [node for node in graph.nodes(data=True) \
                       if node[1]['class'] == 'PhotoreceptorModel']
                for pr in prs:
                    graph.node[pr[0]]['num_microvilli'] = 3000
                input_processors = [
                    RetinaInputIndividual(config, prs, user_id)
                ]
                extra_comps = [PhotoreceptorModel]
                retina_input_uids = [a[0] for a in prs]
            else:
                input_processors = []
                extra_comps = [BufferVoltage]
            # Record 'V' for the requested uids, subsampled every 10 steps.
            output_processor = FileOutputProcessor(
                [('V', lpu['output_uid_list'])],
                lpu['output_file'],
                sample_interval=10)

            (comp_dict, conns) = LPU.graph_to_dicts(graph)

            # NOTE(review): every LPU is placed on GPU device 0.
            manager.add(LPU,
                        k,
                        dt,
                        comp_dict,
                        conns,
                        device=0,
                        input_processors=input_processors,
                        output_processors=[output_processor],
                        extra_comps=extra_comps)

        # connect LPUs by Patterns
        for k, pattern in patterns.iteritems():
            l1, l2 = k.split('-')
            if l1 in lpus and l2 in lpus:
                print('Connecting {} and {}'.format(l1, l2))
                pat, key_order = Pattern.from_graph(
                    nx.DiGraph(pattern['graph']))
                with Timer('update of connections in Manager'):
                    manager.connect(l1,
                                    l2,
                                    pat,
                                    int_0=key_order.index(l1),
                                    int_1=key_order.index(l2))

        # start simulation
        steps = config['General']['steps']
        ignored_steps = config['General']['ignored_steps']
        manager.spawn()
        manager.start(steps=steps)
        manager.wait()

        # Give the output HDF5 files time to be flushed to disk before
        # reading them back below.
        time.sleep(5)

        # post-processing inputs (hard coded, can be better organized)
        inputs = {
            u'ydomain': 1.0,
            u'xdomain': dt * (steps - ignored_steps),
            u'dt': dt * 10,
            u'data': {}
        }
        if 'retina' in lpus:
            input_array = si.read_array('{}_{}.h5'.format(
                config['Retina']['input_file'], user_id))
            inputs[u'ydomain'] = input_array.max()
            # Pair every 10th input sample (after the warm-up period) with
            # its timestamp.
            for i, item in enumerate(retina_input_uids):
                inputs['data'][item] = np.hstack(
                    (np.arange(int((steps - ignored_steps) / 10)).reshape(
                        (-1, 1)) * dt * 10, input_array[ignored_steps::10,
                                                        i:i + 1])).tolist()

            # Free the (potentially large) raw input before reading outputs.
            del input_array

        # post-processing outputs from all LPUs and combine them into one dictionary
        result = {
            u'ydomain': 1,
            u'xdomain': dt * (steps - ignored_steps),
            u'dt': dt * 10,
            u'data': {}
        }

        for k, lpu in lpus.iteritems():
            with h5py.File(lpu['output_file']) as output_file:
                uids = output_file['V']['uids'][:]
                output_array = output_file['V']['data'][:]
                for i, item in enumerate(uids):
                    output = output_array[int(ignored_steps / 10):, i:i + 1]
                    tmp = output.max() - output.min()
                    # Nearly-flat traces are centered at 0.5; others are
                    # rescaled into [0.1, 1.0] for display.
                    if tmp <= 0.01:  #mV
                        output = (output - output.min()) + 0.5
                    else:
                        output = (output - output.min()) / tmp * 0.9 + 0.1
                    result['data'][item] = np.hstack(
                        (np.arange(int((steps - ignored_steps) / 10)).reshape(
                            (-1, 1)) * dt * 10, output)).tolist()

        return inputs, result
コード例 #34
0
args = parser.parse_args()

# Route log output to file and/or screen according to --log.
file_name = None
screen = False
if args.log.lower() in ['file', 'both']:
    file_name = 'neurokernel.log'
if args.log.lower() in ['screen', 'both']:
    screen = True
logger = setup_logger(file_name=file_name, screen=screen)

man = core.Manager()

# Fixed seed so the generated benchmark LPU is reproducible across runs.
np.random.seed(0)
lpu_name = 'neurodriver'
g = create_lpu_graph(lpu_name, *args.n)
n_dict, s_dict = LPU.graph_to_dicts(g)
# Counts reported by the benchmark (not used by the emulation itself).
total_neurons =  \
    len([d for n, d in g.nodes(data=True) if d['model'] == 'LeakyIAF'])
total_synapses = \
    len([d for f, t, d in g.edges(data=True) if d['model'] == 'AlphaSynapse'])

# No output file: this run only measures execution time.
output_file = None

man.add(MyLPU, lpu_name, dt, n_dict, s_dict, I_const=0.6,
        output_file=output_file,
        device=args.gpu_dev,
        debug=args.debug, time_sync=True)

man.spawn()
start = time.time()
man.start(steps=args.steps)
コード例 #35
0
File: cx_demo.py — Project: conghannn/CX_Interface
#lpu_name_to_s_dict = {}    # LPU name -> s_dict data struct
lpu_name_to_comp_dict = {}  # LPU name -> comp_dict data struct
lpu_name_to_conn_list = {}  # LPU name -> conn_list data struct

# Pull each LPU out of the NeuroArch database and convert it into the
# comp_dict/conn_list structures expected by Neurokernel's LPU class.
for name in lpu_name_list:
    lpu_name_to_node[name] = graph.LPUs.query(name=name).one()
    lpu_name_to_g_na[name] = lpu_name_to_node[name].traverse_owns(
        max_levels=2).get_as('nx')
    lpu_name_to_g_nk_orig[name] = nk.na_lpu_to_nk_new(lpu_name_to_g_na[name])
    # Renumber nodes, then relabel LeakyIAF nodes by sorted 'name' so the
    # resulting node ordering is deterministic.
    lpu_name_to_g_nk[name] = nx.convert_node_labels_to_integers(
        lpu_name_to_g_nk_orig[name], ordering='sorted')
    lpu_name_to_g_nk[name] = \
        partly_relabel_by_sorted_attr(lpu_name_to_g_nk[name], 'model', ['LeakyIAF'], 'name')
    #lpu_name_to_n_dict[name], lpu_name_to_s_dict[name] = LPU.graph_to_dicts(lpu_name_to_g_nk[name])
    lpu_name_to_comp_dict[name], lpu_name_to_conn_list[
        name] = LPU.graph_to_dicts(lpu_name_to_g_nk[name])

# Patterns:
pat_name_list = [n.name for n in graph.Patterns.query().all()]

pat_name_to_node = {}  # LPU pair -> pyorient Pattern node
pat_name_to_g_na = {}  # LPU pair -> NeuroArch-compatible graph
pat_name_to_g_nk = {}  # LPU pair -> Neurokernel-compatible graph
pat_name_to_pat = {}  # LPU pair -> Pattern class instance

for name in pat_name_list:
    pat_name_to_node[name] = graph.Patterns.query(name=name).one()
    pat_name_to_g_na[name] = pat_name_to_node[name].traverse_owns(
        max_levels=2).get_as('nx')
    pat_name_to_g_nk[name] = nk.na_pat_to_nk(pat_name_to_g_na[name])
    pat_name_to_pat[name] = pattern.Pattern.from_graph(
コード例 #36
0
# Route log output to file and/or screen according to --log.
file_name = None
screen = False
if args.log.lower() in ['file', 'both']:
    file_name = 'neurokernel.log'
if args.log.lower() in ['screen', 'both']:
    screen = True
logger = base.setup_logger(file_name, screen)

# Pick random ZMQ ports unless both were supplied on the command line.
if args.port_data is None and args.port_ctrl is None:
    port_data = get_random_port()
    port_ctrl = get_random_port()
else:
    port_data = args.port_data
    port_ctrl = args.port_ctrl

man = core.Manager(port_data, port_ctrl)
man.add_brok()

# Parse the olfactory model and wrap it in an LPU module.
(n_dict, s_dict) = LPU.lpu_parser('./data/olfactory_lpu.gexf.gz')
olf = LPU(dt, n_dict, s_dict,
         input_file='./data/olfactory_input.h5',
         output_file='olfactory_output.h5', port_ctrl=port_ctrl,
         port_data=port_data,
         device=args.gpu_dev, id='olf',
         debug=args.debug)
man.add_mod(olf)

# Run the emulation for the requested number of steps, then shut down.
man.start(steps=args.steps)
man.stop()
コード例 #37
0
File: olfaction_demo.py — Project: MariyaS/neurokernel
# Route log output to file and/or screen according to --log.
file_name = None
screen = False
if args.log.lower() in ['file', 'both']:
    file_name = 'neurokernel.log'
if args.log.lower() in ['screen', 'both']:
    screen = True
logger = base.setup_logger(file_name=file_name, screen=screen)

# Pick random data/control ports unless both were supplied; the time port
# is always chosen randomly.
if args.port_data is None and args.port_ctrl is None:
    port_data = get_random_port()
    port_ctrl = get_random_port()
else:
    port_data = args.port_data
    port_ctrl = args.port_ctrl

port_time = get_random_port()
man = core.Manager(port_data, port_ctrl, port_time)
man.add_brok()

# Parse the antennal lobe model and wrap it in an LPU module.
(n_dict, s_dict) = LPU.lpu_parser('./data/antennallobe.gexf.gz')

al = LPU(dt, n_dict, s_dict, input_file='./data/olfactory_input.h5',
         output_file='olfactory_output.h5', port_ctrl=port_ctrl,
         port_data=port_data,
         device=args.al_dev, id='al',
         debug=args.debug)
man.add_mod(al)

# Run the emulation for the requested number of steps, then shut down.
man.start(steps=args.steps)
man.stop()
コード例 #38
0
                    help='GPU for integration [default:3]')
args = parser.parse_args()

# Time step (s) and total duration (s) of the emulation.
dt = 1e-4
dur = 1.4
# Fall back to the full duration when --steps was not given (or was 0).
Nt = args.steps or int(dur / dt)

file_name = 'neurokernel.log' if args.log.lower() in ['file', 'both'] else None
screen = True if args.log.lower() in ['screen', 'both'] else False
logger = setup_logger(file_name=file_name, screen=screen)

man = core.Manager()

# Load configurations for lamina, medulla and antennal lobe models:
al_id = 'antennallobe'
(n_dict_al, s_dict_al) = LPU.lpu_parser('./data/antennallobe.gexf.gz')
man.add(LPU,
        al_id,
        dt,
        n_dict_al,
        s_dict_al,
        input_file='./data/olfactory_input.h5',
        output_file='antennallobe_output.h5',
        device=args.al_dev,
        time_sync=args.time_sync)

lam_id = 'lamina'
(n_dict_lam, s_dict_lam) = LPU.lpu_parser('./data/lamina.gexf.gz')
man.add(LPU,
        lam_id,
        dt,
コード例 #39
0
    # file_name/screen defaults are assumed initialized above (None/False).
    if args.log.lower() in ['file', 'both']:
        file_name = 'neurokernel.log'
    if args.log.lower() in ['screen', 'both']:
        screen = True
    logger = setup_logger(file_name=file_name, screen=screen)

    man = core.Manager()

    # Single Connor-Stevens neuron driven by a current step.
    G = nx.MultiDiGraph()

    G.add_node('neuron0', **{
               'class': 'ConnorStevens',
               'name': 'ConnorStevens',
               })

    comp_dict, conns = LPU.graph_to_dicts(G)

    # Step current of amplitude 40 applied from t=0.15 to t=0.25
    # (units per StepInputProcessor convention -- confirm).
    fl_input_processor = StepInputProcessor('I', ['neuron0'], 40, 0.15, 0.25)
    fl_output_processor = FileOutputProcessor([('spike_state', None),('V', None)], 'new_output.h5', sample_interval=1)

    man.add(LPU, 'ge', dt, comp_dict, conns,
            device=args.gpu_dev, input_processors = [fl_input_processor],
            output_processors = [fl_output_processor], debug=args.debug)

    man.spawn()
    man.start(steps=args.steps)
    man.wait()

    # plot the result
    import h5py
    import matplotlib
コード例 #40
0
File: retina_demo.py — Project: Hanyu-Li/neurokernel
    n = args.num_layers
    photoreceptor_num = 6*(3*n*(n+1)+1)
    generate_gexf(GEXF_FILE, photoreceptor_num)

# Pick random ZMQ ports unless both were supplied on the command line.
if args.port_data is None and args.port_ctrl is None:
    port_data = get_random_port()
    port_ctrl = get_random_port()
else:
    port_data = args.port_data
    port_ctrl = args.port_ctrl

man = core.Manager(port_data, port_ctrl)
man.add_brok()

# Parse the (possibly regenerated) retina model and wrap it in an LPU.
print('Parsing lpu data')
n_dict_ret, s_dict_ret = LPU.lpu_parser(GEXF_FILE)
print('Initializing LPU')
lpu_ret = LPU(dt, n_dict_ret, s_dict_ret,
              input_file=INPUT_FILE,
              output_file=OUTPUT_FILE, port_ctrl=port_ctrl,
              port_data=port_data, device=args.ret_dev, id='retina',
              debug=False)

man.add_mod(lpu_ret)

# Run the emulation and report wall-clock duration.
print('Starting simulation')
start_time = time.time()
man.start(steps=args.steps)
man.stop()
print('Simulation complete: Duration {} seconds'.format(time.time() - 
                                                            start_time))
コード例 #41
0
File: multi_demo.py — Project: parasdsingh/neurokernel
else:
    port_data = args.port_data
    port_ctrl = args.port_ctrl

# Build one generic LPU per entry in neu_dict; only LPU 0 receives
# external input (from in_file_name_0).  Python 2 only (iteritems).
for i, neu_num in neu_dict.iteritems():
    lpu_entry = {}

    if i == 0:
        in_file_name = in_file_name_0
    else:
        in_file_name = None
    lpu_file_name = 'generic_lpu_%s.gexf.gz' % i
    out_file_name = 'generic_output_%s.h5' % i

    # Generate the LPU GEXF file, then parse it back into dicts.
    g.create_lpu(lpu_file_name, *neu_num)
    (n_dict, s_dict) = LPU.lpu_parser(lpu_file_name)

    # (shadows the builtin id(); kept as-is)
    id = 'lpu_%s' % i
    # NOTE(review): each LPU is placed on GPU index i -- assumes one GPU
    # per LPU is available; confirm for multi-LPU runs.
    lpu = LPU(dt,
              n_dict,
              s_dict,
              input_file=in_file_name,
              output_file=out_file_name,
              port_ctrl=port_ctrl,
              port_data=port_data,
              device=i,
              id=id,
              debug=args.debug)

    lpu_entry['lpu_file_name'] = lpu_file_name
    lpu_entry['in_file_name'] = in_file_name
コード例 #42
0
import h5py
import networkx as nx
import argparse
import itertools
import random
import pickle

import neurokernel.mpi_relaunch
import neurokernel.core_gpu as core

from neurokernel.LPU.InputProcessors.StepInputProcessor import StepInputProcessor
from neurokernel.LPU.InputProcessors.FileInputProcessor import FileInputProcessor
from neurokernel.tools.logging import setup_logger
from neurokernel.LPU.LPU import LPU

# Parse the Neuroballad-exported model into the component/connection
# structures understood by Neurokernel's LPU class.
(comp_dict, conns) = LPU.lpu_parser('neuroballad_temp_model.gexf.gz')

# NOTE: pickle is only acceptable here because these files are produced
# locally by the same toolchain; never load pickles from untrusted sources.
with open('run_parameters.pickle', 'rb') as f:
    run_parameters = pickle.load(f)
with open('record_parameters.pickle', 'rb') as f:
    record_parameters = pickle.load(f)

# Simulation duration (s) and time step (s) come from the pickled run
# parameters.  (The previous hard-coded defaults `dur = 1.0` / `dt = 1e-4`
# were dead stores, unconditionally overwritten, and have been removed.)
dur = run_parameters[0]
dt = run_parameters[1]

# Stream the pre-generated stimulus into the LPU.
fl_input_processor = FileInputProcessor('neuroballad_temp_model_input.h5')

from neurokernel.LPU.OutputProcessors.FileOutputProcessor import FileOutputProcessor
# Record the requested variables at every step.
output_processor = FileOutputProcessor(record_parameters,
                                       'neuroballad_temp_model_output.h5',
                                       sample_interval=1)
コード例 #43
0
                    help='Write connectivity structures and inter-LPU routed data in debug folder')
# Command-line options: logging target, step count, and GPU device.
parser.add_argument('-l', '--log', default='none', type=str,
                    help='Log output to screen [file, screen, both, or none; default:none]')
parser.add_argument('-s', '--steps', default=steps, type=int,
                    help='Number of steps [default: %s]' % steps)
parser.add_argument('-g', '--gpu_dev', default=0, type=int,
                    help='GPU device number [default: 0]')
args = parser.parse_args()

# Route log output to file and/or screen according to --log.
file_name = None
screen = False
if args.log.lower() in ['file', 'both']:
    file_name = 'neurokernel.log'
if args.log.lower() in ['screen', 'both']:
    screen = True
logger = setup_logger(file_name=file_name, screen=screen)

man = core.Manager()

# Parse the generic LPU and register it with the manager.
(n_dict, s_dict) = LPU.lpu_parser('./data/generic_lpu.gexf.gz')

man.add(LPU, 'ge', dt, n_dict, s_dict,
        input_file='./data/generic_input.h5',
        output_file='generic_output.h5', 
        device=args.gpu_dev,
        debug=args.debug)

# Launch worker processes, run, and wait for completion.
man.spawn()
man.start(steps=args.steps)
man.wait()
コード例 #44
0
if args.log.lower() in ['screen', 'both']:
    screen = True
# file_name is assumed to be initialized above (None or 'neurokernel.log').
logger = base.setup_logger(file_name, screen)

# Pick random ZMQ ports unless both were supplied on the command line.
if args.port_data is None and args.port_ctrl is None:
    port_data = get_random_port()
    port_ctrl = get_random_port()
else:
    port_data = args.port_data
    port_ctrl = args.port_ctrl

man = core.Manager(port_data, port_ctrl)
man.add_brok()

# Load configurations for lamina, medulla and antennal lobe models:
(n_dict_al, s_dict_al) = LPU.lpu_parser('./data/antennallobe.gexf.gz')
# Note: ports are taken from the manager (man.port_ctrl/man.port_data)
# here rather than the local port_* variables set above.
lpu_al = LPU(dt,
             n_dict_al,
             s_dict_al,
             input_file='./data/olfactory_input.h5',
             output_file='antennallobe_output.h5',
             port_ctrl=man.port_ctrl,
             port_data=man.port_data,
             device=args.al_dev,
             id='antennallobe')
man.add_mod(lpu_al)

(n_dict_lam, s_dict_lam) = LPU.lpu_parser('./data/lamina.gexf.gz')
lpu_lam = LPU(dt,
              n_dict_lam,
              s_dict_lam,
コード例 #45
0
    n = args.num_layers
    photoreceptor_num = 6 * (3 * n * (n + 1) + 1)
    generate_gexf(GEXF_FILE, photoreceptor_num)

# Pick random ZMQ ports unless both were supplied on the command line.
if args.port_data is None and args.port_ctrl is None:
    port_data = get_random_port()
    port_ctrl = get_random_port()
else:
    port_data = args.port_data
    port_ctrl = args.port_ctrl

man = core.Manager(port_data, port_ctrl)
man.add_brok()

# Parse the retina model and wrap it in an LPU module on GPU 0.
print('Parsing lpu data')
n_dict_ret, s_dict_ret = LPU.lpu_parser(GEXF_FILE)
print('Initializing LPU')
lpu_ret = LPU(dt,
              n_dict_ret,
              s_dict_ret,
              input_file=INPUT_FILE,
              output_file=OUTPUT_FILE,
              port_ctrl=port_ctrl,
              port_data=port_data,
              device=0,
              id='retina',
              debug=False)

man.add_mod(lpu_ret)

print('Starting simulation')
コード例 #46
0
File: intro_demo.py — Project: MariyaS/neurokernel
def run(connected):
    """Run two generic LPUs, optionally connected by a random port pattern.

    When `connected` is True, random spike and graded-potential
    connections are created from LPU 0's output ports to LPU 1's input
    ports before the emulation starts.  Output files are suffixed with
    'co'/'un' to distinguish connected from unconnected runs.
    """
    # Pick random ports for any that were not supplied on the command line.
    if args.port_data is None:
        port_data = get_random_port()
    else:
        port_data = args.port_data
    if args.port_ctrl is None:
        port_ctrl = get_random_port()
    else:
        port_ctrl = args.port_ctrl
    if args.port_time is None:
        port_time = get_random_port()
    else:
        port_time = args.port_time

    out_name = 'un' if not connected else 'co'
    man = core.Manager(port_data, port_ctrl, port_time)
    man.add_brok()

    # Parse both generic LPUs and register them with the manager.
    lpu_file_0 = './data/generic_lpu_0.gexf.gz'
    lpu_file_1 = './data/generic_lpu_1.gexf.gz'
    (n_dict_0, s_dict_0) = LPU.lpu_parser(lpu_file_0)
    (n_dict_1, s_dict_1) = LPU.lpu_parser(lpu_file_1)

    lpu_0_id = 'lpu_0'
    lpu_0 = LPU(dt, n_dict_0, s_dict_0,
                input_file='./data/generic_lpu_0_input.h5',
                output_file='generic_lpu_0_%s_output.h5' % out_name,
                port_ctrl=port_ctrl, port_data=port_data,
                port_time=port_time,
                device=args.gpu_dev[0], id=lpu_0_id,
                debug=args.debug, time_sync=args.time_sync)
    man.add_mod(lpu_0)

    lpu_1_id = 'lpu_1'
    lpu_1 = LPU(dt, n_dict_1, s_dict_1,
                input_file='./data/generic_lpu_1_input.h5',
                output_file='generic_lpu_1_%s_output.h5' % out_name,
                port_ctrl=port_ctrl, port_data=port_data,
                port_time=port_time,
                device=args.gpu_dev[1], id=lpu_1_id,
                debug=args.debug, time_sync=args.time_sync)
    man.add_mod(lpu_1)

    # Create random connections between the input and output ports if the LPUs
    # are to be connected:
    if connected:

        # Find all output and input port selectors in each LPU:
        out_ports_0 = lpu_0.interface.out_ports().to_selectors()
        out_ports_1 = lpu_1.interface.out_ports().to_selectors()

        in_ports_0 = lpu_0.interface.in_ports().to_selectors()
        in_ports_1 = lpu_1.interface.in_ports().to_selectors()

        out_ports_spk_0 = lpu_0.interface.out_ports().spike_ports().to_selectors()
        out_ports_gpot_0 = lpu_0.interface.out_ports().gpot_ports().to_selectors()

        out_ports_spk_1 = lpu_1.interface.out_ports().spike_ports().to_selectors()
        out_ports_gpot_1 = lpu_1.interface.out_ports().gpot_ports().to_selectors()

        in_ports_spk_0 = lpu_0.interface.in_ports().spike_ports().to_selectors()
        in_ports_gpot_0 = lpu_0.interface.in_ports().gpot_ports().to_selectors()

        in_ports_spk_1 = lpu_1.interface.in_ports().spike_ports().to_selectors()
        in_ports_gpot_1 = lpu_1.interface.in_ports().gpot_ports().to_selectors()

        # Initialize a connectivity pattern between the two sets of port
        # selectors:
        pat = pattern.Pattern(','.join(out_ports_0+in_ports_0),
                              ','.join(out_ports_1+in_ports_1))

        # Create connections from the ports with identifiers matching the output
        # ports of one LPU to the ports with identifiers matching the input
        # ports of the other LPU:
        N_conn_spk_0_1 = min(len(out_ports_spk_0), len(in_ports_spk_1))
        N_conn_gpot_0_1 = min(len(out_ports_gpot_0), len(in_ports_gpot_1))
        for src, dest in zip(random.sample(out_ports_spk_0, N_conn_spk_0_1), 
                             random.sample(in_ports_spk_1, N_conn_spk_0_1)):
            pat[src, dest] = 1
            pat.interface[src, 'type'] = 'spike'
            pat.interface[dest, 'type'] = 'spike'
        for src, dest in zip(random.sample(out_ports_gpot_0, N_conn_gpot_0_1),
                             random.sample(in_ports_gpot_1, N_conn_gpot_0_1)):
            pat[src, dest] = 1
            pat.interface[src, 'type'] = 'gpot'
            pat.interface[dest, 'type'] = 'gpot'

        man.connect(lpu_0, lpu_1, pat, 0, 1)

    man.start(steps=args.steps)
    man.stop()
コード例 #47
0
File: intro_demo.py — Project: chungheng/neurodriver
    def run(connected):
        """
        Set `connected` to True to connect the LPUs.

        Builds two generic LPUs from GEXF files, attaches file-based
        input/output processors, optionally wires random spike/gpot
        connections between their ports, and runs the emulation until
        args.steps steps have completed.
        """

        # Side-effect import; presumably relaunches the script under MPI
        # (per module name) -- confirm against neurokernel docs.
        import neurokernel.mpi_relaunch

        out_name = 'un' if not connected else 'co'
        man = core.Manager()

        # Parse both generic LPUs into comp_dict/conns structures.
        lpu_file_0 = './data/generic_lpu_0.gexf.gz'
        lpu_file_1 = './data/generic_lpu_1.gexf.gz'
        comp_dict_0, conns_0 = LPU.lpu_parser(lpu_file_0)
        comp_dict_1, conns_1 = LPU.lpu_parser(lpu_file_1)
        
        fl_input_processor_0 = FileInputProcessor('./data/generic_lpu_0_input.h5')
        fl_output_processor_0 = FileOutputProcessor(
                    [('V',None),('spike_state',None)],
                    'generic_lpu_0_%s_output.h5' % out_name, sample_interval=1)

        lpu_0_id = 'lpu_0'
        man.add(LPU, lpu_0_id, dt, comp_dict_0, conns_0,
                    input_processors = [fl_input_processor_0],
                    output_processors = [fl_output_processor_0],
                    device=args.gpu_dev[0],
                    debug=args.debug, time_sync=args.time_sync)

        fl_input_processor_1 = FileInputProcessor('./data/generic_lpu_1_input.h5')
        fl_output_processor_1 = FileOutputProcessor(
                    [('V',None),('spike_state',None)],
                    'generic_lpu_1_%s_output.h5' % out_name, sample_interval=1)
                    
        lpu_1_id = 'lpu_1'
        man.add(LPU, lpu_1_id, dt, comp_dict_1, conns_1,
                    input_processors = [fl_input_processor_1],
                    output_processors = [fl_output_processor_1],
                    device=args.gpu_dev[1],
                    debug=args.debug, time_sync=args.time_sync)

        # Create random connections between the input and output ports if the LPUs
        # are to be connected:
        if connected:

            # Find all output and input port selectors in each LPU:
            out_ports_spk_0 = plsel.Selector(
                            ','.join(LPU.extract_out_spk(comp_dict_0, 'id')[0]))
            out_ports_gpot_0 = plsel.Selector(
                            ','.join(LPU.extract_out_gpot(comp_dict_0, 'id')[0]))

            out_ports_spk_1 = plsel.Selector(
                            ','.join(LPU.extract_out_spk(comp_dict_1, 'id')[0]))
            out_ports_gpot_1 = plsel.Selector(
                            ','.join(LPU.extract_out_gpot(comp_dict_1, 'id')[0]))

            in_ports_spk_0 = plsel.Selector(
                            ','.join(LPU.extract_in_spk(comp_dict_0, 'id')[0]))
            in_ports_gpot_0 = plsel.Selector(
                            ','.join(LPU.extract_in_gpot(comp_dict_0, 'id')[0]))

            in_ports_spk_1 = plsel.Selector(
                            ','.join(LPU.extract_in_spk(comp_dict_1, 'id')[0]))
            in_ports_gpot_1 = plsel.Selector(
                            ','.join(LPU.extract_in_gpot(comp_dict_1, 'id')[0]))

            out_ports_0 = plsel.Selector.union(out_ports_spk_0, out_ports_gpot_0)
            out_ports_1 = plsel.Selector.union(out_ports_spk_1, out_ports_gpot_1)

            in_ports_0 = plsel.Selector.union(in_ports_spk_0, in_ports_gpot_0)
            in_ports_1 = plsel.Selector.union(in_ports_spk_1, in_ports_gpot_1)

            # Initialize a connectivity pattern between the two sets of port
            # selectors:
            pat = pattern.Pattern(plsel.Selector.union(out_ports_0, in_ports_0),
                                  plsel.Selector.union(out_ports_1, in_ports_1))

            # Create connections from the ports with identifiers matching the output
            # ports of one LPU to the ports with identifiers matching the input
            # ports of the other LPU:
            N_conn_spk_0_1 = min(len(out_ports_spk_0), len(in_ports_spk_1))
            N_conn_gpot_0_1 = min(len(out_ports_gpot_0), len(in_ports_gpot_1))
            for src, dest in zip(random.sample(out_ports_spk_0.identifiers,
                                               N_conn_spk_0_1),
                                 random.sample(in_ports_spk_1.identifiers,
                                               N_conn_spk_0_1)):
                pat[src, dest] = 1
                pat.interface[src, 'type'] = 'spike'
                pat.interface[dest, 'type'] = 'spike'
            for src, dest in zip(random.sample(out_ports_gpot_0.identifiers,
                                               N_conn_gpot_0_1),
                                 random.sample(in_ports_gpot_1.identifiers,
                                               N_conn_gpot_0_1)):
                pat[src, dest] = 1
                pat.interface[src, 'type'] = 'gpot'
                pat.interface[dest, 'type'] = 'gpot'

            man.connect(lpu_0_id, lpu_1_id, pat, 0, 1)

        man.spawn()
        man.start(steps=args.steps)
        man.wait()
コード例 #48
0
#lpu_name_to_n_dict = {}    # LPU name -> n_dict data struct
#lpu_name_to_s_dict = {}    # LPU name -> s_dict data struct
lpu_name_to_comp_dict = {}  # LPU name -> comp_dict data struct
lpu_name_to_conn_list = {}  # LPU name -> conn_list data struct

# Pull each LPU out of the NeuroArch database and convert it into the
# comp_dict/conn_list structures expected by Neurokernel's LPU class.
for name in lpu_name_list:
    lpu_name_to_node[name] = graph.LPUs.query(name=name).one()
    lpu_name_to_g_na[name] = lpu_name_to_node[name].traverse_owns(
        max_levels=2).get_as('nx')
    lpu_name_to_g_nk_orig[name] = nk.na_lpu_to_nk_new(lpu_name_to_g_na[name])
    # Renumber nodes, then relabel LeakyIAF nodes by sorted 'name' so the
    # resulting node ordering is deterministic.
    lpu_name_to_g_nk[name] = nx.convert_node_labels_to_integers(
        lpu_name_to_g_nk_orig[name], ordering='sorted')
    lpu_name_to_g_nk[name] = \
        partly_relabel_by_sorted_attr(lpu_name_to_g_nk[name], 'model', ['LeakyIAF'], 'name')
    lpu_name_to_comp_dict[name], lpu_name_to_conn_list[
        name] = LPU.graph_to_dicts(lpu_name_to_g_nk[name])

# Select spiking projection neurons:
lpu_name_to_neurons = {}
'''
for name in lpu_name_list:
    lpu_name_to_neurons[name] = \
        sorted([int(k) for k, n in lpu_name_to_g_nk[name].node.items() if \
                     n['class'] != 'port_in_spk' and \
                     n['spiking']])
'''

##### Pick 80 Neurons and sort them for visualization   ######
sort_list = {}
lpu_region_to_vision_region = lpu_region_to_number()
for name in ['BU', 'bu']:
コード例 #49
0
    file_name = 'neurokernel.log'
if args.log.lower() in ['screen', 'both']:
    screen = True
logger = base.setup_logger(file_name, screen)

# Use caller-supplied broker ports when both are given; otherwise pick
# free random ports for data and control channels.
if not (args.port_data is None and args.port_ctrl is None):
    port_data, port_ctrl = args.port_data, args.port_ctrl
else:
    port_data = get_random_port()
    port_ctrl = get_random_port()

# Emulation manager plus its message broker.
man = core.Manager(port_data, port_ctrl)
man.add_brok()

# Parse the olfactory LPU description and register it with the manager.
n_dict, s_dict = LPU.lpu_parser('./data/olfactory_lpu.gexf.gz')
olf = LPU(dt, n_dict, s_dict,
          input_file='./data/olfactory_input.h5',
          output_file='olfactory_output.h5',
          port_data=port_data,
          port_ctrl=port_ctrl,
          device=args.gpu_dev,
          id='olf',
          debug=args.debug)
man.add_mod(olf)

# Run the emulation for the requested number of steps, then shut down.
man.start(steps=args.steps)
man.stop()
コード例 #50
0
ファイル: lamina_demo.py プロジェクト: hihihippp/neurokernel
args = parser.parse_args()

# Route log output to a file, the screen, or both, per the --log option.
file_name = None
screen = False
log_dest = args.log.lower()
if log_dest in ['file', 'both']:
    file_name = 'neurokernel.log'
if log_dest in ['screen', 'both']:
    screen = True
logger = base.setup_logger(file_name, screen)

# Use caller-supplied broker ports when both are given; otherwise pick
# free random ports for data and control channels.
if not (args.port_data is None and args.port_ctrl is None):
    port_data, port_ctrl = args.port_data, args.port_ctrl
else:
    port_data = get_random_port()
    port_ctrl = get_random_port()

# Emulation manager plus its message broker.
man = core.Manager(port_data, port_ctrl)
man.add_brok()

# Parse the lamina LPU description and register it with the manager.
n_dict_lam, s_dict_lam = LPU.lpu_parser('./data/lamina.gexf.gz')
lpu_lam = LPU(dt, n_dict_lam, s_dict_lam,
              input_file='./data/vision_input.h5',
              output_file='lamina_output.h5',
              port_data=port_data,
              port_ctrl=port_ctrl,
              device=args.lam_dev,
              id='lamina')
man.add_mod(lpu_lam)

# Run the emulation for the requested number of steps, then shut down.
man.start(steps=args.steps)
man.stop()
コード例 #51
0
ファイル: ret_lam_demo.py プロジェクト: Hanyu-Li/neurokernel
        print('Writing lamina lpu')
        eyemodel.write_lamina(LAM_GEXF_FILE)

# Use caller-supplied broker ports when both are given; otherwise pick
# free random ports for data and control channels.
if not (args.port_data is None and args.port_ctrl is None):
    port_data, port_ctrl = args.port_data, args.port_ctrl
else:
    port_data = get_random_port()
    port_ctrl = get_random_port()

# Only launch the emulation when execution has not been suppressed
# (e.g. when the run was invoked just to generate the GEXF files).
if not args.suppress:
    man = core.Manager(port_data, port_ctrl)
    man.add_brok()

    # Parse the retina LPU description and register it with the manager.
    print('Parsing retina lpu data')
    n_dict_ret, s_dict_ret = LPU.lpu_parser(RET_GEXF_FILE)
    print('Initializing retina LPU')
    lpu_ret = LPU(dt, n_dict_ret, s_dict_ret,
                  input_file=INPUT_FILE,
                  output_file=RET_OUTPUT_FILE,
                  port_data=port_data,
                  port_ctrl=port_ctrl,
                  device=args.ret_dev,
                  id='retina',
                  debug=True)
    man.add_mod(lpu_ret)
    if not args.retina_only:
        print('Parsing lamina lpu data')