Example #1
def add_LPU(config, manager):
    config_photor = config['Photoreceptor']
    gexf_file = config_photor['gexf_file']
    input_file = config_photor['input_file']
    output_file = config_photor['output_file']

    G = generate_gexf(config_photor, gexf_file)

    comp_dict, conns = LPU.graph_to_dicts(G)
    LPU_id = 'photoreceptor'
    debug = config_photor['debug']

    dt = config['General']['dt']
    extra_comps = [PhotoreceptorModel]

    input_processor = StepInputProcessor('photon', G.nodes(), 10000, 0.2, 1)
    output_processor = FileOutputProcessor([('V', G.nodes())],
                                           output_file,
                                           sample_interval=1)
    manager.add(LPU,
                LPU_id,
                dt,
                comp_dict,
                conns,
                device=0,
                input_processors=[input_processor],
                output_processors=[output_processor],
                debug=debug,
                time_sync=False,
                extra_comps=extra_comps)
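
To actually run the LPU registered by add_LPU, the manager still has to be spawned and started; a minimal sketch, following the run sequence used in Examples #9 and #12 below (the step count is an assumption):

manager.spawn()
manager.start(steps=steps)  # e.g. steps = config['General']['steps'] (assumed)
manager.wait()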
Example #2
def add_retina_LPU(config, retina_index, retina, manager):
    '''
        This method adds a Retina LPU and its parameters to the manager
        so that it can be initialized later. Depending on the configuration,
        the input can either be created in advance and read from a file, or
        generated during the simulation by a generator object.

        --
        config: configuration dictionary-like object
        retina_index: identifier of the eye, in case more than one is used
        retina: retina array object required for the generation of the
            graph.
        manager: manager object to which the LPU will be added
    '''
    dt = config['General']['dt']
    debug = config['Retina']['debug']
    time_sync = config['Retina']['time_sync']

    input_filename = config['Retina']['input_file']
    output_filename = config['Retina']['output_file']
    gexf_filename = config['Retina']['gexf_file']
    suffix = config['General']['file_suffix']

    output_file = '{}{}{}.h5'.format(output_filename, retina_index, suffix)
    gexf_file = '{}{}{}.gexf.gz'.format(gexf_filename, retina_index, suffix)
    
    inputmethod = config['Retina']['inputmethod']
    if inputmethod == 'read':
        print('Generating input files')
        with Timer('input generation'):
            input_processor = RetinaFileInputProcessor(config, retina)
    else:
        print('Using input generating function')
        input_processor = RetinaInputProcessor(config, retina)

    output_processor = FileOutputProcessor([('V',None),('spike_state',None)], output_file, sample_interval=1)

    # retina also allows a subset of its graph to be taken
    # in case it is needed later to split the retina model to more
    # GPUs
    G = retina.get_worker_nomaster_graph()
    nx.write_gexf(G, gexf_file)

    (comp_dict, conns) = LPU.graph_to_dicts(G)
    retina_id = get_retina_id(retina_index)

    extra_comps = [LeakyIAF, BufferPhoton]

    manager.add(LPU, retina_id, dt, comp_dict, conns,
                device=retina_index, input_processors=[input_processor],
                output_processors=[output_processor],
                debug=debug, time_sync=time_sync, extra_comps=extra_comps)
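
For reference, a minimal sketch of the configuration object add_retina_LPU expects, reconstructed from the keys it reads (all values are illustrative placeholders):

config = {
    'General': {'dt': 1e-4, 'file_suffix': ''},
    'Retina': {
        'debug': False,
        'time_sync': False,
        'input_file': 'retina_input',
        'output_file': 'retina_output',
        'gexf_file': 'retina',
        'inputmethod': 'generate',  # 'read' switches to RetinaFileInputProcessor
    },
}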
Example #3
def cx_component(graph):

    # LPU lists
    lpu_name_list = ['BU', 'bu', 'EB', 'FB', 'PB']
    lpu_name_to_node = {}      # LPU name -> pyorient LPU node
    lpu_name_to_g_na = {}      # LPU name -> NeuroArch-compatible graph
    lpu_name_to_g_nk_orig = {} # LPU name -> Neurokernel-compatible graph
    lpu_name_to_g_nk = {}      # LPU name -> Neurokernel-compatible graph with int IDs
    #lpu_name_to_n_dict = {}    # LPU name -> n_dict data struct
    #lpu_name_to_s_dict = {}    # LPU name -> s_dict data struct
    lpu_name_to_comp_dict = {} # LPU name -> comp_dict data struct
    lpu_name_to_conn_list = {} # LPU name -> conn_list data struct

    for name in lpu_name_list:
        lpu_name_to_node[name] = graph.LPUs.query(name=name).one()
        lpu_name_to_g_na[name] = lpu_name_to_node[name].traverse_owns(max_levels=2).get_as('nx')
        lpu_name_to_g_nk_orig[name] = nk.na_lpu_to_nk_new(lpu_name_to_g_na[name])
        lpu_name_to_g_nk[name] = nx.convert_node_labels_to_integers(lpu_name_to_g_nk_orig[name], ordering='sorted')
        lpu_name_to_g_nk[name] = \
            partly_relabel_by_sorted_attr(lpu_name_to_g_nk[name], 'model', ['LeakyIAF'], 'name')
        #lpu_name_to_n_dict[name], lpu_name_to_s_dict[name] = LPU.graph_to_dicts(lpu_name_to_g_nk[name])
        lpu_name_to_comp_dict[name], lpu_name_to_conn_list[name] = LPU.graph_to_dicts(lpu_name_to_g_nk[name])

        nx.write_gexf(lpu_name_to_g_nk[name], name + '.gexf.gz')

    # Patterns:
    pat_name_list = [n.name for n in graph.Patterns.query().all()]

    pat_name_to_node = {}     # LPU pair -> pyorient Pattern node
    pat_name_to_g_na = {}     # LPU pair -> NeuroArch-compatible graph
    pat_name_to_g_nk = {}     # LPU pair -> Neurokernel-compatible graph
    pat_name_to_pat = {}      # LPU pair -> Pattern class instance

    for name in pat_name_list:
        pat_name_to_node[name] = graph.Patterns.query(name=name).one()
        pat_name_to_g_na[name] = pat_name_to_node[name].traverse_owns(max_levels=2).get_as('nx')
        pat_name_to_g_nk[name] = nk.na_pat_to_nk(pat_name_to_g_na[name])
        pat_name_to_pat[name] = Pattern.Pattern.from_graph(nx.DiGraph(pat_name_to_g_nk[name]))

    return lpu_name_to_comp_dict, lpu_name_to_conn_list, pat_name_list, pat_name_to_pat
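
A hedged usage sketch for cx_component: the returned dictionaries feed manager.add, and the Pattern instances feed manager.connect, following the idiom of Examples #10 and #12 (the core.Manager() instance, dt, and the interface indices are assumptions):

lpu_name_to_comp_dict, lpu_name_to_conn_list, pat_name_list, pat_name_to_pat = \
    cx_component(graph)
for name in lpu_name_to_comp_dict:
    manager.add(LPU, name, dt,
                lpu_name_to_comp_dict[name], lpu_name_to_conn_list[name],
                device=0)
for pat_name in pat_name_list:
    l1, l2 = pat_name.split('-')  # assumes 'LPU1-LPU2' pattern names, as in Examples #10/#12
    manager.connect(l1, l2, pat_name_to_pat[pat_name], int_0=0, int_1=1)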
Example #4
def add_lamina_LPU(config, lamina_index, lamina, manager):
    '''
        This method adds a Lamina LPU and its parameters to the manager
        so that it can be initialized later.

        --
        config: configuration dictionary-like object
        lamina_index: identifier of the eye, in case more than one is used
        lamina: lamina array object required for the generation of the
            graph.
        manager: manager object to which the LPU will be added
    '''

    output_filename = config['Lamina']['output_file']
    gexf_filename = config['Lamina']['gexf_file']
    suffix = config['General']['file_suffix']

    dt = config['General']['dt']
    debug = config['Lamina']['debug']
    time_sync = config['Lamina']['time_sync']

    output_file = '{}{}{}.h5'.format(output_filename, lamina_index, suffix)
    gexf_file = '{}{}{}.gexf.gz'.format(gexf_filename, lamina_index, suffix)
    G = lamina.get_graph()
    nx.write_gexf(G, gexf_file)

    (comp_dict, conns) = LPU.graph_to_dicts(G)
    lamina_id = get_lamina_id(lamina_index)
    
    extra_comps = [BufferVoltage]
    
    output_processor = FileOutputProcessor(
                            [('V', None)], output_file,
                            sample_interval=1)

    manager.add(LPU, lamina_id, dt, comp_dict, conns,
                output_processors=[output_processor],
                device=lamina_index + 1, debug=debug, time_sync=time_sync,
                extra_comps=extra_comps)
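
A hedged sketch of how the retina and lamina helpers above might be combined for one eye (the config, retina, and lamina objects are assumed to come from the surrounding application):

manager = core.Manager()
add_retina_LPU(config, 0, retina, manager)  # runs on device 0
add_lamina_LPU(config, 0, lamina, manager)  # runs on device 1 (lamina_index + 1)
manager.spawn()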
Example #5
    man = core.Manager()

    G = nx.MultiDiGraph()

    G.add_node(
        'neuron0',
        **{
            'class': 'LeakyIAF',
            'name': 'LeakyIAF',
            'resting_potential': -70.0,
            'threshold': -45.0,
            'capacitance': 0.07,  # membrane capacitance
            'resistance': 0.2,  # membrane resistance
        })

    comp_dict, conns = LPU.graph_to_dicts(G)

    fl_input_processor = StepInputProcessor('I', ['neuron0'], 40, 0.2, 0.8)
    fl_output_processor = FileOutputProcessor([('spike_state', None),
                                               ('V', None)],
                                              'new_output.h5',
                                              sample_interval=1)

    man.add(LPU,
            'ge',
            dt,
            comp_dict,
            conns,
            device=args.gpu_dev,
            input_processors=[fl_input_processor],
            output_processors=[fl_output_processor])
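
As in the other manager-based examples, running the simulation would then be a spawn/start/wait sequence; a minimal sketch, assuming args.steps as in Example #7:

    man.spawn()
    man.start(steps=args.steps)
    man.wait()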
Example #6
#lpu_name_to_n_dict = {}    # LPU name -> n_dict data struct
#lpu_name_to_s_dict = {}    # LPU name -> s_dict data struct
lpu_name_to_comp_dict = {}  # LPU name -> comp_dict data struct
lpu_name_to_conn_list = {}  # LPU name -> conn_list data struct

for name in lpu_name_list:
    lpu_name_to_node[name] = graph.LPUs.query(name=name).one()
    lpu_name_to_g_na[name] = lpu_name_to_node[name].traverse_owns(
        max_levels=2).get_as('nx')
    lpu_name_to_g_nk_orig[name] = nk.na_lpu_to_nk_new(lpu_name_to_g_na[name])
    lpu_name_to_g_nk[name] = nx.convert_node_labels_to_integers(
        lpu_name_to_g_nk_orig[name], ordering='sorted')
    lpu_name_to_g_nk[name] = \
        partly_relabel_by_sorted_attr(lpu_name_to_g_nk[name], 'model', ['LeakyIAF'], 'name')
    lpu_name_to_comp_dict[name], lpu_name_to_conn_list[
        name] = LPU.graph_to_dicts(lpu_name_to_g_nk[name])

# Select spiking projection neurons:
lpu_name_to_neurons = {}
'''
for name in lpu_name_list:
    lpu_name_to_neurons[name] = \
        sorted([int(k) for k, n in lpu_name_to_g_nk[name].node.items() if \
                     n['class'] != 'port_in_spk' and \
                     n['spiking']])
'''

##### Pick 80 Neurons and sort them for visualization   ######
sort_list = {}
lpu_region_to_vision_region = lpu_region_to_number()
for name in ['BU', 'bu']:
    pass  # loop body omitted in this snippet
Example #7
args = parser.parse_args()

file_name = None
screen = False
if args.log.lower() in ['file', 'both']:
    file_name = 'neurokernel.log'
if args.log.lower() in ['screen', 'both']:
    screen = True
logger = setup_logger(file_name=file_name, screen=screen)

man = core.Manager()

np.random.seed(0)
lpu_name = 'neurodriver'
g = create_lpu_graph(lpu_name, *args.n)
n_dict, s_dict = LPU.graph_to_dicts(g)
total_neurons =  \
    len([d for n, d in g.nodes(data=True) if d['model'] == 'LeakyIAF'])
total_synapses = \
    len([d for f, t, d in g.edges(data=True) if d['model'] == 'AlphaSynapse'])

output_file = None

man.add(MyLPU, lpu_name, dt, n_dict, s_dict, I_const=0.6,
        output_file=output_file,
        device=args.gpu_dev,
        debug=args.debug, time_sync=True)

man.spawn()
start = time.time()
man.start(steps=args.steps)
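
A hedged completion for the timing measurement started above, using the man.wait() idiom from the other examples:

man.wait()
print('simulation took {:.2f} s'.format(time.time() - start))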
Example #8
import numpy as np

from neurokernel.LPU.LPU import LPU
import vision_configuration as vc

np.random.seed(10000)

lamina = vc.Lamina(24, 32, 'neuron_types_lamina.csv', 'synapse_lamina.csv', None)
lamina.create_cartridges()
lamina.connect_cartridges()
lamina.create_non_columnar_neurons()
lamina.connect_composition_II()
lamina.connect_composition_I()
lamina.add_selectors()
g_lam = lamina.export_to_gexf('lamina.gexf.gz')
n_dict_lam, s_dict_lam = LPU.graph_to_dicts(g_lam)

medulla = vc.Medulla(24, 32, 'neuron_types_medulla.csv', 'synapse_medulla.csv', 'synapse_medulla_other.csv')
medulla.create_cartridges()
medulla.connect_cartridges()
medulla.create_non_columnar_neurons()
medulla.connect_composition_I()
medulla.connect_composition_II()
medulla.connect_composition_III()
medulla.add_selectors()
g_med = medulla.export_to_gexf('medulla.gexf.gz')
n_dict_med, s_dict_med = LPU.graph_to_dicts(g_med)

vc.create_pattern(n_dict_lam, n_dict_med, 'lam_med.gexf.gz')
Example #9
    if args.log.lower() in ['file', 'both']:
        file_name = 'neurokernel.log'
    if args.log.lower() in ['screen', 'both']:
        screen = True
    logger = setup_logger(file_name=file_name, screen=screen)

    man = core.Manager()

    G = nx.MultiDiGraph()

    G.add_node('neuron0', **{
               'class': 'ConnorStevens',
               'name': 'ConnorStevens',
               })

    comp_dict, conns = LPU.graph_to_dicts(G)

    fl_input_processor = StepInputProcessor('I', ['neuron0'], 40, 0.15, 0.25)
    fl_output_processor = FileOutputProcessor([('spike_state', None),('V', None)], 'new_output.h5', sample_interval=1)

    man.add(LPU, 'ge', dt, comp_dict, conns,
            device=args.gpu_dev, input_processors = [fl_input_processor],
            output_processors = [fl_output_processor], debug=args.debug)

    man.spawn()
    man.start(steps=args.steps)
    man.wait()

    # plot the result
    import h5py
    import matplotlib
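
A hedged completion of the plotting step, assuming the FileOutputProcessor HDF5 layout ('V/uids' and 'V/data') used in Examples #10 and #12, and that numpy (as np) and dt are available in the enclosing script:

    matplotlib.use('agg')
    import matplotlib.pyplot as plt

    with h5py.File('new_output.h5', 'r') as f:
        V = f['V']['data'][:]  # one column of membrane voltage per recorded uid
    plt.plot(np.arange(V.shape[0]) * dt, V[:, 0])
    plt.xlabel('time (s)')
    plt.ylabel('V')
    plt.savefig('connor_stevens_output.png')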
Example #10
    def launch(self, user_id, task):
        neuron_uid_list = [str(a) for a in task['neuron_list']]

        conf_obj = get_config_obj()
        config = conf_obj.conf

        if config['Retina']['intype'] == 'Natural':
            coord_file = config['InputType']['Natural']['coord_file']
            tmp = os.path.splitext(coord_file)
            config['InputType']['Natural']['coord_file'] = '{}_{}{}'.format(
                tmp[0], user_id, tmp[1])

        setup_logger(file_name='neurokernel_' + user_id + '.log', screen=False)

        manager = core.Manager()

        lpus = {}
        patterns = {}
        G = task['success']['data']

        # get graph and output_uid_list for each LPU
        for k, lpu in G['LPU'].iteritems():
            lpus[k] = {}
            g_lpu_na = create_graph_from_database_returned(lpu)
            lpu_nk_graph = nk.na_lpu_to_nk_new(g_lpu_na)
            lpus[k]['graph'] = lpu_nk_graph
            lpus[k]['output_uid_list'] = list(
                set(lpu_nk_graph.nodes()).intersection(set(neuron_uid_list)))
            lpus[k]['output_file'] = '{}_output_{}.h5'.format(k, user_id)

        # get graph for each Pattern
        for k, pat in G['Pattern'].iteritems():
            l1, l2 = k.split('-')
            if l1 in lpus and l2 in lpus:
                g_pattern_na = create_graph_from_database_returned(pat)
                pattern_nk = nk.na_pat_to_nk(g_pattern_na)
                lpu_ports = [node[1]['selector'] \
                             for node in lpus[l1]['graph'].nodes(data=True) \
                             if node[1]['class']=='Port'] + \
                            [node[1]['selector'] \
                             for node in lpus[l2]['graph'].nodes(data=True) \
                             if node[1]['class']=='Port']
                pattern_ports = pattern_nk.nodes()
                patterns[k] = {}
                patterns[k]['graph'] = pattern_nk.subgraph(
                    list(set(lpu_ports).intersection(set(pattern_ports))))

        dt = config['General']['dt']

        # add LPUs to manager
        for k, lpu in lpus.iteritems():
            graph = lpu['graph']
            if k == 'retina':
                prs = [node for node in graph.nodes(data=True) \
                       if node[1]['class'] == 'PhotoreceptorModel']
                for pr in prs:
                    graph.node[pr[0]]['num_microvilli'] = 3000
                input_processors = [
                    RetinaInputIndividual(config, prs, user_id)
                ]
                extra_comps = [PhotoreceptorModel]
                retina_input_uids = [a[0] for a in prs]
            else:
                input_processors = []
                extra_comps = [BufferVoltage]
            output_processor = FileOutputProcessor(
                [('V', lpu['output_uid_list'])],
                lpu['output_file'],
                sample_interval=10)

            (comp_dict, conns) = LPU.graph_to_dicts(graph)

            manager.add(LPU,
                        k,
                        dt,
                        comp_dict,
                        conns,
                        device=0,
                        input_processors=input_processors,
                        output_processors=[output_processor],
                        extra_comps=extra_comps)

        # connect LPUs by Patterns
        for k, pattern in patterns.iteritems():
            l1, l2 = k.split('-')
            if l1 in lpus and l2 in lpus:
                print('Connecting {} and {}'.format(l1, l2))
                pat, key_order = Pattern.from_graph(
                    nx.DiGraph(pattern['graph']))
                with Timer('update of connections in Manager'):
                    manager.connect(l1,
                                    l2,
                                    pat,
                                    int_0=key_order.index(l1),
                                    int_1=key_order.index(l2))

        # start simulation
        steps = config['General']['steps']
        ignored_steps = config['General']['ignored_steps']
        manager.spawn()
        manager.start(steps=steps)
        manager.wait()

        time.sleep(5)

        # post-processing inputs (hard coded, can be better organized)
        inputs = {
            u'ydomain': 1.0,
            u'xdomain': dt * (steps - ignored_steps),
            u'dt': dt * 10,
            u'data': {}
        }
        if 'retina' in lpus:
            input_array = si.read_array('{}_{}.h5'.format(
                config['Retina']['input_file'], user_id))
            inputs[u'ydomain'] = input_array.max()
            for i, item in enumerate(retina_input_uids):
                inputs['data'][item] = np.hstack(
                    (np.arange(int((steps - ignored_steps) / 10)).reshape(
                        (-1, 1)) * dt * 10, input_array[ignored_steps::10,
                                                        i:i + 1])).tolist()

            del input_array

        # post-processing outputs from all LPUs and combine them into one dictionary
        result = {
            u'ydomain': 1,
            u'xdomain': dt * (steps - ignored_steps),
            u'dt': dt * 10,
            u'data': {}
        }

        for k, lpu in lpus.iteritems():
            with h5py.File(lpu['output_file']) as output_file:
                uids = output_file['V']['uids'][:]
                output_array = output_file['V']['data'][:]
                for i, item in enumerate(uids):
                    output = output_array[int(ignored_steps / 10):, i:i + 1]
                    tmp = output.max() - output.min()
                    if tmp <= 0.01:  #mV
                        output = (output - output.min()) + 0.5
                    else:
                        output = (output - output.min()) / tmp * 0.9 + 0.1
                    result['data'][item] = np.hstack(
                        (np.arange(int((steps - ignored_steps) / 10)).reshape(
                            (-1, 1)) * dt * 10, output)).tolist()

        return inputs, result
Example #11
    def test_graph_to_dicts(self):
        self.maxDiff = 2048
        g = nx.MultiDiGraph()
        g.add_node(
            '0', {
                'model': 'LeakyIAF',
                'spiking': True,
                'extern': True,
                'public': True,
                'selector': '/lif0',
                'C': 1.0,
                'R': 1.0,
                'V': -1.0,
                'Vr': -0.1,
                'Vt': -0.1,
                'name': 'lif0'
            })
        g.add_node(
            '1', {
                'model': 'LeakyIAF',
                'spiking': True,
                'extern': True,
                'public': True,
                'selector': '/lif1',
                'C': 2.0,
                'R': 2.0,
                'V': -2.0,
                'Vr': -0.2,
                'Vt': -0.2,
                'name': 'lif1'
            })
        g.add_node(
            '2', {
                'model': 'MorrisLecar',
                'spiking': False,
                'extern': False,
                'public': False,
                'selector': '/ml0',
                'V1': 0.03,
                'V2': 0.3,
                'V3': 0.2,
                'V4': 0.1,
                'initV': -0.1,
                'initn': 0.1,
                'offset': 0,
                'phi': 0.01,
                'name': 'ml0'
            })
        g.add_node(
            '3', {
                'model': 'MorrisLecar',
                'spiking': False,
                'extern': False,
                'public': False,
                'selector': '/ml1',
                'V1': 0.04,
                'V2': 0.4,
                'V3': 0.3,
                'V4': 0.2,
                'initV': -0.2,
                'initn': 0.2,
                'offset': 0,
                'phi': 0.02,
                'name': 'ml1'
            })
        g.add_edge('0',
                   '1',
                   attr_dict={
                       'class': 0,
                       'model': 'AlphaSynapse',
                       'conductance': True,
                       'name': 'lif0-lif1',
                       'reverse': 0.01,
                       'ad': 0.01,
                       'gr': 1.0,
                       'gmax': 0.001
                   })
        g.add_edge('1',
                   '0',
                   attr_dict={
                       'class': 0,
                       'model': 'AlphaSynapse',
                       'conductance': True,
                       'name': 'lif1-lif0',
                       'reverse': 0.02,
                       'ad': 0.02,
                       'gr': 2.0,
                       'gmax': 0.002
                   })

        n_dict, s_dict = LPU.graph_to_dicts(g)
        self.assertDictEqual(
            n_dict, {
                'LeakyIAF': {
                    'C': [1.0, 2.0],
                    'name': ['lif0', 'lif1'],
                    'id': [0, 1],
                    'selector': ['/lif0', '/lif1'],
                    'Vr': [-0.1, -0.2],
                    'R': [1.0, 2.0],
                    'Vt': [-0.1, -0.2],
                    'V': [-1.0, -2.0],
                    'extern': [True, True],
                    'spiking': [True, True],
                    'public': [True, True]
                },
                'MorrisLecar': {
                    'V1': [0.03, 0.04],
                    'V2': [0.3, 0.4],
                    'V3': [0.2, 0.3],
                    'V4': [0.1, 0.2],
                    'initV': [-0.1, -0.2],
                    'initn': [0.1, 0.2],
                    'offset': [0, 0],
                    'phi': [0.01, 0.02],
                    'selector': ['/ml0', '/ml1'],
                    'name': ['ml0', 'ml1'],
                    'id': [2, 3],
                    'extern': [False, False],
                    'spiking': [False, False],
                    'public': [False, False]
                }
            })

        self.assertDictEqual(
            s_dict, {
                'AlphaSynapse': {
                    'pre': ['1', '0'],
                    'reverse': [0.02, 0.01],
                    'gmax': [0.002, 0.001],
                    'post': ['0', '1'],
                    'class': [0, 0],
                    'conductance': [True, True],
                    'ad': [0.02, 0.01],
                    'gr': [2.0, 1.0],
                    'id': [0, 1],
                    'name': ['lif1-lif0', 'lif0-lif1']
                }
            })
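
Note that this test targets the older LPU API, in which graph_to_dicts returned per-model n_dict/s_dict structures; newer Neurokernel versions return a component dictionary and a connection list from the same call, as in Examples #1-#4:

        comp_dict, conns = LPU.graph_to_dicts(g)  # newer-API return values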
Example #12
    def launch(self, user_id, task):
        neuron_uid_list = [str(a) for a in task['neuron_list']]

        conf_obj = get_config_obj()
        config = conf_obj.conf

        if config['Retina']['intype'] == 'Natural':
            coord_file = config['InputType']['Natural']['coord_file']
            tmp = os.path.splitext(coord_file)
            config['InputType']['Natural']['coord_file'] = '{}_{}{}'.format(
                tmp[0], user_id, tmp[1])

        setup_logger(file_name='neurokernel_' + user_id + '.log', screen=True)

        manager = core.Manager()

        lpus = {}
        patterns = {}
        G = task['data']
        with open('G.pickle', 'wb') as f:
            pickle.dump(G, f, protocol=pickle.HIGHEST_PROTOCOL)
        print(G)
        print(G.keys())
        print(G['LPU'])
        print(G['LPU'].keys())

        # get graph and output_uid_list for each LPU
        for k, lpu in G['LPU'].items():
            lpus[k] = {}
            g_lpu_na = create_graph_from_database_returned(lpu)
            lpu_nk_graph = nk.na_lpu_to_nk_new(g_lpu_na)
            lpus[k]['graph'] = lpu_nk_graph
            lpus[k]['output_uid_list'] = list(
                set(lpu_nk_graph.nodes()).intersection(set(neuron_uid_list)))
            lpus[k]['output_file'] = '{}_output_{}.h5'.format(k, user_id)

        for kkey, lpu in lpus.items():
            graph = lpu['graph']

            for uid, comp in graph.node.items():
                if 'attr_dict' in comp:
                    print('Found attr_dict; fixing...')
                    nx.set_node_attributes(graph, {uid: comp['attr_dict']})
                    # print('changed',uid)
                    graph.nodes[uid].pop('attr_dict')
            for i, j, k, v in graph.edges(keys=True, data=True):
                if 'attr_dict' in v:
                    for key in v['attr_dict']:
                        nx.set_edge_attributes(
                            graph, {(i, j, k): {
                                        key: v['attr_dict'][key]
                                    }})
                    graph.edges[(i, j, k)].pop('attr_dict')
            lpus[kkey]['graph'] = graph

        # get graph for each Pattern
        for k, pat in G['Pattern'].items():
            l1, l2 = k.split('-')
            if l1 in lpus and l2 in lpus:
                g_pattern_na = create_graph_from_database_returned(pat)
                pattern_nk = nk.na_pat_to_nk(g_pattern_na)
                print(lpus[l1]['graph'].nodes(data=True))
                lpu_ports = [node[1]['selector'] \
                             for node in lpus[l1]['graph'].nodes(data=True) \
                             if node[1]['class']=='Port'] + \
                            [node[1]['selector'] \
                             for node in lpus[l2]['graph'].nodes(data=True) \
                             if node[1]['class']=='Port']
                pattern_ports = pattern_nk.nodes()
                patterns[k] = {}
                patterns[k]['graph'] = pattern_nk.subgraph(
                    list(set(lpu_ports).intersection(set(pattern_ports))))

        dt = config['General']['dt']
        if 'dt' in task:
            dt = task['dt']
            print(dt)

        # add LPUs to manager
        for k, lpu in lpus.items():
            lpu_name = k
            graph = lpu['graph']

            for uid, comp in graph.node.items():
                if 'attr_dict' in comp:
                    nx.set_node_attributes(graph, {uid: comp['attr_dict']})
                    # print('changed',uid)
                    graph.nodes[uid].pop('attr_dict')
            for i, j, ko, v in graph.edges(keys=True, data=True):
                if 'attr_dict' in v:
                    for key in v['attr_dict']:
                        nx.set_edge_attributes(
                            graph, {(i, j, ko): {
                                        key: v['attr_dict'][key]
                                    }})
                    graph.edges[(i, j, ko)].pop('attr_dict')
            nx.write_gexf(graph, 'name.gexf')
            with open(lpu_name + '.pickle', 'wb') as f:
                pickle.dump(graph, f, protocol=pickle.HIGHEST_PROTOCOL)
            comps = graph.node.items()

            #for uid, comp in comps:
            #    if 'attr_dict' in comp:
            #        nx.set_node_attributes(graph, {uid: comp['attr_dict']})
            #        print('changed',uid)
            #    if 'class' in comp:

            if k == 'retina':
                prs = [node for node in graph.nodes(data=True) \
                       if node[1]['class'] == 'PhotoreceptorModel']
                for pr in prs:
                    graph.node[pr[0]]['num_microvilli'] = 3000
                input_processors = [
                    RetinaInputIndividual(config, prs, user_id)
                ]
                extra_comps = [PhotoreceptorModel]
                retina_input_uids = [a[0] for a in prs]
            elif k == 'EB':
                input_processor = StepInputProcessor('I', [node[0] for node in graph.nodes(data=True) \
                       if node[1]['class'] == 'LeakyIAF'], 40.0, 0.0, 1.0)
                input_processors = [input_processor]
                extra_comps = [BufferVoltage]
            else:
                input_processors = []
                extra_comps = [BufferVoltage]
            if 'inputProcessors' in task:
                input_processors = loadExperimentSettings(
                    task['inputProcessors'])
            output_processor = FileOutputProcessor(
                [('V', lpu['output_uid_list'])],
                lpu['output_file'],
                sample_interval=10)

            (comp_dict, conns) = LPU.graph_to_dicts(graph)

            # print(comp_dict)
            # print(conns)
            print(k)
            manager.add(LPU,
                        k,
                        dt,
                        comp_dict,
                        conns,
                        device=0,
                        input_processors=input_processors,
                        output_processors=[output_processor],
                        extra_comps=extra_comps,
                        debug=True)

        # connect LPUs by Patterns
        for k, pattern in patterns.items():
            l1, l2 = k.split('-')
            if l1 in lpus and l2 in lpus:
                print('Connecting {} and {}'.format(l1, l2))
                pat, key_order = Pattern.from_graph(nx.DiGraph(
                    pattern['graph']),
                                                    return_key_order=True)
                print(l1, l2)
                print(key_order)
                with Timer('update of connections in Manager'):
                    manager.connect(l1,
                                    l2,
                                    pat,
                                    int_0=key_order.index(l1),
                                    int_1=key_order.index(l2))

        # start simulation
        steps = config['General']['steps']
        ignored_steps = config['General']['ignored_steps']
        if 'steps' in task:
            steps = task['steps']
        if 'ignored_steps' in task:
            ignored_steps = task['ignored_steps']
        # ignored_steps = 0
        # steps = 100
        manager.spawn()
        manager.start(steps=steps)
        manager.wait()

        time.sleep(5)
        print(task)

        # post-processing inputs (hard coded, can be better organized)
        inputs = {
            u'ydomain': 1.0,
            u'xdomain': dt * (steps - ignored_steps),
            u'dt': dt * 10,
            u'data': {}
        }
        if 'retina' in lpus:
            input_array = si.read_array('{}_{}.h5'.format(
                config['Retina']['input_file'], user_id))
            inputs[u'ydomain'] = input_array.max()
            for i, item in enumerate(retina_input_uids):
                inputs['data'][item] = np.hstack(
                    (np.arange(int((steps - ignored_steps) / 10)).reshape(
                        (-1, 1)) * dt * 10, input_array[ignored_steps::10,
                                                        i:i + 1])).tolist()

            del input_array

        # post-processing outputs from all LPUs and combine them into one dictionary
        result = {
            u'ydomain': 1,
            u'xdomain': dt * (steps - ignored_steps),
            u'dt': dt * 10,
            u'data': {}
        }

        for k, lpu in lpus.items():
            with h5py.File(lpu['output_file']) as output_file:
                uids = output_file['V']['uids'][:]
                output_array = output_file['V']['data'][:]
                for i, item in enumerate(uids):
                    output = output_array[int(ignored_steps / 10):, i:i + 1]
                    # tmp = output.max()-output.min()
                    # if tmp <= 0.01: #mV
                    #     output = (output - output.min()) + 0.5
                    # else:
                    #     output = (output - output.min())/tmp*0.9+0.1
                    result['data'][item] = np.hstack(
                        (np.arange(int((steps - ignored_steps) / 10)).reshape(
                            (-1, 1)) * dt * 10, output)).tolist()

        return inputs, result
Example #13
    def test_graph_to_dicts(self):
        self.maxDiff = 2048
        g = nx.MultiDiGraph()
        g.add_node('0', {'model': 'LeakyIAF',
                         'spiking': True,
                         'extern': True,
                         'public': True,
                         'selector': '/lif0',
                         'C': 1.0,
                         'R': 1.0,
                         'V': -1.0,
                         'Vr': -0.1,
                         'Vt': -0.1,
                         'name': 'lif0'})
        g.add_node('1', {'model': 'LeakyIAF',
                         'spiking': True,
                         'extern': True,
                         'public': True,
                         'selector': '/lif1',
                         'C': 2.0,
                         'R': 2.0,
                         'V': -2.0,
                         'Vr': -0.2,
                         'Vt': -0.2,
                         'name': 'lif1'})
        g.add_node('2', {'model': 'MorrisLecar',
                         'spiking': False,
                         'extern': False,
                         'public': False,
                         'selector': '/ml0',
                         'V1': 0.03,
                         'V2': 0.3,
                         'V3': 0.2,
                         'V4': 0.1,
                         'initV': -0.1,
                         'initn': 0.1,
                         'offset': 0,
                         'phi': 0.01,
                         'name': 'ml0'})
        g.add_node('3', {'model': 'MorrisLecar',
                         'spiking': False,
                         'extern': False,
                         'public': False,
                         'selector': '/ml1',
                         'V1': 0.04,
                         'V2': 0.4,
                         'V3': 0.3,
                         'V4': 0.2,
                         'initV': -0.2,
                         'initn': 0.2,
                         'offset': 0,
                         'phi': 0.02,
                         'name': 'ml1'})
        g.add_edge('0', '1', attr_dict={'class': 0,
                                        'model': 'AlphaSynapse',
                                        'conductance': True,
                                        'name': 'lif0-lif1',
                                        'reverse': 0.01,
                                        'ad': 0.01,
                                        'gr': 1.0,
                                        'gmax': 0.001})
        g.add_edge('1', '0', attr_dict={'class': 0,
                                        'model': 'AlphaSynapse',
                                        'conductance': True,
                                        'name': 'lif1-lif0',
                                        'reverse': 0.02,
                                        'ad': 0.02,
                                        'gr': 2.0,
                                        'gmax': 0.002})

        n_dict, s_dict = LPU.graph_to_dicts(g)
        self.assertDictEqual(n_dict,
                             {'LeakyIAF':
                              {'C': [1.0, 2.0],
                               'name': ['lif0', 'lif1'],
                               'id': [0, 1],
                               'selector': ['/lif0', '/lif1'],
                               'Vr': [-0.1, -0.2],
                               'R': [1.0, 2.0],
                               'Vt': [-0.1, -0.2],
                               'V': [-1.0, -2.0],
                               'extern': [True, True],
                               'spiking': [True, True],
                               'public': [True, True]},
                              'MorrisLecar': {
                                  'V1': [0.03, 0.04],
                                  'V2': [0.3, 0.4],
                                  'V3': [0.2, 0.3],
                                  'V4': [0.1, 0.2],
                                  'initV': [-0.1, -0.2],
                                  'initn': [0.1, 0.2],
                                  'offset': [0, 0],
                                  'phi': [0.01, 0.02],
                                  'selector': ['/ml0', '/ml1'],
                                  'name': ['ml0', 'ml1'],
                                  'id': [2, 3],
                                  'extern': [False, False],
                                  'spiking': [False, False],
                                  'public': [False, False]}})

        self.assertDictEqual(s_dict,
                             {'AlphaSynapse':
                              {'pre': ['1', '0'],
                               'reverse': [0.02, 0.01],
                               'gmax': [0.002, 0.001],
                               'post': ['0', '1'],
                               'class': [0, 0],
                               'conductance': [True, True],
                               'ad': [0.02, 0.01],
                               'gr': [2.0, 1.0],
                               'id': [0, 1],
                               'name': ['lif1-lif0', 'lif0-lif1']}})
Example #14
    def compile(self, duration, dt=None, steps=None, in_list=None,
                record=('V', 'spike_state', 'I'), extra_comps=None,
                input_filename='neuroballad_temp_model_input.h5',
                output_filename='neuroballad_temp_model_output.h5',
                graph_filename='neuroballad_temp_graph.gexf.gz',
                device=0, sample_interval=1):
        if dt is not None:
            if steps is not None:
                assert dt * steps == duration, 'dt*steps != duration'
            else:
                steps = int(duration / dt)
            t = np.linspace(0, duration, steps)
        else:
            if steps is not None:
                t = np.linspace(0, duration, steps)
                dt = t[1] - t[0]
            else:
                raise ValueError('dt and steps cannot both be None')
        self.config = self.config._replace(duration=duration,
                                           steps=steps,
                                           dt=dt,
                                           t=t,
                                           device=device)
        # compile inputs
        if in_list is None:
            in_list = self._inputs
        uids = []
        for i in in_list:
            uids.append(self.encode_name(str(i.node_id),
                                         experiment_name=i.experiment_name))
        input_vars = []
        for i in in_list:
            if isinstance(i.var, list):
                for j in i.var:
                    input_vars.append(j)
            else:
                input_vars.append(i.var)
        input_vars = list(set(input_vars))
        uids = np.array(list(set(uids)), dtype='S')
        Is = {}
        Inodes = {}
        for i in input_vars:
            Inodes[i] = []
        for i in in_list:
            in_name = self.encode_name(str(i.node_id),
                                       experiment_name=i.experiment_name)
            if in_name in list(self.G.nodes(data=False)):
                pass
            else:
                raise ValueError(
                    'Input node {} not found in Circuit.'.format(in_name))

            if isinstance(i.var, list):
                for j in i.var:
                    Inodes[j].append(
                        self.encode_name(str(i.node_id),
                                         experiment_name=i.experiment_name))
            else:
                Inodes[i.var].append(
                    self.encode_name(str(i.node_id),
                                     experiment_name=i.experiment_name))
        for i in input_vars:
            Inodes[i] = np.array(list(set(Inodes[i])), dtype='S')
        for i in input_vars:
            Is[i] = np.zeros((self.config.steps, len(Inodes[i])),
                             dtype=self.dtype)

        for i in in_list:
            if isinstance(i.var, list):
                for j in i.var:
                    Is[j] = i.add(self, Inodes[j], Is[j], t, var=j)
            else:
                Is[i.var] = i.add(self, Inodes[i.var], Is[i.var], t, var=i.var)

        with h5py.File(input_filename, 'w') as f:
            for i in input_vars:
                # print(i + '/uids')
                i_nodes = Inodes[i]
                """
                try:
                    i_nodes = [i.decode('ascii') for i in i_nodes]
                except:
                    pass
                i_nodes = [self.encode_name(i) for i in i_nodes]
                """
                i_nodes = np.array(i_nodes, dtype='S')
                f.create_dataset(i + '/uids', data=i_nodes)
                f.create_dataset(i + '/data', (self.config.steps, len(Inodes[i])),
                                 dtype=self.dtype,
                                 data=Is[i])

        if graph_filename is not None:
            nx.write_gexf(self.G, graph_filename)

        from neurokernel.core_gpu import Manager
        from neurokernel.LPU.LPU import LPU
        import neurokernel.mpi_relaunch
        from neurokernel.LPU.InputProcessors.FileInputProcessor import  \
            FileInputProcessor
        from neurokernel.LPU.OutputProcessors.FileOutputProcessor import \
            FileOutputProcessor

        input_processor = FileInputProcessor(input_filename)
        (comp_dict, conns) = LPU.graph_to_dicts(self.G)
        output_processor = FileOutputProcessor([(i, None) for i in list(record)],
                                               output_filename,
                                               sample_interval=sample_interval)
        self.manager = Manager()
        self.manager.add(LPU, self.experiment_name, self.config.dt,
                         comp_dict, conns,
                         device=self.config.device,
                         input_processors=[input_processor],
                         output_processors=[output_processor],
                         debug=False,
                         extra_comps=extra_comps if extra_comps is not None else [])
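
A hedged usage sketch for this compile method (the Circuit class name is an assumption; the run sequence mirrors the manager calls above):

circuit = Circuit()  # hypothetical neuroballad circuit with components and inputs already added
circuit.compile(duration=1.0, dt=1e-4)
circuit.manager.spawn()
circuit.manager.start(steps=circuit.config.steps)
circuit.manager.wait()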
Example #15
    def compile(self,
                duration,
                dt=None,
                steps=None,
                in_list=None,
                record=('V', 'spike_state', 'I'),
                extra_comps=None,
                input_filename='neuroballad_temp_model_input.h5',
                output_filename='neuroballad_temp_model_output.h5',
                graph_filename='neuroballad_temp_graph.gexf.gz',
                device=0,
                sample_interval=1,
                execute_in_same_thread=True):
        """
        Compiles a neuroballad circuit before execution.

        # Arguments
            duration (float): Simulation duration.
            dt (float): Time step size.
            steps (int): Number of steps to use in simulation. Optional; ignored if dt is also given.
            in_list (list): List of inputs to use during compilation.
            record (tuple): Tuple of variables to record. Defaults to ('V', 'spike_state', 'I').
            extra_comps (list): List of new, custom components to include for your simulation.
            input_filename (str): The .h5 file name to use for the input.
            output_filename (str): The .h5 file name to use for recording the output.
            graph_filename (str): Name of the graph file to save the circuit to. Uses the .gexf format.
            device (int): Device to use for execution.
            sample_interval (int): Sampling interval for recording simulation output.
            execute_in_same_thread (bool): Whether to execute the circuit in the current thread.
        """
        if dt is not None:
            if steps is not None:
                warnings.warn(
                    "Both 'steps' and 'dt' arguments were specified. "
                    "The 'steps' argument is ignored."
                )
            steps = int(duration / dt)
            t = np.linspace(0, duration, steps)
        else:
            if steps is not None:
                t = np.linspace(0, duration, steps)
                dt = t[1] - t[0]
            else:
                raise ValueError('dt and steps cannot both be None')
        self.config = self.config._replace(duration=duration,
                                           steps=steps,
                                           dt=dt,
                                           t=t,
                                           device=device)

        run_parameters = [duration, dt]
        with open('run_parameters.pickle', 'wb') as f:
            pickle.dump(run_parameters, f, protocol=pickle.HIGHEST_PROTOCOL)
        # Compile inputs
        if in_list is None:
            in_list = self._inputs
        uids = []
        for i in in_list:
            uids.append(
                self.encode_name(str(i.node_id),
                                 experiment_name=i.experiment_name))
        input_vars = []
        for i in in_list:
            if isinstance(i.var, list):
                for j in i.var:
                    input_vars.append(j)
            else:
                input_vars.append(i.var)
        input_vars = list(set(input_vars))
        uids = np.array(list(set(uids)), dtype='S')
        Is = {}
        Inodes = {}
        for i in input_vars:
            Inodes[i] = []
        for i in in_list:
            in_name = self.encode_name(str(i.node_id),
                                       experiment_name=i.experiment_name)
            if in_name in list(self.G.nodes(data=False)):
                pass
            else:
                raise ValueError(
                    'Input node {} not found in Circuit.'.format(in_name))

            if isinstance(i.var, list):
                for j in i.var:
                    Inodes[j].append(
                        self.encode_name(str(i.node_id),
                                         experiment_name=i.experiment_name))
            else:
                Inodes[i.var].append(
                    self.encode_name(str(i.node_id),
                                     experiment_name=i.experiment_name))
        for i in input_vars:
            Inodes[i] = np.array(list(set(Inodes[i])), dtype='S')
        for i in input_vars:
            Is[i] = np.zeros((self.config.steps, len(Inodes[i])),
                             dtype=self.dtype)

        for i in in_list:
            if isinstance(i.var, list):
                for j in i.var:
                    Is[j] = i.add(self, Inodes[j], Is[j], t, var=j)
            else:
                Is[i.var] = i.add(self, Inodes[i.var], Is[i.var], t, var=i.var)

        with h5py.File(input_filename, 'w') as f:
            for i in input_vars:
                # print(i + '/uids')
                i_nodes = Inodes[i]
                """
                try:
                    i_nodes = [i.decode('ascii') for i in i_nodes]
                except:
                    pass
                i_nodes = [self.encode_name(i) for i in i_nodes]
                """
                i_nodes = np.array(i_nodes, dtype='S')
                f.create_dataset(i + '/uids', data=i_nodes)
                f.create_dataset(i + '/data',
                                 (self.config.steps, len(Inodes[i])),
                                 dtype=self.dtype,
                                 data=Is[i])

        recorders = []
        for i in record:
            recorders.append((i, None))
        with open('record_parameters.pickle', 'wb') as f:
            pickle.dump(recorders, f, protocol=pickle.HIGHEST_PROTOCOL)

        if graph_filename is not None:
            nx.write_gexf(self.G, graph_filename)

        if execute_in_same_thread:
            from neurokernel.core_gpu import Manager
            from neurokernel.LPU.LPU import LPU
            # import neurokernel.mpi_relaunch
            from neurokernel.LPU.InputProcessors.FileInputProcessor import  \
                FileInputProcessor
            from neurokernel.LPU.OutputProcessors.FileOutputProcessor import \
                FileOutputProcessor

            input_processor = FileInputProcessor(input_filename)
            (comp_dict, conns) = LPU.graph_to_dicts(self.G)
            output_processor = FileOutputProcessor(
                [(i, None) for i in list(record)],
                output_filename,
                sample_interval=sample_interval)
            self.manager = Manager()
            self.manager.add(
                LPU,
                self.experiment_name,
                self.config.dt,
                comp_dict,
                conns,
                device=self.config.device,
                input_processors=[input_processor],
                output_processors=[output_processor],
                debug=False,
                extra_comps=extra_comps if extra_comps is not None else [])
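
With execute_in_same_thread=False, this variant only writes out the artifacts (the input HDF5 file, the gexf graph, and the pickled run/record parameters) for a separate runner process; a hedged sketch of consuming them:

circuit.compile(duration=1.0, dt=1e-4, execute_in_same_thread=False)
with open('run_parameters.pickle', 'rb') as f:
    duration, dt = pickle.load(f)  # [duration, dt] saved by compile above
with open('record_parameters.pickle', 'rb') as f:
    recorders = pickle.load(f)  # e.g. [('V', None), ('spike_state', None), ('I', None)]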