Example 1
def main():
    import neurokernel.mpi_relaunch

    parser = argparse.ArgumentParser()
    parser.add_argument('-c',
                        '--config',
                        default='default',
                        help='configuration file')

    parser.add_argument('-v',
                        '--value',
                        type=int,
                        default=-1,
                        help='Value that can overwrite configuration '
                        'by changing this script accordingly. '
                        'It is useful when one needs to run this '
                        'script repeatedly for different configurations')

    args = parser.parse_args()

    with Timer('getting configuration'):
        conf_obj = get_config_obj(args)
        config = conf_obj.conf
        change_config(config, args.value)
        edit_files(config)

    setup_logging(config)
    # with Timer('input generation'):
    #     gi.gen_input_norfscreen(config)

    manager = core.Manager()
    with Timer('photoreceptor instantiation'):
        add_LPU(config, manager)

    start_simulation(config, manager)
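
# (Not from the source) A plausible import preamble for entry points like the
# one above. The module paths are assumptions rather than confirmed by the
# snippet: Timer is taken to live in neurokernel.tools.timing and Manager in
# neurokernel.core_gpu; get_config_obj, change_config, edit_files,
# setup_logging, add_LPU and start_simulation are helpers defined elsewhere
# in the surrounding package.
import argparse
import sys
import resource

import neurokernel.core_gpu as core          # assumed location of Manager
from neurokernel.tools.timing import Timer   # assumed location of Timer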
Example 2
def main():
    import neurokernel.mpi_relaunch
    # default limit is low for pickling
    # the data structures passed through mpi
    sys.setrecursionlimit(RECURSION_LIMIT)
    resource.setrlimit(resource.RLIMIT_STACK,
                       (resource.RLIM_INFINITY, resource.RLIM_INFINITY))

    parser = argparse.ArgumentParser()
    parser.add_argument('-c',
                        '--config',
                        default='default',
                        help='configuration file')
    parser.add_argument('-v',
                        '--value',
                        type=int,
                        default=-1,
                        help='Value that can overwrite configuration '
                        'by changing this script accordingly. '
                        'It is useful when one needs to run this '
                        'script repeatedly for different configurations')

    args = parser.parse_args()

    with Timer('getting configuration'):
        conf_obj = get_config_obj(args)
        config = conf_obj.conf
        change_config(config, args.value)

    setup_logging(config)

    worker_num = config['Retina']['worker_num']
    num_rings = config['Retina']['rings']
    radius = config['Retina']['radius']
    eulerangles = config['Retina']['eulerangles']

    manager = core.Manager()

    with Timer('instantiation of retina'):
        transform = AlbersProjectionMap(radius, eulerangles).invmap
        hexagon = hx.HexagonArray(num_rings=num_rings,
                                  radius=radius,
                                  transform=transform)

        retina = ret.RetinaArray(hexagon, config)

        # the commented call below would set a retina attribute that is
        # required for the generation of receptive fields
        # if generator is not None:
        #     generator.generate_datafiles(0, retina)

        add_master_LPU(config, 0, retina, manager)

        for j in range(worker_num):
            add_worker_LPU(config, j, retina, manager)
            connect_master_worker(config, j, retina, manager)

    start_simulation(config, manager)
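
# (Not from the source) Context for the limits set at the top of main():
# CPython's default recursion limit is 1000, which recursive pickling of the
# deep graph structures sent over MPI can exceed. RECURSION_LIMIT is defined
# elsewhere in the module; the value below is only an assumption.
import sys

print(sys.getrecursionlimit())  # typically 1000 by default
RECURSION_LIMIT = 80000         # assumed value, not from the source
sys.setrecursionlimit(RECURSION_LIMIT)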
Example 3
def connect_retina_lamina(config, index, retina, lamina, manager):
    '''
        The connections between Retina and Lamina follow
        the neural superposition rule of the fly's compound eye.
        See NeurokernelRFC#2 for more information.

        Retina provides an interface to make this connection easier.
        --
        config: configuration dictionary-like object
        index: identifier of the eye, in case more than one is used
        retina: retina array object
        lamina: lamina array object
        manager: manager object to which the connection pattern will be added
    '''
    retina_id = get_retina_id(index)
    lamina_id = get_lamina_id(index)
    print('Connecting {} and {}'.format(retina_id, lamina_id))

    retina_selectors = retina.get_all_selectors()
    lamina_selectors = []  # lamina.get_all_selectors()
    with Timer('creation of Pattern object'):
        from_list = []
        to_list = []

        # accounts for neural superposition
        rulemap = retina.rulemap
        for ret_sel in retina_selectors:
            if not ret_sel.endswith('agg'):
                # format should be '/ret/<ommid>/<neuronname>'
                _, lpu, ommid, n_name = ret_sel.split('/')
                # find neighbor of neural superposition
                neighborid = rulemap.neighbor_for_photor(int(ommid), n_name)
                # format should be '/lam/<cartid>/<neuronname>'
                lam_sel = lamina.get_selector(neighborid, n_name)

                # setup connection from retina to lamina
                from_list.append(ret_sel)
                to_list.append(lam_sel)
                
                from_list.append(lam_sel+'_agg')
                to_list.append(ret_sel+'_agg')
                lamina_selectors.append(lam_sel)
                lamina_selectors.append(lam_sel+'_agg')
                
        pattern = Pattern.from_concat(','.join(retina_selectors),
                                      ','.join(lamina_selectors),
                                      from_sel=','.join(from_list),
                                      to_sel=','.join(to_list),
                                      gpot_sel=','.join(from_list+to_list))
        nx.write_gexf(pattern.to_graph(), retina_id+'_'+lamina_id+'.gexf.gz',
                      prettyprint=True)

    with Timer('update of connections in Manager'):
        manager.connect(retina_id, lamina_id, pattern)
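
# (Not from the source) A minimal illustration of the selector format the
# loop above assumes: '/ret/<ommid>/<neuronname>' splits into an empty
# leading field, the LPU tag, the ommatidium id and the neuron name.
ret_sel = '/ret/42/R1'
_, lpu, ommid, n_name = ret_sel.split('/')
assert (lpu, ommid, n_name) == ('ret', '42', 'R1')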
Example 4
def connect_master_worker(config, i, retina, manager):
    total_neurons = retina.num_photoreceptors

    worker_num = config['Retina']['worker_num']

    master_id = get_master_id(0)
    worker_id = get_worker_id(i)
    print('Connecting {} and {}'.format(master_id, worker_id))

    with Timer('update of connections in Pattern object'):
        pattern = retina.update_pattern_master_worker(i + 1, worker_num)

    with Timer('update of connections in Manager'):
        manager.connect(master_id, worker_id, pattern)
Example 5
def main():
    import neurokernel.mpi_relaunch

    # default limit is low for pickling
    # the data structures passed through mpi
    sys.setrecursionlimit(RECURSION_LIMIT)
    resource.setrlimit(resource.RLIMIT_STACK,
                       (resource.RLIM_INFINITY, resource.RLIM_INFINITY))

    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', default='default',
                        help='configuration file')
    parser.add_argument('-v', '--value', type=int, default=-1,
                        help='Value that can overwrite configuration '
                             'by changing this script accordingly. '
                             'It is useful when one needs to run this '
                             'script repeatedly for different configurations')

    args = parser.parse_args()

    with Timer('getting configuration'):
        conf_obj = get_config_obj(args)
        config = conf_obj.conf
        change_config(config, args.value)

    setup_logging(config)

    num_rings = config['Retina']['rings']
    eulerangles = config['Retina']['eulerangles']
    radius = config['Retina']['radius']

    manager = core.Manager()
    
    with Timer('instantiation of retina and lamina'):
        transform = AlbersProjectionMap(radius, eulerangles).invmap
        r_hexagon = r_hx.HexagonArray(num_rings=num_rings, radius=radius,
                                      transform=transform)
        l_hexagon = l_hx.HexagonArray(num_rings=num_rings, radius=radius,
                                      transform=transform)

        retina = ret.RetinaArray(r_hexagon, config)
        lamina = lam.LaminaArray(l_hexagon, config)

        add_retina_LPU(config, 0, retina, manager)
        add_lamina_LPU(config, 0, lamina, manager)

        connect_retina_lamina(config, 0, retina, lamina, manager)

    start_simulation(config, manager)
Example 6
def start_simulation(config, manager):
    steps = config['General']['steps']
    with Timer('retina simulation'):
        manager.spawn()
        print('Manager spawned')
        manager.start(steps=steps)
        manager.wait()
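
# (Not from the source) Usage sketch: start_simulation only reads
# config['General']['steps'], so a minimal configuration suffices once a
# manager with LPUs has been set up as in the mains above.
config = {'General': {'steps': 100}}
# start_simulation(config, manager)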
Example 7
def connect_master_worker(config, i, j, retina, manager):
    total_neurons = retina.num_photoreceptors

    eye_num = config['General']['eye_num']
    worker_num = config['Retina']['worker_num']

    worker_dev = j + i * worker_num + eye_num

    master_id = get_master_id(i)
    worker_id = get_worker_id(worker_dev)
    print('Connecting {} and {}'.format(master_id, worker_id))

    with Timer('update of connections in Pattern object'):
        pattern = retina.update_pattern_master_worker(j + 1, worker_num)

    with Timer('update of connections in Manager'):
        manager.connect(master_id, worker_id, pattern)
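
# (Not from the source) A worked example of the device numbering above,
# inferred from the formula: with eye_num = 2 and worker_num = 3 the masters
# presumably occupy devices 0 and 1, and the workers land on devices 2..7.
eye_num, worker_num = 2, 3
for i in range(eye_num):
    for j in range(worker_num):
        print('eye', i, 'worker', j, '-> device', j + i * worker_num + eye_num)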
Example 8
    def test_image2D(imtype, write_video, write_image, write_screen, config):
        steps = config['General']['steps']
        config['General']['intype'] = imtype
        image2Dgen = image2Dfactory(imtype)(config)

        with Timer('generation of {} example input (steps {})'.format(
                imtype, steps)):
            images = image2Dgen.generate_2dimage(num_steps=steps)

        if write_video:
            with Timer('writing video file'):
                savemp4(images, '{}_input.mp4'.format(imtype.lower()))

        if write_image:
            with Timer('writing image file'):
                savefig(np.log10(images[:, 1, 1] + 1e-10),
                        'logarithm of {}'.format(imtype),
                        'temp_{}.png'.format(imtype))

        if write_screen:
            print('Images: \n{}'.format(images))
Example 9
def get_input_gen(config, retina):
    inputmethod = config['Retina']['inputmethod']

    if inputmethod == 'read':
        print('Generating input files')
        with Timer('input generation'):
            gi.gen_input(config)
        return None
    else:
        print('Using input generating function')

        return RetinaInputProcessor(config, retina)
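
# (Not from the source) Because get_input_gen returns None in the 'read'
# case, callers should guard before handing the result to the manager; a
# sketch of that convention:
def input_processors_for(config, retina):
    input_processor = get_input_gen(config, retina)
    return [] if input_processor is None else [input_processor]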
Example 10
def add_retina_LPU(config, retina_index, retina, manager):
    '''
        This method adds the Retina LPU and its parameters to the manager
        so that it can be initialized later. Depending on the configuration,
        input can either be created in advance and read from a file or
        generated during the simulation by a processor object.

        --
        config: configuration dictionary-like object
        retina_index: identifier of the eye, in case more than one is used
        retina: retina array object required for the generation of the
            graph.
        manager: manager object to which the LPU will be added
    '''
    dt = config['General']['dt']
    debug = config['Retina']['debug']
    time_sync = config['Retina']['time_sync']

    input_filename = config['Retina']['input_file']
    output_filename = config['Retina']['output_file']
    gexf_filename = config['Retina']['gexf_file']
    suffix = config['General']['file_suffix']

    output_file = '{}{}{}.h5'.format(output_filename, retina_index, suffix)
    gexf_file = '{}{}{}.gexf.gz'.format(gexf_filename, retina_index, suffix)
    
    inputmethod = config['Retina']['inputmethod']
    if inputmethod == 'read':
        print('Generating input files')
        with Timer('input generation'):
            input_processor = RetinaFileInputProcessor(config, retina)
    else:
        print('Using input generating function')
        input_processor = RetinaInputProcessor(config, retina)

    output_processor = FileOutputProcessor([('V', None), ('spike_state', None)],
                                           output_file, sample_interval=1)

    # retina also allows a subset of its graph to be taken, in case the
    # retina model later needs to be split across more GPUs
    G = retina.get_worker_nomaster_graph()
    nx.write_gexf(G, gexf_file)

    (comp_dict, conns) = LPU.graph_to_dicts(G)
    retina_id = get_retina_id(retina_index)

    extra_comps = [LeakyIAF, BufferPhoton]

    manager.add(LPU, retina_id, dt, comp_dict, conns,
                device=retina_index, input_processors=[input_processor],
                output_processors=[output_processor],
                debug=debug, time_sync=time_sync, extra_comps=extra_comps)
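
# (Not from the source) The GEXF dump written above can be reloaded for
# inspection with standard networkx calls; nx.read_gexf transparently
# handles the .gexf.gz extension.
def inspect_gexf(gexf_file):
    import networkx as nx
    G2 = nx.read_gexf(gexf_file)
    print(G2.number_of_nodes(), G2.number_of_edges())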
Example 11
def add_master_LPU(config, retina_index, retina, manager):
    dt = config['General']['dt']
    debug = config['Retina']['debug']
    time_sync = config['Retina']['time_sync']

    input_filename = config['Retina']['input_file']
    output_filename = config['Retina']['output_file']
    gexf_filename = config['Retina']['gexf_file']
    suffix = config['General']['file_suffix']

    output_file = '{}{}{}.h5'.format(output_filename, retina_index, suffix)
    gexf_file = '{}{}{}.gexf.gz'.format(gexf_filename, retina_index, suffix)

    inputmethod = config['Retina']['inputmethod']
    if inputmethod == 'read':
        print('Generating input files')
        with Timer('input generation'):
            input_processor = RetinaFileInputProcessor(config, retina)
    else:
        print('Using input generating function')
        input_processor = RetinaInputProcessor(config, retina)

    uids_to_record = [
        'ret_{}_{}'.format(name, i) for i in range(retina.num_elements)
        for name in ['R1', 'R2', 'R3', 'R4', 'R5', 'R6']
    ]
    output_processor = FileOutputProcessor([('V', uids_to_record)],
                                           output_file,
                                           sample_interval=1)

    G = retina.get_master_graph()
    nx.write_gexf(G, gexf_file)

    (comp_dict, conns) = LPU.lpu_parser(gexf_file)
    master_id = get_master_id(retina_index)

    extra_comps = [BufferPhoton, BufferVoltage]

    manager.add(LPU,
                master_id,
                dt,
                comp_dict,
                conns,
                device=retina_index,
                input_processors=[input_processor],
                output_processors=[output_processor],
                debug=debug,
                time_sync=time_sync,
                extra_comps=extra_comps)
Example 12
def gen_input_norfscreen(config):
    steps = config['General']['steps']
    input_file = config['Photoreceptor']['input_file']
    input_type = config['Photoreceptor']['single_intype']
    input_obj = get_singleinput_cls(input_type)(config)

    with Timer('getting photoreceptor inputs'):
        # although not configurable, alternatives such as .get_flat_image()
        # (if available) can be used by manually changing the call below
        photor_inputs = input_obj.get_input(steps)

        config['General']['steps'] = photor_inputs[:, 0].size

    sio.write_array(photor_inputs, filename=input_file)
Example 13
def get_input_gen(config, retina):
    '''
        Depending on the configuration, input can either be created
        in advance and read from a file, or
        generated during the simulation by a generator object.
    '''

    inputmethod = config['Retina']['inputmethod']

    if inputmethod == 'read':
        print('Generating input files')
        with Timer('input generation'):
            gi.gen_input(config)
        return None
    else:
        print('Using input generating function')
        return RetinaInputProcessor(config, retina)
Example 14
def start_simulation(config, manager):
    steps = config['General']['steps']
    with Timer('photoreceptor simulation'):
        manager.spawn()
        manager.start(steps=steps)
        manager.wait()
Example 15
    def launch(self, user_id, task):
        # neuron_uid_list = [str(a) for a in task['neuron_list']]
        try:
            # conf_obj = get_config_obj()
            # config = conf_obj.conf

            setup_logger(file_name='neurokernel_' + user_id + '.log', screen=False)

            manager = core.Manager()

            lpus = {}
            patterns = {}
            G = task['data']

            for i in list(G['Pattern'].keys()):
                a = G['Pattern'][i]['nodes']
                if len([k for k, v in a.items() if v['class'] == 'Port']) == 0:
                    del G['Pattern'][i]

            for i in list(G['LPU'].keys()):
                a = G['LPU'][i]['nodes']
                if len(a) < 3:
                    del G['LPU'][i]

            # with open('G.pickle', 'wb') as f:
            #     pickle.dump(G, f, protocol=pickle.HIGHEST_PROTOCOL)
            # print(G)
            # print(G.keys())
            # print(G['LPU'])
            # print(G['LPU'].keys())

            # get graph and output_uid_list for each LPU
            for k, lpu in G['LPU'].items():
                lpus[k] = {}
                g_lpu_na = create_graph_from_database_returned(lpu)
                lpu_nk_graph = nk.na_lpu_to_nk_new(g_lpu_na)
                lpus[k]['graph'] = lpu_nk_graph
                # lpus[k]['output_uid_list'] = list(
                #             set(lpu_nk_graph.nodes()).intersection(
                #                 set(neuron_uid_list)))
                # lpus[k]['output_file'] = '{}_output_{}.h5'.format(k, user_id)

            for kkey, lpu in lpus.items():
                graph = lpu['graph']

                for uid, comp in graph.nodes.items():
                    if 'attr_dict' in comp:
                        print('Found attr_dict; fixing...')
                        nx.set_node_attributes(graph, {uid: comp['attr_dict']})
                        # print('changed',uid)
                        graph.nodes[uid].pop('attr_dict')
                    if 'params' in comp:
                        params = graph.nodes[uid].pop('params')
                        nx.set_node_attributes(graph, {uid: {k: float(v) for k, v in params.items()}})
                    if 'states' in comp:
                        states = graph.nodes[uid].pop('states')
                        nx.set_node_attributes(graph, {uid: {'init{}'.format(k): float(v) for k, v in states.items()}})
                for i, j, k, v in graph.edges(keys=True, data=True):
                    if 'attr_dict' in v:
                        for key in v['attr_dict']:
                            nx.set_edge_attributes(graph, {(i, j, k): {key: v['attr_dict'][key]}})
                        graph.edges[(i, j, k)].pop('attr_dict')
                lpus[kkey]['graph'] = graph

            # get graph for each Pattern
            for k, pat in G['Pattern'].items():
                l1, l2 = k.split('-')
                if l1 in lpus and l2 in lpus:
                    g_pattern_na = create_graph_from_database_returned(pat)
                    pattern_nk = nk.na_pat_to_nk(g_pattern_na)
                    #print(lpus[l1]['graph'].nodes(data=True))
                    lpu_ports = [node[1]['selector'] \
                                 for node in lpus[l1]['graph'].nodes(data=True) \
                                 if node[1]['class']=='Port'] + \
                                [node[1]['selector'] \
                                 for node in lpus[l2]['graph'].nodes(data=True) \
                                 if node[1]['class']=='Port']
                    pattern_ports = pattern_nk.nodes()
                    patterns[k] = {}
                    patterns[k]['graph'] = pattern_nk.subgraph(
                        list(set(lpu_ports).intersection(set(pattern_ports))))

            # dt = config['General']['dt']
            # if 'dt' in task:
            dt = task['dt']
            print(dt)

            device_count = 0
            # add LPUs to manager
            for k, lpu in lpus.items():
                lpu_name = k
                graph = lpu['graph']

                for uid, comp in graph.nodes.items():
                    if 'attr_dict' in comp:
                        nx.set_node_attributes(graph, {uid: comp['attr_dict']})
                        # print('changed',uid)
                        graph.nodes[uid].pop('attr_dict')
                for i, j, ko, v in graph.edges(keys=True, data=True):
                    if 'attr_dict' in v:
                        for key in v['attr_dict']:
                            nx.set_edge_attributes(graph, {(i, j, ko): {key: v['attr_dict'][key]}})
                        graph.edges[(i, j, ko)].pop('attr_dict')
                # nx.write_gexf(graph,'name.gexf')
                # with open(lpu_name + '.pickle', 'wb') as f:
                #     pickle.dump(graph, f, protocol=pickle.HIGHEST_PROTOCOL)
                comps = graph.nodes.items()

                #for uid, comp in comps:
                #    if 'attr_dict' in comp:
                #        nx.set_node_attributes(graph, {uid: comp['attr_dict']})
                #        print('changed',uid)
                #    if 'class' in comp:

                # if k == 'retina':
                #     if config['Retina']['intype'] == 'Natural':
                #         coord_file = config['InputType']['Natural']['coord_file']
                #         tmp = os.path.splitext(coord_file)
                #         config['InputType']['Natural']['coord_file'] = '{}_{}{}'.format(
                #                 tmp[0], user_id, tmp[1])
                #     prs = [node for node in graph.nodes(data=True) \
                #            if node[1]['class'] == 'PhotoreceptorModel']
                #     for pr in prs:
                #         graph.node[pr[0]]['num_microvilli'] = 3000
                #     input_processors = [RetinaInputIndividual(config, prs, user_id)]
                #     extra_comps = [PhotoreceptorModel]
                #     retina_input_uids = [a[0] for a in prs]
                # # elif k == 'EB':
                # #     input_processor = StepInputProcessor('I', [node[0] for node in graph.nodes(data=True) \
                # #            if node[1]['class'] == 'LeakyIAF'], 40.0, 0.0, 1.0)
                # #     input_processors = [input_processor]
                # #     extra_comps = []#[BufferVoltage]
                # else:
                #     input_processors = []
                #     extra_comps = [BufferVoltage]
                if 'inputProcessors' in task:
                    if lpu_name in task['inputProcessors']:
                        input_processors, record = \
                            loadInputProcessors(task['inputProcessors'][lpu_name])
                        lpus[k]['input_record'] = record
                    else:
                        input_processors = []
                else:
                    input_processors = []

                # configure output processors
                lpus[k]['output_file'] = '{}_output_{}.h5'.format(k, user_id)
                output_processors = []
                if 'outputProcessors' in task:
                    if lpu_name in task['outputProcessors']:
                        output_processors, record = loadOutputProcessors(
                                                lpus[k]['output_file'],
                                                task['outputProcessors'][lpu_name])
                        if len(record):
                            lpus[k]['output_uid_dict'] = record

                # (comp_dict, conns) = LPU.graph_to_dicts(graph)
                manager.add(LPU, k, dt, 'pickle', pickle.dumps(graph),  # comp_dict, conns,
                            device=device_count, input_processors=input_processors,
                            output_processors=output_processors,
                            extra_comps=[], debug=False)
                device_count = (device_count+1) % self.ngpus

            # connect LPUs by Patterns
            for k, pattern in patterns.items():
                l1, l2 = k.split('-')
                if l1 in lpus and l2 in lpus:
                    print('Connecting {} and {}'.format(l1, l2))
                    pat, key_order = Pattern.from_graph(nx.DiGraph(pattern['graph']),
                                                        return_key_order = True)
                    print(l1,l2)
                    print(key_order)
                    with Timer('update of connections in Manager'):
                        try:
                            manager.connect(l1, l2, pat,
                                            int_0=key_order.index('{}/{}'.format(k, l1)),
                                            int_1=key_order.index('{}/{}'.format(k, l2)))
                        except ValueError:
                            manager.connect(l1, l2, pat,
                                            int_0=key_order.index(l1),
                                            int_1=key_order.index(l2))

            # start simulation
            # steps = config['General']['steps']
            # ignored_steps = config['General']['ignored_steps']
            # if 'steps' in task:
            steps = task['steps']
            # if 'ignored_steps' in task:
            # ignored_steps = task['ignored_steps']
            # ignored_steps = 0
            # steps = 100
            manager.spawn()
            manager.start(steps=steps)
        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            tb = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
            print('An error occurred during the compilation\n' + tb)
            return {'error':
                        {'exception': tb,
                         'message': 'An error occurred during compilation in execution'}}

        try:
            manager.wait()
        except LPUExecutionError:
            ErrorMessage = 'An error occurred during execution of LPU {} at step {}:\n'.format(
                            manager._errors[0][0], manager._errors[0][1]) + \
                            ''.join(manager._errors[0][2])
            print(ErrorMessage)
            return {'error':
                        {'exception': ''.join(manager._errors[0][2]),
                         'message': 'An error occurred during execution of LPU {} at step {}:\n'.format(
                            manager._errors[0][0], manager._errors[0][1])}}
        time.sleep(5)
        # print(task)

        ignored_steps = 0

        try:
            # post-processing inputs (hard coded, can be better organized)
            result = {'sensory': {}, 'input': {}, 'output': {}}
            for k, lpu in lpus.items():
                records = lpu.get('input_record', [])
                for record in records:
                    if record['sensory_file'] is not None:
                        if k not in result['sensory']:
                            result['sensory'][k] = []
                        with h5py.File(record['sensory_file'], 'r') as sensory_file:
                            result['sensory'][k].append({'dt': record['sensory_interval']*dt,
                                                         'data': sensory_file['sensory'][:]})
                    if record['input_file'] is not None:
                        with h5py.File(record['input_file'], 'r') as input_file:
                            sample_interval = input_file['metadata'].attrs['sample_interval']
                            for var in input_file.keys():
                                if var == 'metadata': continue
                                uids = [n.decode() for n in input_file[var]['uids'][:]]
                                input_array = input_file[var]['data'][:]
                                for i, item in enumerate(uids):
                                    if var == 'spike_state':
                                        input = np.nonzero(input_array[ignored_steps:, i:i+1].reshape(-1))[0]*dt
                                        if item in result['input']:
                                            if 'spike_time' in result['input'][item]:
                                                # ndarrays have no append(); concatenate, then sort
                                                result['input'][item]['spike_time']['data'] = \
                                                    np.sort(np.concatenate(
                                                        (result['input'][item]['spike_time']['data'],
                                                         input)))
                                            else:
                                                result['input'][item]['spike_time'] = {
                                                    'data': input.copy(),
                                                    'dt': dt*sample_interval}
                                        else:
                                            result['input'][item] = {'spike_time': {
                                                'data': input.copy(),
                                                'dt': dt*sample_interval}}
                                    else:
                                        input = input_array[ignored_steps:, i:i+1]
                                        if item in result['input']:
                                            if var in result['input'][item]:
                                                result['input'][item][var]['data'] += input
                                            else:
                                                result['input'][item][var] = {
                                                    'data': input.copy(),
                                                    'dt': dt*sample_interval}
                                        else:
                                            result['input'][item] = {var: {
                                                'data': input.copy(),
                                                'dt': dt*sample_interval}}

            # if 'retina' in lpus:
            #     input_array = si.read_array(
            #             '{}_{}.h5'.format(config['Retina']['input_file'], user_id))
            #     inputs[u'ydomain'] = input_array.max()
            #     for i, item in enumerate(retina_input_uids):
            #         inputs['data'][item] = np.hstack(
            #             (np.arange(int((steps-ignored_steps)/10)).reshape((-1,1))*dt*10,
            #              input_array[ignored_steps::10,i:i+1])).tolist()
            #
            #     del input_array

            # post-processing outputs from all LPUs and combine them into one dictionary
            # result = {u'data': {}}

            for k, lpu in lpus.items():
                uid_dict = lpu.get('output_uid_dict', None)
                if uid_dict is not None:
                    with h5py.File(lpu['output_file'], 'r') as output_file:
                        sample_interval = output_file['metadata'].attrs['sample_interval']
                        for var in uid_dict:
                            if var == 'spike_state':
                                uids = [n.decode() for n in output_file[var]['uids'][:]]
                                spike_times = output_file[var]['data']['time'][:]
                                index = output_file[var]['data']['index'][:]
                                for i, item in enumerate(uids):
                                    # output = np.nonzero(output_array[ignored_steps:, i:i+1].reshape(-1))[0]*dt
                                    output = spike_times[index == i]
                                    output = output[output > ignored_steps*dt] - ignored_steps*dt
                                    if item in result['output']:
                                        result['output'][item]['spike_time'] = {
                                            'data': output,
                                            'dt': dt*sample_interval}
                                    else:
                                        result['output'][item] = {'spike_time': {
                                                    'data': output,
                                                    'dt': dt*sample_interval}}
                            else:
                                uids = [n.decode() for n in output_file[var]['uids'][:]]
                                output_array = output_file[var]['data'][:]
                                for i, item in enumerate(uids):
                                    # if var == 'spike_state':
                                    #     output = np.nonzero(output_array[ignored_steps:, i:i+1].reshape(-1))[0]*dt
                                    #     if item in result['output']:
                                    #         result['output'][item]['spike_time'] = {
                                    #             'data': output.tolist(),
                                    #             'dt': dt}
                                    #     else:
                                    #         result['output'][item] = {'spike_time': {
                                    #             'data': output.tolist(),
                                    #             'dt': dt}}
                                    # else:
                                    output = output_array[ignored_steps:, i:i+1]
                                    if item in result['output']:
                                        result['output'][item][var] = {
                                            'data': output.copy(),
                                            'dt': dt*sample_interval}
                                    else:
                                        result['output'][item] = {var: {
                                            'data': output.copy(),
                                            'dt': dt*sample_interval}}
            result = {'success': {'result': result, 'meta': {'dur': steps*dt}}}
        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            tb = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
            print('An error occurred during postprocessing of results\n' + tb)
            return {'error':
                        {'exception': tb,
                         'message': 'An error occurred when postprocessing results in execution'}}
        return result
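
# (Not from the source) The attr_dict clean-up loops above recur in several
# of these snippets; here is the same normalization as a standalone sketch
# on a toy MultiDiGraph, using only standard networkx calls.
import networkx as nx

g = nx.MultiDiGraph()
g.add_node('n0', attr_dict={'class': 'Port', 'selector': '/lam/0/R1'})
g.add_edge('n0', 'n0', attr_dict={'weight': 1.0})

for uid, comp in g.nodes.items():
    if 'attr_dict' in comp:
        # promote the nested attributes to top-level node attributes
        nx.set_node_attributes(g, {uid: comp['attr_dict']})
        g.nodes[uid].pop('attr_dict')
for i, j, k, v in g.edges(keys=True, data=True):
    if 'attr_dict' in v:
        for key in v['attr_dict']:
            nx.set_edge_attributes(g, {(i, j, k): {key: v['attr_dict'][key]}})
        g.edges[(i, j, k)].pop('attr_dict')

assert g.nodes['n0']['class'] == 'Port'
assert g.edges['n0', 'n0', 0]['weight'] == 1.0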
Example 16
    def launch(self, user_id, task):
        neuron_uid_list = [str(a) for a in task['neuron_list']]

        conf_obj = get_config_obj()
        config = conf_obj.conf

        if config['Retina']['intype'] == 'Natural':
            coord_file = config['InputType']['Natural']['coord_file']
            tmp = os.path.splitext(coord_file)
            config['InputType']['Natural']['coord_file'] = '{}_{}{}'.format(
                tmp[0], user_id, tmp[1])

        setup_logger(file_name='neurokernel_' + user_id + '.log', screen=True)

        manager = core.Manager()

        lpus = {}
        patterns = {}
        G = task['data']
        with open('G.pickle', 'wb') as f:
            pickle.dump(G, f, protocol=pickle.HIGHEST_PROTOCOL)
        print(G)
        print(G.keys())
        print(G['LPU'])
        print(G['LPU'].keys())

        # get graph and output_uid_list for each LPU
        for k, lpu in G['LPU'].items():
            lpus[k] = {}
            g_lpu_na = create_graph_from_database_returned(lpu)
            lpu_nk_graph = nk.na_lpu_to_nk_new(g_lpu_na)
            lpus[k]['graph'] = lpu_nk_graph
            lpus[k]['output_uid_list'] = list(
                set(lpu_nk_graph.nodes()).intersection(set(neuron_uid_list)))
            lpus[k]['output_file'] = '{}_output_{}.h5'.format(k, user_id)

        for kkey, lpu in lpus.items():
            graph = lpu['graph']

            for uid, comp in graph.nodes.items():
                if 'attr_dict' in comp:
                    print('Found attr_dict; fixing...')
                    nx.set_node_attributes(graph, {uid: comp['attr_dict']})
                    # print('changed',uid)
                    graph.nodes[uid].pop('attr_dict')
            for i, j, k, v in graph.edges(keys=True, data=True):
                if 'attr_dict' in v:
                    for key in v['attr_dict']:
                        nx.set_edge_attributes(
                            graph, {(i, j, k): {
                                        key: v['attr_dict'][key]
                                    }})
                    graph.edges[(i, j, k)].pop('attr_dict')
            lpus[kkey]['graph'] = graph

        # get graph for each Pattern
        for k, pat in G['Pattern'].items():
            l1, l2 = k.split('-')
            if l1 in lpus and l2 in lpus:
                g_pattern_na = create_graph_from_database_returned(pat)
                pattern_nk = nk.na_pat_to_nk(g_pattern_na)
                print(lpus[l1]['graph'].nodes(data=True))
                lpu_ports = [node[1]['selector'] \
                             for node in lpus[l1]['graph'].nodes(data=True) \
                             if node[1]['class']=='Port'] + \
                            [node[1]['selector'] \
                             for node in lpus[l2]['graph'].nodes(data=True) \
                             if node[1]['class']=='Port']
                pattern_ports = pattern_nk.nodes()
                patterns[k] = {}
                patterns[k]['graph'] = pattern_nk.subgraph(
                    list(set(lpu_ports).intersection(set(pattern_ports))))

        dt = config['General']['dt']
        if 'dt' in task:
            dt = task['dt']
            print(dt)

        # add LPUs to manager
        for k, lpu in lpus.items():
            lpu_name = k
            graph = lpu['graph']

            for uid, comp in graph.nodes.items():
                if 'attr_dict' in comp:
                    nx.set_node_attributes(graph, {uid: comp['attr_dict']})
                    # print('changed',uid)
                    graph.nodes[uid].pop('attr_dict')
            for i, j, ko, v in graph.edges(keys=True, data=True):
                if 'attr_dict' in v:
                    for key in v['attr_dict']:
                        nx.set_edge_attributes(
                            graph, {(i, j, ko): {
                                        key: v['attr_dict'][key]
                                    }})
                    graph.edges[(i, j, ko)].pop('attr_dict')
            nx.write_gexf(graph, 'name.gexf')
            with open(lpu_name + '.pickle', 'wb') as f:
                pickle.dump(graph, f, protocol=pickle.HIGHEST_PROTOCOL)
            comps = graph.nodes.items()

            #for uid, comp in comps:
            #    if 'attr_dict' in comp:
            #        nx.set_node_attributes(graph, {uid: comp['attr_dict']})
            #        print('changed',uid)
            #    if 'class' in comp:

            if k == 'retina':
                prs = [node for node in graph.nodes(data=True) \
                       if node[1]['class'] == 'PhotoreceptorModel']
                for pr in prs:
                    graph.nodes[pr[0]]['num_microvilli'] = 3000
                input_processors = [
                    RetinaInputIndividual(config, prs, user_id)
                ]
                extra_comps = [PhotoreceptorModel]
                retina_input_uids = [a[0] for a in prs]
            elif k == 'EB':
                input_processor = StepInputProcessor('I', [node[0] for node in graph.nodes(data=True) \
                       if node[1]['class'] == 'LeakyIAF'], 40.0, 0.0, 1.0)
                input_processors = [input_processor]
                extra_comps = [BufferVoltage]
            else:
                input_processors = []
                extra_comps = [BufferVoltage]
            if 'inputProcessors' in task:
                input_processors = loadExperimentSettings(
                    task['inputProcessors'])
            output_processor = FileOutputProcessor(
                [('V', lpu['output_uid_list'])],
                lpu['output_file'],
                sample_interval=10)

            (comp_dict, conns) = LPU.graph_to_dicts(graph)

            # print(comp_dict)
            # print(conns)
            print(k)
            manager.add(LPU,
                        k,
                        dt,
                        comp_dict,
                        conns,
                        device=0,
                        input_processors=input_processors,
                        output_processors=[output_processor],
                        extra_comps=extra_comps,
                        debug=True)

        # connect LPUs by Patterns
        for k, pattern in patterns.items():
            l1, l2 = k.split('-')
            if l1 in lpus and l2 in lpus:
                print('Connecting {} and {}'.format(l1, l2))
                pat, key_order = Pattern.from_graph(nx.DiGraph(
                    pattern['graph']),
                                                    return_key_order=True)
                print(l1, l2)
                print(key_order)
                with Timer('update of connections in Manager'):
                    manager.connect(l1,
                                    l2,
                                    pat,
                                    int_0=key_order.index(l1),
                                    int_1=key_order.index(l2))

        # start simulation
        steps = config['General']['steps']
        ignored_steps = config['General']['ignored_steps']
        if 'steps' in task:
            steps = task['steps']
        if 'ignored_steps' in task:
            ignored_steps = task['ignored_steps']
        # ignored_steps = 0
        # steps = 100
        manager.spawn()
        manager.start(steps=steps)
        manager.wait()

        time.sleep(5)
        print(task)

        # post-processing inputs (hard coded, can be better organized)
        inputs = {
            u'ydomain': 1.0,
            u'xdomain': dt * (steps - ignored_steps),
            u'dt': dt * 10,
            u'data': {}
        }
        if 'retina' in lpus:
            input_array = si.read_array('{}_{}.h5'.format(
                config['Retina']['input_file'], user_id))
            inputs[u'ydomain'] = input_array.max()
            for i, item in enumerate(retina_input_uids):
                inputs['data'][item] = np.hstack(
                    (np.arange(int((steps - ignored_steps) / 10)).reshape(
                        (-1, 1)) * dt * 10, input_array[ignored_steps::10,
                                                        i:i + 1])).tolist()

            del input_array

        # post-processing outputs from all LPUs and combine them into one dictionary
        result = {
            u'ydomain': 1,
            u'xdomain': dt * (steps - ignored_steps),
            u'dt': dt * 10,
            u'data': {}
        }

        for k, lpu in lpus.items():
            with h5py.File(lpu['output_file'], 'r') as output_file:
                uids = output_file['V']['uids'][:]
                output_array = output_file['V']['data'][:]
                for i, item in enumerate(uids):
                    output = output_array[int(ignored_steps / 10):, i:i + 1]
                    # tmp = output.max()-output.min()
                    # if tmp <= 0.01: #mV
                    #     output = (output - output.min()) + 0.5
                    # else:
                    #     output = (output - output.min())/tmp*0.9+0.1
                    result['data'][item] = np.hstack(
                        (np.arange(int((steps - ignored_steps) / 10)).reshape(
                            (-1, 1)) * dt * 10, output)).tolist()

        return inputs, result

    def launch(self, user_id, task):
        neuron_uid_list = [str(a) for a in task['neuron_list']]

        conf_obj = get_config_obj()
        config = conf_obj.conf

        if config['Retina']['intype'] == 'Natural':
            coord_file = config['InputType']['Natural']['coord_file']
            tmp = os.path.splitext(coord_file)
            config['InputType']['Natural']['coord_file'] = '{}_{}{}'.format(
                tmp[0], user_id, tmp[1])

        setup_logger(file_name='neurokernel_' + user_id + '.log', screen=False)

        manager = core.Manager()

        lpus = {}
        patterns = {}
        G = task['success']['data']

        # get graph and output_uid_list for each LPU
        for k, lpu in G['LPU'].items():
            lpus[k] = {}
            g_lpu_na = create_graph_from_database_returned(lpu)
            lpu_nk_graph = nk.na_lpu_to_nk_new(g_lpu_na)
            lpus[k]['graph'] = lpu_nk_graph
            lpus[k]['output_uid_list'] = list(
                set(lpu_nk_graph.nodes()).intersection(set(neuron_uid_list)))
            lpus[k]['output_file'] = '{}_output_{}.h5'.format(k, user_id)

        # get graph for each Pattern
        for k, pat in G['Pattern'].items():
            l1, l2 = k.split('-')
            if l1 in lpus and l2 in lpus:
                g_pattern_na = create_graph_from_database_returned(pat)
                pattern_nk = nk.na_pat_to_nk(g_pattern_na)
                lpu_ports = [node[1]['selector'] \
                             for node in lpus[l1]['graph'].nodes(data=True) \
                             if node[1]['class']=='Port'] + \
                            [node[1]['selector'] \
                             for node in lpus[l2]['graph'].nodes(data=True) \
                             if node[1]['class']=='Port']
                pattern_ports = pattern_nk.nodes()
                patterns[k] = {}
                patterns[k]['graph'] = pattern_nk.subgraph(
                    list(set(lpu_ports).intersection(set(pattern_ports))))

        dt = config['General']['dt']

        # add LPUs to manager
        for k, lpu in lpus.items():
            graph = lpu['graph']
            if k == 'retina':
                prs = [node for node in graph.nodes(data=True) \
                       if node[1]['class'] == 'PhotoreceptorModel']
                for pr in prs:
                    graph.nodes[pr[0]]['num_microvilli'] = 3000
                input_processors = [
                    RetinaInputIndividual(config, prs, user_id)
                ]
                extra_comps = [PhotoreceptorModel]
                retina_input_uids = [a[0] for a in prs]
            else:
                input_processors = []
                extra_comps = [BufferVoltage]
            output_processor = FileOutputProcessor(
                [('V', lpu['output_uid_list'])],
                lpu['output_file'],
                sample_interval=10)

            (comp_dict, conns) = LPU.graph_to_dicts(graph)

            manager.add(LPU,
                        k,
                        dt,
                        comp_dict,
                        conns,
                        device=0,
                        input_processors=input_processors,
                        output_processors=[output_processor],
                        extra_comps=extra_comps)

        # connect LPUs by Patterns
        for k, pattern in patterns.items():
            l1, l2 = k.split('-')
            if l1 in lpus and l2 in lpus:
                print('Connecting {} and {}'.format(l1, l2))
                pat, key_order = Pattern.from_graph(
                    nx.DiGraph(pattern['graph']), return_key_order=True)
                with Timer('update of connections in Manager'):
                    manager.connect(l1,
                                    l2,
                                    pat,
                                    int_0=key_order.index(l1),
                                    int_1=key_order.index(l2))

        # start simulation
        steps = config['General']['steps']
        ignored_steps = config['General']['ignored_steps']
        manager.spawn()
        manager.start(steps=steps)
        manager.wait()

        time.sleep(5)

        # post-processing inputs (hard coded, can be better organized)
        inputs = {
            u'ydomain': 1.0,
            u'xdomain': dt * (steps - ignored_steps),
            u'dt': dt * 10,
            u'data': {}
        }
        if 'retina' in lpus:
            input_array = si.read_array('{}_{}.h5'.format(
                config['Retina']['input_file'], user_id))
            inputs[u'ydomain'] = input_array.max()
            for i, item in enumerate(retina_input_uids):
                inputs['data'][item] = np.hstack(
                    (np.arange(int((steps - ignored_steps) / 10)).reshape(
                        (-1, 1)) * dt * 10, input_array[ignored_steps::10,
                                                        i:i + 1])).tolist()

            del input_array

        # post-processing outputs from all LPUs and combine them into one dictionary
        result = {
            u'ydomain': 1,
            u'xdomain': dt * (steps - ignored_steps),
            u'dt': dt * 10,
            u'data': {}
        }

        for k, lpu in lpus.items():
            with h5py.File(lpu['output_file'], 'r') as output_file:
                uids = output_file['V']['uids'][:]
                output_array = output_file['V']['data'][:]
                for i, item in enumerate(uids):
                    output = output_array[int(ignored_steps / 10):, i:i + 1]
                    tmp = output.max() - output.min()
                    if tmp <= 0.01:  #mV
                        output = (output - output.min()) + 0.5
                    else:
                        output = (output - output.min()) / tmp * 0.9 + 0.1
                    result['data'][item] = np.hstack(
                        (np.arange(int((steps - ignored_steps) / 10)).reshape(
                            (-1, 1)) * dt * 10, output)).tolist()

        return inputs, result