Example #1
    def sim(self,
            duration,
            dt,
            steps=None,
            in_list=None,
            record=('V', 'spike_state', 'I'),
            log=None,
            device=0,
            sample_interval=1,
            input_filename='neuroballad_temp_model_input.h5',
            output_filename='neuroballad_temp_model_output.h5',
            graph_filename='neuroballad_temp_graph.gexf.gz',
            log_filename='neuroballad_temp_log.log',
            extra_comps=None,
            preamble=None,
            args=None,
            execute_in_same_thread=True):
        """
        Simulates the circuit for a set amount of time, with a fixed temporal
        step size and a list of inputs.

        TODO
        ----
        1. Use `preamble` and `args` for Slurm job submission.

        Example
        -------
        >>> C.sim(1., 1e-4, in_list=[InIStep(0, 10., 1., 2.)])
        """
        from neurokernel.tools.logging import setup_logger
        if log is not None:
            screen = False
            file_name = None
            if log.lower() in ['file', 'both']:
                file_name = log_filename
            if log.lower() in ['screen', 'both']:
                screen = True
            self.logger = setup_logger(file_name=file_name, screen=screen)

        self.compile(duration,
                     dt,
                     steps=steps,
                     in_list=in_list,
                     record=record,
                     extra_comps=extra_comps,
                     input_filename=input_filename,
                     output_filename=output_filename,
                     graph_filename=graph_filename,
                     device=device,
                     sample_interval=sample_interval,
                     execute_in_same_thread=execute_in_same_thread)
        if execute_in_same_thread:
            self.manager.spawn()
            self.manager.start(self.config.steps)
            self.manager.wait()
        else:
            if not os.path.isfile('neuroballad_execute.py'):
                copyfile(get_neuroballad_path() + '/neuroballad_execute.py',
                         'neuroballad_execute.py')
            subprocess.call((preamble or []) + ['python', 'neuroballad_execute.py'])
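
# --- Usage sketch for sim() above. The import path for `Circuit` and
# `InIStep` is an assumption; only the sim() call itself follows the
# signature and docstring in the method. ---
from neuroballad import Circuit, InIStep  # assumed import path

C = Circuit()  # assumed constructor; a populated circuit is needed in practice
C.sim(1.0, 1e-4,
      in_list=[InIStep(0, 10., 1., 2.)],  # step input to component 0
      log='both',                         # log to screen and to log_filename
      execute_in_same_thread=True)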
Example #2
def main():
    import neurokernel.mpi_relaunch

    logger = setup_logger(file_name=None, screen=True)

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-l", "--layers", dest="num_layers", type=int, default=16, help="number of layers of ommatidia on circle"
    )

    parser.add_argument("-i", "--input", action="store_true", help="generates input if set")
    parser.add_argument("-g", "--gexf", action="store_true", help="generates gexf of LPU if set")

    parser.add_argument("--steps", default=100, type=int, help="simulation steps")

    args = parser.parse_args()

    dt = 1e-4
    GEXF_FILE = "retina.gexf.gz"
    INPUT_FILE = "vision_input.h5"
    IMAGE_FILE = "image1.mat"
    OUTPUT_FILE = "retina_output.h5"

    if args.input:
        print("Generating input of model from image file")
        generate_input(INPUT_FILE, IMAGE_FILE, args.num_layers)
    if args.gexf:
        print("Writing retina lpu")
        n = args.num_layers
        photoreceptor_num = 6 * (3 * n * (n + 1) + 1)
        generate_gexf(GEXF_FILE, photoreceptor_num)

    man = core_gpu.Manager()

    print("Parsing lpu data")
    n_dict_ret, s_dict_ret = LPU.lpu_parser(GEXF_FILE)
    print("Initializing LPU")
    man.add(
        LPU,
        "retina",
        dt,
        n_dict_ret,
        s_dict_ret,
        input_file=INPUT_FILE,
        output_file=OUTPUT_FILE,
        device=0,
        debug=True,
        time_sync=False,
    )
    man.spawn()

    print("Starting simulation")
    start_time = time.time()
    man.start(steps=args.steps)
    man.wait()
    print("Simulation complete: Duration {} seconds".format(time.time() - start_time))
Example #3
def setup_logging(config):
    log = config['General']['log']
    file_name = None
    screen = False

    if log in ['file', 'both']:
        file_name = 'neurokernel.log'
    if log in ['screen', 'both']:
        screen = True
    logger = setup_logger(file_name=file_name, screen=screen)
Example #4
def setup_logging(config):
    log = config["General"]["log"]
    file_name = None
    screen = False

    if log in ["file", "both"]:
        file_name = "neurokernel.log"
    if log in ["screen", "both"]:
        screen = True
    logger = setup_logger(file_name=file_name, screen=screen)
Example #5
def setup_logging(config):
    '''
        Logging is useful for debugging.
        By default, errors raised during
        simulation do not appear on screen.
    '''
    log = config['General']['log']
    file_name = None
    screen = False

    if log in ['file', 'both']:
        file_name = 'neurokernel.log'
    if log in ['screen', 'both']:
        screen = True
    logger = setup_logger(file_name=file_name, screen=screen)
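
# Sketch: `config` only needs a ['General']['log'] entry, so a plain dict (or
# any parsed config file exposing that mapping) is enough to drive the helper
# above; assumes setup_logger has been imported as in these snippets.
config = {'General': {'log': 'both'}}  # 'file', 'screen', 'both', or 'none'
setup_logging(config)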
Example #6
def launch_nk(params, debug=False, device=0, log=False):
    """Launch Neurokernel."""

    if log:
        logger = setup_logger(file_name=params['name'] + '.log', screen=True)

    man = Manager()

    man.add(LPU, 
            params['name'], 
            params['dt'], 
            params['n_dict'], 
            params['s_dict'],
            input_file=params['input_file'],
            output_file=params['output_file'],
            device=device, 
            debug=debug,
            components=params['components'])

    man.spawn()
    man.start(int(params['steps']))
    man.wait()
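
# Sketch of the `params` dict consumed by launch_nk above. The keys are taken
# from the function body; the file names, step count, and the empty
# `components` dict are illustrative assumptions, and LPU is assumed imported
# from neurokernel as in the other examples.
n_dict, s_dict = LPU.lpu_parser('lif_graph.gexf.gz')
params = {'name': 'lif',
          'dt': 1e-4,
          'steps': 10000,
          'n_dict': n_dict,
          's_dict': s_dict,
          'input_file': 'lif_input.h5',
          'output_file': 'lif_output.h5',
          'components': {}}
launch_nk(params, debug=False, device=0, log=True)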
Example #7
    parser.add_argument('-u', '--lpus', default=n_gpus, type=int,
                        help='Number of LPUs [default: %s]' % n_gpus)
    parser.add_argument('-g', '--gpus', default=n_gpus, type=int,
                        help='Number of GPUs [default: %s]' % n_gpus)
    parser.add_argument('-p', '--use_mps', action='store_true',
                        help='Use Multi-Process Service [default: False]')
    args = parser.parse_args()

    file_name = None
    screen = False
    if args.log.lower() in ['file', 'both']:
        file_name = 'neurokernel.log'
    if args.log.lower() in ['screen', 'both']:
        screen = True
    logger = setup_logger(file_name=file_name, screen=screen,
                          mpi_comm=MPI.COMM_WORLD,
                          multiline=True)

    df = pd.read_excel('s2.xlsx',
                       sheet_name='Connectivity Matrix')

    # Select only main LPUs in olfaction, vision, and central complex systems:
    lpu_list = ['AL', 'al', 'MB', 'mb', 'LH', 'lh', 'MED', 'med', 'LOB', 'lob',
                'LOP', 'lop', 'OG', 'og', 'EB', 'FB', 'NOD', 'nod', 'PCB']
    conn_mat = df.loc[lpu_list, lpu_list].astype(int).to_numpy()

    # Get order in which LPUs (denoted by index into `conn_mat`) should be added
    # to maximize added number of ports for each additional LPU:
    ind_order = get_index_order.get_index_order(conn_mat)
    
    # Make sure specified number of LPUs to partition over GPUs is at least as
Example #8
def main():

    def run(connected):
        """
        Set `connected` to True to connect the LPUs.
        """

        import neurokernel.mpi_relaunch

        out_name = 'un' if not connected else 'co'
        man = core.Manager()

        lpu_file_0 = './data/generic_lpu_0.gexf.gz'
        lpu_file_1 = './data/generic_lpu_1.gexf.gz'
        comp_dict_0, conns_0 = LPU.lpu_parser(lpu_file_0)
        comp_dict_1, conns_1 = LPU.lpu_parser(lpu_file_1)
        
        fl_input_processor_0 = FileInputProcessor('./data/generic_lpu_0_input.h5')
        fl_output_processor_0 = FileOutputProcessor(
            [('V', None), ('spike_state', None)],
            'generic_lpu_0_%s_output.h5' % out_name, sample_interval=1)

        lpu_0_id = 'lpu_0'
        man.add(LPU, lpu_0_id, dt, comp_dict_0, conns_0,
                input_processors=[fl_input_processor_0],
                output_processors=[fl_output_processor_0],
                device=args.gpu_dev[0],
                debug=args.debug, time_sync=args.time_sync)

        fl_input_processor_1 = FileInputProcessor('./data/generic_lpu_1_input.h5')
        fl_output_processor_1 = FileOutputProcessor(
            [('V', None), ('spike_state', None)],
            'generic_lpu_1_%s_output.h5' % out_name, sample_interval=1)

        lpu_1_id = 'lpu_1'
        man.add(LPU, lpu_1_id, dt, comp_dict_1, conns_1,
                input_processors=[fl_input_processor_1],
                output_processors=[fl_output_processor_1],
                device=args.gpu_dev[1],
                debug=args.debug, time_sync=args.time_sync)

        # Create random connections between the input and output ports if the LPUs
        # are to be connected:
        if connected:

            # Find all output and input port selectors in each LPU:
            out_ports_spk_0 = plsel.Selector(
                            ','.join(LPU.extract_out_spk(comp_dict_0, 'id')[0]))
            out_ports_gpot_0 = plsel.Selector(
                            ','.join(LPU.extract_out_gpot(comp_dict_0, 'id')[0]))

            out_ports_spk_1 = plsel.Selector(
                            ','.join(LPU.extract_out_spk(comp_dict_1, 'id')[0]))
            out_ports_gpot_1 = plsel.Selector(
                            ','.join(LPU.extract_out_gpot(comp_dict_1, 'id')[0]))

            in_ports_spk_0 = plsel.Selector(
                            ','.join(LPU.extract_in_spk(comp_dict_0, 'id')[0]))
            in_ports_gpot_0 = plsel.Selector(
                            ','.join(LPU.extract_in_gpot(comp_dict_0, 'id')[0]))

            in_ports_spk_1 = plsel.Selector(
                            ','.join(LPU.extract_in_spk(comp_dict_1, 'id')[0]))
            in_ports_gpot_1 = plsel.Selector(
                            ','.join(LPU.extract_in_gpot(comp_dict_1, 'id')[0]))

            out_ports_0 = plsel.Selector.union(out_ports_spk_0, out_ports_gpot_0)
            out_ports_1 = plsel.Selector.union(out_ports_spk_1, out_ports_gpot_1)

            in_ports_0 = plsel.Selector.union(in_ports_spk_0, in_ports_gpot_0)
            in_ports_1 = plsel.Selector.union(in_ports_spk_1, in_ports_gpot_1)

            # Initialize a connectivity pattern between the two sets of port
            # selectors:
            pat = pattern.Pattern(plsel.Selector.union(out_ports_0, in_ports_0),
                                  plsel.Selector.union(out_ports_1, in_ports_1))

            # Create connections from the ports with identifiers matching the output
            # ports of one LPU to the ports with identifiers matching the input
            # ports of the other LPU:
            N_conn_spk_0_1 = min(len(out_ports_spk_0), len(in_ports_spk_1))
            N_conn_gpot_0_1 = min(len(out_ports_gpot_0), len(in_ports_gpot_1))
            for src, dest in zip(random.sample(out_ports_spk_0.identifiers,
                                               N_conn_spk_0_1),
                                 random.sample(in_ports_spk_1.identifiers,
                                               N_conn_spk_0_1)):
                pat[src, dest] = 1
                pat.interface[src, 'type'] = 'spike'
                pat.interface[dest, 'type'] = 'spike'
            for src, dest in zip(random.sample(out_ports_gpot_0.identifiers,
                                               N_conn_gpot_0_1),
                                 random.sample(in_ports_gpot_1.identifiers,
                                               N_conn_gpot_0_1)):
                pat[src, dest] = 1
                pat.interface[src, 'type'] = 'gpot'
                pat.interface[dest, 'type'] = 'gpot'

            # Interfaces 0 and 1 of the pattern correspond to lpu_0 and lpu_1.
            man.connect(lpu_0_id, lpu_1_id, pat, 0, 1)

        man.spawn()
        man.start(steps=args.steps)
        man.wait()

    dt = 1e-4
    dur = 1.0
    steps = int(dur/dt)

    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', default=False,
                        dest='debug', action='store_true',
                        help='Write connectivity structures and inter-LPU routed data in debug folder')
    parser.add_argument('-l', '--log', default='none', type=str,
                        help='Log output to screen [file, screen, both, or none; default:none]')
    parser.add_argument('-s', '--steps', default=steps, type=int,
                        help='Number of steps [default: %s]' % steps)
    parser.add_argument('-r', '--time_sync', default=False, action='store_true',
                        help='Time data reception throughput [default: False]')
    parser.add_argument('-g', '--gpu_dev', default=[0, 1], type=int, nargs='+',
                        help='GPU device numbers [default: 0 1]')
    parser.add_argument('-d', '--disconnect', default=False, action='store_true',
                        help='Run with disconnected LPUs [default: False]')
    args = parser.parse_args()

    file_name = None
    screen = False
    if args.log.lower() in ['file', 'both']:
        file_name = 'neurokernel.log'
    if args.log.lower() in ['screen', 'both']:
        screen = True
    logger = setup_logger(file_name=file_name, screen=screen)

    random.seed(0)
    c = not args.disconnect
    run(c)
Example #9
def run_lif(payload):
    """
    Run an example lif model through the nk_server json interface
    """

    print(payload)
    payload = json.loads(payload)
    params = payload['params']
    sim_input = payload['input']

    data_dir = 'data/' + params['sim_uid'] + '/' + params['sim_exp'] + '/'

    # Create the data directory tree if it does not already exist.
    if not os.path.isdir(data_dir):
        os.makedirs(data_dir)

    dt = params['sim_dt']
    Nt = params['sim_steps']
    dur = Nt * dt  # total simulated time in seconds

    G = nx.DiGraph() # or nx.MultiDiGraph()
    G.add_nodes_from([0])
    G.nodes[0].update({
        'model': 'LeakyIAF_rest',
        'name': 'neuron_0',
        'extern': True,       # can receive an external input signal
        'public': True,       # can emit output to other LPUs
        'spiking': True,      # emits spikes rather than a membrane voltage
        'selector': '/a[0]',  # every public neuron must have a selector
        'V': params['par_V'],     # initial membrane voltage
        'Vr': params['par_Vr'],   # reset voltage
        'Vt': params['par_Vt'],   # spike threshold
        'R': params['par_R'],     # membrane resistance
        'C': params['par_C'],     # membrane capacitance
        'Er': params['par_rest']  # resting potential
    })

    nx.write_gexf(G, data_dir +'lif_graph.gexf.gz')

    N_neurons = G.number_of_nodes()
    if sim_input == 'Default':
        t = np.arange(0, params['sim_dt']*params['sim_steps'], params['sim_dt'])
        I = np.zeros((params['sim_steps'], N_neurons), dtype=np.double)
        I[t>0.2] = 1e-9
        I[t>0.4] = 2e-9
        I[t>0.8] = 2.5e-9
       

        with h5py.File(data_dir + 'lif_input.h5', 'w') as f:
            f.create_dataset('array', (Nt, N_neurons),
                             dtype=np.double,
                             data=I)
    else:
        print('loading non-default inputs (WIP)')
        with h5py.File(data_dir + 'lif_input.h5', 'w') as f:
            f.create_dataset('array', (Nt, N_neurons),
                             dtype=np.double,
                             data=sim_input)

    parser = argparse.ArgumentParser()

    parser.add_argument('--debug', default=False,
                    dest='debug', action='store_true',
                    help='Write connectivity structures and inter-LPU routed data in debug folder')

    parser.add_argument('-l', '--log', default='none', type=str,
                    help='Log output to screen [file, screen, both, or none; default:none]')

    parser.add_argument('-s', '--steps', default=params['sim_steps'], type=int,
                    help='Number of steps [default: %s]' % params['sim_steps'])

    args = parser.parse_args()


    file_name = None
    screen = False
    if args.log.lower() in ['file', 'both']:
        file_name = 'lif.log'
    if args.log.lower() in ['screen', 'both']:
        screen = True
    logger = setup_logger(file_name=file_name, screen=screen)


    man = Manager()
    (n_dict, s_dict) = LPU.lpu_parser(data_dir+'lif_graph.gexf.gz')

    if params['sim_output'] != 'spike':
        args.debug = True

    man.add(LPU, 'lif', dt, n_dict, s_dict,
            input_file=data_dir + 'lif_input.h5',
            output_file=data_dir + 'lif_output.h5',
            device=0, debug=args.debug)

    man.spawn()
    man.start(steps=params['sim_steps'])
    man.wait()

    if params['sim_output'] == 'spike':
        with h5py.File(data_dir + 'lif_output_spike.h5', 'r') as f:
            data = np.array(f['array']).T.tolist()
    else:
        ######## BUG: Needs to output debug to data folder
        with h5py.File('./lif_V.h5', 'r') as f:
            data = np.array(f['array']).T.tolist()

    return data
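
# Sketch of a payload accepted by run_lif above. Every key is taken from the
# parsing code in the function body; the numeric values are illustrative and
# the module-level imports (json, etc.) are assumed as in the original file.
payload = json.dumps({
    'params': {'sim_uid': 'demo', 'sim_exp': 'exp0',
               'sim_dt': 1e-4, 'sim_steps': 10000,
               'sim_output': 'spike',
               'par_V': -0.07, 'par_Vr': -0.07, 'par_Vt': -0.055,
               'par_R': 1e8, 'par_C': 1e-10, 'par_rest': -0.07},
    'input': 'Default',
})
data = run_lif(payload)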
Example #10
def main():
    import neurokernel.mpi_relaunch
    import neurokernel.core_gpu as core
    (comp_dict, conns) = LPU.lpu_parser('neuroballad_temp_model.gexf.gz')
    with open('run_parameters.pickle', 'rb') as f:
        run_parameters = pickle.load(f)
    with open('record_parameters.pickle', 'rb') as f:
        record_parameters = pickle.load(f)
    dur = run_parameters[0]
    dt = run_parameters[1]
    fl_input_processor = FileInputProcessor('neuroballad_temp_model_input.h5')

    from neurokernel.LPU.OutputProcessors.FileOutputProcessor import FileOutputProcessor
    output_processor = FileOutputProcessor(record_parameters,
                                           'neuroballad_temp_model_output.h5',
                                           sample_interval=1)

    # Parse extra arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--debug',
        default=False,
        dest='debug',
        action='store_true',
        help=
        'Write connectivity structures and inter-LPU routed data in debug folder'
    )
    parser.add_argument(
        '-l',
        '--log',
        default='file',
        type=str,
        help='Log output to screen [file, screen, both, or none; default: file]'
    )
    parser.add_argument('-r',
                        '--time_sync',
                        default=False,
                        action='store_true',
                        help='Time data reception throughput [default: False]')
    parser.add_argument('-g',
                        '--gpu_dev',
                        default=[0, 1],
                        type=int,
                        nargs='+',
                        help='GPU device numbers [default: 0 1]')
    parser.add_argument('-d',
                        '--disconnect',
                        default=False,
                        action='store_true',
                        help='Run with disconnected LPUs [default: False]')
    args = parser.parse_args()
    file_name = None
    screen = False
    if args.log.lower() in ['file', 'both']:
        file_name = 'neurokernel.log'
    if args.log.lower() in ['screen', 'both']:
        screen = True
    logger = setup_logger(file_name=file_name, screen=screen)
    man = core.Manager()

    man.add(LPU,
            'lpu',
            dt,
            comp_dict,
            conns,
            input_processors=[fl_input_processor],
            output_processors=[output_processor],
            device=args.gpu_dev[0],
            debug=args.debug)

    steps = int(dur / dt)
    man.spawn()
    man.start(steps=steps)
    man.wait()
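
# Sketch: the two pickle files read by main() above would be produced by the
# caller (e.g. neuroballad's compile step); a minimal stand-in, with the
# record list in the same [('var', uids-or-None), ...] form that
# FileOutputProcessor takes elsewhere in these examples:
import pickle

with open('run_parameters.pickle', 'wb') as f:
    pickle.dump([1.0, 1e-4], f)  # [duration in seconds, dt in seconds]
with open('record_parameters.pickle', 'wb') as f:
    pickle.dump([('V', None), ('spike_state', None)], f)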
Example #11
# PB_local_ids = [i for i, name in enumerate(lpu_name_to_n_dict['PB']['LeakyIAF']['name']) \
#                  if lpu_name_to_family_map['PB'][name] == 'PB']
# v_PB.add_LPU('PB_output_spike.h5',
#           graph=lpu_name_to_g_nk['PB'], LPU='PB Local')
# v_PB.add_plot({'type': 'raster', 'ids': {0: PB_local_ids},
#                'yticks': range(1, 1+len(PB_local_ids)), #'yticklabels': range(len(pb_neurons)),
#                'yticklabels': []},
#               'PB Local', 'Output')

v_PB.update_interval = update_interval
v_PB.fontsize = fontsize
v_PB.dt = dt
v_PB.xlim = xlim
v_PB.run('PB_output_%s.png' % args.d)
'''
logger = setup_logger(screen=True)
dt = 1e-4
update_interval = None
fontsize = 16
xlim = [0, 1.0]

v_BU = vis.visualizer()

v_BU.add_LPU('BU_output.h5', LPU='BU')
v_BU.add_plot(
    {
        'type': 'raster',
        'uids': [sort_list['BU']],
        'variable': 'spike_state',
        'yticks': range(1, 1 + len(lpu_name_to_neurons['BU'])
                        ),  #'yticklabels': range(len(bu_neurons))
Example #12
def main():
    import neurokernel.mpi_relaunch

    logger = setup_logger(file_name=None, screen=True)

    parser = argparse.ArgumentParser()
    parser.add_argument('-l',
                        '--layers',
                        dest='num_layers',
                        type=int,
                        default=16,
                        help='number of layers of ommatidia on circle')

    parser.add_argument('-i',
                        '--input',
                        action="store_true",
                        help='generates input if set')
    parser.add_argument('-g',
                        '--gexf',
                        action="store_true",
                        help='generates gexf of LPU if set')

    parser.add_argument('--steps',
                        default=100,
                        type=int,
                        help='simulation steps')

    args = parser.parse_args()

    dt = 1e-4
    GEXF_FILE = 'retina.gexf.gz'
    INPUT_FILE = 'vision_input.h5'
    IMAGE_FILE = 'image1.mat'
    OUTPUT_FILE = 'retina_output.h5'

    if args.input:
        print('Generating input of model from image file')
        generate_input(INPUT_FILE, IMAGE_FILE, args.num_layers)
    if args.gexf:
        print('Writing retina lpu')
        n = args.num_layers
        photoreceptor_num = 6 * (3 * n * (n + 1) + 1)
        generate_gexf(GEXF_FILE, photoreceptor_num)

    man = core_gpu.Manager()

    print('Parsing lpu data')
    n_dict_ret, s_dict_ret = LPU.lpu_parser(GEXF_FILE)
    print('Initializing LPU')
    man.add(LPU,
            'retina',
            dt,
            n_dict_ret,
            s_dict_ret,
            input_file=INPUT_FILE,
            output_file=OUTPUT_FILE,
            device=0,
            debug=True,
            time_sync=False)
    man.spawn()

    print('Starting simulation')
    start_time = time.time()
    man.start(steps=args.steps)
    man.wait()
    print('Simulation complete: Duration {} seconds'.format(time.time() -
                                                            start_time))
Example #13
    def launch(self, user_id, task):
        # neuron_uid_list = [str(a) for a in task['neuron_list']]
        try:
            # conf_obj = get_config_obj()
            # config = conf_obj.conf

            setup_logger(file_name='neurokernel_' + user_id + '.log', screen=False)

            manager = core.Manager()

            lpus = {}
            patterns = {}
            G = task['data']

            for i in list(G['Pattern'].keys()):
                a = G['Pattern'][i]['nodes']
                if not any(v['class'] == 'Port' for v in a.values()):
                    del G['Pattern'][i]

            for i in list(G['LPU'].keys()):
                a = G['LPU'][i]['nodes']
                if len(a) < 3:
                    del G['LPU'][i]

            # with open('G.pickle', 'wb') as f:
            #     pickle.dump(G, f, protocol=pickle.HIGHEST_PROTOCOL)
            # print(G)
            # print(G.keys())
            # print(G['LPU'])
            # print(G['LPU'].keys())

            # get graph and output_uid_list for each LPU
            for k, lpu in G['LPU'].items():
                lpus[k] = {}
                g_lpu_na = create_graph_from_database_returned(lpu)
                lpu_nk_graph = nk.na_lpu_to_nk_new(g_lpu_na)
                lpus[k]['graph'] = lpu_nk_graph
                # lpus[k]['output_uid_list'] = list(
                #             set(lpu_nk_graph.nodes()).intersection(
                #                 set(neuron_uid_list)))
                # lpus[k]['output_file'] = '{}_output_{}.h5'.format(k, user_id)

            for kkey, lpu in lpus.items():
                graph = lpu['graph']

                for uid, comp in graph.nodes.items():
                    if 'attr_dict' in comp:
                        print('Found attr_dict; fixing...')
                        nx.set_node_attributes(graph, {uid: comp['attr_dict']})
                        # print('changed',uid)
                        graph.nodes[uid].pop('attr_dict')
                    if 'params' in comp:
                        params = graph.nodes[uid].pop('params')
                        nx.set_node_attributes(graph, {uid: {k: float(v) for k, v in params.items()}})
                    if 'states' in comp:
                        states = graph.nodes[uid].pop('states')
                        nx.set_node_attributes(graph, {uid: {'init{}'.format(k): float(v) for k, v in states.items()}})
                for i,j,k,v in graph.edges(keys=True, data=True):
                    if 'attr_dict' in v:
                        for key in v['attr_dict']:
                            nx.set_edge_attributes(graph, {(i,j,k): {key: v['attr_dict'][key]}})
                        graph.edges[(i,j,k)].pop('attr_dict')
                lpus[kkey]['graph'] = graph

            # get graph for each Pattern
            for k, pat in G['Pattern'].items():
                l1,l2 = k.split('-')
                if l1 in lpus and l2 in lpus:
                    g_pattern_na = create_graph_from_database_returned(pat)
                    pattern_nk = nk.na_pat_to_nk(g_pattern_na)
                    #print(lpus[l1]['graph'].nodes(data=True))
                    lpu_ports = [node[1]['selector'] \
                                 for node in lpus[l1]['graph'].nodes(data=True) \
                                 if node[1]['class']=='Port'] + \
                                [node[1]['selector'] \
                                 for node in lpus[l2]['graph'].nodes(data=True) \
                                 if node[1]['class']=='Port']
                    pattern_ports = pattern_nk.nodes()
                    patterns[k] = {}
                    patterns[k]['graph'] = pattern_nk.subgraph(
                        list(set(lpu_ports).intersection(set(pattern_ports))))

            # dt = config['General']['dt']
            # if 'dt' in task:
            dt = task['dt']
            print(dt)

            device_count = 0
            # add LPUs to manager
            for k, lpu in lpus.items():
                lpu_name = k
                graph = lpu['graph']

                for uid, comp in graph.nodes.items():
                    if 'attr_dict' in comp:
                        nx.set_node_attributes(graph, {uid: comp['attr_dict']})
                        # print('changed',uid)
                        graph.nodes[uid].pop('attr_dict')
                for i,j,ko,v in graph.edges(keys=True, data=True):
                    if 'attr_dict' in v:
                        for key in v['attr_dict']:
                            nx.set_edge_attributes(graph, {(i,j,ko): {key: v['attr_dict'][key]}})
                        graph.edges[(i,j,ko)].pop('attr_dict')
                # nx.write_gexf(graph,'name.gexf')
                # with open(lpu_name + '.pickle', 'wb') as f:
                #     pickle.dump(graph, f, protocol=pickle.HIGHEST_PROTOCOL)
                comps = graph.nodes.items()

                #for uid, comp in comps:
                #    if 'attr_dict' in comp:
                #        nx.set_node_attributes(graph, {uid: comp['attr_dict']})
                #        print('changed',uid)
                #    if 'class' in comp:

                # if k == 'retina':
                #     if config['Retina']['intype'] == 'Natural':
                #         coord_file = config['InputType']['Natural']['coord_file']
                #         tmp = os.path.splitext(coord_file)
                #         config['InputType']['Natural']['coord_file'] = '{}_{}{}'.format(
                #                 tmp[0], user_id, tmp[1])
                #     prs = [node for node in graph.nodes(data=True) \
                #            if node[1]['class'] == 'PhotoreceptorModel']
                #     for pr in prs:
                #         graph.node[pr[0]]['num_microvilli'] = 3000
                #     input_processors = [RetinaInputIndividual(config, prs, user_id)]
                #     extra_comps = [PhotoreceptorModel]
                #     retina_input_uids = [a[0] for a in prs]
                # # elif k == 'EB':
                # #     input_processor = StepInputProcessor('I', [node[0] for node in graph.nodes(data=True) \
                # #            if node[1]['class'] == 'LeakyIAF'], 40.0, 0.0, 1.0)
                # #     input_processors = [input_processor]
                # #     extra_comps = []#[BufferVoltage]
                # else:
                #     input_processors = []
                #     extra_comps = [BufferVoltage]
                if 'inputProcessors' in task:
                    if lpu_name in task['inputProcessors']:
                        input_processors, record = \
                            loadInputProcessors(task['inputProcessors'][lpu_name])
                        lpus[k]['input_record'] = record
                    else:
                        input_processors = []
                else:
                    input_processors = []

                # configure output processors
                lpus[k]['output_file'] = '{}_output_{}.h5'.format(k, user_id)
                output_processors = []
                if 'outputProcessors' in task:
                    if lpu_name in task['outputProcessors']:
                        output_processors, record = loadOutputProcessors(
                                                lpus[k]['output_file'],
                                                task['outputProcessors'][lpu_name])
                        if len(record):
                            lpus[k]['output_uid_dict'] = record

                # (comp_dict, conns) = LPU.graph_to_dicts(graph)
                manager.add(LPU, k, dt, 'pickle', pickle.dumps(graph),  # comp_dict, conns,
                            device=device_count, input_processors=input_processors,
                            output_processors=output_processors,
                            extra_comps=[], debug=False)
                device_count = (device_count+1) % self.ngpus

            # connect LPUs by Patterns
            for k, pattern in patterns.items():
                l1,l2 = k.split('-')
                if l1 in lpus and l2 in lpus:
                    print('Connecting {} and {}'.format(l1, l2))
                    pat, key_order = Pattern.from_graph(nx.DiGraph(pattern['graph']),
                                                        return_key_order = True)
                    print(l1,l2)
                    print(key_order)
                    with Timer('update of connections in Manager'):
                        try:
                            manager.connect(l1, l2, pat,
                                            int_0=key_order.index('{}/{}'.format(k, l1)),
                                            int_1=key_order.index('{}/{}'.format(k, l2)))
                        except ValueError:
                            manager.connect(l1, l2, pat,
                                            int_0=key_order.index(l1),
                                            int_1=key_order.index(l2))

            # start simulation
            # steps = config['General']['steps']
            # ignored_steps = config['General']['ignored_steps']
            # if 'steps' in task:
            steps = task['steps']
            # if 'ignored_steps' in task:
            # ignored_steps = task['ignored_steps']
            # ignored_steps = 0
            # steps = 100
            manager.spawn()
            manager.start(steps=steps)
        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            tb = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
            print('An error occurred during compilation\n' + tb)
            return {'error':
                        {'exception': tb,
                         'message': 'An error occurred during compilation for execution'}}

        try:
            manager.wait()
        except LPUExecutionError:
            ErrorMessage = 'An error occurred during execution of LPU {} at step {}:\n'.format(
                            manager._errors[0][0], manager._errors[0][1]) + \
                            ''.join(manager._errors[0][2])
            print(ErrorMessage)
            return {'error':
                        {'exception': ''.join(manager._errors[0][2]),
                         'message': 'An error occurred during execution of LPU {} at step {}:\n'.format(
                            manager._errors[0][0], manager._errors[0][1])}}
        time.sleep(5)
        # print(task)

        ignored_steps = 0

        try:
            # post-processing inputs (hard coded, can be better organized)
            result = {'sensory': {}, 'input': {}, 'output': {}}
            for k, lpu in lpus.items():
                records = lpu.get('input_record', [])
                for record in records:
                    if record['sensory_file'] is not None:
                        if k not in result['sensory']:
                            result['sensory'][k] = []
                        with h5py.File(record['sensory_file'], 'r') as sensory_file:
                            result['sensory'][k].append({'dt': record['sensory_interval']*dt,
                                                         'data': sensory_file['sensory'][:]})
                    if record['input_file'] is not None:
                        with h5py.File(record['input_file'], 'r') as input_file:
                            sample_interval = input_file['metadata'].attrs['sample_interval']
                            for var in input_file.keys():
                                if var == 'metadata': continue
                                uids = [n.decode() for n in input_file[var]['uids'][:]]
                                input_array = input_file[var]['data'][:]
                                for i, item in enumerate(uids):
                                    if var == 'spike_state':
                                        input = np.nonzero(input_array[ignored_steps:, i:i+1].reshape(-1))[0]*dt
                                        if item in result['input']:
                                            if 'spike_time' in result['input'][item]:
                                                result['input'][item]['spike_time']['data'] = np.sort(
                                                    np.concatenate((result['input'][item]['spike_time']['data'],
                                                                    input)))
                                            else:
                                                result['input'][item]['spike_time'] = {
                                                    'data': input.copy(),
                                                    'dt': dt*sample_interval}
                                        else:
                                            result['input'][item] = {'spike_time': {
                                                'data': input.copy(),
                                                'dt': dt*sample_interval}}
                                    else:
                                        input = input_array[ignored_steps:, i:i+1]
                                        if item in result['input']:
                                            if var in result['input'][item]:
                                                result['input'][item][var]['data'] += input
                                            else:
                                                result['input'][item][var] = {
                                                    'data': input.copy(),
                                                    'dt': dt*sample_interval}
                                        else:
                                            result['input'][item] = {var: {
                                                'data': input.copy(),
                                                'dt': dt*sample_interval}}

            # if 'retina' in lpus:
            #     input_array = si.read_array(
            #             '{}_{}.h5'.format(config['Retina']['input_file'], user_id))
            #     inputs[u'ydomain'] = input_array.max()
            #     for i, item in enumerate(retina_input_uids):
            #         inputs['data'][item] = np.hstack(
            #             (np.arange(int((steps-ignored_steps)/10)).reshape((-1,1))*dt*10,
            #              input_array[ignored_steps::10,i:i+1])).tolist()
            #
            #     del input_array

            # post-processing outputs from all LPUs and combine them into one dictionary
            # result = {u'data': {}}

            for k, lpu in lpus.items():
                uid_dict = lpu.get('output_uid_dict', None)
                if uid_dict is not None:
                    with h5py.File(lpu['output_file'], 'r') as output_file:
                        sample_interval = output_file['metadata'].attrs['sample_interval']
                        for var in uid_dict:
                            if var == 'spike_state':
                                uids = [n.decode() for n in output_file[var]['uids'][:]]
                                spike_times = output_file[var]['data']['time'][:]
                                index = output_file[var]['data']['index'][:]
                                for i, item in enumerate(uids):
                                    # output = np.nonzero(output_array[ignored_steps:, i:i+1].reshape(-1))[0]*dt
                                    output = spike_times[index == i]
                                    output = output[output>ignored_steps*dt]-ignored_steps*dt
                                    if item in result['output']:
                                        result['output'][item]['spike_time'] = {
                                            'data': output,
                                            'dt': dt*sample_interval}
                                    else:
                                        result['output'][item] = {'spike_time': {
                                                    'data': output,
                                                    'dt': dt*sample_interval}}
                            else:
                                uids = [n.decode() for n in output_file[var]['uids'][:]]
                                output_array = output_file[var]['data'][:]
                                for i, item in enumerate(uids):
                                    # if var == 'spike_state':
                                    #     output = np.nonzero(output_array[ignored_steps:, i:i+1].reshape(-1))[0]*dt
                                    #     if item in result['output']:
                                    #         result['output'][item]['spike_time'] = {
                                    #             'data': output.tolist(),
                                    #             'dt': dt}
                                    #     else:
                                    #         result['output'][item] = {'spike_time': {
                                    #             'data': output.tolist(),
                                    #             'dt': dt}}
                                    # else:
                                    output = output_array[ignored_steps:, i:i+1]
                                    if item in result['output']:
                                        result['output'][item][var] = {
                                            'data': output.copy(),
                                            'dt': dt*sample_interval}
                                    else:
                                        result['output'][item] = {var: {
                                            'data': output.copy(),
                                            'dt': dt*sample_interval}}
            result = {'success': {'result': result, 'meta': {'dur': steps*dt}}}
        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            tb = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
            print('An error occurred during postprocessing of results\n' + tb)
            return {'error':
                        {'exception': tb,
                         'message': 'An error occurred when postprocessing results in execution'}}
        return result
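
        # Sketch of the `task` dict consumed by launch() above; keys taken
        # from the method body. 'data' holds graphs keyed 'LPU' and 'Pattern',
        # and the processor entries are optional, keyed by LPU name:
        #
        #     task = {'data': {'LPU': {...}, 'Pattern': {...}},
        #             'dt': 1e-4,
        #             'steps': 10000,
        #             'inputProcessors': {...},   # optional
        #             'outputProcessors': {...}}  # optional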
Example #14
                    help='Log output to screen [file, screen, both, or none; default:none]')
parser.add_argument('-s', '--steps', default=steps, type=int,
                    help='Number of steps [default: %s]' % steps)
parser.add_argument('-g', '--gpu_dev', default=0, type=int,
                    help='GPU device number [default: 0]')
parser.add_argument('-n', type=int, nargs=3, default=(30, 30, 30),
                    help='Numbers of sensory, local, and projection neurons')
args = parser.parse_args()

file_name = None
screen = False
if args.log.lower() in ['file', 'both']:
    file_name = 'neurokernel.log'
if args.log.lower() in ['screen', 'both']:
    screen = True
logger = setup_logger(file_name=file_name, screen=screen)

man = core.Manager()

np.random.seed(0)
lpu_name = 'neurodriver'
g = create_lpu_graph(lpu_name, *args.n)
n_dict, s_dict = LPU.graph_to_dicts(g)
total_neurons =  \
    len([d for n, d in g.nodes(data=True) if d['model'] == 'LeakyIAF'])
total_synapses = \
    len([d for f, t, d in g.edges(data=True) if d['model'] == 'AlphaSynapse'])

output_file = None

man.add(MyLPU, lpu_name, dt, n_dict, s_dict, I_const=0.6,
Example #15
def main():
    def run(connected):
        """
        Set `connected` to True to connect the LPUs.
        """

        import neurokernel.mpi_relaunch

        out_name = 'un' if not connected else 'co'
        man = core.Manager()

        lpu_file_0 = './data/generic_lpu_0.gexf.gz'
        lpu_file_1 = './data/generic_lpu_1.gexf.gz'
        comp_dict_0, conns_0 = LPU.lpu_parser(lpu_file_0)
        comp_dict_1, conns_1 = LPU.lpu_parser(lpu_file_1)

        fl_input_processor_0 = FileInputProcessor(
            './data/generic_lpu_0_input.h5')
        fl_output_processor_0 = FileOutputProcessor(
            [('V', None), ('spike_state', None)],
            'generic_lpu_0_%s_output.h5' % out_name,
            sample_interval=1)

        lpu_0_id = 'lpu_0'
        man.add(LPU,
                lpu_0_id,
                dt,
                comp_dict_0,
                conns_0,
                input_processors=[fl_input_processor_0],
                output_processors=[fl_output_processor_0],
                device=args.gpu_dev[0],
                debug=args.debug,
                time_sync=args.time_sync)

        fl_input_processor_1 = FileInputProcessor(
            './data/generic_lpu_1_input.h5')
        fl_output_processor_1 = FileOutputProcessor(
            [('V', None), ('spike_state', None)],
            'generic_lpu_1_%s_output.h5' % out_name,
            sample_interval=1)

        lpu_1_id = 'lpu_1'
        man.add(LPU,
                lpu_1_id,
                dt,
                comp_dict_1,
                conns_1,
                input_processors=[fl_input_processor_1],
                output_processors=[fl_output_processor_1],
                device=args.gpu_dev[1],
                debug=args.debug,
                time_sync=args.time_sync)

        # Create random connections between the input and output ports if the LPUs
        # are to be connected:
        if connected:

            # Find all output and input port selectors in each LPU:
            out_ports_spk_0 = plsel.Selector(','.join(
                LPU.extract_out_spk(comp_dict_0, 'id')[0]))
            out_ports_gpot_0 = plsel.Selector(','.join(
                LPU.extract_out_gpot(comp_dict_0, 'id')[0]))

            out_ports_spk_1 = plsel.Selector(','.join(
                LPU.extract_out_spk(comp_dict_1, 'id')[0]))
            out_ports_gpot_1 = plsel.Selector(','.join(
                LPU.extract_out_gpot(comp_dict_1, 'id')[0]))

            in_ports_spk_0 = plsel.Selector(','.join(
                LPU.extract_in_spk(comp_dict_0, 'id')[0]))
            in_ports_gpot_0 = plsel.Selector(','.join(
                LPU.extract_in_gpot(comp_dict_0, 'id')[0]))

            in_ports_spk_1 = plsel.Selector(','.join(
                LPU.extract_in_spk(comp_dict_1, 'id')[0]))
            in_ports_gpot_1 = plsel.Selector(','.join(
                LPU.extract_in_gpot(comp_dict_1, 'id')[0]))

            out_ports_0 = plsel.Selector.union(out_ports_spk_0,
                                               out_ports_gpot_0)
            out_ports_1 = plsel.Selector.union(out_ports_spk_1,
                                               out_ports_gpot_1)

            in_ports_0 = plsel.Selector.union(in_ports_spk_0, in_ports_gpot_0)
            in_ports_1 = plsel.Selector.union(in_ports_spk_1, in_ports_gpot_1)

            # Initialize a connectivity pattern between the two sets of port
            # selectors:
            pat = pattern.Pattern(
                plsel.Selector.union(out_ports_0, in_ports_0),
                plsel.Selector.union(out_ports_1, in_ports_1))

            # Create connections from the ports with identifiers matching the output
            # ports of one LPU to the ports with identifiers matching the input
            # ports of the other LPU:
            N_conn_spk_0_1 = min(len(out_ports_spk_0), len(in_ports_spk_1))
            N_conn_gpot_0_1 = min(len(out_ports_gpot_0), len(in_ports_gpot_1))
            for src, dest in zip(
                    random.sample(out_ports_spk_0.identifiers, N_conn_spk_0_1),
                    random.sample(in_ports_spk_1.identifiers, N_conn_spk_0_1)):
                pat[src, dest] = 1
                pat.interface[src, 'type'] = 'spike'
                pat.interface[dest, 'type'] = 'spike'
            for src, dest in zip(
                    random.sample(out_ports_gpot_0.identifiers,
                                  N_conn_gpot_0_1),
                    random.sample(in_ports_gpot_1.identifiers,
                                  N_conn_gpot_0_1)):
                pat[src, dest] = 1
                pat.interface[src, 'type'] = 'gpot'
                pat.interface[dest, 'type'] = 'gpot'

            man.connect(lpu_0_id, lpu_1_id, pat, 0, 1)

        man.spawn()
        man.start(steps=args.steps)
        man.wait()

    dt = 1e-4
    dur = 1.0
    steps = int(dur / dt)

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--debug',
        default=False,
        dest='debug',
        action='store_true',
        help=
        'Write connectivity structures and inter-LPU routed data in debug folder'
    )
    parser.add_argument(
        '-l',
        '--log',
        default='none',
        type=str,
        help='Log output to screen [file, screen, both, or none; default:none]'
    )
    parser.add_argument('-s',
                        '--steps',
                        default=steps,
                        type=int,
                        help='Number of steps [default: %s]' % steps)
    parser.add_argument('-r',
                        '--time_sync',
                        default=False,
                        action='store_true',
                        help='Time data reception throughput [default: False]')
    parser.add_argument('-g',
                        '--gpu_dev',
                        default=[0, 1],
                        type=int,
                        nargs='+',
                        help='GPU device numbers [default: 0 1]')
    parser.add_argument('-d',
                        '--disconnect',
                        default=False,
                        action='store_true',
                        help='Run with disconnected LPUs [default: False]')
    args = parser.parse_args()

    file_name = None
    screen = False
    if args.log.lower() in ['file', 'both']:
        file_name = 'neurokernel.log'
    if args.log.lower() in ['screen', 'both']:
        screen = True
    logger = setup_logger(file_name=file_name, screen=screen)

    random.seed(0)
    c = not args.disconnect
    run(c)
Example #16
    def launch(self, user_id, task):
        neuron_uid_list = [str(a) for a in task['neuron_list']]

        conf_obj = get_config_obj()
        config = conf_obj.conf

        if config['Retina']['intype'] == 'Natural':
            coord_file = config['InputType']['Natural']['coord_file']
            tmp = os.path.splitext(coord_file)
            config['InputType']['Natural']['coord_file'] = '{}_{}{}'.format(
                tmp[0], user_id, tmp[1])

        setup_logger(file_name='neurokernel_' + user_id + '.log', screen=True)

        manager = core.Manager()

        lpus = {}
        patterns = {}
        G = task['data']
        with open('G.pickle', 'wb') as f:
            pickle.dump(G, f, protocol=pickle.HIGHEST_PROTOCOL)
        print(G)
        print(G.keys())
        print(G['LPU'])
        print(G['LPU'].keys())

        # get graph and output_uid_list for each LPU
        for k, lpu in G['LPU'].items():
            lpus[k] = {}
            g_lpu_na = create_graph_from_database_returned(lpu)
            lpu_nk_graph = nk.na_lpu_to_nk_new(g_lpu_na)
            lpus[k]['graph'] = lpu_nk_graph
            lpus[k]['output_uid_list'] = list(
                set(lpu_nk_graph.nodes()).intersection(set(neuron_uid_list)))
            lpus[k]['output_file'] = '{}_output_{}.h5'.format(k, user_id)

        for kkey, lpu in lpus.items():
            graph = lpu['graph']

            for uid, comp in graph.nodes.items():
                if 'attr_dict' in comp:
                    print('Found attr_dict; fixing...')
                    nx.set_node_attributes(graph, {uid: comp['attr_dict']})
                    # print('changed',uid)
                    graph.nodes[uid].pop('attr_dict')
            for i, j, k, v in graph.edges(keys=True, data=True):
                if 'attr_dict' in v:
                    for key in v['attr_dict']:
                        nx.set_edge_attributes(
                            graph, {(i, j, k): {
                                        key: v['attr_dict'][key]
                                    }})
                    graph.edges[(i, j, k)].pop('attr_dict')
            lpus[kkey]['graph'] = graph

        # get graph for each Pattern
        for k, pat in G['Pattern'].items():
            l1, l2 = k.split('-')
            if l1 in lpus and l2 in lpus:
                g_pattern_na = create_graph_from_database_returned(pat)
                pattern_nk = nk.na_pat_to_nk(g_pattern_na)
                print(lpus[l1]['graph'].nodes(data=True))
                lpu_ports = [node[1]['selector'] \
                             for node in lpus[l1]['graph'].nodes(data=True) \
                             if node[1]['class']=='Port'] + \
                            [node[1]['selector'] \
                             for node in lpus[l2]['graph'].nodes(data=True) \
                             if node[1]['class']=='Port']
                pattern_ports = pattern_nk.nodes()
                patterns[k] = {}
                patterns[k]['graph'] = pattern_nk.subgraph(
                    list(set(lpu_ports).intersection(set(pattern_ports))))

        dt = config['General']['dt']
        if 'dt' in task:
            dt = task['dt']
            print(dt)

        # add LPUs to manager
        for k, lpu in lpus.items():
            lpu_name = k
            graph = lpu['graph']

            for uid, comp in graph.nodes.items():
                if 'attr_dict' in comp:
                    nx.set_node_attributes(graph, {uid: comp['attr_dict']})
                    # print('changed',uid)
                    graph.nodes[uid].pop('attr_dict')
            for i, j, ko, v in graph.edges(keys=True, data=True):
                if 'attr_dict' in v:
                    for key in v['attr_dict']:
                        nx.set_edge_attributes(
                            graph, {(i, j, ko): {
                                        key: v['attr_dict'][key]
                                    }})
                    graph.edges[(i, j, ko)].pop('attr_dict')
            nx.write_gexf(graph, 'name.gexf')
            with open(lpu_name + '.pickle', 'wb') as f:
                pickle.dump(graph, f, protocol=pickle.HIGHEST_PROTOCOL)
            comps = graph.nodes.items()

            #for uid, comp in comps:
            #    if 'attr_dict' in comp:
            #        nx.set_node_attributes(graph, {uid: comp['attr_dict']})
            #        print('changed',uid)
            #    if 'class' in comp:

            if k == 'retina':
                prs = [node for node in graph.nodes(data=True) \
                       if node[1]['class'] == 'PhotoreceptorModel']
                for pr in prs:
                    graph.nodes[pr[0]]['num_microvilli'] = 3000
                input_processors = [
                    RetinaInputIndividual(config, prs, user_id)
                ]
                extra_comps = [PhotoreceptorModel]
                retina_input_uids = [a[0] for a in prs]
            elif k == 'EB':
                input_processor = StepInputProcessor('I', [node[0] for node in graph.nodes(data=True) \
                       if node[1]['class'] == 'LeakyIAF'], 40.0, 0.0, 1.0)
                input_processors = [input_processor]
                extra_comps = [BufferVoltage]
            else:
                input_processors = []
                extra_comps = [BufferVoltage]
            if 'inputProcessors' in task:
                input_processors = loadExperimentSettings(
                    task['inputProcessors'])
            output_processor = FileOutputProcessor(
                [('V', lpu['output_uid_list'])],
                lpu['output_file'],
                sample_interval=10)

            (comp_dict, conns) = LPU.graph_to_dicts(graph)

            # print(comp_dict)
            # print(conns)
            print(k)
            manager.add(LPU,
                        k,
                        dt,
                        comp_dict,
                        conns,
                        device=0,
                        input_processors=input_processors,
                        output_processors=[output_processor],
                        extra_comps=extra_comps,
                        debug=True)

        # connect LPUs by Patterns
        for k, pattern in patterns.items():
            l1, l2 = k.split('-')
            if l1 in lpus and l2 in lpus:
                print('Connecting {} and {}'.format(l1, l2))
                pat, key_order = Pattern.from_graph(nx.DiGraph(
                    pattern['graph']),
                                                    return_key_order=True)
                print(l1, l2)
                print(key_order)
                with Timer('update of connections in Manager'):
                    manager.connect(l1,
                                    l2,
                                    pat,
                                    int_0=key_order.index(l1),
                                    int_1=key_order.index(l2))

        # start simulation
        steps = config['General']['steps']
        ignored_steps = config['General']['ignored_steps']
        if 'steps' in task:
            steps = task['steps']
        if 'ignored_steps' in task:
            ignored_steps = task['ignored_steps']
        manager.spawn()
        manager.start(steps=steps)
        manager.wait()

        time.sleep(5)
        print(task)

        # post-process the inputs (hard-coded for now; could be organized better)
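        # time axes are downsampled by the output sample_interval of 10, hence dt * 10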
        inputs = {
            u'ydomain': 1.0,
            u'xdomain': dt * (steps - ignored_steps),
            u'dt': dt * 10,
            u'data': {}
        }
        if 'retina' in lpus:
            input_array = si.read_array('{}_{}.h5'.format(
                config['Retina']['input_file'], user_id))
            inputs[u'ydomain'] = input_array.max()
            for i, item in enumerate(retina_input_uids):
                inputs['data'][item] = np.hstack(
                    (np.arange(int((steps - ignored_steps) / 10)).reshape(
                        (-1, 1)) * dt * 10, input_array[ignored_steps::10,
                                                        i:i + 1])).tolist()

            del input_array

        # post-process the outputs from all LPUs and combine them into one dictionary
        result = {
            u'ydomain': 1,
            u'xdomain': dt * (steps - ignored_steps),
            u'dt': dt * 10,
            u'data': {}
        }

        for k, lpu in lpus.items():
            with h5py.File(lpu['output_file'], 'r') as output_file:
                uids = output_file['V']['uids'][:]
                output_array = output_file['V']['data'][:]
                for i, item in enumerate(uids):
                    output = output_array[int(ignored_steps / 10):, i:i + 1]
                    # tmp = output.max()-output.min()
                    # if tmp <= 0.01: #mV
                    #     output = (output - output.min()) + 0.5
                    # else:
                    #     output = (output - output.min())/tmp*0.9+0.1
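                    # pair each downsampled time point with its voltage sample: [[t, V], ...]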
                    result['data'][item] = np.hstack(
                        (np.arange(int((steps - ignored_steps) / 10)).reshape(
                            (-1, 1)) * dt * 10, output)).tolist()

        return inputs, result
Example #19
0
                        type=int,
                        help='Maximum number of steps [default: %s]' %
                        max_steps)
    parser.add_argument('-g',
                        '--gpus',
                        default=n_gpus,
                        type=int,
                        help='Number of GPUs [default: %s]' % n_gpus)
    parser.add_argument('-p',
                        '--use_mps',
                        action='store_true',
                        help='Use Multi-Process Service [default: False]')
    args = parser.parse_args()

    file_name = None
    screen = False
    if args.log.lower() in ['file', 'both']:
        file_name = 'neurokernel.log'
    if args.log.lower() in ['screen', 'both']:
        screen = True
    logger = setup_logger(file_name=file_name,
                          screen=screen,
                          mpi_comm=MPI.COMM_WORLD,
                          multiline=True)

    conn_mat = pd.read_excel(
        's2.xlsx', sheet_name='Connectivity Matrix').astype(int).to_numpy()

    print((args.gpus,) + emulate(conn_mat, args.scaling, args.gpus,
                                 args.max_steps, args.use_mps))
    def launch(self, user_id, task):
        neuron_uid_list = [str(a) for a in task['neuron_list']]

        conf_obj = get_config_obj()
        config = conf_obj.conf

        if config['Retina']['intype'] == 'Natural':
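            # give each user a private coord file by inserting the user id
            # before the file extension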
            coord_file = config['InputType']['Natural']['coord_file']
            tmp = os.path.splitext(coord_file)
            config['InputType']['Natural']['coord_file'] = '{}_{}{}'.format(
                tmp[0], user_id, tmp[1])

        setup_logger(file_name='neurokernel_' + user_id + '.log', screen=False)

        manager = core.Manager()

        lpus = {}
        patterns = {}
        G = task['success']['data']

        # get graph and output_uid_list for each LPU
        for k, lpu in G['LPU'].items():
            lpus[k] = {}
            g_lpu_na = create_graph_from_database_returned(lpu)
            lpu_nk_graph = nk.na_lpu_to_nk_new(g_lpu_na)
            lpus[k]['graph'] = lpu_nk_graph
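            # record only the requested neurons that actually exist in this LPU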
            lpus[k]['output_uid_list'] = list(
                set(lpu_nk_graph.nodes()).intersection(set(neuron_uid_list)))
            lpus[k]['output_file'] = '{}_output_{}.h5'.format(k, user_id)

        # get graph for each Pattern
        for k, pat in G['Pattern'].items():
            l1, l2 = k.split('-')
            if l1 in lpus and l2 in lpus:
                g_pattern_na = create_graph_from_database_returned(pat)
                pattern_nk = nk.na_pat_to_nk(g_pattern_na)
                lpu_ports = [node[1]['selector'] \
                             for node in lpus[l1]['graph'].nodes(data=True) \
                             if node[1]['class']=='Port'] + \
                            [node[1]['selector'] \
                             for node in lpus[l2]['graph'].nodes(data=True) \
                             if node[1]['class']=='Port']
                pattern_ports = pattern_nk.nodes()
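                # keep only the pattern ports that appear as Port nodes in either LPU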
                patterns[k] = {}
                patterns[k]['graph'] = pattern_nk.subgraph(
                    list(set(lpu_ports).intersection(set(pattern_ports))))

        dt = config['General']['dt']

        # add LPUs to manager
        for k, lpu in lpus.items():
            graph = lpu['graph']
            if k == 'retina':
                prs = [node for node in graph.nodes(data=True) \
                       if node[1]['class'] == 'PhotoreceptorModel']
                for pr in prs:
                    graph.nodes[pr[0]]['num_microvilli'] = 3000
                input_processors = [
                    RetinaInputIndividual(config, prs, user_id)
                ]
                extra_comps = [PhotoreceptorModel]
                retina_input_uids = [a[0] for a in prs]
            else:
                input_processors = []
                extra_comps = [BufferVoltage]
            output_processor = FileOutputProcessor(
                [('V', lpu['output_uid_list'])],
                lpu['output_file'],
                sample_interval=10)

            (comp_dict, conns) = LPU.graph_to_dicts(graph)

            manager.add(LPU,
                        k,
                        dt,
                        comp_dict,
                        conns,
                        device=0,
                        input_processors=input_processors,
                        output_processors=[output_processor],
                        extra_comps=extra_comps)

        # connect LPUs by Patterns
        for k, pattern in patterns.items():
            l1, l2 = k.split('-')
            if l1 in lpus and l2 in lpus:
                print('Connecting {} and {}'.format(l1, l2))
                pat, key_order = Pattern.from_graph(nx.DiGraph(
                    pattern['graph']), return_key_order=True)
                with Timer('update of connections in Manager'):
                    manager.connect(l1,
                                    l2,
                                    pat,
                                    int_0=key_order.index(l1),
                                    int_1=key_order.index(l2))

        # start simulation
        steps = config['General']['steps']
        ignored_steps = config['General']['ignored_steps']
        manager.spawn()
        manager.start(steps=steps)
        manager.wait()

        time.sleep(5)

        # post-process the inputs (hard-coded for now; could be organized better)
        inputs = {
            u'ydomain': 1.0,
            u'xdomain': dt * (steps - ignored_steps),
            u'dt': dt * 10,
            u'data': {}
        }
        if 'retina' in lpus:
            input_array = si.read_array('{}_{}.h5'.format(
                config['Retina']['input_file'], user_id))
            inputs[u'ydomain'] = input_array.max()
            for i, item in enumerate(retina_input_uids):
                inputs['data'][item] = np.hstack(
                    (np.arange(int((steps - ignored_steps) / 10)).reshape(
                        (-1, 1)) * dt * 10, input_array[ignored_steps::10,
                                                        i:i + 1])).tolist()

            del input_array

        # post-process the outputs from all LPUs and combine them into one dictionary
        result = {
            u'ydomain': 1,
            u'xdomain': dt * (steps - ignored_steps),
            u'dt': dt * 10,
            u'data': {}
        }

        for k, lpu in lpus.items():
            with h5py.File(lpu['output_file'], 'r') as output_file:
                uids = output_file['V']['uids'][:]
                output_array = output_file['V']['data'][:]
                for i, item in enumerate(uids):
                    output = output_array[int(ignored_steps / 10):, i:i + 1]
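                    # rescale each trace into roughly [0.1, 1.0] for display;
                    # near-flat traces (range <= 0.01 mV) are simply offset to 0.5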
                    tmp = output.max() - output.min()
                    if tmp <= 0.01:  #mV
                        output = (output - output.min()) + 0.5
                    else:
                        output = (output - output.min()) / tmp * 0.9 + 0.1
                    result['data'][item] = np.hstack(
                        (np.arange(int((steps - ignored_steps) / 10)).reshape(
                            (-1, 1)) * dt * 10, output)).tolist()

        return inputs, result
Example #21
0
                        type=int,
                        help='Number of steps [default: %s]' % steps)
    parser.add_argument('-g',
                        '--gpu_dev',
                        default=0,
                        type=int,
                        help='GPU device number [default: 0]')
    args = parser.parse_args()

    file_name = None
    screen = False
    if args.log.lower() in ['file', 'both']:
        file_name = 'neurokernel.log'
    if args.log.lower() in ['screen', 'both']:
        screen = True
    logger = setup_logger(file_name=file_name, screen=screen)

    man = core.Manager()

    G = nx.MultiDiGraph()

    # networkx 2.x no longer accepts a positional attribute dict,
    # so the attributes are expanded as keyword arguments
    G.add_node(
        'neuron0',
        **{
            'class': 'LeakyIAF',
            'name': 'LeakyIAF',
            'resting_potential': -70.0,
            'threshold': -45.0,
            'capacitance': 0.07,  # in mS
            'resistance': 0.2,  # in Ohm
        })
Example #22
0
def main():

	logging.basicConfig(level=logging.DEBUG, stream=sys.stdout,
	                    format='%(asctime)s %(name)s %(levelname)s %(message)s')
	logger = logging.getLogger('cx')



	sys.setrecursionlimit(10000)
	### Graph ###
	graph = Graph(Config.from_url(cx_db, 'admin', 'admin',
	                              initial_drop=False))
	graph.include(models.Node.registry)
	graph.include(models.Relationship.registry)


	### Retina ###
	config = retlam_demo.ConfigReader('retlam_default.cfg', '../template_spec.cfg').conf

	retina = get_retina(config)
	

	##### Configuration  ######
	logger = setup_logger(screen=True)
	lpu_selectors, to_list = get_cx_selectors_name()
	lpu_name_to_comp_dict, lpu_name_to_conn_list, pat_name_list, pat_name_to_pat = cx_component(graph)
	

	man = core.Manager()

	dt = 1e-4
	dur = 0.2
	steps = int(dur/dt)
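	# 0.2 s of simulated time at dt = 0.1 ms -> 2000 steps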
	debug = True

	lpu_name_list = ['BU', 'bu', 'EB', 'FB', 'PB']
	for name in lpu_name_list:
		input_processor = []
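		# (variable, None) pairs presumably select every uid for that variable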
		output_processor = [FileOutputProcessor(
		    [('spike_state', None), ('V', None), ('g', None), ('I', None)],
		    '{}_output.h5'.format(name), sample_interval=1)]

		man.add(LPU, name, dt, lpu_name_to_comp_dict[name],
	            lpu_name_to_conn_list[name],
	            input_processors = input_processor,
	            output_processors = output_processor,
	            device=0,
	            debug=debug, time_sync=False)


	retlam_demo.add_retina_LPU(config, 0, retina, man)
	logger.info('added retina LPU')

	for name in pat_name_list:
		id_0, id_1 = name.split('-')
		man.connect(id_0, id_1, pat_name_to_pat[name][0],
		            pat_name_to_pat[name][1].index('0'),
		            pat_name_to_pat[name][1].index('1'))

	logger.info('linked LPUs within the CX')

	link_retina_pat_cx(retina, lpu_selectors, to_list, man)
	logger.info('linked retina and CX LPUs')
	
	man.spawn()
	man.start(steps)
	man.wait()