args = parser.parse_args() dt = 1e-4 dur = 1.4 Nt = int(dur / dt) file_name = None screen = False if args.log.lower() in ['file', 'both']: file_name = 'neurokernel.log' if args.log.lower() in ['screen', 'both']: screen = True logger = base.setup_logger(file_name, screen) if args.port_data is None and args.port_ctrl is None: port_data = get_random_port() port_ctrl = get_random_port() else: port_data = args.port_data port_ctrl = args.port_ctrl man = core.Manager(port_data, port_ctrl) man.add_brok() # Load configurations for lamina, medulla and antennal lobe models: (n_dict_al, s_dict_al) = LPU.lpu_parser('./data/antennallobe.gexf.gz') lpu_al = LPU(dt, n_dict_al, s_dict_al, input_file='./data/olfactory_input.h5', output_file='antennallobe_output.h5',
def emulate(n_lpu, n_spike, n_gpot, steps):
    """
    Benchmark inter-LPU communication throughput.

    Each LPU is configured to use a different local GPU.

    Parameters
    ----------
    n_lpu : int
        Number of LPUs. Must be at least 2 and no greater than the number of
        local GPUs.
    n_spike : int
        Total number of input and output spiking ports any single LPU exposes
        to any other LPU. Each LPU will therefore have 2*n_spike*(n_lpu-1)
        total spiking ports.
    n_gpot : int
        Total number of input and output graded potential ports any single LPU
        exposes to any other LPU. Each LPU will therefore have
        2*n_gpot*(n_lpu-1) total graded potential ports.
    steps : int
        Number of steps to execute.

    Returns
    -------
    average_throughput : float
        Average per-step received data throughput in bytes/second, i.e.
        element 0 of ``Manager.get_throughput()``.
    total_time : float
        Wall-clock time in seconds for the entire run, including manager,
        module, and pattern setup.
    exec_time : float
        Wall-clock time in seconds spent between ``man.start`` and
        ``man.stop`` (the execution phase only).
    total_throughput : float
        Element 3 of ``Manager.get_throughput()`` — presumably the total
        received data throughput in bytes/second; confirm against the
        Manager implementation.
    """
    # NOTE: the original docstring documented a 3-tuple return value
    # (average_throughput, total_throughput, exec_time); the function has
    # always returned the 4 values described above, in that order.

    # Time everything starting with manager initialization:
    start_all = time.time()

    # Check whether a sufficient number of GPUs are available:
    drv.init()
    if n_lpu > drv.Device.count():
        raise RuntimeError('insufficient number of available GPUs.')

    # Set up manager and broker:
    man = Manager(get_random_port(), get_random_port(), get_random_port())
    man.add_brok()

    # Generate selectors for configuring modules and patterns:
    mod_sels, pat_sels = gen_sels(n_lpu, n_spike, n_gpot)

    # Set up modules, one per GPU (device=i):
    for i in xrange(n_lpu):
        lpu_i = 'lpu%s' % i
        sel, sel_in, sel_out, sel_gpot, sel_spike = mod_sels[lpu_i]
        m = MyModule(sel, sel_in, sel_out, sel_gpot, sel_spike,
                     port_data=man.port_data, port_ctrl=man.port_ctrl,
                     port_time=man.port_time,
                     id=lpu_i, device=i, debug=args.debug)
        man.add_mod(m)

    # Set up connections between every unordered pair of modules:
    for i, j in itertools.combinations(xrange(n_lpu), 2):
        lpu_i = 'lpu%s' % i
        lpu_j = 'lpu%s' % j
        sel_from, sel_to, sel_in_i, sel_out_i, sel_gpot_i, sel_spike_i, \
            sel_in_j, sel_out_j, sel_gpot_j, sel_spike_j = \
            pat_sels[(lpu_i, lpu_j)]
        pat = Pattern.from_concat(sel_from, sel_to,
                                  from_sel=sel_from, to_sel=sel_to, data=1)
        # Mark port direction and type on both sides (0 and 1) of the pattern:
        pat.interface[sel_in_i, 'interface', 'io'] = [0, 'in']
        pat.interface[sel_out_i, 'interface', 'io'] = [0, 'out']
        pat.interface[sel_gpot_i, 'interface', 'type'] = [0, 'gpot']
        pat.interface[sel_spike_i, 'interface', 'type'] = [0, 'spike']
        pat.interface[sel_in_j, 'interface', 'io'] = [1, 'in']
        pat.interface[sel_out_j, 'interface', 'io'] = [1, 'out']
        pat.interface[sel_gpot_j, 'interface', 'type'] = [1, 'gpot']
        pat.interface[sel_spike_j, 'interface', 'type'] = [1, 'spike']
        # compat_check disabled — the generated selectors are assumed
        # consistent by construction:
        man.connect(man.modules[lpu_i], man.modules[lpu_j], pat, 0, 1,
                    compat_check=False)

    # Time only the execution phase:
    start_main = time.time()
    man.start(steps=steps)
    man.stop()
    stop_main = time.time()

    t = man.get_throughput()
    return t[0], (time.time()-start_all), (stop_main-start_main), t[3]
GEXF_FILE = 'retina.gexf.gz' INPUT_FILE = 'vision_input.h5' IMAGE_FILE = 'image1.mat' OUTPUT_FILE = 'retina_output.h5' if args.input: print('Generating input of model from image file') generate_input(INPUT_FILE, IMAGE_FILE, args.num_layers) if args.gexf: print('Writing retina lpu') n = args.num_layers photoreceptor_num = 6*(3*n*(n+1)+1) generate_gexf(GEXF_FILE, photoreceptor_num) if args.port_data is None and args.port_ctrl is None: port_data = get_random_port() port_ctrl = get_random_port() else: port_data = args.port_data port_ctrl = args.port_ctrl man = core.Manager(port_data, port_ctrl) man.add_brok() print('Parsing lpu data') n_dict_ret, s_dict_ret = LPU.lpu_parser(GEXF_FILE) print('Initializing LPU') lpu_ret = LPU(dt, n_dict_ret, s_dict_ret, input_file=INPUT_FILE, output_file=OUTPUT_FILE, port_ctrl=port_ctrl, port_data=port_data, device=args.ret_dev, id='retina',
def run(connected):
    """
    Run the two generic LPUs, optionally connecting their public spiking
    neurons with alpha-function synapses.

    Parameters
    ----------
    connected : bool
        If True, build a Connectivity object linking every public spiking
        neuron in LPU 0 to every public spiking neuron in LPU 1; the output
        file names are suffixed 'co' (connected) or 'un' (unconnected)
        accordingly.

    NOTE(review): relies on module-level globals `args` and `dt` — assumed to
    be set by the surrounding script before this is called.
    """
    # Pick random ports unless both were given on the command line:
    if args.port_data is None and args.port_ctrl is None:
        port_data = get_random_port()
        port_ctrl = get_random_port()
    else:
        port_data = args.port_data
        port_ctrl = args.port_ctrl
    out_name = 'un' if not connected else 'co'
    man = core.Manager(port_data, port_ctrl)
    man.add_brok()

    # Parse both generic LPU definitions from GEXF:
    lpu_file_0 = './data/generic_lpu_0.gexf.gz'
    lpu_file_1 = './data/generic_lpu_1.gexf.gz'
    (n_dict_0, s_dict_0) = LPU.lpu_parser(lpu_file_0)
    (n_dict_1, s_dict_1) = LPU.lpu_parser(lpu_file_1)

    ge_0_id = 'ge_0'
    ge_0 = LPU(dt, n_dict_0, s_dict_0,
               input_file='./data/generic_input_0.h5',
               output_file='generic_output_0_%s.h5' % out_name,
               port_ctrl=port_ctrl, port_data=port_data,
               device=args.gpu_dev[0], id=ge_0_id,
               debug=args.debug)
    man.add_mod(ge_0)
    ge_1_id = 'ge_1'
    ge_1 = LPU(dt, n_dict_1, s_dict_1,
               input_file='./data/generic_input_1.h5',
               output_file='generic_output_1_%s.h5' % out_name,
               port_ctrl=port_ctrl, port_data=port_data,
               device=args.gpu_dev[1], id=ge_1_id,
               debug=args.debug)
    man.add_mod(ge_1)

    # Connect the public neurons in the two LPUs:
    df_neu_0, df_syn_0 = neurokernel.tools.graph.graph_to_df(nx.read_gexf(lpu_file_0))
    df_neu_1, df_syn_1 = neurokernel.tools.graph.graph_to_df(nx.read_gexf(lpu_file_1))

    # Number of public neurons in each LPU (boolean masks on the
    # 'spiking'/'public' node attribute columns):
    N_spike_0 = len(df_neu_0[(df_neu_0['spiking']==True)&(df_neu_0['public']==True)])
    N_gpot_0 = len(df_neu_0[(df_neu_0['spiking']==False)&(df_neu_0['public']==True)])
    N_spike_1 = len(df_neu_1[(df_neu_1['spiking']==True)&(df_neu_1['public']==True)])
    N_gpot_1 = len(df_neu_1[(df_neu_1['spiking']==False)&(df_neu_1['public']==True)])

    # Alpha function synaptic parameters:
    alphasynapse_type_params = {'AlphaSynapse': ['ad', 'ar', 'gmax', 'id',
                                                 'class', 'conductance',
                                                 'reverse']}

    if connected:
        conn = core.Connectivity(N_gpot_0, N_spike_0, N_gpot_1, N_spike_1, 1,
                                 ge_0.id, ge_1.id, alphasynapse_type_params)
        # All-to-all spike connections from LPU 0 to LPU 1; each pair gets
        # one AlphaSynapse with the fixed parameters below.
        # NOTE(review): `id` shadows the builtin of the same name — harmless
        # here but worth renaming if this code is touched again.
        for id, (i, j) in enumerate(itertools.product(xrange(N_spike_0),
                                                      xrange(N_spike_1))):
            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j] = 1
            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j, 0, 'name'] = \
                'int_0to1_%s_%s' % (i, j)
            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j, 0, 'model'] = \
                'AlphaSynapse'
            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j, 0, 'ad'] = 0.19*1000
            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j, 0, 'ar'] = 1.1*100
            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j, 0, 'class'] = 0
            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j, 0, 'conductance'] = True
            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j, 0, 'gmax'] = 0.003
            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j, 0, 'id'] = id
            conn[ge_0_id, 'spike', i, ge_1_id, 'spike', j, 0, 'reverse'] = 0.065
        man.connect(ge_0, ge_1, conn)

    man.start(steps=args.steps)
    man.stop()
def run_test(m0_sel_in_gpot, m0_sel_in_spike, m0_sel_out_gpot, m0_sel_out_spike,
             m1_sel_in_gpot, m1_sel_in_spike, m1_sel_out_gpot, m1_sel_out_spike):
    """
    Check that data transmitted by module m0 is received intact by module m1.

    Each argument is a selector of graded-potential or spiking ports for one
    direction ('in'/'out') of one of the two test modules. The sentinel
    ``((),)`` denotes an empty selector and is skipped when wiring
    connections.
    """
    # Create test module classes with a queue installed in the destination
    # module to check that data was correctly propagated:
    class TestModule0(TestModule):
        def __init__(self, *args, **kwargs):
            super(TestModule0, self).__init__(*args, **kwargs)
            # Inter-process queue used to expose the transmitted data to the
            # parent process after execution:
            self.q = Queue()

        def run_step(self):
            self.log_info('saving data to queue before run step')
            # Snapshot (copy) the outbound gpot/spike data destined for 'm1'
            # before the step runs:
            if self.steps > 0:
                self.q.put(
                    (self.pm['gpot'][self._out_port_dict['gpot']['m1']].copy(),
                     self.pm['spike'][self._out_port_dict['spike']['m1']].copy()))
            super(TestModule0, self).run_step()

    class TestModule1(TestModule):
        def __init__(self, *args, **kwargs):
            super(TestModule1, self).__init__(*args, **kwargs)
            self.q = Queue()

        def run_step(self):
            super(TestModule1, self).run_step()
            self.log_info('saving data to queue after run step')
            # Snapshot the inbound gpot/spike data received from 'm0' after
            # the step runs:
            if self.steps > 0:
                self.q.put(
                    (self.pm['gpot'][self._in_port_dict['gpot']['m0']].copy(),
                     self.pm['spike'][self._in_port_dict['spike']['m0']].copy()))

    # Combined selectors and initial port data: m0 transmits ones, m1 starts
    # with zeros so propagation is observable:
    m0_sel_gpot = m0_sel_in_gpot + m0_sel_out_gpot
    m0_sel_spike = m0_sel_in_spike + m0_sel_out_spike
    m0_sel = m0_sel_in_gpot + m0_sel_in_spike + m0_sel_out_gpot + m0_sel_out_spike
    m0_data_gpot = np.ones(len(m0_sel_gpot), np.double)
    m0_data_spike = np.ones(len(m0_sel_spike), np.int32)
    m1_sel_gpot = m1_sel_in_gpot + m1_sel_out_gpot
    m1_sel_spike = m1_sel_in_spike + m1_sel_out_spike
    m1_sel = m1_sel_in_gpot + m1_sel_in_spike + m1_sel_out_gpot + m1_sel_out_spike
    m1_data_gpot = np.zeros(len(m1_sel_gpot), np.double)
    m1_data_spike = np.zeros(len(m1_sel_spike), np.int32)

    # Instantiate manager and broker:
    man = Manager(get_random_port(), get_random_port(), get_random_port())
    man.add_brok()

    # Add modules:
    m0 = TestModule0(m0_sel, m0_sel_in_gpot, m0_sel_in_spike,
                     m0_sel_out_gpot, m0_sel_out_spike,
                     m0_data_gpot, m0_data_spike,
                     man.port_data, man.port_ctrl, man.port_time, id='m0')
    man.add_mod(m0)
    m1 = TestModule1(m1_sel, m1_sel_in_gpot, m1_sel_in_spike,
                     m1_sel_out_gpot, m1_sel_out_spike,
                     m1_data_gpot, m1_data_spike,
                     man.port_data, man.port_ctrl, man.port_time, id='m1')
    man.add_mod(m1)

    # Connect the modules:
    pat = Pattern(m0_sel, m1_sel)
    pat.interface[m0_sel_in_gpot] = [0, 'in', 'gpot']
    pat.interface[m0_sel_out_gpot] = [0, 'out', 'gpot']
    pat.interface[m0_sel_in_spike] = [0, 'in', 'spike']
    pat.interface[m0_sel_out_spike] = [0, 'out', 'spike']
    pat.interface[m1_sel_in_gpot] = [1, 'in', 'gpot']
    pat.interface[m1_sel_out_gpot] = [1, 'out', 'gpot']
    pat.interface[m1_sel_in_spike] = [1, 'in', 'spike']
    pat.interface[m1_sel_out_spike] = [1, 'out', 'spike']
    # Wire m0's output ports to m1's input ports pairwise, skipping the
    # empty-selector sentinel ((),):
    for sel_from, sel_to in zip(m0_sel_out_gpot, m1_sel_in_gpot):
        if not (sel_from == ((), ) or sel_to == ((), )):
            pat[sel_from, sel_to] = 1
    for sel_from, sel_to in zip(m0_sel_out_spike, m1_sel_in_spike):
        if not (sel_from == ((), ) or sel_to == ((), )):
            pat[sel_from, sel_to] = 1
    man.connect(m0, m1, pat, 0, 1)

    # Execute exactly two steps; m0 transmits data during the first step, which
    # should be received by m1 during the second step:
    man.start(steps=2)
    man.stop()

    # Forcibly terminate all processes that are still alive:
    if m0.is_alive():
        m0.terminate()
    if m1.is_alive():
        m1.terminate()
    for b in man.brokers.values():
        if b.is_alive():
            b.terminate()

    # Check that data was propagated correctly:
    m0_data_gpot_after, m0_data_spike_after = m0.q.get()
    m1_data_gpot_after, m1_data_spike_after = m1.q.get()
    assert all(m0_data_gpot_after == m1_data_gpot_after)
    assert all(m0_data_spike_after == m1_data_spike_after)
def run_test(m0_sel_in_gpot, m0_sel_in_spike, m0_sel_out_gpot, m0_sel_out_spike,
             m1_sel_in_gpot, m1_sel_in_spike, m1_sel_out_gpot, m1_sel_out_spike):
    """
    Verify that port data emitted by module m0 arrives unmodified at module m1.

    Each argument is a selector of graded-potential or spiking ports for one
    direction of one of the two test modules; the sentinel ``((),)`` marks an
    empty selector and is not wired up.
    """
    # Source module: snapshots its outbound data (via a multiprocessing
    # queue) just before each step executes.
    class TestModule0(TestModule):
        def __init__(self, *args, **kwargs):
            super(TestModule0, self).__init__(*args, **kwargs)
            self.q = Queue()

        def run_step(self):
            self.log_info('saving data to queue before run step')
            if self.steps > 0:
                sent_gpot = self.pm['gpot'][self._out_port_dict['gpot']['m1']].copy()
                sent_spike = self.pm['spike'][self._out_port_dict['spike']['m1']].copy()
                self.q.put((sent_gpot, sent_spike))
            super(TestModule0, self).run_step()

    # Destination module: snapshots its inbound data just after each step.
    class TestModule1(TestModule):
        def __init__(self, *args, **kwargs):
            super(TestModule1, self).__init__(*args, **kwargs)
            self.q = Queue()

        def run_step(self):
            super(TestModule1, self).run_step()
            self.log_info('saving data to queue after run step')
            if self.steps > 0:
                recvd_gpot = self.pm['gpot'][self._in_port_dict['gpot']['m0']].copy()
                recvd_spike = self.pm['spike'][self._in_port_dict['spike']['m0']].copy()
                self.q.put((recvd_gpot, recvd_spike))

    # Combined selectors; m0's ports carry ones while m1's start at zero so
    # that successful propagation is detectable:
    m0_sel_gpot = m0_sel_in_gpot+m0_sel_out_gpot
    m0_sel_spike = m0_sel_in_spike+m0_sel_out_spike
    m0_sel = m0_sel_in_gpot+m0_sel_in_spike+m0_sel_out_gpot+m0_sel_out_spike
    m0_data_gpot = np.ones(len(m0_sel_gpot), np.double)
    m0_data_spike = np.ones(len(m0_sel_spike), np.int32)
    m1_sel_gpot = m1_sel_in_gpot+m1_sel_out_gpot
    m1_sel_spike = m1_sel_in_spike+m1_sel_out_spike
    m1_sel = m1_sel_in_gpot+m1_sel_in_spike+m1_sel_out_gpot+m1_sel_out_spike
    m1_data_gpot = np.zeros(len(m1_sel_gpot), np.double)
    m1_data_spike = np.zeros(len(m1_sel_spike), np.int32)

    # Set up manager, broker, and the two modules:
    man = Manager(get_random_port(), get_random_port(), get_random_port())
    man.add_brok()

    m0 = TestModule0(m0_sel, m0_sel_in_gpot, m0_sel_in_spike,
                     m0_sel_out_gpot, m0_sel_out_spike,
                     m0_data_gpot, m0_data_spike,
                     man.port_data, man.port_ctrl, man.port_time, id='m0')
    man.add_mod(m0)
    m1 = TestModule1(m1_sel, m1_sel_in_gpot, m1_sel_in_spike,
                     m1_sel_out_gpot, m1_sel_out_spike,
                     m1_data_gpot, m1_data_spike,
                     man.port_data, man.port_ctrl, man.port_time, id='m1')
    man.add_mod(m1)

    # Describe both faces of the connection pattern in one table:
    pat = Pattern(m0_sel, m1_sel)
    for sel, attrs in ((m0_sel_in_gpot, [0, 'in', 'gpot']),
                       (m0_sel_out_gpot, [0, 'out', 'gpot']),
                       (m0_sel_in_spike, [0, 'in', 'spike']),
                       (m0_sel_out_spike, [0, 'out', 'spike']),
                       (m1_sel_in_gpot, [1, 'in', 'gpot']),
                       (m1_sel_out_gpot, [1, 'out', 'gpot']),
                       (m1_sel_in_spike, [1, 'in', 'spike']),
                       (m1_sel_out_spike, [1, 'out', 'spike'])):
        pat.interface[sel] = attrs

    # Wire m0's outputs to m1's inputs pairwise for both port types,
    # skipping the empty-selector sentinel:
    for out_sels, in_sels in ((m0_sel_out_gpot, m1_sel_in_gpot),
                              (m0_sel_out_spike, m1_sel_in_spike)):
        for sel_from, sel_to in zip(out_sels, in_sels):
            if sel_from != ((),) and sel_to != ((),):
                pat[sel_from, sel_to] = 1
    man.connect(m0, m1, pat, 0, 1)

    # Execute exactly two steps; m0 transmits data during the first step, which
    # should be received by m1 during the second step:
    man.start(steps=2)
    man.stop()

    # Forcibly terminate all processes that are still alive:
    for proc in [m0, m1]+man.brokers.values():
        if proc.is_alive():
            proc.terminate()

    # Compare what m0 sent against what m1 received:
    m0_data_gpot_after, m0_data_spike_after = m0.q.get()
    m1_data_gpot_after, m1_data_spike_after = m1.q.get()
    assert all(m0_data_gpot_after == m1_data_gpot_after)
    assert all(m0_data_spike_after == m1_data_spike_after)
def run(connected):
    """
    Execute the two generic LPUs, optionally linking every public spiking
    neuron of LPU 0 to every public spiking neuron of LPU 1 via alpha-function
    synapses.

    Parameters
    ----------
    connected : bool
        If True, build and install the all-to-all spike connectivity; output
        file names are suffixed 'co' (connected) or 'un' (unconnected).

    NOTE(review): reads module-level globals `args` and `dt`, assumed to be
    initialized by the enclosing script.
    """
    # Choose random ports unless both were supplied on the command line:
    if args.port_data is None and args.port_ctrl is None:
        port_data = get_random_port()
        port_ctrl = get_random_port()
    else:
        port_data = args.port_data
        port_ctrl = args.port_ctrl

    out_name = 'co' if connected else 'un'
    man = core.Manager(port_data, port_ctrl)
    man.add_brok()

    # Parse both LPU definitions and register them with the manager:
    lpu_file_0 = './data/generic_lpu_0.gexf.gz'
    lpu_file_1 = './data/generic_lpu_1.gexf.gz'
    n_dict_0, s_dict_0 = LPU.lpu_parser(lpu_file_0)
    n_dict_1, s_dict_1 = LPU.lpu_parser(lpu_file_1)

    ge_0_id = 'ge_0'
    ge_0 = LPU(dt, n_dict_0, s_dict_0,
               input_file='./data/generic_input_0.h5',
               output_file='generic_output_0_%s.h5' % out_name,
               port_ctrl=port_ctrl, port_data=port_data,
               device=args.gpu_dev[0], id=ge_0_id, debug=args.debug)
    man.add_mod(ge_0)

    ge_1_id = 'ge_1'
    ge_1 = LPU(dt, n_dict_1, s_dict_1,
               input_file='./data/generic_input_1.h5',
               output_file='generic_output_1_%s.h5' % out_name,
               port_ctrl=port_ctrl, port_data=port_data,
               device=args.gpu_dev[1], id=ge_1_id, debug=args.debug)
    man.add_mod(ge_1)

    # Connect the public neurons in the two LPUs:
    df_neu_0, df_syn_0 = \
        neurokernel.tools.graph.graph_to_df(nx.read_gexf(lpu_file_0))
    df_neu_1, df_syn_1 = \
        neurokernel.tools.graph.graph_to_df(nx.read_gexf(lpu_file_1))

    # Count public neurons of each kind in a node-attribute dataframe:
    def count_public(df_neu, spiking):
        return len(df_neu[(df_neu['spiking'] == spiking) &
                          (df_neu['public'] == True)])

    N_spike_0 = count_public(df_neu_0, True)
    N_gpot_0 = count_public(df_neu_0, False)
    N_spike_1 = count_public(df_neu_1, True)
    N_gpot_1 = count_public(df_neu_1, False)

    # Alpha function synaptic parameters:
    alphasynapse_type_params = {
        'AlphaSynapse': ['ad', 'ar', 'gmax', 'id', 'class', 'conductance',
                         'reverse']
    }

    if connected:
        conn = core.Connectivity(N_gpot_0, N_spike_0, N_gpot_1, N_spike_1, 1,
                                 ge_0.id, ge_1.id, alphasynapse_type_params)

        # All-to-all spike connections from LPU 0 to LPU 1, each realized by
        # one AlphaSynapse with fixed parameters and a unique integer id:
        syn_id = 0
        for i in xrange(N_spike_0):
            for j in xrange(N_spike_1):
                key = (ge_0_id, 'spike', i, ge_1_id, 'spike', j)
                conn[key] = 1
                for pname, pval in (('name', 'int_0to1_%s_%s' % (i, j)),
                                    ('model', 'AlphaSynapse'),
                                    ('ad', 0.19 * 1000),
                                    ('ar', 1.1 * 100),
                                    ('class', 0),
                                    ('conductance', True),
                                    ('gmax', 0.003),
                                    ('id', syn_id),
                                    ('reverse', 0.065)):
                    conn[key + (0, pname)] = pval
                syn_id += 1
        man.connect(ge_0, ge_1, conn)

    man.start(steps=args.steps)
    man.stop()
def run(connected):
    """
    Run the two generic LPUs, optionally wiring random output-to-input port
    connections between them.

    Parameters
    ----------
    connected : bool
        If True, randomly pair output ports of LPU 0 with input ports of LPU 1
        (spike-to-spike and gpot-to-gpot) via a connection pattern; output
        file names are suffixed 'co' (connected) or 'un' (unconnected).

    NOTE(review): reads module-level globals `args` and `dt`, assumed to be
    set up by the enclosing script. The pairing uses `random.sample`, so
    results depend on the global RNG state.
    """
    # Each of the three ports falls back to a random free port if not given:
    if args.port_data is None:
        port_data = get_random_port()
    else:
        port_data = args.port_data
    if args.port_ctrl is None:
        port_ctrl = get_random_port()
    else:
        port_ctrl = args.port_ctrl
    if args.port_time is None:
        port_time = get_random_port()
    else:
        port_time = args.port_time

    out_name = 'un' if not connected else 'co'
    man = core.Manager(port_data, port_ctrl, port_time)
    man.add_brok()

    # Parse both LPU definitions and register them with the manager:
    lpu_file_0 = './data/generic_lpu_0.gexf.gz'
    lpu_file_1 = './data/generic_lpu_1.gexf.gz'
    (n_dict_0, s_dict_0) = LPU.lpu_parser(lpu_file_0)
    (n_dict_1, s_dict_1) = LPU.lpu_parser(lpu_file_1)

    lpu_0_id = 'lpu_0'
    lpu_0 = LPU(dt, n_dict_0, s_dict_0,
                input_file='./data/generic_lpu_0_input.h5',
                output_file='generic_lpu_0_%s_output.h5' % out_name,
                port_ctrl=port_ctrl, port_data=port_data,
                port_time=port_time,
                device=args.gpu_dev[0], id=lpu_0_id,
                debug=args.debug, time_sync=args.time_sync)
    man.add_mod(lpu_0)

    lpu_1_id = 'lpu_1'
    lpu_1 = LPU(dt, n_dict_1, s_dict_1,
                input_file='./data/generic_lpu_1_input.h5',
                output_file='generic_lpu_1_%s_output.h5' % out_name,
                port_ctrl=port_ctrl, port_data=port_data,
                port_time=port_time,
                device=args.gpu_dev[1], id=lpu_1_id,
                debug=args.debug, time_sync=args.time_sync)
    man.add_mod(lpu_1)

    # Create random connections between the input and output ports if the LPUs
    # are to be connected:
    if connected:
        # Find all output and input port selectors in each LPU:
        out_ports_0 = lpu_0.interface.out_ports().to_selectors()
        out_ports_1 = lpu_1.interface.out_ports().to_selectors()
        in_ports_0 = lpu_0.interface.in_ports().to_selectors()
        in_ports_1 = lpu_1.interface.in_ports().to_selectors()

        # The same, broken down by port type (spiking vs. graded potential):
        out_ports_spk_0 = lpu_0.interface.out_ports().spike_ports().to_selectors()
        out_ports_gpot_0 = lpu_0.interface.out_ports().gpot_ports().to_selectors()
        out_ports_spk_1 = lpu_1.interface.out_ports().spike_ports().to_selectors()
        out_ports_gpot_1 = lpu_1.interface.out_ports().gpot_ports().to_selectors()
        in_ports_spk_0 = lpu_0.interface.in_ports().spike_ports().to_selectors()
        in_ports_gpot_0 = lpu_0.interface.in_ports().gpot_ports().to_selectors()
        in_ports_spk_1 = lpu_1.interface.in_ports().spike_ports().to_selectors()
        in_ports_gpot_1 = lpu_1.interface.in_ports().gpot_ports().to_selectors()

        # Initialize a connectivity pattern between the two sets of port
        # selectors:
        pat = pattern.Pattern(','.join(out_ports_0+in_ports_0),
                              ','.join(out_ports_1+in_ports_1))

        # Create connections from the ports with identifiers matching the output
        # ports of one LPU to the ports with identifiers matching the input
        # ports of the other LPU. The number of pairs is limited by the
        # smaller of the two port sets:
        N_conn_spk_0_1 = min(len(out_ports_spk_0), len(in_ports_spk_1))
        N_conn_gpot_0_1 = min(len(out_ports_gpot_0), len(in_ports_gpot_1))
        for src, dest in zip(random.sample(out_ports_spk_0, N_conn_spk_0_1),
                             random.sample(in_ports_spk_1, N_conn_spk_0_1)):
            pat[src, dest] = 1
            pat.interface[src, 'type'] = 'spike'
            pat.interface[dest, 'type'] = 'spike'
        for src, dest in zip(random.sample(out_ports_gpot_0, N_conn_gpot_0_1),
                             random.sample(in_ports_gpot_1, N_conn_gpot_0_1)):
            pat[src, dest] = 1
            pat.interface[src, 'type'] = 'gpot'
            pat.interface[dest, 'type'] = 'gpot'
        man.connect(lpu_0, lpu_1, pat, 0, 1)

    man.start(steps=args.steps)
    man.stop()