Example #1
    def compile(self, duration, dt=None, steps=None, in_list=None,
                record=('V', 'spike_state', 'I'), extra_comps=None,
                input_filename='neuroballad_temp_model_input.h5',
                output_filename='neuroballad_temp_model_output.h5',
                graph_filename='neuroballad_temp_graph.gexf.gz',
                device=0, sample_interval=1):
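        # Resolve the simulation time grid: whichever of dt/steps was omitted
        # is derived here from duration before the time vector t is built.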
        if dt is not None:
            if steps is not None:
                assert dt*steps == duration, 'dt*steps != duration'
            else:
                steps = int(duration/dt)
            t = np.linspace(0, duration, steps)
        else:
            if steps is not None:
                t = np.linspace(0, duration, steps)
                dt = t[1] - t[0]
            else:
                raise ValueError('dt and steps cannot both be None')
        self.config = self.config._replace(duration=duration,
                                           steps=steps,
                                           dt=dt,
                                           t=t,
                                           device=device)
        # compile inputs
        if in_list is None:
            in_list = self._inputs
        uids = []
        for i in in_list:
            uids.append(self.encode_name(str(i.node_id),
                                         experiment_name=i.experiment_name))
        input_vars = []
        for i in in_list:
            if isinstance(i.var, list):
                for j in i.var:
                    input_vars.append(j)
            else:
                input_vars.append(i.var)
        input_vars = list(set(input_vars))
        uids = np.array(list(set(uids)), dtype='S')
        Is = {}
        Inodes = {}
        for i in input_vars:
            Inodes[i] = []
        for i in in_list:
            in_name = self.encode_name(str(i.node_id),
                                       experiment_name=i.experiment_name)
            if in_name not in self.G.nodes():
                raise ValueError(
                    'Input node {} not found in Circuit.'.format(in_name))

            if isinstance(i.var, list):
                for j in i.var:
                    Inodes[j].append(
                        self.encode_name(str(i.node_id),
                                         experiment_name=i.experiment_name))
            else:
                Inodes[i.var].append(
                    self.encode_name(str(i.node_id),
                                     experiment_name=i.experiment_name))
        for i in input_vars:
            Inodes[i] = np.array(list(set(Inodes[i])), dtype='S')
        for i in input_vars:
            Is[i] = np.zeros((self.config.steps, len(Inodes[i])),
                             dtype=self.dtype)

        for i in in_list:
            if isinstance(i.var, list):
                for j in i.var:
                    Is[j] = i.add(self, Inodes[j], Is[j], t, var=j)
            else:
                Is[i.var] = i.add(self, Inodes[i.var], Is[i.var], t, var=i.var)

        with h5py.File(input_filename, 'w') as f:
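            # One '<var>/uids' and one '<var>/data' dataset is written per
            # input variable; FileInputProcessor consumes this layout below.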
            for i in input_vars:
                # print(i + '/uids')
                i_nodes = Inodes[i]
                """
                try:
                    i_nodes = [i.decode('ascii') for i in i_nodes]
                except:
                    pass
                i_nodes = [self.encode_name(i) for i in i_nodes]
                """
                i_nodes = np.array(i_nodes, dtype='S')
                f.create_dataset(i + '/uids', data=i_nodes)
                f.create_dataset(i + '/data', (self.config.steps, len(Inodes[i])),
                                 dtype=self.dtype,
                                 data=Is[i])

        if graph_filename is not None:
            nx.write_gexf(self.G, graph_filename)

        from neurokernel.core_gpu import Manager
        from neurokernel.LPU.LPU import LPU
        import neurokernel.mpi_relaunch
        from neurokernel.LPU.InputProcessors.FileInputProcessor import  \
            FileInputProcessor
        from neurokernel.LPU.OutputProcessors.FileOutputProcessor import \
            FileOutputProcessor

        input_processor = FileInputProcessor(input_filename)
        (comp_dict, conns) = LPU.graph_to_dicts(self.G)
        output_processor = FileOutputProcessor([(i, None) for i in list(record)],
                                               output_filename,
                                               sample_interval=sample_interval)
        self.manager = Manager()
        self.manager.add(LPU, self.experiment_name, self.config.dt,
                         comp_dict, conns,
                         device=self.config.device,
                         input_processors=[input_processor],
                         output_processors=[output_processor],
                         debug=False,
                         extra_comps=extra_comps if extra_comps is not None else [])
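
The opening branch of `compile` fixes the simulation time grid from `duration` plus whichever of `dt`/`steps` the caller supplies. The same resolution logic, pulled out into a small standalone helper, may make the precedence rules easier to follow; this helper is only an illustration and is not part of neuroballad:

import numpy as np

def resolve_time_grid(duration, dt=None, steps=None):
    """Mirror the dt/steps handling in compile(): derive whichever of dt or
    steps was omitted and return (dt, steps, t)."""
    if dt is not None:
        if steps is not None:
            assert dt * steps == duration, 'dt*steps != duration'
        else:
            steps = int(duration / dt)
        t = np.linspace(0, duration, steps)
    else:
        if steps is None:
            raise ValueError('dt and steps cannot both be None')
        t = np.linspace(0, duration, steps)
        dt = t[1] - t[0]
    return dt, steps, t

dt, steps, t = resolve_time_grid(duration=1.0, dt=1e-4)     # steps derived as int(duration/dt)
dt, steps, t = resolve_time_grid(duration=1.0, steps=1000)  # dt derived as t[1] - t[0]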
Example #2
    def compile(self,
                duration,
                dt=None,
                steps=None,
                in_list=None,
                record=('V', 'spike_state', 'I'),
                extra_comps=None,
                input_filename='neuroballad_temp_model_input.h5',
                output_filename='neuroballad_temp_model_output.h5',
                graph_filename='neuroballad_temp_graph.gexf.gz',
                device=0,
                sample_interval=1,
                execute_in_same_thread=True):
        """
        Compiles a neuroballad circuit before execution.

        # Arguments
            duration (float): Simulation duration.
            dt (float): Time step size.
            steps (int): Number of simulation steps. Optional; ignored if dt is provided.
            in_list (list): List of inputs to use during compilation.
            record (tuple): Tuple of variables to record. Defaults to ('V', 'spike_state', 'I').
            extra_comps (list): List of new, custom components to include for your simulation.
            input_filename (str): The .h5 file name to use for the input.
            output_filename (str): The .h5 file name to use for recording the output.
            graph_filename (str): Name of the graph file to save the circuit to. Uses the .gexf format.
            device (int): Device to use for execution.
            sample_interval (int): Sampling interval for recording simulation output.
            execute_in_same_thread (bool): Whether to execute the circuit in the current thread.
        """
        if dt is not None:
            if steps is not None:
                warnings.warn(
                    "Both 'steps' and 'dt' were specified; the 'steps' argument is ignored."
                )
            steps = int(duration / dt)
            t = np.linspace(0, duration, steps)
        else:
            if steps is not None:
                t = np.linspace(0, duration, steps)
                dt = t[1] - t[0]
            else:
                raise ValueError('dt and steps cannot both be None')
        self.config = self.config._replace(duration=duration,
                                           steps=steps,
                                           dt=dt,
                                           t=t,
                                           device=device)

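        # Persist the basic run parameters to disk so they can be reloaded
        # outside this method (e.g. by a separate runner process when
        # execute_in_same_thread is False).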
        run_parameters = [duration, dt]
        with open('run_parameters.pickle', 'wb') as f:
            pickle.dump(run_parameters, f, protocol=pickle.HIGHEST_PROTOCOL)
        # Compile inputs
        if in_list is None:
            in_list = self._inputs
        uids = []
        for i in in_list:
            uids.append(
                self.encode_name(str(i.node_id),
                                 experiment_name=i.experiment_name))
        input_vars = []
        for i in in_list:
            if isinstance(i.var, list):
                for j in i.var:
                    input_vars.append(j)
            else:
                input_vars.append(i.var)
        input_vars = list(set(input_vars))
        uids = np.array(list(set(uids)), dtype='S')
        Is = {}
        Inodes = {}
        for i in input_vars:
            Inodes[i] = []
        for i in in_list:
            in_name = self.encode_name(str(i.node_id),
                                       experiment_name=i.experiment_name)
            if in_name not in self.G.nodes():
                raise ValueError(
                    'Input node {} not found in Circuit.'.format(in_name))

            if isinstance(i.var, list):
                for j in i.var:
                    Inodes[j].append(
                        self.encode_name(str(i.node_id),
                                         experiment_name=i.experiment_name))
            else:
                Inodes[i.var].append(
                    self.encode_name(str(i.node_id),
                                     experiment_name=i.experiment_name))
        for i in input_vars:
            Inodes[i] = np.array(list(set(Inodes[i])), dtype='S')
        for i in input_vars:
            Is[i] = np.zeros((self.config.steps, len(Inodes[i])),
                             dtype=self.dtype)

        for i in in_list:
            if isinstance(i.var, list):
                for j in i.var:
                    Is[j] = i.add(self, Inodes[j], Is[j], t, var=j)
            else:
                Is[i.var] = i.add(self, Inodes[i.var], Is[i.var], t, var=i.var)

        with h5py.File(input_filename, 'w') as f:
            for i in input_vars:
                # print(i + '/uids')
                i_nodes = Inodes[i]
                """
                try:
                    i_nodes = [i.decode('ascii') for i in i_nodes]
                except:
                    pass
                i_nodes = [self.encode_name(i) for i in i_nodes]
                """
                i_nodes = np.array(i_nodes, dtype='S')
                f.create_dataset(i + '/uids', data=i_nodes)
                f.create_dataset(i + '/data',
                                 (self.config.steps, len(Inodes[i])),
                                 dtype=self.dtype,
                                 data=Is[i])

        recorders = []
        for i in record:
            recorders.append((i, None))
        with open('record_parameters.pickle', 'wb') as f:
            pickle.dump(recorders, f, protocol=pickle.HIGHEST_PROTOCOL)

        if graph_filename is not None:
            nx.write_gexf(self.G, graph_filename)

        if execute_in_same_thread:
            from neurokernel.core_gpu import Manager
            from neurokernel.LPU.LPU import LPU
            # import neurokernel.mpi_relaunch
            from neurokernel.LPU.InputProcessors.FileInputProcessor import  \
                FileInputProcessor
            from neurokernel.LPU.OutputProcessors.FileOutputProcessor import \
                FileOutputProcessor

            input_processor = FileInputProcessor(input_filename)
            (comp_dict, conns) = LPU.graph_to_dicts(self.G)
            output_processor = FileOutputProcessor(
                [(i, None) for i in list(record)],
                output_filename,
                sample_interval=sample_interval)
            self.manager = Manager()
            self.manager.add(
                LPU,
                self.experiment_name,
                self.config.dt,
                comp_dict,
                conns,
                device=self.config.device,
                input_processors=[input_processor],
                output_processors=[output_processor],
                debug=False,
                extra_comps=extra_comps if extra_comps is not None else [])
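
When `execute_in_same_thread` is False, the neurokernel imports and the `Manager` setup at the end are skipped: the call only writes the input HDF5 file, the GEXF graph, and the two pickle files. A separate runner process could recover that state as in the sketch below; the runner itself is hypothetical and not part of the example above:

import pickle
import h5py

# Reload the parameters that compile() pickled for out-of-process execution.
with open('run_parameters.pickle', 'rb') as f:
    duration, dt = pickle.load(f)
with open('record_parameters.pickle', 'rb') as f:
    recorders = pickle.load(f)   # list of (variable, None) tuples

# Inspect the generated input file: one '<var>/uids' and '<var>/data'
# dataset pair per input variable.
with h5py.File('neuroballad_temp_model_input.h5', 'r') as f:
    for var in f:
        print(var, f[var]['uids'].shape, f[var]['data'].shape)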
Example #3
def emulate(n_lpu, n_spike, n_gpot, steps):
    """
    Benchmark inter-LPU communication throughput.

    Each LPU is configured to use a different local GPU.

    Parameters
    ----------
    n_lpu : int
        Number of LPUs. Must be at least 2 and no greater than the number of
        local GPUs.
    n_spike : int
        Total number of input and output spiking ports any
        single LPU exposes to any other LPU. Each LPU will therefore
        have 2*n_spike*(n_lpu-1) total spiking ports.
    n_gpot : int
        Total number of input and output graded potential ports any
        single LPU exposes to any other LPU. Each LPU will therefore
        have 2*n_gpot*(n_lpu-1) total graded potential ports.
    steps : int
        Number of steps to execute.

    Returns
    -------
    avg_sync_time : float
        Average per-step synchronization time in seconds.
    total_time : float
        Wall-clock time of the entire run, including setup, in seconds.
    main_time : float
        Wall-clock time from starting execution to completion in seconds.
    exec_time : float
        Execution time reported by the manager in seconds.

    # Time everything starting with manager initialization:
    start_all = time.time()

    # Check whether a sufficient number of GPUs are available:
    drv.init()
    if n_lpu > drv.Device.count():
        raise RuntimeError('insufficient number of available GPUs.')

    # Set up manager:
    man = Manager()

    # Generate selectors for configuring modules and patterns:
    mod_sels, pat_sels = gen_sels(n_lpu, n_spike, n_gpot)

    # Set up modules:
    for i in xrange(n_lpu):
        lpu_i = 'lpu%s' % i
        sel, sel_in, sel_out, sel_gpot, sel_spike = mod_sels[lpu_i]
        sel = Selector.union(sel_in, sel_out, sel_gpot, sel_spike)
        man.add(MyModule, lpu_i, sel, sel_in, sel_out,
                sel_gpot, sel_spike,
                None, None, ['interface', 'io', 'type'],
                CTRL_TAG, GPOT_TAG, SPIKE_TAG,
                device=i, time_sync=True)

    # Set up connections between module pairs:
    for i, j in itertools.combinations(xrange(n_lpu), 2):
        lpu_i = 'lpu%s' % i
        lpu_j = 'lpu%s' % j
        sel_from, sel_to, sel_in_i, sel_out_i, sel_gpot_i, sel_spike_i, \
            sel_in_j, sel_out_j, sel_gpot_j, sel_spike_j = pat_sels[(lpu_i, lpu_j)]
        pat = Pattern.from_concat(sel_from, sel_to,
                                  from_sel=sel_from, to_sel=sel_to, data=1)
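        # Tag each selector with the pattern interface it belongs to
        # (0 = lpu_i side, 1 = lpu_j side) and with its I/O direction and
        # port type: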
        pat.interface[sel_in_i, 'interface', 'io'] = [0, 'in']
        pat.interface[sel_out_i, 'interface', 'io'] = [0, 'out']
        pat.interface[sel_gpot_i, 'interface', 'type'] = [0, 'gpot']
        pat.interface[sel_spike_i, 'interface', 'type'] = [0, 'spike']
        pat.interface[sel_in_j, 'interface', 'io'] = [1, 'in']
        pat.interface[sel_out_j, 'interface', 'io'] = [1, 'out']
        pat.interface[sel_gpot_j, 'interface', 'type'] = [1, 'gpot']
        pat.interface[sel_spike_j, 'interface', 'type'] = [1, 'spike']
        man.connect(lpu_i, lpu_j, pat, 0, 1, compat_check=False)

    man.spawn()
    start_main = time.time()
    man.start(steps)
    man.wait()
    stop_main = time.time()
    return man.average_step_sync_time, (time.time()-start_all), \
        (stop_main-start_main), (man.stop_time-man.start_time)
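
A minimal driver for this benchmark might look like the sketch below; the argument values are arbitrary and the original script's command-line parsing is not reproduced:

if __name__ == '__main__':
    # Two LPUs, 100 spiking and 100 graded-potential ports exposed between
    # each pair, 500 execution steps; requires at least two local GPUs.
    avg_sync, total_time, main_time, exec_time = emulate(n_lpu=2, n_spike=100,
                                                         n_gpot=100, steps=500)
    print('average step sync time: %s s' % avg_sync)
    print('total time:             %s s' % total_time)
    print('main loop time:         %s s' % main_time)
    print('exec time:              %s s' % exec_time)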
Example #4
    def setUp(self):
        self.man = Manager()
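
Example #4 only shows the fixture. A complete test case built around it might look like the following sketch; the import path for `Manager` and the test body are assumptions, since the original test class is not shown:

import unittest

from neurokernel.core_gpu import Manager   # assumed import path

class ManagerTestCase(unittest.TestCase):
    def setUp(self):
        self.man = Manager()

    def test_manager_constructed(self):
        # Placeholder assertion; the original suite's test methods are not shown.
        self.assertIsInstance(self.man, Manager)

if __name__ == '__main__':
    unittest.main()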