def _select_sparse_matrix_format(self, proj):
    """
    Select the sparse matrix format which determines the fundamental structure
    for the connectivity representation. The choice depends on the model type
    (rate-coded vs. spiking) as well as on the hardware paradigm (openmp/cuda).

    :param proj: the Projection whose connectivity format is selected.
    :raises Global.InvalidConfiguration: if structural plasticity is enabled
        with a storage format other than LIL.
    :raises Global.CodeGeneratorException: if no implementation is available
        for the requested format/paradigm/ordering combination.

    Returns (str1, str2, str3, bool):

    * str1: include statement(s) for the sparse matrix format
    * str2: sparse matrix format declaration
    * str3: sparse matrix format arguments if needed (e. g. sizes)
    * bool: if the matrix is a complete (True) or sliced matrix (False)
    """
    if Global.config["structural_plasticity"] and proj._storage_format != "lil":
        raise Global.InvalidConfiguration("Structural plasticity is only allowed for LIL format.")

    # get preferred index type
    idx_type, _, size_type, _ = determine_idx_type_for_projection(proj)

    # PopulationViews store their connectivity relative to the full population,
    # therefore the size of the underlying population is used.
    def _effective_size(pop):
        return pop.population.size if isinstance(pop, PopulationView) else pop.size

    # ANNarchy supports a list of different formats to encode projections.
    # The general structure of the decision tree is:
    #
    # - rate-coded
    #     - formats
    #         - paradigm
    # - spike
    #     - formats
    #         - ordering
    #             - paradigm
    if proj.synapse_type.type == "rate":
        # Sanity check: pre_to_post ordering exists only for spiking models.
        if proj._storage_order == "pre_to_post":
            raise Global.CodeGeneratorException(" The storage_order 'pre_to_post' is invalid for rate-coded synapses (Projection: "+proj.name+")")

        # Check for the provided format + paradigm combination if a suitable
        # implementation is available.
        if proj._storage_format == "lil":
            if Global._check_paradigm("openmp"):
                # A single (complete) matrix is used for one thread or when
                # the user explicitly disabled matrix slicing.
                if Global.config['num_threads'] == 1 or proj._no_split_matrix:
                    sparse_matrix_format = "LILMatrix<"+idx_type+", "+size_type+">"
                    sparse_matrix_include = "#include \"LILMatrix.hpp\"\n"
                    single_matrix = True
                else:
                    sparse_matrix_format = "PartitionedMatrix< LILMatrix<"+idx_type+", "+size_type+">, "+idx_type+", "+size_type+">"
                    sparse_matrix_include = "#include \"PartitionedMatrix.hpp\"\n#include \"LILMatrix.hpp\"\n"
                    single_matrix = False
            else:
                raise Global.CodeGeneratorException(" No implementation assigned for rate-coded synapses using LIL and paradigm="+str(Global.config['paradigm'])+" (Projection: "+proj.name+")")

        elif proj._storage_format == "coo":
            if Global._check_paradigm("openmp"):
                sparse_matrix_format = "COOMatrix<"+idx_type+", "+size_type+">"
                sparse_matrix_include = "#include \"COOMatrix.hpp\"\n"
                single_matrix = True
            elif Global._check_paradigm("cuda"):
                sparse_matrix_format = "COOMatrixCUDA<"+idx_type+", "+size_type+">"
                sparse_matrix_include = "#include \"COOMatrixCUDA.hpp\"\n"
                single_matrix = True
            else:
                raise Global.CodeGeneratorException(" No implementation assigned for rate-coded synapses using COO and paradigm="+str(Global.config['paradigm'])+" (Projection: "+proj.name+")")

        elif proj._storage_format == "bsr":
            if Global._check_paradigm("openmp"):
                sparse_matrix_format = "BSRMatrix<"+idx_type+", "+size_type+", true>"
                sparse_matrix_include = "#include \"BSRMatrix.hpp\"\n"
                single_matrix = True
            elif Global._check_paradigm("cuda"):
                sparse_matrix_format = "BSRMatrixCUDA<"+idx_type+", "+size_type+">"
                sparse_matrix_include = "#include \"BSRMatrixCUDA.hpp\"\n"
                single_matrix = True
            else:
                raise Global.CodeGeneratorException(" No implementation assigned for rate-coded synapses using BSR and paradigm="+str(Global.config['paradigm'])+" (Projection: "+proj.name+")")

        elif proj._storage_format == "csr":
            if Global._check_paradigm("openmp"):
                sparse_matrix_format = "CSRMatrix<"+idx_type+", "+size_type+">"
                sparse_matrix_include = "#include \"CSRMatrix.hpp\"\n"
                single_matrix = True
            elif Global._check_paradigm("cuda"):
                sparse_matrix_format = "CSRMatrixCUDA<"+idx_type+", "+size_type+">"
                sparse_matrix_include = "#include \"CSRMatrixCUDA.hpp\"\n"
                single_matrix = True
            else:
                raise Global.CodeGeneratorException(" No implementation assigned for rate-coded synapses using CSR and paradigm="+str(Global.config['paradigm'])+" (Projection: "+proj.name+")")

        elif proj._storage_format == "ellr":
            if Global._check_paradigm("openmp"):
                sparse_matrix_format = "ELLRMatrix<"+idx_type+", "+size_type+">"
                sparse_matrix_include = "#include \"ELLRMatrix.hpp\"\n"
                single_matrix = True
            elif Global._check_paradigm("cuda"):
                sparse_matrix_format = "ELLRMatrixCUDA<"+idx_type+", "+size_type+">"
                sparse_matrix_include = "#include \"ELLRMatrixCUDA.hpp\"\n"
                single_matrix = True
            else:
                raise Global.CodeGeneratorException(" No implementation assigned for rate-coded synapses using ELLPACK-R and paradigm="+str(Global.config['paradigm'])+" (Projection: "+proj.name+")")

        elif proj._storage_format == "ell":
            if Global._check_paradigm("openmp"):
                sparse_matrix_format = "ELLMatrix<"+idx_type+", "+size_type+">"
                sparse_matrix_include = "#include \"ELLMatrix.hpp\"\n"
                single_matrix = True
            elif Global._check_paradigm("cuda"):
                sparse_matrix_format = "ELLMatrixCUDA<"+idx_type+", "+size_type+">"
                sparse_matrix_include = "#include \"ELLMatrixCUDA.hpp\"\n"
                single_matrix = True
            else:
                raise Global.CodeGeneratorException(" No implementation assigned for rate-coded synapses using ELLPACK and paradigm="+str(Global.config['paradigm'])+" (Projection: "+proj.name+")")

        elif proj._storage_format == "hyb":
            if Global._check_paradigm("openmp"):
                sparse_matrix_format = "HYBMatrix<"+idx_type+", "+size_type+", true>"
                sparse_matrix_include = "#include \"HYBMatrix.hpp\"\n"
                single_matrix = True
            elif Global._check_paradigm("cuda"):
                sparse_matrix_format = "HYBMatrixCUDA<"+idx_type+", "+size_type+">"
                sparse_matrix_include = "#include \"HYBMatrixCUDA.hpp\"\n"
                single_matrix = True
            else:
                raise Global.CodeGeneratorException(" No implementation assigned for rate-coded synapses using Hybrid (COO+ELL) and paradigm="+str(Global.config['paradigm'])+" (Projection: "+proj.name+")")

        elif proj._storage_format == "dense":
            # NOTE(review): the original code treats every non-openmp paradigm
            # as CUDA here (plain else, no paradigm check) — kept as-is.
            if Global._check_paradigm("openmp"):
                sparse_matrix_format = "DenseMatrix<"+idx_type+", "+size_type+", true>"
                sparse_matrix_include = "#include \"DenseMatrix.hpp\"\n"
                single_matrix = True
            else:
                sparse_matrix_format = "DenseMatrixCUDA<"+idx_type+", "+size_type+", true>"
                sparse_matrix_include = "#include \"DenseMatrixCUDA.hpp\"\n"
                single_matrix = True

        else:
            raise Global.CodeGeneratorException(" No implementation assigned for rate-coded synapses using '"+proj._storage_format+"' storage format (Projection: "+proj.name+")")

    elif proj.synapse_type.type == "spike":
        # Check for the provided format + paradigm
        # combination if it's availability
        if proj._storage_format == "lil":
            if proj._storage_order == "pre_to_post":
                raise Global.CodeGeneratorException(" The storage_order 'pre_to_post' is invalid for LIL representations (Projection: "+proj.name+")")

            if Global._check_paradigm("openmp"):
                if Global.config['num_threads'] == 1 or proj._no_split_matrix:
                    sparse_matrix_format = "LILInvMatrix<"+idx_type+", "+size_type+">"
                    sparse_matrix_include = "#include \"LILInvMatrix.hpp\"\n"
                    single_matrix = True
                else:
                    sparse_matrix_format = "PartitionedMatrix<LILInvMatrix<"+idx_type+", "+size_type+">, "+idx_type+", "+size_type+">"
                    sparse_matrix_include = "#include \"PartitionedMatrix.hpp\"\n#include \"LILInvMatrix.hpp\"\n"
                    single_matrix = False
            else:
                raise Global.CodeGeneratorException(" No implementation assigned for spiking synapses using LIL and paradigm="+str(Global.config['paradigm'])+ " (Projection: "+proj.name+")")

        elif proj._storage_format == "csr":
            if proj._storage_order == "post_to_pre":
                if Global._check_paradigm("openmp"):
                    sparse_matrix_format = "CSRCMatrix<"+idx_type+", "+size_type+">"
                    sparse_matrix_include = "#include \"CSRCMatrix.hpp\"\n"
                    single_matrix = True
                elif Global._check_paradigm("cuda"):
                    sparse_matrix_format = "CSRCMatrixCUDA<"+idx_type+", "+size_type+">"
                    sparse_matrix_include = "#include \"CSRCMatrixCUDA.hpp\"\n"
                    single_matrix = True
                else:
                    raise NotImplementedError
            else:
                if Global._check_paradigm("openmp"):
                    if Global.config['num_threads'] == 1 or proj._no_split_matrix:
                        sparse_matrix_format = "CSRCMatrixT<"+idx_type+", "+size_type+">"
                        sparse_matrix_include = "#include \"CSRCMatrixT.hpp\"\n"
                        single_matrix = True
                    else:
                        sparse_matrix_format = "PartitionedMatrix<CSRCMatrixT<"+idx_type+", "+size_type+">, "+idx_type+", "+size_type+">"
                        sparse_matrix_include = "#include \"PartitionedMatrix.hpp\"\n#include \"CSRCMatrixT.hpp\"\n"
                        single_matrix = False
                else:
                    raise NotImplementedError

        elif proj._storage_format == "dense":
            if proj._storage_order == "post_to_pre":
                if Global._check_paradigm("openmp"):
                    sparse_matrix_format = "DenseMatrix<"+idx_type+", "+size_type+", false>"
                    sparse_matrix_include = "#include \"DenseMatrix.hpp\"\n"
                    single_matrix = True
                else:
                    raise NotImplementedError
            else:
                raise NotImplementedError

        else:
            raise Global.CodeGeneratorException(" No implementation assigned for spiking synapses using '"+proj._storage_format+"' storage format (Projection: "+proj.name+")")

    else:
        raise Global.CodeGeneratorException(" Invalid synapse type " + proj.synapse_type.type)

    # HD (6th Oct 2020)
    # Currently I unified this by flipping the dimensions in CSRCMatrixT in the C++ code
    sparse_matrix_args = " %(post_size)s, %(pre_size)s" % {
        'pre_size': _effective_size(proj.pre),
        'post_size': _effective_size(proj.post)
    }

    if proj._storage_format == "bsr":
        # The block size can be provided by the user; otherwise a heuristic
        # determines it from the population sizes.
        if hasattr(proj, "_bsr_size"):
            sparse_matrix_args += ", " + str(proj._bsr_size)
        else:
            sparse_matrix_args += ", " + str(determine_bsr_blocksize(_effective_size(proj.pre), _effective_size(proj.post)))

    if Global.config['verbose']:
        print("Selected", sparse_matrix_format, "(", sparse_matrix_args, ")", "for projection ", proj.name, "and single_matrix =", single_matrix )

    return sparse_matrix_include, sparse_matrix_format, sparse_matrix_args, single_matrix
def _declaration_accessors(self, proj, single_matrix):
    """
    Generate declaration and accessor code for variables/parameters of the projection.

    :param proj: the Projection to generate code for.
    :param single_matrix: whether the connectivity is stored as one complete
        matrix (True) or as a sliced/partitioned matrix (False).
    :raises Global.CodeGeneratorException: if non-uniform delays are requested
        on a CUDA device.

    Returns:
        (dict, str): first return value contains declaration code and the
        second one the accessor code. The declaration dictionary has the
        following fields: declare_delay, init_delay, event_driven, rng,
        parameters_variables, additional
    """
    # create the code for non-specific projections
    declare_event_driven = ""
    declare_rng = ""
    declare_additional = ""

    # Delays
    if proj.max_delay > 1:
        if proj.uniform_delay > 1:
            key_delay = "uniform"
        else:
            # Non-uniform delays are implemented for OpenMP only.
            if Global._check_paradigm("cuda"):
                raise Global.CodeGeneratorException("Non-uniform delays on rate-coded or spiking synapses are not available for CUDA devices.")

            if proj.synapse_type.type == "rate":
                key_delay = "nonuniform_rate_coded"
            else:
                key_delay = "nonuniform_spiking"

        declare_delay = self._templates['delay'][key_delay]['declare']
        init_delay = self._templates['delay'][key_delay]['init']
    else:
        declare_delay = ""
        init_delay = ""

    # Code for declarations and accessors
    declare_parameters_variables, accessor = self._generate_default_get_set(proj, single_matrix)

    # If no psp is defined, it's event-driven
    has_event_driven = any(var['method'] == 'event-driven' for var in proj.synapse_type.description['variables'])
    if has_event_driven:
        declare_event_driven = self._templates['event_driven']['declare']

    # Arrays for the random numbers
    if len(proj.synapse_type.description['random_distributions']) > 0:
        declare_rng += """
    // Random numbers
"""
        for rd in proj.synapse_type.description['random_distributions']:
            declare_rng += self._templates['rng'][rd['locality']]['decl'] % {
                'rd_name' : rd['name'],
                'type': rd['ctype'],
                'float_prec': Global.config['precision'],
                'template': rd['template'] % {'float_prec': Global.config['precision']}
            }

    # Structural plasticity
    if Global.config['structural_plasticity']:
        declare_parameters_variables += self._header_structural_plasticity(proj)

    # Specific projections can overwrite
    if 'declare_parameters_variables' in proj._specific_template.keys():
        declare_parameters_variables = proj._specific_template['declare_parameters_variables']
    if 'access_parameters_variables' in proj._specific_template.keys():
        accessor = proj._specific_template['access_parameters_variables']
    if 'declare_rng' in proj._specific_template.keys():
        declare_rng = proj._specific_template['declare_rng']
    if 'declare_event_driven' in proj._specific_template.keys():
        declare_event_driven = proj._specific_template['declare_event_driven']
    if 'declare_additional' in proj._specific_template.keys():
        declare_additional = proj._specific_template['declare_additional']

    # Finalize the declarations
    declaration = {
        'declare_delay': declare_delay,
        'init_delay': init_delay,
        'event_driven': declare_event_driven,
        'rng': declare_rng,
        'parameters_variables': declare_parameters_variables,
        'additional': declare_additional
    }

    return declaration, accessor