def create_connectivity(self, conn_type):
        """
        This function (re-) creates the network connectivity.
        """

        # distribute the cell ids among involved processes
        (n_src, n_tgt, self.tp_src, self.tp_tgt) = utils.resolve_src_tgt_with_tp(conn_type, self.params)

        print 'Connect anisotropic %s - %s' % (conn_type[0].capitalize(), conn_type[1].capitalize())

        gid_tgt_min, gid_tgt_max = utils.distribute_n(n_tgt, self.n_proc, self.pc_id)
        print 'Process %d deals with target GIDS %d - %d' % (self.pc_id, gid_tgt_min, gid_tgt_max)
        gid_src_min, gid_src_max = utils.distribute_n(n_src, self.n_proc, self.pc_id)
        print 'Process %d deals with source GIDS %d - %d' % (self.pc_id, gid_src_min, gid_src_max)
        n_my_tgts = gid_tgt_max - gid_tgt_min

        # data structure for connection storage
        self.target_adj_list = [ [] for i in xrange(n_my_tgts)]

        n_src_cells_per_neuron = int(round(self.params['p_%s' % conn_type] * n_src))

        # compute all pairwise connection probabilities
        for i_, tgt in enumerate(range(gid_tgt_min, gid_tgt_max)):
            if (i_ % 20) == 0:
                print '%.2f percent complete' % (i_ / float(n_my_tgts) * 100.)
            p = np.zeros(n_src)
            latency = np.zeros(n_src)
            for src in xrange(n_src):
                if conn_type[0] == conn_type[1] and src == tgt:
                    continue # no self-connections within the same population
                # for different populations, equal indices mean different cells
                p[src], latency[src] = CC.get_p_conn(self.tp_src[src, :], self.tp_tgt[tgt, :],
                                                     self.params['w_sigma_x'], self.params['w_sigma_v'],
                                                     self.params['connectivity_radius'])
            # sort the connection probabilities and select the sources to connect
            sorted_indices = np.argsort(p)
            if conn_type[0] == 'e':
                sources = sorted_indices[-n_src_cells_per_neuron:]
            else:
                if conn_type == 'ii':
                    sources = sorted_indices[1:n_src_cells_per_neuron+1]  # shift indices to avoid self-connection, because p_ii = .0
                else:
                    sources = sorted_indices[:n_src_cells_per_neuron]
            # normalize so that each target cell receives a fixed total input weight
            w = (self.params['w_tgt_in_per_cell_%s' % conn_type] / p[sources].sum()) * p[sources]
            for i in xrange(len(sources)):
                if w[i] > self.params['w_thresh_connection']:
                    # map the delay into the valid range
                    delay = min(max(latency[sources[i]] * self.params['delay_scale'],
                                    self.params['delay_range'][0]),
                                self.params['delay_range'][1])
                    # store the connection in the adjacency list of this local target cell
                    # (note: the delay is computed here but not stored in the list)
                    self.target_adj_list[i_].append(sources[i])


        # communicate the resulting target_adj_list to the root process
        self.send_list_to_root(self.target_adj_list)

    def __init__(self, params, comm=None):
        self.params = params
        self.comm = comm
        self.n_speeds = params["n_speeds"]
        self.n_cycles = params["n_cycles"]
        self.n_directions = params["n_theta"]
        self.n_iterations_total = (
            self.params["n_theta"]
            * self.params["n_speeds"]
            * self.params["n_cycles"]
            * self.params["n_stim_per_direction"]
        )
        self.selected_conns = None
        self.n_time_steps = self.params["t_sim"] / self.params["dt_rate"]

        # distribute units among processors
        if comm is not None:
            self.pc_id, self.n_proc = comm.rank, comm.size
            my_units = utils.distribute_n(params["n_exc"], self.n_proc, self.pc_id)
            self.my_units = range(my_units[0], my_units[1])
        else:
            self.my_units = range(self.params["n_exc"])
            self.pc_id, self.n_proc = 0, 1

        try:
            self.tuning_prop = np.loadtxt(self.params["tuning_prop_means_fn"])
        except IOError:
            print "Tuning properties file not found: %s\nWill create new ones" % self.params["tuning_prop_means_fn"]
            self.tuning_prop = utils.set_tuning_prop(self.params, mode="hexgrid", cell_type="exc")
            np.savetxt(self.params["tuning_prop_means_fn"], self.tuning_prop)

        if comm is not None:
            comm.barrier()

        self.initial_value = 1e-2  # should be around 1 / n_units per HC, i.e. 1. / (params['N_theta'] * params['N_V'])
        self.eps = 0.1 * self.initial_value
        self.normalize = False  # normalize input within a 'hypercolumn'

        all_conns = []
        # distribute connections among processors
        for i in xrange(params["n_exc"]):
            for j in xrange(params["n_exc"]):
                if i != j:
                    all_conns.append((i, j))
        self.my_conns = utils.distribute_list(all_conns, self.n_proc, self.pc_id)

        # setup data structures
        self.my_conns = np.array(self.my_conns)
        np.savetxt("delme_my_conns_%d.txt" % self.pc_id, self.my_conns, fmt="%d\t%d")
        self.pre_ids = np.unique(self.my_conns[:, 0])
        self.post_ids = np.unique(self.my_conns[:, 1])
        self.gid_idx_map_pre = {}
        self.gid_idx_map_post = {}
        for i in xrange(self.pre_ids.size):
            self.gid_idx_map_pre[self.pre_ids[i]] = i
        for i in xrange(self.post_ids.size):
            self.gid_idx_map_post[self.post_ids[i]] = i
        self.my_selected_conns = []
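Note: utils.distribute_n and utils.distribute_list are used throughout these examples but their implementations are not shown. The following is a minimal sketch of what such helpers presumably do, assuming an even block distribution of items over processes (hypothetical code, not the actual utils module):

def distribute_n(n, n_proc, pc_id):
    """Return the (min, max) index range that process pc_id is responsible
    for when n items are split as evenly as possible over n_proc processes."""
    n_per_proc = n // n_proc
    remainder = n % n_proc
    if pc_id < remainder:
        # the first `remainder` processes take one extra item each
        i_min = pc_id * (n_per_proc + 1)
        i_max = i_min + n_per_proc + 1
    else:
        i_min = remainder * (n_per_proc + 1) + (pc_id - remainder) * n_per_proc
        i_max = i_min + n_per_proc
    return i_min, i_max

def distribute_list(all_items, n_proc, pc_id):
    """Return the contiguous slice of all_items that process pc_id handles."""
    i_min, i_max = distribute_n(len(all_items), n_proc, pc_id)
    return all_items[i_min:i_max]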
Example #3
    def set_my_gids(self, cell_type):
        """
        If run on multiple cores, distribute the GIDs among the processes
        """
        gid_min, gid_max = utils.distribute_n(self.params['n_%s' % cell_type],
                                              self.n_proc, self.pc_id)
        self.my_gids[cell_type] = (gid_min + self.params['%s_offset' % cell_type],
                                   gid_max + self.params['%s_offset' % cell_type])
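For illustration: with hypothetical parameters n_exc = 1000 and exc_offset = 0 on two processes, the even split sketched above would give process 0 my_gids['exc'] == (0, 500) and process 1 my_gids['exc'] == (500, 1000).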
Example #4
def compute_weights_convergence_constrained(tuning_prop, params, comm=None):
    """
    This function computes for each target the X % of source cells which have the highest
    connection probability to the target cell.

    Arguments:
        tuning_prop: 2 dimensional array with shape (n_cells, 4)
            tp[:, 0] : x-position
            tp[:, 1] : y-position
            tp[:, 2] : u-position (speed in x-direction)
            tp[:, 3] : v-position (speed in y-direction)
    """
    if comm is not None:
        pc_id, n_proc = comm.rank, comm.size
        comm.barrier()
    else:
        pc_id, n_proc = 0, 1
    gid_min, gid_max = utils.distribute_n(params['n_exc'], n_proc, pc_id)
    sigma_x, sigma_v = params['w_sigma_x'], params['w_sigma_v'] # small sigma values let p and w shrink
    (delay_min, delay_max) = params['delay_range']
    output_fn = params['conn_list_ee_conv_constr_fn_base'] + 'pid%d.dat' % (pc_id)
    print "Proc %d computes initial weights for gids (%d, %d) to file %s" % (pc_id, gid_min, gid_max, output_fn)
    conn_file = open(output_fn, 'w')
    my_cells = range(gid_min, gid_max)
    output = ''
    cnt = 0
    for tgt in my_cells:
        p = np.zeros(params['n_exc'])
        latency = np.zeros(params['n_exc'])
        for src in xrange(params['n_exc']):
            if (src != tgt):
                p[src], latency[src] = get_p_conn(tuning_prop[src, :], tuning_prop[tgt, :],
                                                  sigma_x, sigma_v, params['connectivity_radius'])
        sorted_indices = np.argsort(p)
        sources = sorted_indices[-params['n_src_cells_per_neuron']:] 
        w = params['w_tgt_in'] / p[sources].sum() * p[sources]
#        w = utils.linear_transformation(w, params['w_min'], params['w_max'])
        for i in xrange(len(sources)):
#            w[i] = max(params['w_min'], min(w[i], params['w_max']))
            src = sources[i]
            delay = min(max(latency[src], delay_min), delay_max)  # map the delay into the valid range
            d_ij = utils.euclidean(tuning_prop[src, :], tuning_prop[tgt, :])
            output += '%d\t%d\t%.2e\t%.2e\t%.2e\n' % (src, tgt, w[i], delay, d_ij)
            cnt += 1

    print 'PID %d Writing %d connections to file: %s' % (pc_id, cnt, output_fn)
    conn_file.write(output)
    conn_file.close()

    if comm is not None:
        comm.barrier()
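get_p_conn (CC.get_p_conn above) is not shown in these examples. Below is a minimal sketch of a distance- and velocity-dependent Gaussian connection probability, consistent with the w_sigma_x / w_sigma_v / connectivity_radius parameters used above; the actual model (e.g. an anisotropic term favoring sources whose preferred motion points towards the target) may well differ:

import numpy as np

def get_p_conn(tp_src, tp_tgt, sigma_x, sigma_v, radius):
    """Connection probability and latency between two cells from their
    tuning properties (x, y, u, v); a hypothetical isotropic Gaussian."""
    dx = tp_tgt[0] - tp_src[0]
    dy = tp_tgt[1] - tp_src[1]
    d_pos = np.sqrt(dx**2 + dy**2)
    if d_pos > radius:  # outside the connectivity radius
        return 0., 0.
    du = tp_tgt[2] - tp_src[2]
    dv = tp_tgt[3] - tp_src[3]
    d_vel = np.sqrt(du**2 + dv**2)
    p = np.exp(-d_pos**2 / (2 * sigma_x**2)) * np.exp(-d_vel**2 / (2 * sigma_v**2))
    # latency ~ distance / speed of the source cell (hypothetical)
    v_src = max(np.sqrt(tp_src[2]**2 + tp_src[3]**2), 1e-6)
    latency = d_pos / v_src
    return p, latency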
Example #5
    def prepare_spiketrains(self, tuning_prop):
        if isinstance(tuning_prop, str):
            tp = np.loadtxt(tuning_prop)
        elif isinstance(tuning_prop, np.ndarray):
            tp = tuning_prop
        else:
            raise TypeError('Only filename or numpy array accepted for tuning_prop, given %s' % str(type(tuning_prop)))

        my_units = utils.distribute_n(self.params['n_exc'], self.n_proc, self.pc_id)

        # write input to the paths defined in the params dictionary
        input_spike_trains = utils.create_spike_trains_for_motion(tp, self.params, contrast=.9, my_units=my_units)

        if self.comm is not None:
            self.comm.barrier()
Example #6
full_system = False
if len(sys.argv) > 1:
    if sys.argv[1] == 'full':
        full_system = True

tau_dict = {'tau_zi' : 50.,    'tau_zj' : 5., 
            'tau_ei' : 50.,   'tau_ej' : 50., 'tau_eij' : 50.,
            'tau_pi' : 500.,  'tau_pj' : 500., 'tau_pij' : 500.,
            }

PS = simulation_parameters.parameter_storage()
params = PS.params
PS.create_folders()
PS.write_parameters_to_file()

n_cells = params['n_exc']
my_units = utils.distribute_n(n_cells, n_proc, pc_id) # pc_id and n_proc come from the MPI setup (cf. Example #7)

mp = params['motion_params']

# P R E P A R E     T U N I N G    P R O P E R T I E S
tuning_prop = utils.set_tuning_prop(params, mode='hexgrid', v_max=params['v_max'])
np.savetxt(params['tuning_prop_means_fn'], tuning_prop)

# load
#tuning_prop = np.loadtxt(params['tuning_prop_means_fn'])

# P R E P A R E     I N P U T 
#prepare_input(tuning_prop, params, my_units)
#if comm != None:
#    comm.barrier()
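utils.set_tuning_prop is likewise not shown. A hypothetical sketch that returns an (n_cells, 4) array with columns (x, y, u, v) as documented in Example #4; a square grid stands in for the actual hexgrid layout, and the random velocity assignment is an assumption:

import numpy as np

def set_tuning_prop(params, mode='hexgrid', v_max=1.0, rng=np.random):
    """Place n_exc cells on a regular spatial grid and assign each a random
    preferred velocity; returns an (n_cells, 4) array (x, y, u, v)."""
    n = params['n_exc']
    n_per_row = int(np.ceil(np.sqrt(n)))
    tp = np.zeros((n, 4))
    for i in xrange(n):
        tp[i, 0] = (i % n_per_row) / float(n_per_row)   # x in [0, 1)
        tp[i, 1] = (i // n_per_row) / float(n_per_row)  # y in [0, 1)
        theta = rng.uniform(0., 2. * np.pi)             # preferred direction
        speed = rng.uniform(0., v_max)                  # preferred speed
        tp[i, 2] = speed * np.cos(theta)                # u
        tp[i, 3] = speed * np.sin(theta)                # v
    return tp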
Example #7
print 'n_cells=%d\tn_exc=%d\tn_inh=%d' % (params['n_cells'], params['n_exc'], params['n_inh'])
print 'Blur', params['blur_X'], params['blur_V']

scale_input_frequency = False
if scale_input_frequency:
    scaling_factor = utils.scale_input_frequency(params['blur_X'])
    params['f_max_stim'] *= scaling_factor

try:
    from mpi4py import MPI
    USE_MPI = True
    comm = MPI.COMM_WORLD
    pc_id, n_proc = comm.rank, comm.size
    print "USE_MPI:", USE_MPI, 'pc_id, n_proc:', pc_id, n_proc
except ImportError:
    USE_MPI = False
    pc_id, n_proc, comm = 0, 1, None
    print "MPI not used"

try:
    tuning_prop = np.loadtxt(params['tuning_prop_means_fn'])
except IOError:
    print 'File with tuning properties missing: %s\nPlease run: \nmpirun -np [N] python prepare_tuning_prop.py\nOR\npython prepare_tuning_prop.py' % params['tuning_prop_means_fn']
    exit(1)

my_units = utils.distribute_n(params['n_exc'], n_proc, pc_id)
# write input spike trains to the paths defined in the params dictionary
# (seed is assumed to be defined earlier in the full script)
utils.create_spike_trains_for_motion(tuning_prop, params, contrast=.9, my_units=my_units, seed=seed)
if comm is not None:
    comm.barrier()
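create_spike_trains_for_motion is not shown either. One common way to generate such stimulus-driven input is an inhomogeneous Poisson process, approximated bin by bin; a minimal sketch under that assumption (the function name and rate model are illustrative, not the actual utils code):

import numpy as np

def poisson_spike_train(rate_envelope, dt, rng=np.random):
    """Draw spike times (in ms) from a time-varying rate (in Hz) using a
    per-bin Bernoulli approximation of an inhomogeneous Poisson process."""
    p_spike = rate_envelope * dt / 1000.            # spike probability per bin of width dt [ms]
    spikes = rng.rand(rate_envelope.size) < p_spike
    return np.nonzero(spikes)[0] * dt               # bin indices -> spike times [ms]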
Example #8
def bcpnn_offline_noColumns(params, conn_list, sim_cnt=0, save_all=False, comm=None):
    """
    This function computes the weight and bias values based on spiketimes during the simulation.

    Arguments:
        params: parameter dictionary
        conn_list:  two-dim numpy array storing cell-to-cell connections (only non-zero elements will be processed)
                            in the format (src, tgt, weight, delay)
                            or
                            file name in which the date is stored in this way
        sim_cnt: int for recording to file
        save_all: if True all traces will be saved
        comm = MPI communicator

    """
    if isinstance(conn_list, str):
        conn_list = np.load(conn_list)

    if comm is not None:
        pc_id, n_proc = comm.rank, comm.size
    else:
        pc_id, n_proc = 0, 1
    # extract the local list of elements 'my_conns' from the global conn_list
    n_total = len(conn_list)
    (min_id, max_id) = utils.distribute_n(n_total, n_proc, pc_id)
    my_conns = [(conn_list[i, 0], conn_list[i, 1], conn_list[i, 2], conn_list[i, 3]) for i in xrange(min_id, max_id)]

    fn = params['exc_spiketimes_fn_merged'] + str(sim_cnt) + '.ras'
    spklist = nts.load_spikelist(fn)#, range(params['n_exc_per_mc']), t_start=0, t_stop=params['t_sim'])
    spiketrains = spklist.spiketrains

    new_conn_list = np.zeros((len(my_conns), 4)) # (src, tgt, weight, delay)
    bias_dict = {}
    for i in xrange(params['n_exc']):
        bias_dict[i] = None
    
    for i in xrange(len(my_conns)):
#    for i in xrange(2):
        pre_id = my_conns[i][0]
        post_id = my_conns[i][1]

        # create traces from spiketimes
        # pre
        spiketimes_pre = spiketrains[pre_id+1.].spike_times
        pre_trace = utils.convert_spiketrain_to_trace(spiketimes_pre, params['t_sim'] + 1) # + 1 is to handle spikes in the last time step
        # post
        spiketimes_post = spiketrains[post_id+1.].spike_times
        post_trace = utils.convert_spiketrain_to_trace(spiketimes_post, params['t_sim'] + 1) # + 1 is to handle spikes in the last time step

        # compute
#        print "%d Computing traces for %d -> %d; %.2f percent " % (pc_id, pre_id, post_id, i / float(len(my_conns)) * 100.)
        get_traces = save_all
        if (get_traces):
            wij, bias, pi, pj, pij, ei, ej, eij, zi, zj = get_spiking_weight_and_bias(pre_trace, post_trace, get_traces)
            dw = (wij.max() - wij.min()) * params['dw_scale']
            # bias update
            new_bias = bias.max()
        else:
            dw, new_bias = get_spiking_weight_and_bias(pre_trace, post_trace, get_traces)
            dw *= params['dw_scale']

        # bias update: keep the first bias value computed for each post cell
        if bias_dict[post_id] is None:
            bias_dict[post_id] = new_bias


        # weight update
        new_conn_list[i, 0] = pre_id
        new_conn_list[i, 1] = post_id
        new_conn_list[i, 2] = dw + my_conns[i][2]
        new_conn_list[i, 3] = my_conns[i][3]

#        print "DEBUG Pc %d \t%d\t%d\t%.1e\t%.1e\tbias:%.4e\tconn:" % (pc_id, new_conn_list[i, 0], new_conn_list[i, 1],  new_conn_list[i, 2],  new_conn_list[i, 3], new_bias[i, 1]), my_conns[i]
        if (save_all):
            # save
            output_fn = params['weights_fn_base'] + "%d_%d.npy" % (pre_id, post_id)
            np.save(output_fn, wij)

            output_fn = params['bias_fn_base'] + "%d.npy" % (post_id)
            np.save(output_fn, bias)

            output_fn = params['ztrace_fn_base'] + "%d.npy" % pre_id
            np.save(output_fn, zi)
            output_fn = params['ztrace_fn_base'] + "%d.npy" % post_id
            np.save(output_fn, zj)

            output_fn = params['etrace_fn_base'] + "%d.npy" % pre_id
            np.save(output_fn, ei)
            output_fn = params['etrace_fn_base'] + "%d.npy" % post_id
            np.save(output_fn, ej)
            output_fn = params['etrace_fn_base'] + "%d_%d.npy" % (pre_id, post_id)
            np.save(output_fn, eij)

            output_fn = params['ptrace_fn_base'] + "%d.npy" % pre_id
            np.save(output_fn, pi)
            output_fn = params['ptrace_fn_base'] + "%d.npy" % post_id
            np.save(output_fn, pj)
            output_fn = params['ptrace_fn_base'] + "%d_%d.npy" % (pre_id, post_id)
            np.save(output_fn, pij)

    if (n_proc > 1):
        output_fn_conn_list = params['conn_list_ee_fn_base'] + str(sim_cnt+1) + '.dat'
        utils.gather_conn_list(comm, new_conn_list, n_total, output_fn_conn_list)

        output_fn_bias = params['bias_values_fn_base'] + str(sim_cnt+1) + '.dat'
        utils.gather_bias(comm, bias_dict, n_total, output_fn_bias)

    else:
        print "Debug saving to", params['conn_list_ee_fn_base'] + str(sim_cnt+1) + '.dat'
        np.savetxt(params['conn_list_ee_fn_base'] + str(sim_cnt+1) + '.dat', my_conns)#conn_list)
        print "Debug saving to", params['bias_values_fn_base'] + str(sim_cnt+1) + '.dat'
        np.savetxt(params['bias_values_fn_base'] + str(sim_cnt+1) + '.dat', bias)
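utils.convert_spiketrain_to_trace presumably bins the spike times into a binary trace sampled at 1 ms (the t_sim + 1 argument above suggests the trace length in ms, with the + 1 catching spikes in the last time step); a minimal sketch under that assumption:

import numpy as np

def convert_spiketrain_to_trace(spiketimes, trace_len):
    """Binary trace: trace[t] == 1. if the cell spiked in the 1 ms bin t."""
    trace = np.zeros(int(trace_len))
    bins = np.asarray(spiketimes, dtype=int)  # floor spike times [ms] to bin indices
    trace[bins] = 1.
    return trace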