Example #1
def ap_test(template_class, tree, v_init):

    cell = cells.make_neurotree_cell(template_class, neurotree_dict=tree)
    h.dt = 0.025

    prelength = 100.0
    stimdur = 10.0

    soma = list(cell.soma)[0]
    initial_amp = 0.05

    h.tlog = h.Vector()
    h.tlog.record(h._ref_t)

    h.Vlog = h.Vector()
    h.Vlog.record(soma(0.5)._ref_v)

    thr = cells.find_spike_threshold_minimum(cell,
                                             loc=0.5,
                                             sec=soma,
                                             duration=stimdur,
                                             initial_amp=initial_amp)

    f = open("HIPPCell_ap_results.dat", 'w')
    f.write("## current amplitude: %g\n" % thr)
    f.close()

    f = open("HIPPCell_voltage_trace.dat", 'w')
    for i in range(0, int(h.tlog.size())):
        f.write('%g %g\n' % (h.tlog.x[i], h.Vlog.x[i]))
    f.close()
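The snippets in this section omit their import preamble; below is a plausible reconstruction, with the project-specific helpers left as commented-out assumptions (module paths are a best guess and may differ from the source project):

# Common imports the snippets below rely on (assumed preamble).
import gc
import os
import random
import time

import h5py
import numpy as np
from mpi4py import MPI
from neuron import h
from past.utils import old_div   # provides old_div used in several snippets

# Project-specific helpers referenced below, with assumed module paths:
# from dentate import cells, synapses, utils, neuron_utils
# from dentate.env import Env
# from neuroh5.io import (NeuroH5TreeGen, append_cell_attributes,
#                         append_graph, read_population_ranges,
#                         read_tree_selection)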
Example #2
def fi_test(template_class, tree, v_init):

    cell = cells.make_neurotree_cell(template_class, neurotree_dict=tree)
    soma = list(cell.soma)[0]
    h.dt = 0.025

    prelength = 1000.0
    mainlength = 2000.0

    tstop = prelength + mainlength

    stimdur = 1000.0

    stim1 = h.IClamp(soma(0.5))
    stim1.delay = prelength
    stim1.dur = stimdur
    stim1.amp = 0.2

    h.tlog = h.Vector()
    h.tlog.record(h._ref_t)

    h.Vlog = h.Vector()
    h.Vlog.record(soma(0.5)._ref_v)

    h.spikelog = h.Vector()
    nc = h.NetCon(soma(0.5)._ref_v, h.nil)
    nc.threshold = -20.0
    nc.record(h.spikelog)

    h.tstop = tstop

    frs = []
    stim_amps = [stim1.amp]
    for it in range(1, 9):

        neuron_utils.simulate(v_init, prelength, mainlength)

        print("fi_test: stim1.amp = %g spikelog.size = %d\n" %
              (stim1.amp, h.spikelog.size()))
        stim1.amp = stim1.amp + 0.1
        stim_amps.append(stim1.amp)
        frs.append(h.spikelog.size())
        h.spikelog.clear()
        h.tlog.clear()
        h.Vlog.clear()

    f = open("HIPPCell_fi_results.dat", 'w')

    for (fr, stim_amp) in zip(frs, stim_amps):
        f.write("%g %g\n" % (stim_amp, fr))

    f.close()
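The file written above holds one (amplitude, spike count) pair per line; a minimal sketch for plotting the resulting f-I curve (assumes numpy and matplotlib are available):

import numpy as np
import matplotlib.pyplot as plt

# Columns written by fi_test: stimulus amplitude (nA), spike count.
data = np.loadtxt("HIPPCell_fi_results.dat")
# The 1000 ms stimulus makes the spike count a firing rate in Hz.
plt.plot(data[:, 0], data[:, 1], 'o-')
plt.xlabel("Injected current (nA)")
plt.ylabel("Firing rate (Hz)")
plt.savefig("HIPPCell_fi_curve.png")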
Example #3
def passive_test(template_class, tree, v_init):

    cell = cells.make_neurotree_cell(template_class, neurotree_dict=tree)
    h.topology()

    h.dt = 0.025

    prelength = 1000
    mainlength = 2000

    tstop = prelength + mainlength

    stimdur = 500.0
    soma = list(cell.soma)[0]
    stim1 = h.IClamp(soma(0.5))
    stim1.delay = prelength
    stim1.dur = stimdur
    stim1.amp = -0.1

    h.tlog = h.Vector()
    h.tlog.record(h._ref_t)

    h.Vlog = h.Vector()
    h.Vlog.record(soma(0.5)._ref_v)

    h.tstop = tstop

    neuron_utils.simulate(v_init, prelength, mainlength)

    ## compute membrane time constant
    vrest = h.Vlog.x[int(h.tlog.indwhere(">=", prelength - 1))]
    vmin = h.Vlog.min()
    vmax = vrest

    ## the time it takes the system's step response to reach 1-1/e (or
    ## 63.2%) of the peak value
    amp23 = 0.632 * abs(vmax - vmin)
    vtau0 = vrest - amp23
    tau0 = h.tlog.x[int(h.Vlog.indwhere("<=", vtau0))] - prelength

    f = open("HIPPCell_passive_results.dat", 'w')

    f.write("DC input resistance: %g MOhm\n" % h.rn(cell))
    f.write("vmin: %g mV\n" % vmin)
    f.write("vtau0: %g mV\n" % vtau0)
    f.write("tau0: %g ms\n" % tau0)

    f.close()
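A worked check of the quantities computed above, with made-up voltages: for a -0.1 nA step, the steady-state deflection gives the input resistance directly, since mV / nA = MOhm:

# Illustrative values only; in passive_test, vrest and vmin come from the trace.
vrest = -65.0        # baseline potential (mV)
vmin = -72.5         # steady-state minimum during the step (mV)
stim_amp = -0.1      # step amplitude (nA), as in passive_test

Rin = abs(vrest - vmin) / abs(stim_amp)   # 75 MOhm for these values
amp23 = 0.632 * abs(vrest - vmin)         # 63.2% of the step response
vtau0 = vrest - amp23                     # potential one time constant into the step
print(Rin, vtau0)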
Example #4
def main(config, template_path, output_path, forest_path, populations, io_size,
         chunk_size, value_chunk_size, cache_size, verbose):
    """

    :param config:
    :param template_path:
    :param forest_path:
    :param populations:
    :param io_size:
    :param chunk_size:
    :param value_chunk_size:
    :param cache_size:
    """

    utils.config_logging(verbose)
    logger = utils.get_script_logger(script_name)

    comm = MPI.COMM_WORLD
    rank = comm.rank

    env = Env(comm=MPI.COMM_WORLD,
              config_file=config,
              template_paths=template_path)
    h('objref nil, pc, templatePaths')
    h.load_file("nrngui.hoc")
    h.load_file("./templates/Value.hoc")
    h.xopen("./lib.hoc")
    h.pc = h.ParallelContext()

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    h.templatePaths = h.List()
    for path in env.templatePaths:
        h.templatePaths.append(h.Value(1, path))

    if output_path is None:
        output_path = forest_path

    if rank == 0:
        if not os.path.isfile(output_path):
            input_file = h5py.File(forest_path, 'r')
            output_file = h5py.File(output_path, 'w')
            input_file.copy('/H5Types', output_file)
            input_file.close()
            output_file.close()
    comm.barrier()

    (pop_ranges, _) = read_population_ranges(forest_path, comm=comm)
    start_time = time.time()
    for population in populations:
        logger.info('Rank %i population: %s' % (rank, population))
        count = 0
        (population_start, _) = pop_ranges[population]
        template_name = env.celltypes[population]['template']
        h.find_template(h.pc, h.templatePaths, template_name)
        template_class = getattr(h, template_name)
        measures_dict = {}
        for gid, morph_dict in NeuroH5TreeGen(forest_path,
                                              population,
                                              io_size=io_size,
                                              comm=comm,
                                              topology=True):
            if gid is not None:
                logger.info('Rank %i gid: %i' % (rank, gid))
                cell = cells.make_neurotree_cell(template_class,
                                                 neurotree_dict=morph_dict,
                                                 gid=gid)
                secnodes_dict = morph_dict['section_topology']['nodes']

                apicalidx = set(cell.apicalidx)
                basalidx = set(cell.basalidx)

                dendrite_area_dict = {k + 1: 0.0 for k in range(0, 4)}
                dendrite_length_dict = {k + 1: 0.0 for k in range(0, 4)}
                for (i, sec) in enumerate(cell.sections):
                    if (i in apicalidx) or (i in basalidx):
                        secnodes = secnodes_dict[i]
                        prev_layer = None
                        for seg in sec.allseg():
                            L = seg.sec.L
                            nseg = seg.sec.nseg
                            seg_l = old_div(L, nseg)
                            seg_area = h.area(seg.x)
                            layer = cells.get_node_attribute(
                                'layer', morph_dict, seg.sec, secnodes, seg.x)
                            layer = layer if layer > 0 else (
                                prev_layer if prev_layer is not None else 1)
                            prev_layer = layer
                            dendrite_length_dict[layer] += seg_l
                            dendrite_area_dict[layer] += seg_area

                measures_dict[gid] = {
                    'dendrite_area': np.asarray(
                        [dendrite_area_dict[k]
                         for k in sorted(dendrite_area_dict.keys())],
                        dtype=np.float32),
                    'dendrite_length': np.asarray(
                        [dendrite_length_dict[k]
                         for k in sorted(dendrite_length_dict.keys())],
                        dtype=np.float32)
                }

                del cell
                count += 1
            else:
                logger.info('Rank %i gid is None' % rank)
        append_cell_attributes(output_path,
                               population,
                               measures_dict,
                               namespace='Tree Measurements',
                               comm=comm,
                               io_size=io_size,
                               chunk_size=chunk_size,
                               value_chunk_size=value_chunk_size,
                               cache_size=cache_size)
    MPI.Finalize()
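A sketch for reading the measurements back, assuming the neuroh5 read API mirrors the append call above (file and population names are placeholders):

from neuroh5.io import read_cell_attributes

output_path = "forest_measurements.h5"   # placeholder path
population = "HIPP"                      # placeholder population name
attr_iter = read_cell_attributes(output_path, population,
                                 namespace='Tree Measurements')
for gid, attr_dict in attr_iter:
    print(gid, attr_dict['dendrite_length'], attr_dict['dendrite_area'])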
Example #5
def ap_rate_test(tree, v_init):

    cell = cells.make_neurotree_cell("MOPPCell", neurotree_dict=tree)
    h.dt = 0.025

    prelength = 1000.0
    mainlength = 2000.0

    tstop = prelength+mainlength
    
    stimdur = 1000.0
    
    stim1 = h.IClamp(cell.sections[0](0.5))
    stim1.delay = prelength
    stim1.dur   = stimdur
    stim1.amp   = 0.2

    h.tlog = h.Vector()
    h.tlog.record(h._ref_t)

    h.Vlog = h.Vector()
    h.Vlog.record(cell.sections[0](0.5)._ref_v)

    h.spikelog = h.Vector()
    nc = h.NetCon(cell.sections[0](0.5)._ref_v, h.nil)
    nc.threshold = -40.0
    nc.record(h.spikelog)
    
    h.tstop = tstop


    it = 1
    ## Increase the injected current in 0.1 nA steps (up to 5 steps)
    ## until at least 50 spikes occur
    while (h.spikelog.size() < 60):

        utils.simulate(h, v_init, prelength, mainlength)

        if (h.spikelog.size() < 50) and (it < 5):
            print("ap_rate_test: stim1.amp = %g spikelog.size = %d\n" % (stim1.amp, h.spikelog.size()))
            stim1.amp = stim1.amp + 0.1
            h.spikelog.clear()
            h.tlog.clear()
            h.Vlog.clear()
            it += 1
        else:
            break

    print("ap_rate_test: stim1.amp = %g spikelog.size = %d\n" % (stim1.amp, h.spikelog.size()))

    isivect = h.Vector(h.spikelog.size()-1, 0.0)
    tspike = h.spikelog.x[0]
    for i in range(1,int(h.spikelog.size())):
        isivect.x[i-1] = h.spikelog.x[i]-tspike
        tspike = h.spikelog.x[i]
    
    print("ap_rate_test: isivect.size = %d\n" % isivect.size())
    isimean  = isivect.mean()
    isivar   = isivect.var()
    isistdev = isivect.stdev()
    
    isilast = int(isivect.size())-1
    if (isivect.size() > 10):
        isi10th = 10 
    else:
        isi10th = isilast
    
    ## Find the last ISI that is at least as large as an early ISI (here
    ## the second one).  This is necessary because some variants of the
    ## model generate spike doublets (i.e. spikes separated by very short
    ## intervals), which confuse the ISI statistics.
    isilastgt = int(isivect.size())-1
    while (isivect.x[isilastgt] < isivect.x[1]):
        isilastgt = isilastgt-1

    if (not (isilastgt > 0)):
        isivect.printf()
        raise RuntimeError("ap_rate_test: unable to find a late ISI at least as large as the early ISIs")
    
    f=open("MOPPCell_ap_rate_results.dat",'w')

    f.write ("## number of spikes: %g\n" % h.spikelog.size())
    f.write ("## FR mean: %g\n" % (1.0 / isimean))
    f.write ("## ISI mean: %g\n" % isimean) 
    f.write ("## ISI variance: %g\n" % isivar)
    f.write ("## ISI stdev: %g\n" % isistdev)
    f.write ("## ISI adaptation 1: %g\n" % (old_div(isivect.x[0], isimean)))
    f.write ("## ISI adaptation 2: %g\n" % (old_div(isivect.x[0], isivect.x[isilast])))
    f.write ("## ISI adaptation 3: %g\n" % (old_div(isivect.x[0], isivect.x[isi10th])))
    f.write ("## ISI adaptation 4: %g\n" % (old_div(isivect.x[0], isivect.x[isilastgt])))

    f.close()

    f=open("MOPPCell_voltage_trace.dat",'w')
    for i in range(0, int(h.tlog.size())):
        f.write('%g %g\n' % (h.tlog.x[i], h.Vlog.x[i]))
    f.close()
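The same ISI statistics in plain numpy, for comparison with the hoc Vector computation above (the spike times are made up for illustration):

import numpy as np

spike_times = np.array([1005.0, 1020.0, 1037.0, 1056.0, 1078.0])  # ms
isi = np.diff(spike_times)                 # inter-spike intervals
print("ISI mean: %g" % isi.mean())
print("ISI stdev: %g" % isi.std(ddof=1))   # ddof=1 to match the hoc Vector's sample statistics
print("ISI adaptation 1: %g" % (isi[0] / isi.mean()))
print("ISI adaptation 2: %g" % (isi[0] / isi[-1]))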
Example #6
def gap_junction_test(tree, v_init):
    
    h('objref gjlist, cells, Vlog1, Vlog2')

    h.pc = h.ParallelContext()
    h.cells  = h.List()
    h.gjlist = h.List()
    
    cell1 = cells.make_neurotree_cell("MOPPCell", neurotree_dict=tree)
    cell2 = cells.make_neurotree_cell("MOPPCell", neurotree_dict=tree)

    h.cells.append(cell1)
    h.cells.append(cell2)

    ggid        = 20000000
    source      = 10422930
    destination = 10422670
    srcbranch   = 1
    dstbranch   = 2
    weight      = 5.4e-4

    stimdur     = 500
    tstop       = 2000
    
    h.pc.set_gid2node(source, int(h.pc.id()))
    nc = cell1.connect2target(h.nil)
    h.pc.cell(source, nc, 1)

    h.pc.set_gid2node(destination, int(h.pc.id()))
    nc = cell2.connect2target(h.nil)
    h.pc.cell(destination, nc, 1)

    stim1 = h.IClamp(cell1.sections[0](0.5))
    stim1.delay = 250
    stim1.dur = stimdur
    stim1.amp = -0.1

    stim2 = h.IClamp(cell2.sections[0](0.5))
    stim2.delay = 500+stimdur
    stim2.dur = stimdur
    stim2.amp = -0.1

    log_size = int(old_div(tstop, h.dt)) + 1

    h.tlog = h.Vector(log_size, 0)
    h.tlog.record(h._ref_t)

    h.Vlog1 = h.Vector(log_size)
    h.Vlog1.record(cell1.sections[0](0.5)._ref_v)

    h.Vlog2 = h.Vector(log_size)
    h.Vlog2.record(cell2.sections[0](0.5)._ref_v)
    
    h.mkgap(h.pc, h.gjlist, source, srcbranch, ggid, ggid+1, weight)
    h.mkgap(h.pc, h.gjlist, destination, dstbranch, ggid+1, ggid, weight)

    h.pc.setup_transfer()
    h.pc.set_maxstep(10.0)

    h.stdinit()
    h.finitialize(v_init)
    h.pc.barrier()

    h.tstop = tstop
    h.pc.psolve(h.tstop)

    f=open("MOPPCellGJ.dat",'w')
    for (t,v1,v2) in zip(h.tlog,h.Vlog1,h.Vlog2):
        f.write("%f %f %f\n" % (t,v1,v2))
    f.close()
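A sketch for estimating the steady-state coupling coefficient from the trace file written above, as the ratio of the deflection in cell 2 to the deflection in cell 1 during stim1 (the averaging windows are assumptions based on the stimulus timing):

import numpy as np

t, v1, v2 = np.loadtxt("MOPPCellGJ.dat", unpack=True)
baseline = t < 250                 # before stim1 (delay 250 ms)
steady = (t > 650) & (t < 740)     # late in stim1 (dur 500 ms)
dv1 = v1[steady].mean() - v1[baseline].mean()
dv2 = v2[steady].mean() - v2[baseline].mean()
print("coupling coefficient: %g" % (dv2 / dv1))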
Example #7
def main(config, template_path, output_path, forest_path, populations,
         distance_bin_size, io_size, chunk_size, value_chunk_size, cache_size,
         verbose):
    """

    :param config:
    :param template_path:
    :param forest_path:
    :param populations:
    :param io_size:
    :param chunk_size:
    :param value_chunk_size:
    :param cache_size:
    """

    utils.config_logging(verbose)
    logger = utils.get_script_logger(script_name)

    comm = MPI.COMM_WORLD
    rank = comm.rank

    env = Env(comm=MPI.COMM_WORLD,
              config_file=config,
              template_paths=template_path)
    configure_hoc_env(env)

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    if output_path is None:
        output_path = forest_path

    if rank == 0:
        if not os.path.isfile(output_path):
            input_file = h5py.File(forest_path, 'r')
            output_file = h5py.File(output_path, 'w')
            input_file.copy('/H5Types', output_file)
            input_file.close()
            output_file.close()
    comm.barrier()

    layers = env.layers
    layer_idx_dict = {
        layers[layer_name]: layer_name
        for layer_name in ['GCL', 'IML', 'MML', 'OML', 'Hilus']
    }

    (pop_ranges, _) = read_population_ranges(forest_path, comm=comm)
    start_time = time.time()
    for population in populations:
        logger.info('Rank %i population: %s' % (rank, population))
        count = 0
        (population_start, _) = pop_ranges[population]
        template_class = load_cell_template(env,
                                            population,
                                            bcast_template=True)
        measures_dict = {}
        for gid, morph_dict in NeuroH5TreeGen(forest_path,
                                              population,
                                              io_size=io_size,
                                              comm=comm,
                                              topology=True):
            if gid is not None:
                logger.info('Rank %i gid: %i' % (rank, gid))
                cell = cells.make_neurotree_cell(template_class,
                                                 neurotree_dict=morph_dict,
                                                 gid=gid)
                secnodes_dict = morph_dict['section_topology']['nodes']

                apicalidx = set(cell.apicalidx)
                basalidx = set(cell.basalidx)

                dendrite_area_dict = {k: 0.0 for k in layer_idx_dict}
                dendrite_length_dict = {k: 0.0 for k in layer_idx_dict}
                dendrite_distances = []
                dendrite_diams = []
                for (i, sec) in enumerate(cell.sections):
                    if (i in apicalidx) or (i in basalidx):
                        secnodes = secnodes_dict[i]
                        for seg in sec.allseg():
                            L = seg.sec.L
                            nseg = seg.sec.nseg
                            seg_l = L / nseg
                            seg_area = h.area(seg.x)
                            seg_diam = seg.diam
                            seg_distance = get_distance_to_node(
                                cell,
                                list(cell.soma)[0], seg.sec, seg.x)
                            dendrite_diams.append(seg_diam)
                            dendrite_distances.append(seg_distance)
                            layer = synapses.get_node_attribute(
                                'layer', morph_dict, seg.sec, secnodes, seg.x)
                            dendrite_length_dict[layer] += seg_l
                            dendrite_area_dict[layer] += seg_area

                dendrite_distance_array = np.asarray(dendrite_distances)
                dendrite_diam_array = np.asarray(dendrite_diams)
                dendrite_distance_bin_range = int(
                    ((np.max(dendrite_distance_array)) -
                     np.min(dendrite_distance_array)) / distance_bin_size) + 1
                dendrite_distance_counts, dendrite_distance_edges = np.histogram(
                    dendrite_distance_array,
                    bins=dendrite_distance_bin_range,
                    density=False)
                dendrite_diam_sums, _ = np.histogram(
                    dendrite_distance_array,
                    weights=dendrite_diam_array,
                    bins=dendrite_distance_bin_range,
                    density=False)
                dendrite_mean_diam_hist = np.zeros_like(dendrite_diam_sums)
                np.divide(dendrite_diam_sums,
                          dendrite_distance_counts,
                          where=dendrite_distance_counts > 0,
                          out=dendrite_mean_diam_hist)

                dendrite_area_per_layer = np.asarray(
                    [dendrite_area_dict[k]
                     for k in sorted(dendrite_area_dict.keys())],
                    dtype=np.float32)
                dendrite_length_per_layer = np.asarray(
                    [dendrite_length_dict[k]
                     for k in sorted(dendrite_length_dict.keys())],
                    dtype=np.float32)

                measures_dict[gid] = {
                    'dendrite_distance_hist_edges':
                        np.asarray(dendrite_distance_edges, dtype=np.float32),
                    'dendrite_distance_counts':
                        np.asarray(dendrite_distance_counts, dtype=np.int32),
                    'dendrite_mean_diam_hist':
                        np.asarray(dendrite_mean_diam_hist, dtype=np.float32),
                    'dendrite_area_per_layer': dendrite_area_per_layer,
                    'dendrite_length_per_layer': dendrite_length_per_layer
                }

                del cell
                count += 1
            else:
                logger.info('Rank %i gid is None' % rank)
        append_cell_attributes(output_path,
                               population,
                               measures_dict,
                               namespace='Tree Measurements',
                               comm=comm,
                               io_size=io_size,
                               chunk_size=chunk_size,
                               value_chunk_size=value_chunk_size,
                               cache_size=cache_size)
    MPI.Finalize()
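A toy check of the binned mean-diameter computation above: np.divide with a where= mask leaves empty bins at zero instead of producing NaNs (values are made up):

import numpy as np

distances = np.array([10.0, 12.0, 55.0, 300.0])   # made-up soma distances
diams = np.array([2.0, 1.8, 1.0, 0.4])            # made-up segment diameters

counts, edges = np.histogram(distances, bins=4)
diam_sums, _ = np.histogram(distances, weights=diams, bins=4)
mean_diam = np.zeros_like(diam_sums)
np.divide(diam_sums, counts, where=counts > 0, out=mean_diam)
print(mean_diam)   # empty bins remain 0.0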
Example #8
def main(config, config_prefix, template_path, output_path, forest_path,
         populations, distribution, io_size, chunk_size, value_chunk_size,
         cache_size, write_size, verbose, dry_run):
    """

    :param config:
    :param config_prefix:
    :param template_path:
    :param forest_path:
    :param populations:
    :param distribution:
    :param io_size:
    :param chunk_size:
    :param value_chunk_size:
    :param cache_size:
    """

    utils.config_logging(verbose)
    logger = utils.get_script_logger(os.path.basename(__file__))

    comm = MPI.COMM_WORLD
    rank = comm.rank

    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    env = Env(comm=MPI.COMM_WORLD,
              config_file=config,
              config_prefix=config_prefix,
              template_paths=template_path)

    configure_hoc_env(env)

    if io_size == -1:
        io_size = comm.size

    if output_path is None:
        output_path = forest_path

    if not dry_run:
        if rank == 0:
            if not os.path.isfile(output_path):
                input_file = h5py.File(forest_path, 'r')
                output_file = h5py.File(output_path, 'w')
                input_file.copy('/H5Types', output_file)
                input_file.close()
                output_file.close()
        comm.barrier()

    (pop_ranges, _) = read_population_ranges(forest_path, comm=comm)
    start_time = time.time()
    syn_stats = {}
    for population in populations:
        logger.info('Rank %i population: %s' % (rank, population))
        (population_start, _) = pop_ranges[population]
        template_class = load_cell_template(env, population)

        density_dict = env.celltypes[population]['synapses']['density']
        layer_set_dict = defaultdict(set)
        swc_set_dict = defaultdict(set)
        for sec_name, sec_dict in viewitems(density_dict):
            for syn_type, syn_dict in viewitems(sec_dict):
                swc_set_dict[syn_type].add(env.SWC_Types[sec_name])
                for layer_name in syn_dict:
                    if layer_name != 'default':
                        layer = env.layers[layer_name]
                        layer_set_dict[syn_type].add(layer)

        syn_stats_dict = {
            'section': defaultdict(lambda: {'excitatory': 0, 'inhibitory': 0}),
            'layer': defaultdict(lambda: {'excitatory': 0, 'inhibitory': 0}),
            'swc_type': defaultdict(lambda: {'excitatory': 0, 'inhibitory': 0}),
            'total': {'excitatory': 0, 'inhibitory': 0}
        }

        count = 0
        gid_count = 0
        synapse_dict = {}
        for gid, morph_dict in NeuroH5TreeGen(forest_path,
                                              population,
                                              io_size=io_size,
                                              comm=comm,
                                              topology=True):
            local_time = time.time()
            if gid is not None:
                logger.info('Rank %i gid: %i' % (rank, gid))
                cell = cells.make_neurotree_cell(template_class,
                                                 neurotree_dict=morph_dict,
                                                 gid=gid)
                cell_sec_dict = {
                    'apical': (cell.apical, None),
                    'basal': (cell.basal, None),
                    'soma': (cell.soma, None),
                    'ais': (cell.ais, None),
                    'hillock': (cell.hillock, None)
                }
                cell_secidx_dict = {
                    'apical': cell.apicalidx,
                    'basal': cell.basalidx,
                    'soma': cell.somaidx,
                    'ais': cell.aisidx,
                    'hillock': cell.hilidx
                }

                random_seed = env.model_config['Random Seeds']['Synapse Locations'] + gid
                if distribution == 'uniform':
                    syn_dict, seg_density_per_sec = synapses.distribute_uniform_synapses(
                        random_seed, env.Synapse_Types, env.SWC_Types,
                        env.layers, density_dict, morph_dict, cell_sec_dict,
                        cell_secidx_dict)

                elif distribution == 'poisson':
                    syn_dict, seg_density_per_sec = synapses.distribute_poisson_synapses(
                        random_seed, env.Synapse_Types, env.SWC_Types,
                        env.layers, density_dict, morph_dict, cell_sec_dict,
                        cell_secidx_dict)
                else:
                    raise Exception('Unknown distribution type: %s' %
                                    distribution)

                synapse_dict[gid] = syn_dict
                this_syn_stats = update_syn_stats(env, syn_stats_dict,
                                                  syn_dict)
                check_syns(gid, morph_dict, this_syn_stats,
                           seg_density_per_sec, layer_set_dict, swc_set_dict,
                           env, logger)

                del cell
                num_syns = len(synapse_dict[gid]['syn_ids'])
                logger.info(
                    'Rank %i took %i s to compute %d synapse locations for %s gid: %i'
                    % (rank, time.time() - local_time, num_syns, population,
                       gid))
                logger.info(
                    '%s gid %i synapses: %s' %
                    (population, gid, local_syn_summary(this_syn_stats)))
                gid_count += 1
            else:
                logger.info('Rank %i gid is None' % rank)
            if (not dry_run) and (gid_count % write_size == 0):
                append_cell_attributes(output_path,
                                       population,
                                       synapse_dict,
                                       namespace='Synapse Attributes',
                                       comm=comm,
                                       io_size=io_size,
                                       chunk_size=chunk_size,
                                       value_chunk_size=value_chunk_size,
                                       cache_size=cache_size)
                synapse_dict = {}
                gc.collect()
            syn_stats[population] = syn_stats_dict
            count += 1

        if not dry_run:
            append_cell_attributes(output_path,
                                   population,
                                   synapse_dict,
                                   namespace='Synapse Attributes',
                                   comm=comm,
                                   io_size=io_size,
                                   chunk_size=chunk_size,
                                   value_chunk_size=value_chunk_size,
                                   cache_size=cache_size)

        global_count = comm.gather(gid_count, root=0)

        if gid_count > 0:
            color = 1
        else:
            color = 0

        comm0 = comm.Split(color, 0)
        if color == 1:
            summary = global_syn_summary(comm0,
                                         syn_stats,
                                         np.sum(global_count),
                                         root=0)
            if rank == 0:
                logger.info(
                    'target: %s, %i ranks took %i s to compute synapse locations for %i cells'
                    % (population, comm.size, time.time() - start_time,
                       np.sum(global_count)))
                logger.info(summary)
        comm0.Free()
        comm.barrier()

    MPI.Finalize()
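The write_size logic above batches the appends so per-rank memory stays bounded; the same idiom in a self-contained form (flush is a stand-in for append_cell_attributes):

def flush(batch):
    # stand-in for append_cell_attributes(...)
    print("writing %d cells" % len(batch))

write_size = 3
batch = {}
for i, gid in enumerate(range(10), start=1):
    batch[gid] = {'syn_ids': []}   # per-cell attributes
    if i % write_size == 0:
        flush(batch)
        batch = {}
if batch:                          # final partial batch, as after the loop above
    flush(batch)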
Example #9
def measure_gap_junction_coupling(env, template_class, tree, v_init, cell_dict={}):
    
    h('objref gjlist, cells, Vlog1, Vlog2')

    pc = env.pc
    h.cells  = h.List()
    h.gjlist = h.List()
    
    cell1 = cells.make_neurotree_cell(template_class, neurotree_dict=tree)
    cell2 = cells.make_neurotree_cell(template_class, neurotree_dict=tree)

    h.cells.append(cell1)
    h.cells.append(cell2)

    ggid        = 20000000
    source      = 10422930
    destination = 10422670
    weight      = 5.4e-4
    srcsec   = int(cell1.somaidx.x[0])
    dstsec   = int(cell2.somaidx.x[0])

    stimdur     = 500
    tstop       = 2000
    
    pc.set_gid2node(source, int(pc.id()))
    nc = cell1.connect2target(h.nil)
    pc.cell(source, nc, 1)
    soma1 = list(cell1.soma)[0]

    pc.set_gid2node(destination, int(pc.id()))
    nc = cell2.connect2target(h.nil)
    pc.cell(destination, nc, 1)
    soma2 = list(cell2.soma)[0]

    stim1 = h.IClamp(soma1(0.5))
    stim1.delay = 250
    stim1.dur = stimdur
    stim1.amp = -0.1

    stim2 = h.IClamp(soma2(0.5))
    stim2.delay = 500+stimdur
    stim2.dur = stimdur
    stim2.amp = -0.1

    log_size = int(old_div(tstop, h.dt)) + 1

    h.tlog = h.Vector(log_size, 0)
    h.tlog.record(h._ref_t)

    h.Vlog1 = h.Vector(log_size)
    h.Vlog1.record(soma1(0.5)._ref_v)

    h.Vlog2 = h.Vector(log_size)
    h.Vlog2.record(soma2(0.5)._ref_v)


    gjpos = 0.5
    neuron_utils.mkgap(env, cell1, source, gjpos, srcsec, ggid, ggid+1, weight)
    neuron_utils.mkgap(env, cell2, destination, gjpos, dstsec, ggid+1, ggid, weight)

    pc.setup_transfer()
    pc.set_maxstep(10.0)

    h.stdinit()
    h.finitialize(v_init)
    pc.barrier()

    h.tstop = tstop
    pc.psolve(h.tstop)
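The function above records the traces but never reports them; a follow-on sketch (run in the same NEURON session, window choices based on the stimulus timing) for extracting the coupling coefficient:

import numpy as np
from neuron import h

t = h.tlog.as_numpy()
v1 = h.Vlog1.as_numpy()
v2 = h.Vlog2.as_numpy()
baseline = t < 250                 # before stim1 (delay 250 ms)
steady = (t > 650) & (t < 740)     # late in stim1 (dur 500 ms)
cc = ((v2[steady].mean() - v2[baseline].mean()) /
      (v1[steady].mean() - v1[baseline].mean()))
print("gap junction coupling coefficient: %g" % cc)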
Example #10
def synapse_test(template_class,
                 gid,
                 tree,
                 synapses,
                 v_init,
                 env,
                 unique=True):

    postsyn_name = 'BC'
    presyn_names = ['GC', 'MC', 'BC', 'HC', 'HCC', 'MOPP', 'NGFC']

    cell = cells.make_neurotree_cell(template_class, neurotree_dict=tree)

    all_syn_ids = synapses['syn_ids']
    all_syn_layers = synapses['syn_layers']
    all_syn_sections = synapses['syn_secs']

    print('Total %i %s synapses' % (len(all_syn_ids), postsyn_name))

    env.cells.append(cell)
    env.pc.set_gid2node(gid, env.comm.rank)

    syn_attrs = env.synapse_attributes
    syn_attrs.load_syn_id_attrs(gid, synapses)

    for presyn_name in presyn_names:

        syn_ids = []
        layers = env.connection_config[postsyn_name][presyn_name].layers
        proportions = env.connection_config[postsyn_name][presyn_name].proportions
        for syn_id, syn_layer in zip(all_syn_ids, all_syn_layers):
            i = utils.list_index(syn_layer, layers)
            if i is not None:
                if random.random() <= proportions[i]:
                    syn_ids.append(syn_id)

        syn_params_dict = env.connection_config[postsyn_name][presyn_name].mechanisms
        syn_obj_dict = mksyns(
            gid,
            cell,
            syn_ids,
            syn_params_dict,
            env,
            0,
            add_synapse=add_unique_synapse if unique else add_shared_synapse)

        v_holding = -60
        synapse_group_test(env, presyn_name, gid, cell, syn_obj_dict,
                           syn_params_dict, 1, v_holding, v_init)
        synapse_group_test(env, presyn_name, gid, cell, syn_obj_dict,
                           syn_params_dict, 10, v_holding, v_init)
        synapse_group_test(env, presyn_name, gid, cell, syn_obj_dict,
                           syn_params_dict, 100, v_holding, v_init)

        rate = 30
        synapse_group_rate_test(env, presyn_name, gid, cell, syn_obj_dict,
                                syn_params_dict, 1, rate)
        synapse_group_rate_test(env, presyn_name, gid, cell, syn_obj_dict,
                                syn_params_dict, 10, rate)
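The proportion-based selection above uses the unseeded module-level random generator; a self-contained, seeded variant of the same filter for reproducibility (all values are made up):

import random

rng = random.Random(42)          # assumed seed, not from the original
layers = [2, 3]                  # layers this projection innervates
proportions = [0.5, 0.25]        # fraction of synapses taken per layer
all_syn_ids = list(range(100))
all_syn_layers = [rng.choice([1, 2, 3, 4]) for _ in all_syn_ids]

syn_ids = []
for syn_id, syn_layer in zip(all_syn_ids, all_syn_layers):
    if syn_layer in layers:
        i = layers.index(syn_layer)
        if rng.random() <= proportions[i]:
            syn_ids.append(syn_id)
print("selected %d synapses" % len(syn_ids))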
Example #11
def generate_gj_connections(env, forest_path, soma_coords_dict,
                            gj_config_dict, gj_seed, connectivity_namespace, connectivity_path,
                            io_size, chunk_size, value_chunk_size, cache_size,
                            dry_run=False):
    """Generates gap junction connectivity based on Euclidean-distance-weighted probabilities.
    :param gj_config: connection configuration object (instance of env.GapjunctionConfig)
    :param gj_seed: random seed for determining gap junction connectivity
    :param connectivity_namespace: namespace of gap junction connectivity attributes
    :param connectivity_path: path to gap junction connectivity file
    :param io_size: number of I/O ranks to use for parallel connectivity append
    :param chunk_size: HDF5 chunk size for connectivity file (pointer and index datasets)
    :param value_chunk_size: HDF5 chunk size for connectivity file (value datasets)
    :param cache_size: how many cells to read ahead
    """

    comm = env.comm

    rank = comm.rank
    size = comm.size

    if io_size == -1:
        io_size = comm.size

    start_time = time.time()

    ranstream_gj = np.random.RandomState(gj_seed)
    population_pairs = list(gj_config_dict.keys())

    for pp in population_pairs:
        if rank == 0:
            logger.info('%s <-> %s' % (pp[0], pp[1]))

    total_count = 0
    gid_count = 0

    for (i, (pp, gj_config)) in enumerate(sorted(viewitems(gj_config_dict))):
        if rank == 0:
            logger.info('Generating gap junction connections between populations %s and %s...' % pp)

        ranstream_gj.seed(gj_seed + i)

        coupling_params = np.asarray(gj_config.coupling_parameters)
        coupling_coeffs = np.asarray(gj_config.coupling_coefficients)
        connection_prob = gj_config.connection_probability
        connection_params = np.asarray(gj_config.connection_parameters)
        connection_bounds = np.asarray(gj_config.connection_bounds)

        population_a = pp[0]
        population_b = pp[1]

        template_name_a = env.celltypes[population_a]['template']
        template_name_b = env.celltypes[population_b]['template']

        load_cell_template(env, population_a)
        load_cell_template(env, population_b)
        template_class_a = getattr(h, template_name_a)
        template_class_b = getattr(h, template_name_b)

        clst_a = []
        gid_a = []
        for (gid, coords) in viewitems(soma_coords_dict[population_a]):
            clst_a.append(np.asarray(coords))
            gid_a.append(gid)
        gid_a = np.asarray(gid_a)

        sortidx_a = np.argsort(gid_a)
        coords_a = np.asarray([clst_a[i] for i in sortidx_a])

        clst_b = []
        gid_b = []
        for (gid, coords) in viewitems(soma_coords_dict[population_b]):
            clst_b.append(np.asarray(coords))
            gid_b.append(gid)
        gid_b = np.asarray(gid_b)

        sortidx_b = np.argsort(gid_b)
        coords_b = np.asarray([clst_b[i] for i in sortidx_b])

        gj_prob_dict = filter_by_distance(gid_a[sortidx_a], coords_a,
                                          gid_b[sortidx_b], coords_b,
                                          connection_bounds,
                                          connection_params)

        gj_probs = []
        gj_distances = []
        gids_a = []
        gids_b = []
        for gid, v in viewitems(gj_prob_dict):
            if gid % size == rank:
                (nngids, nndists, nnprobs) = v
                gids_a.append(np.full(nngids.shape, gid, np.int32))
                gids_b.append(nngids)
                gj_probs.append(nnprobs)
                gj_distances.append(nndists)

        gids_a = np.concatenate(gids_a)
        gids_b = np.concatenate(gids_b)
        gj_probs = np.concatenate(gj_probs)
        gj_probs = gj_probs / gj_probs.sum()
        gj_distances = np.concatenate(gj_distances)
        gids_a = np.asarray(gids_a, dtype=np.uint32)
        gids_b = np.asarray(gids_b, dtype=np.uint32)

        cell_dict_a = {}
        selection_a = set(gids_a)
        if rank == 0:
            logger.info('Reading tree selection of population %s (%d cells)...' % (pp[0], len(selection_a)))
        (tree_iter_a, _) = read_tree_selection(forest_path, population_a, list(selection_a))
        for (gid, tree_dict) in tree_iter_a:
            cell_dict_a[gid] = cells.make_neurotree_cell(template_class_a, neurotree_dict=tree_dict, gid=gid)

        cell_dict_b = {}
        selection_b = set(gids_b)
        if rank == 0:
            logger.info('Reading tree selection of population %s (%d cells)...' % (pp[1], len(selection_b)))
        (tree_iter_b, _) = read_tree_selection(forest_path, population_b, list(selection_b))
        for (gid, tree_dict) in tree_iter_b:
            cell_dict_b[gid] = cells.make_neurotree_cell(template_class_b, neurotree_dict=tree_dict, gid=gid)

        if rank == 0:
            logger.info('Generating gap junction pairs between populations %s and %s...' % pp)

        gj_dict = {}
        count = generate_gap_junctions(connection_prob, coupling_coeffs, coupling_params,
                                       ranstream_gj, gids_a, gids_b, gj_probs, gj_distances,
                                       cell_dict_a, cell_dict_b,
                                       gj_dict)

        gj_graph_dict = {pp[0]: {pp[1]: gj_dict}}

        if not dry_run:
            append_graph(connectivity_path, gj_graph_dict, io_size=io_size, comm=comm)

        total_count += count

    global_count = comm.gather(total_count, root=0)
    if rank == 0:
        logger.info(
            '%i ranks took %i s to generate %i edges' % (comm.size, time.time() - start_time, np.sum(global_count)))
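filter_by_distance is not shown above; below is a hypothetical, simplified stand-in that pairs each source gid with the targets inside a distance bound and assigns a Gaussian distance-dependent probability, returning the same gid -> (nngids, nndists, nnprobs) layout consumed above (parameter meanings are assumptions):

import numpy as np

def filter_by_distance_toy(gids_a, coords_a, gids_b, coords_b,
                           max_dist=100.0, sigma=50.0):
    result = {}
    for gid, xyz in zip(gids_a, coords_a):
        d = np.linalg.norm(coords_b - xyz, axis=1)
        sel = (d > 0.) & (d <= max_dist)   # exclude self-pairs
        if np.any(sel):
            probs = np.exp(-(d[sel] / sigma) ** 2)
            result[gid] = (gids_b[sel], d[sel], probs)
    return result

gids = np.arange(4, dtype=np.uint32)
coords = np.random.RandomState(0).uniform(0., 100., size=(4, 3))
print(filter_by_distance_toy(gids, coords, gids, coords))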