Code example #1
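All of the snippets below are excerpts and omit their imports. A minimal sketch of the common preamble they assume (mpi4py, numpy, pandas, and the NeuroH5 I/O functions; individual examples may additionally need project-specific imports such as dentate's Env and utils):

# Common preamble assumed by the examples below (a sketch, not part of the
# original snippets):
from mpi4py import MPI
import numpy as np
import pandas as pd
from neuroh5.io import (read_population_ranges, read_population_names,
                        read_cell_attributes, read_cell_attribute_selection,
                        bcast_cell_attributes, scatter_read_cell_attributes,
                        append_cell_attributes, NeuroH5CellAttrGen,
                        NeuroH5TreeGen)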
def main(population, forest_path, forest_measurement_namespace, attr_name,
         selection_path):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    population_ranges = read_population_ranges(forest_path)[0]

    selection = []
    with open(selection_path, 'r') as f:
        for line in f:
            selection.append(int(line))

    columns = [attr_name]
    df_dict = {}
    it = read_cell_attribute_selection(forest_path,
                                       population,
                                       namespace=forest_measurement_namespace,
                                       selection=selection)

    for cell_gid, meas_dict in it:
        cell_attr = meas_dict[attr_name]
        df_dict[cell_gid] = [np.sum(cell_attr)]

    df = pd.DataFrame.from_dict(df_dict, orient='index', columns=columns)
    df = df.reindex(selection)
    df.to_csv('tree.%s.%s.csv' % (attr_name, population))
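For reference, read_population_ranges returns a pair: a dictionary mapping each population name to its (start gid, count) range, and the total number of cells. A sketch of the return structure, with hypothetical population names and sizes:

# population_ranges, n_total = read_population_ranges('forest.h5')
# population_ranges -> {'GC': (0, 1000), 'MC': (1000, 100)}  # name -> (start gid, count)
# n_total           -> 1100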
Code example #2
def main(coords_path, coords_namespace):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    print('Allocated %i ranks' % size)

    population_ranges = read_population_ranges(coords_path)[0]
    print(population_ranges)

    soma_coords = {}
    for population in population_ranges.keys():

        print('Population %s' % population)
        it = read_cell_attributes(coords_path,
                                  population,
                                  namespace=coords_namespace)

        print('it = %s' % str(it))
        for cell_gid, coords_dict in it:

            # attribute values arrive in a plain dict, so index it rather
            # than use getattr/hasattr
            cell_u = coords_dict['U Coordinate']
            cell_v = coords_dict['V Coordinate']

            print('Rank %i: gid = %i u = %f v = %f' %
                  (rank, cell_gid, cell_u, cell_v))
Code example #3
def main(coords_path, coords_namespace):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    print('Allocated %i ranks' % size)
    sys.stdout.flush()

    population_ranges = read_population_ranges(coords_path)[0]
    
    soma_coords = {}
    for population in ['GC']:


        attr_iter = bcast_cell_attributes(coords_path, population, namespace=coords_namespace, 
                                          root=0, mask=set(['U Coordinate', 'V Coordinate', 'L Coordinate']),
                                          comm=comm)
        
        for cell_gid, coords_dict in attr_iter:

            cell_u = coords_dict['U Coordinate']
            cell_v = coords_dict['V Coordinate']
                
            print('Rank %i: gid = %i u = %f v = %f' % (rank, cell_gid, cell_u, cell_v))
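In contrast to the per-rank read in example #2, bcast_cell_attributes reads the namespace on the root rank and broadcasts it, so every rank iterates over the same full set of (gid, attributes) pairs. A sketch of that property (population and namespace names are example values):

# attr_iter = bcast_cell_attributes(coords_path, 'GC',
#                                   namespace='Generated Coordinates',
#                                   root=0, comm=comm)
# n = sum(1 for _ in attr_iter)   # identical on every rank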
Code example #4
def main(config, config_prefix, population, gid, ref_axis, input_file, template_name, output_file, dry_run, verbose):
    
    utils.config_logging(verbose)
    logger = utils.get_script_logger(os.path.basename(__file__))

    h.load_file("nrngui.hoc")
    h.load_file("import3d.hoc")

    env = Env(config_file=config, config_prefix=config_prefix)
    swc_type_defs = env.SWC_Types

    if not os.path.isfile(output_file):
        io_utils.make_h5types(env, output_file)

    (forest_pop_ranges, _)  = read_population_ranges(output_file)
    (forest_population_start, forest_population_count) = forest_pop_ranges[population]
    forest_population_end = forest_population_start + forest_population_count
    h.load_file(input_file)
    cell = getattr(h, template_name)(0, 0)
    if verbose:
        h.topology()
    tree_dict = export_swc_dict(cell, ref_axis=ref_axis)

    # valid gids for this population are [start, start + count)
    if (gid < forest_population_start) or (gid >= forest_population_end):
        gid = forest_population_start
    trees_dict = { gid : tree_dict }

    logger.info(pprint.pformat(trees_dict))

    if not dry_run:
        append_cell_trees(output_file, population, trees_dict)
Code example #5
def main(population, features_path, features_namespace, extra_columns):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    population_ranges = read_population_ranges(features_path)[0]

    soma_coords = {}

    extra_columns_list = extra_columns.split(",")
    columns = ['Field Width', 'X Offset', 'Y Offset'] + extra_columns_list
    df_dict = {}
    it = read_cell_attributes(features_path,
                              population,
                              namespace=features_namespace)

    for cell_gid, features_dict in it:
        cell_field_width = features_dict['Field Width'][0]
        cell_xoffset = features_dict['X Offset'][0]
        cell_yoffset = features_dict['Y Offset'][0]
        # read the requested extra attributes as well, so that each row has
        # as many values as there are declared columns (assumes each extra
        # column is stored in the namespace as a one-element array)
        extra_values = [features_dict[col][0] for col in extra_columns_list]

        df_dict[cell_gid] = [cell_field_width, cell_xoffset,
                             cell_yoffset] + extra_values

    df = pd.DataFrame.from_dict(df_dict, orient='index', columns=columns)
    df = df.reindex(sorted(df_dict.keys()))
    df.to_csv('features.%s.csv' % population)
Code example #6
def main(population, features_path, features_namespace, selection_path):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    population_ranges = read_population_ranges(features_path)[0]
    
    soma_coords = {}

    selection = []
    with open(selection_path, 'r') as f:
        for line in f:
            selection.append(int(line))

    columns = ['Field Width', 'X Offset', 'Y Offset']
    df_dict = {}
    it = read_cell_attribute_selection(features_path, population, 
                                       namespace=features_namespace, 
                                       selection=selection)

    for cell_gid, features_dict in it:
        cell_field_width = features_dict['Field Width'][0]
        cell_xoffset = features_dict['X Offset'][0]
        cell_yoffset = features_dict['Y Offset'][0]
        
        df_dict[cell_gid] = [cell_field_width, cell_xoffset, cell_yoffset]

        
    df = pd.DataFrame.from_dict(df_dict, orient='index', columns=columns)
    df = df.reindex(selection)
    df.to_csv('features.%s.csv' % population)
Code example #7
def main(config, config_prefix, population, input_file, output_file, dry_run,
         verbose):

    utils.config_logging(verbose)
    logger = utils.get_script_logger(os.path.basename(__file__))

    h.load_file("nrngui.hoc")
    h.load_file("import3d.hoc")

    env = Env(config_file=config, config_prefix=config_prefix)
    swc_type_defs = env.SWC_Types

    if not os.path.isfile(output_file):
        io_utils.make_h5types(env, output_file)

    (forest_pop_ranges, _) = read_population_ranges(output_file)
    (forest_population_start,
     forest_population_count) = forest_pop_ranges[population]

    h.load_file(input_file)
    if verbose:
        h.topology()
    tree_dict = export_swc_dict()

    trees_dict = {0: tree_dict}

    logger.info(pprint.pformat(trees_dict))

    if not dry_run:
        append_cell_trees(output_file, population, trees_dict)
Code example #8
def main(coords_path, coords_namespace):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    print('Allocated %i ranks' % size)

    population_ranges = read_population_ranges(coords_path)[0]
    print(population_ranges)

    soma_coords = {}
    for population in sorted(population_ranges.keys()):

        print('Population %s' % population)
        it, tuple_info = read_cell_attributes(coords_path,
                                              population,
                                              namespace=coords_namespace,
                                              return_type='tuple')

        u_index = tuple_info['U Coordinate']
        v_index = tuple_info['V Coordinate']

        for cell_gid, coords_tuple in it:
            cell_u = coords_tuple[u_index]
            cell_v = coords_tuple[v_index]

            print('Rank %i: gid = %i u = %f v = %f' %
                  (rank, cell_gid, cell_u, cell_v))
Code example #9
File: env.py Project: pramodk/dentate
    def load_celltypes(self):
        """

        :return:
        """
        rank = self.comm.Get_rank()
        size = self.comm.Get_size()
        celltypes = self.celltypes
        typenames = sorted(celltypes.keys())

        if rank == 0:
            self.logger.info('env.data_file_path = %s' %
                             str(self.data_file_path))

        (population_ranges,
         _) = read_population_ranges(self.data_file_path, self.comm)
        if rank == 0:
            self.logger.info('population_ranges = %s' % str(population_ranges))

        for k in typenames:
            population_range = population_ranges.get(k, None)
            if population_range is not None:
                celltypes[k]['start'] = population_ranges[k][0]
                celltypes[k]['num'] = population_ranges[k][1]
                if 'mechanism file' in celltypes[k]:
                    celltypes[k]['mech_file_path'] = '%s/%s' % (
                        self.config_prefix, celltypes[k]['mechanism file'])
                    mech_dict = read_from_yaml(celltypes[k]['mech_file_path'])
                    celltypes[k]['mech_dict'] = mech_dict
                if 'synapses' in celltypes[k]:
                    synapses_dict = celltypes[k]['synapses']
                    if 'weights' in synapses_dict:
                        weights_config = synapses_dict['weights']
                        if isinstance(weights_config, list):
                            weights_dicts = weights_config
                        else:
                            weights_dicts = [weights_config]
                        for weights_dict in weights_dicts:
                            if 'expr' in weights_dict:
                                expr = weights_dict['expr']
                                parameter = weights_dict['parameter']
                                const = weights_dict.get('const', {})
                                clos = ExprClosure(parameter, expr, const)
                                weights_dict['closure'] = clos
                        synapses_dict['weights'] = weights_dicts

        population_names = read_population_names(self.data_file_path,
                                                 self.comm)
        if rank == 0:
            self.logger.info('population_names = %s' % str(population_names))
        self.cell_attribute_info = read_cell_attribute_info(
            self.data_file_path, population_names, comm=self.comm)

        if rank == 0:
            self.logger.info('attribute info: %s' %
                             str(self.cell_attribute_info))
Code example #10
File: graphlib.py Project: soltesz-lab/neuroh5
def make_node_rank_map(comm, filepath, iosize):

    size = comm.Get_size()

    (_, n_nodes) = read_population_ranges(filepath, comm=comm)

    node_rank_map = {}
    for i in range(0, n_nodes):
        node_rank_map[i] = i % size

    return (node_rank_map, n_nodes)
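This helper assigns gids to ranks round-robin. A usage sketch (the file name is hypothetical):

# node_rank_map, n_nodes = make_node_rank_map(MPI.COMM_WORLD, 'cells.h5', 1)
# with 4 ranks: node_rank_map -> {0: 0, 1: 1, 2: 2, 3: 3, 4: 0, 5: 1, ...}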
Code example #11
def main(config, template_path, prototype_gid, prototype_path, forest_path, population, io_size, verbose):
    """

    :param config:
    :param template_path:
    :param prototype_gid:
    :param prototype_path:
    :param forest_path:
    :param population:
    :param io_size:
    """

    utils.config_logging(verbose)
    logger = utils.get_script_logger(script_name)
        
    comm = MPI.COMM_WORLD
    rank = comm.rank
    
    env = Env(comm=MPI.COMM_WORLD, config_file=config, template_paths=template_path)
    configure_hoc_env(env)
    
    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)
    
    layers = env.layers
    layer_idx_dict = { layers[layer_name]: layer_name 
                       for layer_name in ['GCL', 'IML', 'MML', 'OML', 'Hilus'] }

    (tree_iter, _) = read_tree_selection(prototype_path, population, selection=[prototype_gid])
    (_, prototype_morph_dict) = next(tree_iter)
    prototype_x = prototype_morph_dict['x']
    prototype_y = prototype_morph_dict['y']
    prototype_z = prototype_morph_dict['z']
    prototype_xyz = (prototype_x, prototype_y, prototype_z)

    (pop_ranges, _) = read_population_ranges(forest_path, comm=comm)
    start_time = time.time()

    (population_start, _) = pop_ranges[population]
    template_class = load_cell_template(env, population, bcast_template=True)
    # Alternative: read all trees at once with
    #   trees, _ = scatter_read_trees(forest_path, population, io_size=io_size, comm=comm, topology=True)
    #   for gid, morph_dict in trees: ...
    for gid, morph_dict in NeuroH5TreeGen(forest_path, population, io_size=io_size, cache_size=1, comm=comm, topology=True):
        if gid is not None:
            logger.info('Rank %i gid: %i' % (rank, gid))
            secnodes_dict = morph_dict['section_topology']['nodes']
            vx = morph_dict['x']
            vy = morph_dict['y']
            vz = morph_dict['z']
            if compare_points((vx,vy,vz), prototype_xyz):
                logger.info('Possible match: gid %i' % gid)
    MPI.Finalize()
Code example #12
File: graphlib.py Project: soltesz-lab/neuroh5
def load_graph_networkit(comm, input_file):

    (_, n_nodes) = read_population_ranges(input_file, comm=comm)
    nhg = read_graph(input_file, comm=comm)
    g = Graph(n_nodes, False, True)

    for (presyn, prjs) in list(nhg.items()):
        for (postsyn, edges) in list(prjs.items()):
            sources = edges[0]
            destinations = edges[1]
            for (src, dst) in zip(sources, destinations):
                g.addEdge(src, dst)

    return g
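The nested dictionary returned by read_graph is traversed here as presynaptic population -> postsynaptic population -> parallel edge arrays, which load_graph_networkit flattens into a single NetworKit graph. A comment sketch of the shape (population names are hypothetical):

# nhg -> {'MPP':                         # presynaptic population
#           {'GC':                       # postsynaptic population
#              (sources, destinations)}} # parallel arrays of node ids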
Code example #13
def main(config, stimulus_id, template_path, coords_path, output_path,
         distances_namespace, io_size, chunk_size, value_chunk_size,
         cache_size, write_size, verbose, dry_run):
    """

    :param config:
    :param coords_path:
    :param distances_namespace:
    :param io_size:
    :param chunk_size:
    :param value_chunk_size:
    :param cache_size:
    :param write_size:
    :param dry_run:
    """
    comm = MPI.COMM_WORLD
    rank = comm.rank

    env = Env(comm=comm, config_file=config, template_paths=template_path)
    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    if (not dry_run) and (rank == 0):
        if not os.path.isfile(output_path):
            input_file = h5py.File(coords_path, 'r')
            output_file = h5py.File(output_path, 'w')
            input_file.copy('/H5Types', output_file)
            input_file.close()
            output_file.close()
    comm.barrier()
    population_ranges = read_population_ranges(coords_path, comm)[0]
    context.update(locals())

    # Assign normalized u, v coordinates
    gid_normed_distances = assign_cells_to_normalized_position()
    # Determine which module a cell belongs to, based on its normalized u position
    gid_module_assignments = assign_cells_to_module(gid_normed_distances,
                                                    p_width=0.75, displace=0.0)
    # Determine 1) whether a cell is active and 2) how many fields it has
    total_num_fields, gid_attributes = determine_cell_participation(
        gid_module_assignments)
    # Determine additional cell properties (lambda, field width, orientation,
    # jitter, and rate map); this also builds the data structure
    # ({<pop>: {<cell type>: <cells>}}) containing all cells
    cell_attributes = build_cell_attributes(gid_attributes,
                                            gid_normed_distances,
                                            total_num_fields)

    if not dry_run and rank == 0:
        save_to_h5(cell_attributes)
Code example #14
File: project_somas.py Project: soltesz-lab/dentate
def main(config, coords_path, coords_namespace, resample, resolution, populations, projection_depth, io_size, chunk_size, value_chunk_size, cache_size, verbose):

    utils.config_logging(verbose)
    logger = utils.get_script_logger(script_name)
    
    comm = MPI.COMM_WORLD
    rank = comm.rank

    env = Env(comm=comm, config_file=config)

    soma_coords = {}

    if rank == 0:
        logger.info('Reading population coordinates...')

    rotate = env.geometry['Parametric Surface']['Rotation']
    min_l = float('inf')
    max_l = 0.0
    population_ranges = read_population_ranges(coords_path)[0]
    population_extents = {}
    for population in population_ranges:
        min_extent = env.geometry['Cell Layers']['Minimum Extent'][population]
        max_extent = env.geometry['Cell Layers']['Maximum Extent'][population]
        min_l = min(min_extent[2], min_l)
        max_l = max(max_extent[2], max_l)
        population_extents[population] = (min_extent, max_extent)
        
    for population in populations:
        coords = bcast_cell_attributes(coords_path, population, 0, \
                                       namespace=coords_namespace)

        soma_coords[population] = {
            k: (v['U Coordinate'][0], v['V Coordinate'][0],
                v['L Coordinate'][0])
            for (k, v) in coords
        }
        del coords
        gc.collect()
    
    output_path = coords_path
    soma_coords = icp_transform(comm, soma_coords, projection_depth, population_extents, \
                                populations=populations, rotate=rotate, verbose=verbose)
    
    for population in populations:

        if rank == 0:
            logger.info('Writing transformed coordinates for population %s...' % population)

        append_cell_attributes(output_path, population, soma_coords[population],
                               namespace='Soma Projections', comm=comm,
                               io_size=io_size, chunk_size=chunk_size,
                               value_chunk_size=value_chunk_size, cache_size=cache_size)
Code example #15
def main(population, coords_path, coords_namespace, distances_namespace,
         selection_path):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    population_ranges = read_population_ranges(coords_path)[0]

    soma_coords = {}

    selection = []
    with open(selection_path, 'r') as f:
        for line in f:
            selection.append(int(line))

    columns = ['U', 'V', 'L']
    df_dict = {}
    it = read_cell_attribute_selection(coords_path,
                                       population,
                                       namespace=coords_namespace,
                                       selection=selection)

    for cell_gid, coords_dict in it:
        cell_u = coords_dict['U Coordinate'][0]
        cell_v = coords_dict['V Coordinate'][0]
        cell_l = coords_dict['L Coordinate'][0]

        df_dict[cell_gid] = [cell_u, cell_v, cell_l]

    if distances_namespace is not None:
        columns.extend(['U Distance', 'V Distance'])
        it = read_cell_attribute_selection(coords_path,
                                           population,
                                           namespace=distances_namespace,
                                           selection=selection)
        for cell_gid, distances_dict in it:
            cell_ud = distances_dict['U Distance'][0]
            cell_vd = distances_dict['V Distance'][0]

            df_dict[cell_gid].extend([cell_ud, cell_vd])

    df = pd.DataFrame.from_dict(df_dict, orient='index', columns=columns)
    df = df.reindex(selection)
    df.to_csv('coords.%s.csv' % population)
Code example #16
def main(config, coords_path, io_size, chunk_size, value_chunk_size, verbose):

    utils.config_logging(verbose)
    logger = utils.get_script_logger(__file__)
    
    comm = MPI.COMM_WORLD
    rank = comm.rank

    env = Env(comm=comm, config_file=config)
    output_path = coords_path

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    source_population_ranges = read_population_ranges(coords_path)[0]
    source_populations = list(source_population_ranges.keys())

    for population in source_populations:
        if rank == 0:
            logger.info('population: %s' % population)
        soma_coords = bcast_cell_attributes(0, coords_path, population,
                                            namespace='Interpolated Coordinates', comm=comm)
        u_coords = []
        gids = []
        for gid, attrs in viewitems(soma_coords):
            u_coords.append(attrs['U Coordinate'])
            gids.append(gid)
        u_coordv = np.asarray(u_coords, dtype=np.float32)
        gidv     = np.asarray(gids, dtype=np.uint32)
        sort_idx = np.argsort(u_coordv, axis=0)
        offset   = source_population_ranges[population][0]
        sorted_coords_dict = {}
        for i in range(0,sort_idx.size):
            sorted_coords_dict[offset+i] = soma_coords[gidv[sort_idx[i][0]]]
        
        append_cell_attributes(coords_path, population, sorted_coords_dict,
                                namespace='Sorted Coordinates', io_size=io_size, chunk_size=chunk_size,
                                value_chunk_size=value_chunk_size, comm=comm)
Code example #17
def main(coords_path, coords_namespace, io_size, cache_size):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    print('Allocated %i ranks' % size)

    population_ranges = read_population_ranges(coords_path)[0]
    print(population_ranges)

    soma_coords = {}
    for population in population_ranges.keys():

        attr_iter = NeuroH5CellAttrGen(coords_path, population, namespace=coords_namespace, \
                                        comm=comm, io_size=io_size, cache_size=cache_size)

        i = 0
        for cell_gid, coords_dict in attr_iter:

            if cell_gid is not None:
                print('coords_dict: ', coords_dict)
                cell_u = coords_dict['U Coordinate']
                cell_v = coords_dict['V Coordinate']

                print('Rank %i: gid = %i u = %f v = %f' %
                      (rank, cell_gid, cell_u, cell_v))
                if i > 10:
                    break
                i = i + 1

    if rank == 0:
        import h5py
        count = 0
        f = h5py.File(coords_path, 'r+')
        if 'test' in f:
            count = f['test'][()]
            del f['test']
        f['test'] = count + 1
        f.close()
    comm.barrier()
Code example #18
def main(coords_path, coords_namespace, io_size):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    print('Allocated %i ranks' % size)

    population_ranges = read_population_ranges(coords_path)[0]
    
    soma_coords = {}
    for population in ['GC']:

        attr_dict = scatter_read_cell_attributes(coords_path, population, namespaces=[coords_namespace], io_size=io_size)
        attr_iter = attr_dict[coords_namespace]
        
        for cell_gid, coords_dict in attr_iter:

            cell_u = coords_dict['U Coordinate']
            cell_v = coords_dict['V Coordinate']
                
            print('Rank %i: gid = %i u = %f v = %f' % (rank, cell_gid, cell_u, cell_v))
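scatter_read_cell_attributes is the distributed counterpart of the broadcast read in example #3: a set of I/O ranks reads the file and scatters the attributes, so each rank receives only its share of cells. The return value maps each requested namespace to that rank's (gid, attributes) pairs; a sketch (the namespace name is an example value):

# attr_dict = scatter_read_cell_attributes(coords_path, 'GC',
#                                          namespaces=['Generated Coordinates'],
#                                          io_size=4)
# attr_dict -> {'Generated Coordinates': <(gid, attrs) pairs local to this rank>}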
Code example #19
def main(syn_path, syn_namespace, io_size):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    print('Allocated %i ranks' % size)

    population_ranges = read_population_ranges(syn_path)[0]
    print(population_ranges)

    for population in population_ranges.keys():

        attr_dict = scatter_read_cell_attributes(syn_path,
                                                 population,
                                                 namespaces=[syn_namespace],
                                                 io_size=io_size)
        attr_iter = attr_dict[syn_namespace]

        # use a distinct loop variable so the outer attr_dict is not shadowed
        for cell_gid, cell_attr_dict in attr_iter:

            print('Rank %i: gid = %i syn attrs:' % (rank, cell_gid))
            pprint.pprint(cell_attr_dict)
Code example #20
File: measure_trees.py Project: pramodk/dentate
def main(config, template_path, output_path, forest_path, populations, io_size,
         chunk_size, value_chunk_size, cache_size, verbose):
    """

    :param config:
    :param template_path:
    :param forest_path:
    :param populations:
    :param io_size:
    :param chunk_size:
    :param value_chunk_size:
    :param cache_size:
    """

    utils.config_logging(verbose)
    logger = utils.get_script_logger(script_name)

    comm = MPI.COMM_WORLD
    rank = comm.rank

    env = Env(comm=MPI.COMM_WORLD,
              config_file=config,
              template_paths=template_path)
    h('objref nil, pc, templatePaths')
    h.load_file("nrngui.hoc")
    h.load_file("./templates/Value.hoc")
    h.xopen("./lib.hoc")
    h.pc = h.ParallelContext()

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    h.templatePaths = h.List()
    for path in env.templatePaths:
        h.templatePaths.append(h.Value(1, path))

    if output_path is None:
        output_path = forest_path

    if rank == 0:
        if not os.path.isfile(output_path):
            input_file = h5py.File(forest_path, 'r')
            output_file = h5py.File(output_path, 'w')
            input_file.copy('/H5Types', output_file)
            input_file.close()
            output_file.close()
    comm.barrier()

    (pop_ranges, _) = read_population_ranges(forest_path, comm=comm)
    start_time = time.time()
    for population in populations:
        logger.info('Rank %i population: %s' % (rank, population))
        count = 0
        (population_start, _) = pop_ranges[population]
        template_name = env.celltypes[population]['template']
        h.find_template(h.pc, h.templatePaths, template_name)
        template_class = getattr(h, template_name)
        measures_dict = {}
        for gid, morph_dict in NeuroH5TreeGen(forest_path,
                                              population,
                                              io_size=io_size,
                                              comm=comm,
                                              topology=True):
            if gid is not None:
                logger.info('Rank %i gid: %i' % (rank, gid))
                cell = cells.make_neurotree_cell(template_class,
                                                 neurotree_dict=morph_dict,
                                                 gid=gid)
                secnodes_dict = morph_dict['section_topology']['nodes']

                apicalidx = set(cell.apicalidx)
                basalidx = set(cell.basalidx)

                dendrite_area_dict = {k + 1: 0.0 for k in range(0, 4)}
                dendrite_length_dict = {k + 1: 0.0 for k in range(0, 4)}
                for (i, sec) in enumerate(cell.sections):
                    if (i in apicalidx) or (i in basalidx):
                        secnodes = secnodes_dict[i]
                        prev_layer = None
                        for seg in sec.allseg():
                            L = seg.sec.L
                            nseg = seg.sec.nseg
                            seg_l = L / nseg
                            seg_area = h.area(seg.x)
                            layer = cells.get_node_attribute(
                                'layer', morph_dict, seg.sec, secnodes, seg.x)
                            layer = layer if layer > 0 else (
                                prev_layer if prev_layer is not None else 1)
                            prev_layer = layer
                            dendrite_length_dict[layer] += seg_l
                            dendrite_area_dict[layer] += seg_area

                measures_dict[gid] = {
                    'dendrite_area': np.asarray(
                        [dendrite_area_dict[k]
                         for k in sorted(dendrite_area_dict.keys())],
                        dtype=np.float32),
                    'dendrite_length': np.asarray(
                        [dendrite_length_dict[k]
                         for k in sorted(dendrite_length_dict.keys())],
                        dtype=np.float32)
                }

                del cell
                count += 1
            else:
                logger.info('Rank %i gid is None' % rank)
        append_cell_attributes(output_path,
                               population,
                               measures_dict,
                               namespace='Tree Measurements',
                               comm=comm,
                               io_size=io_size,
                               chunk_size=chunk_size,
                               value_chunk_size=value_chunk_size,
                               cache_size=cache_size)
    MPI.Finalize()
Code example #21
def main(config, config_prefix, selectivity_path, selectivity_namespace,
         arena_id, populations, n_trials, io_size, chunk_size,
         value_chunk_size, cache_size, write_size, output_path,
         spikes_namespace, spike_train_attr_name, gather, debug, plot,
         show_fig, save_fig, save_fig_dir, font_size, fig_format, verbose,
         dry_run):
    """

    :param config: str (.yaml file name)
    :param config_prefix: str (path to dir)
    :param selectivity_path: str (path to file)
    :param selectivity_namespace: str
    :param arena_id: str
    :param populations: str
    :param n_trials: int
    :param io_size: int
    :param chunk_size: int
    :param value_chunk_size: int
    :param cache_size: int
    :param write_size: int
    :param output_path: str (path to file)
    :param spikes_namespace: str
    :param spike_train_attr_name: str
    :param gather: bool
    :param debug: bool
    :param plot: bool
    :param show_fig: bool
    :param save_fig: str (base file name)
    :param save_fig_dir:  str (path to dir)
    :param font_size: float
    :param fig_format: str
    :param verbose: bool
    :param dry_run: bool
    """
    comm = MPI.COMM_WORLD
    rank = comm.rank

    config_logging(verbose)

    env = Env(comm=comm,
              config_file=config,
              config_prefix=config_prefix,
              template_paths=None)
    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    if save_fig is not None:
        plot = True

    if plot:
        from dentate.plot import default_fig_options

        fig_options = copy.copy(default_fig_options)
        fig_options.saveFigDir = save_fig_dir
        fig_options.fontSize = font_size
        fig_options.figFormat = fig_format
        fig_options.showFig = show_fig

    population_ranges = read_population_ranges(selectivity_path, comm)[0]

    if len(populations) == 0:
        populations = sorted(population_ranges.keys())

    if arena_id not in env.stimulus_config['Arena']:
        raise RuntimeError(
            'Arena with ID: %s not specified by configuration at file path: %s'
            % (arena_id, config_prefix + '/' + config))
    arena = env.stimulus_config['Arena'][arena_id]

    valid_selectivity_namespaces = dict()
    if rank == 0:
        for population in populations:
            if population not in population_ranges:
                raise RuntimeError(
                    'generate_input_spike_trains: specified population: %s not found in '
                    'provided selectivity_path: %s' %
                    (population, selectivity_path))
            if population not in env.stimulus_config[
                    'Selectivity Type Probabilities']:
                raise RuntimeError(
                    'generate_input_spike_trains: selectivity type not specified for '
                    'population: %s' % population)
            valid_selectivity_namespaces[population] = []
            with h5py.File(selectivity_path, 'r') as selectivity_f:
                for this_namespace in selectivity_f['Populations'][population]:
                    if 'Selectivity %s' % arena_id in this_namespace:
                        valid_selectivity_namespaces[population].append(
                            this_namespace)
                if len(valid_selectivity_namespaces[population]) == 0:
                    raise RuntimeError(
                        'generate_input_spike_trains: no selectivity data in arena: %s found '
                        'for specified population: %s in provided selectivity_path: %s'
                        % (arena_id, population, selectivity_path))
    comm.barrier()

    valid_selectivity_namespaces = comm.bcast(valid_selectivity_namespaces,
                                              root=0)
    selectivity_type_names = dict(
        (val, key) for (key, val) in viewitems(env.selectivity_types))

    equilibrate = get_equilibration(env)

    for trajectory_id in sorted(arena.trajectories.keys()):
        trajectory = arena.trajectories[trajectory_id]
        t, x, y, d = None, None, None, None
        if rank == 0:
            t, x, y, d = generate_linear_trajectory(
                trajectory,
                temporal_resolution=env.stimulus_config['Temporal Resolution'],
                equilibration_duration=env.
                stimulus_config['Equilibration Duration'])

        t = comm.bcast(t, root=0)
        x = comm.bcast(x, root=0)
        y = comm.bcast(y, root=0)
        d = comm.bcast(d, root=0)

        trajectory = t, x, y, d
        trajectory_namespace = 'Trajectory %s %s' % (arena_id, trajectory_id)
        output_namespace = '%s %s %s' % (spikes_namespace, arena_id,
                                         trajectory_id)

        if not dry_run and rank == 0:
            if output_path is None:
                raise RuntimeError(
                    'generate_input_spike_trains: missing output_path')
            if not os.path.isfile(output_path):
                with h5py.File(output_path, 'w') as output_file:
                    input_file = h5py.File(selectivity_path, 'r')
                    input_file.copy('/H5Types', output_file)
                    input_file.close()
            with h5py.File(output_path, 'a') as f:
                if trajectory_namespace not in f:
                    logger.info('Appending %s datasets to file at path: %s' %
                                (trajectory_namespace, output_path))
                    group = f.create_group(trajectory_namespace)
                    for key, value in zip(['t', 'x', 'y', 'd'], [t, x, y, d]):
                        group.create_dataset(key, data=value, dtype='float32')
                else:
                    loaded_t = f[trajectory_namespace]['t'][:]
                    if len(t) != len(loaded_t):
                        raise RuntimeError(
                            'generate_input_spike_trains: file at path: %s already contains the '
                            'namespace: %s, but the dataset sizes are inconsistent with the provided '
                            'input configuration' %
                            (output_path, trajectory_namespace))
        comm.barrier()

        if rank == 0:
            context.update(locals())

        spike_hist_sum_dict = {}
        spike_hist_resolution = 1000

        write_every = max(1, int(math.floor(write_size / comm.size)))
        for population in populations:

            this_spike_hist_sum = defaultdict(
                lambda: np.zeros(spike_hist_resolution))

            for this_selectivity_namespace in sorted(
                    valid_selectivity_namespaces[population]):

                if rank == 0:
                    logger.info(
                        'Generating input source spike trains for population %s [%s]...'
                        % (population, this_selectivity_namespace))

                start_time = time.time()
                selectivity_attr_gen = NeuroH5CellAttrGen(
                    selectivity_path,
                    population,
                    namespace=this_selectivity_namespace,
                    comm=comm,
                    io_size=io_size,
                    cache_size=cache_size)
                spikes_attr_dict = dict()
                gid_count = 0
                for iter_count, (gid, selectivity_attr_dict
                                 ) in enumerate(selectivity_attr_gen):
                    if gid is not None:
                        context.update(locals())
                        spikes_attr_dict[gid] = \
                            generate_input_spike_trains(env, selectivity_type_names, trajectory,
                                                        gid, selectivity_attr_dict, n_trials=n_trials,
                                                        spike_train_attr_name=spike_train_attr_name,
                                                        spike_hist_resolution=spike_hist_resolution,
                                                        equilibrate=equilibrate,
                                                        spike_hist_sum=this_spike_hist_sum,
                                                        debug=(debug_callback, context) if debug else False)
                        gid_count += 1

                    if (iter_count > 0 and iter_count % write_every
                            == 0) or (debug and iter_count == 10):
                        total_gid_count = comm.reduce(gid_count,
                                                      root=0,
                                                      op=MPI.SUM)
                        if rank == 0:
                            logger.info(
                                'generated spike trains for %i %s cells' %
                                (total_gid_count, population))

                        if not dry_run:
                            append_cell_attributes(
                                output_path,
                                population,
                                spikes_attr_dict,
                                namespace=output_namespace,
                                comm=comm,
                                io_size=io_size,
                                chunk_size=chunk_size,
                                value_chunk_size=value_chunk_size)
                        del spikes_attr_dict
                        spikes_attr_dict = dict()

                        if debug and iter_count == 10:
                            break

            if not dry_run:
                append_cell_attributes(output_path,
                                       population,
                                       spikes_attr_dict,
                                       namespace=output_namespace,
                                       comm=comm,
                                       io_size=io_size,
                                       chunk_size=chunk_size,
                                       value_chunk_size=value_chunk_size)
                del spikes_attr_dict
                spikes_attr_dict = dict()
            process_time = time.time() - start_time

            total_gid_count = comm.reduce(gid_count, root=0, op=MPI.SUM)
            if rank == 0:
                logger.info(
                    'generated spike trains for %i %s cells in %.2f s' %
                    (total_gid_count, population, process_time))

            if gather:
                spike_hist_sum_dict[population] = this_spike_hist_sum

        if gather:
            this_spike_hist_sum = dict([
                (key, dict(val.items()))
                for key, val in viewitems(spike_hist_sum_dict)
            ])
            spike_hist_sum = comm.gather(this_spike_hist_sum, root=0)

            if rank == 0:
                merged_spike_hist_sum = defaultdict(lambda: defaultdict(
                    lambda: np.zeros(spike_hist_resolution)))
                for each_spike_hist_sum in spike_hist_sum:
                    for population in each_spike_hist_sum:
                        for selectivity_type_name in each_spike_hist_sum[
                                population]:
                            merged_spike_hist_sum[population][selectivity_type_name] = \
                                np.add(merged_spike_hist_sum[population][selectivity_type_name],
                                       each_spike_hist_sum[population][selectivity_type_name])

                if plot:

                    if save_fig is not None:
                        fig_options.saveFig = save_fig

                        plot_summed_spike_psth(t, trajectory_id,
                                               selectivity_type_name,
                                               merged_spike_hist_sum,
                                               spike_hist_resolution,
                                               fig_options)

        comm.barrier()

    if is_interactive and rank == 0:
        context.update(locals())
Code example #22
def main(stimulus_path, input_stimulus_namespace, output_stimulus_namespace, io_size, chunk_size, value_chunk_size,
         cache_size, seed_offset, trajectory_id, debug):
    """
    :param stimulus_path: str
    :param input_stimulus_namespace: str
    :param output_stimulus_namespace: str
    :param io_size: int
    :param chunk_size: int
    :param value_chunk_size: int
    :param cache_size: int
    :param seed_offset: int
    :param trajectory_id: int
    :param debug: bool
    """
    comm = MPI.COMM_WORLD
    rank = comm.rank

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        print('%i ranks have been allocated' % comm.size)
    sys.stdout.flush()

    seed_offset *= 2e6
    np.random.seed(int(seed_offset))

    population_ranges = read_population_ranges(comm, stimulus_path)[0]

    input_stimulus_namespace += ' ' + str(trajectory_id)
    output_stimulus_namespace += ' ' + str(trajectory_id)

    for population in ['LPP']:
        population_start = population_ranges[population][0]
        population_count = population_ranges[population][1]

        if rank == 0:
            random_gids = np.arange(0, population_count)
            np.random.shuffle(random_gids)
        else:
            random_gids = None
        random_gids = comm.bcast(random_gids, root=0)

        count = 0
        start_time = time.time()
        attr_gen = NeuroH5CellAttrGen(comm, stimulus_path, population, io_size=io_size,
                                      cache_size=cache_size, namespace=input_stimulus_namespace)
        if debug:
            attr_gen_wrapper = (next(attr_gen) for i in range(2))
        else:
            attr_gen_wrapper = attr_gen
        for gid, stimulus_dict in attr_gen_wrapper:
            local_time = time.time()
            new_response_dict = {}
            if gid is not None:

                random_gid = random_gids[gid-population_start]
                new_response_dict[random_gid] = {'rate': stimulus_dict['rate'],
                                                 'spiketrain': np.asarray(stimulus_dict['spiketrain'],
                                                                          dtype=np.float32),
                                                 'modulation': stimulus_dict['modulation'],
                                                 'peak_index': stimulus_dict['peak_index'] }

                print('Rank %i; source: %s; assigned spike trains for gid %i to gid %i in %.2f s' % \
                      (rank, population, gid, random_gid+population_start, time.time() - local_time))
                count += 1
            if not debug:
                append_cell_attributes(comm, stimulus_path, population, new_response_dict,
                                       namespace=output_stimulus_namespace,
                                       io_size=io_size, chunk_size=chunk_size,
                                       value_chunk_size=value_chunk_size)
            sys.stdout.flush()
            del new_response_dict
            gc.collect()

        global_count = comm.gather(count, root=0)
        if rank == 0:
            print('%i ranks randomized spike trains for %i cells in %.2f s' % (comm.size, np.sum(global_count),
                                                                               time.time() - start_time))
Code example #23
def main(config, forest_path, connectivity_namespace, coords_path, coords_namespace, io_size, chunk_size, value_chunk_size,
         cache_size, debug):
    """

    :param forest_path:
    :param connectivity_namespace:
    :param coords_path:
    :param coords_namespace:
    :param io_size:
    :param chunk_size:
    :param value_chunk_size:
    :param cache_size:
    """
    # troubleshooting
    if False:
        forest_path = '../morphologies/DGC_forest_connectivity_20170508.h5'
        coords_path = '../morphologies/dentate_Full_Scale_Control_coords_selectivity_20170615.h5'
        coords_namespace = 'Coordinates'
        io_size = -1
        chunk_size = 1000
        value_chunk_size = 1000
        cache_size = 50

    comm = MPI.COMM_WORLD
    rank = comm.rank  # The process ID (integer 0-3 for 4-process run)

    env = Env(comm=comm, config_file=config)

    connection_config = env.connection_config    
    proportions = connection_config.synapse_proportions
    layers      = connection_config.synapse_layers
    syn_types   = connection_config.synapse_types
    swc_types   = connection_config.synapse_locations

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        print('%i ranks have been allocated' % comm.size)
    sys.stdout.flush()

    start_time = time.time()

    soma_coords = {}
    source_populations = list(read_population_ranges(comm, coords_path)[0].keys())
    for population in source_populations:
        soma_coords[population] = bcast_cell_attributes(comm, 0, coords_path, population,
                                                            namespace=coords_namespace)

    target = 'GC'

    layer_set, swc_type_set, syn_type_set = set(), set(), set()
    for source in layers[target]:
        layer_set.update(layers[target][source])
        swc_type_set.update(swc_types[target][source])
        syn_type_set.update(syn_types[target][source])

    count = 0
    attr_gen = NeuroH5CellAttrGen(comm, forest_path, target, io_size=io_size, cache_size=cache_size,
                                  namespace='Synapse Attributes')
    if debug:
        attr_gen_wrapper = (next(attr_gen) for i in range(2))
    else:
        attr_gen_wrapper = attr_gen
    for target_gid, attributes_dict in attr_gen_wrapper:
        last_time = time.time()
        connection_dict = {}
        p_dict = {}
        source_gid_dict = {}
        if target_gid is None:
            print('Rank %i target gid is None' % rank)
        else:
            print('Rank %i received attributes for target: %s, gid: %i' % (rank, target, target_gid))
            synapse_dict = attributes_dict['Synapse_Attributes']
            connection_dict[target_gid] = {}
            local_np_random.seed(target_gid + connectivity_seed_offset)
            connection_dict[target_gid]['source_gid'] = np.array([], dtype='uint32')
            connection_dict[target_gid]['syn_id'] = np.array([], dtype='uint32')

            for layer in layer_set:
                for swc_type in swc_type_set:
                    for syn_type in syn_type_set:
                        sources, this_proportions = filter_sources(target, layer, swc_type, syn_type, connection_config)
                        if sources:
                            if rank == 0 and count == 0:
                                source_list_str = '[' + ', '.join(['%s' % xi for xi in sources]) + ']'
                                print('Connections to target: %s in layer: %i ' \
                                    '(swc_type: %i, syn_type: %i): %s' % \
                                    (target, layer, swc_type, syn_type, source_list_str))
                            p, source_gid = np.array([]), np.array([])
                            for source, this_proportion in zip(sources, this_proportions):
                                if source not in source_gid_dict:
                                    this_source_gid = list(soma_coords[source].keys())
                                    this_p = np.ones(len(this_source_gid)) / float(len(this_source_gid))
                                    source_gid_dict[source] = this_source_gid
                                    p_dict[source] = this_p
                                else:
                                    this_source_gid = source_gid_dict[source]
                                    this_p = p_dict[source]
                                p = np.append(p, this_p * this_proportion)
                                source_gid = np.append(source_gid, this_source_gid)
                            syn_indexes = filter_synapses(synapse_dict, layer, swc_type, syn_type)
                            connection_dict[target_gid]['syn_id'] = \
                                np.append(connection_dict[target_gid]['syn_id'],
                                          synapse_dict['syn_id'][syn_indexes]).astype('uint32', copy=False)
                            this_source_gid = local_np_random.choice(source_gid, len(syn_indexes), p=p)
                            connection_dict[target_gid]['source_gid'] = \
                                np.append(connection_dict[target_gid]['source_gid'],
                                          this_source_gid).astype('uint32', copy=False)
            count += 1
            print('Rank %i took %.2f s to compute connectivity for target: %s, gid: %i' % (rank,
                                                                                           time.time() - last_time,
                                                                                           target, target_gid))
            sys.stdout.flush()
        if not debug:
            append_cell_attributes(comm, forest_path, target, connection_dict,
                                   namespace=connectivity_namespace, io_size=io_size, chunk_size=chunk_size,
                                   value_chunk_size=value_chunk_size)
        sys.stdout.flush()
        del connection_dict
        del p_dict
        del source_gid_dict
        gc.collect()

    global_count = comm.gather(count, root=0)
    if rank == 0:
        print('%i ranks took took %.2f s to compute connectivity for %i cells' % (comm.size, time.time() - start_time,
                                                                                  np.sum(global_count)))
Code example #24
def main(arena_id, bin_sample_count, config, config_prefix, dataset_prefix,
         distances_namespace, distance_bin_extent, input_features_path,
         input_features_namespaces, populations, spike_input_path,
         spike_input_namespace, spike_input_attr, output_path, io_size,
         trajectory_id, write_selection, verbose):

    utils.config_logging(verbose)
    logger = utils.get_script_logger(os.path.basename(__file__))

    comm = MPI.COMM_WORLD
    rank = comm.rank

    env = Env(comm=comm,
              config_file=config,
              config_prefix=config_prefix,
              dataset_prefix=dataset_prefix,
              results_path=output_path,
              spike_input_path=spike_input_path,
              spike_input_namespace=spike_input_namespace,
              spike_input_attr=spike_input_attr,
              arena_id=arena_id,
              trajectory_id=trajectory_id)

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    pop_ranges, pop_size = read_population_ranges(env.connectivity_file_path,
                                                  comm=comm)

    distance_U_dict = {}
    distance_V_dict = {}
    range_U_dict = {}
    range_V_dict = {}

    selection_dict = defaultdict(set)

    comm0 = env.comm.Split(2 if rank == 0 else 0, 0)

    local_random = np.random.RandomState()
    local_random.seed(1000)

    if len(populations) == 0:
        populations = sorted(pop_ranges.keys())

    if rank == 0:
        for population in populations:
            distances = read_cell_attributes(env.data_file_path,
                                             population,
                                             namespace=distances_namespace,
                                             comm=comm0)

            soma_distances = {}
            if input_features_path is not None:
                num_fields_dict = {}
                for input_features_namespace in input_features_namespaces:
                    if arena_id is not None:
                        this_features_namespace = '%s %s' % (
                            input_features_namespace, arena_id)
                    else:
                        this_features_namespace = input_features_namespace
                    input_features_iter = read_cell_attributes(
                        input_features_path,
                        population,
                        namespace=this_features_namespace,
                        mask=set(['Num Fields']),
                        comm=comm0)
                    count = 0
                    for gid, attr_dict in input_features_iter:
                        num_fields_dict[gid] = attr_dict['Num Fields']
                        count += 1
                    logger.info(
                        'Read feature data from namespace %s for %i cells in population %s'
                        % (this_features_namespace, count, population))

                for (gid, v) in distances:
                    num_fields = num_fields_dict.get(gid, 0)
                    if num_fields > 0:
                        soma_distances[gid] = (v['U Distance'][0],
                                               v['V Distance'][0])
            else:
                for (gid, v) in distances:
                    soma_distances[gid] = (v['U Distance'][0],
                                           v['V Distance'][0])

            numitems = len(list(soma_distances.keys()))
            logger.info('read %s distances (%i elements)' %
                        (population, numitems))

            if numitems == 0:
                continue

            gid_array = np.asarray([gid for gid in soma_distances])
            distance_U_array = np.asarray(
                [soma_distances[gid][0] for gid in gid_array])
            distance_V_array = np.asarray(
                [soma_distances[gid][1] for gid in gid_array])

            U_min = np.min(distance_U_array)
            U_max = np.max(distance_U_array)
            V_min = np.min(distance_V_array)
            V_max = np.max(distance_V_array)

            range_U_dict[population] = (U_min, U_max)
            range_V_dict[population] = (V_min, V_max)

            distance_U = {
                gid: soma_distances[gid][0]
                for gid in soma_distances
            }
            distance_V = {
                gid: soma_distances[gid][1]
                for gid in soma_distances
            }

            distance_U_dict[population] = distance_U
            distance_V_dict[population] = distance_V

            min_dist = U_min
            max_dist = U_max

            distance_bins = np.arange(U_min, U_max, distance_bin_extent)
            distance_bin_array = np.digitize(distance_U_array, distance_bins)

            selection_set = set([])
            for bin_index in range(len(distance_bins) + 1):
                bin_gids = gid_array[np.where(
                    distance_bin_array == bin_index)[0]]
                if len(bin_gids) > 0:
                    # cap the sample size at the bin population so that
                    # choice() with replace=False cannot fail
                    selected_bin_gids = local_random.choice(
                        bin_gids, replace=False,
                        size=min(bin_sample_count, len(bin_gids)))
                    for gid in selected_bin_gids:
                        selection_set.add(int(gid))
            selection_dict[population] = selection_set

        yaml_output_dict = {}
        for k, v in utils.viewitems(selection_dict):
            yaml_output_dict[k] = list(sorted(v))

        yaml_output_path = '%s/DG_slice.yaml' % output_path
        with open(yaml_output_path, 'w') as outfile:
            yaml.dump(yaml_output_dict, outfile)

        del yaml_output_dict

    env.comm.barrier()

    write_selection_file_path = None
    if write_selection:
        write_selection_file_path = "%s/%s_selection.h5" % (env.results_path,
                                                            env.modelName)

    if write_selection_file_path is not None:
        if rank == 0:
            io_utils.mkout(env, write_selection_file_path)
        env.comm.barrier()
        selection_dict = env.comm.bcast(dict(selection_dict), root=0)
        env.cell_selection = selection_dict
        io_utils.write_cell_selection(env,
                                      write_selection_file_path,
                                      populations=populations)
        input_selection = io_utils.write_connection_selection(
            env, write_selection_file_path, populations=populations)

        if env.spike_input_ns is not None:
            io_utils.write_input_cell_selection(env,
                                                input_selection,
                                                write_selection_file_path,
                                                populations=populations)
    env.comm.barrier()
    MPI.Finalize()
Code example #25
def main(config, template_path, output_path, forest_path, populations,
         distance_bin_size, io_size, chunk_size, value_chunk_size, cache_size,
         verbose):
    """

    :param config:
    :param template_path:
    :param forest_path:
    :param populations:
    :param io_size:
    :param chunk_size:
    :param value_chunk_size:
    :param cache_size:
    """

    utils.config_logging(verbose)
    logger = utils.get_script_logger(script_name)

    comm = MPI.COMM_WORLD
    rank = comm.rank

    env = Env(comm=MPI.COMM_WORLD,
              config_file=config,
              template_paths=template_path)
    configure_hoc_env(env)

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    if output_path is None:
        output_path = forest_path

    if rank == 0:
        if not os.path.isfile(output_path):
            input_file = h5py.File(forest_path, 'r')
            output_file = h5py.File(output_path, 'w')
            input_file.copy('/H5Types', output_file)
            input_file.close()
            output_file.close()
    comm.barrier()

    layers = env.layers
    layer_idx_dict = {
        layers[layer_name]: layer_name
        for layer_name in ['GCL', 'IML', 'MML', 'OML', 'Hilus']
    }

    (pop_ranges, _) = read_population_ranges(forest_path, comm=comm)
    start_time = time.time()
    for population in populations:
        logger.info('Rank %i population: %s' % (rank, population))
        count = 0
        (population_start, _) = pop_ranges[population]
        template_class = load_cell_template(env,
                                            population,
                                            bcast_template=True)
        measures_dict = {}
        for gid, morph_dict in NeuroH5TreeGen(forest_path,
                                              population,
                                              io_size=io_size,
                                              comm=comm,
                                              topology=True):
            if gid is not None:
                logger.info('Rank %i gid: %i' % (rank, gid))
                cell = cells.make_neurotree_cell(template_class,
                                                 neurotree_dict=morph_dict,
                                                 gid=gid)
                secnodes_dict = morph_dict['section_topology']['nodes']

                apicalidx = set(cell.apicalidx)
                basalidx = set(cell.basalidx)

                dendrite_area_dict = {k: 0.0 for k in layer_idx_dict}
                dendrite_length_dict = {k: 0.0 for k in layer_idx_dict}
                dendrite_distances = []
                dendrite_diams = []
                for (i, sec) in enumerate(cell.sections):
                    if (i in apicalidx) or (i in basalidx):
                        secnodes = secnodes_dict[i]
                        # iterate over internal segments only; sec.allseg()
                        # would also yield the zero-area endpoints at x=0 and
                        # x=1 and over-count the segment length
                        for seg in sec:
                            seg_l = seg.sec.L / seg.sec.nseg
                            seg_area = h.area(seg.x, sec=seg.sec)
                            seg_diam = seg.diam
                            seg_distance = get_distance_to_node(
                                cell,
                                list(cell.soma)[0], seg.sec, seg.x)
                            dendrite_diams.append(seg_diam)
                            dendrite_distances.append(seg_distance)
                            layer = synapses.get_node_attribute(
                                'layer', morph_dict, seg.sec, secnodes, seg.x)
                            dendrite_length_dict[layer] += seg_l
                            dendrite_area_dict[layer] += seg_area

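                # mean diameter per somatic-distance bin: a diameter-weighted
                # histogram divided elementwise by the segment-count
                # histogram, with empty bins left at zero via the where= guard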
                dendrite_distance_array = np.asarray(dendrite_distances)
                dendrite_diam_array = np.asarray(dendrite_diams)
                dendrite_distance_bin_range = int(
                    ((np.max(dendrite_distance_array)) -
                     np.min(dendrite_distance_array)) / distance_bin_size) + 1
                dendrite_distance_counts, dendrite_distance_edges = np.histogram(
                    dendrite_distance_array,
                    bins=dendrite_distance_bin_range,
                    density=False)
                dendrite_diam_sums, _ = np.histogram(
                    dendrite_distance_array,
                    weights=dendrite_diam_array,
                    bins=dendrite_distance_bin_range,
                    density=False)
                dendrite_mean_diam_hist = np.zeros_like(dendrite_diam_sums)
                np.divide(dendrite_diam_sums,
                          dendrite_distance_counts,
                          where=dendrite_distance_counts > 0,
                          out=dendrite_mean_diam_hist)

                dendrite_area_per_layer = np.asarray(
                    [dendrite_area_dict[k]
                     for k in sorted(dendrite_area_dict)],
                    dtype=np.float32)
                dendrite_length_per_layer = np.asarray(
                    [dendrite_length_dict[k]
                     for k in sorted(dendrite_length_dict)],
                    dtype=np.float32)

                measures_dict[gid] = {
                    'dendrite_distance_hist_edges':
                    np.asarray(dendrite_distance_edges, dtype=np.float32),
                    'dendrite_distance_counts':
                    np.asarray(dendrite_distance_counts, dtype=np.int32),
                    'dendrite_mean_diam_hist':
                    np.asarray(dendrite_mean_diam_hist, dtype=np.float32),
                    'dendrite_area_per_layer':
                    dendrite_area_per_layer,
                    'dendrite_length_per_layer':
                    dendrite_length_per_layer
                }

                del cell
                count += 1
            else:
                logger.info('Rank %i gid is None' % rank)
        append_cell_attributes(output_path,
                               population,
                               measures_dict,
                               namespace='Tree Measurements',
                               comm=comm,
                               io_size=io_size,
                               chunk_size=chunk_size,
                               value_chunk_size=value_chunk_size,
                               cache_size=cache_size)
    MPI.Finalize()
Code Example #26
def main(config, config_prefix, max_section_length, population, forest_path,
         template_path, output_path, io_size, chunk_size, value_chunk_size,
         dry_run, verbose):
    """

    :param population: str
    :param forest_path: str (path)
    :param output_path: str (path)
    :param io_size: int
    :param chunk_size: int
    :param value_chunk_size: int
    :param verbose: bool
    """

    utils.config_logging(verbose)
    logger = utils.get_script_logger(os.path.basename(__file__))

    comm = MPI.COMM_WORLD
    rank = comm.rank

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    env = Env(comm=comm,
              config_file=config,
              config_prefix=config_prefix,
              template_paths=template_path)

    if rank == 0:
        if not os.path.isfile(output_path):
            input_file = h5py.File(forest_path, 'r')
            output_file = h5py.File(output_path, 'w')
            input_file.copy('/H5Types', output_file)
            input_file.close()
            output_file.close()
    comm.barrier()

    (forest_pop_ranges, _) = read_population_ranges(forest_path)
    (forest_population_start,
     forest_population_count) = forest_pop_ranges[population]

    (pop_ranges, _) = read_population_ranges(output_path)

    (population_start, population_count) = pop_ranges[population]

    new_trees_dict = {}
    for gid, tree_dict in NeuroH5TreeGen(forest_path,
                                         population,
                                         io_size=io_size,
                                         comm=comm,
                                         topology=False):
        if gid is not None:
            logger.info("Rank %d received gid %d" % (rank, gid))
            logger.info(pprint.pformat(tree_dict))
            new_tree_dict = cells.resize_tree_sections(tree_dict,
                                                       max_section_length)
            logger.info(pprint.pformat(new_tree_dict))
            new_trees_dict[gid] = new_tree_dict

    if not dry_run:
        append_cell_trees(output_path,
                          population,
                          new_trees_dict,
                          io_size=io_size,
                          comm=comm)

    comm.barrier()
    if (not dry_run) and (rank == 0):
        logger.info('Appended resized trees to %s' % output_path)
Code Example #27
def main(arena_id, config, config_prefix, dataset_prefix, distances_namespace,
         spike_input_path, spike_input_namespace, spike_input_attr,
         input_features_namespaces, input_features_path, selection_path,
         output_path, io_size, trajectory_id, verbose):

    utils.config_logging(verbose)
    logger = utils.get_script_logger(os.path.basename(__file__))

    comm = MPI.COMM_WORLD
    rank = comm.rank
    if io_size == -1:
        io_size = comm.size

    env = Env(comm=comm, config_file=config, 
              config_prefix=config_prefix, dataset_prefix=dataset_prefix, 
              results_path=output_path, spike_input_path=spike_input_path, 
              spike_input_namespace=spike_input_namespace, spike_input_attr=spike_input_attr,
              arena_id=arena_id, trajectory_id=trajectory_id, io_size=io_size)

    selection = set()
    with open(selection_path, 'r') as f:
        for line in f:
            selection.add(int(line))

    pop_ranges, pop_size = read_population_ranges(env.connectivity_file_path, comm=comm)

    distance_U_dict = {}
    distance_V_dict = {}
    range_U_dict = {}
    range_V_dict = {}

    selection_dict = defaultdict(set)

    comm0 = env.comm.Split(2 if rank == 0 else 0, 0)
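    # Comm split groups ranks by color: rank 0 has color 2 and all other
    # ranks color 0, so the comm0 held by rank 0 contains only rank 0 and is
    # used for the serial attribute reads below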

    if rank == 0:
        for population in pop_ranges:
            distances = read_cell_attributes(env.data_file_path, population, namespace=distances_namespace, comm=comm0)
            soma_distances = { k: (v['U Distance'][0], v['V Distance'][0]) for (k,v) in distances }
            del distances
        
            numitems = len(soma_distances)

            if numitems == 0:
                continue

            distance_U_array = np.asarray([soma_distances[gid][0] for gid in soma_distances])
            distance_V_array = np.asarray([soma_distances[gid][1] for gid in soma_distances])

            U_min = np.min(distance_U_array)
            U_max = np.max(distance_U_array)
            V_min = np.min(distance_V_array)
            V_max = np.max(distance_V_array)

            range_U_dict[population] = (U_min, U_max)
            range_V_dict[population] = (V_min, V_max)
            
            distance_U = { gid: soma_distances[gid][0] for gid in soma_distances }
            distance_V = { gid: soma_distances[gid][1] for gid in soma_distances }
            
            distance_U_dict[population] = distance_U
            distance_V_dict[population] = distance_V
            
            selection_dict[population] = set([ k for k in distance_U if k in selection ])
    

    env.comm.barrier()

    write_selection_file_path = "%s/%s_selection.h5" % (env.results_path, env.modelName)

    if rank == 0:
        io_utils.mkout(env, write_selection_file_path)
    env.comm.barrier()
    selection_dict = env.comm.bcast(dict(selection_dict), root=0)
    env.cell_selection = selection_dict
    io_utils.write_cell_selection(env, write_selection_file_path)
    input_selection = io_utils.write_connection_selection(env, write_selection_file_path)
    if spike_input_path:
        io_utils.write_input_cell_selection(env, input_selection, write_selection_file_path)
    if input_features_path:
        for this_input_features_namespace in sorted(input_features_namespaces):
            for population in sorted(input_selection):
                logger.info(f"Extracting input features {this_input_features_namespace} for population {population}...")
                it = read_cell_attribute_selection(input_features_path, population, 
                                                   namespace=f"{this_input_features_namespace} {arena_id}", 
                                                   selection=input_selection[population], comm=env.comm)
                output_features_dict = { cell_gid : cell_features_dict for cell_gid, cell_features_dict in it }
                append_cell_attributes(write_selection_file_path, population, output_features_dict,
                                       namespace=f"{this_input_features_namespace} {arena_id}", 
                                       io_size=io_size, comm=env.comm)
    env.comm.barrier()
Code Example #28
def main(config, config_prefix, types_path, geometry_path, output_path,
         output_namespace, populations, resolution, alpha_radius, nodeiter,
         dispersion_delta, snap_delta, io_size, chunk_size, value_chunk_size,
         verbose):

    config_logging(verbose)
    logger = get_script_logger(script_name)

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    np.seterr(all='raise')

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    if rank == 0:
        if not os.path.isfile(output_path):
            input_file = h5py.File(types_path, 'r')
            output_file = h5py.File(output_path, 'w')
            input_file.copy('/H5Types', output_file)
            input_file.close()
            output_file.close()
    comm.barrier()

    env = Env(comm=comm, config_file=config, config_prefix=config_prefix)

    random_seed = int(env.model_config['Random Seeds']['Soma Locations'])
    random.seed(random_seed)

    layer_extents = env.geometry['Parametric Surface']['Layer Extents']
    rotate = env.geometry['Parametric Surface']['Rotation']

    (extent_u, extent_v, extent_l) = get_total_extents(layer_extents)
    vol = make_CA1_volume(extent_u,
                          extent_v,
                          extent_l,
                          rotate=rotate,
                          resolution=resolution)
    layer_alpha_shape_path = 'Layer Alpha Shape/%d/%d/%d' % resolution
    if rank == 0:
        logger.info("Constructing alpha shape for volume: extents: %s..." %
                    str((extent_u, extent_v, extent_l)))
        vol_alpha_shape_path = '%s/all' % (layer_alpha_shape_path)
        vol_alpha_shape = None
        if geometry_path:
            vol_alpha_shape = load_alpha_shape(geometry_path,
                                               vol_alpha_shape_path)
        if vol_alpha_shape is None:
            vol_alpha_shape = make_alpha_shape(vol, alpha_radius=alpha_radius)
            if geometry_path:
                save_alpha_shape(geometry_path, vol_alpha_shape_path,
                                 vol_alpha_shape)
        vert = vol_alpha_shape.points
        smp = np.asarray(vol_alpha_shape.bounds, dtype=np.int64)
        vol_domain = (vert, smp)

    layer_alpha_shapes = {}
    layer_extent_vals = {}
    layer_extent_transformed_vals = {}
    if rank == 0:
        for layer, extents in viewitems(layer_extents):
            (extent_u, extent_v,
             extent_l) = get_layer_extents(layer_extents, layer)
            layer_extent_vals[layer] = (extent_u, extent_v, extent_l)
            layer_extent_transformed_vals[layer] = CA1_volume_transform(
                extent_u, extent_v, extent_l)
            has_layer_alpha_shape = False
            if geometry_path:
                this_layer_alpha_shape_path = '%s/%s' % (
                    layer_alpha_shape_path, layer)
                this_layer_alpha_shape = load_alpha_shape(
                    geometry_path, this_layer_alpha_shape_path)
                layer_alpha_shapes[layer] = this_layer_alpha_shape
                if this_layer_alpha_shape is not None:
                    has_layer_alpha_shape = True
            if not has_layer_alpha_shape:
                logger.info(
                    "Constructing alpha shape for layer %s: extents: %s..." %
                    (layer, str(extents)))
                layer_vol = make_CA1_volume(extent_u,
                                            extent_v,
                                            extent_l,
                                            rotate=rotate,
                                            resolution=resolution)
                this_layer_alpha_shape = make_alpha_shape(
                    layer_vol, alpha_radius=alpha_radius)
                layer_alpha_shapes[layer] = this_layer_alpha_shape
                if geometry_path:
                    save_alpha_shape(geometry_path,
                                     this_layer_alpha_shape_path,
                                     this_layer_alpha_shape)

    comm.barrier()
    population_ranges = read_population_ranges(output_path, comm)[0]
    if len(populations) == 0:
        populations = sorted(population_ranges.keys())

    total_count = 0
    for population in populations:
        (population_start, population_count) = population_ranges[population]
        total_count += population_count

    all_xyz_coords1 = None
    generated_coords_count_dict = defaultdict(int)
    if rank == 0:
        all_xyz_coords_lst = []
        for population in populations:
            gc.collect()

            (population_start,
             population_count) = population_ranges[population]

            pop_layers = env.geometry['Cell Distribution'][population]
            pop_constraint = None
            if 'Cell Constraints' in env.geometry:
                if population in env.geometry['Cell Constraints']:
                    pop_constraint = env.geometry['Cell Constraints'][
                        population]
            logger.info("Population %s: layer distribution is %s" %
                        (population, str(pop_layers)))

            pop_layer_count = sum(
                count for (_, count) in viewitems(pop_layers))
            assert population_count == pop_layer_count, \
                'population %s: layer counts do not sum to population count' % population

            xyz_coords_lst = []
            for layer, count in viewitems(pop_layers):
                if count <= 0:
                    continue

                alpha = layer_alpha_shapes[layer]

                vert = alpha.points
                smp = np.asarray(alpha.bounds, dtype=np.int64)

                extents_xyz = layer_extent_transformed_vals[layer]
                for (vvi, vv) in enumerate(vert):
                    for (vi, v) in enumerate(vv):
                        if v < extents_xyz[vi][0]:
                            vert[vvi][vi] = extents_xyz[vi][0]
                        elif v > extents_xyz[vi][1]:
                            vert[vvi][vi] = extents_xyz[vi][1]

                logger.info(
                    "Generating %i nodes in layer %s for population %s..." %
                    (count, layer, population))
                if verbose:
                    rbf_logger = logging.Logger.manager.loggerDict[
                        'rbf.pde.nodes']
                    rbf_logger.setLevel(logging.DEBUG)

                min_energy_constraint = None
                if pop_constraint is not None and layer in pop_constraint:
                    min_energy_constraint = pop_constraint[layer]

                nodes = gen_min_energy_nodes(count, (vert, smp),
                                             min_energy_constraint, nodeiter,
                                             dispersion_delta, snap_delta)

                xyz_coords_lst.append(nodes.reshape(-1, 3))

            for this_xyz_coords in xyz_coords_lst:
                all_xyz_coords_lst.append(this_xyz_coords)
                generated_coords_count_dict[population] += len(this_xyz_coords)

        # Additional dispersion step to ensure no overlapping cell positions
        all_xyz_coords = np.vstack(all_xyz_coords_lst)
        mask = np.ones((all_xyz_coords.shape[0], ), dtype=bool)
        # distance to nearest neighbor
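        # (k=2 because each point's nearest neighbor in the KD-tree is
        # itself; column 0 is dropped below, and points whose true nearest
        # neighbor lies within tolerance are masked out until none remain)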
        while True:
            kdt = cKDTree(all_xyz_coords[mask, :])
            nndist, nnindices = kdt.query(all_xyz_coords[mask, :], k=2)
            nndist, nnindices = nndist[:, 1:], nnindices[:, 1:]

            zindices = nnindices[np.argwhere(
                np.isclose(nndist, 0.0, atol=1e-3, rtol=1e-3))]
            if len(zindices) > 0:
                mask[np.argwhere(mask)[zindices]] = False
            else:
                break

        coords_offset = 0
        for population in populations:
            pop_coords_count = generated_coords_count_dict[population]
            pop_mask = mask[coords_offset:coords_offset + pop_coords_count]
            generated_coords_count_dict[population] = np.count_nonzero(
                pop_mask)
            coords_offset += pop_coords_count

        logger.info("Dispersion of %i nodes..." % np.count_nonzero(mask))
        all_xyz_coords1 = disperse(all_xyz_coords[mask, :],
                                   vol_domain,
                                   delta=dispersion_delta)

    if rank == 0:
        logger.info("Computing UVL coordinates of %i nodes..." %
                    len(all_xyz_coords1))

    all_xyz_coords_interp = None
    all_uvl_coords_interp = None

    if rank == 0:
        all_uvl_coords_interp = vol.inverse(all_xyz_coords1)
        all_xyz_coords_interp = vol(all_uvl_coords_interp[:, 0],
                                    all_uvl_coords_interp[:, 1],
                                    all_uvl_coords_interp[:, 2],
                                    mesh=False).reshape(3, -1).T

    if rank == 0:
        logger.info("Broadcasting generated nodes...")

    xyz_coords = comm.bcast(all_xyz_coords1, root=0)
    all_xyz_coords_interp = comm.bcast(all_xyz_coords_interp, root=0)
    all_uvl_coords_interp = comm.bcast(all_uvl_coords_interp, root=0)
    generated_coords_count_dict = comm.bcast(dict(generated_coords_count_dict),
                                             root=0)
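    # the node coordinates exist on rank 0 only up to this point;
    # broadcasting them lets every rank process its round-robin share of the
    # bounds checks in the loop below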

    coords_offset = 0
    pop_coords_dict = {}
    for population in populations:
        xyz_error = np.asarray([0.0, 0.0, 0.0])

        pop_layers = env.geometry['Cell Distribution'][population]

        pop_start, pop_count = population_ranges[population]
        coords = []

        gen_coords_count = generated_coords_count_dict[population]

        for i, coord_ind in enumerate(
                range(coords_offset, coords_offset + gen_coords_count)):

            if i % size == rank:

                uvl_coords = all_uvl_coords_interp[coord_ind, :].ravel()
                xyz_coords1 = all_xyz_coords_interp[coord_ind, :].ravel()
                if uvl_in_bounds(all_uvl_coords_interp[coord_ind, :],
                                 layer_extents, pop_layers):
                    xyz_error = np.add(
                        xyz_error,
                        np.abs(
                            np.subtract(xyz_coords[coord_ind, :],
                                        xyz_coords1)))

                    logger.info('Rank %i: %s cell %i: %f %f %f' %
                                (rank, population, i, uvl_coords[0],
                                 uvl_coords[1], uvl_coords[2]))

                    coords.append(
                        (xyz_coords1[0], xyz_coords1[1], xyz_coords1[2],
                         uvl_coords[0], uvl_coords[1], uvl_coords[2]))
                else:
                    logger.debug(
                        'Rank %i: %s cell %i not in bounds: %f %f %f' %
                        (rank, population, i, uvl_coords[0], uvl_coords[1],
                         uvl_coords[2]))
                    uvl_coords = None
                    xyz_coords1 = None

        total_xyz_error = np.zeros((3, ))
        comm.Allreduce(xyz_error, total_xyz_error, op=MPI.SUM)

        coords_count = np.sum(np.asarray(comm.allgather(len(coords))))

        # guard against division by zero when no coordinates fell in bounds
        mean_xyz_error = total_xyz_error / max(int(coords_count), 1)

        pop_coords_dict[population] = coords
        coords_offset += gen_coords_count

        if rank == 0:
            logger.info(
                'Total %i coordinates generated for population %s: mean XYZ error: %f %f %f'
                % (coords_count, population, mean_xyz_error[0],
                   mean_xyz_error[1], mean_xyz_error[2]))

    if rank == 0:
        color = 1
    else:
        color = 0

    ## comm0 includes only rank 0
    comm0 = comm.Split(color, 0)

    for population in populations:

        pop_start, pop_count = population_ranges[population]
        pop_layers = env.geometry['Cell Distribution'][population]
        pop_constraint = None
        if 'Cell Constraints' in env.geometry:
            if population in env.geometry['Cell Constraints']:
                pop_constraint = env.geometry['Cell Constraints'][population]

        coords_lst = comm.gather(pop_coords_dict[population], root=0)
        if rank == 0:
            all_coords = []
            for sublist in coords_lst:
                for item in sublist:
                    all_coords.append(item)
            coords_count = len(all_coords)

            if coords_count < pop_count:
                logger.warning(
                    "Generating additional %i coordinates for population %s..."
                    % (pop_count - len(all_coords), population))

                safety = 0.01
                delta = pop_count - len(all_coords)
                for i in range(delta):
                    for layer, count in viewitems(pop_layers):
                        if count > 0:
                            min_extent = layer_extents[layer][0]
                            max_extent = layer_extents[layer][1]
                            coord_u = np.random.uniform(
                                min_extent[0] + safety, max_extent[0] - safety)
                            coord_v = np.random.uniform(
                                min_extent[1] + safety, max_extent[1] - safety)
                            if pop_constraint is None:
                                coord_l = np.random.uniform(
                                    min_extent[2] + safety,
                                    max_extent[2] - safety)
                            else:
                                coord_l = np.random.uniform(
                                    pop_constraint[layer][0] + safety,
                                    pop_constraint[layer][1] - safety)
                            xyz_coords = CA1_volume(coord_u,
                                                    coord_v,
                                                    coord_l,
                                                    rotate=rotate).ravel()
                            all_coords.append(
                                (xyz_coords[0], xyz_coords[1], xyz_coords[2],
                                 coord_u, coord_v, coord_l))

            sampled_coords = random_subset(all_coords, int(pop_count))
            sampled_coords.sort(
                key=lambda coord: coord[3])  ## sort on U coordinate

            coords_dict = {
                pop_start + i: {
                    'X Coordinate': np.asarray([x_coord], dtype=np.float32),
                    'Y Coordinate': np.asarray([y_coord], dtype=np.float32),
                    'Z Coordinate': np.asarray([z_coord], dtype=np.float32),
                    'U Coordinate': np.asarray([u_coord], dtype=np.float32),
                    'V Coordinate': np.asarray([v_coord], dtype=np.float32),
                    'L Coordinate': np.asarray([l_coord], dtype=np.float32)
                }
                for (i, (x_coord, y_coord, z_coord, u_coord, v_coord,
                         l_coord)) in enumerate(sampled_coords)
            }

            append_cell_attributes(output_path,
                                   population,
                                   coords_dict,
                                   namespace=output_namespace,
                                   io_size=io_size,
                                   chunk_size=chunk_size,
                                   value_chunk_size=value_chunk_size,
                                   comm=comm0)

        comm.barrier()

    comm0.Free()
Code Example #29
def main(connectivity_path, output_path, coords_path, distances_namespace,
         destination, bin_size, cache_size, verbose):
    """
    Measures vertex distribution with respect to septo-temporal distance

    :param connectivity_path:
    :param coords_path:
    :param distances_namespace: 
    :param destination: 
    :param source: 

    """

    utils.config_logging(verbose)
    logger = utils.get_script_logger(os.path.basename(__file__))

    comm = MPI.COMM_WORLD

    rank = comm.Get_rank()

    (population_ranges, _) = read_population_ranges(coords_path)

    destination_start = population_ranges[destination][0]
    destination_count = population_ranges[destination][1]

    if rank == 0:
        logger.info('reading %s distances...' % destination)
    destination_soma_distances = bcast_cell_attributes(
        coords_path,
        destination,
        namespace=distances_namespace,
        comm=comm,
        root=0)

    destination_soma_distance_U = {}
    destination_soma_distance_V = {}
    for k, v in destination_soma_distances:
        destination_soma_distance_U[k] = v['U Distance'][0]
        destination_soma_distance_V[k] = v['V Distance'][0]

    del destination_soma_distances

    sources = []
    for (src, dst) in read_projection_names(connectivity_path):
        if dst == destination:
            sources.append(src)

    source_soma_distances = {}
    for s in sources:
        if rank == 0:
            logger.info('reading %s distances...' % s)
        source_soma_distances[s] = bcast_cell_attributes(
            coords_path, s, namespace=distances_namespace, comm=comm, root=0)

    source_soma_distance_U = {}
    source_soma_distance_V = {}
    for s in sources:
        this_source_soma_distance_U = {}
        this_source_soma_distance_V = {}
        for k, v in source_soma_distances[s]:
            this_source_soma_distance_U[k] = v['U Distance'][0]
            this_source_soma_distance_V[k] = v['V Distance'][0]
        source_soma_distance_U[s] = this_source_soma_distance_U
        source_soma_distance_V[s] = this_source_soma_distance_V
    del source_soma_distances

    logger.info('reading connections %s -> %s...' %
                (str(sources), destination))
    gg = [
        NeuroH5ProjectionGen(connectivity_path,
                             source,
                             destination,
                             cache_size=cache_size,
                             comm=comm) for source in sources
    ]

    dist_bins = defaultdict(dict)
    dist_u_bins = defaultdict(dict)
    dist_v_bins = defaultdict(dict)

    for prj_gen_tuple in utils.zip_longest(*gg):
        destination_gid = prj_gen_tuple[0][0]
        if not all([
                prj_gen_elt[0] == destination_gid
                for prj_gen_elt in prj_gen_tuple
        ]):
            raise Exception(
                'destination %s: destination_gid %i not matched across multiple projection generators: %s'
                % (destination, destination_gid,
                   [prj_gen_elt[0] for prj_gen_elt in prj_gen_tuple]))

        if destination_gid is not None:
            logger.info('reading connections of gid %i' % destination_gid)
            for (source, (this_destination_gid,
                          rest)) in zip(sources, prj_gen_tuple):
                this_source_soma_distance_U = source_soma_distance_U[source]
                this_source_soma_distance_V = source_soma_distance_V[source]
                this_dist_bins = dist_bins[source]
                this_dist_u_bins = dist_u_bins[source]
                this_dist_v_bins = dist_v_bins[source]
                (source_indexes, attr_dict) = rest
                dst_U = destination_soma_distance_U[destination_gid]
                dst_V = destination_soma_distance_V[destination_gid]
                for source_gid in source_indexes:
                    dist_u = dst_U - this_source_soma_distance_U[source_gid]
                    dist_v = dst_V - this_source_soma_distance_V[source_gid]
                    dist = abs(dist_u) + abs(dist_v)

                    update_bins(this_dist_bins, bin_size, dist)
                    update_bins(this_dist_u_bins, bin_size, dist_u)
                    update_bins(this_dist_v_bins, bin_size, dist_v)
    comm.barrier()

    logger.info('merging distance dictionaries...')
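    # MPI.Op.Create wraps the Python function add_bins as a user-defined
    # reduction operator; commute=True allows MPI to combine the per-rank
    # dictionaries in any order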
    add_bins_op = MPI.Op.Create(add_bins, commute=True)
    for source in sources:
        dist_bins[source] = comm.reduce(dist_bins[source],
                                        op=add_bins_op,
                                        root=0)
        dist_u_bins[source] = comm.reduce(dist_u_bins[source],
                                          op=add_bins_op,
                                          root=0)
        dist_v_bins[source] = comm.reduce(dist_v_bins[source],
                                          op=add_bins_op,
                                          root=0)

    comm.barrier()

    if rank == 0:
        color = 1
    else:
        color = 0

    ## comm0 includes only rank 0
    comm0 = comm.Split(color, 0)

    if rank == 0:
        if output_path is None:
            output_path = connectivity_path
        logger.info('writing output to %s...' % output_path)

        for source in sources:
            dist_histoCount, dist_bin_edges = finalize_bins(
                dist_bins[source], bin_size)
            dist_u_histoCount, dist_u_bin_edges = finalize_bins(
                dist_u_bins[source], bin_size)
            dist_v_histoCount, dist_v_bin_edges = finalize_bins(
                dist_v_bins[source], bin_size)
            np.savetxt('%s Distance U Bin Count.dat' % source,
                       dist_u_histoCount)
            np.savetxt('%s Distance U Bin Edges.dat' % source,
                       dist_u_bin_edges)
            np.savetxt('%s Distance V Bin Count.dat' % source,
                       dist_v_histoCount)
            np.savetxt('%s Distance V Bin Edges.dat' % source,
                       dist_v_bin_edges)
            np.savetxt('%s Distance Bin Count.dat' % source, dist_histoCount)
            np.savetxt('%s Distance Bin Edges.dat' % source, dist_bin_edges)
    comm.barrier()
Code Example #30
def main(forest_path, connectivity_namespace, coords_path, coords_namespace, io_size, chunk_size, value_chunk_size,
         cache_size):
    """

    :param forest_path:
    :param connectivity_namespace:
    :param coords_path:
    :param coords_namespace:
    :param io_size:
    :param chunk_size:
    :param value_chunk_size:
    :param cache_size:
    """
    comm = MPI.COMM_WORLD
    rank = comm.rank  # The process ID (integer 0-3 for 4-process run)

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        print('%i ranks have been allocated' % comm.size)
    sys.stdout.flush()

    start_time = time.time()

    soma_coords = {}
    source_populations = list(read_population_ranges(MPI._addressof(comm), coords_path).keys())
    for population in source_populations:
        soma_coords[population] = bcast_cell_attributes(MPI._addressof(comm), 0, coords_path, population,
                                                            namespace=coords_namespace)

    for population in soma_coords:
        for cell in viewvalues(soma_coords[population]):
            cell['u_index'] = get_array_index(u, cell['U Coordinate'][0])
            cell['v_index'] = get_array_index(v, cell['V Coordinate'][0])
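    # Note: u, v, layers, swc_types, syn_types, distance_U, distance_V,
    # p_connect, local_np_random and connectivity_seed_offset are presumably
    # module-level globals defined elsewhere in the original script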

    target = 'GC'

    layer_set, swc_type_set, syn_type_set = set(), set(), set()
    for source in layers[target]:
        layer_set.update(layers[target][source])
        swc_type_set.update(swc_types[target][source])
        syn_type_set.update(syn_types[target][source])

    count = 0
    for target_gid, attributes_dict in NeuroH5CellAttrGen(MPI._addressof(comm), forest_path, target, io_size=io_size,
                                                        cache_size=cache_size, namespace='Synapse_Attributes'):
        last_time = time.time()
        connection_dict = {}
        p_dict = {}
        source_gid_dict = {}
        if target_gid is None:
            print('Rank %i target gid is None' % rank)
        else:
            print('Rank %i received attributes for target: %s, gid: %i' % (rank, target, target_gid))
            synapse_dict = attributes_dict['Synapse_Attributes']
            connection_dict[target_gid] = {}
            local_np_random.seed(target_gid + connectivity_seed_offset)
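            # seeding per target gid (plus a fixed offset) makes each cell's
            # connectivity draws deterministic regardless of rank layout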
            connection_dict[target_gid]['source_gid'] = np.array([], dtype='uint32')
            connection_dict[target_gid]['syn_id'] = np.array([], dtype='uint32')

            for layer in layer_set:
                for swc_type in swc_type_set:
                    for syn_type in syn_type_set:
                        sources, this_proportions = filter_sources(target, layer, swc_type, syn_type)
                        if sources:
                            if rank == 0 and count == 0:
                                source_list_str = '[' + ', '.join(['%s' % xi for xi in sources]) + ']'
                                print('Connections to target: %s in layer: %i ' \
                                    '(swc_type: %i, syn_type: %i): %s' % \
                                    (target, layer, swc_type, syn_type, source_list_str))
                            p, source_gid = np.array([]), np.array([])
                            for source, this_proportion in zip(sources, this_proportions):
                                if source not in source_gid_dict:
                                    this_p, this_source_gid = p_connect.get_p(target, source, target_gid, soma_coords,
                                                                              distance_U, distance_V)
                                    source_gid_dict[source] = this_source_gid
                                    p_dict[source] = this_p
                                else:
                                    this_source_gid = source_gid_dict[source]
                                    this_p = p_dict[source]
                                p = np.append(p, this_p * this_proportion)
                                source_gid = np.append(source_gid, this_source_gid)
                            syn_indexes = filter_synapses(synapse_dict, layer, swc_type, syn_type)
                            connection_dict[target_gid]['syn_id'] = \
                                np.append(connection_dict[target_gid]['syn_id'],
                                          synapse_dict['syn_id'][syn_indexes]).astype('uint32', copy=False)
                            this_source_gid = local_np_random.choice(source_gid, len(syn_indexes), p=p)
                            connection_dict[target_gid]['source_gid'] = \
                                np.append(connection_dict[target_gid]['source_gid'],
                                          this_source_gid).astype('uint32', copy=False)
            count += 1
            print('Rank %i took %i s to compute connectivity for target: %s, gid: %i' % (rank, time.time() - last_time,
                                                                                         target, target_gid))
            sys.stdout.flush()
        last_time = time.time()
        append_cell_attributes(MPI._addressof(comm), forest_path, target, connection_dict,
                               namespace=connectivity_namespace, io_size=io_size, chunk_size=chunk_size,
                               value_chunk_size=value_chunk_size)
        if rank == 0:
            print('Appending connectivity attributes for target: %s took %i s' % (target, time.time() - last_time))
        sys.stdout.flush()
        del connection_dict
        del p_dict
        del source_gid_dict
        gc.collect()

    global_count = comm.gather(count, root=0)
    if rank == 0:
        print('%i ranks took %i s to compute connectivity for %i cells' % (comm.size, time.time() - start_time,
                                                                           np.sum(global_count)))