Example #1
def main(config_file, config_prefix, population, gid, template_paths, dataset_prefix, results_path, results_file_id, results_namespace_id, v_init):

    if results_file_id is None:
        results_file_id = uuid.uuid4()
    if results_namespace_id is None:
        results_namespace_id = 'Cell Clamp Results'
    comm = MPI.COMM_WORLD
    np.seterr(all='raise')
    verbose = True
    params = dict(locals())
    env = Env(**params)
    configure_hoc_env(env)
    io_utils.mkout(env, env.results_file_path)
    env.cell_selection = {}

    attr_dict = {}
    attr_dict[gid] = {}
    attr_dict[gid].update(measure_passive(gid, population, v_init, env))
    attr_dict[gid].update(measure_ap(gid, population, v_init, env))
    attr_dict[gid].update(measure_ap_rate(gid, population, v_init, env))
    attr_dict[gid].update(measure_fi(gid, population, v_init, env))

    pprint.pprint(attr_dict)

    if results_path is not None:
        append_cell_attributes(env.results_file_path, population, attr_dict,
                               namespace=env.results_namespace_id,
                               comm=env.comm, io_size=env.io_size)
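
The pattern shared by all of these examples is the shape of the attribute dictionary passed to append_cell_attributes: a dict keyed by gid whose values are dicts of named 1-D numpy arrays. A minimal sketch (the file name, population name, and values below are placeholders):

import numpy as np
from neuroh5.io import append_cell_attributes  # assumes NeuroH5 is installed

# attribute layout: {gid: {attribute name: 1-D numpy array}}
attr_dict = {
    0: {'V Rest': np.asarray([-65.0], dtype=np.float32)},
    1: {'V Rest': np.asarray([-63.5], dtype=np.float32)},
}
append_cell_attributes('output.h5', 'GC', attr_dict,
                       namespace='Cell Clamp Results')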
Example #2
def eval_network(env, network_config, from_param_list, from_param_dict, network_params, network_param_values, params_id, target_trj_rate_map_dict, t_start, t_stop, target_populations, output_path):

    param_tuple_values = None
    if params_id is not None:
        x = network_param_values[params_id]
        if isinstance(x, list):
            param_tuple_values = from_param_list(x)
        elif isinstance(x, dict):
            param_tuple_values = from_param_dict(x)
        else:
            raise RuntimeError(f"eval_network: invalid input parameters argument {x}")
    
        if env.comm.rank == 0:
            logger.info("*** Updating network parameters ...")
            logger.info(pprint.pformat(param_tuple_values))
        update_network_params(env, param_tuple_values)

    env.checkpoint_clear_data = False
    env.checkpoint_interval = None
    env.tstop = t_stop
    network.run(env, output=network_config.get('output_results', False), shutdown=False)

    for pop_name in target_trj_rate_map_dict:
        append_cell_attributes(env.results_file_path, pop_name, target_trj_rate_map_dict[pop_name], 
                               namespace='Target Trajectory Rate Map', comm=env.comm, io_size=env.io_size)

    local_features = network_features(env, target_trj_rate_map_dict, t_start, t_stop, target_populations)
    return collect_network_features(env, local_features, target_populations, output_path, params_id, param_tuple_values)
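
The list-vs-dict dispatch at the top of eval_network can be exercised in isolation; the converters below are hypothetical stand-ins for from_param_list/from_param_dict:

from collections import namedtuple

ParamTuple = namedtuple('ParamTuple', ['weight', 'tau'])

def from_param_list(xs):
    return ParamTuple(*xs)

def from_param_dict(d):
    return ParamTuple(**d)

network_param_values = {0: [0.5, 20.0], 1: {'weight': 0.7, 'tau': 15.0}}
for params_id in (0, 1):
    x = network_param_values[params_id]
    if isinstance(x, list):
        print(from_param_list(x))   # ParamTuple(weight=0.5, tau=20.0)
    elif isinstance(x, dict):
        print(from_param_dict(x))   # ParamTuple(weight=0.7, tau=15.0)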
Example #3
def main(config, input_path, output_path, populations, io_size, chunk_size,
         value_chunk_size):

    comm = MPI.COMM_WORLD
    rank = comm.rank

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        print('%i ranks have been allocated' % comm.size)
    sys.stdout.flush()

    env = Env(comm=comm, config_file=config)

    swc_type_apical = env.SWC_Types['apical']

    if rank == 0:
        input_file = h5py.File(input_path, 'r')
        output_file = h5py.File(output_path, 'w')
        input_file.copy('/H5Types', output_file)
        input_file.close()
        output_file.close()
    comm.barrier()

    for population in populations:

        if rank == 0:
            print('population: %s' % population)

        (trees, forestSize) = scatter_read_trees(input_path,
                                                 population,
                                                 io_size=io_size)

        coords_dict = {}
        for (gid, tree) in trees:

            vx = tree['x']
            vy = tree['y']
            vz = tree['z']
            swc_type = tree['swc_type']

            dend_idxs = np.where(swc_type == swc_type_apical)[0]

            x_coord = vx[dend_idxs[0]]
            y_coord = vy[dend_idxs[0]]
            z_coord = vz[dend_idxs[0]]
            coords_dict[gid] = {
                'X Coordinate': np.asarray([x_coord], dtype=np.float32),
                'Y Coordinate': np.asarray([y_coord], dtype=np.float32),
                'Z Coordinate': np.asarray([z_coord], dtype=np.float32)
            }

        append_cell_attributes(output_path,
                               population,
                               coords_dict,
                               namespace='Sampled Coordinates',
                               io_size=io_size,
                               chunk_size=chunk_size,
                               value_chunk_size=value_chunk_size)
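
Several of these scripts use the same initialization idiom: rank 0 copies the /H5Types type definitions into a fresh output file while all other ranks wait at a barrier. A self-contained sketch (paths are placeholders):

import h5py
from mpi4py import MPI

comm = MPI.COMM_WORLD
input_path, output_path = 'cells.h5', 'out.h5'  # placeholder paths

if comm.rank == 0:
    # only one rank creates the output file and copies the type definitions
    with h5py.File(input_path, 'r') as inf, \
         h5py.File(output_path, 'w') as outf:
        inf.copy('/H5Types', outf)
comm.barrier()  # everyone waits until the file exists before touching it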
Example #4
def main(config, coords_path, coords_namespace, resample, resolution, populations, projection_depth, io_size, chunk_size, value_chunk_size, cache_size, verbose):

    utils.config_logging(verbose)
    logger = utils.get_script_logger(script_name)
    
    comm = MPI.COMM_WORLD
    rank = comm.rank

    env = Env(comm=comm, config_file=config)

    soma_coords = {}

    if rank == 0:
        logger.info('Reading population coordinates...')

    rotate = env.geometry['Parametric Surface']['Rotation']
    min_l = float('inf')
    max_l = 0.0
    population_ranges = read_population_ranges(coords_path)[0]
    population_extents = {}
    for population in population_ranges:
        min_extent = env.geometry['Cell Layers']['Minimum Extent'][population]
        max_extent = env.geometry['Cell Layers']['Maximum Extent'][population]
        min_l = min(min_extent[2], min_l)
        max_l = max(max_extent[2], max_l)
        population_extents[population] = (min_extent, max_extent)
        
    for population in populations:
        coords = bcast_cell_attributes(coords_path, population, 0, \
                                       namespace=coords_namespace)

        soma_coords[population] = { k: (v['U Coordinate'][0], v['V Coordinate'][0], v['L Coordinate'][0]) for (k,v) in coords }
        del coords
        gc.collect()
    
    output_path = coords_path
    soma_coords = icp_transform(comm, soma_coords, projection_depth, population_extents, \
                                populations=populations, rotate=rotate, verbose=verbose)
    
    for population in populations:

        if rank == 0:
            logger.info('Writing transformed coordinates for population %s...' % population)

        append_cell_attributes(output_path, population, soma_coords[population],
                               namespace='Soma Projections', comm=comm,
                               io_size=io_size, chunk_size=chunk_size,
                               value_chunk_size=value_chunk_size, cache_size=cache_size)
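
The dict comprehension over bcast_cell_attributes is worth isolating: the iterator yields (gid, attrs) pairs, and each U/V/L value arrives as a 1-element array that must be unpacked. A sketch with inline stand-in data:

import numpy as np

# stand-in for the (gid, attrs) pairs yielded by bcast_cell_attributes
coords = [(5, {'U Coordinate': np.array([0.1]),
               'V Coordinate': np.array([0.9]),
               'L Coordinate': np.array([0.5])})]

soma_coords = {k: (v['U Coordinate'][0], v['V Coordinate'][0],
                   v['L Coordinate'][0]) for (k, v) in coords}
print(soma_coords)  # {5: (0.1, 0.9, 0.5)}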
Example #5
def save_to_h5(cell_attributes):

    for population in cell_attributes:
        place_cells, grid_cells = {}, {}
        for gid, cell in viewitems(cell_attributes[population]):

            if cell['Feature Type'][0] == feature_grid:
                grid_cells[gid] = cell
            elif cell['Feature Type'][0] == feature_place:
                place_cells[gid] = cell

        append_cell_attributes(context.output_path, population, grid_cells, namespace='Grid Selectivity',\
                               comm=context.comm, io_size=context.io_size, chunk_size=context.chunk_size,\
                               value_chunk_size=context.value_chunk_size)

        append_cell_attributes(context.output_path, population, place_cells, namespace='Place Selectivity',\
                               comm=context.comm, io_size=context.io_size, chunk_size=context.chunk_size,\
                               value_chunk_size=context.value_chunk_size)
Example #6
def save_to_h5(context, cell_dict):

    for population in cell_dict:
        place_cells, grid_cells = {}, {}
        for gid, cell in viewitems(cell_dict[population]):

            if cell.cell_type == selectivity_grid:
                grid_cells[gid] = cell.return_attr_dict()
            elif cell.cell_type == selectivity_place:
                place_cells[gid] = cell.return_attr_dict()

        append_cell_attributes(context.output_path, population, grid_cells, \
                               namespace='Grid Selectivity %s' % str(context.arena_id), \
                               comm=context.comm, io_size=context.io_size, chunk_size=context.chunk_size,\
                               value_chunk_size=context.value_chunk_size)

        append_cell_attributes(context.output_path, population, place_cells, \
                               namespace='Place Selectivity %s' % str(context.arena_id), \
                               comm=context.comm, io_size=context.io_size, chunk_size=context.chunk_size,\
                               value_chunk_size=context.value_chunk_size)
Example #7
def main(config, coords_path, io_size, chunk_size, value_chunk_size, verbose):

    utils.config_logging(verbose)
    logger = utils.get_script_logger(__file__)
    
    comm = MPI.COMM_WORLD
    rank = comm.rank

    env = Env(comm=comm, config_file=config)
    output_path = coords_path

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    source_population_ranges = read_population_ranges(coords_path)[0]
    source_populations = list(source_population_ranges.keys())

    for population in source_populations:
        if rank == 0:
            logger.info('population: %s' % population)
        soma_coords = bcast_cell_attributes(0, coords_path, population,
                                            namespace='Interpolated Coordinates', comm=comm)
        u_coords = []
        gids = []
        for gid, attrs in viewitems(soma_coords):
            u_coords.append(attrs['U Coordinate'])
            gids.append(gid)
        u_coordv = np.asarray(u_coords, dtype=np.float32)
        gidv     = np.asarray(gids, dtype=np.uint32)
        sort_idx = np.argsort(u_coordv, axis=0)
        offset   = source_population_ranges[population][0]
        sorted_coords_dict = {}
        for i in range(0,sort_idx.size):
            sorted_coords_dict[offset+i] = soma_coords[gidv[sort_idx[i][0]]]
        
        append_cell_attributes(coords_path, population, sorted_coords_dict,
                                namespace='Sorted Coordinates', io_size=io_size, chunk_size=chunk_size,
                                value_chunk_size=value_chunk_size, comm=comm)
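
The sort-and-renumber step above can be tested on its own: order cells by U coordinate, then re-key them consecutively from the population's start offset. A standalone sketch with placeholder data:

import numpy as np

soma_coords = {10: {'U Coordinate': np.array([0.7])},
               11: {'U Coordinate': np.array([0.2])},
               12: {'U Coordinate': np.array([0.5])}}
offset = 10  # placeholder population start

gidv = np.asarray(sorted(soma_coords), dtype=np.uint32)
u_coordv = np.asarray([soma_coords[g]['U Coordinate'][0] for g in gidv],
                      dtype=np.float32)
sort_idx = np.argsort(u_coordv)
sorted_coords_dict = {offset + i: soma_coords[gidv[j]]
                      for i, j in enumerate(sort_idx)}
# gids are now assigned in order of increasing U coordinate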
Example #8
def main(config, coordinates, gid, field_width, peak_rate, input_features_path,
         input_features_namespaces, output_features_namespace,
         output_weights_path, output_features_path, initial_weights_path,
         reference_weights_path, h5types_path, synapse_name,
         initial_weights_namespace, reference_weights_namespace,
         output_weights_namespace, reference_weights_are_delta,
         connections_path, optimize_method, destination, sources, arena_id,
         max_delta_weight, field_width_scale, max_iter, verbose, dry_run,
         plot):
    """
    :param config: str (path to .yaml file)
    :param coordinates: tuple of float
    :param gid: int
    :param field_width: float
    :param peak_rate: float
    :param input_features_path: str (path to .h5 file)
    :param input_features_namespaces: str
    :param output_features_namespace: str
    :param output_weights_path: str (path to .h5 file)
    :param output_features_path: str (path to .h5 file)
    :param initial_weights_path: str (path to .h5 file)
    :param reference_weights_path: str (path to .h5 file)
    :param h5types_path: str (path to .h5 file)
    :param synapse_name: str
    :param initial_weights_namespace: str
    :param reference_weights_namespace: str
    :param output_weights_namespace: str
    :param reference_weights_are_delta: bool
    :param connections_path: str (path to .h5 file)
    :param optimize_method: str
    :param destination: str (population name)
    :param sources: list of str (population name)
    :param arena_id: str
    :param max_delta_weight: float
    :param field_width_scale: float
    :param max_iter: int
    :param verbose: bool
    :param dry_run: bool
    :param plot: bool
    """
    utils.config_logging(verbose)
    logger = utils.get_script_logger(__file__)

    env = Env(config_file=config)

    if not dry_run:
        if output_weights_path is None:
            raise RuntimeError(
                'Missing required argument: output_weights_path.')
        if not os.path.isfile(output_weights_path):
            if initial_weights_path is not None and os.path.isfile(
                    initial_weights_path):
                input_file_path = initial_weights_path
            elif h5types_path is not None and os.path.isfile(h5types_path):
                input_file_path = h5types_path
            else:
                raise RuntimeError(
                    'Missing required source for h5types: either an initial_weights_path or an '
                    'h5types_path must be provided.')
            with h5py.File(output_weights_path, 'a') as output_file:
                with h5py.File(input_file_path, 'r') as input_file:
                    input_file.copy('/H5Types', output_file)

    this_input_features_namespaces = [
        '%s %s' % (input_features_namespace, arena_id)
        for input_features_namespace in input_features_namespaces
    ]
    features_attr_names = ['Arena Rate Map']
    spatial_resolution = env.stimulus_config['Spatial Resolution']  # cm
    arena = env.stimulus_config['Arena'][arena_id]
    default_run_vel = arena.properties['default run velocity']  # cm/s
    arena_x, arena_y = stimulus.get_2D_arena_spatial_mesh(
        arena, spatial_resolution)
    dim_x = len(arena_x)
    dim_y = len(arena_y)

    if gid is None:
        target_gids = []
    else:
        target_gids = [gid]
    # the rest of this example operates on a single target gid
    target_gid = gid

    dst_input_features = defaultdict(dict)
    num_fields = len(coordinates)
    this_field_width = np.array([field_width] * num_fields, dtype=np.float32)
    this_scaled_field_width = np.array([field_width * field_width_scale] *
                                       num_fields,
                                       dtype=np.float32)
    this_peak_rate = np.array([peak_rate] * num_fields, dtype=np.float32)
    this_x0 = np.array([x for x, y in coordinates], dtype=np.float32)
    this_y0 = np.array([y for x, y in coordinates], dtype=np.float32)
    this_rate_map = np.asarray(get_rate_map(this_x0, this_y0, this_field_width,
                                            this_peak_rate, arena_x, arena_y),
                               dtype=np.float32)
    target_map = np.asarray(get_rate_map(this_x0, this_y0,
                                         this_scaled_field_width,
                                         this_peak_rate, arena_x, arena_y),
                            dtype=np.float32)
    selectivity_type = env.selectivity_types['place']
    dst_input_features[destination][target_gid] = {
        'Selectivity Type': np.array([selectivity_type], dtype=np.uint8),
        'Num Fields': np.array([num_fields], dtype=np.uint8),
        'Field Width': this_field_width,
        'Peak Rate': this_peak_rate,
        'X Offset': this_x0,
        'Y Offset': this_y0,
        'Arena Rate Map': this_rate_map.ravel()
    }

    initial_weights_by_syn_id_dict = dict()
    selection = [target_gid]
    if initial_weights_path is not None:
        initial_weights_iter = \
            read_cell_attribute_selection(initial_weights_path, destination, namespace=initial_weights_namespace,
                                          selection=selection)
        syn_weight_attr_dict = dict(initial_weights_iter)

        syn_ids = syn_weight_attr_dict[target_gid]['syn_id']
        weights = syn_weight_attr_dict[target_gid][synapse_name]

        for (syn_id, weight) in zip(syn_ids, weights):
            initial_weights_by_syn_id_dict[int(syn_id)] = float(weight)

        logger.info(
            'destination: %s; gid %i; read initial synaptic weights for %i synapses'
            % (destination, target_gid, len(initial_weights_by_syn_id_dict)))

    reference_weights_by_syn_id_dict = None
    if reference_weights_path is not None:
        reference_weights_by_syn_id_dict = dict()
        reference_weights_iter = \
            read_cell_attribute_selection(reference_weights_path, destination, namespace=reference_weights_namespace,
                                          selection=selection)
        syn_weight_attr_dict = dict(reference_weights_iter)

        syn_ids = syn_weight_attr_dict[target_gid]['syn_id']
        weights = syn_weight_attr_dict[target_gid][synapse_name]

        for (syn_id, weight) in zip(syn_ids, weights):
            reference_weights_by_syn_id_dict[int(syn_id)] = float(weight)

        logger.info(
            'destination: %s; gid %i; read reference synaptic weights for %i synapses'
            % (destination, target_gid, len(reference_weights_by_syn_id_dict)))

    source_gid_set_dict = defaultdict(set)
    syn_ids_by_source_gid_dict = defaultdict(list)
    initial_weights_by_source_gid_dict = dict()
    if reference_weights_by_syn_id_dict is None:
        reference_weights_by_source_gid_dict = None
    else:
        reference_weights_by_source_gid_dict = dict()
    (graph, edge_attr_info) = read_graph_selection(file_name=connections_path,
                                                   selection=[target_gid],
                                                   namespaces=['Synapses'])
    syn_id_attr_index = None
    for source, edge_iter in viewitems(graph[destination]):
        if source not in sources:
            continue
        this_edge_attr_info = edge_attr_info[destination][source]
        if 'Synapses' in this_edge_attr_info and \
           'syn_id' in this_edge_attr_info['Synapses']:
            syn_id_attr_index = this_edge_attr_info['Synapses']['syn_id']
        for (destination_gid, edges) in edge_iter:
            assert destination_gid == target_gid
            source_gids, edge_attrs = edges
            syn_ids = edge_attrs['Synapses'][syn_id_attr_index]
            count = 0
            for i in range(len(source_gids)):
                this_source_gid = int(source_gids[i])
                source_gid_set_dict[source].add(this_source_gid)
                this_syn_id = int(syn_ids[i])
                if this_syn_id not in initial_weights_by_syn_id_dict:
                    this_weight = \
                        env.connection_config[destination][source].mechanisms['default'][synapse_name]['weight']
                    initial_weights_by_syn_id_dict[this_syn_id] = this_weight
                syn_ids_by_source_gid_dict[this_source_gid].append(this_syn_id)
                if this_source_gid not in initial_weights_by_source_gid_dict:
                    initial_weights_by_source_gid_dict[this_source_gid] = \
                        initial_weights_by_syn_id_dict[this_syn_id]
                    if reference_weights_by_source_gid_dict is not None:
                        reference_weights_by_source_gid_dict[this_source_gid] = \
                            reference_weights_by_syn_id_dict[this_syn_id]
                count += 1
            logger.info(
                'destination: %s; gid %i; set initial synaptic weights for %d inputs from source population '
                '%s' % (destination, destination_gid, count, source))

    syn_count_by_source_gid_dict = dict()
    for source_gid in syn_ids_by_source_gid_dict:
        syn_count_by_source_gid_dict[source_gid] = len(
            syn_ids_by_source_gid_dict[source_gid])

    input_rate_maps_by_source_gid_dict = dict()
    for source in sources:
        source_gids = list(source_gid_set_dict[source])
        for input_features_namespace in this_input_features_namespaces:
            input_features_iter = read_cell_attribute_selection(
                input_features_path,
                source,
                namespace=input_features_namespace,
                mask=set(features_attr_names),
                selection=source_gids)
            count = 0
            for gid, attr_dict in input_features_iter:
                input_rate_maps_by_source_gid_dict[gid] = attr_dict[
                    'Arena Rate Map'].reshape((dim_x, dim_y))
                count += 1
            logger.info('Read %s feature data for %i cells in population %s' %
                        (input_features_namespace, count, source))

    if is_interactive:
        context.update(locals())

    normalized_delta_weights_dict, arena_LS_map = \
        synapses.generate_structured_weights(target_map=target_map,
                                             initial_weight_dict=initial_weights_by_source_gid_dict,
                                             input_rate_map_dict=input_rate_maps_by_source_gid_dict,
                                             syn_count_dict=syn_count_by_source_gid_dict,
                                             max_delta_weight=max_delta_weight, arena_x=arena_x, arena_y=arena_y,
                                             reference_weight_dict=reference_weights_by_source_gid_dict,
                                             reference_weights_are_delta=reference_weights_are_delta,
                                             reference_weights_namespace=reference_weights_namespace,
                                             optimize_method=optimize_method, verbose=verbose, plot=plot)

    output_syn_ids = np.empty(len(initial_weights_by_syn_id_dict),
                              dtype='uint32')
    output_weights = np.empty(len(initial_weights_by_syn_id_dict),
                              dtype='float32')
    i = 0
    for source_gid, this_weight in viewitems(normalized_delta_weights_dict):
        for syn_id in syn_ids_by_source_gid_dict[source_gid]:
            output_syn_ids[i] = syn_id
            output_weights[i] = this_weight
            i += 1
    output_weights_dict = {
        target_gid: {
            'syn_id': output_syn_ids,
            synapse_name: output_weights
        }
    }

    logger.info('destination: %s; gid %i; generated %s for %i synapses' %
                (destination, target_gid, output_weights_namespace,
                 len(output_weights)))

    if not dry_run:
        this_output_weights_namespace = '%s %s' % (output_weights_namespace,
                                                   arena_id)
        logger.info('Destination: %s; appending %s ...' %
                    (destination, this_output_weights_namespace))
        append_cell_attributes(output_weights_path,
                               destination,
                               output_weights_dict,
                               namespace=this_output_weights_namespace)
        logger.info('Destination: %s; appended %s' %
                    (destination, this_output_weights_namespace))
        output_weights_dict.clear()
        if output_features_path is not None:
            this_output_features_namespace = '%s %s' % (
                output_features_namespace, arena_id)
            cell_attr_dict = dst_input_features[destination]
            cell_attr_dict[target_gid]['Arena State Map'] = np.asarray(
                arena_LS_map.ravel(), dtype=np.float32)
            logger.info('Destination: %s; appending %s ...' %
                        (destination, this_output_features_namespace))
            append_cell_attributes(output_features_path,
                                   destination,
                                   cell_attr_dict,
                                   namespace=this_output_features_namespace)

    if is_interactive:
        context.update(locals())
Example #9
def main(config, config_prefix, types_path, geometry_path, output_path,
         output_namespace, populations, resolution, alpha_radius, nodeiter,
         dispersion_delta, snap_delta, io_size, chunk_size, value_chunk_size,
         verbose):

    config_logging(verbose)
    logger = get_script_logger(script_name)

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    np.seterr(all='raise')

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    if rank == 0:
        if not os.path.isfile(output_path):
            input_file = h5py.File(types_path, 'r')
            output_file = h5py.File(output_path, 'w')
            input_file.copy('/H5Types', output_file)
            input_file.close()
            output_file.close()
    comm.barrier()

    env = Env(comm=comm, config_file=config, config_prefix=config_prefix)

    random_seed = int(env.model_config['Random Seeds']['Soma Locations'])
    random.seed(random_seed)

    layer_extents = env.geometry['Parametric Surface']['Layer Extents']
    rotate = env.geometry['Parametric Surface']['Rotation']

    (extent_u, extent_v, extent_l) = get_total_extents(layer_extents)
    vol = make_CA1_volume(extent_u,
                          extent_v,
                          extent_l,
                          rotate=rotate,
                          resolution=resolution)
    layer_alpha_shape_path = 'Layer Alpha Shape/%d/%d/%d' % resolution
    if rank == 0:
        logger.info("Constructing alpha shape for volume: extents: %s..." %
                    str((extent_u, extent_v, extent_l)))
        vol_alpha_shape_path = '%s/all' % (layer_alpha_shape_path)
        vol_alpha_shape = None
        if geometry_path:
            vol_alpha_shape = load_alpha_shape(geometry_path,
                                               vol_alpha_shape_path)
        if vol_alpha_shape is None:
            vol_alpha_shape = make_alpha_shape(vol, alpha_radius=alpha_radius)
            if geometry_path:
                save_alpha_shape(geometry_path, vol_alpha_shape_path,
                                 vol_alpha_shape)
        vert = vol_alpha_shape.points
        smp = np.asarray(vol_alpha_shape.bounds, dtype=np.int64)
        vol_domain = (vert, smp)

    layer_alpha_shapes = {}
    layer_extent_vals = {}
    layer_extent_transformed_vals = {}
    if rank == 0:
        for layer, extents in viewitems(layer_extents):
            (extent_u, extent_v,
             extent_l) = get_layer_extents(layer_extents, layer)
            layer_extent_vals[layer] = (extent_u, extent_v, extent_l)
            layer_extent_transformed_vals[layer] = CA1_volume_transform(
                extent_u, extent_v, extent_l)
            has_layer_alpha_shape = False
            if geometry_path:
                this_layer_alpha_shape_path = '%s/%s' % (
                    layer_alpha_shape_path, layer)
                this_layer_alpha_shape = load_alpha_shape(
                    geometry_path, this_layer_alpha_shape_path)
                layer_alpha_shapes[layer] = this_layer_alpha_shape
                if this_layer_alpha_shape is not None:
                    has_layer_alpha_shape = True
            if not has_layer_alpha_shape:
                logger.info(
                    "Constructing alpha shape for layers %s: extents: %s..." %
                    (layer, str(extents)))
                layer_vol = make_CA1_volume(extent_u,
                                            extent_v,
                                            extent_l,
                                            rotate=rotate,
                                            resolution=resolution)
                this_layer_alpha_shape = make_alpha_shape(
                    layer_vol, alpha_radius=alpha_radius)
                layer_alpha_shapes[layer] = this_layer_alpha_shape
                if geometry_path:
                    save_alpha_shape(geometry_path,
                                     this_layer_alpha_shape_path,
                                     this_layer_alpha_shape)

    comm.barrier()
    population_ranges = read_population_ranges(output_path, comm)[0]
    if len(populations) == 0:
        populations = sorted(population_ranges.keys())

    total_count = 0
    for population in populations:
        (population_start, population_count) = population_ranges[population]
        total_count += population_count

    all_xyz_coords1 = None
    generated_coords_count_dict = defaultdict(int)
    if rank == 0:
        all_xyz_coords_lst = []
        for population in populations:
            gc.collect()

            (population_start,
             population_count) = population_ranges[population]

            pop_layers = env.geometry['Cell Distribution'][population]
            pop_constraint = None
            if 'Cell Constraints' in env.geometry:
                if population in env.geometry['Cell Constraints']:
                    pop_constraint = env.geometry['Cell Constraints'][
                        population]
            logger.info("Population %s: layer distribution is %s" %
                        (population, str(pop_layers)))

            pop_layer_count = 0
            for layer, count in viewitems(pop_layers):
                pop_layer_count += count
            assert (population_count == pop_layer_count)

            xyz_coords_lst = []
            for layer, count in viewitems(pop_layers):
                if count <= 0:
                    continue

                alpha = layer_alpha_shapes[layer]

                vert = alpha.points
                smp = np.asarray(alpha.bounds, dtype=np.int64)

                extents_xyz = layer_extent_transformed_vals[layer]
                for (vvi, vv) in enumerate(vert):
                    for (vi, v) in enumerate(vv):
                        if v < extents_xyz[vi][0]:
                            vert[vvi][vi] = extents_xyz[vi][0]
                        elif v > extents_xyz[vi][1]:
                            vert[vvi][vi] = extents_xyz[vi][1]

                N = int(count * 2)  # layer-specific number of nodes
                node_count = 0

                logger.info(
                    "Generating %i nodes in layer %s for population %s..." %
                    (N, layer, population))
                if verbose:
                    rbf_logger = logging.Logger.manager.loggerDict[
                        'rbf.pde.nodes']
                    rbf_logger.setLevel(logging.DEBUG)

                min_energy_constraint = None
                if pop_constraint is not None and layer in pop_constraint:
                    min_energy_constraint = pop_constraint[layer]

                nodes = gen_min_energy_nodes(count, (vert, smp),
                                             min_energy_constraint, nodeiter,
                                             dispersion_delta, snap_delta)

                xyz_coords_lst.append(nodes.reshape(-1, 3))

            for this_xyz_coords in xyz_coords_lst:
                all_xyz_coords_lst.append(this_xyz_coords)
                generated_coords_count_dict[population] += len(this_xyz_coords)

        # Additional dispersion step to ensure no overlapping cell positions
        all_xyz_coords = np.vstack(all_xyz_coords_lst)
        mask = np.ones((all_xyz_coords.shape[0], ), dtype=bool)
        # distance to nearest neighbor
        while True:
            kdt = cKDTree(all_xyz_coords[mask, :])
            nndist, nnindices = kdt.query(all_xyz_coords[mask, :], k=2)
            nndist, nnindices = nndist[:, 1:], nnindices[:, 1:]

            zindices = nnindices[np.argwhere(
                np.isclose(nndist, 0.0, atol=1e-3, rtol=1e-3))]
            if len(zindices) > 0:
                mask[np.argwhere(mask)[zindices]] = False
            else:
                break

        coords_offset = 0
        for population in populations:
            pop_coords_count = generated_coords_count_dict[population]
            pop_mask = mask[coords_offset:coords_offset + pop_coords_count]
            generated_coords_count_dict[population] = np.count_nonzero(
                pop_mask)
            coords_offset += pop_coords_count

        logger.info("Dispersion of %i nodes..." % np.count_nonzero(mask))
        all_xyz_coords1 = disperse(all_xyz_coords[mask, :],
                                   vol_domain,
                                   delta=dispersion_delta)

    if rank == 0:
        logger.info("Computing UVL coordinates of %i nodes..." %
                    len(all_xyz_coords1))

    all_xyz_coords_interp = None
    all_uvl_coords_interp = None

    if rank == 0:
        all_uvl_coords_interp = vol.inverse(all_xyz_coords1)
        all_xyz_coords_interp = vol(all_uvl_coords_interp[:, 0],
                                    all_uvl_coords_interp[:, 1],
                                    all_uvl_coords_interp[:, 2],
                                    mesh=False).reshape(3, -1).T

    if rank == 0:
        logger.info("Broadcasting generated nodes...")

    xyz_coords = comm.bcast(all_xyz_coords1, root=0)
    all_xyz_coords_interp = comm.bcast(all_xyz_coords_interp, root=0)
    all_uvl_coords_interp = comm.bcast(all_uvl_coords_interp, root=0)
    generated_coords_count_dict = comm.bcast(dict(generated_coords_count_dict),
                                             root=0)

    coords_offset = 0
    pop_coords_dict = {}
    for population in populations:
        xyz_error = np.asarray([0.0, 0.0, 0.0])

        pop_layers = env.geometry['Cell Distribution'][population]

        pop_start, pop_count = population_ranges[population]
        coords = []

        gen_coords_count = generated_coords_count_dict[population]

        for i, coord_ind in enumerate(
                range(coords_offset, coords_offset + gen_coords_count)):

            if i % size == rank:

                uvl_coords = all_uvl_coords_interp[coord_ind, :].ravel()
                xyz_coords1 = all_xyz_coords_interp[coord_ind, :].ravel()
                if uvl_in_bounds(all_uvl_coords_interp[coord_ind, :],
                                 layer_extents, pop_layers):
                    xyz_error = np.add(
                        xyz_error,
                        np.abs(
                            np.subtract(xyz_coords[coord_ind, :],
                                        xyz_coords1)))

                    logger.info('Rank %i: %s cell %i: %f %f %f' %
                                (rank, population, i, uvl_coords[0],
                                 uvl_coords[1], uvl_coords[2]))

                    coords.append(
                        (xyz_coords1[0], xyz_coords1[1], xyz_coords1[2],
                         uvl_coords[0], uvl_coords[1], uvl_coords[2]))
                else:
                    logger.debug(
                        'Rank %i: %s cell %i not in bounds: %f %f %f' %
                        (rank, population, i, uvl_coords[0], uvl_coords[1],
                         uvl_coords[2]))
                    uvl_coords = None
                    xyz_coords1 = None

        total_xyz_error = np.zeros((3, ))
        comm.Allreduce(xyz_error, total_xyz_error, op=MPI.SUM)

        coords_count = 0
        coords_count = np.sum(np.asarray(comm.allgather(len(coords))))

        mean_xyz_error = np.asarray([(total_xyz_error[0] / coords_count), \
                                     (total_xyz_error[1] / coords_count), \
                                     (total_xyz_error[2] / coords_count)])

        pop_coords_dict[population] = coords
        coords_offset += gen_coords_count

        if rank == 0:
            logger.info(
                'Total %i coordinates generated for population %s: mean XYZ error: %f %f %f'
                % (coords_count, population, mean_xyz_error[0],
                   mean_xyz_error[1], mean_xyz_error[2]))

    if rank == 0:
        color = 1
    else:
        color = 0

    ## comm0 includes only rank 0
    comm0 = comm.Split(color, 0)

    for population in populations:

        pop_start, pop_count = population_ranges[population]
        pop_layers = env.geometry['Cell Distribution'][population]
        pop_constraint = None
        if 'Cell Constraints' in env.geometry:
            if population in env.geometry['Cell Constraints']:
                pop_constraint = env.geometry['Cell Constraints'][population]

        coords_lst = comm.gather(pop_coords_dict[population], root=0)
        if rank == 0:
            all_coords = []
            for sublist in coords_lst:
                for item in sublist:
                    all_coords.append(item)
            coords_count = len(all_coords)

            if coords_count < pop_count:
                logger.warning(
                    "Generating additional %i coordinates for population %s..."
                    % (pop_count - len(all_coords), population))

                safety = 0.01
                delta = pop_count - len(all_coords)
                for i in range(delta):
                    for layer, count in viewitems(pop_layers):
                        if count > 0:
                            min_extent = layer_extents[layer][0]
                            max_extent = layer_extents[layer][1]
                            coord_u = np.random.uniform(
                                min_extent[0] + safety, max_extent[0] - safety)
                            coord_v = np.random.uniform(
                                min_extent[1] + safety, max_extent[1] - safety)
                            if pop_constraint is None:
                                coord_l = np.random.uniform(
                                    min_extent[2] + safety,
                                    max_extent[2] - safety)
                            else:
                                coord_l = np.random.uniform(
                                    pop_constraint[layer][0] + safety,
                                    pop_constraint[layer][1] - safety)
                            xyz_coords = CA1_volume(coord_u,
                                                    coord_v,
                                                    coord_l,
                                                    rotate=rotate).ravel()
                            all_coords.append(
                                (xyz_coords[0], xyz_coords[1], xyz_coords[2],
                                 coord_u, coord_v, coord_l))

            sampled_coords = random_subset(all_coords, int(pop_count))
            sampled_coords.sort(
                key=lambda coord: coord[3])  ## sort on U coordinate

            coords_dict = {
                pop_start + i: {
                    'X Coordinate': np.asarray([x_coord], dtype=np.float32),
                    'Y Coordinate': np.asarray([y_coord], dtype=np.float32),
                    'Z Coordinate': np.asarray([z_coord], dtype=np.float32),
                    'U Coordinate': np.asarray([u_coord], dtype=np.float32),
                    'V Coordinate': np.asarray([v_coord], dtype=np.float32),
                    'L Coordinate': np.asarray([l_coord], dtype=np.float32)
                }
                for (i, (x_coord, y_coord, z_coord, u_coord, v_coord,
                         l_coord)) in enumerate(sampled_coords)
            }

            append_cell_attributes(output_path,
                                   population,
                                   coords_dict,
                                   namespace=output_namespace,
                                   io_size=io_size,
                                   chunk_size=chunk_size,
                                   value_chunk_size=value_chunk_size,
                                   comm=comm0)

        comm.barrier()

    comm0.Free()
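
The nearest-neighbor de-duplication loop in this example is a useful pattern on its own. Below is a sketch of a variant that keeps one member of each too-close pair (the loop above masks both members); the points and tolerance are placeholders:

import numpy as np
from scipy.spatial import cKDTree

pts = np.array([[0., 0., 0.], [0., 0., 1e-4], [1., 1., 1.]])
mask = np.ones(pts.shape[0], dtype=bool)
while True:
    idx = np.flatnonzero(mask)
    kdt = cKDTree(pts[idx])
    # k=2: nearest neighbor other than the point itself is column 1
    nndist, nnidx = kdt.query(pts[idx], k=2)
    rows = np.flatnonzero(nndist[:, 1] < 1e-3)
    if rows.size == 0:
        break
    nbrs = nnidx[rows, 1]
    # keep the lower-indexed member of each too-close pair
    mask[idx[nbrs[nbrs > rows]]] = False
print(pts[mask])  # the near-duplicate point has been removed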
Example #10
def main(config, coords_path, coords_namespace, distance_namespace, layers,
         npoints, spatial_resolution, io_size, verbose):

    comm = MPI.COMM_WORLD
    rank = comm.rank

    env = Env(comm=comm, config_file=config)

    min_extents = env.geometry['Parametric Surface']['Minimum Extent']
    max_extents = env.geometry['Parametric Surface']['Maximum Extent']

    layer_mids = []
    for ((layer_name, max_extent),
         (_, min_extent)) in zip(max_extents.items(),
                                 min_extents.items()):
        if layer_name in layers:
            mid = (min_extent[2] + max_extent[2]) / 2.
            layer_mids.append(mid)

    population_ranges = read_population_ranges(coords_path, comm)[0]

    ip_surfaces = []
    for layer in layer_mids:
        ip_surfaces.append(
            make_surface(l=layer, spatial_resolution=spatial_resolution))

    for population in population_ranges:
        (population_start, _) = population_ranges[population]

        for (layer_index, (layer_name, layer_mid, ip_surface)) in enumerate(
                itertools.izip(layers, layer_mids, ip_surfaces)):

            origin_u = np.min(ip_surface.su[0])
            origin_v = np.min(ip_surface.sv[0])

            for cell_gid, cell_coords_dict in NeuroH5CellAttrGen(
                    coords_path,
                    population,
                    comm=comm,
                    io_size=io_size,
                    namespace=coords_namespace):
                arc_distance_dict = {}
                if cell_gid is None:
                    print('Rank %i cell gid is None' % rank)
                else:
                    cell_u = cell_coords_dict['U Coordinate']
                    cell_v = cell_coords_dict['V Coordinate']

                    U = np.linspace(origin_u, cell_u, npoints)
                    V = np.linspace(origin_v, cell_v, npoints)

                    arc_distance_u = ip_surface.point_distance(
                        U, cell_v, normalize_uv=True)
                    arc_distance_v = ip_surface.point_distance(
                        cell_u, V, normalize_uv=True)

                    arc_distance_dict[cell_gid - population_start] = {
                        'U Distance': np.asarray([arc_distance_u],
                                                 dtype='float32'),
                        'V Distance': np.asarray([arc_distance_v],
                                                 dtype='float32')
                    }

                    if verbose:
                        print('Rank %i: gid = %i u = %f v = %f dist u = %f dist v = %f' % (
                            rank, cell_gid, cell_u, cell_v, arc_distance_u,
                            arc_distance_v))

                append_cell_attributes(coords_path,
                                       population,
                                       arc_distance_dict,
                                       namespace='%s Layer %s' %
                                       (distance_namespace, layer_name),
                                       comm=comm,
                                       io_size=io_size)
Example #11
def main(config_file, config_prefix, input_path, population, template_paths,
         dataset_prefix, results_path, results_file_id, results_namespace_id,
         v_init, io_size, chunk_size, value_chunk_size, write_size, verbose):

    utils.config_logging(verbose)
    logger = utils.get_script_logger(os.path.basename(__file__))

    comm = MPI.COMM_WORLD
    rank = comm.rank

    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    if io_size == -1:
        io_size = comm.size

    if results_file_id is None:
        if rank == 0:
            results_file_id = uuid.uuid4()
        results_file_id = comm.bcast(results_file_id, root=0)
    if results_namespace_id is None:
        results_namespace_id = 'Cell Clamp Results'
    np.seterr(all='raise')
    verbose = True
    params = dict(locals())
    env = Env(**params)
    configure_hoc_env(env)
    if rank == 0:
        io_utils.mkout(env, env.results_file_path)
    env.comm.barrier()
    env.cell_selection = {}
    template_class = load_cell_template(env, population)

    if input_path is not None:
        env.data_file_path = input_path
        env.load_celltypes()

    synapse_config = env.celltypes[population]['synapses']

    weights_namespaces = []
    if 'weights' in synapse_config:
        has_weights = synapse_config['weights']
        if has_weights:
            if 'weights namespace' in synapse_config:
                weights_namespaces.append(synapse_config['weights namespace'])
            elif 'weights namespaces' in synapse_config:
                weights_namespaces.extend(synapse_config['weights namespaces'])
            else:
                weights_namespaces.append('Weights')
    else:
        has_weights = False

    start_time = time.time()
    count = 0
    gid_count = 0
    attr_dict = {}
    if input_path is None:
        cell_path = env.data_file_path
        connectivity_path = env.connectivity_file_path
    else:
        cell_path = input_path
        connectivity_path = input_path

    for gid, morph_dict in NeuroH5TreeGen(cell_path,
                                          population,
                                          io_size=io_size,
                                          comm=env.comm,
                                          topology=True):
        local_time = time.time()
        if gid is not None:
            color = 0
            comm0 = comm.Split(color, 0)

            logger.info('Rank %i gid: %i' % (rank, gid))
            cell_dict = {'morph': morph_dict}
            synapses_iter = read_cell_attribute_selection(cell_path,
                                                          population, [gid],
                                                          'Synapse Attributes',
                                                          comm=comm0)
            _, synapse_dict = next(synapses_iter)
            cell_dict['synapse'] = synapse_dict

            if has_weights:
                cell_weights_iters = [
                    read_cell_attribute_selection(cell_path,
                                                  population, [gid],
                                                  weights_namespace,
                                                  comm=comm0)
                    for weights_namespace in weights_namespaces
                ]
                weight_dict = dict(
                    zip_longest(weights_namespaces, cell_weights_iters))
                cell_dict['weight'] = weight_dict

            (graph,
             a) = read_graph_selection(file_name=connectivity_path,
                                       selection=[gid],
                                       namespaces=['Synapses', 'Connections'],
                                       comm=comm0)
            cell_dict['connectivity'] = (graph, a)

            gid_count += 1

            attr_dict[gid] = {}
            attr_dict[gid].update(
                cell_clamp.measure_passive(gid,
                                           population,
                                           v_init,
                                           env,
                                           cell_dict=cell_dict))
            attr_dict[gid].update(
                cell_clamp.measure_ap(gid,
                                      population,
                                      v_init,
                                      env,
                                      cell_dict=cell_dict))
            attr_dict[gid].update(
                cell_clamp.measure_ap_rate(gid,
                                           population,
                                           v_init,
                                           env,
                                           cell_dict=cell_dict))
            attr_dict[gid].update(
                cell_clamp.measure_fi(gid,
                                      population,
                                      v_init,
                                      env,
                                      cell_dict=cell_dict))

        else:
            color = 1
            comm0 = comm.Split(color, 0)
            logger.info('Rank %i gid is None' % (rank))
        comm0.Free()

        count += 1
        if (results_path is not None) and (count % write_size == 0):
            append_cell_attributes(env.results_file_path,
                                   population,
                                   attr_dict,
                                   namespace=env.results_namespace_id,
                                   comm=env.comm,
                                   io_size=env.io_size,
                                   chunk_size=chunk_size,
                                   value_chunk_size=value_chunk_size)
            attr_dict = {}

    env.comm.barrier()
    if results_path is not None:
        append_cell_attributes(env.results_file_path,
                               population,
                               attr_dict,
                               namespace=env.results_namespace_id,
                               comm=env.comm,
                               io_size=env.io_size,
                               chunk_size=chunk_size,
                               value_chunk_size=value_chunk_size)
    global_count = env.comm.gather(gid_count, root=0)

    MPI.Finalize()
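
The write_size batching in this example keeps memory bounded on long runs: results accumulate per gid and are flushed every write_size iterations, with a final flush for the remainder. Schematically (the append calls are commented out because they need a live NeuroH5 file):

write_size = 100  # placeholder flush interval
attr_dict = {}
count = 0
for gid in range(1000):  # stand-in for NeuroH5TreeGen iteration
    attr_dict[gid] = {}  # ... per-gid measurements would go here ...
    count += 1
    if count % write_size == 0:
        # append_cell_attributes(results_path, population, attr_dict, ...)
        attr_dict = {}
# final partial batch
# append_cell_attributes(results_path, population, attr_dict, ...)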
Example #12
def main(config, config_prefix, selectivity_path, selectivity_namespace,
         arena_id, populations, n_trials, io_size, chunk_size,
         value_chunk_size, cache_size, write_size, output_path,
         spikes_namespace, spike_train_attr_name, gather, debug, plot,
         show_fig, save_fig, save_fig_dir, font_size, fig_format, verbose,
         dry_run):
    """

    :param config: str (.yaml file name)
    :param config_prefix: str (path to dir)
    :param selectivity_path: str (path to file)
    :param selectivity_namespace: str
    :param arena_id: str
    :param populations: str
    :param n_trials: int
    :param io_size: int
    :param chunk_size: int
    :param value_chunk_size: int
    :param cache_size: int
    :param write_size: int
    :param output_path: str (path to file)
    :param spikes_namespace: str
    :param spike_train_attr_name: str
    :param gather: bool
    :param debug: bool
    :param plot: bool
    :param show_fig: bool
    :param save_fig: str (base file name)
    :param save_fig_dir:  str (path to dir)
    :param font_size: float
    :param fig_format: str
    :param verbose: bool
    :param dry_run: bool
    """
    comm = MPI.COMM_WORLD
    rank = comm.rank

    config_logging(verbose)
    logger = get_script_logger(__file__)

    env = Env(comm=comm,
              config_file=config,
              config_prefix=config_prefix,
              template_paths=None)
    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    if save_fig is not None:
        plot = True

    if plot:
        from dentate.plot import default_fig_options

        fig_options = copy.copy(default_fig_options)
        fig_options.saveFigDir = save_fig_dir
        fig_options.fontSize = font_size
        fig_options.figFormat = fig_format
        fig_options.showFig = show_fig

    population_ranges = read_population_ranges(selectivity_path, comm)[0]

    if len(populations) == 0:
        populations = sorted(population_ranges.keys())

    if arena_id not in env.stimulus_config['Arena']:
        raise RuntimeError(
            'Arena with ID: %s not specified by configuration at file path: %s'
            % (arena_id, config_prefix + '/' + config))
    arena = env.stimulus_config['Arena'][arena_id]

    valid_selectivity_namespaces = dict()
    if rank == 0:
        for population in populations:
            if population not in population_ranges:
                raise RuntimeError(
                    'generate_input_spike_trains: specified population: %s not found in '
                    'provided selectivity_path: %s' %
                    (population, selectivity_path))
            if population not in env.stimulus_config[
                    'Selectivity Type Probabilities']:
                raise RuntimeError(
                    'generate_input_spike_trains: selectivity type not specified for '
                    'population: %s' % population)
            valid_selectivity_namespaces[population] = []
            with h5py.File(selectivity_path, 'r') as selectivity_f:
                for this_namespace in selectivity_f['Populations'][population]:
                    if 'Selectivity %s' % arena_id in this_namespace:
                        valid_selectivity_namespaces[population].append(
                            this_namespace)
                if len(valid_selectivity_namespaces[population]) == 0:
                    raise RuntimeError(
                        'generate_input_spike_trains: no selectivity data in arena: %s found '
                        'for specified population: %s in provided selectivity_path: %s'
                        % (arena_id, population, selectivity_path))
    comm.barrier()

    valid_selectivity_namespaces = comm.bcast(valid_selectivity_namespaces,
                                              root=0)
    selectivity_type_names = dict(
        (val, key) for (key, val) in viewitems(env.selectivity_types))

    equilibrate = get_equilibration(env)

    for trajectory_id in sorted(arena.trajectories.keys()):
        trajectory = arena.trajectories[trajectory_id]
        t, x, y, d = None, None, None, None
        if rank == 0:
            t, x, y, d = generate_linear_trajectory(
                trajectory,
                temporal_resolution=env.stimulus_config['Temporal Resolution'],
                equilibration_duration=env.
                stimulus_config['Equilibration Duration'])

        t = comm.bcast(t, root=0)
        x = comm.bcast(x, root=0)
        y = comm.bcast(y, root=0)
        d = comm.bcast(d, root=0)

        trajectory = t, x, y, d
        trajectory_namespace = 'Trajectory %s %s' % (arena_id, trajectory_id)
        output_namespace = '%s %s %s' % (spikes_namespace, arena_id,
                                         trajectory_id)

        if not dry_run and rank == 0:
            if output_path is None:
                raise RuntimeError(
                    'generate_input_spike_trains: missing output_path')
            if not os.path.isfile(output_path):
                with h5py.File(output_path, 'w') as output_file:
                    input_file = h5py.File(selectivity_path, 'r')
                    input_file.copy('/H5Types', output_file)
                    input_file.close()
            with h5py.File(output_path, 'a') as f:
                if trajectory_namespace not in f:
                    logger.info('Appending %s datasets to file at path: %s' %
                                (trajectory_namespace, output_path))
                    group = f.create_group(trajectory_namespace)
                    for key, value in zip(['t', 'x', 'y', 'd'], [t, x, y, d]):
                        group.create_dataset(key, data=value, dtype='float32')
                else:
                    loaded_t = f[trajectory_namespace]['t'][:]
                    if len(t) != len(loaded_t):
                        raise RuntimeError(
                            'generate_input_spike_trains: file at path: %s already contains the '
                            'namespace: %s, but the dataset sizes are inconsistent with the provided '
                            'input configuration' %
                            (output_path, trajectory_namespace))
        comm.barrier()

        if rank == 0:
            context.update(locals())

        spike_hist_sum_dict = {}
        spike_hist_resolution = 1000

        write_every = max(1, int(math.floor(write_size / comm.size)))
        for population in populations:

            this_spike_hist_sum = defaultdict(
                lambda: np.zeros(spike_hist_resolution))

            process_time = dict()
            for this_selectivity_namespace in sorted(
                    valid_selectivity_namespaces[population]):

                if rank == 0:
                    logger.info(
                        'Generating input source spike trains for population %s [%s]...'
                        % (population, this_selectivity_namespace))

                start_time = time.time()
                selectivity_attr_gen = NeuroH5CellAttrGen(
                    selectivity_path,
                    population,
                    namespace=this_selectivity_namespace,
                    comm=comm,
                    io_size=io_size,
                    cache_size=cache_size)
                spikes_attr_dict = dict()
                gid_count = 0
                for iter_count, (gid, selectivity_attr_dict
                                 ) in enumerate(selectivity_attr_gen):
                    if gid is not None:
                        context.update(locals())
                        spikes_attr_dict[gid] = \
                            generate_input_spike_trains(env, selectivity_type_names, trajectory,
                                                        gid, selectivity_attr_dict, n_trials=n_trials,
                                                        spike_train_attr_name=spike_train_attr_name,
                                                        spike_hist_resolution=spike_hist_resolution,
                                                        equilibrate=equilibrate,
                                                        spike_hist_sum=this_spike_hist_sum,
                                                        debug=(debug_callback, context) if debug else False)
                        gid_count += 1

                    if (iter_count > 0 and iter_count % write_every == 0) or \
                       (debug and iter_count == 10):
                        total_gid_count = comm.reduce(gid_count,
                                                      root=0,
                                                      op=MPI.SUM)
                        if rank == 0:
                            logger.info(
                                'generated spike trains for %i %s cells' %
                                (total_gid_count, population))

                        if not dry_run:
                            append_cell_attributes(
                                output_path,
                                population,
                                spikes_attr_dict,
                                namespace=output_namespace,
                                comm=comm,
                                io_size=io_size,
                                chunk_size=chunk_size,
                                value_chunk_size=value_chunk_size)
                        del spikes_attr_dict
                        spikes_attr_dict = dict()

                        if debug and iter_count == 10:
                            break

            if not dry_run:
                append_cell_attributes(output_path,
                                       population,
                                       spikes_attr_dict,
                                       namespace=output_namespace,
                                       comm=comm,
                                       io_size=io_size,
                                       chunk_size=chunk_size,
                                       value_chunk_size=value_chunk_size)
                del spikes_attr_dict
                spikes_attr_dict = dict()
            process_time = time.time() - population_start_time

            total_gid_count = comm.reduce(gid_count, root=0, op=MPI.SUM)
            if rank == 0:
                logger.info(
                    'generated spike trains for %i %s cells in %.2f s' %
                    (total_gid_count, population, process_time))

            if gather:
                spike_hist_sum_dict[population] = this_spike_hist_sum

        if gather:
            this_spike_hist_sum = dict([
                (key, dict(val.items()))
                for key, val in viewitems(spike_hist_sum_dict)
            ])
            spike_hist_sum = comm.gather(this_spike_hist_sum, root=0)

            if rank == 0:
                merged_spike_hist_sum = defaultdict(lambda: defaultdict(
                    lambda: np.zeros(spike_hist_resolution)))
                for each_spike_hist_sum in spike_hist_sum:
                    for population in each_spike_hist_sum:
                        for selectivity_type_name in each_spike_hist_sum[population]:
                            merged_spike_hist_sum[population][selectivity_type_name] = \
                                np.add(merged_spike_hist_sum[population][selectivity_type_name],
                                       each_spike_hist_sum[population][selectivity_type_name])

                if plot:
                    if save_fig is not None:
                        fig_options.saveFig = save_fig

                    plot_summed_spike_psth(t, trajectory_id,
                                           selectivity_type_name,
                                           merged_spike_hist_sum,
                                           spike_hist_resolution,
                                           fig_options)

        comm.barrier()

    if is_interactive and rank == 0:
        context.update(locals())
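
The write_every batching in the loop above keeps memory bounded: each rank accumulates per-gid attributes locally and flushes them with a collective append every few iterations. A minimal stand-alone sketch of that pattern follows; batched_append and the stand-in writer are illustrative helpers, not part of the NeuroH5 API, and in the MPI version the flush must be called collectively on every rank, even with an empty batch.

def batched_append(items, write_every, append_fn):
    # accumulate (gid, attrs) pairs and flush via append_fn every write_every items
    buf = {}
    for count, (gid, attrs) in enumerate(items, start=1):
        buf[gid] = attrs
        if count % write_every == 0:
            append_fn(buf)  # in the script: append_cell_attributes(...)
            buf = {}        # release the batch
    if buf:
        append_fn(buf)      # flush the remainder

written = []
batched_append(((gid, {'x': gid}) for gid in range(10)), 4, written.append)
print([len(batch) for batch in written])  # [4, 4, 2]
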
Example #13
File: io_utils.py  Project: soltesz-lab/ca1
def recsout(env,
            output_path,
            t_start=None,
            clear_data=False,
            write_cell_location_data=False,
            write_trial_data=False):
    """
    Writes intracellular state traces to specified NeuroH5 output file.

    :param env:
    :param output_path:
    :param clear_data:
    :param reduce_data:
    :return:
    """
    t_rec = env.t_rec
    equilibration_duration = float(
        env.stimulus_config['Equilibration Duration'])
    reduce_data = env.recording_profile.get('reduce', None)
    n_trials = env.n_trials

    trial_time_ranges = get_trial_time_ranges(env.t_rec.to_python(),
                                              env.n_trials)
    trial_time_bins = [
        t_trial_start for t_trial_start, t_trial_end in trial_time_ranges
    ]
    trial_dur = np.asarray([env.tstop + equilibration_duration] * n_trials,
                           dtype=np.float32)

    for pop_name in sorted(env.celltypes.keys()):
        local_rec_types = list(env.recs_dict[pop_name].keys())
        rec_types = sorted(
            set(env.comm.allreduce(local_rec_types, op=mpi_op_concat)))
        for rec_type in rec_types:
            recs = env.recs_dict[pop_name][rec_type]
            attr_dict = defaultdict(lambda: {})
            for rec in recs:
                gid = rec['gid']
                data_vec = np.array(rec['vec'],
                                    copy=clear_data,
                                    dtype=np.float32)
                time_vec = np.array(t_rec, copy=clear_data, dtype=np.float32)
                if t_start is not None:
                    time_inds = np.where(time_vec >= t_start)[0]
                    time_vec = time_vec[time_inds]
                    data_vec = data_vec[time_inds]
                trial_bins = np.digitize(time_vec, trial_time_bins) - 1
                for trial_i in range(n_trials):
                    trial_inds = np.where(trial_bins == trial_i)[0]
                    time_vec[trial_inds] -= np.sum(
                        trial_dur[:trial_i]) + equilibration_duration
                label = rec['label']
                if label in attr_dict[gid]:
                    if reduce_data is None:
                        raise RuntimeError(
                            'recsout: duplicate recorder labels and no reduce strategy specified'
                        )
                    elif reduce_data is True:
                        attr_dict[gid][label] += data_vec
                    else:
                        raise RuntimeError(
                            'recsout: unsupported reduce strategy specified')
                else:
                    attr_dict[gid][label] = data_vec
                    attr_dict[gid]['t'] = time_vec
                if write_trial_data:
                    attr_dict[gid]['trial duration'] = trial_dur
                if write_cell_location_data:
                    distance = rec.get('distance', None)
                    if distance is not None:
                        attr_dict[gid]['distance'] = np.asarray(
                            [distance], dtype=np.float32)
                    section = rec.get('section', None)
                    if section is not None:
                        attr_dict[gid]['section'] = np.asarray([section],
                                                               dtype=np.int16)
                    loc = rec.get('loc', None)
                    if loc is not None:
                        attr_dict[gid]['loc'] = np.asarray([loc],
                                                           dtype=np.float32)
                if clear_data:
                    rec['vec'].resize(0)
            if env.results_namespace_id is None:
                namespace_id = "Intracellular %s" % (rec_type)
            else:
                namespace_id = "Intracellular %s %s" % (
                    rec_type, str(env.results_namespace_id))
            append_cell_attributes(output_path,
                                   pop_name,
                                   attr_dict,
                                   namespace=namespace_id,
                                   comm=env.comm,
                                   io_size=env.io_size)
    if clear_data:
        env.t_rec.resize(0)

    env.comm.barrier()
    if env.comm.Get_rank() == 0:
        logger.info("*** Output intracellular state results to file %s" %
                    output_path)
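
The per-trial alignment in recsout (np.digitize against the trial start times, then subtracting the preceding trial durations plus the equilibration period) can be checked in isolation. A minimal NumPy sketch with made-up durations:

import numpy as np

tstop = 400.0  # per-trial simulation time, ms (made up)
equilibration_duration = 100.0  # ms (made up)
n_trials = 2
trial_dur = np.asarray([tstop + equilibration_duration] * n_trials)
trial_time_bins = np.cumsum(np.concatenate(([0.0], trial_dur[:-1])))  # trial start times

time_vec = np.arange(0.0, trial_dur.sum(), 100.0)
trial_bins = np.digitize(time_vec, trial_time_bins) - 1
for trial_i in range(n_trials):
    trial_inds = np.where(trial_bins == trial_i)[0]
    time_vec[trial_inds] -= np.sum(trial_dur[:trial_i]) + equilibration_duration

print(time_vec)  # both trials now run from -100 (equilibration) to 300 ms
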
def main(config, config_prefix, forest_path, coords_path, populations,
         resolution, reltol, optiter, io_size, verbose, dry_run):

    config_logging(verbose)
    logger = get_script_logger(__file__)

    comm = MPI.COMM_WORLD
    rank = comm.rank

    env = Env(comm=comm, config_file=config, config_prefix=config_prefix)
    swc_type_soma = env.SWC_Types['soma']

    if io_size == -1:
        io_size = comm.size

    if rank == 0:
        import h5py
        if not os.path.isfile(coords_path):
            input_file = h5py.File(forest_path, 'r')
            output_file = h5py.File(coords_path, 'w')
            input_file.copy('/H5Types', output_file)
            input_file.close()
            output_file.close()
    comm.barrier()

    (pop_ranges, _) = read_population_ranges(forest_path)

    if rank == 0:
        color = 1
    else:
        color = 0

    ## comm0 includes only rank 0
    comm0 = comm.Split(color, 0)

    rotate = env.geometry['Parametric Surface']['Rotation']
    origin = env.geometry['Parametric Surface']['Origin']
    layer_extents = env.geometry['Parametric Surface']['Layer Extents']

    ((min_u, max_u), (min_v, max_v),
     (min_l, max_l)) = get_total_extents(layer_extents)

    ## This parameter is used to expand the range of L and avoid
    ## situations where the endpoints of L end up outside of the range
    ## of the distance interpolant
    safety = 0.001

    ip_volume = None
    if rank == 0:
        ip_volume = make_volume((min_u-safety, max_u+safety), \
                                (min_v-safety, max_v+safety), \
                                (min_l-safety, max_l+safety), \
                                resolution=resolution, rotate=rotate)

    ip_volume = env.comm.bcast(ip_volume, root=0)

    for population in populations:
        pop_layers = env.geometry['Cell Distribution'][population]

        if rank == 0:
            logger.info('Reading forest for population %s...' % population)

        (trees, forestSize) = scatter_read_trees(forest_path,
                                                 population,
                                                 io_size=io_size,
                                                 comm=comm)
        (population_start, _) = pop_ranges[population]

        if rank == 0:
            logger.info('Interpolating forest coordinates...')

        count = 0
        coords = []
        coords_dict = {}
        start_time = time.time()
        for (gid, morph_dict) in trees:

            swc_type = morph_dict['swc_type']
            xs = morph_dict['x']
            ys = morph_dict['y']
            zs = morph_dict['z']

            px = xs[0]
            py = ys[0]
            pz = zs[0]

            xyz_coords = np.array([px, py, pz]).reshape(3, 1).T
            uvl_coords_interp = ip_volume.inverse(xyz_coords)[0]
            xyz_coords_interp = DG_volume(uvl_coords_interp[0],
                                          uvl_coords_interp[1],
                                          uvl_coords_interp[2],
                                          rotate=rotate)[0]
            xyz_error_interp = np.abs(
                np.subtract(xyz_coords, xyz_coords_interp))[0]

            uvl_coords_opt = optimize_inverse_uvl_coords(xyz_coords,
                                                         rotate,
                                                         layer_extents,
                                                         pop_layers,
                                                         optiter=optiter)
            if uvl_coords_opt:
                xyz_coords_opt = DG_volume(uvl_coords_opt[0],
                                           uvl_coords_opt[1],
                                           uvl_coords_opt[2],
                                           rotate=rotate)[0]
                xyz_error_opt = np.abs(np.subtract(xyz_coords,
                                                   xyz_coords_opt))[0]
            else:
                xyz_coords_opt = None
                xyz_error_opt = None

            if (xyz_error_opt is not None) and \
               (xyz_error_opt[0] < xyz_error_interp[0]) and \
               (xyz_error_opt[1] < xyz_error_interp[1]) and \
               (xyz_error_opt[2] < xyz_error_interp[2]):
                uvl_coords = uvl_coords_opt
                xyz_error = xyz_error_opt
                xyz_coords1 = xyz_coords_opt
            else:
                uvl_coords = uvl_coords_interp
                xyz_error = xyz_error_interp
                xyz_coords1 = xyz_coords_interp

            if rank == 0:
                logger.info('xyz_coords: %s' % str(xyz_coords))
                logger.info('uvl_coords_interp: %s' % str(uvl_coords_interp))
                logger.info('xyz_coords_interp: %s' % str(xyz_coords_interp))
                logger.info('xyz_error_interp: %s' % str(xyz_error_interp))
                logger.info('uvl_coords_opt: %s' % str(uvl_coords_opt))
                logger.info('xyz_coords_opt: %s' % str(xyz_coords_opt))
                logger.info('xyz_error_opt: %s' % str(xyz_error_opt))
                logger.info(
                    'uvl_in_bounds: %s' %
                    str(uvl_in_bounds(uvl_coords, layer_extents, pop_layers)))

            coords_dict[gid] = {
                'X Coordinate': np.array([xyz_coords1[0]], dtype='float32'),
                'Y Coordinate': np.array([xyz_coords1[1]], dtype='float32'),
                'Z Coordinate': np.array([xyz_coords1[2]], dtype='float32'),
                'U Coordinate': np.array([uvl_coords[0]], dtype='float32'),
                'V Coordinate': np.array([uvl_coords[1]], dtype='float32'),
                'L Coordinate': np.array([uvl_coords[2]], dtype='float32'),
                'Interpolation Error': np.asarray(xyz_error, dtype='float32')
            }

            if uvl_in_bounds(uvl_coords, layer_extents, pop_layers) and \
               (xyz_error[0] <= reltol) and (xyz_error[1] <= reltol) and (xyz_error[2] <= reltol):
                coords.append(
                    (gid, uvl_coords[0], uvl_coords[1], uvl_coords[2]))
            else:
                logger.warning("Rank %d: uvl coords %s not added to new index (in bounds: %s; error: %s)" % \
                               (rank, str(uvl_coords), str(uvl_in_bounds(uvl_coords, layer_extents, pop_layers)), str(xyz_error)))

            count += 1

        if not dry_run:
            append_cell_attributes(coords_path,
                                   population,
                                   coords_dict,
                                   namespace='Interpolated Coordinates',
                                   io_size=io_size,
                                   comm=comm)

        global_count = comm.gather(count, root=0)
        if rank == 0:
            total_count = np.sum(np.asarray(global_count))
            if total_count > 0:
                logger.info('Interpolation of %i %s cells took %.2f s' %
                            (total_count, population, time.time() - start_time))
        all_coords = comm.reduce(coords, root=0, op=mpi_op_concat)

        if rank == 0:
            if len(all_coords) > 0:
                coords_sort_idxs = list_argsort(
                    lambda coords: coords[1],
                    all_coords)  ## sort on U coordinate
                reindex_dict = {
                    coords[0]: {
                        'New Cell Index':
                        np.array([(i + population_start)], dtype='uint32')
                    }
                    for (i, coords) in zip(coords_sort_idxs, all_coords)
                }
                append_cell_attributes(coords_path,
                                       population,
                                       reindex_dict,
                                       namespace='Tree Reindex',
                                       comm=comm0)

        comm.barrier()
Example #15
def main(stimulus_path, input_stimulus_namespace, output_stimulus_namespace, io_size, chunk_size, value_chunk_size,
         cache_size, seed_offset, trajectory_id, debug):
    """
    :param stimulus_path: str
    :param input_stimulus_namespace: str
    :param output_stimulus_namespace: str
    :param io_size: int
    :param chunk_size: int
    :param value_chunk_size: int
    :param cache_size: int
    :param seed_offset: int
    :param trajectory_id: int
    :param debug: bool
    """
    comm = MPI.COMM_WORLD
    rank = comm.rank

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        print('%i ranks have been allocated' % comm.size)
    sys.stdout.flush()

    seed_offset *= 2e6
    np.random.seed(int(seed_offset))

    population_ranges = read_population_ranges(comm, stimulus_path)[0]

    input_stimulus_namespace += ' ' + str(trajectory_id)
    output_stimulus_namespace += ' ' + str(trajectory_id)

    for population in ['LPP']:
        population_start = population_ranges[population][0]
        population_count = population_ranges[population][1]

        if rank == 0:
            random_gids = np.arange(0, population_count)
            np.random.shuffle(random_gids)
        else:
            random_gids = None
        random_gids = comm.bcast(random_gids, root=0)

        count = 0
        start_time = time.time()
        attr_gen = NeuroH5CellAttrGen(comm, stimulus_path, population, io_size=io_size,
                                      cache_size=cache_size, namespace=input_stimulus_namespace)
        if debug:
            attr_gen_wrapper = (next(attr_gen) for i in range(2))
        else:
            attr_gen_wrapper = attr_gen
        for gid, stimulus_dict in attr_gen_wrapper:
            local_time = time.time()
            new_response_dict = {}
            if gid is not None:

                random_gid = random_gids[gid-population_start]
                new_response_dict[random_gid] = {'rate': stimulus_dict['rate'],
                                                 'spiketrain': np.asarray(stimulus_dict['spiketrain'],
                                                                          dtype=np.float32),
                                                 'modulation': stimulus_dict['modulation'],
                                                 'peak_index': stimulus_dict['peak_index'] }

                print('Rank %i; source: %s; assigned spike trains for gid %i to gid %i in %.2f s' % \
                      (rank, population, gid, random_gid+population_start, time.time() - local_time))
                count += 1
            if not debug:
                append_cell_attributes(comm, stimulus_path, population, new_response_dict,
                                       namespace=output_stimulus_namespace,
                                       io_size=io_size, chunk_size=chunk_size,
                                       value_chunk_size=value_chunk_size)
            sys.stdout.flush()
            del new_response_dict
            gc.collect()

        global_count = comm.gather(count, root=0)
        if rank == 0:
            print('%i ranks randomized spike trains for %i cells in %.2f s' % (comm.size, np.sum(global_count),
                                                                               time.time() - start_time))
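
The randomization above reduces to broadcasting one permutation from rank 0 and relabeling each cell's attributes with the permuted gid. The core idea, minus MPI and file I/O (gids here are population-relative, matching how the script indexes into random_gids):

import numpy as np

population_count = 5
rng = np.random.default_rng(42)
random_gids = rng.permutation(population_count)  # broadcast from rank 0 in the script

stimulus = {gid: {'rate': float(gid)} for gid in range(population_count)}
shuffled = {int(random_gids[gid]): attrs for gid, attrs in stimulus.items()}
print(shuffled)  # same attribute dicts, reassigned to permuted gids
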
def main(config, config_prefix, weights_path, weights_namespace, weights_name, connections_path, destination, sources, io_size, chunk_size, value_chunk_size, write_size, cache_size, verbose, dry_run):
    """

    :param weights_path: str
    :param weights_namespace: str
    :param connections_path: str
    :param io_size: int
    :param chunk_size: int
    :param value_chunk_size: int
    :param cache_size: int
    :param verbose:  bool
    :param dry_run:  bool
    """

    utils.config_logging(verbose)
    logger = utils.get_script_logger(os.path.basename(__file__))

    comm = MPI.COMM_WORLD
    rank = comm.rank

    env = Env(comm=comm, config_file=config, config_prefix=config_prefix)

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    if (not dry_run) and (rank==0):
        if not os.path.isfile(weights_path):
            input_file  = h5py.File(connections_path,'r')
            output_file = h5py.File(weights_path,'w')
            input_file.copy('/H5Types',output_file)
            input_file.close()
            output_file.close()
    comm.barrier()

    seed_offset = int(env.model_config['Random Seeds']['GC Log-Normal Weights 1'])

    pop_ranges, pop_size = read_population_ranges(connections_path, comm=comm)

    count = 0
    gid_count = 0
    start_time = time.time()

    connection_gen_list = [NeuroH5ProjectionGen(connections_path, source, destination, \
                                                    namespaces=['Synapses'], \
                                                    comm=comm, io_size=io_size) for source in sources]

    weights_dict = {}
    for attr_gen_package in utils.zip_longest(*connection_gen_list):
        local_time = time.time()
        source_syn_dict = defaultdict(list)
        source_gid_array = None
        conn_attr_dict = None
        destination_gid = attr_gen_package[0][0]
        if not all([attr_gen_items[0] == destination_gid for attr_gen_items in attr_gen_package]):
            raise Exception('Rank: %i; destination: %s; destination_gid %i not matched across multiple attribute generators: %s' %
                            (rank, destination, destination_gid,
                             str([attr_gen_items[0] for attr_gen_items in attr_gen_package])))
        if destination_gid is not None:
            seed = int(destination_gid + seed_offset)
            for this_destination_gid, (source_gid_array, conn_attr_dict) in attr_gen_package:
                for j in range(len(source_gid_array)):
                    this_source_gid = source_gid_array[j]
                    this_syn_id = conn_attr_dict['Synapses']['syn_id'][j]
                    source_syn_dict[this_source_gid].append(this_syn_id)
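            # NB: mu, sigma and clip are assumed to be defined at module level
            # (e.g. parsed from the model configuration) in the original script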
            weights_dict[destination_gid] = \
              synapses.generate_log_normal_weights(weights_name, mu, sigma, seed, source_syn_dict, clip=clip)
            logger.info('Rank %i; destination: %s; destination gid %i; sources: %s; generated log-normal weights for %i inputs in '
                        '%.2f s' % (rank, destination, destination_gid,
                                    list(sources),
                                    len(weights_dict[destination_gid]['syn_id']),
                                    time.time() - local_time))
            count += 1
        else:
            logger.info('Rank: %i received destination_gid as None' % rank)
        gid_count += 1
        if (write_size > 0) and (gid_count % write_size == 0):
            if not dry_run:
                append_cell_attributes(weights_path, destination, weights_dict, namespace=weights_namespace,
                                       comm=comm, io_size=io_size, chunk_size=chunk_size, value_chunk_size=value_chunk_size)
            # print 'Rank: %i, just after append' % rank
            del source_syn_dict
            del source_gid_array
            del conn_attr_dict
            weights_dict.clear()
            gc.collect()

    if not dry_run:
        append_cell_attributes( weights_path, destination, weights_dict, namespace=weights_namespace,
                                comm=comm, io_size=io_size, chunk_size=chunk_size, value_chunk_size=value_chunk_size)
    global_count = comm.gather(count, root=0)
    if rank == 0:
        logger.info('destination: %s; %i ranks generated log-normal weights for %i cells in %.2f s' % \
                    (destination, comm.size, np.sum(global_count), time.time() - start_time))
    MPI.Finalize()
def main(config, weights_path, weights_namespace, connections_path, io_size,
         chunk_size, value_chunk_size, cache_size, dry_run, verbose):
    """

    :param weights_path: str
    :param weights_namespace: str
    :param connections_path: str
    :param io_size: int
    :param chunk_size: int
    :param value_chunk_size: int
    :param cache_size: int
    :param dry_run:  bool
    :param verbose:  bool
    """

    if verbose:
        logger.setLevel(logging.INFO)

    comm = MPI.COMM_WORLD
    rank = comm.rank

    env = Env(comm=comm, config_file=config)

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%s: %i ranks have been allocated' %
                    (script_name, comm.size))

    source_population_list = ['MC', 'MPP', 'LPP']
    target = 'GC'

    if (not dry_run) and (rank == 0):
        if not os.path.isfile(weights_path):
            input_file = h5py.File(connections_path, 'r')
            output_file = h5py.File(weights_path, 'w')
            input_file.copy('/H5Types', output_file)
            input_file.close()
            output_file.close()
    comm.barrier()

    seed_offset = int(
        env.modelConfig['Random Seeds']['PP Log-Normal Weigths 1'])

    pop_ranges, pop_size = read_population_ranges(connections_path, comm=comm)
    target_gid_offset = pop_ranges[target][0]

    count = 0
    start_time = time.time()

    connection_gen_list = []
    for source in source_population_list:
        connection_gen_list.append(NeuroH5ProjectionGen(connections_path, source, target, namespaces=['Synapses'], \
                                                        comm=comm, io_size=io_size, cache_size=cache_size))

    for itercount, attr_gen_package in enumerate(
            zip_longest(*connection_gen_list)):
        local_time = time.time()
        source_syn_map = defaultdict(list)
        source_weights = None
        source_gid_array = None
        conn_attr_dict = None
        syn_weight_map = {}
        weights_dict = {}
        target_gid = attr_gen_package[0][0]
        if not all([
                attr_gen_items[0] == target_gid
                for attr_gen_items in attr_gen_package
        ]):
            raise Exception(
                'Rank: %i; target: %s; target_gid %s not matched across multiple attribute generators: %s'
                % (rank, target, str(target_gid),
                   str([
                       attr_gen_items[0] for attr_gen_items in attr_gen_package
                   ])))
        if target_gid is not None:
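            # NB: local_random, mu and sigma are assumed to be initialized at
            # module level in the original script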
            local_random.seed(int(target_gid + seed_offset))
            for this_target_gid, (source_gid_array,
                                  conn_attr_dict) in attr_gen_package:
                for j in range(len(source_gid_array)):
                    this_source_gid = source_gid_array[j]
                    this_syn_id = conn_attr_dict['Synapses'][0][j]
                    source_syn_map[this_source_gid].append(this_syn_id)
            source_weights = local_random.lognormal(mu, sigma,
                                                    len(source_syn_map))
            # weights are synchronized across all inputs from the same source_gid
            for this_source_gid, this_weight in zip(source_syn_map,
                                                    source_weights):
                for this_syn_id in source_syn_map[this_source_gid]:
                    syn_weight_map[this_syn_id] = this_weight
            weights_dict[target_gid - target_gid_offset] = \
                {'syn_id': np.array(list(syn_weight_map.keys())).astype('uint32', copy=False),
                 'weight': np.array(list(syn_weight_map.values())).astype('float32', copy=False)}
            logger.info('Rank %i; target: %s; target_gid %i; generated log-normal weights for %i inputs from %i sources in '
                        '%.2f s' % (rank, target, target_gid, len(syn_weight_map), len(source_weights),
                                    time.time() - local_time))
            count += 1
        else:
            logger.info('Rank: %i received target_gid as None' % rank)
        if not dry_run:
            append_cell_attributes(weights_path,
                                   target,
                                   weights_dict,
                                   namespace=weights_namespace,
                                   comm=comm,
                                   io_size=io_size,
                                   chunk_size=chunk_size,
                                   value_chunk_size=value_chunk_size)
            # print 'Rank: %i, just after append' % rank
        del source_syn_map
        del source_weights
        del syn_weight_map
        del source_gid_array
        del conn_attr_dict
        del weights_dict
        gc.collect()
    global_count = comm.gather(count, root=0)
    if rank == 0:
        logger.info('target: %s; %i ranks generated log-normal weights for %i cells in %.2f s' % \
                        (target, comm.size, np.sum(global_count), time.time() - start_time))
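
Both weight-generation scripts enforce the same invariant: every synapse made by one presynaptic gid receives the same log-normal draw. A minimal NumPy sketch of that per-source sharing (mu, sigma and the synapse map are made-up values):

import numpy as np

mu, sigma = 0.0, 0.35  # lognormal parameters (made up)
source_syn_map = {11: [0, 1, 2], 42: [3], 77: [4, 5]}  # source gid -> syn_ids

rng = np.random.default_rng(123)
source_weights = rng.lognormal(mu, sigma, len(source_syn_map))

syn_weight_map = {}
for this_source_gid, this_weight in zip(source_syn_map, source_weights):
    for this_syn_id in source_syn_map[this_source_gid]:
        syn_weight_map[this_syn_id] = this_weight  # shared across the source's synapses

print(syn_weight_map)  # syn_ids 0-2 carry one weight, 4-5 another
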
Example #18
from mpi4py import MPI
from neuroh5.io import read_trees, write_cell_attributes, append_cell_attributes, read_cell_attributes
import numpy as np

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

(g,_) = read_trees("data/DGC_forest_append_test_20180116.h5", "GC")
datasize=3000
a = np.arange(rank*10,(rank+1)*10).astype('uint32')
b = np.arange(rank*20,(rank+1)*20).astype('float32')

#a = np.arange(rank,rank+datasize).astype('uint32')
#b = np.arange(rank+1,rank+1+datasize).astype('uint16')
#c = np.arange(rank+2,rank+2+datasize).astype('float32')
#d = np.arange(rank+3,rank+3+datasize).astype('uint32')
#e = np.arange(rank+4,rank+4+datasize).astype('uint32')

ranksize=5

#d = {n:{'a': a+n, 'b': b, 'c': c, 'd': d+n, 'e': e+n} for n in g.keys()}
d = {n:{'a': a+n, 'b': b+n} for n in range(rank*ranksize,(rank+1)*ranksize)}

append_cell_attributes("data/DGC_forest_attr_test_20200407.h5", "GC", d, io_size=2)
append_cell_attributes("data/DGC_forest_attr_test_20200407.h5", "GC", d, io_size=2)
def main(features_path, connectivity_path, connectivity_namespace, io_size,
         chunk_size, value_chunk_size, cache_size, trajectory_id, debug):
    """

    :param features_path:
    :param connectivity_path:
    :param connectivity_namespace:
    :param io_size:
    :param chunk_size:
    :param value_chunk_size:
    :param cache_size:
    :param trajectory_id:
    :param debug:
    """
    comm = MPI.COMM_WORLD
    rank = comm.rank

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        print('%i ranks have been allocated' % comm.size)
    sys.stdout.flush()

    population_range_dict = read_population_ranges(comm, features_path)[0]

    features_dict = {}
    for population in ['MPP', 'LPP']:
        features_attr_gen = bcast_cell_attributes(comm,
                                                  0,
                                                  features_path,
                                                  population,
                                                  namespace='Feature Selectivity')
        features_dict[population] = {
            gid: attr_dict for gid, attr_dict in features_attr_gen
        }

    arena_dimension = 100.  # minimum distance from origin to boundary (cm)

    run_vel = 30.  # cm/s
    spatial_resolution = 1.  # cm
    x = np.arange(-arena_dimension, arena_dimension, spatial_resolution)
    y = np.arange(-arena_dimension, arena_dimension, spatial_resolution)
    distance = np.insert(
        np.cumsum(np.sqrt(np.sum(
            [np.diff(x)**2., np.diff(y)**2.], axis=0))), 0, 0.)
    interp_distance = np.arange(distance[0], distance[-1], spatial_resolution)
    t = old_div(interp_distance, run_vel) * 1000.  # ms
    interp_x = np.interp(interp_distance, distance, x)
    interp_y = np.interp(interp_distance, distance, y)

    with h5py.File(features_path, 'a', driver='mpio', comm=comm) as f:
        if 'Trajectories' not in f:
            f.create_group('Trajectories')
        if str(trajectory_id) not in f['Trajectories']:
            f['Trajectories'].create_group(str(trajectory_id))
            f['Trajectories'][str(trajectory_id)].create_dataset(
                'x', dtype='float32', data=interp_x)
            f['Trajectories'][str(trajectory_id)].create_dataset(
                'y', dtype='float32', data=interp_y)
            f['Trajectories'][str(trajectory_id)].create_dataset(
                'd', dtype='float32', data=interp_distance)
            f['Trajectories'][str(trajectory_id)].create_dataset(
                't', dtype='float32', data=t)
        x = f['Trajectories'][str(trajectory_id)]['x'][:]
        y = f['Trajectories'][str(trajectory_id)]['y'][:]
        d = f['Trajectories'][str(trajectory_id)]['d'][:]

    prediction_namespace = 'Response Prediction ' + str(trajectory_id)

    target_population = 'GC'
    count = 0
    start_time = time.time()
    attr_gen = NeuroH5CellAttrGen(comm,
                                  connectivity_path,
                                  target_population,
                                  io_size=io_size,
                                  cache_size=cache_size,
                                  namespace=connectivity_namespace)
    if debug:
        attr_gen_wrapper = (next(attr_gen) for i in range(2))
    else:
        attr_gen_wrapper = attr_gen
    for gid, connectivity_dict in attr_gen_wrapper:
        local_time = time.time()
        source_gid_counts = {}
        response_dict = {}
        response = np.zeros_like(d, dtype='float32')
        if gid is not None:
            for population in ['MPP', 'LPP']:
                indexes = np.where(
                    (connectivity_dict[connectivity_namespace]['source_gid'] >=
                     population_range_dict[population][0])
                    & (connectivity_dict[connectivity_namespace]['source_gid']
                       < population_range_dict[population][0] +
                       population_range_dict[population][1]))[0]
                source_gid_counts[population] = \
                    Counter(connectivity_dict[connectivity_namespace]['source_gid'][indexes])
            for population in ['MPP', 'LPP']:
                for source_gid in (
                        source_gid
                        for source_gid in source_gid_counts[population]
                        if source_gid in features_dict[population]):
                    this_feature_dict = features_dict[population][source_gid]
                    selectivity_type = this_feature_dict['Selectivity Type'][0]
                    contact_count = source_gid_counts[population][source_gid]
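                    # NB: the selectivity_grid/selectivity_place_field constants and
                    # the grid_rate/place_rate factory functions are assumed to be
                    # defined at module level in the original script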
                    if selectivity_type == selectivity_grid:
                        ori_offset = this_feature_dict['Grid Orientation'][0]
                        grid_spacing = this_feature_dict['Grid Spacing'][0]
                        x_offset = this_feature_dict['X Offset'][0]
                        y_offset = this_feature_dict['Y Offset'][0]
                        rate = np.vectorize(
                            grid_rate(grid_spacing, ori_offset, x_offset,
                                      y_offset))
                    elif selectivity_type == selectivity_place_field:
                        field_width = this_feature_dict['Field Width'][0]
                        x_offset = this_feature_dict['X Offset'][0]
                        y_offset = this_feature_dict['Y Offset'][0]
                        rate = np.vectorize(
                            place_rate(field_width, x_offset, y_offset))
                    response = np.add(response,
                                      contact_count * rate(x, y),
                                      dtype='float32')
            response_dict[gid] = {'waveform': response}
            baseline = np.mean(response[np.where(
                response <= np.percentile(response, 10.))[0]])
            peak = np.mean(response[np.where(
                response >= np.percentile(response, 90.))[0]])
            modulation = 0. if peak <= 0.1 else old_div(
                (peak - baseline), peak)
            peak_index = np.where(response == np.max(response))[0][0]
            response_dict[gid]['modulation'] = np.array([modulation],
                                                        dtype='float32')
            response_dict[gid]['peak_index'] = np.array([peak_index],
                                                        dtype='uint32')
            print('Rank %i: took %.2f s to compute predicted response for %s gid %i' % \
                  (rank, time.time() - local_time, target_population, gid))
            count += 1
        if not debug:
            append_cell_attributes(comm,
                                   features_path,
                                   target_population,
                                   response_dict,
                                   namespace=prediction_namespace,
                                   io_size=io_size,
                                   chunk_size=chunk_size,
                                   value_chunk_size=value_chunk_size)
        sys.stdout.flush()
        del response
        del response_dict
        del source_gid_counts
        gc.collect()

    global_count = comm.gather(count, root=0)
    if rank == 0:
        print('%i ranks took %.2f s to compute selectivity parameters for %i %s cells' % \
              (comm.size, time.time() - start_time, np.sum(global_count), target_population))
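
The response summary above (baseline from the bottom decile, peak from the top decile, modulation as their normalized difference) is easy to verify on a toy waveform. A minimal NumPy sketch:

import numpy as np

response = np.sin(np.linspace(0.0, np.pi, 100)).astype('float32')  # toy waveform

baseline = np.mean(response[response <= np.percentile(response, 10.)])
peak = np.mean(response[response >= np.percentile(response, 90.)])
modulation = 0. if peak <= 0.1 else (peak - baseline) / peak
peak_index = int(np.argmax(response))
print(baseline, peak, modulation, peak_index)  # high modulation, peak near index 49
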
Example #20
def main(stimulus_path, stimulus_namespace, weights_path,
         initial_weights_namespace, structured_weights_namespace,
         connections_path, io_size, chunk_size, value_chunk_size, cache_size,
         trajectory_id, seed_offset, target_sparsity, debug):
    """

    :param stimulus_path: str
    :param stimulus_namespace: str
    :param weights_path: str
    :param initial_weights_namespace: str
    :param structured_weights_namespace: str
    :param connections_path: str
    :param io_size: int
    :param chunk_size: int
    :param value_chunk_size: int
    :param cache_size: int
    :param trajectory_id: int
    :param seed_offset: int
    :param target_sparsity: float
    :param debug:  bool
    """
    # make sure random seeds are not being reused for various types of stochastic sampling
    seed_offset *= 2e6

    comm = MPI.COMM_WORLD
    rank = comm.rank

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        print('%s: %i ranks have been allocated' % (script_name, comm.size))
    sys.stdout.flush()

    stimulus_namespace += ' ' + str(trajectory_id)

    stimulus_attrs = {}
    source_population_list = ['MPP', 'LPP']
    for source in source_population_list:
        stimulus_attr_gen = bcast_cell_attributes(comm,
                                                  0,
                                                  stimulus_path,
                                                  source,
                                                  namespace=stimulus_namespace)
        stimulus_attrs[source] = {
            gid: attr_dict
            for gid, attr_dict in stimulus_attr_gen
        }

    trajectory_namespace = 'Trajectory %s' % str(trajectory_id)

    arena_dimension = 100.  # minimum distance from origin to boundary (cm)
    default_run_vel = 30.  # cm/s
    spatial_resolution = 1.  # cm

    with h5py.File(stimulus_path, 'a', driver='mpio', comm=comm) as f:
        if trajectory_namespace not in f:
            print('Rank: %i; Creating %s datasets' % (rank, trajectory_namespace))
            group = f.create_group(trajectory_namespace)
            t, x, y, d = stimulus.generate_trajectory(
                arena_dimension=arena_dimension,
                velocity=default_run_vel,
                spatial_resolution=spatial_resolution)
            for key, value in zip(['x', 'y', 'd', 't'], [x, y, d, t]):
                dataset = group.create_dataset(key, (value.shape[0], ),
                                               dtype='float32')
                with dataset.collective:
                    dataset[:] = value.astype('float32', copy=False)
        else:
            print('Rank: %i; Reading %s datasets' % (rank, trajectory_namespace))
            group = f[trajectory_namespace]
            dataset = group['x']
            with dataset.collective:
                x = dataset[:]
            dataset = group['y']
            with dataset.collective:
                y = dataset[:]
            dataset = group['d']
            with dataset.collective:
                d = dataset[:]
            dataset = group['t']
            with dataset.collective:
                t = dataset[:]

    plasticity_window_dur = 4.  # s
    plasticity_kernel_sigma = plasticity_window_dur * default_run_vel / 3. / np.sqrt(2.)  # cm
    plasticity_kernel = lambda d, d_offset: np.exp(-((d - d_offset) / plasticity_kernel_sigma)**2.)
    plasticity_kernel = np.vectorize(plasticity_kernel, excluded=[1])
    max_plasticity_kernel_area = np.sum(plasticity_kernel(d, np.max(d) / 2.)) * spatial_resolution

    target = 'GC'

    pop_ranges, pop_size = read_population_ranges(comm, stimulus_path)
    target_gid_offset = pop_ranges[target][0]

    count = 0
    structured_count = 0
    start_time = time.time()

    gid_index_map = get_cell_attributes_gid_index_map(
        comm, weights_path, target, initial_weights_namespace)

    connection_gen_list = []
    for source in source_population_list:
        connection_gen_list.append(
            NeuroH5ProjectionGen(comm,
                                 connections_path,
                                 source,
                                 target,
                                 io_size=io_size,
                                 cache_size=cache_size,
                                 namespaces=['Synapses']))

    maxiter = 100 if debug else None
    for itercount, attr_gen_package in enumerate(
            zip_longest(*connection_gen_list)):
        local_time = time.time()
        syn_weight_map = {}
        source_syn_map = defaultdict(list)
        syn_peak_index_map = {}
        structured_weights_dict = {}
        modulated_inputs = 0
        source_gid_array = None
        conn_attr_dict = None
        target_gid = attr_gen_package[0][0]
        if not all([
                attr_gen_items[0] == target_gid
                for attr_gen_items in attr_gen_package
        ]):
            raise Exception(
                'Rank: %i; target: %s; target_gid not matched across multiple attribute generators: %s'
                % (rank, target,
                   [attr_gen_items[0] for attr_gen_items in attr_gen_package]))
        # else:
        #    print 'Rank: %i; received target: %s; target_gid: %s' % (rank, target, str(target_gid))
        initial_weights_dict = get_cell_attributes_by_gid(
            target_gid, comm, weights_path, gid_index_map, target,
            initial_weights_namespace, target_gid_offset)
        if target_gid is not None:
            if initial_weights_dict is None:
                raise Exception(
                    'Rank: %i; target: %s; target_gid: %s; get_cell_attributes_by_gid didn\'t work'
                    % (rank, target, str(target_gid)))
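            # NB: local_random and peak_rate_dict are assumed to be initialized at
            # module level in the original script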
            local_random.seed(int(target_gid + seed_offset))
            syn_weight_map = dict(
                zip(initial_weights_dict['syn_id'],
                    initial_weights_dict['weight']))
            for this_target_gid, (source_gid_array,
                                  conn_attr_dict) in attr_gen_package:
                for i in range(len(source_gid_array)):
                    this_source_gid = source_gid_array[i]
                    this_syn_id = conn_attr_dict['Synapses'][0][i]
                    source_syn_map[this_source_gid].append(this_syn_id)
            if local_random.uniform() <= target_sparsity:
                modify_weights = True
                peak_loc = local_random.choice(d)
                this_plasticity_kernel = plasticity_kernel(d, peak_loc)
            else:
                modify_weights = False
            for source in stimulus_attrs:
                peak_rate = peak_rate_dict[source]
                for this_source_gid in stimulus_attrs[source]:
                    peak_index = stimulus_attrs[source][this_source_gid][
                        'peak_index'][0]
                    if modify_weights:
                        norm_rate = stimulus_attrs[source][this_source_gid][
                            'rate'] / peak_rate
                        this_plasticity_signal = np.sum(np.multiply(norm_rate, this_plasticity_kernel)) * \
                                                 spatial_resolution / max_plasticity_kernel_area
                        delta_weight = 2. * this_plasticity_signal
                    else:
                        delta_weight = 0.
                    for this_syn_id in source_syn_map[this_source_gid]:
                        syn_peak_index_map[this_syn_id] = peak_index
                        if delta_weight >= 0.1:
                            modulated_inputs += 1
                        syn_weight_map[this_syn_id] += delta_weight
            structured_weights_dict[target_gid - target_gid_offset] = \
                {'syn_id': np.array(list(syn_peak_index_map.keys())).astype('uint32', copy=False),
                 'weight': np.array([syn_weight_map[syn_id] for syn_id in syn_peak_index_map]).astype('float32',
                                                                                                      copy=False),
                 'peak_index': np.array(list(syn_peak_index_map.values())).astype('uint32', copy=False),
                 'structured': np.array([int(modify_weights)], dtype='uint32')}
            if modify_weights:
                print('Rank %i; target: %s; gid %i; generated structured weights for %i/%i inputs in %.2f s' %
                      (rank, target, target_gid, modulated_inputs, len(syn_weight_map), time.time() - local_time))
                structured_count += 1
            else:
                print('Rank %i; target: %s; gid %i; calculated input peak_locs for %i inputs in %.2f s (not selected '
                      'for structured weights)' % (rank, target, target_gid, len(syn_weight_map),
                                                   time.time() - local_time))
            count += 1
        else:
            print('Rank: %i received target_gid as None' % rank)
        if not debug:
            append_cell_attributes(comm,
                                   weights_path,
                                   target,
                                   structured_weights_dict,
                                   namespace=structured_weights_namespace,
                                   io_size=io_size,
                                   chunk_size=chunk_size,
                                   value_chunk_size=value_chunk_size)
        sys.stdout.flush()
        del syn_weight_map
        del source_syn_map
        del syn_peak_index_map
        del structured_weights_dict
        del modulated_inputs
        del source_gid_array
        del conn_attr_dict
        gc.collect()
        if debug:
            comm.barrier()
            if maxiter is not None and itercount > maxiter:
                break
    if debug:
        print('Rank: %i exited the loop' % rank)
    global_count = comm.gather(count, root=0)
    global_structured_count = comm.gather(structured_count, root=0)
    if rank == 0:
        print('target: %s; %i ranks processed %i cells (%i assigned structured weights) in %.2f s' %
              (target, comm.size, np.sum(global_count), np.sum(global_structured_count), time.time() - start_time))
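
The Gaussian plasticity kernel above concentrates weight changes around a chosen peak location along the trajectory, and dividing by the kernel's maximum attainable area makes the plasticity signal dimensionless. A minimal sketch with made-up track coordinates:

import numpy as np

spatial_resolution = 1.0  # cm (made up)
d = np.arange(0.0, 200.0, spatial_resolution)  # distance along the track
default_run_vel = 30.0  # cm/s
plasticity_window_dur = 4.0  # s
sigma = plasticity_window_dur * default_run_vel / 3. / np.sqrt(2.)

kernel = lambda dist, d_offset: np.exp(-((dist - d_offset) / sigma) ** 2.)
max_area = np.sum(kernel(d, np.max(d) / 2.)) * spatial_resolution

norm_rate = np.exp(-((d - 120.0) / 20.0) ** 2.)  # toy normalized input rate
signal = np.sum(norm_rate * kernel(d, 120.0)) * spatial_resolution / max_area
print(signal)  # largest when the input's peak coincides with the kernel's peak
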
Example #21
def main(arena_id, config, config_prefix, dataset_prefix, distances_namespace, spike_input_path, spike_input_namespace, spike_input_attr, input_features_namespaces, input_features_path, selection_path, output_path, io_size, trajectory_id, verbose):

    utils.config_logging(verbose)
    logger = utils.get_script_logger(os.path.basename(__file__))

    comm = MPI.COMM_WORLD
    rank = comm.rank
    if io_size == -1:
        io_size = comm.size

    env = Env(comm=comm, config_file=config, 
              config_prefix=config_prefix, dataset_prefix=dataset_prefix, 
              results_path=output_path, spike_input_path=spike_input_path, 
              spike_input_namespace=spike_input_namespace, spike_input_attr=spike_input_attr,
              arena_id=arena_id, trajectory_id=trajectory_id, io_size=io_size)

    selection = set()
    with open(selection_path, 'r') as f:
        for line in f:
            selection.add(int(line))

    pop_ranges, pop_size = read_population_ranges(env.connectivity_file_path, comm=comm)

    distance_U_dict = {}
    distance_V_dict = {}
    range_U_dict = {}
    range_V_dict = {}

    selection_dict = defaultdict(set)

    comm0 = env.comm.Split(2 if rank == 0 else 0, 0)

    if rank == 0:
        for population in pop_ranges:
            distances = read_cell_attributes(env.data_file_path, population, namespace=distances_namespace, comm=comm0)
            soma_distances = { k: (v['U Distance'][0], v['V Distance'][0]) for (k,v) in distances }
            del distances
        
            numitems = len(list(soma_distances.keys()))

            if numitems == 0:
                continue

            distance_U_array = np.asarray([soma_distances[gid][0] for gid in soma_distances])
            distance_V_array = np.asarray([soma_distances[gid][1] for gid in soma_distances])

            U_min = np.min(distance_U_array)
            U_max = np.max(distance_U_array)
            V_min = np.min(distance_V_array)
            V_max = np.max(distance_V_array)

            range_U_dict[population] = (U_min, U_max)
            range_V_dict[population] = (V_min, V_max)
            
            distance_U = { gid: soma_distances[gid][0] for gid in soma_distances }
            distance_V = { gid: soma_distances[gid][1] for gid in soma_distances }
            
            distance_U_dict[population] = distance_U
            distance_V_dict[population] = distance_V
            
            min_dist = U_min
            max_dist = U_max 

            selection_dict[population] = set([ k for k in distance_U if k in selection ])
    

    env.comm.barrier()

    write_selection_file_path =  "%s/%s_selection.h5" % (env.results_path, env.modelName)

    if rank == 0:
        io_utils.mkout(env, write_selection_file_path)
    env.comm.barrier()
    selection_dict = env.comm.bcast(dict(selection_dict), root=0)
    env.cell_selection = selection_dict
    io_utils.write_cell_selection(env, write_selection_file_path)
    input_selection = io_utils.write_connection_selection(env, write_selection_file_path)
    if spike_input_path:
        io_utils.write_input_cell_selection(env, input_selection, write_selection_file_path)
    if input_features_path:
        for this_input_features_namespace in sorted(input_features_namespaces):
            for population in sorted(input_selection):
                logger.info(f"Extracting input features {this_input_features_namespace} for population {population}...")
                it = read_cell_attribute_selection(input_features_path, population, 
                                                   namespace=f"{this_input_features_namespace} {arena_id}", 
                                                   selection=input_selection[population], comm=env.comm)
                output_features_dict = { cell_gid : cell_features_dict for cell_gid, cell_features_dict in it }
                append_cell_attributes(write_selection_file_path, population, output_features_dict,
                                       namespace=f"{this_input_features_namespace} {arena_id}", 
                                       io_size=io_size, comm=env.comm)
    env.comm.barrier()
Example #22
def main(population, forest_path, output_path, index_path, types_path,
         index_namespace, coords_namespace, sample_count, io_size, chunk_size,
         value_chunk_size, verbose):
    """

    :param population: str
    :param forest_path: str (path)
    :param output_path: str (path)
    :param index_path: str (path)
    :param io_size: int
    :param chunk_size: int
    :param value_chunk_size: int
    :param verbose: bool
    """

    utils.config_logging(verbose)
    logger = utils.get_script_logger(os.path.basename(__file__))

    comm = MPI.COMM_WORLD
    rank = comm.rank

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    random.seed(13)

    if rank == 0:
        if not os.path.isfile(output_path):
            input_file = h5py.File(types_path, 'r')
            output_file = h5py.File(output_path, 'w')
            input_file.copy('/H5Types', output_file)
            input_file.close()
            output_file.close()
    comm.barrier()

    (forest_pop_ranges, _) = read_population_ranges(forest_path)
    (forest_population_start,
     forest_population_count) = forest_pop_ranges[population]

    (pop_ranges, _) = read_population_ranges(output_path)

    (population_start, population_count) = pop_ranges[population]

    if rank == 0:
        logger.info('reading new cell index map...')

    reindex_map1 = {}
    reindex_map_gen = bcast_cell_attributes(index_path,
                                            population,
                                            namespace=index_namespace,
                                            root=0,
                                            comm=comm)
    for gid, attr_dict in reindex_map_gen:
        reindex_map1[gid] = attr_dict['New Cell Index'][0]

    if rank == 0:
        logger.info('reading cell coordinates...')

    old_coords_dict = {}
    coords_map_gen = bcast_cell_attributes(index_path,
                                           population,
                                           namespace=coords_namespace,
                                           root=0,
                                           comm=comm)
    for gid, attr_dict in coords_map_gen:
        old_coords_dict[gid] = attr_dict

    gc.collect()
    if rank == 0:
        logger.info('sampling cell population reindex...')

    N = len(reindex_map1)
    if sample_count is None:
        sample_count = min(population_count, N)
    else:
        sample_count = min(sample_count, N)
    reindex_map = None
    if rank == 0:
        reindex_map = dict(
            random_subset(utils.viewitems(reindex_map1), sample_count))
    reindex_map = comm.bcast(reindex_map, root=0)

    if rank == 0:
        logger.info('computing new population index...')

    gid_map = {
        k: i + population_start
        for i, k in enumerate(sorted(reindex_map.keys()))
    }

    new_coords_dict = {}
    new_trees_dict = {}
    for gid, old_trees_dict in NeuroH5TreeGen(forest_path,
                                              population,
                                              io_size=io_size,
                                              comm=comm,
                                              topology=False):
        if gid is not None and gid in reindex_map:
            reindex_gid = reindex_map[gid]
            new_gid = gid_map[gid]
            new_trees_dict[new_gid] = old_trees_dict
            new_coords_dict[new_gid] = old_coords_dict[gid]
            logger.info('Rank: %i mapping old gid: %i to new gid: %i' %
                        (rank, gid, new_gid))
    append_cell_trees(output_path,
                      population,
                      new_trees_dict,
                      io_size=io_size,
                      comm=comm)
    append_cell_attributes(output_path, population, new_coords_dict, \
                           namespace=coords_namespace, io_size=io_size, comm=comm)

    comm.barrier()
    if rank == 0:
        logger.info('Appended reindexed trees to %s' % output_path)
Example #23
def main(config, template_path, output_path, forest_path, populations,
         distance_bin_size, io_size, chunk_size, value_chunk_size, cache_size,
         verbose):
    """

    :param config:
    :param template_path:
    :param output_path:
    :param forest_path:
    :param populations:
    :param distance_bin_size:
    :param io_size:
    :param chunk_size:
    :param value_chunk_size:
    :param cache_size:
    :param verbose:
    """

    utils.config_logging(verbose)
    logger = utils.get_script_logger(script_name)

    comm = MPI.COMM_WORLD
    rank = comm.rank

    env = Env(comm=MPI.COMM_WORLD,
              config_file=config,
              template_paths=template_path)
    configure_hoc_env(env)

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    if output_path is None:
        output_path = forest_path

    if rank == 0:
        if not os.path.isfile(output_path):
            input_file = h5py.File(forest_path, 'r')
            output_file = h5py.File(output_path, 'w')
            input_file.copy('/H5Types', output_file)
            input_file.close()
            output_file.close()
    comm.barrier()

    layers = env.layers
    layer_idx_dict = {
        layers[layer_name]: layer_name
        for layer_name in ['GCL', 'IML', 'MML', 'OML', 'Hilus']
    }

    (pop_ranges, _) = read_population_ranges(forest_path, comm=comm)
    start_time = time.time()
    for population in populations:
        logger.info('Rank %i population: %s' % (rank, population))
        count = 0
        (population_start, _) = pop_ranges[population]
        template_class = load_cell_template(env,
                                            population,
                                            bcast_template=True)
        measures_dict = {}
        for gid, morph_dict in NeuroH5TreeGen(forest_path,
                                              population,
                                              io_size=io_size,
                                              comm=comm,
                                              topology=True):
            if gid is not None:
                logger.info('Rank %i gid: %i' % (rank, gid))
                cell = cells.make_neurotree_cell(template_class,
                                                 neurotree_dict=morph_dict,
                                                 gid=gid)
                secnodes_dict = morph_dict['section_topology']['nodes']

                apicalidx = set(cell.apicalidx)
                basalidx = set(cell.basalidx)

                dendrite_area_dict = {k: 0.0 for k in layer_idx_dict}
                dendrite_length_dict = {k: 0.0 for k in layer_idx_dict}
                dendrite_distances = []
                dendrite_diams = []
                for (i, sec) in enumerate(cell.sections):
                    if (i in apicalidx) or (i in basalidx):
                        secnodes = secnodes_dict[i]
                        for seg in sec.allseg():
                            L = seg.sec.L
                            nseg = seg.sec.nseg
                            seg_l = L / nseg
                            seg_area = h.area(seg.x)
                            seg_diam = seg.diam
                            seg_distance = get_distance_to_node(
                                cell,
                                list(cell.soma)[0], seg.sec, seg.x)
                            dendrite_diams.append(seg_diam)
                            dendrite_distances.append(seg_distance)
                            layer = synapses.get_node_attribute(
                                'layer', morph_dict, seg.sec, secnodes, seg.x)
                            dendrite_length_dict[layer] += seg_l
                            dendrite_area_dict[layer] += seg_area

                dendrite_distance_array = np.asarray(dendrite_distances)
                dendrite_diam_array = np.asarray(dendrite_diams)
                dendrite_distance_bin_range = int(
                    ((np.max(dendrite_distance_array)) -
                     np.min(dendrite_distance_array)) / distance_bin_size) + 1
                dendrite_distance_counts, dendrite_distance_edges = np.histogram(
                    dendrite_distance_array,
                    bins=dendrite_distance_bin_range,
                    density=False)
                dendrite_diam_sums, _ = np.histogram(
                    dendrite_distance_array,
                    weights=dendrite_diam_array,
                    bins=dendrite_distance_bin_range,
                    density=False)
                dendrite_mean_diam_hist = np.zeros_like(dendrite_diam_sums)
                np.divide(dendrite_diam_sums,
                          dendrite_distance_counts,
                          where=dendrite_distance_counts > 0,
                          out=dendrite_mean_diam_hist)

                dendrite_area_per_layer = np.asarray([
                    dendrite_area_dict[k]
                    for k in sorted(dendrite_area_dict.keys())
                ],
                                                     dtype=np.float32)
                dendrite_length_per_layer = np.asarray([
                    dendrite_length_dict[k]
                    for k in sorted(dendrite_length_dict.keys())
                ],
                                                       dtype=np.float32)

                measures_dict[gid] = {
                    'dendrite_distance_hist_edges':
                    np.asarray(dendrite_distance_edges, dtype=np.float32),
                    'dendrite_distance_counts':
                    np.asarray(dendrite_distance_counts, dtype=np.int32),
                    'dendrite_mean_diam_hist':
                    np.asarray(dendrite_mean_diam_hist, dtype=np.float32),
                    'dendrite_area_per_layer':
                    dendrite_area_per_layer,
                    'dendrite_length_per_layer':
                    dendrite_length_per_layer
                }

                del cell
                count += 1
            else:
                logger.info('Rank %i gid is None' % rank)
        append_cell_attributes(output_path,
                               population,
                               measures_dict,
                               namespace='Tree Measurements',
                               comm=comm,
                               io_size=io_size,
                               chunk_size=chunk_size,
                               value_chunk_size=value_chunk_size,
                               cache_size=cache_size)
    MPI.Finalize()
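The mean-diameter histogram in Example #23 is the standard weighted-histogram pattern: bin counts and diameter sums share the same bins, and the division is masked so empty bins stay zero. A self-contained sketch with synthetic distances and diameters:

import numpy as np

rng = np.random.default_rng(0)
distances = rng.uniform(0.0, 300.0, size=500)   # synthetic path distances (um)
diams = rng.uniform(0.5, 3.0, size=500)         # synthetic segment diameters (um)
distance_bin_size = 20.0

nbins = int((distances.max() - distances.min()) / distance_bin_size) + 1
counts, edges = np.histogram(distances, bins=nbins)
diam_sums, _ = np.histogram(distances, bins=nbins, weights=diams)

# mean diameter per distance bin; bins with zero counts stay 0 instead of dividing by zero
mean_diam = np.zeros_like(diam_sums)
np.divide(diam_sums, counts, where=counts > 0, out=mean_diam)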
Example #24
def main(forest_path, connectivity_namespace, coords_path, coords_namespace, io_size, chunk_size, value_chunk_size,
         cache_size):
    """

    :param forest_path:
    :param connectivity_namespace:
    :param coords_path:
    :param coords_namespace:
    :param io_size:
    :param chunk_size:
    :param value_chunk_size:
    :param cache_size:
    """
    comm = MPI.COMM_WORLD
    rank = comm.rank  # The process ID (integer 0-3 for 4-process run)

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        print('%i ranks have been allocated' % comm.size)
    sys.stdout.flush()

    start_time = time.time()

    soma_coords = {}
    source_populations = list(read_population_ranges(MPI._addressof(comm), coords_path).keys())
    for population in source_populations:
        soma_coords[population] = bcast_cell_attributes(MPI._addressof(comm), 0, coords_path, population,
                                                            namespace=coords_namespace)

    for population in soma_coords:
        for cell in viewvalues(soma_coords[population]):
            cell['u_index'] = get_array_index(u, cell['U Coordinate'][0])
            cell['v_index'] = get_array_index(v, cell['V Coordinate'][0])

    target = 'GC'

    layer_set, swc_type_set, syn_type_set = set(), set(), set()
    for source in layers[target]:
        layer_set.update(layers[target][source])
        swc_type_set.update(swc_types[target][source])
        syn_type_set.update(syn_types[target][source])

    count = 0
    for target_gid, attributes_dict in NeuroH5CellAttrGen(MPI._addressof(comm), forest_path, target, io_size=io_size,
                                                        cache_size=cache_size, namespace='Synapse_Attributes'):
        last_time = time.time()
        connection_dict = {}
        p_dict = {}
        source_gid_dict = {}
        if target_gid is None:
            print('Rank %i target gid is None' % rank)
        else:
            print('Rank %i received attributes for target: %s, gid: %i' % (rank, target, target_gid))
            synapse_dict = attributes_dict['Synapse_Attributes']
            connection_dict[target_gid] = {}
            local_np_random.seed(target_gid + connectivity_seed_offset)
            connection_dict[target_gid]['source_gid'] = np.array([], dtype='uint32')
            connection_dict[target_gid]['syn_id'] = np.array([], dtype='uint32')

            for layer in layer_set:
                for swc_type in swc_type_set:
                    for syn_type in syn_type_set:
                        sources, this_proportions = filter_sources(target, layer, swc_type, syn_type)
                        if sources:
                            if rank == 0 and count == 0:
                                source_list_str = '[' + ', '.join(['%s' % xi for xi in sources]) + ']'
                                print('Connections to target: %s in layer: %i ' \
                                    '(swc_type: %i, syn_type: %i): %s' % \
                                    (target, layer, swc_type, syn_type, source_list_str))
                            p, source_gid = np.array([]), np.array([])
                            for source, this_proportion in zip(sources, this_proportions):
                                if source not in source_gid_dict:
                                    this_p, this_source_gid = p_connect.get_p(target, source, target_gid, soma_coords,
                                                                              distance_U, distance_V)
                                    source_gid_dict[source] = this_source_gid
                                    p_dict[source] = this_p
                                else:
                                    this_source_gid = source_gid_dict[source]
                                    this_p = p_dict[source]
                                p = np.append(p, this_p * this_proportion)
                                source_gid = np.append(source_gid, this_source_gid)
                            syn_indexes = filter_synapses(synapse_dict, layer, swc_type, syn_type)
                            connection_dict[target_gid]['syn_id'] = \
                                np.append(connection_dict[target_gid]['syn_id'],
                                          synapse_dict['syn_id'][syn_indexes]).astype('uint32', copy=False)
                            this_source_gid = local_np_random.choice(source_gid, len(syn_indexes), p=p)
                            connection_dict[target_gid]['source_gid'] = \
                                np.append(connection_dict[target_gid]['source_gid'],
                                          this_source_gid).astype('uint32', copy=False)
            count += 1
            print('Rank %i took %i s to compute connectivity for target: %s, gid: %i' % (rank, time.time() - last_time,
                                                                                         target, target_gid))
            sys.stdout.flush()
        last_time = time.time()
        append_cell_attributes(MPI._addressof(comm), forest_path, target, connection_dict,
                               namespace=connectivity_namespace, io_size=io_size, chunk_size=chunk_size,
                               value_chunk_size=value_chunk_size)
        if rank == 0:
            print('Appending connectivity attributes for target: %s took %i s' % (target, time.time() - last_time))
        sys.stdout.flush()
        del connection_dict
        del p_dict
        del source_gid_dict
        gc.collect()

    global_count = comm.gather(count, root=0)
    if rank == 0:
        print('%i ranks took %i s to compute connectivity for %i cells' % (comm.size, time.time() - start_time,
                                                                           np.sum(global_count)))
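At its core, the connectivity assignment in Example #24 draws source gids for each filtered group of synapses with probabilities proportional to per-source connection probability times the source's proportion. A minimal sketch (all values synthetic; note np.random.choice requires p to sum to 1, which the script assumes its proportions guarantee):

import numpy as np

rng = np.random.RandomState(42)   # plays the role of local_np_random in the script

source_gid = np.array([10, 11, 12, 20, 21], dtype='uint32')
p = np.array([0.1, 0.2, 0.1, 0.4, 0.2])
p = p / p.sum()                   # explicit normalization for safety

n_synapses = 8                    # one draw per synapse in the filtered set
chosen = rng.choice(source_gid, n_synapses, p=p)
print(chosen)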
Example #25
def main(config_file, config_prefix, erev, population, presyn_name, gid,
         load_weights, measurements, template_paths, dataset_prefix,
         results_path, results_file_id, results_namespace_id, syn_mech_name,
         syn_weight, syn_count, syn_layer, swc_type, stim_amp, v_init, dt,
         use_cvode, verbose):

    config_logging(verbose)

    if results_file_id is None:
        results_file_id = uuid.uuid4()
    if results_namespace_id is None:
        results_namespace_id = 'Cell Clamp Results'
    comm = MPI.COMM_WORLD
    np.seterr(all='raise')
    params = dict(locals())
    env = Env(**params)
    configure_hoc_env(env)
    io_utils.mkout(env, env.results_file_path)
    env.cell_selection = {}

    if measurements is not None:
        measurements = [x.strip() for x in measurements.split(",")]
    else:
        measurements = []

    attr_dict = {}
    attr_dict[gid] = {}
    if 'passive' in measurements:
        attr_dict[gid].update(measure_passive(gid, population, v_init, env))
    if 'ap' in measurements:
        attr_dict[gid].update(measure_ap(gid, population, v_init, env))
    if 'ap_rate' in measurements:
        logger.info('ap_rate')
        attr_dict[gid].update(
            measure_ap_rate(gid, population, v_init, env, stim_amp=stim_amp))
    if 'fi' in measurements:
        attr_dict[gid].update(measure_fi(gid, population, v_init, env))
    if 'gap' in measurements:
        measure_gap_junction_coupling(gid, population, v_init, env)
    if 'psp' in measurements:
        assert (presyn_name is not None)
        assert (syn_mech_name is not None)
        assert (erev is not None)
        assert (syn_weight is not None)
        attr_dict[gid].update(
            measure_psp(gid,
                        population,
                        presyn_name,
                        syn_mech_name,
                        swc_type,
                        env,
                        v_init,
                        erev,
                        syn_layer=syn_layer,
                        syn_count=syn_count,
                        weight=syn_weight,
                        load_weights=load_weights))

    if results_path is not None:
        append_cell_attributes(env.results_file_path,
                               population,
                               attr_dict,
                               namespace=env.results_namespace_id,
                               comm=env.comm,
                               io_size=env.io_size)
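Example #25 takes its measurement set as a comma-separated string and gates each probe on membership. A tiny sketch of that parsing and dispatch (the measure_* values are placeholders for the real calls):

measurements = "passive, ap_rate ,fi"
measurements = [x.strip() for x in measurements.split(",")]

results = {}
if 'passive' in measurements:
    results['passive'] = 'measure_passive(...)'   # placeholder for the real call
if 'ap_rate' in measurements:
    results['ap_rate'] = 'measure_ap_rate(...)'
if 'fi' in measurements:
    results['fi'] = 'measure_fi(...)'
print(results)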
Example #26
def main(weights_path, weights_namespace, structured_weights_namespace, io_size, chunk_size, value_chunk_size,
         cache_size, debug):
    """

    :param weights_path:
    :param weights_namespace:
    :param structured_weights_namespace:
    :param io_size:
    :param chunk_size:
    :param value_chunk_size:
    :param cache_size:
    :param debug:
    """
    comm = MPI.COMM_WORLD
    rank = comm.rank

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        print('%i ranks have been allocated' % comm.size)
    sys.stdout.flush()

    population = 'GC'
    count = 0
    structured_count = 0
    start_time = time.time()
    weights_gen = NeuroH5CellAttrGen(MPI._addressof(comm), weights_path, population, io_size=io_size,
                                        cache_size=cache_size, namespace=weights_namespace)
    structured_weights_gen = NeuroH5CellAttrGen(MPI._addressof(comm), weights_path, population, io_size=io_size,
                                   cache_size=cache_size, namespace=structured_weights_namespace)
    if debug:
        attr_gen = ((next(weights_gen), next(structured_weights_gen)) for i in range(10))
    else:
        attr_gen = list(zip(weights_gen, structured_weights_gen))
    for (gid, weights_dict), (structured_weights_gid, structured_weights_dict) in attr_gen:
        local_time = time.time()
        modified_dict = {}
        sorted_indexes = None
        sorted_weights = None
        sorted_structured_indexes = None
        sorted_structured_weights = None
        if gid is not None:
            if gid != structured_weights_gid:
                raise Exception('gid %i from weights_gen does not match gid %i from structured_weights_gen' %
                                (gid, structured_weights_gid))
            sorted_indexes = weights_dict[weights_namespace]['syn_id'].argsort()
            sorted_weights = weights_dict[weights_namespace]['weight'][sorted_indexes]
            sorted_structured_indexes = structured_weights_dict[structured_weights_namespace]['syn_id'].argsort()
            sorted_structured_weights = \
                structured_weights_dict[structured_weights_namespace]['weight'][sorted_structured_indexes]
            if not np.all(weights_dict[weights_namespace]['syn_id'][sorted_indexes] ==
                          structured_weights_dict[structured_weights_namespace]['syn_id'][sorted_structured_indexes]):
                raise Exception('gid %i: sorted syn_ids from weights_namespace do not match '
                                'structured_weights_namespace' % gid)
            modify_weights = not np.all(sorted_weights == sorted_structured_weights)
            modified_dict[gid] = {'structured': np.array([int(modify_weights)], dtype='uint32')}
            print('Rank %i: %s gid %i took %.2f s to check for structured weights: %s' % \
                  (rank, population, gid, time.time() - local_time, str(modify_weights)))
            if modify_weights:
                structured_count += 1
            count += 1
        if not debug:
            append_cell_attributes(MPI._addressof(comm), weights_path, population, modified_dict,
                                   namespace=structured_weights_namespace, io_size=io_size, chunk_size=chunk_size,
                                   value_chunk_size=value_chunk_size)
        else:
            comm.barrier()
        del sorted_indexes
        del sorted_weights
        del sorted_structured_indexes
        del sorted_structured_weights
        del modified_dict
        gc.collect()
        sys.stdout.flush()

    global_count = comm.gather(count, root=0)
    global_structured_count = comm.gather(structured_count, root=0)
    if rank == 0:
        print('%i ranks processed %i %s cells (%i assigned structured weights) in %.2f s' % \
              (comm.size, np.sum(global_count), population, np.sum(global_structured_count),
               time.time() - start_time))
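The structured-weights check in Example #26 aligns two weight arrays by argsort on syn_id before comparing them. A self-contained sketch of that alignment with hypothetical data for one gid:

import numpy as np

weights = {'syn_id': np.array([3, 1, 2]), 'weight': np.array([0.5, 0.2, 0.9])}
structured = {'syn_id': np.array([1, 2, 3]), 'weight': np.array([0.2, 0.9, 0.7])}

i = weights['syn_id'].argsort()
j = structured['syn_id'].argsort()
assert np.all(weights['syn_id'][i] == structured['syn_id'][j])

# weights differ for syn_id 3, so this gid would be flagged as structured
modify_weights = not np.all(weights['weight'][i] == structured['weight'][j])
print(modify_weights)   # True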
Example #27
File: io_utils.py  Project: soltesz-lab/ca1
def spikeout(env, output_path, t_start=None, clear_data=False):
    """
    Writes spike times to specified NeuroH5 output file.

    :param env:
    :param output_path:
    :param t_start:
    :param clear_data:
    :return:
    """
    equilibration_duration = float(
        env.stimulus_config['Equilibration Duration'])
    n_trials = env.n_trials

    t_vec = np.array(env.t_vec, dtype=np.float32)
    id_vec = np.array(env.id_vec, dtype=np.uint32)

    trial_time_ranges = get_trial_time_ranges(env.t_rec.to_python(),
                                              env.n_trials)
    trial_time_bins = [
        t_trial_start for t_trial_start, t_trial_end in trial_time_ranges
    ]
    trial_dur = np.asarray([env.tstop + equilibration_duration] * n_trials,
                           dtype=np.float32)

    binlst = []
    typelst = sorted(env.celltypes.keys())
    binvect = np.asarray([env.celltypes[k]['start'] for k in typelst])
    sort_idx = np.argsort(binvect, axis=0)
    pop_names = [typelst[i] for i in sort_idx]
    bins = binvect[sort_idx][1:]
    inds = np.digitize(id_vec, bins)

    if env.results_namespace_id is None:
        namespace_id = "Spike Events"
    else:
        namespace_id = "Spike Events %s" % str(env.results_namespace_id)

    for i, pop_name in enumerate(pop_names):
        spkdict = {}
        sinds = np.where(inds == i)[0]
        if len(sinds) > 0:
            ids = id_vec[sinds]
            ts = t_vec[sinds]
            for j in range(0, len(ids)):
                gid = ids[j]
                t = ts[j]
                if (t_start is None) or (t >= t_start):
                    if gid in spkdict:
                        spkdict[gid]['t'].append(t)
                    else:
                        spkdict[gid] = {'t': [t]}
            for gid in spkdict:
                is_artificial = gid in env.artificial_cells[pop_name]
                spiketrain = np.array(spkdict[gid]['t'], dtype=np.float32)
                if gid in env.spike_onset_delay:
                    spiketrain -= env.spike_onset_delay[gid]
                trial_bins = np.digitize(spiketrain, trial_time_bins) - 1
                trial_spikes = [
                    np.copy(spiketrain[np.where(trial_bins == trial_i)[0]])
                    for trial_i in range(n_trials)
                ]
                for trial_i, trial_spiketrain in enumerate(trial_spikes):
                    trial_spiketrain -= np.sum(
                        trial_dur[:trial_i]) + equilibration_duration
                spkdict[gid]['t'] = np.concatenate(trial_spikes)
                spkdict[gid]['Trial Duration'] = trial_dur
                spkdict[gid]['Trial Index'] = np.asarray(trial_bins,
                                                         dtype=np.uint8)
                spkdict[gid]['artificial'] = np.asarray(
                    [1 if is_artificial else 0], dtype=np.uint8)
        append_cell_attributes(output_path,
                               pop_name,
                               spkdict,
                               namespace=namespace_id,
                               comm=env.comm,
                               io_size=env.io_size)
        del spkdict

    if clear_data:
        env.t_vec.resize(0)
        env.id_vec.resize(0)

    env.comm.barrier()
    if env.comm.Get_rank() == 0:
        logger.info("*** Output spike results to file %s" % output_path)
Example #28
def main(config, coordinates, field_width, gid, input_features_path,
         input_features_namespaces, initial_weights_path,
         output_features_namespace, output_features_path, output_weights_path,
         reference_weights_path, h5types_path, synapse_name,
         initial_weights_namespace, output_weights_namespace,
         reference_weights_namespace, connections_path, destination, sources,
         non_structured_sources, non_structured_weights_namespace,
         non_structured_weights_path, arena_id, field_width_scale,
         max_opt_iter, max_weight_decay_fraction, optimize_tol, peak_rate,
         reference_weights_are_delta, arena_margin, target_amplitude, io_size,
         chunk_size, value_chunk_size, cache_size, write_size, verbose,
         dry_run, plot, show_fig, save_fig, debug):
    """

    :param config: str (path to .yaml file)
    :param input_features_path: str (path to .h5 file)
    :param initial_weights_path: str (path to .h5 file)
    :param initial_weights_namespace: str
    :param output_weights_namespace: str
    :param connections_path: str (path to .h5 file)
    :param destination: str
    :param sources: list of str
    :param io_size:
    :param chunk_size:
    :param value_chunk_size:
    :param write_size:
    :param verbose:
    :param dry_run:
    :return:
    """

    utils.config_logging(verbose)
    script_name = __file__
    logger = utils.get_script_logger(script_name)

    comm = MPI.COMM_WORLD
    rank = comm.rank
    nranks = comm.size

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info(f'{comm.size} ranks have been allocated')

    env = Env(comm=comm, config_file=config, io_size=io_size)
    env.comm.barrier()

    if plot and (not save_fig) and (not show_fig):
        show_fig = True

    if (not dry_run) and (rank == 0):
        if not os.path.isfile(output_weights_path):
            if initial_weights_path is not None:
                input_file = h5py.File(initial_weights_path, 'r')
            elif h5types_path is not None:
                input_file = h5py.File(h5types_path, 'r')
            else:
                raise RuntimeError(
                    'h5types input path must be specified when weights path is not specified.'
                )
            output_file = h5py.File(output_weights_path, 'w')
            input_file.copy('/H5Types', output_file)
            input_file.close()
            output_file.close()
    env.comm.barrier()

    LTD_output_weights_namespace = f'LTD {output_weights_namespace} {arena_id}'
    LTP_output_weights_namespace = f'LTP {output_weights_namespace} {arena_id}'
    this_input_features_namespaces = [
        f'{input_features_namespace} {arena_id}'
        for input_features_namespace in input_features_namespaces
    ]

    selectivity_type_index = {
        i: n
        for n, i in viewitems(env.selectivity_types)
    }
    target_selectivity_type_name = 'place'
    target_selectivity_type = env.selectivity_types[
        target_selectivity_type_name]
    features_attrs = defaultdict(dict)
    source_features_attr_names = [
        'Selectivity Type', 'Num Fields', 'Field Width', 'Peak Rate',
        'Module ID', 'Grid Spacing', 'Grid Orientation',
        'Field Width Concentration Factor', 'X Offset', 'Y Offset'
    ]
    target_features_attr_names = [
        'Selectivity Type', 'Num Fields', 'Field Width', 'Peak Rate',
        'X Offset', 'Y Offset'
    ]

    seed_offset = int(
        env.model_config['Random Seeds']['GC Structured Weights'])
    spatial_resolution = env.stimulus_config['Spatial Resolution']  # cm

    arena = env.stimulus_config['Arena'][arena_id]
    default_run_vel = arena.properties['default run velocity']  # cm/s

    gid_count = 0
    start_time = time.time()

    target_gid_set = None
    if len(gid) > 0:
        target_gid_set = set(gid)
    projections = [(source, destination) for source in sources]
    graph_info = read_graph_info(connections_path,
                                 namespaces=['Connections', 'Synapses'],
                                 read_node_index=True)
    for projection in projections:
        if projection not in graph_info:
            raise RuntimeError(
                f'Projection {projection[0]} -> {projection[1]} is not present in connections file.'
            )
        if target_gid_set is None:
            target_gid_set = set(graph_info[projection][1])

    all_sources = sources + non_structured_sources
    src_input_features_attr_dict = {source: {} for source in all_sources}
    for source in sorted(all_sources):
        this_src_input_features_attr_dict = {}
        for this_input_features_namespace in this_input_features_namespaces:
            logger.info(
                f'Rank {rank}: Reading {this_input_features_namespace} feature data for cells in population {source}'
            )
            input_features_dict = scatter_read_cell_attributes(
                input_features_path,
                source,
                namespaces=[this_input_features_namespace],
                mask=set(source_features_attr_names),
                comm=env.comm,
                io_size=env.io_size)
            for gid, attr_dict in input_features_dict[
                    this_input_features_namespace]:
                this_src_input_features_attr_dict[gid] = attr_dict
        src_input_features_attr_dict[
            source] = this_src_input_features_attr_dict
        source_gid_count = env.comm.reduce(
            len(this_src_input_features_attr_dict), op=MPI.SUM, root=0)
        if rank == 0:
            logger.info(
                f'Rank {rank}: Read feature data for {source_gid_count} cells in population {source}'
            )

    dst_gids = []
    if target_gid_set is not None:
        for i, gid in enumerate(target_gid_set):
            if i % nranks == rank:
                dst_gids.append(gid)

    dst_input_features_attr_dict = {}
    for this_input_features_namespace in this_input_features_namespaces:
        feature_count = 0
        gid_count = 0
        logger.info(
            f'Rank {rank}: reading {this_input_features_namespace} feature data for {len(dst_gids)} cells in population {destination}'
        )
        input_features_iter = scatter_read_cell_attribute_selection(
            input_features_path,
            destination,
            namespace=this_input_features_namespace,
            mask=set(target_features_attr_names),
            selection=dst_gids,
            io_size=env.io_size,
            comm=env.comm)
        for gid, attr_dict in input_features_iter:
            gid_count += 1
            if (len(coordinates) > 0) or (attr_dict['Num Fields'][0] > 0):
                dst_input_features_attr_dict[gid] = attr_dict
                feature_count += 1

        logger.info(
            f'Rank {rank}: read {this_input_features_namespace} feature data for '
            f'{gid_count} / {feature_count} cells in population {destination}')
        feature_count = env.comm.reduce(feature_count, op=MPI.SUM, root=0)
        env.comm.barrier()
        if rank == 0:
            logger.info(
                f'Read {this_input_features_namespace} feature data for {feature_count} cells in population {destination}'
            )

    feature_dst_gids = list(dst_input_features_attr_dict.keys())
    all_feature_gids_per_rank = comm.allgather(feature_dst_gids)
    all_feature_gids = sorted(
        [item for sublist in all_feature_gids_per_rank for item in sublist])
    request_dst_gids = []
    for i, gid in enumerate(all_feature_gids):
        if i % nranks == rank:
            request_dst_gids.append(gid)

    dst_input_features_attr_dict = exchange_input_features(
        env.comm, request_dst_gids, dst_input_features_attr_dict)
    dst_gids = list(dst_input_features_attr_dict.keys())

    if rank == 0:
        logger.info(
            f"Rank {rank} feature dict is {dst_input_features_attr_dict}")

    dst_count = env.comm.reduce(len(dst_gids), op=MPI.SUM, root=0)

    logger.info(f"Rank {rank} has {len(dst_gids)} feature gids")
    if rank == 0:
        logger.info(f'Total {dst_count} feature gids')

    max_dst_count = env.comm.allreduce(len(dst_gids), op=MPI.MAX)
    env.comm.barrier()

    max_iter_count = max_dst_count
    output_features_dict = {}
    LTP_output_weights_dict = {}
    LTD_output_weights_dict = {}
    non_structured_output_weights_dict = {}
    for iter_count in range(max_iter_count):

        gc.collect()

        local_time = time.time()
        selection = []
        if len(dst_gids) > 0:
            dst_gid = dst_gids.pop()
            selection.append(dst_gid)
            logger.info(f'Rank {rank} received gid {dst_gid}')

        env.comm.barrier()

        arena_margin_size = 0.
        arena_margin = max(arena_margin, 0.)

        target_selectivity_features_dict = {}
        target_selectivity_config_dict = {}
        target_field_width_dict = {}

        for destination_gid in selection:
            arena_margin_size = init_selectivity_config(
                destination_gid,
                spatial_resolution,
                arena,
                arena_margin,
                arena_margin_size,
                coordinates,
                field_width,
                field_width_scale,
                peak_rate,
                target_selectivity_type,
                selectivity_type_index,
                dst_input_features_attr_dict,
                target_selectivity_features_dict,
                target_selectivity_config_dict,
                target_field_width_dict,
                logger=logger)

        arena_x, arena_y = stimulus.get_2D_arena_spatial_mesh(
            arena, spatial_resolution, margin=arena_margin_size)

        selection = list(target_selectivity_features_dict.keys())

        initial_weights_by_source_gid_dict = defaultdict(lambda: dict())
        initial_weights_by_syn_id_dict = \
          read_weights(initial_weights_path, initial_weights_namespace, synapse_name,
                       destination, selection, env.comm, env.io_size, defaultdict(lambda: dict()),
                       logger=logger if rank == 0 else None)

        non_structured_weights_by_source_gid_dict = defaultdict(lambda: dict())
        non_structured_weights_by_syn_id_dict = None
        if len(non_structured_sources) > 0:
            non_structured_weights_by_syn_id_dict = \
             read_weights(non_structured_weights_path, non_structured_weights_namespace, synapse_name,
                          destination, selection, env.comm, env.io_size, defaultdict(lambda: dict()),
                          logger=logger if rank == 0 else None)

        reference_weights_by_syn_id_dict = None
        reference_weights_by_source_gid_dict = defaultdict(lambda: dict())
        if reference_weights_path is not None:
            reference_weights_by_syn_id_dict = \
             read_weights(reference_weights_path, reference_weights_namespace, synapse_name,
                          destination, selection, env.comm, env.io_size, defaultdict(lambda: dict()),
                          logger=logger if rank == 0 else None)

        source_gid_set_dict = defaultdict(set)
        syn_count_by_source_gid_dict = defaultdict(lambda: defaultdict(int))
        syn_ids_by_source_gid_dict = defaultdict(lambda: defaultdict(list))
        structured_syn_id_count = defaultdict(int)
        non_structured_syn_id_count = defaultdict(int)

        projections = [(source, destination) for source in all_sources]
        edge_iter_dict, edge_attr_info = scatter_read_graph_selection(
            connections_path,
            selection=selection,
            namespaces=['Synapses'],
            projections=projections,
            comm=env.comm,
            io_size=env.io_size)

        syn_counts_by_source = init_syn_weight_dicts(
            destination, non_structured_sources, edge_iter_dict,
            edge_attr_info, initial_weights_by_syn_id_dict,
            initial_weights_by_source_gid_dict,
            non_structured_weights_by_syn_id_dict,
            non_structured_weights_by_source_gid_dict,
            reference_weights_by_syn_id_dict,
            reference_weights_by_source_gid_dict, source_gid_set_dict,
            syn_count_by_source_gid_dict, syn_ids_by_source_gid_dict,
            structured_syn_id_count, non_structured_syn_id_count)

        for source in syn_counts_by_source:
            for this_gid in syn_counts_by_source[source]:
                count = syn_counts_by_source[source][this_gid]
                logger.info(
                    f'Rank {rank}: destination: {destination}; gid {this_gid}; '
                    f'{count} edges from source population {source}')

        input_rate_maps_by_source_gid_dict = {}
        if len(non_structured_sources) > 0:
            non_structured_input_rate_maps_by_source_gid_dict = {}
        else:
            non_structured_input_rate_maps_by_source_gid_dict = None

        for source in all_sources:
            source_gids = list(source_gid_set_dict[source])
            if rank == 0:
                logger.info(
                    f'Rank {rank}: getting feature data for {len(source_gids)} cells in population {source}'
                )
            this_src_input_features = exchange_input_features(
                env.comm, source_gids, src_input_features_attr_dict[source])

            count = 0
            for this_gid in source_gids:
                attr_dict = this_src_input_features[this_gid]
                this_selectivity_type = attr_dict['Selectivity Type'][0]
                this_selectivity_type_name = selectivity_type_index[
                    this_selectivity_type]
                input_cell_config = stimulus.get_input_cell_config(
                    this_selectivity_type,
                    selectivity_type_index,
                    selectivity_attr_dict=attr_dict)
                this_arena_rate_map = np.asarray(
                    input_cell_config.get_rate_map(arena_x, arena_y),
                    dtype=np.float32)
                if source in non_structured_sources:
                    non_structured_input_rate_maps_by_source_gid_dict[
                        this_gid] = this_arena_rate_map
                else:
                    input_rate_maps_by_source_gid_dict[
                        this_gid] = this_arena_rate_map
                count += 1

        for destination_gid in selection:

            if is_interactive:
                context.update(locals())

            save_fig_path = None
            if save_fig is not None:
                save_fig_path = f'{save_fig}/Structured Weights {destination} {destination_gid}.png'

            reference_weight_dict = None
            if reference_weights_path is not None:
                reference_weight_dict = reference_weights_by_source_gid_dict[
                    destination_gid]

            LTP_delta_weights_dict, LTD_delta_weights_dict, arena_structured_map = \
               synapses.generate_structured_weights(destination_gid,
                                                 target_map=target_selectivity_features_dict[destination_gid]['Arena Rate Map'],
                                                 initial_weight_dict=initial_weights_by_source_gid_dict[destination_gid],
                                                 #reference_weight_dict=reference_weight_dict,
                                                 #reference_weights_are_delta=reference_weights_are_delta,
                                                 #reference_weights_namespace=reference_weights_namespace,
                                                 input_rate_map_dict=input_rate_maps_by_source_gid_dict,
                                                 non_structured_input_rate_map_dict=non_structured_input_rate_maps_by_source_gid_dict,
                                                 non_structured_weights_dict=non_structured_weights_by_source_gid_dict[destination_gid],
                                                 syn_count_dict=syn_count_by_source_gid_dict[destination_gid],
                                                 max_opt_iter=max_opt_iter,
                                                 max_weight_decay_fraction=max_weight_decay_fraction,
                                                 target_amplitude=target_amplitude,
                                                 arena_x=arena_x, arena_y=arena_y,
                                                 optimize_tol=optimize_tol,
                                                 verbose=verbose if rank == 0 else False,
                                                 plot=plot, show_fig=show_fig,
                                                 save_fig=save_fig_path,
                                                 fig_kwargs={'gid': destination_gid,
                                                             'field_width': target_field_width_dict[destination_gid]})
            input_rate_maps_by_source_gid_dict.clear()

            target_map_flat = target_selectivity_features_dict[
                destination_gid]['Arena Rate Map'].flat
            arena_map_residual_mae = np.mean(
                np.abs(arena_structured_map - target_map_flat))
            output_features_dict[destination_gid] = \
               { fld: target_selectivity_features_dict[destination_gid][fld]
                 for fld in ['Selectivity Type',
                             'Num Fields',
                             'Field Width',
                             'Peak Rate',
                             'X Offset',
                             'Y Offset',]}
            output_features_dict[destination_gid][
                'Rate Map Residual Mean Error'] = np.asarray(
                    [arena_map_residual_mae], dtype=np.float32)

            this_structured_syn_id_count = structured_syn_id_count[
                destination_gid]
            output_syn_ids = np.empty(this_structured_syn_id_count,
                                      dtype='uint32')
            LTD_output_weights = np.empty(this_structured_syn_id_count,
                                          dtype='float32')
            LTP_output_weights = np.empty(this_structured_syn_id_count,
                                          dtype='float32')
            i = 0
            for source_gid in LTP_delta_weights_dict:
                for syn_id in syn_ids_by_source_gid_dict[destination_gid][
                        source_gid]:
                    output_syn_ids[i] = syn_id
                    LTP_output_weights[i] = LTP_delta_weights_dict[source_gid]
                    LTD_output_weights[i] = LTD_delta_weights_dict[source_gid]
                    i += 1
            LTP_output_weights_dict[destination_gid] = {
                'syn_id': output_syn_ids,
                synapse_name: LTP_output_weights
            }
            LTD_output_weights_dict[destination_gid] = {
                'syn_id': output_syn_ids,
                synapse_name: LTD_output_weights
            }

            this_non_structured_syn_id_count = non_structured_syn_id_count[
                destination_gid]
            i = 0

            logger.info(
                f'Rank {rank}; destination: {destination}; gid {destination_gid}; '
                f'generated structured weights for {len(output_syn_ids)} inputs in {time.time() - local_time:.2f} s; '
                f'residual error is {arena_map_residual_mae:.2f}')
            gid_count += 1
            gc.collect()

        env.comm.barrier()
        if (write_size > 0) and (iter_count % write_size == 0):
            if not dry_run:
                append_cell_attributes(output_weights_path,
                                       destination,
                                       LTD_output_weights_dict,
                                       namespace=LTD_output_weights_namespace,
                                       comm=env.comm,
                                       io_size=env.io_size,
                                       chunk_size=chunk_size,
                                       value_chunk_size=value_chunk_size)
                append_cell_attributes(output_weights_path,
                                       destination,
                                       LTP_output_weights_dict,
                                       namespace=LTP_output_weights_namespace,
                                       comm=env.comm,
                                       io_size=env.io_size,
                                       chunk_size=chunk_size,
                                       value_chunk_size=value_chunk_size)
                count = env.comm.reduce(len(LTP_output_weights_dict),
                                        op=MPI.SUM,
                                        root=0)
                env.comm.barrier()

                if rank == 0:
                    logger.info(
                        f'Destination: {destination}; appended weights for {count} cells'
                    )
                if output_features_path is not None:
                    if output_features_namespace is None:
                        output_features_namespace = f'{target_selectivity_type_name.title()} Selectivity'
                    this_output_features_namespace = f'{output_features_namespace} {arena_id}'
                    append_cell_attributes(
                        output_features_path,
                        destination,
                        output_features_dict,
                        namespace=this_output_features_namespace)
                    count = env.comm.reduce(len(output_features_dict),
                                            op=MPI.SUM,
                                            root=0)
                    env.comm.barrier()

                    if rank == 0:
                        logger.info(
                            f'Destination: {destination}; appended selectivity features for {count} cells'
                        )

            LTP_output_weights_dict.clear()
            LTD_output_weights_dict.clear()
            output_features_dict.clear()
            gc.collect()

        env.comm.barrier()

        if (iter_count >= 10) and debug:
            break

    env.comm.barrier()
    if not dry_run:
        append_cell_attributes(output_weights_path,
                               destination,
                               LTD_output_weights_dict,
                               namespace=LTD_output_weights_namespace,
                               comm=env.comm,
                               io_size=env.io_size,
                               chunk_size=chunk_size,
                               value_chunk_size=value_chunk_size)
        append_cell_attributes(output_weights_path,
                               destination,
                               LTP_output_weights_dict,
                               namespace=LTP_output_weights_namespace,
                               comm=env.comm,
                               io_size=env.io_size,
                               chunk_size=chunk_size,
                               value_chunk_size=value_chunk_size)
        count = comm.reduce(len(LTP_output_weights_dict), op=MPI.SUM, root=0)
        env.comm.barrier()

        if rank == 0:
            logger.info(
                f'Destination: {destination}; appended weights for {count} cells'
            )
        if output_features_path is not None:
            if output_features_namespace is None:
                output_features_namespace = 'Selectivity Features'
            this_output_features_namespace = f'{output_features_namespace} {arena_id}'
            append_cell_attributes(output_features_path,
                                   destination,
                                   output_features_dict,
                                   namespace=this_output_features_namespace)
            count = env.comm.reduce(len(output_features_dict),
                                    op=MPI.SUM,
                                    root=0)
            env.comm.barrier()

            if rank == 0:
                logger.info(
                    f'Destination: {destination}; appended selectivity features for {count} cells'
                )

    env.comm.barrier()
    global_count = env.comm.gather(gid_count, root=0)
    env.comm.barrier()

    if rank == 0:
        total_count = np.sum(global_count)
        total_time = time.time() - start_time
        logger.info(
            f'Destination: {destination}; '
            f'{env.comm.size} ranks assigned structured weights to {total_count} cells in {total_time:.2f} s'
        )
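Example #28 parcels target gids out round-robin (rank r takes every nranks-th gid) and pads the main loop to the maximum per-rank count so the collective writes stay synchronized across ranks. A sketch of that scheduling without MPI (rank and nranks are simulated here):

def assign_gids(all_gids, rank, nranks):
    # round-robin assignment, matching the `i % nranks == rank` test in the script
    return [gid for i, gid in enumerate(sorted(all_gids)) if i % nranks == rank]

all_gids = range(10)
nranks = 4
per_rank = [assign_gids(all_gids, r, nranks) for r in range(nranks)]
max_count = max(len(g) for g in per_rank)   # comm.allreduce(..., op=MPI.MAX) in the script

for r, gids in enumerate(per_rank):
    # every rank iterates max_count times; ranks that run out process an empty selection
    print(r, [gids.pop() if gids else None for _ in range(max_count)])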
Example #29
def main(config, template_path, output_path, forest_path, populations, io_size,
         chunk_size, value_chunk_size, cache_size, verbose):
    """

    :param config:
    :param template_path:
    :param forest_path:
    :param populations:
    :param io_size:
    :param chunk_size:
    :param value_chunk_size:
    :param cache_size:
    """

    utils.config_logging(verbose)
    logger = utils.get_script_logger(script_name)

    comm = MPI.COMM_WORLD
    rank = comm.rank

    env = Env(comm=MPI.COMM_WORLD,
              config_file=config,
              template_paths=template_path)
    h('objref nil, pc, templatePaths')
    h.load_file("nrngui.hoc")
    h.load_file("./templates/Value.hoc")
    h.xopen("./lib.hoc")
    h.pc = h.ParallelContext()

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    h.templatePaths = h.List()
    for path in env.templatePaths:
        h.templatePaths.append(h.Value(1, path))

    if output_path is None:
        output_path = forest_path

    if rank == 0:
        if not os.path.isfile(output_path):
            input_file = h5py.File(forest_path, 'r')
            output_file = h5py.File(output_path, 'w')
            input_file.copy('/H5Types', output_file)
            input_file.close()
            output_file.close()
    comm.barrier()

    (pop_ranges, _) = read_population_ranges(forest_path, comm=comm)
    start_time = time.time()
    for population in populations:
        logger.info('Rank %i population: %s' % (rank, population))
        count = 0
        (population_start, _) = pop_ranges[population]
        template_name = env.celltypes[population]['template']
        h.find_template(h.pc, h.templatePaths, template_name)
        template_class = getattr(h, template_name)
        measures_dict = {}
        for gid, morph_dict in NeuroH5TreeGen(forest_path,
                                              population,
                                              io_size=io_size,
                                              comm=comm,
                                              topology=True):
            if gid is not None:
                logger.info('Rank %i gid: %i' % (rank, gid))
                cell = cells.make_neurotree_cell(template_class,
                                                 neurotree_dict=morph_dict,
                                                 gid=gid)
                secnodes_dict = morph_dict['section_topology']['nodes']

                apicalidx = set(cell.apicalidx)
                basalidx = set(cell.basalidx)

                dendrite_area_dict = {k + 1: 0.0 for k in range(0, 4)}
                dendrite_length_dict = {k + 1: 0.0 for k in range(0, 4)}
                for (i, sec) in enumerate(cell.sections):
                    if (i in apicalidx) or (i in basalidx):
                        secnodes = secnodes_dict[i]
                        prev_layer = None
                        for seg in sec.allseg():
                            L = seg.sec.L
                            nseg = seg.sec.nseg
                            seg_l = L / nseg
                            seg_area = h.area(seg.x)
                            layer = cells.get_node_attribute(
                                'layer', morph_dict, seg.sec, secnodes, seg.x)
                            layer = layer if layer > 0 else (
                                prev_layer if prev_layer is not None else 1)
                            prev_layer = layer
                            dendrite_length_dict[layer] += seg_l
                            dendrite_area_dict[layer] += seg_area

                measures_dict[gid] = {
                    'dendrite_area': np.asarray(
                        [dendrite_area_dict[k]
                         for k in sorted(dendrite_area_dict.keys())],
                        dtype=np.float32),
                    'dendrite_length': np.asarray(
                        [dendrite_length_dict[k]
                         for k in sorted(dendrite_length_dict.keys())],
                        dtype=np.float32)
                }

                del cell
                count += 1
            else:
                logger.info('Rank %i gid is None' % rank)
        append_cell_attributes(output_path,
                               population,
                               measures_dict,
                               namespace='Tree Measurements',
                               comm=comm,
                               io_size=io_size,
                               chunk_size=chunk_size,
                               value_chunk_size=value_chunk_size,
                               cache_size=cache_size)
    MPI.Finalize()
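The layer bookkeeping in Example #29 falls back to the previous segment's layer when a node reports a non-positive layer. A sketch of that accumulation over a synthetic (layer, length, area) stream:

# synthetic per-segment values; layer <= 0 means 'unlabeled'
segments = [(1, 5.0, 12.0), (0, 5.0, 11.0), (2, 4.0, 9.0), (0, 4.0, 8.5)]

dendrite_length = {k: 0.0 for k in range(1, 5)}
dendrite_area = {k: 0.0 for k in range(1, 5)}

prev_layer = None
for layer, seg_l, seg_area in segments:
    # inherit the previous layer for unlabeled segments; default to layer 1
    layer = layer if layer > 0 else (prev_layer if prev_layer is not None else 1)
    prev_layer = layer
    dendrite_length[layer] += seg_l
    dendrite_area[layer] += seg_area

print(dendrite_length)   # {1: 10.0, 2: 8.0, 3: 0.0, 4: 0.0}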
Example #30
def main(config, config_prefix, template_path, output_path, forest_path,
         populations, distribution, io_size, chunk_size, value_chunk_size,
         cache_size, write_size, verbose, dry_run):
    """

    :param config:
    :param config_prefix:
    :param template_path:
    :param output_path:
    :param forest_path:
    :param populations:
    :param distribution:
    :param io_size:
    :param chunk_size:
    :param value_chunk_size:
    :param cache_size:
    :param write_size:
    :param verbose:
    :param dry_run:
    """

    utils.config_logging(verbose)
    logger = utils.get_script_logger(os.path.basename(__file__))

    comm = MPI.COMM_WORLD
    rank = comm.rank

    if rank == 0:
        logger.info('%i ranks have been allocated' % comm.size)

    env = Env(comm=MPI.COMM_WORLD,
              config_file=config,
              config_prefix=config_prefix,
              template_paths=template_path)

    configure_hoc_env(env)

    if io_size == -1:
        io_size = comm.size

    if output_path is None:
        output_path = forest_path

    if not dry_run:
        if rank == 0:
            if not os.path.isfile(output_path):
                input_file = h5py.File(forest_path, 'r')
                output_file = h5py.File(output_path, 'w')
                input_file.copy('/H5Types', output_file)
                input_file.close()
                output_file.close()
        comm.barrier()

    (pop_ranges, _) = read_population_ranges(forest_path, comm=comm)
    start_time = time.time()
    syn_stats = {}
    for population in populations:
        logger.info('Rank %i population: %s' % (rank, population))
        (population_start, _) = pop_ranges[population]
        template_class = load_cell_template(env, population)

        density_dict = env.celltypes[population]['synapses']['density']
        layer_set_dict = defaultdict(set)
        swc_set_dict = defaultdict(set)
        for sec_name, sec_dict in viewitems(density_dict):
            for syn_type, syn_dict in viewitems(sec_dict):
                swc_set_dict[syn_type].add(env.SWC_Types[sec_name])
                for layer_name in syn_dict:
                    if layer_name != 'default':
                        layer = env.layers[layer_name]
                        layer_set_dict[syn_type].add(layer)

        syn_stats_dict = {
            'section': defaultdict(lambda: {'excitatory': 0, 'inhibitory': 0}),
            'layer': defaultdict(lambda: {'excitatory': 0, 'inhibitory': 0}),
            'swc_type': defaultdict(lambda: {'excitatory': 0, 'inhibitory': 0}),
            'total': {'excitatory': 0, 'inhibitory': 0}
        }

        count = 0
        gid_count = 0
        synapse_dict = {}
        for gid, morph_dict in NeuroH5TreeGen(forest_path,
                                              population,
                                              io_size=io_size,
                                              comm=comm,
                                              topology=True):
            local_time = time.time()
            if gid is not None:
                logger.info('Rank %i gid: %i' % (rank, gid))
                cell = cells.make_neurotree_cell(template_class,
                                                 neurotree_dict=morph_dict,
                                                 gid=gid)
                cell_sec_dict = {
                    'apical': (cell.apical, None),
                    'basal': (cell.basal, None),
                    'soma': (cell.soma, None),
                    'ais': (cell.ais, None),
                    'hillock': (cell.hillock, None)
                }
                cell_secidx_dict = {
                    'apical': cell.apicalidx,
                    'basal': cell.basalidx,
                    'soma': cell.somaidx,
                    'ais': cell.aisidx,
                    'hillock': cell.hilidx
                }

                random_seed = env.model_config['Random Seeds'][
                    'Synapse Locations'] + gid
                if distribution == 'uniform':
                    syn_dict, seg_density_per_sec = synapses.distribute_uniform_synapses(
                        random_seed, env.Synapse_Types, env.SWC_Types,
                        env.layers, density_dict, morph_dict, cell_sec_dict,
                        cell_secidx_dict)

                elif distribution == 'poisson':
                    syn_dict, seg_density_per_sec = synapses.distribute_poisson_synapses(
                        random_seed, env.Synapse_Types, env.SWC_Types,
                        env.layers, density_dict, morph_dict, cell_sec_dict,
                        cell_secidx_dict)
                else:
                    raise Exception('Unknown distribution type: %s' %
                                    distribution)

                synapse_dict[gid] = syn_dict
                this_syn_stats = update_syn_stats(env, syn_stats_dict,
                                                  syn_dict)
                check_syns(gid, morph_dict, this_syn_stats,
                           seg_density_per_sec, layer_set_dict, swc_set_dict,
                           env, logger)

                del cell
                num_syns = len(synapse_dict[gid]['syn_ids'])
                logger.info(
                    'Rank %i took %i s to compute %d synapse locations for %s gid: %i'
                    % (rank, time.time() - local_time, num_syns, population,
                       gid))
                logger.info(
                    '%s gid %i synapses: %s' %
                    (population, gid, local_syn_summary(this_syn_stats)))
                gid_count += 1
            else:
                logger.info('Rank %i gid is None' % rank)
            if (not dry_run) and (write_size > 0) and (gid_count % write_size == 0):
                append_cell_attributes(output_path,
                                       population,
                                       synapse_dict,
                                       namespace='Synapse Attributes',
                                       comm=comm,
                                       io_size=io_size,
                                       chunk_size=chunk_size,
                                       value_chunk_size=value_chunk_size,
                                       cache_size=cache_size)
                synapse_dict = {}
                gc.collect()
            syn_stats[population] = syn_stats_dict
            count += 1

        if not dry_run:
            append_cell_attributes(output_path,
                                   population,
                                   synapse_dict,
                                   namespace='Synapse Attributes',
                                   comm=comm,
                                   io_size=io_size,
                                   chunk_size=chunk_size,
                                   value_chunk_size=value_chunk_size,
                                   cache_size=cache_size)

        global_count = comm.gather(gid_count, root=0)

        if gid_count > 0:
            color = 1
        else:
            color = 0

        comm0 = comm.Split(color, 0)
        if color == 1:
            summary = global_syn_summary(comm0,
                                         syn_stats,
                                         np.sum(global_count),
                                         root=0)
            if rank == 0:
                logger.info(
                    'target: %s, %i ranks took %i s to compute synapse locations for %i cells'
                    % (population, comm.size, time.time() - start_time,
                       np.sum(global_count)))
                logger.info(summary)
        comm0.Free()
        comm.barrier()

    MPI.Finalize()
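Example #30 flushes accumulated synapse attributes every write_size cells and once more after the loop, so a partially filled batch is not lost. The skeleton of that batching, with a print standing in for append_cell_attributes:

def flush(batch):
    print('writing %i cells' % len(batch))   # append_cell_attributes(...) in the script

write_size = 3
synapse_dict = {}
for gid_count, gid in enumerate(range(1, 8), start=1):
    synapse_dict[gid] = {'syn_ids': []}      # computed attributes would go here
    if write_size > 0 and gid_count % write_size == 0:
        flush(synapse_dict)
        synapse_dict = {}
flush(synapse_dict)                           # final flush of the remainder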