Example 1
def _circular_mask(image, radius, polarity='outwards', center=None):
    '''
    Given an image, create a circular mask around a point with the defined radius
    Polarity:
        'inwards' : True inside,  False outside
        'outwards': True outside, False inside
    '''
    h = image.shape[0]
    w = image.shape[1]

    if center is None:
        center = [int(h / 2), int(w / 2)]

    Y, X = np.ogrid[:h, :w]
    dist_from_center = np.sqrt(
        np.power(X - center[1], 2) + np.power(Y - center[0], 2))
    if polarity.lower() == 'inwards':
        mask = dist_from_center <= radius
    elif polarity.lower() == 'outwards':
        mask = dist_from_center >= radius
    else:
        raise err.ArgumentError('Polarity "{}" not defined'.format(polarity))
    return mask
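
A minimal usage sketch (not part of the library source; assumes `numpy` is imported as `np`, as the function body already requires):

import numpy as np

image = np.random.rand(64, 64)
# True inside a radius-10 circle centred on the image centre
mask = _circular_mask(image, radius=10, polarity='inwards')
inside_only = np.where(mask, image, 0.0)  # zero out everything outside the circle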
Example 2
def validate_keyword_arena_shape(arena_shape):
    '''
    Ensure that the arena_shape is a meaningful value
    
    Parameters
    ----------
    arena_shape : str
        the value given for the keyword `arena_shape`
    
    Returns
    -------
    arena_shape : str
        A value that is guaranteed to be an acceptable member of one of the
        recognised groups of arena_shapes
    '''
    if not isinstance(arena_shape, str):
        raise err.ArgumentError(
            "Keyword `arena_shape` must be a string, not type `{type(arena_shape)}`"
        )
    else:
        arena_shape = arena_shape.lower()

    if arena_shape in default.shapes_square:
        # this is ok
        pass
    elif arena_shape in default.shapes_circle:
        # this is ok
        pass
    elif arena_shape in default.shapes_linear:
        # this is ok
        pass
    else:
        raise NotImplementedError(
            f"Arena shape '{arena_shape}' not implemented")

    return arena_shape
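
A hedged usage sketch; the literal "square" below assumes that string is among the synonyms listed in `opexebo.defaults.shapes_square`:

shape = validate_keyword_arena_shape("Square")
# -> "square" (lower-cased), assuming "square" is in default.shapes_square
# an unrecognised string such as "hexagon" raises NotImplementedError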
Example 3
def spatial_occupancy(time, position, speed, arena_size, **kwargs):
    '''
    Generate an occupancy map: how much time the animal spent in each location
    in the arena.

    NOTES: This assumes that the positions have already been aligned and curated
    to remove NaNs. This is based on the expectation that it will primarily be
    used within the DataJoint framework, where the curation takes place at a
    much earlier stage.

    Parameters
    ----------
    time: np.ndarray
        timestamps of position and speed data
    position: np.ndarray (x, [y])
        1d or 2d array of positions at timestamps. If 2d, then row major, such
        that `position[0]` corresponds to all `x`; and `position[1]` to all `y`
    speed: np.ndarray
        1d array of speeds at timestamps
    arena_size: float or tuple of floats
        Dimensions of arena (in cm)
            * For a linear track, length
            * For a circular arena, diameter
            * For a rectangular arena, length or (length, length)
    speed_cutoff: float
        Timestamps with instantaneous speed beneath this value are ignored. Default 0
    arena_shape: {"square", "rect", "circle", "line"}
        Rectangular and square are equivalent. Elliptical or n!=4 polygons
        not currently supported. Defaults to Rectangular
    bin_width: float
        Bin size in cm. Default 2.5 cm. If `bin_width` is supplied, `limits` must
        also be supplied. One of `bin_width`, `bin_number`, `bin_edges` must be
        provided
    bin_number: int or tuple of int
        Number of bins. If provided as a tuple, then `(x_bins, y_bins)`. One
        of `bin_width`, `bin_number`, `bin_edges` must be provided
    bin_edges: array-like
        Edges of the bins. Provided either as `edges` or `(x_edges, y_edges)`.
        One of `bin_width`, `bin_number`, `bin_edges` must be provided
    limits: tuple or np.ndarray
        (x_min, x_max) or (x_min, x_max, y_min, y_max)
        Provide concrete limits to the range over which the histogram searches
        Any observations outside these limits are discarded
        If no limits are provided, then use np.nanmin(data), np.nanmax(data)
        to generate default limits.
        As is standard in python, acceptable values include the lower bound
        and exclude the upper bound
    debug: bool, optional
        If `true`, print out debugging information throughout the function.
        Default `False`

    Returns
    -------
    masked_map: np.ma.MaskedArray
        Unsmoothed map of time the animal spent in each bin.
        Bins which the animal never visited are masked (i.e. the mask value is
        `True` at these locations)
    coverage: float
        Fraction of the bins that the animal visited. In range [0, 1]
    bin_edges: ndarray or tuple of ndarray
        x, or (x, y), where x, y are 1d np.ndarrays
        Here x, y correspond to the output histogram
    '''
    # Check for correct shapes.
    # `position` may be a 1D array (x only) or a 2D array (x only, or x and y)
    # Therefore, must have ndim == 1 or 2
    # And if ndim == 2, must have min(shape) == 1 or 2
    dimensionality = position.ndim
    num_dimensions_data = min(position.shape)
    num_samples = max(position.shape)
    if dimensionality not in (1, 2):
        raise errors.ArgumentError(
            "Positions array must be a 1D or 2D array, you have provided a"
            " {}d array".format(dimensionality))
    if dimensionality == 2 and num_dimensions_data not in (1, 2):
        raise errors.ArgumentError(
            "Positions array must contain either 1D data (x only) or 2D data"
            " (x, y). You have provided {}d data".format(num_dimensions_data))
    if speed.ndim != 1:
        raise errors.ArgumentError(
            "Speed array has the wrong number of columns ({}, should be 1)".
            format(speed.ndim))
    if speed.size != num_samples:
        raise errors.ArgumentError(
            "Speed array does not have the same number of samples as Positions"
        )

    # Handle NaN positions by converting to a Masked Array
    position = np.ma.masked_invalid(position)

    speed_cutoff = kwargs.get("speed_cutoff", default.speed_cutoff)
    debug = kwargs.get("debug", False)

    if debug:
        print("Number of time stamps: %d" % len(time))
        print("Maximum time stamp value: %.2f" % time[-1])
        print("Time stamp delta: %f" % np.min(np.diff(time)))

    good = np.ma.greater_equal(speed, speed_cutoff)
    # Mask `positions`, for either 1d or 2d (only accepted shapes)

    if dimensionality == 1:
        pos = position[good]
    elif dimensionality == 2:
        pos = np.array(
            [position[i, :][good] for i in range(min(position.shape))])

    occupancy_map, bin_edges = opexebo.general.accumulate_spatial(
        pos, arena_size, **kwargs)
    if debug:
        print(f"Frames included in histogram: {np.sum(occupancy_map)}"
              f" ({np.sum(occupancy_map)/len(time):.3f})")

    # So far, times are expressed in units of tracking frames
    # Convert to seconds:
    frame_duration = np.min(np.diff(time))
    occupancy_map_time = occupancy_map * frame_duration

    if debug:
        print(f"Time length included in histogram: {np.sum(occupancy_map_time):.2f}"
              f" ({np.sum(occupancy_map_time)/time[-1]:.3f})")

    masked_map = np.ma.masked_where(occupancy_map < 0.001, occupancy_map_time)

    # Calculate the fractional coverage based on the mask. The occupancy_map is
    # zero where the animal has not gone, and therefore non-zero where the animal
    # HAS gone. Coverage is 1.0 when the animal has visited every location.
    # Does not take account of a circular arena, where not all locations are
    # accessible

    shape = kwargs.get("arena_shape", default.shape)

    if shape.lower() in default.shapes_square:
        coverage = np.count_nonzero(occupancy_map) / occupancy_map.size
    elif shape.lower() in default.shapes_circle:
        if isinstance(arena_size, (float, int)):
            diameter = arena_size
        elif isinstance(arena_size, (tuple, list, np.ndarray)):
            diameter = arena_size[0]
        else:
            raise errors.ArgumentError(
                "`arena_size` type not understood ({})".format(type(arena_size)))
        in_field, _, _ = opexebo.general.circular_mask(bin_edges, diameter)
        coverage = np.count_nonzero(occupancy_map) / np.sum(in_field)
        # Due to the thresholding, coverage might be calculated to be > 1
        # In this case, cut off to a maximum value of 1.
        coverage = min(1.0, coverage)
    elif shape.lower() in default.shapes_linear:
        # Linear arenas are handled like square arenas for coverage purposes
        coverage = np.count_nonzero(occupancy_map) / occupancy_map.size
    else:
        raise NotImplementedError(f"Arena shape '{shape}' not understood")

    return masked_map, coverage, bin_edges
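
A usage sketch with synthetic data (hypothetical values; `limits` is supplied because the docstring requires it alongside `bin_width`):

import numpy as np

time = np.arange(0, 600, 0.02)                 # 10 minutes at 50 Hz
x = np.random.uniform(0, 80, time.size)
y = np.random.uniform(0, 80, time.size)
position = np.array([x, y])                    # row-major: position[0] is all x
speed = np.abs(np.random.normal(5.0, 2.0, time.size))
occupancy, coverage, bin_edges = spatial_occupancy(
    time, position, speed, arena_size=80,
    bin_width=2.5, limits=(0, 80, 0, 80),
    arena_shape="square", speed_cutoff=2.5)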
Example 4
def tuning_curve_stats(tuning_curve, **kwargs):
    """ Calculate statistics about a turning curve
    
    STATUS : EXPERIMENTAL

    Calculates various statistics for a tuning curve.
    1. Mean vector length of a head direction rate map.
    The value will range from 0 to 1. 0 means that there is so much dispersion
    that a mean angle cannot be described. 1 means that all data are
    concentrated at the same direction. Note that 0 does not necessarily
    indicate a uniform distribution.
    Calculation is based on Section 26.4, J.H Zar - Biostatistical Analysis 5th edition,
    see eq. 26.13, 26.14.

    Parameters
    ----------
    tuning_curve : np.ma.MaskedArray
        Smoothed tuning curve of firing rate as a function of angle
        Nx1 array
    kwargs
        percentile : float
            Percentile value for the head direction arc calculation
            Arc is between two points with values around
            globalPeak * percentile. Value should be in range [0, 1]

    Returns
    -------
    tcstat : dict
        hd_score         : float
            Score for how strongly modulated by angle the cell is
        hd_mvl           : float
            mean vector length
        hd_peak_rate     : float
            Peak firing rate  [Hz]
        hd_mean_rate     : float
            Mean firing rate [Hz]
        hd_peak_direction : float
            Direction of peak firing rate [degrees]
        hd_peak_direction_rad : float
            Direction of peak firing rate [radians]
        hd_mean_direction: float
            Direction of mean firing rate [degrees]
        hd_mean_direction_rad: float
            Direction of mean firing rate [radians]
        hd_stdev         : float
            Circular standard deviation [degrees]
        halfCwInd  : int
            Index of the start of the range defined by percentile (clockwise)
        halfCcwInd : int
            Index of the end of the range defined by percentile
            (counter-clockwise)
        halfCwRad : float
            Angle at the start of the range defined by percentile (clockwise)
        halfCcwRad  : float
            Angle at the end of the range defined by percentile
            (counter-clockwise)
        arc_angle_rad : float
            Angle of the arc defined by percentile [radians]
        arc_angle_deg : float
            Angle of the arc defined by percentile [degrees]

    Notes
    --------
    BNT.+analyses.tcStatistics

    Copyright (C) 2019 by Simon Ball

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.
    """

    debug = kwargs.get("debug", False)
    percentile = kwargs.get('percentile', default.hd_percentile)
    ndim = tuning_curve.ndim
    if ndim != 1:
        raise error.DimensionMismatchError(
            "tuning_curve should be a 1D array. You have provided {} dimensions"
            .format(ndim))
    if not 0 <= percentile <= 1:
        raise error.ArgumentError(
            "Keyword 'percentile' should be in the range [0, 1]. You provided  {:.2f.}"
            .format(percentile))
    if not isinstance(tuning_curve, np.ma.MaskedArray):
        tuning_curve = np.ma.masked_invalid(tuning_curve)

    num_bin = tuning_curve.size
    bin_width = 2 * np.pi / num_bin
    hb = bin_width / 2
    bin_centres = np.linspace(hb, (2 * np.pi) - hb, num_bin)

    if debug:
        print("Num_bin: %d" % num_bin)
        print("Therefore, bin_width = %.3g deg = %.3g rad" %
              (np.degrees(bin_width), bin_width))

    #### Calculate the simple values
    tcstat = {}
    # The average of the values of angles, weighted by the firing rate at those angles
    mean_dir_radians = cs.circmean(data=bin_centres, weights=tuning_curve)
    tcstat['hd_mean_direction_rad'] = mean_dir_radians
    tcstat['hd_mean_direction'] = np.degrees(mean_dir_radians)

    # The direction in which the highest firing rate occurs
    peak_dir_index = np.nanargmax(tuning_curve)
    peak_dir_angle_radians = _index_to_angle(peak_dir_index, bin_width)
    tcstat['hd_peak_direction_rad'] = peak_dir_angle_radians
    tcstat['hd_peak_direction'] = np.degrees(peak_dir_angle_radians)

    # The peak firing rate IN Hz
    peak_rate_hz = np.nanmax(tuning_curve)
    tcstat['hd_peak_rate'] = peak_rate_hz

    # The mean firing rate across all angles IN Hz
    if tuning_curve.mask.all():
        #### Added to cope with numpy bug in nanmean with fully masked array
        mean_rate_hz = np.nan
    else:
        mean_rate_hz = np.nanmean(tuning_curve)
    tcstat['hd_mean_rate'] = mean_rate_hz

    #### Calculate the more complex ones:
    # mvl
    mvl = np.sum(tuning_curve * np.exp(1j * bin_centres))
    mvl = np.abs(mvl) / np.sum(tuning_curve)
    tcstat['hd_mvl'] = mvl

    # hd_stdev
    # Eq. 26.20 from J. H. Zar
    tcstat['hd_stdev'] = np.sqrt(2 * (1 - mvl))

    # Percentile arc
    half_peak = peak_rate_hz * percentile

    # Because Python doesn't natively handle circular arrays, reshape such that
    # the peak rate occurs at the centre of the array - then don't have to worry
    # about whether the arc goes off one edge of the array or not
    # Must be careful to keep track of the array to which the indices point
    centre_index = int(num_bin / 2)
    offset = centre_index - peak_dir_index
    tuning_curve_re = np.roll(tuning_curve, offset)
    # A positive offset means that the peak angle was in the range [0, pi], and
    # is now at the central index. Therefore, to get the "proper" index,
    # subtract offset from index in tuning_curve_re

    if debug:
        print("Centre index: %d, value" % centre_index)
        print("Peak index: %d" % peak_dir_index)
        print("Offset: %d" % offset)

    # Clockwise and counter-clockwise edges of arc around peak defined by
    # percentile. ccw index +1 to account for width of central peak
    cw_hp_index = np.where(tuning_curve_re >= (half_peak))[0][0] - offset
    ccw_hp_index = np.where(tuning_curve_re >= (half_peak))[0][-1] - offset + 1

    cw_hp_ang = _index_to_angle(cw_hp_index, bin_width)
    ccw_hp_ang = _index_to_angle(ccw_hp_index, bin_width)
    arc_angle = ccw_hp_ang - cw_hp_ang

    if debug:
        print("CW: %d, %.3g rad" % (cw_hp_index, cw_hp_ang))
        print("CCW: %d, %.3g rad" % (ccw_hp_index, ccw_hp_ang))
        print("Arc: %.3g rad" % arc_angle)

    score = 1 - (arc_angle / np.pi)
    tcstat['halfCwInd'] = cw_hp_index
    tcstat['halfCcwInd'] = ccw_hp_index
    tcstat['halfCwRad'] = cw_hp_ang
    tcstat['halfCcwRad'] = ccw_hp_ang
    tcstat['arc_angle_rad'] = arc_angle
    tcstat['arc_angle_deg'] = np.degrees(arc_angle)
    tcstat['hd_score'] = score

    return tcstat
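
A usage sketch with a synthetic tuning curve (hypothetical data: a von-Mises-like bump peaked near pi/2, split into 60 angular bins):

import numpy as np

angles = np.linspace(0, 2 * np.pi, 60, endpoint=False)
curve = np.ma.masked_invalid(5.0 * np.exp(np.cos(angles - np.pi / 2)))
stats = tuning_curve_stats(curve, percentile=0.5)
print(stats['hd_mvl'], stats['hd_peak_direction'], stats['arc_angle_deg'])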
Example 5
def validatekeyword__arena_size(kwv, provided_dimensions):
    '''
    Decipher the possible meanings of the keyword "arena_size".
    
    "arena_size" is given to describe the arena in which the animal is moving
    It should be either a float, or an array-like of 2 floats (x, y)
    
    Parameters
    ----------
    kwv : float or array-like of floats
        The value given for the keyword `arena_size`
    provided_dimensions : int
        the number of spatial dimensions provided to the original function.
        Acceptable values are 1 or 2
        E.g. if the original function was provided with positions = [t, x, y], then
        provided_dimensions=2 (x and y)

    Returns
    -------
    arena_size : float or np.ndarray of floats
    is_2d : bool
        True if the arena is 2-dimensional, False otherwise
    
    Raises
    ------
    ValueError
    IndexError
    '''
    if provided_dimensions == 1:
        is_2d = False
    elif provided_dimensions == 2:
        is_2d = True
    else:
        raise NotImplementedError("Only 1d and 2d arenas are supported. You"\
                                  " provided %dd" % provided_dimensions)
    if isinstance(kwv, (float, int, str)):
        kwv = float(kwv)
        if kwv <= 0:
            raise err.ArgumentError("Keyword 'arena_size' value must be greater than"\
                             " zero (value given %f)" % kwv)
        if is_2d:
            arena_size = np.array((kwv, kwv))
        else:
            arena_size = kwv
    elif isinstance(kwv, (list, tuple, np.ndarray)):
        if len(kwv) == 1:
            if is_2d:
                arena_size = np.array((kwv[0], kwv[0]))
            else:
                arena_size = kwv[0]
        elif len(kwv) == 2 and not is_2d:
            raise err.DimensionMismatchError("Mismatch in dimensions: 1d position data but 2d"\
                             " arena specified")
        elif len(kwv) not in [1, 2]:
            raise err.ArgumentError("Keyword 'arena_size' value is invalid. Provide"\
                             " either a float or a 2-element tuple")
        else:
            arena_size = np.array(kwv)
    else:
        raise err.ArgumentError("Keyword 'arena_size' value not understood. Please"\
                         " provide either a float or a tuple of 2 floats. Value"\
                         " provided: '%s'" % str(kwv))
    return arena_size, is_2d
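
A usage sketch, following the branches above:

arena_size, is_2d = validatekeyword__arena_size(80, provided_dimensions=2)
# arena_size -> np.array([80., 80.]), is_2d -> True
arena_size, is_2d = validatekeyword__arena_size((150, 75), provided_dimensions=2)
# arena_size -> np.array([150, 75]), is_2d -> True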
Example 6
def walk_filter(speed: np.ndarray, speed_cutoff: float, *args, fmt="remove"):
    """
    It is common practice when studying a freely moving subject to exclude data
    from periods when the subject was stationary, or nearly stationary. This
    method is described as a "walk-filter" - a high-pass filter on subject speed.
    
    This function allows an arbitrary number of arrays to be filtered in parallel
    to the speed (or whatever other filtering criteria are used). Filters can be
    performed either by removing the unwanted elements, or by masking them and
    returning a MaskedArray.
    
    Example
    -------
    Filter speed only
    >>> speed = np.arange(500)
    >>> cutoff = 200
    >>> speed = walk_filter(speed, cutoff, fmt="remove")
    >>> speed.size
    300
    
    Filter other arrays as well
    >>> speed = np.arange(500)
    >>> cutoff = 200
    >>> pos_x = np.linspace(-25, 73, speed.size)
    >>> pos_y = np.linspace(0, 98, speed.size)
    >>> speed, pos_x, pos_y = walk_filter(speed, cutoff, pos_x, pos_y, fmt="remove")
    >>> speed.size
    300
    >>> pos_x.size
    300
    

    Parameters
    ----------
    speed : np.ndarray
        Array of speeds for other data points
    speed_cutoff : float
        The cutoff, below which values in ``speed`` will be excluded.
    *args : np.ndarray, optional
        Any other arrays that should be filtered in parallel with speed
        Optional arguments here _must_ be np.ndarrays with size equal to that of
        ``speed``
    fmt : str, optional
        Either "remove" or "mask". Determines how the values are returned
        "remove" (default) - the invalid valaues are removed from the array
        "mask" - the original array is returned as a MaskedArray, with the invalid
        values masked out.
    

    Returns
    -------
    np.ndarray
        Filtered copy of ``speed``
    [np.ndarray]
        Arbitrary other filtered arrays, if any other arrays were provided as *args
    """

    if not isinstance(speed, np.ndarray):
        raise errors.ArgumentError(
            "`speed` should be an ndarray, not ({})".format(type(speed)))

    if not isinstance(speed_cutoff, (float, int)):
        raise errors.ArgumentError(
            "`speed_cutoff` should be a numeric value ({})".format(
                type(speed_cutoff)))
    if speed_cutoff <= 0 or not np.isfinite(speed_cutoff):
        raise errors.ArgumentError(
            "\speed_cutoff` should be a finite positive value")

    if fmt.lower() not in ("remove", "mask"):
        raise errors.ArgumentError("`fmt` should be either 'remove' or 'mask'")

    if len(args):
        for i, arg in enumerate(args):
            if not isinstance(arg, np.ndarray):
                raise errors.ArgumentError(
                    f"arg {i} is not a Numpy array ({arg})")
            if arg.shape != speed.shape:
                raise errors.ArgumentError(
                    f"arg {i} is a different size to `speed`")

    good = speed >= speed_cutoff

    if fmt.lower() == "mask":
        bad = np.logical_not(good)
        speed = np.ma.masked_where(bad, speed)
        out_args = [np.ma.masked_where(bad, arg) for arg in args]
    elif fmt.lower() == "remove":
        speed = speed[good]
        out_args = [arg[good] for arg in args]

    if out_args:
        out_args.insert(0, speed)
        return out_args
    else:
        return speed
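
The doctest above covers `fmt="remove"`; a sketch of the `fmt="mask"` path, which preserves the array length:

import numpy as np

speed = np.arange(500).astype(float)
masked_speed = walk_filter(speed, 200, fmt="mask")
# masked_speed.size == 500; masked_speed.mask is True wherever speed < 200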
Example 7
def place_field(firing_map, **kwargs):
    '''
    Locate place fields on a firing map.

    Identifies place fields in a 2D firing map. Place fields are identified by
    using an adaptive threshold. The idea is that we start with a peak value as
    the threshold. Then we gradually decrease the threshold until the field
    area doesn't change any more or the area explodes (this means the threshold
    is too low).

    Parameters
    ----------
    firing_map: np.ndarray or np.ma.MaskedArray
        smoothed rate map.
        If supplied as an np.ndarray, it is assumed that the map takes values
        of np.nan at locations of zero occupancy. If supplied as an np.ma.MaskedArray,
        it is assumed that the map is masked at locations of zero occupancy
    
    Other Parameters
    ----------------
    min_bins: int
        Fields containing fewer than this many bins will be discarded. Default 9
    min_peak: float
        Fields with a peak firing rate lower than this absolute value will
        be discarded. Default 1 Hz
    min_mean: float
        Fields with a mean firing rate lower than this absolute value will
        be discarded. Default 0 Hz
    init_thresh: float
        Initial threshold to search for fields from. Must be in the range (0, 1].
        Default 0.96
    search_method: str
        Peak detection finding method. By default, use `skimage.morphology.local_maxima`
        Acceptable values are defined in `opexebo.defaults`. Not required if 
        peak_coords are provided
    peak_coords: array-like
        List of peak co-ordinates to consider instead of auto detection. [y, x].
        Default None

    Returns
    -------
    fields: list of dict
        coords: np.ndarray
            Coordinates of all bins in the firing field
        peak_coords: np.ndarray
            Coordinates of the peak firing rate [y,x]
        centroid_coords: np.ndarray
            Coordinates of centroid (decimal) [y,x]
        area: int
            Number of bins in firing field. [bins]
        bbox: tuple
            Coordinates of bounding box including the firing field
            (y_min, x_min, y_max, x_max)
        mean_rate: float
            mean firing rate [Hz]
        peak_rate: float
            peak firing rate [Hz]
        map: np.ndarray
            Binary map of arena. Cells inside firing field have value 1, all
            other cells have value 0
    fields_map : np.ndarray
        labelled integer image (i.e. background = 0, field1 = 1, field2 = 2, etc.)

    Raises
    ------
    ValueError
        Invalid input arguments
    NotImplementedError
        undefined peak-searching methods

    Notes
    --------
    BNT.+analyses.placefieldAdaptive

    https://se.mathworks.com/help/images/understanding-morphological-reconstruction.html

    Copyright (C) 2018 by Vadim Frolov, (C) 2019 by Simon Ball, Horst Obenhaus
    '''
    ##########################################################################
    #####                   Part 1: Handle inputs
    # Get keyword arguments
    min_bins = kwargs.get("min_bins", default.firing_field_min_bins)
    min_peak = kwargs.get("min_peak", default.firing_field_min_peak)
    min_mean = kwargs.get("min_mean", default.firing_field_min_mean)
    init_thresh = kwargs.get("init_thresh", default.initial_search_threshold)
    search_method = kwargs.get("search_method", default.search_method)
    peak_coords = kwargs.get("peak_coords", None)
    debug = kwargs.get("debug", False)

    if not 0 < init_thresh <= 1:
        raise err.ArgumentError("Keyword 'init_thresh' must be in the range (0, 1]."\
                         f" You provided {init_thresh}")
    try:
        search_method = search_method.lower()
    except AttributeError:
        raise err.ArgumentError("Keyword 'search_method' is expected to be a string"\
                         f" You provided a {type(search_method)} ({search_method})")
    if search_method not in default.all_methods:
        raise err.ArgumentError("Keyword 'search_method' must be left blank or given a"\
                         f" value from the following list: {default.all_methods}."\
                         f" You provided '{search_method}'.")

    global_peak = np.nanmax(firing_map)
    if np.isnan(global_peak) or global_peak == 0:
        if debug:
            print(f"Terminating due to invalid global peak: {global_peak}")
        return [], np.zeros_like(firing_map)

    # Construct a mask of bins that the animal never visited (never visited -> true)
    # This needs to account for multiple input formats.
    # The standard that I want to push is that firing_map is type MaskedArray
        # In this case, the cells that an animal never visited have firing_map.mask[cell]=True
        # while firing_map.data[cell] PROBABLY = 0
    # An alternative is the BNT standard, where firing_map is an ndarray
        # In this case, the cells never visited are firing_map[cell] = np.nan
    # In either case, we need to get out the following:
        # finite_firing_map is an ndarray (float) where unvisited cells have a
        # meaningfully finite value (e.g. zero, or min())
        # mask is an ndarray (bool) where unvisited cells are True, all other cells are False

    if isinstance(firing_map, np.ma.MaskedArray):
        occupancy_mask = firing_map.mask
        finite_firing_map = firing_map.data.copy()
        finite_firing_map[np.isnan(firing_map.data)] = 0

    else:
        occupancy_mask = np.zeros_like(firing_map).astype('bool')
        occupancy_mask[np.isnan(firing_map)] = True
        finite_firing_map = firing_map.copy()
        finite_firing_map[np.isnan(firing_map)] = 0

    structured_element = morphology.disk(1)
    image_eroded = morphology.erosion(finite_firing_map, structured_element)
    fmap = morphology.reconstruction(image_eroded, finite_firing_map)
    
    ##########################################################################
    #####                   Part 2: find local maxima
    # Based on the user-requested search method, find the co-ordinates of local maxima
    if peak_coords is None:
        if search_method in (default.search_method, "sep"):
            peak_coords = opexebo.general.peak_search(fmap, **kwargs)
        else:
            raise NotImplementedError("The search method you have requested (%s) is"\
                                      " not yet implemented" % search_method)

    # obtain value of found peaks
    found_peaks = finite_firing_map[peak_coords[:, 0], peak_coords[:, 1]]

    # leave only peaks that satisfy the threshold
    good_peaks = (found_peaks >= min_peak)
    peak_coords = peak_coords[good_peaks, :]


    ##########################################################################
    #####    Part 3: from local maxima get fields by expanding around maxima
    max_value = np.max(fmap)
    # prevent peaks with small values from being detected
    # SWB - This causes problems where a local peak is next to a cell that the animal never went
    # As that risks the field becoming the entire null region
    # Therefore, adding 2nd criterion to avoid adding information where none was actually known.
    fmap[np.logical_and(fmap < min_peak, fmap > 0.01)] = max_value * 1.5

    # this can be confusing, but this variable is just an index for the vector
    # peak_linear_ind
    peaks_index = np.arange(len(peak_coords))
    fields_map = np.zeros(fmap.shape, dtype=int)
    field_id = 1
    for i, peak_rc in enumerate(peak_coords):
        # peak_rc == [row, col]

        # select all peaks except the current one
        other_fields = peak_coords[peaks_index != i]
        if other_fields.size > 0:
            other_fields_linear = np.ravel_multi_index(
                        multi_index=(other_fields[:, 0], other_fields[:, 1]),
                        dims=fmap.shape, order='F')
        else:
            other_fields_linear = []

        used_th = init_thresh
        res = _area_change(fmap, occupancy_mask, peak_rc, used_th,
                           used_th-0.02, other_fields_linear)
        initial_change = res['acceleration']
        area2 = res['area2']
        first_pixels = np.nan
        if np.isnan(initial_change):
            for j in np.linspace(used_th+0.01, 1., 4):
                # Thresholds get higher, area should tend downwards to 1
                # (i.e. only including the actual peak)
                res = _area_change(fmap, occupancy_mask, peak_rc, j, j-0.01, other_fields_linear)
                initial_change = res['acceleration']
                area1 = res['area1']
                area2 = res['area2']
                # initial_change is the change from area1 to area 2
                # area2>area1 -> initial_change > 1
                # area2<area1 -> initial_change < 1
                # area 2 is calculated with lower threshold - should usually be larger
                first_pixels = res['first_pixels']
                if not np.isnan(initial_change) and initial_change > 0:
                    # True if both area1 and area2 are valid
                    # Weird conditional from Vadim - initial change will EITHER:
                    # be greater than zero (can't get a negative area to give negative % change)
                    # OR be NaN (which will always yield false when compared to a number)
                    used_th = j - 0.01
                    break

            if np.isnan(initial_change) and not np.isnan(area1):
                # For the final change
                pixels = np.unravel_index(first_pixels, fmap.shape, 'F')
                fmap[pixels] = max_value * 1.5
                fields_map[pixels] = field_id
                field_id = field_id + 1

            if np.isnan(initial_change):
                # failed to extract the field
                # Do nothing and continue for-loop
                pass

        pixel_list = _expand_field(fmap, occupancy_mask, peak_rc, initial_change, area2,
                                   other_fields_linear, used_th)
        if np.any(np.isnan(pixel_list)):
            _, pixel_list, _ = _area_for_threshold(fmap, occupancy_mask,
                                                   peak_rc, used_th+0.01,
                                                   other_fields_linear)
        if len(pixel_list) > 0:
            pixels = np.unravel_index(pixel_list, fmap.shape, 'F')
        else:
            pixels = []

        fmap[pixels] = max_value * 1.5
        fields_map[pixels] = field_id
        field_id = field_id + 1


    ##########################################################################
    #####     Part 4: Determine which, if any, fields meet filtering criteria
    regions = measure.regionprops(fields_map)

    fields = []
    fields_map = np.zeros(finite_firing_map.shape)  # void it as we can eliminate some fields

    for region in regions:
        field_map = finite_firing_map[region.coords[:, 0], region.coords[:, 1]]
        mean_rate = np.nanmean(field_map)
        num_bins = len(region.coords)

        peak_rate = np.nanmax(field_map)
        peak_relative_index = np.argmax(field_map)
        peak_coords = region.coords[peak_relative_index, :]

        if num_bins >= min_bins and mean_rate >= min_mean:
            field = {}
            field['coords'] = region.coords
            field['peak_coords'] = peak_coords
            field['area'] = region.area
            field['bbox'] = region.bbox
            field['centroid_coords'] = region.centroid
            field['mean_rate'] = mean_rate
            field['peak_rate'] = peak_rate
            mask = np.zeros(finite_firing_map.shape)
            mask[region.coords[:, 0], region.coords[:, 1]] = 1
            field['map'] = mask

            fields.append(field)

            fields_map[region.coords[:, 0], region.coords[:, 1]] = len(fields)
        elif debug:
            # Print out some information about *why* the field failed
            if num_bins < min_bins:
                print("Field size too small (%d)" % num_bins)
            if mean_rate < min_mean:
                print("Field mean rate too low (%.2f Hz)" % mean_rate)
        else:
            # Field too small and debugging information not needed
            # Do nothing
            pass
    return (fields, fields_map)
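
A usage sketch with a synthetic rate map (hypothetical data: two Gaussian bumps standing in for firing fields; whether both survive depends on the filtering criteria):

import numpy as np

yy, xx = np.mgrid[0:40, 0:40]
rate = (8.0 * np.exp(-((xx - 10) ** 2 + (yy - 10) ** 2) / 20)
        + 6.0 * np.exp(-((xx - 30) ** 2 + (yy - 28) ** 2) / 20))
rate_map = np.ma.masked_invalid(rate)
fields, fields_map = place_field(rate_map, min_bins=9, min_peak=1.0)
print(len(fields))  # number of fields passing the criteria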
Example 8
def angular_occupancy(time, angle, **kwargs):
    '''
    Calculate angular occupancy from the tracked head angle over the range [0, 2*pi)

    Parameters
    ----------
    time : numpy.ndarray
        time stamps of angles in seconds
    angle : numpy.ndarray
        Head angle in radians
        Nx1 array
    bin_width : float, optional
        Width of histogram bin in degrees

    Returns
    -------
    masked_histogram : numpy masked array
        Angular histogram, masked at angles at which the animal was never 
        observed. A mask value of True means that the animal never occupied
        that angle. 
    coverage : float
        Fraction of the bins that the animal visited. In range [0, 1]
    bin_edges : list-like
        x, or (x, y), where x, y are 1d np.ndarrays
        Here x, y correspond to the output histogram
    
    Notes
    --------
    Copyright (C) 2019 by Simon Ball, Horst Obenhaus

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.
    '''
    if time.ndim != 1:
        raise errors.ArgumentError("time must be provided as a 1D array. You provided %d"\
                         " dimensions" % time.ndim)
    if angle.ndim != 1:
        raise errors.ArgumentError("angle must be provided as a 1D array. You provided %d"\
                         " dimensions" % angle.ndim)
    if time.size != angle.size:
        raise errors.ArgumentError("Arrays 'time' and 'angle' must have the same number"\
                         f" of elements. You provided {time.size} and {angle.size}")
    if time.size == 0:
        raise errors.ArgumentError(
            "Zero length array provided when data expected")
    if np.nanmax(angle) > 2 * np.pi:
        raise Warning("Angles greater than 2pi detected. Please check that your"\
                      " angle array is in radians. If it is in degrees, you can"\
                      " convert with 'np.radians(array)'")

    bin_width = kwargs.get('bin_width', default.bin_angle)
    bin_width = np.radians(bin_width)
    arena_size = 2 * np.pi
    limits = (0, arena_size)

    angle_histogram, bin_edges = opexebo.general.accumulate_spatial(
        angle, bin_width=bin_width, arena_size=arena_size, limits=limits)
    masked_angle_histogram = np.ma.masked_where(angle_histogram == 0,
                                                angle_histogram)

    # masked_angle_histogram is in units of frames. It needs to be converted to units of seconds
    frame_duration = np.mean(np.diff(time))
    masked_angle_seconds = masked_angle_histogram * frame_duration

    # Calculate the fractional coverage based on locations where the histogram
    # is zero. If all locations are non-zero, then coverage is 1.0
    coverage = np.count_nonzero(angle_histogram) / masked_angle_seconds.size

    return masked_angle_seconds, coverage, bin_edges
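
A usage sketch with a synthetic random-walk heading (hypothetical data; `bin_width` is given in degrees, as documented above):

import numpy as np

time = np.arange(0, 60, 0.02)                  # 60 s at 50 Hz
angle = np.mod(np.cumsum(np.random.normal(0, 0.05, time.size)), 2 * np.pi)
occupancy, coverage, bin_edges = angular_occupancy(time, angle, bin_width=6)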
Example 9
def shuffle(
    times: np.ndarray,
    offset_lim: float,
    iterations: int,
    t_start: float = None,
    t_stop: float = None,
):
    """
    Duplicate the provided time series ``iterations`` number of times. Each
    duplicate will be incremented circularly by a random value not smaller than
    ``offset_lim``.

    Circular incrementation results in (the majority of) time _differences_
    remaining preserved

    * Initially, we have a time series, ``times``, with
      values in the range ``[min(times), max(times)]``. ``t_start`` may be smaller than
      ``min(times)``, and ``t_stop`` may be larger than ``max(times)``
    * ``iterations`` number of duplicates of ``times`` are created.
    * In each iteration, a random increment ``T`` is generated, and added to
      each value in that iteration, such that values now fall into the range
      ``[min(times)+T, max(times)+T]``. ``max(times)+T`` may exceed ``t_stop``.
    * All timestamps matching ``t_n > t_stop`` are mapped back into the range
      ``[t_start, t_stop]`` by subtracting ``(t_stop - t_start)``
    * The iteration is re-ordered by value (moving those beyond the far edge
      back to the beginning)

    Parameters
    ----------
    times : np.ndarray
        1D array of floats. Time series data to be shuffled
    offset_lim : float
        Minimum offset from the original time values. Each iteration is
        incremented by a random value evenly distributed in the range
        ``[t_start + offset_lim, t_stop - offset_lim]``
    iterations : int
        Number of repeats of ``times`` to be returned
    t_start : float, optional
        Lower bound of time domain. Must meet the criteria ``t_start <= min(times)``
        Defaults to ``min(times)``
    t_stop : float, optional
        Upper bound of time domain. Must meet the criteria ``t_stop >= max(times)``
        Defaults to ``max(times)``

    Returns
    -------
    output : np.ndarray
        iterations x N array of times. A single iteration is accessed as
        ``output[i]``
    increments : np.ndarray
        1D array of offset values that were used
    """
    # Argument checking begins here
    if not isinstance(times, np.ndarray):
        raise errors.ArgumentError(
            "`times` must be 1 Numpy array ({})".format(type(times))
        )
    if not times.ndim == 1:
        raise errors.ArgumentError("`times` must be a 1D array ({})".format(times.ndim))
    if not np.isfinite(times).all():
        raise errors.ArgumentError("`times` cannot include non-finite or NaN values")

    if offset_lim <= 0:
        raise errors.ArgumentError(
            "`offset_lim` must be greater than zero ({}".format(offset_lim)
        )
    if not np.isfinite(offset_lim):
        raise errors.ArgumentError(
            "`offset_lim` must be finite ({})".format(offset_lim)
        )

    if iterations < 2:
        raise errors.ArgumentError(
            "`iterations` must be a positive integer greater than 1 ({})".format(
                iterations
            )
        )
    if not np.isfinite(iterations):
        raise errors.ArgumentError("`iterations` must be finite ({})".format(iterations))

    if t_start is None:
        t_start = min(times)
    if t_stop is None:
        t_stop = max(times)

    if not np.isfinite(t_start):
        raise errors.ArgumentError("`t_start` must be finite ({})".format(t_start))
    if not np.isfinite(t_stop):
        raise errors.ArgumentError("`t_stop` must be finite ({})".format(t_stop))

    if t_start > min(times):
        raise errors.ArgumentError(
            "`t_start` must be greater than or equal to `min(times)`"
        )
    if t_stop < max(times):
        raise errors.ArgumentError(
            "`t_stop` must be less than or equal to `max(times)`"
        )

    if t_start == t_stop:
        raise errors.ArgumentError(
            "`t_start` and `t_stop` cannot be identical ({})".format(t_start)
        )

    if offset_lim >= 0.5 * (t_stop - t_start):
        raise errors.ArgumentError(
            "`offset_lim` must be less than half of the time span ({}, {})".format(
                offset_lim, t_stop - t_start
            )
        )
    # argument checking ends here

    # Main logic begins here
    increments_base = np.random.RandomState().rand(
        iterations
    )  # uniformly distributed in [0,1]
    increments = (
        t_start + offset_lim + (increments_base * (t_stop - t_start - 2 * offset_lim))
    )

    # Stack copies of `times`, one per row, for `iterations` number of rows
    # stack copies of `increments`, one per column, for `times.size` number of columns
    # We get two identically shaped arrays that can just be added together to perform the increments.
    output = np.repeat(times[np.newaxis, :], iterations, axis=0)
    increments_arr = np.repeat(increments[:, np.newaxis], times.size, axis=1)

    output = increments_arr + output

    # Circularising: i.e. folding times outside the boundary back inside the
    # boundary, and then re-ordering by the updated, refolded times
    out_of_bounds = output > t_stop
    output[out_of_bounds] = output[out_of_bounds] - (t_stop - t_start)

    output.sort(axis=1)  # sort along each row independently of all other rows.

    return output, increments
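
A usage sketch (hypothetical data: 100 spike times shuffled 50 times over a 600 s recording):

import numpy as np

spikes = np.sort(np.random.uniform(0, 600, 100))
shuffled, increments = shuffle(spikes, offset_lim=20.0, iterations=50,
                               t_start=0.0, t_stop=600.0)
# shuffled.shape == (50, 100); shuffled[i] is one circularly shifted copy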