def trajectories(frame, data, f, parameters=None, call_num=None):
    # This can only be run on linked trajectories (the data must contain a 'particle' column)
    method_key = get_method_key('trajectories', call_num=call_num)
    x_col_name = parameters[method_key]['x_column']
    y_col_name = parameters[method_key]['y_column']

    #In this case subset_df is only used to get the particle_ids and colours of trajectories.
    subset_df = get_class_subset(data, f, parameters, method=method_key)
    particle_ids = subset_df['particle'].values

    colours = colour_array(subset_df, f, parameters, method=method_key)
    thickness = get_param_val(parameters[method_key]['thickness'])
    traj_length = get_param_val(parameters[method_key]['traj_length'])

    if (f-traj_length) < 0:
        traj_length = f

    df = data.df.sort_index()
    df.index.name='frame'
    df['frame'] = df.index
    df2 = df.loc[f-traj_length:f]

    df3 = df2.set_index(['particle','frame']).sort_index(level='particle')

    for index, particle in enumerate(particle_ids):
        traj_pts = df3[[x_col_name,y_col_name]].loc[particle]
        traj_pts = np.array(traj_pts.values, np.int32).reshape((-1,1,2))
        frame = cv2.polylines(frame,[traj_pts],False,colours[index],thickness)
    return frame
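
cv2.polylines expects each trajectory as an int32 array of shape (-1, 1, 2), which is why the points are reshaped above. A minimal self-contained sketch of the same call on made-up points:

import numpy as np
import cv2

canvas = np.zeros((100, 100, 3), dtype=np.uint8)              # blank 3-channel image
pts = np.array([[10, 10], [50, 40], [90, 20]], np.int32)      # x, y pairs
pts = pts.reshape((-1, 1, 2))                                 # the shape cv2.polylines expects
canvas = cv2.polylines(canvas, [pts], False, (0, 255, 0), 2)  # open (not closed) green polyline
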
def circles(frame, data, f, parameters=None, call_num=None):
    '''
    Draws circles on an image at the x, y locations. If data.df['r'] exists
    the circles take this radius; otherwise an 'r' column is created with
    the value set from the annotation sub-dictionary.

    :param frame: frame to be annotated, should have 3 colour channels
    :param data: datastore with particle information
    :param f: frame number
    :param parameters: annotation sub-dictionary

    :return: annotated frame
    '''
    method_key = get_method_key('circles', call_num=call_num)
    if 'r' not in list(data.df.columns):
        data.add_particle_property('r', get_param_val(parameters[method_key]['radius']))
    thickness = get_param_val(parameters[method_key]['thickness'])

    subset_df = get_class_subset(data, f, parameters, method=method_key)
    circles = subset_df[['x', 'y', 'r']].values
    colours = colour_array(subset_df, f, parameters, method=method_key)
    for i, circle in enumerate(circles):
        try:
            frame = cv2.circle(frame, (int(circle[0]), int(circle[1])), int(circle[2]), colours[i], thickness)
        except Exception:
            print('Failed plotting circle, check data is valid')
    return frame
def trackpy(frame, _, parameters=None, call_num=None):
    method_key = get_method_key('trackpy', call_num)
    df = tp.locate(frame, get_param_val(parameters[method_key]['size_estimate']),
                   invert=get_param_val(parameters[method_key]['invert']))

    if parameters[method_key]['get_intensities']:
        x = df['x'].to_numpy()
        y = df['y'].to_numpy()
        intensity = []
        for i in range(np.size(x)):
            xc = x[i]
            yc = y[i]
            rc = get_param_val(parameters[method_key]['intensity_radius'])

            try:
                # Try because some circles overlap the edge giving meaningless answers
                cut_out_frame = frame[int(yc - rc):int(yc + rc), int(xc - rc):int(xc + rc)]
                h, w = cut_out_frame.shape[:2]
                mask = create_circular_mask(h, w)
                masked_img = cut_out_frame.copy()
                masked_img[~mask] = 0
                value = getattr(im, parameters[method_key]['get_intensities'])(masked_img)
            except Exception:
                value = np.nan

            intensity.append(value)
        df['intensities'] = np.array(intensity)
    return df
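
create_circular_mask is defined elsewhere in this project; a plausible minimal reimplementation, assuming it selects the largest circle centred in an h x w cut-out, would be:

import numpy as np

def create_circular_mask(h, w):
    # Hypothetical sketch: True inside the largest centred circle, False outside
    centre = (w / 2, h / 2)
    radius = min(centre)
    Y, X = np.ogrid[:h, :w]
    dist = np.sqrt((X - centre[0]) ** 2 + (Y - centre[1]) ** 2)
    return dist <= radius
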
Example #4
    def link_trajectories(self, f_index=None):
        """Implements the trackpy functions link_df and filter_stubs"""
        # Reload the DataStore
        if f_index is None:
            # When processing the whole video, store in a file with the same name as the movie
            data_filename = self.data_filename
        else:
            # Store temporarily
            data_filename = self.data_filename[:-5] + '_temp.hdf5'

        with dataframes.DataStore(data_filename, load=True) as data:
            if f_index is None:
                # Trackpy methods
                data.reset_index()
                data.df = trackpy.link_df(data.df,
                                          get_param_val(self.parameters['default']['max_frame_displacement']),
                                          memory=get_param_val(self.parameters['default']['memory']))
                data.df = trackpy.filter_stubs(data.df, get_param_val(self.parameters['default']['min_frame_life']))
            else:
                # Add a particle id to the single temporary dataframe for convenience
                num_particles = np.shape(data.df)[0]
                pids = np.linspace(0, num_particles - 1, num=num_particles).astype(int)
                data.df['particle'] = pids

            # Save DataStore
            data.save(filename=data_filename)
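
The two trackpy calls are the core of the linking step: link_df joins detections across frames into trajectories and filter_stubs drops short-lived ones. A minimal standalone sketch (column names follow trackpy's conventions; the thresholds are illustrative):

import pandas as pd
import trackpy

# One row per detection, with 'x', 'y' and 'frame' columns
features = pd.DataFrame({'x': [10.0, 11.0, 12.0],
                         'y': [5.0, 5.5, 6.0],
                         'frame': [0, 1, 2]})
linked = trackpy.link_df(features, 5, memory=2)  # adds a 'particle' column
long_tracks = trackpy.filter_stubs(linked, 2)    # keep trajectories lasting >= 2 frames
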
def hough(frame, _, parameters=None, call_num=None):
    '''
    Performs the OpenCV Hough circles transform to locate
    circles in an image.

    :param frame: grayscale frame
    :param parameters: tracking parameters dictionary
    :param call_num: call number, allows multiple calls to this method
    :return: DataFrame with 'x', 'y' and 'r' columns (plus 'intensities' if requested)
    '''
    method_key = get_method_key('hough', call_num)

    circles = np.squeeze(cv2.HoughCircles(
        frame,
        cv2.HOUGH_GRADIENT, 1,
        get_param_val(parameters[method_key]['min_dist']),
        param1=get_param_val(parameters[method_key]['p1']),
        param2=get_param_val(parameters[method_key]['p2']),
        minRadius=get_param_val(parameters[method_key]['min_rad']),
        maxRadius=get_param_val(parameters[method_key]['max_rad'])))

    try:
        circles_dict = {'x': circles[:, 0], 'y': circles[:, 1], 'r': circles[:, 2]}
    except Exception:
        # HoughCircles returns None when nothing is found; fall back to a dummy circle
        circles_dict = {'x': [1], 'y': [1], 'r': [5]}


    if parameters[method_key]['get_intensities']:
        intensity = []
        for i,_ in enumerate(circles_dict['x']):
            xc = circles_dict['x'][i]
            yc = circles_dict['y'][i]
            rc = circles_dict['r'][i]

            try:
                #Try because some circles overlap the edge giving meaningless answers
                cut_out_frame = frame[int(yc - rc):int(yc + rc), int(xc - rc):int(xc + rc)]
                h,w= cut_out_frame.shape[:2]
                mask = create_circular_mask(h, w)
                masked_img = cut_out_frame.copy()
                masked_img[~mask] = 0
                value = getattr(im, parameters[method_key]['get_intensities'])(masked_img)
            except Exception:
                value = np.nan

            intensity.append(value)

        circles_dict['intensities']=np.array(intensity)

    df = pd.DataFrame(circles_dict)

    return df
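
cv2.HoughCircles returns None when no circles are found, which is what the try/except above guards against (the dummy fallback circle is this code's own convention). A standalone sketch with an explicit None check:

import numpy as np
import cv2

img = np.zeros((100, 100), dtype=np.uint8)
cv2.circle(img, (50, 50), 20, 255, 2)  # draw a test ring to detect

found = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20,
                         param1=50, param2=30, minRadius=10, maxRadius=40)
if found is not None:
    for x, y, r in np.squeeze(found, axis=0):  # found has shape (1, N, 3)
        print(f'circle at ({x:.0f}, {y:.0f}) with radius {r:.0f}')
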
def boxes(frame, data, f, parameters=None, call_num=None):
    method_key = get_method_key('boxes', call_num=call_num)
    thickness = get_param_val(parameters[method_key]['thickness'])
    subset_df = get_class_subset(data, f, parameters, method=method_key)
    box_pts = subset_df[['box']].values

    colours = colour_array(subset_df, f, parameters, method=method_key)
    sz = np.shape(frame)
    for index, box in enumerate(box_pts):
        if contour_inside_img(sz, box):
            frame = _draw_contours(frame, box, col=colours[index],
                                   thickness=thickness)
    return frame
Example #7
def adaptive_threshold(frame, parameters=None, call_num=None):
    '''Adaptive threshold

    Notes
    -----

    This applies an adaptive threshold. This differs from global threshold
    in that for each pixel the cutoff threshold is defined based on a block of local
    pixels around it. This enables you to cope with gradual changes in illumination
    across the image etc.

    options
    ~~~~~~~

    parameters['adaptive threshold']['block size'] : size of the local block of pixels the threshold is calculated on (must be odd)
    parameters['adaptive threshold']['C'] : the mean-C value, see here: http://homepages.inf.ed.ac.uk/rbf/HIPR2/adpthrsh.htm
    parameters['adaptive threshold']['ad_mode'] : inverts the behaviour

    Parameters
    ----------

    frame: np.ndarray
        frame grayscale
    parameters: dict, optional
        parameters dictionary
    call_num: int or None
        number specifying the call number to this function. allows multiple calls

    Returns
    -------

    binary image with 255 above threshold else 0.

    '''

    method_key = get_method_key('adaptive_threshold', call_num=call_num)
    params = parameters['preprocess'][method_key]
    block = get_param_val(params['block_size'])
    const = get_param_val(params['C'])
    invert = get_param_val(params['ad_mode'])

    if invert == 1:
        out = cv2.adaptiveThreshold(frame, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                    cv2.THRESH_BINARY_INV, block, const)
    else:
        out = cv2.adaptiveThreshold(frame, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                    cv2.THRESH_BINARY, block, const)
    return out
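
A minimal call, noting that cv2.adaptiveThreshold requires the block size to be an odd integer greater than 1:

import numpy as np
import cv2

gray = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
binary = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY, 11, 2)  # block size 11 (odd), C = 2
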
def contours(pp_frame, frame, parameters=None, call_num=None):
    '''
    Finds the full contours of objects in an image. The related boxes
    method also finds contours but reduces each one to a rotated bounding
    box; use boxes for an object's angle or a size estimate, and contours
    when you need to work with the enclosed pixels.

    For each contour this stores: the centroid (cx, cy), the area enclosed
    by the contour, and the bounding rectangle, which together with the
    contour can be used to build a mask and extract pixels from the
    original image for further analysis.
    '''
    sz = np.shape(frame)
    if np.shape(sz)[0] == 3:
        frame= cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    method_key = get_method_key('contours',call_num=call_num)
    params = parameters[method_key]
    get_intensities = params['get_intensities']

    area_min = get_param_val(params['area_min'])
    area_max = get_param_val(params['area_max'])
    info = []

    contour_pts = _find_contours(pp_frame)

    for index, contour in enumerate(contour_pts):
        M = cv2.moments(contour)
        if M['m00'] > 0:
            area = cv2.contourArea(contour)
            if (area < area_max) & (area > area_min):
                cx = int(M['m10'] / M['m00'])
                cy = int(M['m01'] / M['m00'])

                box = cv2.boundingRect(contour)
                if get_intensities:
                    intensity = _find_intensity_inside_contour(contour, frame, params['get_intensities'])
                    info_contour = [cx, cy, area, contour, box, intensity]
                else:
                    info_contour = [cx, cy, area, contour, box]
                info.append(info_contour)

    if get_intensities:
        info_headings = ['x', 'y', 'area', 'contours', 'boxes', 'intensities']
    else:
        info_headings = ['x', 'y', 'area', 'contours', 'boxes']
    df = pd.DataFrame(data=info, columns=info_headings)

    return df
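
The centroid arithmetic above follows directly from the image moments: cx = m10 / m00 and cy = m01 / m00. A tiny sketch on a synthetic blob (OpenCV 4.x findContours signature):

import numpy as np
import cv2

blob = np.zeros((50, 50), dtype=np.uint8)
cv2.rectangle(blob, (10, 20), (30, 40), 255, -1)  # filled rectangle
found, _ = cv2.findContours(blob, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
M = cv2.moments(found[0])
cx, cy = M['m10'] / M['m00'], M['m01'] / M['m00']  # centroid of the rectangle, ~(20, 30)
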
Example #9
def medianblur(frame, parameters=None, call_num=None):
    '''Median blur

    Notes
    -----

    Applies a median blur to the image (https://en.wikipedia.org/wiki/Median_filter)
    Good for removing speckle noise

    options
    ~~~~~~~

    params['kernel'] specifies the side length of the square kernel (must be odd)

    Parameters
    ----------

    frame: np.ndarray
        frame
    parameters: dict, optional
        parameters dictionary
    call_num: int or None
        number specifying the call number to this function. allows multiple calls

    Returns
    -------

    blurred image

    '''
    method_key = get_method_key('medianblur', call_num=call_num)
    params = parameters['preprocess'][method_key]
    kernel = get_param_val(params['kernel'])
    out = cv2.medianBlur(frame, kernel)
    return out
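
cv2.medianBlur requires an odd kernel size greater than 1, so the kernel parameter must be 3, 5, 7, and so on:

import numpy as np
import cv2

noisy = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
smoothed = cv2.medianBlur(noisy, 5)  # kernel must be odd
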
Example #10
def resize(frame, parameters=None, call_num=None):
    ''' Resize an image

    Notes
    -----

    resizes an input image by the scale specified

    options
    ~~~~~~~

    params['scale'] : percentage scale factor (100 leaves the image unchanged)

    Parameters
    ----------

    frame: np.ndarray
        frame
    parameters: dict, optional
        parameters dictionary
    call_num: int or None
        number specifying the call number to this function. allows multiple calls

    Returns
    -------

    Resized frame

    '''
    method_key = get_method_key('resize', call_num=call_num)
    params = parameters['preprocess'][method_key]

    scale = get_param_val(params['scale']) / 100
    # cv2.resize needs dsize=None when scaling with fx/fy factors
    return cv2.resize(frame, None, fx=scale, fy=scale)
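
With dsize=None, OpenCV computes the output size from the fx/fy scale factors, so a 'scale' parameter of 50 halves each dimension:

import numpy as np
import cv2

frame = np.zeros((200, 300, 3), dtype=np.uint8)
half = cv2.resize(frame, None, fx=0.5, fy=0.5)  # -> shape (100, 150, 3)
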
Example #11
def _find_delaunay(df, parameters=None, call_num=None):
    method_key = get_method_key('neighbours')
    cutoff = get_param_val(parameters[method_key]['cutoff'])

    points = df[['x', 'y']].values
    particle_ids = df[['particle']].values.flatten()
    tess = sp.Delaunay(points)
    list_indices, point_indices = tess.vertex_neighbor_vertices
    # vertex_neighbor_vertices gives CSR-style (indptr, indices) arrays
    neighbour_ids = [
        point_indices[a:b].tolist()
        for a, b in zip(list_indices[:-1], list_indices[1:])
    ]
    dist = sp.distance.squareform(sp.distance.pdist(points))

    # Boolean mask: which Delaunay neighbours lie within the cutoff distance
    neighbour_dists = [(dist[i, row] < cutoff).tolist()
                       for i, row in enumerate(neighbour_ids)]
    indices = []
    for index, row in enumerate(neighbour_ids):
        indices.append([
            particle_ids[neighbour_ids[index][j]]
            for j, dummy in enumerate(row) if neighbour_dists[index][j]
        ])
    df.loc[:, ['neighbours']] = indices
    return df
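
vertex_neighbor_vertices returns a CSR-style pair (indptr, indices): the Delaunay neighbours of point k are indices[indptr[k]:indptr[k + 1]], which is what the zip over consecutive offsets above unpacks. A small sketch:

import numpy as np
import scipy.spatial as sp

points = np.array([[0, 0], [1, 0], [0, 1], [1, 1], [0.5, 0.5]])
tess = sp.Delaunay(points)
indptr, indices = tess.vertex_neighbor_vertices
neighbours_of_centre = indices[indptr[4]:indptr[5]]  # points sharing an edge with point 4
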
Example #12
def difference(data, f_index=None, parameters=None, call_num=None):
    '''Difference in time of a column of dataframe.

    Notes
    -----

    The differences are calculated at separations equal
    to span along the column. Where this is not possible,
    or across the boundary between particles, the value np.nan is inserted.

    Returns
    -------

    Dataframe with a new column of rolling differences named according to output_name in parameters

    '''

    method_key = get_method_key('difference', call_num)
    span = get_param_val(parameters[method_key]['span'])
    column = parameters[method_key]['column_name']
    output_name = parameters[method_key]['output_name']
    data.index.name = 'index'
    data = data.sort_values(['particle', 'frame'])
    data[output_name] = data[column].diff(periods=span)
    # Differences computed across two different particles are meaningless, so mask them
    data['nan'] = data['particle'].diff(periods=span).astype(bool)
    data.loc[data['nan'], output_name] = np.nan
    data = data.drop(labels='nan', axis=1)
    return data
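
An equivalent and arguably clearer way to get per-particle differences is a groupby, which avoids the boundary-masking column entirely; a sketch of the alternative, not how this code does it:

import pandas as pd

df = pd.DataFrame({'particle': [0, 0, 0, 1, 1],
                   'frame':    [0, 1, 2, 0, 1],
                   'x':        [1.0, 2.0, 4.0, 0.0, 3.0]})
df = df.sort_values(['particle', 'frame'])
df['dx'] = df.groupby('particle')['x'].diff(periods=1)  # NaN at each particle's first frame
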
Example #13
def classify(data, f_index=None, parameters=None, call_num=None):
    method_key = get_method_key('classify', call_num)
    column = parameters[method_key]['column_name']
    output_name = parameters[method_key]['output_name']
    threshold_value = get_param_val(parameters[method_key]['value'])
    data[output_name] = data[column].apply(_classify_fn,
                                           threshold_value=threshold_value)
    return data
def vectors(frame, data, f, parameters=None, call_num=None):
    method_key = get_method_key('vectors', call_num=call_num)
    dx = parameters[method_key]['dx_column']
    dy = parameters[method_key]['dy_column']

    vectors = data.get_info(f, ['x', 'y',dx, dy])

    thickness = get_param_val(parameters[method_key]['thickness'])
    line_type = 8
    tipLength = 0.01*get_param_val(parameters[method_key]['tip_length'])
    vector_scale = 0.01*get_param_val(parameters[method_key]['vector_scale'])

    colours = colour_array(data.df, f, parameters, method=method_key)

    for i, vector in enumerate(vectors):
        frame = cv2.arrowedLine(frame, (int(vector[0]), int(vector[1])),
                                (int(vector[0]+vector[2]*vector_scale),int(vector[1]+vector[3]*vector_scale)),
                                color=colours[i], thickness=thickness,line_type=line_type,shift=0,tipLength=tipLength)
    return frame
def contours(frame, data, f, parameters=None, call_num=None):
    method_key = get_method_key('contours', call_num=call_num)
    thickness = get_param_val(parameters[method_key]['thickness'])
    subset_df = get_class_subset(data, f, parameters, method=method_key)
    contour_pts = subset_df[['contours']].values
    colours = colour_array(subset_df, f, parameters, method=method_key)

    for index, contour in enumerate(contour_pts):
        frame = _draw_contours(frame, contour, col=colours[index],
                               thickness=thickness)
    return frame
Example #16
def _find_kdtree(df, parameters=None):
    method_key = get_method_key('neighbours')
    cutoff = get_param_val(parameters[method_key]['cutoff'])
    num_neighbours = get_param_val(parameters[method_key]['neighbours'])

    points = df[['x', 'y']].values
    particle_ids = df[['particle']].values.flatten()
    tree = sp.KDTree(points)
    _, indices = tree.query(points,
                            k=num_neighbours + 1,
                            distance_upper_bound=cutoff)
    neighbour_ids = []
    fill_val = np.size(particle_ids)
    for index, row in enumerate(indices):
        neighbour_ids.append([
            particle_ids[row[i + 1]] for i in range(num_neighbours)
            if row[i + 1] != fill_val
        ])
    df.loc[:, ['neighbours']] = neighbour_ids
    return df
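
scipy's KDTree reports a missing neighbour (none found within distance_upper_bound) by returning index n, the number of points, with distance inf; that is why fill_val above is np.size(particle_ids). A sketch:

import numpy as np
import scipy.spatial as sp

points = np.array([[0.0, 0.0], [1.0, 0.0], [10.0, 10.0]])
tree = sp.KDTree(points)
dists, idx = tree.query(points, k=2, distance_upper_bound=2.0)
# idx[2, 1] == 3 (== len(points)) and dists[2, 1] == inf:
# the far point has no neighbour within the cutoff
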
Example #17
def threshold(frame, parameters=None, call_num=None):
    '''Apply a global image threshold

    Notes
    -----

    This takes a cutoff threshold value and returns white above and
    black below this value.

    options
    ~~~~~~~
    parameters['threshold'] : sets the value of the cutoff threshold
    parameters['th_mode'] : can be used to invert the above behaviour

    Parameters
    ----------

    frame: np.ndarray
        frame grayscale
    parameters: dict, optional
        parameters dictionary
    call_num: int or None
        number specifying the call number to this function. allows multiple calls

    Returns
    -------

    binary image with 255 for pixel val > threshold else 0.

    '''

    method_key = get_method_key('threshold', call_num=call_num)
    params = parameters['preprocess'][method_key]

    threshold = get_param_val(params['threshold'])
    mode = get_param_val(params['th_mode'])
    ret, out = cv2.threshold(frame, threshold, 255, mode)

    return out
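
th_mode maps onto OpenCV's threshold-type flags; the two common ones behave like this:

import numpy as np
import cv2

gray = np.array([[10, 200], [90, 150]], dtype=np.uint8)
_, white_above = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)      # 255 where value > 100
_, white_below = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV)  # inverted behaviour
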
Example #18
def erosion(frame, parameters=None, call_num=None):
    ''' Morphological erosion

    Notes
    -----

    erodes a binary image. This means pixels are set to
    zero based on their connectivity with neighbours

    options
    ~~~~~~~

    parameters['erosion_kernel'] : size of the square kernel used for the erosion
    parameters['iterations'] : number of times the erosion is applied

    Parameters
    ----------

    frame: np.ndarray
        frame
    parameters: dict, optional
        parameters dictionary
    call_num: int or None
        number specifying the call number to this function. allows multiple calls

    Returns
    -------

    eroded frame

    '''

    method_key = get_method_key('erosion', call_num=call_num)
    params = parameters['preprocess'][method_key]
    kernel = get_param_val(params['erosion_kernel'])
    iterations = get_param_val(params['iterations'])

    kernel = np.ones((kernel, kernel), dtype=np.uint8)

    return cv2.erode(frame, kernel, iterations=iterations)
def boxes(frame, _, parameters=None, call_num=None):
    '''
    boxes finds the contour of an object but reduces the info to
    a rotated bounding box. Use it to find the angle of an object or
    an estimate of its size.
    '''
    method_key = get_method_key('boxes',call_num=call_num)
    params = parameters[method_key]
    get_intensities = params['get_intensities']

    area_min = get_param_val(params['area_min'])
    area_max = get_param_val(params['area_max'])
    info = []
    contour_pts = _find_contours(frame)

    for index, contour in enumerate(contour_pts):
        area = int(cv2.contourArea(contour))
        if (area < area_max) and (area >= area_min):
            info_contour = _rotated_bounding_rectangle(contour)
            cx, cy = np.mean(info_contour[5], axis=0)
            angle = info_contour[2]
            width = info_contour[3]
            length = info_contour[4]
            box = info_contour[5]

            if get_intensities:
                intensity = _find_intensity_inside_contour(contour, frame, params['get_intensities'])
                info_contour = [cx, cy, angle, width, length, contour, box, intensity]
            else:
                info_contour = [cx, cy, angle, width, length, contour, box]
            info.append(info_contour)

    if get_intensities:
        info_headings = ['x', 'y', 'theta', 'width', 'length', 'contours','box', 'intensities']
    else:
        info_headings = ['x', 'y', 'theta', 'width', 'length', 'contours','box']
    df = pd.DataFrame(data=info, columns=info_headings)
    return df
def networks(frame, data, f, parameters=None, call_num=None):
    method_key = get_method_key('networks', call_num=call_num)
    df = get_class_subset(data, f, parameters, method=method_key)
    df = df.set_index('particle')
    particle_ids = df.index.values
    colours = colour_array(df, f, parameters, method=method_key)
    thickness = get_param_val(parameters[method_key]['thickness'])

    for index, particle in enumerate(particle_ids):
        pt = df.loc[particle, ['x', 'y']].values
        pt1 = (int(pt[0]), int(pt[1]))
        neighbour_ids = df.loc[particle, 'neighbours']
        for index2, neighbour in enumerate(neighbour_ids):
            pt = df.loc[neighbour, ['x','y']].values
            pt2 = (int(pt[0]), int(pt[1]))
            frame = cv2.line(frame,pt1, pt2, colours[index], thickness, lineType=cv2.LINE_AA)
    return frame
Example #21
def colour_array(subset_df, f, parameters, method=None):
    cmap_type = parameters[method]['cmap_type']
    sz = np.shape(subset_df.index.values)
    if cmap_type == 'static':
        colour_val = parameters[method]['colour']
        colours = colour_val * np.ones((sz[0], 3))
    elif cmap_type == 'continuous':
        cmap_column = parameters[method]['cmap_column']
        colour_data = subset_df[[cmap_column]].values
        cmap_max = get_param_val(
            parameters[method]['cmap_max']) / parameters[method]['cmap_scale']
        cmap_name = 'jet'
        colour_obj = plt.get_cmap(cmap_name, np.size(colour_data))
        colour_vals = 255 * colour_obj(colour_data / cmap_max)
        colours = []
        for colour in colour_vals:
            colours.append((colour[0, 0], colour[0, 1], colour[0, 2]))
        colours = np.array(colours)
    return colours
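
Matplotlib colormaps return RGBA floats in [0, 1], which is why the values are scaled by 255 above before being handed to OpenCV. A sketch of the mapping for a single normalised value:

import matplotlib.pyplot as plt

cmap = plt.get_cmap('jet')
r, g, b, a = cmap(0.5)                # floats in [0, 1]
colour = (255 * r, 255 * g, 255 * b)  # scaled tuple as used for drawing
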
Example #22
def cmap_variables(data, f, parameters, method=None):
    '''
    Convenience method to extract the inputs necessary for building a colourmap

    :param data: DataStore
    :param f: frame number
    :param parameters: annotation parameters dictionary
    :param method: String of the method being used to annotate

    :return: Numpy array of data to be used in colour coding, type of colour map, maximum value to scale data.
    '''
    cmap_column = parameters[method]['cmap_column']
    if cmap_column is None:
        sz = np.shape(data.df.loc[f].index.values)
        colour_data = np.ones(sz)
        cmap_type = 'discrete'
    else:
        colour_data = data.get_info(f, cmap_column)
        cmap_type = parameters[method]['cmap_type']
    cmap_max = get_param_val(parameters[method]['cmap_max']) / 10
    return colour_data, cmap_type, cmap_max
Example #23
def gamma(image, parameters=None, call_num=None):
    ''' Gamma correction

    Notes
    -----

    generates a lookup table which maps the values 0-255 to 0-255,
    but not linearly: each normalised pixel value is raised to the
    power 1 / (gamma / 100.0) before being rescaled to 0-255.

    Parameters
    ----------

    image: np.ndarray
        frame
    parameters: dict, optional
        parameters dictionary
    call_num: int or None
        number specifying the call number to this function. allows multiple calls

    Returns
    -------

    gamma corrected image

    '''

    method_key = get_method_key('gamma', call_num=call_num)
    params = parameters['preprocess'][method_key]

    gamma = get_param_val(params['gamma']) / 100.0
    # build a lookup table mapping the pixel values [0, 255] to
    # their adjusted gamma values
    invGamma = 1.0 / gamma

    table = np.array([((i / 255.0)**invGamma) * 255
                      for i in np.arange(0, 256)]).astype("uint8")

    return cv2.LUT(image, table)
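
A quick worked example of the mapping: with the 'gamma' parameter at 200, gamma = 2.0 and invGamma = 0.5, so mid-grey brightens:

import numpy as np

g = 200 / 100.0
table = np.array([((i / 255.0) ** (1.0 / g)) * 255 for i in range(256)]).astype('uint8')
assert table[128] == 180  # (128 / 255) ** 0.5 * 255 ~= 180
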
Example #24
def blur(frame, parameters=None, call_num=None):
    ''' Gaussian blur

    Notes
    -----

    Applies a gaussian blur to the image (https://en.wikipedia.org/wiki/Gaussian_blur)
    Usually useful to apply before subtracting 2 images.

    options
    ~~~~~~~

    params['kernel'] specifies the side length of the square Gaussian kernel (must be odd)

    Parameters
    ----------

    frame: np.ndarray
        frame
    parameters: dict, optional
        parameters dictionary
    call_num: int or None
        number specifying the call number to this function. allows multiple calls

    Returns
    -------

    blurred image

    '''
    method_key = get_method_key('blur', call_num=call_num)
    params = parameters['preprocess'][method_key]
    kernel = get_param_val(params['kernel'])
    out = cv2.GaussianBlur(frame, (kernel, kernel), 0)

    return out
Example #25
def subtract_bkg(frame, parameters=None, call_num=None):
    ''' Subtract a bkg image

    Notes
    -----

    This function subtracts a background image from a grayscale frame.

    options:
    parameters['subtract_bkg_type'] == 'mean' : subtracts the mean intensity from the image
                                    == 'img'  : subtracts a pre-prepared background image
                                                (see preprocessing > meanbkg_img.py)
    parameters['subtract_bkg_norm'] == True : stretches the range of the final image intensities to 0-255


    Parameters
    ----------

    frame: np.ndarray
        frame either colour or grayscale
    parameters: dict, optional
        parameters dictionary
    call_num: int or None
        number specifying the call number to this function. allows multiple calls


    Returns
    -------

    image with background image subtracted

    '''
    method_key = get_method_key('subtract_bkg', call_num=call_num)
    params = parameters['preprocess'][method_key]

    if params['subtract_bkg_type'] == 'mean':
        mean_val = int(np.mean(frame))
        subtract_frame = mean_val * np.ones(np.shape(frame), dtype=np.uint8)
        if get_param_val(params['subtract_bkg_invert']):
            frame2 = cv2.subtract(subtract_frame, frame)
        else:
            frame2 = cv2.subtract(frame, subtract_frame)

    elif params['subtract_bkg_type'] == 'img':
        # This option subtracts the previously created image which is added to dictionary.
        #These parameters are fed to the blur function
        temp_params = {}
        temp_params['preprocess'] = {
            'blur': {
                'kernel': get_param_val(params['subtract_bkg_blur_kernel'])
            }
        }

        #Load bkg img
        if parameters['experiment']['bkg_img'] is None:
            name = parameters['experiment']['video_filename']
            subtract_frame = cv2.imread(name[:-4] + '_bkgimg.png',
                                        cv2.IMREAD_GRAYSCALE)
        else:
            subtract_frame = cv2.imread(parameters['experiment']['bkg_img'],
                                        cv2.IMREAD_GRAYSCALE)

        assert (np.shape(frame) == np.shape(subtract_frame)),\
            'Warning: input frame and subtracted frame must have same shape'

        #blur frames
        frame2 = blur(frame, temp_params)
        frame2 = frame2.astype(np.uint8)
        subtract_frame = blur(subtract_frame, temp_params)
        subtract_frame = subtract_frame.astype(np.uint8)

        if get_param_val(params['subtract_bkg_invert']):
            frame2 = cv2.subtract(subtract_frame, frame2)
        else:
            frame2 = cv2.subtract(frame2, subtract_frame)

        # If the input frame is completely black, leave it unchanged
        if np.max(frame) == 0:
            frame2 = frame

    if params['subtract_bkg_norm'] == True:
        frame2 = cv2.normalize(frame2,
                               None,
                               alpha=0,
                               beta=255,
                               norm_type=cv2.NORM_MINMAX)

    return frame2
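
cv2.subtract is used here rather than plain numpy subtraction because it saturates at 0 on uint8 images instead of wrapping around:

import numpy as np
import cv2

a = np.array([[10]], dtype=np.uint8)
b = np.array([[20]], dtype=np.uint8)
print(cv2.subtract(a, b))  # [[0]]   : saturates at zero
print(a - b)               # [[246]] : numpy wraps modulo 256
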
Example #26
def variance(frame, parameters=None, call_num=None):
    ''' Variance of an image

    Notes
    -----

    This function finds the mean value of image and then returns
    frame which is the absolute difference of each pixel from that value

    options
    ~~~~~~~

    parameters['variance_type'] == 'mean' : returns the absolute difference from the mean
                                            image value
    parameters['variance_type'] == 'img'  : returns the absolute difference from a supplied
                                            bkg img. The bkg img is read into parameters['bkg_img'];
                                            it must be in the same folder as the processed
                                            video with name = {video_name}_bkgimg.png.
                                            A helpful script, meanbkg_img.py, can be used to average
                                            all the frames of a video together. If you have lots
                                            of small objects moving around and the video is long
                                            enough, you can get a pretty good background estimate
                                            without having to take a bkg.
    parameters['variance_type'] == 'zeros': uses an all-zero background image

    parameters['variance_bkg_norm'] == True : stretches the range of the largest difference to 255

    Parameters
    ----------

    frame: np.ndarray
        frame either colour or grayscale
    parameters: dict, optional
        parameters dictionary
    call_num: int or None
        number specifying the call number to this function. allows multiple calls

    Returns
    -------

    image of absolute difference from mean

    '''

    method_key = get_method_key('variance', call_num=call_num)
    params = parameters['preprocess'][method_key]

    if params['variance_type'] == 'mean':
        mean_val = int(np.mean(frame))
        subtract_frame = mean_val * np.ones(np.shape(frame), dtype=np.uint8)
    elif params['variance_type'] == 'img':
        temp_params = {}
        temp_params['preprocess'] = {
            'blur': {
                'kernel': get_param_val(params['variance_blur_kernel'])
            }
        }

        if parameters['experiment']['bkg_img'] is None:
            name = parameters['experiment']['video_filename']
            subtract_frame = cv2.imread(name[:-4] + '_bkgimg.png', -1)
        else:
            subtract_frame = cv2.imread(parameters['experiment']['bkg_img'],
                                        -1)

        assert (np.shape(frame) == np.shape(subtract_frame)), \
            'Warning: input frame and subtracted frame must have same shape'

        frame2 = blur(frame, temp_params)
        subtract_frame = blur(subtract_frame, temp_params)
    elif params['variance_type'] == 'zeros':
        subtract_frame = np.zeros(np.shape(frame))

    mean_subtract = np.mean(subtract_frame)
    mean_frame = np.mean(frame)
    if mean_subtract > 0:
        # Rescale the background so its mean intensity matches the frame's (avoids
        # dividing by zero for the all-zero 'zeros' background)
        subtract_frame = subtract_frame * (mean_frame / mean_subtract)
    subtract_frame = subtract_frame.astype(np.uint8)

    frame1 = cv2.subtract(subtract_frame, frame)
    frame1 = cv2.normalize(frame1, frame1, 0, 255, cv2.NORM_MINMAX)
    frame2 = cv2.subtract(frame, subtract_frame)
    frame2 = cv2.normalize(frame2, frame2, 0, 255, cv2.NORM_MINMAX)
    frame = cv2.add(frame1, frame2)

    if params['variance_bkg_norm'] == True:
        frame = cv2.normalize(frame,
                              None,
                              alpha=0,
                              beta=255,
                              norm_type=cv2.NORM_MINMAX)

    return frame
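
Because cv2.subtract saturates at zero, adding the two one-sided differences gives the absolute difference; without the intermediate normalize calls, the subtract/subtract/add pattern above is equivalent to cv2.absdiff:

import numpy as np
import cv2

a = np.random.randint(0, 256, (8, 8), dtype=np.uint8)
b = np.random.randint(0, 256, (8, 8), dtype=np.uint8)
both_sides = cv2.add(cv2.subtract(a, b), cv2.subtract(b, a))
assert np.array_equal(both_sides, cv2.absdiff(a, b))
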