def trajectories(frame, data, f, parameters=None, call_num=None):
    #This can only be run on a linked trajectory
    method_key = get_method_key('trajectories', call_num=call_num)
    x_col_name = parameters[method_key]['x_column']
    y_col_name = parameters[method_key]['y_column']

    #In this case subset_df is only used to get the particle_ids and colours of trajectories.
    subset_df = get_class_subset(data, f, parameters, method=method_key)
    particle_ids = subset_df['particle'].values

    colours = colour_array(subset_df, f, parameters, method=method_key)
    thickness = get_param_val(parameters[method_key]['thickness'])
    traj_length = get_param_val(parameters[method_key]['traj_length'])

    if (f-traj_length) < 0:
        traj_length = f

    df = data.df.sort_index()
    df.index.name='frame'
    df['frame'] = df.index
    df2 = df.loc[f-traj_length:f]

    df3 = df2.set_index(['particle','frame']).sort_index(level='particle')

    for index, particle in enumerate(particle_ids):
        traj_pts = df3[[x_col_name,y_col_name]].loc[particle]
        traj_pts = np.array(traj_pts.values, np.int32).reshape((-1,1,2))
        frame = cv2.polylines(frame,[traj_pts],False,colours[index],thickness)
    return frame
def difference(data, f_index=None, parameters=None, call_num=None):
    '''Difference in time of a column of dataframe.

    Notes
    -----

    The differences are calculated at separations equal
    to span along the column. Where this is not possible,
    or at the ends of the column, the value np.nan is inserted.

    Returns
    -------

    Dataframe with a new column of rolling differences named according to output_name in parameters.

    '''

    method_key = get_method_key('difference', call_num)
    span = get_param_val(parameters[method_key]['span'])
    column = parameters[method_key]['column_name']
    output_name = parameters[method_key]['output_name']
    data.index.name = 'index'
    data = data.sort_values(['particle', 'frame'])
    data[output_name] = data[column].diff(periods=span)
    # Mask differences that straddle two different particles
    data['nan'] = data['particle'].diff(periods=span).astype(bool)

    data.loc[data['nan'], output_name] = np.nan
    data = data.drop(labels='nan', axis=1)
    return data
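
# Illustrative sketch (not part of the library): the core of `difference` above is a
# span-separated diff that is masked wherever the 'particle' id changes, so values are
# never differenced across two different trajectories. `_demo_span_difference` is a
# hypothetical, self-contained example of that idea using only pandas and numpy.
def _demo_span_difference(span=2):
    import numpy as np
    import pandas as pd
    df = pd.DataFrame({'particle': [1, 1, 1, 2, 2, 2],
                       'frame':    [0, 1, 2, 0, 1, 2],
                       'x':        [0.0, 1.0, 3.0, 10.0, 10.5, 11.5]})
    df = df.sort_values(['particle', 'frame'])
    df['dx'] = df['x'].diff(periods=span)
    # mask rows where the diff straddles two particles (or falls off the start)
    crossed = df['particle'].diff(periods=span).astype(bool)
    df.loc[crossed, 'dx'] = np.nan
    return df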
def circles(frame, data, f, parameters=None, call_num=None):
    '''
    Function draws circles on an image at x, y locations. If data.df['r'] exists
    the circles take this radius, otherwise an 'r' column is created with the value
    set in the annotation sub dictionary.

    :param frame: frame to be annotated should be 3 colour channel
    :param data: datastore with particle information
    :param f: frame number
    :param parameters: annotation sub dictionary

    :return: annotated frame
    '''
    method_key = get_method_key('circles', call_num=call_num)
    if 'r' not in list(data.df.columns):
        data.add_particle_property('r', get_param_val(parameters[method_key]['radius']))
    thickness = get_param_val(parameters[method_key]['thickness'])

    subset_df = get_class_subset(data, f, parameters, method=method_key)
    circles = subset_df[['x', 'y', 'r']].values
    colours = colour_array(subset_df, f, parameters, method=method_key)
    for i, circle in enumerate(circles):
        try:
            frame = cv2.circle(frame, (int(circle[0]), int(circle[1])), int(circle[2]), colours[i], thickness)
        except Exception:
            print('Failed plotting circle, check data is valid')
    return frame
def trackpy(frame,_, parameters=None, call_num=None):
    method_key = get_method_key('trackpy', call_num)
    df = tp.locate(frame, get_param_val(parameters[method_key]['size_estimate']), invert=get_param_val(parameters[method_key]['invert']))

    if parameters[method_key]['get_intensities']:
        x = df['x'].to_numpy()
        y = df['y'].to_numpy()
        intensity = []
        for i in range(np.size(x)):
            xc = x[i]
            yc = y[i]
            rc = get_param_val(parameters[method_key]['intensity_radius'])

            try:
                # Try because some circles overlap the edge giving meaningless answers
                cut_out_frame = frame[int(yc - rc):int(yc + rc), int(xc - rc):int(xc + rc)]
                h, w = cut_out_frame.shape[:2]
                mask = create_circular_mask(h, w)
                masked_img = cut_out_frame.copy()
                masked_img[~mask] = 0
                value = getattr(im, parameters[method_key]['get_intensities'])(masked_img)
            except Exception:
                value = np.nan

            intensity.append(value)
        df['intensities'] = np.array(intensity)
    return df
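
# `create_circular_mask` is not shown in this snippet. A minimal sketch of what such a
# helper might look like (an assumption, not the library's implementation): a boolean
# mask that is True inside the largest circle that fits in an h x w box, used above to
# zero out pixels outside the detected particle before measuring its intensity.
def _demo_circular_mask(h, w):
    import numpy as np
    yy, xx = np.ogrid[:h, :w]
    cy, cx = (h - 1) / 2, (w - 1) / 2
    radius = min(h, w) / 2
    return (yy - cy) ** 2 + (xx - cx) ** 2 <= radius ** 2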
def magnitude(data, f_index=None, parameters=None, call_num=None):
    ''' Calculates the magnitude of 2 or 3 input columns, e.g. (x**2 + y**2)**0.5 = r

    Notes
    -----

    Combines the columns in quadrature, e.g. (x**2 + y**2)**0.5

    Returns
    -------

    Pandas dataframe with a new column named according to output_name in parameters

    '''

    method_key = get_method_key('magnitude', call_num)
    columns = parameters[method_key]['column_names']
    output_name = parameters[method_key]['output_name']
    if np.size(columns) == 2:
        data[output_name] = (data[columns[0]]**2 +
                             data[columns[1]]**2)**0.5
    elif np.size(columns) == 3:
        data[output_name] = (data[columns[0]]**2 +
                             data[columns[1]]**2 +
                             data[columns[2]]**2)**0.5
    return data
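
# Quick illustration (hypothetical standalone code, not part of the library): `magnitude`
# is just the Euclidean norm of the named columns, e.g. for velocity components vx, vy.
def _demo_magnitude():
    import pandas as pd
    df = pd.DataFrame({'vx': [3.0, 0.0], 'vy': [4.0, 2.0]})
    df['v_mag'] = (df['vx'] ** 2 + df['vy'] ** 2) ** 0.5   # -> [5.0, 2.0]
    return df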
def _find_delaunay(df, parameters=None, call_num=None):
    method_key = get_method_key('neighbours')
    cutoff = get_param_val(parameters[method_key]['cutoff'])

    points = df[['x', 'y']].values
    particle_ids = df[['particle']].values.flatten()
    tess = sp.Delaunay(points)
    list_indices, point_indices = tess.vertex_neighbor_vertices
    # neighbour_ids = [particle_ids[point_indices[a:b].tolist()] for a, b in zip(list_indices[:-1], list_indices[1:])]
    neighbour_ids = [
        point_indices[a:b].tolist()
        for a, b in zip(list_indices[:-1], list_indices[1:])
    ]
    dist = sp.distance.squareform(sp.distance.pdist(points))

    # Boolean mask: which of each point's Delaunay neighbours lie within the cutoff
    neighbour_dists = [(dist[i, row] < cutoff).tolist()
                       for i, row in enumerate(neighbour_ids)]
    indices = []
    for index, row in enumerate(neighbour_ids):
        indices.append([
            particle_ids[neighbour_ids[index][j]]
            for j, dummy in enumerate(row) if neighbour_dists[index][j]
        ])
    df.loc[:, ['neighbours']] = indices
    return df
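
# Note on the indexing above (a minimal standalone sketch, not library code):
# scipy's Delaunay.vertex_neighbor_vertices returns two flat arrays
# (list_indices, point_indices); the neighbours of vertex k are
# point_indices[list_indices[k]:list_indices[k + 1]].
def _demo_delaunay_neighbours():
    import numpy as np
    import scipy.spatial as sp
    points = np.array([[0, 0], [1, 0], [0, 1], [1, 1]], dtype=float)
    tess = sp.Delaunay(points)
    indptr, indices = tess.vertex_neighbor_vertices
    return [indices[indptr[k]:indptr[k + 1]].tolist() for k in range(len(points))]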
def medianblur(frame, parameters=None, call_num=None):
    '''Median blur

    Notes
    -----

    Applies a median blur to the image (https://en.wikipedia.org/wiki/Median_filter)
    Good for removing speckle noise

    options
    ~~~~~~~

    parameters['kernel'] specifies the size of the square kernel; opencv requires an odd value

    Parameters
    ----------

    frame: np.ndarray
        frame
    parameters: dict, optional
        parameters dictionary
    call_num: int or None
        number specifying the call number to this function. allows multiple calls

    Returns
    -------

    blurred image

    '''
    method_key = get_method_key('medianblur', call_num=call_num)
    params = parameters['preprocess'][method_key]
    kernel = get_param_val(params['kernel'])
    out = cv2.medianBlur(frame, kernel)
    return out
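
# Usage sketch (illustrative only, with a made-up frame): cv2.medianBlur expects the
# kernel size to be an odd integer greater than 1, e.g. 3, 5 or 7.
def _demo_medianblur():
    import numpy as np
    import cv2
    noisy = (np.random.rand(64, 64) * 255).astype(np.uint8)
    return cv2.medianBlur(noisy, 5)   # 5x5 median filter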
def resize(frame, parameters=None, call_num=None):
    ''' Resize an image

    Notes
    -----

    resizes an input image by the scale specified

    options
    ~~~~~~~

    parameters['scale'] : percentage scale factor for the resize operation

    Parameters
    ----------

    frame: np.ndarray
        frame
    parameters: dict, optional
        parameters dictionary
    call_num: int or None
        number specifying the call number to this function. allows multiple calls

    Returns
    -------

    Resized frame

    '''
    method_key = get_method_key('resize', call_num=call_num)
    params = parameters['preprocess'][method_key]

    scale = get_param_val(params['scale']) / 100
    return cv2.resize(frame, None, fx=scale, fy=scale)
def particle_values(frame, data, f, parameters=None, call_num=None):
    '''
    Function annotates image with particle ids
    This function only makes sense if run on linked trajectories

    :param frame: frame to be annotated should be 3 colour channel
    :param data: datastore with particle information
    :param f: frame number
    :param parameters: annotation sub dictionary

    :return: annotated frame
    '''
    method_key = get_method_key('particle_values', call_num=call_num)
    x = data.get_info(f, 'x')
    y = data.get_info(f, 'y')

    particle_values = data.get_info(f, parameters[method_key]['values_column']).astype(int)

    for index, particle_val in enumerate(particle_values):
        frame = cv2.putText(frame, str(particle_val), (int(x[index]), int(y[index])),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL,
                            parameters[method_key]['font_size'],
                            parameters[method_key]['font_colour'],
                            parameters[method_key]['font_thickness'],
                            cv2.LINE_AA)

    return frame
def var_label(frame, data, f, parameters=None, call_num=None):
    '''
    Function puts text on an image at specific location.
    This function is for adding data specific to a single frame, rather than
    labelling particles with their ids. The data for a given frame should be stored
    in 'var_column', i.e. all particles in that frame share this value. You could use
    it to put the "temperature" or the mean order parameter on a frame, etc.

    :param frame: frame to be annotated should be 3 colour channel
    :param data: datastore with particle information
    :param f: frame number
    :param parameters: annotation sub dictionary.

    :return: annotated frame
    '''
    method_key = get_method_key('var_label', call_num=call_num)
    var_column=parameters[method_key]['var_column']
    text = str(data.get_info(f, var_column)[0])
    position = parameters[method_key]['position']

    annotated_frame=cv2.putText(frame, text, position, cv2.FONT_HERSHEY_COMPLEX_SMALL,
                                parameters[method_key]['font_size'],
                                parameters[method_key]['font_colour'],
                                parameters[method_key]['font_thickness'],
                                cv2.LINE_AA)

    return annotated_frame
def classify(data, f_index=None, parameters=None, call_num=None):
    method_key = get_method_key('classify', call_num)
    column = parameters[method_key]['column_name']
    output_name = parameters[method_key]['output_name']
    threshold_value = get_param_val(parameters[method_key]['value'])
    data[output_name] = data[column].apply(_classify_fn,
                                           threshold_value=threshold_value)
    return data
def contours(frame, data, f, parameters=None, call_num=None):
    method_key = get_method_key('contours', call_num=call_num)
    thickness = get_param_val(parameters[method_key]['thickness'])
    subset_df = get_class_subset(data, f, parameters, method=method_key)
    contour_pts = subset_df[['contours']].values
    colours = colour_array(subset_df, f, parameters, method=method_key)

    for index, contour in enumerate(contour_pts):
        frame = _draw_contours(frame, contour, col=colours[index],
                               thickness=thickness)
    return frame
def hough(frame, _,parameters=None, call_num=None):
    '''
    Performs the opencv hough circles transform to locate
    circles in an image.

    :param frame: grayscale frame in which to search for circles
    :param parameters: tracking sub dictionary
    :param call_num: number specifying the call number to this function
    :return: pandas dataframe with x, y, r columns (and optionally intensities)
    '''
    method_key = get_method_key('hough', call_num)

    circles = np.squeeze(cv2.HoughCircles(
        frame,
        cv2.HOUGH_GRADIENT, 1,
        get_param_val(parameters[method_key]['min_dist']),
        param1=get_param_val(parameters[method_key]['p1']),
        param2=get_param_val(parameters[method_key]['p2']),
        minRadius=get_param_val(parameters[method_key]['min_rad']),
        maxRadius=get_param_val(parameters[method_key]['max_rad'])))

    try:
        circles_dict = {'x': circles[:, 0], 'y': circles[:, 1], 'r': circles[:, 2]}
    except Exception:
        # Fall back to a dummy circle if no circles were found
        circles_dict = {'x': [1], 'y': [1], 'r': [5]}


    if parameters[method_key]['get_intensities']:
        intensity = []
        for i,_ in enumerate(circles_dict['x']):
            xc = circles_dict['x'][i]
            yc = circles_dict['y'][i]
            rc = circles_dict['r'][i]

            try:
                #Try because some circles overlap the edge giving meaningless answers
                cut_out_frame = frame[int(yc - rc):int(yc + rc), int(xc - rc):int(xc + rc)]
                h,w= cut_out_frame.shape[:2]
                mask = create_circular_mask(h, w)
                masked_img = cut_out_frame.copy()
                masked_img[~mask] = 0
                value = getattr(im, parameters[method_key]['get_intensities'])(masked_img)
            except Exception:
                value = np.nan

            intensity.append(value)

        circles_dict['intensities']=np.array(intensity)

    df = pd.DataFrame(circles_dict)

    return df
def boxes(frame, data, f, parameters=None, call_num=None):
    method_key = get_method_key('boxes', call_num=call_num)
    thickness = get_param_val(parameters[method_key]['thickness'])
    subset_df = get_class_subset(data, f, parameters, method=method_key)
    box_pts = subset_df[['box']].values

    colours = colour_array(subset_df, f, parameters, method=method_key)
    sz = np.shape(frame)
    for index, box in enumerate(box_pts):
        if contour_inside_img(sz, box):
            frame = _draw_contours(frame, box, col=colours[index],
                                   thickness=thickness)
    return frame
def adaptive_threshold(frame, parameters=None, call_num=None):
    '''Adaptive threshold

    Notes
    -----

    This applies an adaptive threshold. This differs from global threshold
    in that for each pixel the cutoff threshold is defined based on a block of local
    pixels around it. This enables you to cope with gradual changes in illumination
    across the image etc.

    options
    ~~~~~~~

    parameters['block_size'] : size (odd) of the local block of pixels the threshold is calculated over
    parameters['C'] : the constant subtracted from the local mean, see: http://homepages.inf.ed.ac.uk/rbf/HIPR2/adpthrsh.htm
    parameters['ad_mode'] : inverts the thresholding behaviour

    Parameters
    ----------

    frame: np.ndarray
        frame grayscale
    parameters: dict, optional
        parameters dictionary
    call_num: int or None
        number specifying the call number to this function. allows multiple calls

    Returns
    -------

    binary image with 255 above threshold else 0.

    '''

    method_key = get_method_key('adaptive_threshold', call_num=call_num)
    params = parameters['preprocess'][method_key]
    block = get_param_val(params['block_size'])
    const = get_param_val(params['C'])
    invert = get_param_val(params['ad_mode'])

    if invert == 1:
        out = cv2.adaptiveThreshold(frame, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                    cv2.THRESH_BINARY_INV, block, const)
    else:
        out = cv2.adaptiveThreshold(frame, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                    cv2.THRESH_BINARY, block, const)
    return out
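
# Usage sketch (illustrative only, with a made-up frame): cv2.adaptiveThreshold expects
# a single-channel 8-bit image, an odd block size >= 3, and a constant C subtracted from
# the local (here Gaussian-weighted) mean, matching the call in the function above.
def _demo_adaptive_threshold():
    import numpy as np
    import cv2
    gray = (np.random.rand(64, 64) * 255).astype(np.uint8)
    return cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY, 11, 2)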
def contours(pp_frame, frame, parameters=None, call_num=None):
    '''
    contours method finds the contours of objects in a preprocessed (binary)
    image. Use this when you need access to the pixels belonging to each
    object; if you only need an angle or a size estimate, use boxes instead.

    contours stores: the centroid cx, cy, the area enclosed by the contour,
    the contour itself and its bounding rectangle, which are used together
    to generate a mask so that you can extract pixels from the original image
    and perform some analysis.
    '''
    sz = np.shape(frame)
    if np.shape(sz)[0] == 3:
        frame= cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    method_key = get_method_key('contours',call_num=call_num)
    params = parameters[method_key]
    get_intensities = params['get_intensities']

    area_min = get_param_val(params['area_min'])
    area_max = get_param_val(params['area_max'])
    info = []

    contour_pts = _find_contours(pp_frame)

    for index, contour in enumerate(contour_pts):
        M = cv2.moments(contour)
        if M['m00'] > 0:
            area = cv2.contourArea(contour)
            if (area < area_max) & (area > area_min):
                cx = int(M['m10'] / M['m00'])
                cy = int(M['m01'] / M['m00'])

                box = cv2.boundingRect(contour)
                if get_intensities:
                    intensity = _find_intensity_inside_contour(contour, frame, params['get_intensities'])
                    info_contour = [cx, cy, area, contour, box, intensity]
                else:
                    info_contour = [cx, cy, area, contour, box]
                info.append(info_contour)

    if get_intensities:
        info_headings = ['x', 'y', 'area', 'contours', 'boxes', 'intensities']
    else:
        info_headings = ['x', 'y', 'area', 'contours', 'boxes']
    df = pd.DataFrame(data=info, columns=info_headings)

    return df
def networks(frame, data, f, parameters=None, call_num=None):
    method_key = get_method_key('networks', call_num=call_num)
    df = get_class_subset(data, f, parameters, method=method_key)
    df = df.set_index('particle')
    particle_ids = df.index.values
    colours = colour_array(df, f, parameters, method=method_key)
    thickness = get_param_val(parameters[method_key]['thickness'])

    for index, particle in enumerate(particle_ids):
        pt = df.loc[particle, ['x', 'y']].values
        pt1 = (int(pt[0]), int(pt[1]))
        neighbour_ids = df.loc[particle, 'neighbours']
        for index2, neighbour in enumerate(neighbour_ids):
            pt = df.loc[neighbour, ['x','y']].values
            pt2 = (int(pt[0]), int(pt[1]))
            frame = cv2.line(frame,pt1, pt2, colours[index], thickness, lineType=cv2.LINE_AA)
    return frame
def colour_channel(frame, parameters=None, call_num=None):
    """
    This function selects a particular colour channel, returning a
     grayscale image from a colour input frame
    """
    method_key = get_method_key('colour_channel', call_num=call_num)
    params = parameters['preprocess'][method_key]
    colour = params['colour']
    assert (colour == 'red') or (colour == 'green') or (
        colour == 'blue'), "colour param must be 'red', 'green' or 'blue'"
    # Note: this mapping assumes the frame's channels are ordered R, G, B
    if colour == 'red':
        index = 0
    elif colour == 'green':
        index = 1
    elif colour == 'blue':
        index = 2
    return frame[:, :, index]
def neighbours(
    df,
    f_index=None,
    parameters=None,
    call_num=None,
):

    # https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Delaunay.html
    method_key = get_method_key('neighbours', call_num)
    method = parameters[method_key]['method']
    df['neighbours'] = np.nan
    for f in _every_frame(df, f_index):
        df_frame = df.loc[f]
        if method == 'delaunay':
            df_frame = _find_delaunay(df_frame, parameters=parameters)
        elif method == 'kdtree':
            df_frame = _find_kdtree(df_frame, parameters=parameters)
        df.loc[f] = df_frame
    return df
def vectors(frame, data, f, parameters=None, call_num=None):
    method_key = get_method_key('vectors', call_num=call_num)
    dx = parameters[method_key]['dx_column']
    dy = parameters[method_key]['dy_column']

    vectors = data.get_info(f, ['x', 'y',dx, dy])

    thickness = get_param_val(parameters[method_key]['thickness'])
    line_type = 8
    tipLength = 0.01*get_param_val(parameters[method_key]['tip_length'])
    vector_scale = 0.01*get_param_val(parameters[method_key]['vector_scale'])

    colours = colour_array(data.df, f, parameters, method=method_key)

    for i, vector in enumerate(vectors):
        frame = cv2.arrowedLine(frame, (int(vector[0]), int(vector[1])),
                                (int(vector[0] + vector[2] * vector_scale), int(vector[1] + vector[3] * vector_scale)),
                                color=colours[i], thickness=thickness, line_type=line_type, shift=0, tipLength=tipLength)
    return frame
def max(data, f_index=None, parameters=None, call_num=None):
    ''' Max of a column's values

    Notes
    -----

    Returns the max of a particle's trajectory values to a new
    column. The value is repeated next to all entries for that trajectory

    :return: dataframe with new column defined in output_name of parameters

    '''

    method_key = get_method_key('max', call_num)
    column = parameters[method_key]['column_name']
    output_name = parameters[method_key]['output_name']
    temp = data.groupby('particle')[column].transform('max')
    data[output_name] = temp
    return data
def rate(data, f_index=None, parameters=None, call_num=None):
    '''Rate of change of data in a column

    Notes
    -----

    The rate function takes an input column and calculates the
    rate of change of that quantity. It takes into account
    the fact that particles go missing from frames. Where this
    is the case, the rate is the change in quantity between observations
    divided by the time gap between those observations.
    NaNs are inserted at the start and end of particle trajectories
    where the calculation is not possible.

    We sort by particle and then calculate diffs. This leads to differences
    between pairs of particles above one another in the dataframe. We then mask
    these slots with NaNs.

    Returns
    -------

    Pandas dataframe with a new column named according to output_name in parameters

    '''

    method_key = get_method_key('rate', call_num)
    column = parameters[method_key]['column_name']
    output_name = parameters[method_key]['output_name']

    data = data.sort_values(['particle', 'index'])
    #Change and time over which change happened
    data['temp_diff'] = data[column].diff()
    # Mask diffs that straddle two different particles
    data['nan'] = data['particle'].diff().astype(bool)
    data.loc[data['nan'], 'temp_diff'] = np.nan
    data['time'] = (1 / parameters[method_key]['fps']) * data.index
    data['dt'] = data['time'].diff()
    #Put NaNs in dt values that cross particles.
    data.loc[data['dt'] < 0, 'dt'] = np.nan
    data[output_name] = data['temp_diff'] / data['dt']
    #remove temporary columns
    data = data.drop(labels=['nan', 'temp_diff', 'dt'], axis=1)
    return data
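
# Illustrative sketch (hypothetical standalone code, not part of the library): the essence
# of `rate` is d(quantity)/dt, where dt comes from the possibly uneven gap between the
# frames in which a particle was actually observed.
def _demo_rate(fps=50.0):
    import pandas as pd
    df = pd.DataFrame({'particle': [1, 1, 1],
                       'frame': [0, 1, 3],      # note the missing frame 2
                       'x': [0.0, 1.0, 5.0]})
    df['time'] = df['frame'] / fps
    df['rate_x'] = df['x'].diff() / df['time'].diff()   # first entry is NaN
    return df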
def _find_kdtree(df, parameters=None):
    method_key = get_method_key('neighbours')
    cutoff = get_param_val(parameters[method_key]['cutoff'])
    num_neighbours = get_param_val(parameters[method_key]['neighbours'])

    points = df[['x', 'y']].values
    particle_ids = df[['particle']].values.flatten()
    tree = sp.KDTree(points)
    _, indices = tree.query(points,
                            k=num_neighbours + 1,
                            distance_upper_bound=cutoff)
    neighbour_ids = []
    fill_val = np.size(particle_ids)
    for index, row in enumerate(indices):
        neighbour_ids.append([
            particle_ids[row[i + 1]] for i in range(num_neighbours)
            if row[i + 1] != fill_val
        ])
    df.loc[:, ['neighbours']] = neighbour_ids
    return df
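
# Note on the fill value above (a minimal standalone sketch, not library code): when
# scipy's KDTree.query is given a distance_upper_bound, missing neighbours are reported
# with index equal to the number of points (and distance inf), which is why rows equal
# to `fill_val` are filtered out.
def _demo_kdtree_fill():
    import numpy as np
    import scipy.spatial as sp
    points = np.array([[0.0, 0.0], [1.0, 0.0], [10.0, 10.0]])
    tree = sp.KDTree(points)
    dist, idx = tree.query(points, k=2, distance_upper_bound=2.0)
    # idx[2, 1] == 3 (== len(points)) because the isolated point has no
    # neighbour within the cutoff; the corresponding distance is inf
    return dist, idx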
def erosion(frame, parameters=None, call_num=None):
    ''' Morphological erosion

    Notes
    -----

    erodes a binary image. This means pixels are set to
    zero based on their connectivity with neighbours

    options
    ~~~~~~~

    parameters['erosion_kernel'] : size of the square erosion kernel
    parameters['iterations'] : number of times the erosion is applied

    Parameters
    ----------

    frame: np.ndarray
        frame
    parameters: dict, optional
        parameters dictionary
    call_num: int or None
        number specifying the call number to this function. allows multiple calls

    Returns
    -------

    Eroded frame

    '''

    method_key = get_method_key('erosion', call_num=call_num)
    params = parameters['preprocess'][method_key]
    kernel = get_param_val(params['erosion_kernel'])
    iterations = get_param_val(params['iterations'])

    kernel = np.ones((kernel, kernel))

    return cv2.erode(frame, kernel, iterations=iterations)
def gamma(image, parameters=None, call_num=None):
    ''' Gamma correction

    Notes
    -----

    generates a lookup table which maps the values 0-255 to 0-255
    however not in a linear way. The mapping follows a power law
    with exponent gamma/100.0.

    Parameters
    ----------

    image: np.ndarray
        frame to be gamma corrected
    parameters: dict, optional
        parameters dictionary
    call_num: int or None
        number specifying the call number to this function. allows multiple calls

    Returns
    -------

    gamma corrected image

    '''

    method_key = get_method_key('gamma', call_num=call_num)
    params = parameters['preprocess'][method_key]

    gamma = get_param_val(params['gamma']) / 100.0
    # build a lookup table mapping the pixel values [0, 255] to
    # their adjusted gamma values
    invGamma = 1.0 / gamma

    table = np.array([((i / 255.0)**invGamma) * 255
                      for i in np.arange(0, 256)]).astype("uint8")

    return cv2.LUT(image, table)
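
# Worked illustration (not library code): with a stored value of 200 the exponent gamma
# is 2.0, so invGamma = 0.5 and a mid-grey pixel of 128 maps to
# ((128 / 255) ** 0.5) * 255 ~= 181, i.e. the image is brightened; a stored value of 50
# (gamma 0.5) darkens it instead.
def _demo_gamma_value(pixel=128, stored_gamma=200):
    gamma = stored_gamma / 100.0
    return ((pixel / 255.0) ** (1.0 / gamma)) * 255   # ~180.7 for the defaults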
def threshold(frame, parameters=None, call_num=None):
    '''Apply a global image threshold

    Notes
    -----

    This takes a cutoff threshold value and returns white above and
    black below this value.

    options
    ~~~~~~~

    parameters['threshold'] : sets the value of the cutoff threshold
    parameters['th_mode'] : can be used to invert the above behaviour

    Parameters
    ----------

    frame: np.ndarray
        frame grayscale
    parameters: dict, optional
        parameters dictionary
    call_num: int or None
        number specifying the call number to this function. allows multiple calls

    Returns
    -------

    binary image with 255 for pixel val > threshold else 0.

    '''

    method_key = get_method_key('threshold', call_num=call_num)
    params = parameters['preprocess'][method_key]

    threshold = get_param_val(params['threshold'])
    mode = get_param_val(params['th_mode'])
    ret, out = cv2.threshold(frame, threshold, 255, mode)

    return out
def boxes(frame, _,parameters=None, call_num=None):
    '''
    boxes method finds contour of object but reduces the info to
    a rotated bounding box. Use for finding an angle of object or
    estimate of size.
    '''
    method_key = get_method_key('boxes',call_num=call_num)
    params = parameters[method_key]
    get_intensities = params['get_intensities']

    area_min = get_param_val(params['area_min'])
    area_max = get_param_val(params['area_max'])
    info = []
    contour_pts = _find_contours(frame)

    for index, contour in enumerate(contour_pts):
        area = int(cv2.contourArea(contour))
        if (area < area_max) and (area >= area_min):
            info_contour = _rotated_bounding_rectangle(contour)
            cx, cy = np.mean(info_contour[5], axis=0)
            angle = info_contour[2]
            width = info_contour[3]
            length = info_contour[4]
            box = info_contour[5]

            if get_intensities:
                intensity = _find_intensity_inside_contour(contour, frame, params['get_intensities'])
                info_contour = [cx, cy, angle, width, length, contour, box, intensity]
            else:
                info_contour = [cx, cy, angle, width, length, contour, box]
            info.append(info_contour)

    if get_intensities:
        info_headings = ['x', 'y', 'theta', 'width', 'length', 'contours','box', 'intensities']
    else:
        info_headings = ['x', 'y', 'theta', 'width', 'length', 'contours','box']
    df = pd.DataFrame(data=info, columns=info_headings)
    return df
def subtract_drift(data, f_index=None, parameters=None, call_num=None):
    ''' subtract drift from an x,y coordinate trajectory

    Notes
    -----

    Uses trackpy to compute the ensemble drift of the particles and subtracts
    it from each particle's x, y coordinates.

    Returns
    -------

    dataframe with new 'x_drift' and 'y_drift' columns containing the
    drift-corrected coordinates

    '''

    method_key = get_method_key('subtract_drift', call_num)
    drift = tp.motion.compute_drift(data)
    drift_corrected = tp.motion.subtract_drift(data.copy(), drift)
    drift_corrected.index.name = 'index'
    drift_corrected = drift_corrected.sort_values(['particle', 'index'])
    data[['x_drift', 'y_drift']] = drift_corrected[['x', 'y']]
    return data
def text_label(frame, data, f, parameters=None, call_num=None):
    '''
    Function puts text on an image at specific location.
    This function is for adding metadata or info not labelling
    particles with their ids.

    :param frame: frame to be annotated should be 3 colour channel
    :param data: datastore with particle information
    :param f: frame number
    :param parameters: annotation sub dictionary

    :return: annotated frame
    '''
    method_key = get_method_key('text_label', call_num=call_num)
    text=parameters[method_key]['text']
    position = parameters[method_key]['position']
    annotated_frame=cv2.putText(frame, text, position, cv2.FONT_HERSHEY_COMPLEX_SMALL,
                                parameters[method_key]['font_size'],
                                parameters[method_key]['font_colour'],
                                parameters[method_key]['font_thickness'],
                                cv2.LINE_AA)

    return annotated_frame
def grayscale(frame, parameters=None, call_num=None):
    ''' Convert colour frame to grayscale

    Notes
    -----
    Takes a colour image and applies opencv's colour to grayscale method
    cv2.cvtColor. Checks the shape and dimensions of the frame and returns a
    grayscale image whether the input is colour or already grayscale. If the
    shape is not recognised, a message is printed and the frame is returned unchanged.

    Parameters
    ----------

    frame: np.ndarray
        frame either colour or grayscale
    parameters: dict, optional
        parameters dictionary
    call_num: int or None
        number specifying the call number to this function. allows multiple calls

    Returns
    -------
    np.ndarray - The grayscale image

    '''
    method_key = get_method_key('grayscale', call_num=call_num)
    params = parameters['preprocess'][method_key]

    sz = np.shape(frame)
    if np.shape(sz)[0] == 3:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    elif np.shape(sz)[0] == 2:
        print('Image is already grayscale')
    else:
        print('Something went wrong! Shape img not recognised')

    return frame