Example #1
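These excerpts come from the PlantCV image-analysis library. Each function assumes a common set of imports; a sketch of what they would typically be is below (module paths are assumptions and may vary across PlantCV versions):

# Assumed imports for the excerpts below (paths may differ by PlantCV version)
import os
import cv2
import numpy as np
import pandas as pd
from scipy import ndimage as ndi
from scipy import stats
from skimage.feature import peak_local_max
from skimage.segmentation import watershed
from plotnine import ggplot, aes, geom_line, scale_x_continuous, scale_color_manual
from plantcv.plantcv import params, outputs, fatal_error, print_image, plot_image
from plantcv.plantcv import color_palette, apply_mask, dilate, find_objects
# Some examples also use helpers such as binary_threshold, segment_euclidean_length,
# find_tips, and _debug, whose import paths depend on the PlantCV version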
def watershed_segmentation(rgb_img, mask, distance=10):
    """Uses the watershed algorithm to detect boundary of objects. Needs a marker file which specifies area which is
       object (white), background (grey), unknown area (black).

    Inputs:
    rgb_img             = image to perform watershed on needs to be 3D (i.e. np.shape = x,y,z not np.shape = x,y)
    mask                = binary image, single channel, object in white and background black
    distance            = min_distance of local maximum

    Returns:
    analysis_images     = list of output images

    :param rgb_img: numpy.ndarray
    :param mask: numpy.ndarray
    :param distance: int
    :return analysis_images: list
    """
    params.device += 1
    # Compute the Euclidean distance transform of the binary mask
    dist_transform = cv2.distanceTransformWithLabels(mask, cv2.DIST_L2, maskSize=0)[0]

    localMax = peak_local_max(dist_transform, indices=False, min_distance=distance, labels=mask)

    # Label each local maximum to create watershed seed markers
    markers = ndi.label(localMax, structure=np.ones((3, 3)))[0]
    # Watershed floods from the markers over the inverted distance transform
    labels = watershed(-dist_transform, markers, mask=mask)

    img1 = np.copy(rgb_img)

    # Assign a palette color to each watershed label (palette computed once, outside the loop)
    rand_color = color_palette(len(np.unique(labels)))
    for x in np.unique(labels):
        img1[labels == x] = rand_color[x]

    img2 = apply_mask(img1, mask, 'black')

    joined = np.concatenate((img2, rgb_img), axis=1)

    estimated_object_count = len(np.unique(markers)) - 1

    analysis_image = [joined]

    if params.debug == 'print':
        print_image(dist_transform, os.path.join(params.debug_outdir, str(params.device) + '_watershed_dist_img.png'))
        print_image(joined, os.path.join(params.debug_outdir, str(params.device) + '_watershed_img.png'))
    elif params.debug == 'plot':
        plot_image(dist_transform, cmap='gray')
        plot_image(joined)

    outputs.add_observation(variable='estimated_object_count', trait='estimated object count',
                            method='plantcv.plantcv.watershed', scale='none', datatype=int,
                            value=estimated_object_count, label='none')

    # Store images
    outputs.images.append(analysis_image)

    return analysis_image
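A minimal usage sketch (file paths and the debug setting are hypothetical; assumes the imports above):

# Hypothetical inputs: an RGB image and a pre-computed binary mask
rgb = cv2.imread("plant.png")
bin_mask = cv2.imread("plant_mask.png", 0)
params.debug = "plot"
ws_images = watershed_segmentation(rgb, bin_mask, distance=10)
# The estimated object count is recorded in the global outputs store
count = outputs.observations['estimated_object_count']['value']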
Example #2
def segment_path_length(segmented_img, objects):
    """ Use segments to calculate geodesic distance per segment

        Inputs:
        segmented_img = Segmented image to plot lengths on
        objects       = List of contours

        Returns:
        labeled_img        = Segmented debugging image with lengths labeled

        :param segmented_img: numpy.ndarray
        :param objects: list
        :return labeled_img: numpy.ndarray

        """

    label_coord_x = []
    label_coord_y = []
    segment_lengths = []
    labeled_img = segmented_img.copy()

    for i, cnt in enumerate(objects):
        # Calculate geodesic distance; divide by two since cv2.arcLength traverses an open
        # contour down one side and back the other, returning twice the path length
        segment_lengths.append(cv2.arcLength(cnt, False) / 2)
        # Store coordinates for labels
        label_coord_x.append(cnt[0][0][0])
        label_coord_y.append(cnt[0][0][1])

    segment_ids = []

    # Put labels of length
    for c, value in enumerate(segment_lengths):
        text = "{:.2f}".format(value)
        w = label_coord_x[c]
        h = label_coord_y[c]
        cv2.putText(img=labeled_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=params.text_size, color=(150, 150, 150), thickness=params.text_thickness)
        segment_ids.append(c)

    outputs.add_observation(variable='segment_path_length', trait='segment path length',
                            method='plantcv.plantcv.morphology.segment_path_length', scale='pixels', datatype=list,
                            value=segment_lengths, label=segment_ids)

    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(labeled_img, os.path.join(params.debug_outdir, str(params.device) + '_segment_path_lengths.png'))
    elif params.debug == 'plot':
        plot_image(labeled_img)

    return labeled_img
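A usage sketch: the segments typically come from skeletonizing the mask first. The skeletonize and segment_skeleton helpers named here are assumed from PlantCV's morphology module:

# Hypothetical pipeline producing segments, then measuring their path lengths
from plantcv.plantcv.morphology import skeletonize, segment_skeleton  # assumed import path
skeleton = skeletonize(mask=bin_mask)
segmented_img, seg_objects = segment_skeleton(skel_img=skeleton)
labeled = segment_path_length(segmented_img, seg_objects)
path_lengths = outputs.observations['segment_path_length']['value']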
def y_axis_pseudolandmarks(img, obj, mask):
    """Divide up object contour into 19 equidistant segments and generate landmarks for each

    Inputs:
    img      = This is a copy of the original plant image generated using np.copy if debug is true it will be drawn on
    obj      = a contour of the plant object (this should be output from the object_composition.py fxn)
    mask     = this is a binary image. The object should be white and the background should be black

    Returns:
    left      = List of landmarks within the left side
    right   = List of landmarks within the right side
    center_h = List of landmarks within the center

    :param img: numpy.ndarray
    :param obj: list
    :param mask: numpy.ndarray
    :return left: list
    :return right: list
    :return center_h: list
    """
    # Let's get some landmarks by scanning along the y-axis
    params.device += 1
    if not np.any(obj):
        return ('NA', 'NA'), ('NA', 'NA'), ('NA', 'NA')
    x, y, width, height = cv2.boundingRect(obj)
    extent = height

    # Outputs
    left = []
    right = []
    center_h = []
    left_list = []
    right_list = []
    center_h_list = []

    # If height is at least 21 pixels, make 20 increments (5% intervals)
    if extent >= 21:
        inc = int(extent / 21)
        # Define variable for max points and min points
        pts_max = []
        pts_min = []
        # Get max and min points for each of the intervals
        for i in range(1, 21):
            if i == 1:
                pt_max = y
                pt_min = y + (inc * i)
            else:
                pt_max = y + (inc * (i - 1))
                pt_min = y + (inc * i)
            # Put these in an array
            pts_max.append(pt_max)
            pts_min.append(pt_min)
        # Combine max and min into a set of tuples
        point_range = list(zip(pts_max, pts_min))
        # define some list variables to fill
        row_median = []
        row_ave = []
        max_width = []
        left_points = []
        right_points = []
        y_vals = []
        x_centroids = []
        y_centroids = []
        # For each of the 20 intervals
        for pt in point_range:
            # Get the lower and upper bounds (lower/higher in pixel value; the low point
            # is toward the top of the photo, the high point toward the bottom)
            low_point, high_point = pt
            # Get all rows within these two points
            rows = []
            lps = []
            rps = []
            # Get a continuous list of the values between the top and the bottom of the interval save as vals
            vals = list(range(low_point, high_point))
            # For each row... get all coordinates from object contour that match row
            for v in vals:
                # Value is all entries that match the row
                value = obj[v == obj[:, 0, 1]]
                if len(value) > 0:
                    # There could be more than two contour points in each pixel row
                    # Grab largest x coordinate (column)
                    largest = value[:, 0, 0].max()
                    # Grab smallest x coordinate (column)
                    smallest = value[:, 0, 0].min()
                    # Take the difference between the two (this is how far across the object is on this plane)
                    row_width = largest - smallest
                    # Append this value to a list
                    rows.append(row_width)
                    lps.append(smallest)
                    rps.append(largest)
                if len(value) == 0:
                    row_width = 1
                    rows.append(row_width)
                    lps.append(1)
                    rps.append(1)
            # For this interval, record the median/mean/max width and the mean left/right edges
            row_median.append(np.median(np.array(rows)))
            row_ave.append(np.mean(np.array(rows)))
            max_width.append(np.max(np.array(rows)))
            left_points.append(np.mean(lps))
            right_points.append(np.mean(rps))
            yval = int((high_point + low_point) / 2)
            y_vals.append(yval)
            # Make a copy of the mask; we want to get landmark points from this
            window = np.copy(mask)
            window[:low_point] = 0
            window[high_point:] = 0
            s = cv2.moments(window)
            # Centroid (center of mass x, center of mass y)
            if largest - smallest > 3:
                if s['m00'] > 0.001:
                    smx, smy = (s['m10'] / s['m00'], s['m01'] / s['m00'])
                else:
                    smx, smy = (s['m10'] / 0.001, s['m01'] / 0.001)
                x_centroids.append(int(smx))
                y_centroids.append(int(smy))
            else:
                smx = (largest + smallest) / 2
                smy = yval
                x_centroids.append(int(smx))
                y_centroids.append(int(smy))
        left = list(zip(left_points, y_vals))
        left = np.array(left)
        left.shape = (20, 1, 2)
        right = list(zip(right_points, y_vals))
        right = np.array(right)
        right.shape = (20, 1, 2)
        center_h = list(zip(x_centroids, y_centroids))
        center_h = np.array(center_h)
        center_h.shape = (20, 1, 2)

        img2 = np.copy(img)
        for i in left:
            x = i[0, 0]
            y = i[0, 1]
            cv2.circle(img2, (int(x), int(y)), params.line_thickness, (255, 0, 0), -1)
        for i in right:
            x = i[0, 0]
            y = i[0, 1]
            cv2.circle(img2, (int(x), int(y)), params.line_thickness, (255, 0, 255), -1)
        for i in center_h:
            x = i[0, 0]
            y = i[0, 1]
            cv2.circle(img2, (int(x), int(y)), params.line_thickness, (0, 79, 255), -1)

        if params.debug == 'plot':
            plot_image(img2)
        elif params.debug == 'print':
            print_image(img2, os.path.join(params.debug_outdir, (str(params.device) + '_y_axis_pseudolandmarks.png')))
    elif extent < 21:
        # If the height of the object is less than 21 pixels, just make the object a 20-pixel rectangle
        x, y, width, height = cv2.boundingRect(obj)
        y_coords = list(range(y, y + 20))
        l_points = [x] * 20
        left = list(zip(l_points, y_coords))
        left = np.array(left)
        left.shape = (20, 1, 2)
        r_points = [x + width] * 20
        right = list(zip(r_points, y_coords))
        right = np.array(right)
        right.shape = (20, 1, 2)
        m = cv2.moments(mask, binaryImage=True)
        # Centroid (center of mass x, center of mass y)
        if m['m00'] == 0:
            fatal_error('Check input parameters, first moment=0')
        else:
            cmx, cmy = (m['m10'] / m['m00'], m['m01'] / m['m00'])
            c_points = [cmx] * 20
            center_h = list(zip(c_points, y_coords))
            center_h = np.array(center_h)
            center_h.shape = (20, 1, 2)

        img2 = np.copy(img)
        for i in left:
            x = i[0, 0]
            y = i[0, 1]
            cv2.circle(img2, (int(x), int(y)), params.line_thickness, (255, 0, 0), -1)
        for i in right:
            x = i[0, 0]
            y = i[0, 1]
            cv2.circle(img2, (int(x), int(y)), params.line_thickness, (255, 0, 255), -1)
        for i in center_h:
            x = i[0, 0]
            y = i[0, 1]
            cv2.circle(img2, (int(x), int(y)), params.line_thickness, (0, 79, 255), -1)
        if params.debug == 'plot':
            plot_image(img2)
        elif params.debug == 'print':
            print_image(img2, os.path.join(params.debug_outdir, (str(params.device) + '_y_axis_pseudolandmarks.png')))

    # Store into global measurements
    for pt in left:
        left_list.append(pt[0].tolist())
    for pt in right:
        right_list.append(pt[0].tolist())
    for pt in center_h:
        center_h_list.append(pt[0].tolist())

    outputs.add_observation(variable='left_lmk', trait='left landmark coordinates',
                            method='plantcv.plantcv.y_axis_pseudolandmarks', scale='none', datatype=list,
                            value=left_list, label='none')
    outputs.add_observation(variable='right_lmk', trait='right landmark coordinates',
                            method='plantcv.plantcv.y_axis_pseudolandmarks', scale='none', datatype=list,
                            value=right_list, label='none')
    outputs.add_observation(variable='center_h_lmk', trait='center horizontal landmark coordinates',
                            method='plantcv.plantcv.y_axis_pseudolandmarks', scale='none', datatype=list,
                            value=center_h_list, label='none')

    return left, right, center_h
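A usage sketch, assuming obj is a composed contour (e.g., from PlantCV's object_composition, as the docstring suggests):

# Hypothetical call; each return value is a (20, 1, 2) array of (x, y) points
left_pts, right_pts, center_pts = y_axis_pseudolandmarks(img=rgb, obj=obj, mask=bin_mask)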
def x_axis_pseudolandmarks(img, obj, mask):
    """Divide up object contour into 20 equidistance segments and generate landmarks for each

    Inputs:
    img      = This is a copy of the original plant image generated using np.copy if debug is true it will be drawn on
    obj      = a contour of the plant object (this should be output from the object_composition.py fxn)
    mask     = this is a binary image. The object should be white and the background should be black

    Returns:
    top      = List of landmark points within 'top' portion
    bottom   = List of landmark points within the 'bottom' portion
    center_v = List of landmark points within the middle portion

    :param img: numpy.ndarray
    :param obj: list
    :param mask: numpy.ndarray
    :return top: list
    :return bottom: list
    :return center_v: list
    """

    # Let's get some landmarks by scanning along the x-axis
    params.device += 1
    if not np.any(obj):
        return ('NA', 'NA'), ('NA', 'NA'), ('NA', 'NA')
    x, y, width, height = cv2.boundingRect(obj)
    extent = width

    # Outputs
    top = []
    bottom = []
    center_v = []
    top_list = []
    bottom_list = []
    center_v_list = []

    # If width is at least 21 pixels, make 20 increments (5% intervals)
    if extent >= 21:
        inc = int(extent / 21)
        # Define variable for max points and min points
        pts_max = []
        pts_min = []
        # Get max and min points for each of the intervals
        for i in range(1, 21):
            if i == 1:
                pt_max = x + (inc * i)
                pt_min = x 
            else:
                pt_max = x + (inc * i)
                pt_min = x + (inc * (i - 1))
            # Put these in an array
            pts_max.append(pt_max)
            pts_min.append(pt_min)
        # Combine max and min into a set of tuples
        point_range = list(zip(pts_min, pts_max))
        # define some list variables to fill
        col_median = []
        col_ave = []
        max_height = []
        top_points = []
        bottom_points = []
        x_vals = []
        x_centroids = []
        y_centroids = []
        # For each of the 20 intervals
        for pt in point_range:
            # Get the left and right bounds    
            left_point, right_point = pt
            # Get all cols within these two points
            cols = []
            ups = []
            bps = []
            # Get a continuous list of the values between the left and the right of the interval save as vals
            vals = list(range(left_point, right_point))
            # For each col... get all coordinates from object contour that match col
            for v in vals:
                # Value is all entries that match the col
                value = obj[v == obj[:, 0, 0]]
                if len(value) > 0:
                    # There could be more than two contour points in each pixel column
                    # Grab largest y coordinate (row)
                    largest = value[:, 0, 1].max()
                    # Grab smallest y coordinate (row)
                    smallest = value[:, 0, 1].min()
                    # Take the difference between the two (this is how far across the object is on this plane)
                    col_width = largest - smallest
                    # Append this value to a list
                    cols.append(col_width)
                    ups.append(smallest)
                    bps.append(largest)
                if len(value) == 0:
                    col_width = 1
                    cols.append(col_width)
                    ups.append(1)
                    bps.append(1)
            # For this interval, record the median/mean/max height and the mean top/bottom edges
            col_median.append(np.median(np.array(cols)))
            col_ave.append(np.mean(np.array(cols)))
            max_height.append(np.max(np.array(cols)))
            top_points.append(np.mean(ups))
            bottom_points.append(np.mean(bps))
            xval = int((left_point + right_point) / 2)
            x_vals.append(xval)
            # Make a copy of the mask; we want to get landmark points from this
            window = np.copy(mask)
            window[:, :left_point] = 0
            window[:, right_point:] = 0
            s = cv2.moments(window)
            # Centroid (center of mass x, center of mass y)
            if largest - smallest > 3:
                if s['m00'] > 0.001:
                    smx, smy = (s['m10'] / s['m00'], s['m01'] / s['m00'])
                else:
                    smx, smy = (s['m10'] / 0.001, s['m01'] / 0.001)
                x_centroids.append(int(smx))
                y_centroids.append(int(smy))
            else:
                # largest/smallest are y-coordinates here, so their midpoint is the y value
                smx = xval
                smy = (largest + smallest) / 2
                x_centroids.append(int(smx))
                y_centroids.append(int(smy))
        top = list(zip(x_vals, top_points))
        top = np.array(top)
        top.shape = (20, 1, 2)
        bottom = list(zip(x_vals, bottom_points))
        bottom = np.array(bottom)
        bottom.shape = (20, 1, 2)
        center_v = list(zip(x_centroids, y_centroids))
        center_v = np.array(center_v)
        center_v.shape = (20, 1, 2)

        img2 = np.copy(img)
        for i in top:
            x = i[0, 0]
            y = i[0, 1]
            cv2.circle(img2, (int(x), int(y)), params.line_thickness, (255, 0, 0), -1)
        for i in bottom:
            x = i[0, 0]
            y = i[0, 1]
            cv2.circle(img2, (int(x), int(y)), params.line_thickness, (255, 0, 255), -1)
        for i in center_v:
            x = i[0, 0]
            y = i[0, 1]
            cv2.circle(img2, (int(x), int(y)), params.line_thickness, (0, 79, 255), -1)
        if params.debug == 'plot':
            plot_image(img2)
        elif params.debug == 'print':
            print_image(img2,
                        os.path.join(params.debug_outdir, (str(params.device) + '_x_axis_pseudolandmarks.png')))
    elif extent < 21:
        # If the width of the object is less than 21 pixels, just make the object a 20-pixel rectangle
        x, y, width, height = cv2.boundingRect(obj)
        x_coords = list(range(x, x + 20))
        u_points = [y] * 20
        top = list(zip(x_coords, u_points))
        top = np.array(top)
        top.shape = (20, 1, 2)
        b_points = [y + height] * 20
        bottom = list(zip(x_coords, b_points))
        bottom = np.array(bottom)
        bottom.shape = (20, 1, 2)
        m = cv2.moments(mask, binaryImage=True)
        if m['m00'] == 0:
            fatal_error('Check input parameters, first moment=0')
        else:
            # Centroid (center of mass x, center of mass y)
            cmx, cmy = (m['m10'] / m['m00'], m['m01'] / m['m00'])
            c_points = [cmy] * 20
            center_v = list(zip(x_coords, c_points))
            center_v = np.array(center_v)
            center_v.shape = (20, 1, 2)

        img2 = np.copy(img)
        for i in top:
            x = i[0, 0]
            y = i[0, 1]
            cv2.circle(img2, (int(x), int(y)), params.line_thickness, (255, 0, 0), -1)
        for i in bottom:
            x = i[0, 0]
            y = i[0, 1]
            cv2.circle(img2, (int(x), int(y)), params.line_thickness, (255, 0, 255), -1)
        for i in center_v:
            x = i[0, 0]
            y = i[0, 1]
            cv2.circle(img2, (int(x), int(y)), params.line_thickness, (0, 79, 255), -1)
        if params.debug == 'plot':
            plot_image(img2)
        elif params.debug == 'print':
            print_image(img2,
                        os.path.join(params.debug_outdir, (str(params.device) + '_x_axis_pseudolandmarks.png')))

    # Store into global measurements
    for pt in top:
        top_list.append(pt[0].tolist())
    for pt in bottom:
        bottom_list.append(pt[0].tolist())
    for pt in center_v:
        center_v_list.append(pt[0].tolist())

    outputs.add_observation(variable='top_lmk', trait='top landmark coordinates',
                            method='plantcv.plantcv.x_axis_pseudolandmarks', scale='none', datatype=list,
                            value=top_list, label='none')
    outputs.add_observation(variable='bottom_lmk', trait='bottom landmark coordinates',
                            method='plantcv.plantcv.x_axis_pseudolandmarks', scale='none', datatype=list,
                            value=bottom_list, label='none')
    outputs.add_observation(variable='center_v_lmk', trait='center vertical landmark coordinates',
                            method='plantcv.plantcv.x_axis_pseudolandmarks', scale='none', datatype=list,
                            value=center_v_list, label='none')

    return top, bottom, center_v
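Usage mirrors y_axis_pseudolandmarks above, scanning across the width instead of the height:

# Hypothetical call on the same composed object and mask
top_pts, bottom_pts, center_v_pts = x_axis_pseudolandmarks(img=rgb, obj=obj, mask=bin_mask)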
def analyze_bound_horizontal(img, obj, mask, line_position):
    """User-input boundary line tool

    Inputs:
    img             = RGB or grayscale image data for plotting
    obj             = single or grouped contour object
    mask            = Binary mask made from selected contours
    line_position   = position of boundary line (a value of 0 would draw the line through the top of the image)

    Returns:
    analysis_images = list of output images

    :param img: numpy.ndarray
    :param obj: list
    :param mask: numpy.ndarray
    :param line_position: int
    :return analysis_images: list
    """

    params.device += 1
    ori_img = np.copy(img)

    # Draw a horizontal line across the image at the user-specified height
    if len(np.shape(ori_img)) == 2:
        ori_img = cv2.cvtColor(ori_img, cv2.COLOR_GRAY2BGR)
    iy, ix, iz = np.shape(ori_img)
    size = (iy, ix)
    size1 = (iy, ix, 3)
    background = np.zeros(size, dtype=np.uint8)
    wback = (np.zeros(size1, dtype=np.uint8)) + 255
    x_coor = int(ix)
    y_coor = line_position
    rec_corner = int(iy - 2)
    rec_point1 = (1, rec_corner)
    rec_point2 = (x_coor - 2, y_coor - 2)
    cv2.rectangle(background, rec_point1, rec_point2, (255), 1)
    below_contour, below_hierarchy = cv2.findContours(background, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]

    x, y, width, height = cv2.boundingRect(obj)

    if y_coor - y <= 0:
        height_above_bound = 0
        height_below_bound = height
    elif y_coor - y > 0:
        height_1 = y_coor - y
        if height - height_1 <= 0:
            height_above_bound = height
            height_below_bound = 0
        else:
            height_above_bound = y_coor - y
            height_below_bound = height - height_above_bound

    below = []
    above = []
    # np.nonzero returns (row, col) indices; stack them as (x, y) point pairs
    mask_nonzeroy, mask_nonzerox = np.nonzero(mask)
    obj_points = np.vstack((mask_nonzerox, mask_nonzeroy))
    obj_points1 = np.transpose(obj_points)

    for c in obj_points1:
        xy = tuple(c)
        pptest = cv2.pointPolygonTest(below_contour[0], xy, measureDist=False)
        if pptest == 1:
            below.append(xy)
            cv2.circle(ori_img, xy, 1, (155, 0, 255))
            cv2.circle(wback, xy, 1, (155, 0, 255))
        else:
            above.append(xy)
            cv2.circle(ori_img, xy, 1, (0, 255, 0))
            cv2.circle(wback, xy, 1, (0, 255, 0))
    above_bound_area = len(above)
    below_bound_area = len(below)
    percent_bound_area_above = ((float(above_bound_area)) / (float(above_bound_area + below_bound_area))) * 100
    percent_bound_area_below = ((float(below_bound_area)) / (float(above_bound_area + below_bound_area))) * 100

    analysis_images = []

    if above_bound_area or below_bound_area:
        point3 = (0, y_coor - 4)
        point4 = (x_coor, y_coor - 4)
        cv2.line(ori_img, point3, point4, (255, 0, 255), params.line_thickness)
        cv2.line(wback, point3, point4, (255, 0, 255), params.line_thickness)
        m = cv2.moments(mask, binaryImage=True)
        cmx, cmy = (m['m10'] / m['m00'], m['m01'] / m['m00'])
        if y_coor - y <= 0:
            cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height), (0, 255, 0), params.line_thickness)
            cv2.line(wback, (int(cmx), y), (int(cmx), y + height), (0, 255, 0), params.line_thickness)
        elif y_coor - y > 0:
            height_1 = y_coor - y
            if height - height_1 <= 0:
                cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height), (255, 0, 0), params.line_thickness)
                cv2.line(wback, (int(cmx), y), (int(cmx), y + height), (255, 0, 0), params.line_thickness)
            else:
                cv2.line(ori_img, (int(cmx), y_coor - 2), (int(cmx), y_coor - height_above_bound), (255, 0, 0),
                         params.line_thickness)
                cv2.line(ori_img, (int(cmx), y_coor - 2), (int(cmx), y_coor + height_below_bound), (0, 255, 0),
                         params.line_thickness)
                cv2.line(wback, (int(cmx), y_coor - 2), (int(cmx), y_coor - height_above_bound), (255, 0, 0),
                         params.line_thickness)
                cv2.line(wback, (int(cmx), y_coor - 2), (int(cmx), y_coor + height_below_bound), (0, 255, 0),
                         params.line_thickness)
        # Output image with boundary line, above/below bound area
        analysis_images.append(wback)
        analysis_images.append(ori_img)

    # The reference line and centroid height lines were already drawn above
    if params.debug is not None:
        if params.debug == 'print':
            print_image(wback, os.path.join(params.debug_outdir, str(params.device) + '_boundary_on_white.jpg'))
            print_image(ori_img, os.path.join(params.debug_outdir, str(params.device) + '_boundary_on_img.jpg'))
        if params.debug == 'plot':
            plot_image(wback)
            plot_image(ori_img)

    outputs.add_observation(variable='horizontal_reference_position', trait='horizontal reference position',
                            method='plantcv.plantcv.analyze_bound_horizontal', scale='none', datatype=int,
                            value=line_position, label='none')
    outputs.add_observation(variable='height_above_reference', trait='height above reference',
                            method='plantcv.plantcv.analyze_bound_horizontal', scale='pixels', datatype=int,
                            value=height_above_bound, label='pixels')
    outputs.add_observation(variable='height_below_reference', trait='height below reference',
                            method='plantcv.plantcv.analyze_bound_horizontal', scale='pixels', datatype=int,
                            value=height_below_bound, label='pixels')
    outputs.add_observation(variable='area_above_reference', trait='area above reference',
                            method='plantcv.plantcv.analyze_bound_horizontal', scale='pixels', datatype=int,
                            value=above_bound_area, label='pixels')
    outputs.add_observation(variable='percent_area_above_reference', trait='percent area above reference',
                            method='plantcv.plantcv.analyze_bound_horizontal', scale='none', datatype=float,
                            value=percent_bound_area_above, label='none')
    outputs.add_observation(variable='area_below_reference', trait='area below reference',
                            method='plantcv.plantcv.analyze_bound_horizontal', scale='pixels', datatype=int,
                            value=below_bound_area, label='pixels')
    outputs.add_observation(variable='percent_area_below_reference', trait='percent area below reference',
                            method='plantcv.plantcv.analyze_bound_horizontal', scale='none', datatype=float,
                            value=percent_bound_area_below, label='none')

    # Store images
    outputs.images.append(analysis_images)

    return analysis_images
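A usage sketch with a hypothetical boundary position (row 450); measurements land in the global outputs store:

# Measure how much of the plant sits above/below a horizontal reference line
bound_images = analyze_bound_horizontal(img=rgb, obj=obj, mask=bin_mask, line_position=450)
height_above = outputs.observations['height_above_reference']['value']
pct_above = outputs.observations['percent_area_above_reference']['value']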
Example #6
def segment_angle(segmented_img, objects):
    """ Calculate angle of segments (in degrees) by fitting a linear regression line to segments.

        Inputs:
        segmented_img  = Segmented image to plot slope lines and angles on
        objects        = List of contours

        Returns:
        labeled_img    = Segmented debugging image with angles labeled


        :param segmented_img: numpy.ndarray
        :param objects: list
        :return labeled_img: numpy.ndarray
        """

    label_coord_x = []
    label_coord_y = []
    segment_angles = []

    labeled_img = segmented_img.copy()

    rand_color = color_palette(len(objects))

    for i, cnt in enumerate(objects):
        # Find bounds for regression lines to get drawn
        rect = cv2.minAreaRect(cnt)
        pts = cv2.boxPoints(rect)
        df = pd.DataFrame(pts, columns=('x', 'y'))
        x_max = int(df['x'].max())
        x_min = int(df['x'].min())

        # Fit a line to the segment and compute its slope
        [vx, vy, x, y] = cv2.fitLine(cnt, cv2.DIST_L2, 0, 0.01, 0.01)
        slope = -vy / vx
        left_y = int(((x - x_min) * slope) + y)
        right_y = int(((x - x_max) * slope) + y)

        if slope > 1000000 or slope < -1000000:
            print("Slope of contour with ID#", i, "is", slope, "and cannot be plotted.")
        else:
            # Draw the fitted regression line across the segment
            cv2.line(labeled_img, (x_max - 1, right_y), (x_min, left_y), rand_color[i], 1)

        # Store coordinates for labels
        label_coord_x.append(cnt[0][0][0])
        label_coord_y.append(cnt[0][0][1])

        # Calculate degrees from slopes
        segment_angles.append(np.arctan(slope[0]) * 180 / np.pi)

    segment_ids = []
    for i, cnt in enumerate(objects):
        # Label slope lines
        w = label_coord_x[i]
        h = label_coord_y[i]
        text = "{:.2f}".format(segment_angles[i])
        cv2.putText(img=labeled_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=params.text_size, color=(150, 150, 150), thickness=params.text_thickness)
        segment_ids.append(i)

    outputs.add_observation(variable='segment_angle', trait='segment angle',
                            method='plantcv.plantcv.morphology.segment_angle', scale='degrees', datatype=list,
                            value=segment_angles, label=segment_ids)

    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(labeled_img, os.path.join(params.debug_outdir, str(params.device) + '_segmented_angles.png'))
    elif params.debug == 'plot':
        plot_image(labeled_img)

    return labeled_img
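The angle itself is just the arctangent of the fitted slope converted to degrees; a self-contained sketch of that step (the example direction vector is made up):

# Slope-to-angle conversion as used above
import numpy as np
vx, vy = 0.5, -0.5                      # example unit direction vector from cv2.fitLine
slope = -vy / vx                        # sign flip because the image y-axis points down
angle_deg = np.arctan(slope) * 180 / np.pi
print(angle_deg)                        # 45.0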
def analyze_nir_intensity(gray_img, mask, bins=256, histplot=False):
    """This function calculates the intensity of each pixel associated with the plant and writes the values out to
       a file. It can also print out a histogram plot of pixel intensity and a pseudocolor image of the plant.

    Inputs:
    gray_img     = 8- or 16-bit grayscale image data
    mask         = Binary mask made from selected contours
    bins         = number of classes to divide spectrum into
    histplot     = if True plots histogram of intensity values

    Returns:
    analysis_images = list of output figures (NIR histogram)

    :param gray_img: numpy.ndarray
    :param mask: numpy.ndarray
    :param bins: int
    :param histplot: bool
    :return analysis_images: list
    """

    params.device += 1

    # apply plant shaped mask to image
    mask1 = binary_threshold(mask, 0, 255, 'light')
    mask1 = (mask1 / 255)
    masked = np.multiply(gray_img, mask1)

    # calculate histogram
    if gray_img.dtype == 'uint16':
        maxval = 65536
    else:
        maxval = 256

    # Make a pseudo-RGB image
    rgbimg = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2BGR)

    hist_nir, hist_bins = np.histogram(masked, bins, (1, maxval))

    hist_bins1 = hist_bins[:-1]
    hist_bins2 = [float(round(l, 2)) for l in hist_bins1]

    hist_nir1 = [float(l) for l in hist_nir]

    # make hist percentage for plotting
    pixels = cv2.countNonZero(mask1)
    hist_percent = (hist_nir / float(pixels)) * 100

    # Mask the background so only the plant remains in the pseudo-RGB image
    masked1 = cv2.bitwise_and(rgbimg, rgbimg, mask=mask)
    if params.debug is not None:
        if params.debug == "print":
            print_image(masked1, os.path.join(params.debug_outdir, str(params.device) + "_masked_nir_plant.jpg"))
        if params.debug == "plot":
            plot_image(masked1)

    analysis_images = []

    if histplot is True:
        hist_x = hist_percent
        bin_labels = np.arange(0, bins)
        dataset = pd.DataFrame({'Grayscale pixel intensity': bin_labels,
                                'Proportion of pixels (%)': hist_x})
        fig_hist = (ggplot(data=dataset,
                           mapping=aes(x='Grayscale pixel intensity',
                                       y='Proportion of pixels (%)'))
                    + geom_line(color='red')
                    + scale_x_continuous(breaks=list(range(0, bins, 25))))

        analysis_images.append(fig_hist)
        if params.debug == "print":
            fig_hist.save(os.path.join(params.debug_outdir, str(params.device) + '_nir_hist.png'))
        elif params.debug == "plot":
            print(fig_hist)

    outputs.add_observation(variable='nir_frequencies', trait='near-infrared frequencies',
                            method='plantcv.plantcv.analyze_nir_intensity', scale='frequency', datatype=list,
                            value=hist_nir1, label=hist_bins2)

    # Store images
    outputs.images.append(analysis_images)

    return analysis_images
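A usage sketch with a hypothetical 8-bit NIR image:

# Analyze NIR pixel intensities under the plant mask and request the histogram figure
nir = cv2.imread("plant_nir.png", 0)
nir_figs = analyze_nir_intensity(gray_img=nir, mask=bin_mask, bins=256, histplot=True)
nir_freqs = outputs.observations['nir_frequencies']['value']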
Example #8
def segment_curvature(segmented_img, objects):
    """ Calculate segment curvature as defined by the ratio between geodesic and euclidean distance.
        Measurement of two-dimensional tortuosity.

        Inputs:
        segmented_img     = Segmented image to plot lengths on
        objects           = List of contours

        Returns:
        labeled_img        = Segmented debugging image with curvature labeled


        :param segmented_img: numpy.ndarray
        :param objects: list
        :return labeled_img: numpy.ndarray

        """
    # Store debug
    debug = params.debug
    params.debug = None

    label_coord_x = []
    label_coord_y = []

    _ = segment_euclidean_length(segmented_img, objects)
    labeled_img = segment_path_length(segmented_img, objects)
    eu_lengths = outputs.observations['segment_eu_length']['value']
    path_lengths = outputs.observations['segment_path_length']['value']
    curvature_measure = [x/y for x, y in zip(path_lengths, eu_lengths)]
    rand_color = color_palette(len(objects))

    for i, cnt in enumerate(objects):
        # Store coordinates for labels
        label_coord_x.append(objects[i][0][0][0])
        label_coord_y.append(objects[i][0][0][1])

        # Draw segments one by one to group segment tips together
        finding_tips_img = np.zeros(segmented_img.shape[:2], np.uint8)
        cv2.drawContours(finding_tips_img, objects, i, (255, 255, 255), 1, lineType=8)
        segment_tips = find_tips(finding_tips_img)
        tip_objects, tip_hierarchies = find_objects(segment_tips, segment_tips)
        points = []

        for t in tip_objects:
            # Gather pairs of coordinates
            x, y = t.ravel()
            coord = (x, y)
            points.append(coord)

        # Draw euclidean distance lines
        cv2.line(labeled_img, points[0], points[1], rand_color[i], 1)

    segment_ids = []
    for i, cnt in enumerate(objects):
        # Label each segment with its curvature value
        text = "{:.3f}".format(curvature_measure[i])
        w = label_coord_x[i]
        h = label_coord_y[i]
        cv2.putText(img=labeled_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=params.text_size, color=(150, 150, 150), thickness=params.text_thickness)
        segment_ids.append(i)

    outputs.add_observation(variable='segment_curvature', trait='segment curvature',
                            method='plantcv.plantcv.morphology.segment_curvature', scale='none', datatype=list,
                            value=curvature_measure, label=segment_ids)

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(labeled_img, os.path.join(params.debug_outdir, str(params.device) + '_segment_curvature.png'))
    elif params.debug == 'plot':
        plot_image(labeled_img)

    return labeled_img
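Because curvature is the ratio of geodesic (path) length to Euclidean tip-to-tip length, a perfectly straight segment scores 1.0 and anything curved scores higher. A worked example with made-up lengths:

# Worked example of the curvature ratio
path_length = 120.0                     # distance traveled along the segment (pixels)
eu_length = 100.0                       # straight-line distance between the two tips
curvature = path_length / eu_length     # 1.2 -> mildly tortuous; 1.0 would be straight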
def find_tips(skel_img, mask=None):
    """
    The endpoints algorithm was inspired by Jean-Patrick Pommier: https://gist.github.com/jeanpat/5712699
    Find tips in skeletonized image.

    Inputs:
    skel_img    = Skeletonized image
    mask        = (Optional) binary mask for debugging. If provided, debug image will be overlaid on the mask.

    Returns:
    tip_img   = Image with just tips, rest 0

    :param skel_img: numpy.ndarray
    :param mask: numpy.ndarray
    :return tip_img: numpy.ndarray
    """

    # In a kernel: 1 values line up with 255s, -1s line up with 0s, and 0s are don't-care positions
    endpoint1 = np.array([[-1, -1, -1],
                          [-1,  1, -1],
                          [ 0,  1,  0]])
    endpoint2 = np.array([[-1, -1, -1],
                          [-1,  1,  0],
                          [-1,  0,  1]])

    endpoint3 = np.rot90(endpoint1)
    endpoint4 = np.rot90(endpoint2)
    endpoint5 = np.rot90(endpoint3)
    endpoint6 = np.rot90(endpoint4)
    endpoint7 = np.rot90(endpoint5)
    endpoint8 = np.rot90(endpoint6)

    endpoints = [endpoint1, endpoint2, endpoint3, endpoint4, endpoint5, endpoint6, endpoint7, endpoint8]
    tip_img = np.zeros(skel_img.shape[:2], dtype=int)
    for endpoint in endpoints:
        tip_img = np.logical_or(cv2.morphologyEx(skel_img, op=cv2.MORPH_HITMISS, kernel=endpoint,
                                                 borderType=cv2.BORDER_CONSTANT, borderValue=0), tip_img)
    tip_img = tip_img.astype(np.uint8) * 255
    # Store debug
    debug = params.debug
    params.debug = None
    tip_objects, _ = find_objects(tip_img, tip_img)

    if mask is None:
        # Make debugging image
        dilated_skel = dilate(skel_img, params.line_thickness, 1)
        tip_plot = cv2.cvtColor(dilated_skel, cv2.COLOR_GRAY2RGB)

    else:
        # Make debugging image on mask
        mask_copy = mask.copy()
        tip_plot = cv2.cvtColor(mask_copy, cv2.COLOR_GRAY2RGB)
        skel_obj, skel_hier = find_objects(skel_img, skel_img)
        cv2.drawContours(tip_plot, skel_obj, -1, (150, 150, 150), params.line_thickness,
                         lineType=8, hierarchy=skel_hier)

    # Initialize list of tip data points
    tip_list = []
    tip_labels = []
    for i, tip in enumerate(tip_objects):
        x, y = tip.ravel()[:2]
        tip_list.append((int(x), int(y)))
        tip_labels.append(i)
        cv2.circle(tip_plot, (x, y), params.line_thickness, (0, 255, 0), -1)

    outputs.add_observation(variable='tips', trait='list of tip coordinates identified from a skeleton',
                            method='plantcv.plantcv.morphology.find_tips', scale='pixels', datatype=list,
                            value=tip_list, label=tip_labels)

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(tip_plot, os.path.join(params.debug_outdir, str(params.device) + '_skeleton_tips.png'))
    elif params.debug == 'plot':
        plot_image(tip_plot)

    return tip_img
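A toy demonstration of the hit-or-miss operation that drives tip detection, using the kernel convention from the comment above (a standalone sketch, not PlantCV code):

# Find the left endpoint of a 3-pixel horizontal skeleton with one hit-or-miss kernel
import cv2
import numpy as np

skel = np.zeros((5, 5), dtype=np.uint8)
skel[2, 1:4] = 255                      # a short horizontal skeleton

# Endpoint kernel: center pixel on, only its right-hand neighbor on, everything else off
kernel = np.array([[-1, -1, -1],
                   [-1,  1,  1],
                   [-1, -1, -1]])
hits = cv2.morphologyEx(skel, cv2.MORPH_HITMISS, kernel)
print(np.argwhere(hits > 0))            # [[2 1]] -> the left endpoint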
Example #10
def analyze_color(rgb_img, mask, hist_plot_type=None, label="default"):
    """Analyze the color properties of an image object
    Inputs:
    rgb_img          = RGB image data
    mask             = Binary mask made from selected contours
    hist_plot_type   = None, 'all', 'rgb', 'lab', or 'hsv'
    label            = optional label parameter, modifies the variable name of observations recorded

    Returns:
    analysis_image   = histogram output

    :param rgb_img: numpy.ndarray
    :param mask: numpy.ndarray
    :param hist_plot_type: str
    :param label: str
    :return analysis_image: plotnine.ggplot
    """
    if len(np.shape(rgb_img)) < 3:
        fatal_error("rgb_img must be an RGB image")

    # Mask the input image
    masked = cv2.bitwise_and(rgb_img, rgb_img, mask=mask)
    # Extract the blue, green, and red channels
    b, g, r = cv2.split(masked)
    # Convert the BGR image to LAB
    lab = cv2.cvtColor(masked, cv2.COLOR_BGR2LAB)
    # Extract the lightness, green-magenta, and blue-yellow channels
    l, m, y = cv2.split(lab)
    # Convert the BGR image to HSV
    hsv = cv2.cvtColor(masked, cv2.COLOR_BGR2HSV)
    # Extract the hue, saturation, and value channels
    h, s, v = cv2.split(hsv)

    # Color channel dictionary
    channels = {
        "b": b,
        "g": g,
        "r": r,
        "l": l,
        "m": m,
        "y": y,
        "h": h,
        "s": s,
        "v": v
    }

    # Histogram plot types
    hist_types = {
        "ALL": ("b", "g", "r", "l", "m", "y", "h", "s", "v"),
        "RGB": ("b", "g", "r"),
        "LAB": ("l", "m", "y"),
        "HSV": ("h", "s", "v")
    }

    if hist_plot_type is not None and hist_plot_type.upper() not in hist_types:
        fatal_error(
            "The histogram plot type was " + str(hist_plot_type) +
            ', but can only be one of the following: None, "all", "rgb", "lab", or "hsv"!'
        )
    # Store histograms, plotting colors, and plotting labels
    channel_info = {
        "b": ("blue", "blue"),
        "g": ("green", "forestgreen"),
        "r": ("red", "red"),
        "l": ("lightness", "dimgray"),
        "m": ("green-magenta", "magenta"),
        "y": ("blue-yellow", "yellow"),
        "h": ("hue", "blueviolet"),
        "s": ("saturation", "cyan"),
        "v": ("value", "orange")
    }
    histograms = {
        channel: {
            "label": label_name,
            "graph_color": graph_color,
            "hist": [float(i[0]) for i in cv2.calcHist([channels[channel]], [0], mask, [256], [0, 255])]
        }
        for channel, (label_name, graph_color) in channel_info.items()
    }

    # Create list of bin labels for 8-bit data
    binval = np.arange(0, 256)

    analysis_image = None
    # Create a dataframe of bin labels and histogram data
    dataset = pd.DataFrame({
        'bins': binval,
        'blue': histograms["b"]["hist"],
        'green': histograms["g"]["hist"],
        'red': histograms["r"]["hist"],
        'lightness': histograms["l"]["hist"],
        'green-magenta': histograms["m"]["hist"],
        'blue-yellow': histograms["y"]["hist"],
        'hue': histograms["h"]["hist"],
        'saturation': histograms["s"]["hist"],
        'value': histograms["v"]["hist"]
    })

    # Make the histogram figure using plotnine
    if hist_plot_type is not None:
        if hist_plot_type.upper() == 'RGB':
            df_rgb = pd.melt(dataset,
                             id_vars=['bins'],
                             value_vars=['blue', 'green', 'red'],
                             var_name='Color Channel',
                             value_name='Pixels')
            hist_fig = (ggplot(
                df_rgb, aes(x='bins', y='Pixels', color='Color Channel')) +
                        geom_line() +
                        scale_x_continuous(breaks=list(range(0, 256, 25))) +
                        scale_color_manual(['blue', 'green', 'red']))

        elif hist_plot_type.upper() == 'LAB':
            df_lab = pd.melt(
                dataset,
                id_vars=['bins'],
                value_vars=['lightness', 'green-magenta', 'blue-yellow'],
                var_name='Color Channel',
                value_name='Pixels')
            hist_fig = (ggplot(
                df_lab, aes(x='bins', y='Pixels', color='Color Channel')) +
                        geom_line() +
                        scale_x_continuous(breaks=list(range(0, 256, 25))) +
                        scale_color_manual(['yellow', 'magenta', 'dimgray']))

        elif hist_plot_type.upper() == 'HSV':
            df_hsv = pd.melt(dataset,
                             id_vars=['bins'],
                             value_vars=['hue', 'saturation', 'value'],
                             var_name='Color Channel',
                             value_name='Pixels')
            hist_fig = (ggplot(
                df_hsv, aes(x='bins', y='Pixels', color='Color Channel')) +
                        geom_line() +
                        scale_x_continuous(breaks=list(range(0, 256, 25))) +
                        scale_color_manual(['blueviolet', 'cyan', 'orange']))

        elif hist_plot_type.upper() == 'ALL':
            s = pd.Series([
                'blue', 'green', 'red', 'lightness', 'green-magenta',
                'blue-yellow', 'hue', 'saturation', 'value'
            ],
                          dtype="category")
            color_channels = [
                'blue', 'yellow', 'green', 'magenta', 'blueviolet', 'dimgray',
                'red', 'cyan', 'orange'
            ]
            df_all = pd.melt(dataset,
                             id_vars=['bins'],
                             value_vars=s,
                             var_name='Color Channel',
                             value_name='Pixels')
            hist_fig = (ggplot(
                df_all, aes(x='bins', y='Pixels', color='Color Channel')) +
                        geom_line() +
                        scale_x_continuous(breaks=list(range(0, 256, 25))) +
                        scale_color_manual(color_channels))
        analysis_image = hist_fig
    # Hue values of zero are red, but zero is also assigned to pixels whose hue is undefined
    # (i.e., where the color values are saturated), so hue values of 0 are excluded from the calculations below

    # Calculate the median hue value (median is rescaled from the encoded 0-179 range to the 0-359 degree range)
    hue_median = np.median(h[np.where(h > 0)]) * 2

    # Calculate the circular mean and standard deviation of the encoded hue values
    # The mean and standard-deviation are rescaled from the encoded 0-179 range to the 0-359 degree range
    hue_circular_mean = stats.circmean(h[np.where(h > 0)], high=179, low=0) * 2
    hue_circular_std = stats.circstd(h[np.where(h > 0)], high=179, low=0) * 2

    # Plot or print the histogram
    if hist_plot_type is not None:
        params.device += 1
        _debug(visual=hist_fig,
               filename=os.path.join(
                   params.debug_outdir,
                   str(params.device) + '_analyze_color_hist.png'))

    # Store into global measurements
    # RGB signal values are in an unsigned 8-bit scale of 0-255
    rgb_values = [i for i in range(0, 256)]
    # Hue values are in a 0-359 degree scale, every 2 degrees at the midpoint of the interval
    hue_values = [i * 2 + 1 for i in range(0, 180)]
    # Percentage values on a 0-100 scale (lightness, saturation, and value)
    percent_values = [round((i / 255) * 100, 2) for i in range(0, 256)]
    # Diverging values on a -128 to 127 scale (green-magenta and blue-yellow)
    diverging_values = [i for i in range(-128, 128)]

    if hist_plot_type is not None:
        if hist_plot_type.upper() == 'RGB' or hist_plot_type.upper() == 'ALL':
            outputs.add_observation(sample=label,
                                    variable='blue_frequencies',
                                    trait='blue frequencies',
                                    method='plantcv.plantcv.analyze_color',
                                    scale='frequency',
                                    datatype=list,
                                    value=histograms["b"]["hist"],
                                    label=rgb_values)
            outputs.add_observation(sample=label,
                                    variable='green_frequencies',
                                    trait='green frequencies',
                                    method='plantcv.plantcv.analyze_color',
                                    scale='frequency',
                                    datatype=list,
                                    value=histograms["g"]["hist"],
                                    label=rgb_values)
            outputs.add_observation(sample=label,
                                    variable='red_frequencies',
                                    trait='red frequencies',
                                    method='plantcv.plantcv.analyze_color',
                                    scale='frequency',
                                    datatype=list,
                                    value=histograms["r"]["hist"],
                                    label=rgb_values)

        if hist_plot_type.upper() == 'LAB' or hist_plot_type.upper() == 'ALL':
            outputs.add_observation(sample=label,
                                    variable='lightness_frequencies',
                                    trait='lightness frequencies',
                                    method='plantcv.plantcv.analyze_color',
                                    scale='frequency',
                                    datatype=list,
                                    value=histograms["l"]["hist"],
                                    label=percent_values)
            outputs.add_observation(sample=label,
                                    variable='green-magenta_frequencies',
                                    trait='green-magenta frequencies',
                                    method='plantcv.plantcv.analyze_color',
                                    scale='frequency',
                                    datatype=list,
                                    value=histograms["m"]["hist"],
                                    label=diverging_values)
            outputs.add_observation(sample=label,
                                    variable='blue-yellow_frequencies',
                                    trait='blue-yellow frequencies',
                                    method='plantcv.plantcv.analyze_color',
                                    scale='frequency',
                                    datatype=list,
                                    value=histograms["y"]["hist"],
                                    label=diverging_values)

        if hist_plot_type.upper() == 'HSV' or hist_plot_type.upper() == 'ALL':
            outputs.add_observation(sample=label,
                                    variable='hue_frequencies',
                                    trait='hue frequencies',
                                    method='plantcv.plantcv.analyze_color',
                                    scale='frequency',
                                    datatype=list,
                                    value=histograms["h"]["hist"][0:180],
                                    label=hue_values)
            outputs.add_observation(sample=label,
                                    variable='saturation_frequencies',
                                    trait='saturation frequencies',
                                    method='plantcv.plantcv.analyze_color',
                                    scale='frequency',
                                    datatype=list,
                                    value=histograms["s"]["hist"],
                                    label=percent_values)
            outputs.add_observation(sample=label,
                                    variable='value_frequencies',
                                    trait='value frequencies',
                                    method='plantcv.plantcv.analyze_color',
                                    scale='frequency',
                                    datatype=list,
                                    value=histograms["v"]["hist"],
                                    label=percent_values)

    # Always save hue stats
    outputs.add_observation(sample=label,
                            variable='hue_circular_mean',
                            trait='hue circular mean',
                            method='plantcv.plantcv.analyze_color',
                            scale='degrees',
                            datatype=float,
                            value=hue_circular_mean,
                            label='degrees')
    outputs.add_observation(sample=label,
                            variable='hue_circular_std',
                            trait='hue circular standard deviation',
                            method='plantcv.plantcv.analyze_color',
                            scale='degrees',
                            datatype=float,
                            value=hue_circular_std,
                            label='degrees')
    outputs.add_observation(sample=label,
                            variable='hue_median',
                            trait='hue median',
                            method='plantcv.plantcv.analyze_color',
                            scale='degrees',
                            datatype=float,
                            value=hue_median,
                            label='degrees')

    # Store images
    outputs.images.append(analysis_image)

    return analysis_image
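
The hue statistics stored above rely on circular averaging, since hue wraps around at 360 degrees. Below is a minimal NumPy sketch of a circular mean (illustrative only; not necessarily the implementation PlantCV itself uses):

import numpy as np

def circular_mean_degrees(hue_degrees):
    # Average unit vectors so that e.g. 350 and 10 degrees average to ~0, not 180
    radians = np.deg2rad(np.asarray(hue_degrees, dtype=float))
    mean_angle = np.arctan2(np.sin(radians).mean(), np.cos(radians).mean())
    return np.rad2deg(mean_angle) % 360

print(circular_mean_degrees([350, 10]))  # ~0.0 rather than the naive 180.0
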
Example #11
def find_branch_pts(skel_img, mask=None):
    """
    The branching algorithm was inspired by Jean-Patrick Pommier: https://gist.github.com/jeanpat/5712699
    Inputs:
    skel_img    = Skeletonized image
    mask        = (Optional) binary mask for debugging. If provided, debug image will be overlaid on the mask.

    Returns:
    branch_pts_img = Image with just branch points, rest 0

    :param skel_img: numpy.ndarray
    :param mask: numpy.ndarray
    :return branch_pts_img: numpy.ndarray
    """

    # In a kernel: 1 values line up with 255s, -1s line up with 0s, and 0s correspond to don't care
    # T like branch points
    t1 = np.array([[-1, 1, -1], [1, 1, 1], [-1, -1, -1]])
    t2 = np.array([[1, -1, 1], [-1, 1, -1], [1, -1, -1]])
    t3 = np.rot90(t1)
    t4 = np.rot90(t2)
    t5 = np.rot90(t3)
    t6 = np.rot90(t4)
    t7 = np.rot90(t5)
    t8 = np.rot90(t6)

    # Y like branch points
    y1 = np.array([[1, -1, 1], [0, 1, 0], [0, 1, 0]])
    y2 = np.array([[-1, 1, -1], [1, 1, 0], [-1, 0, 1]])
    y3 = np.rot90(y1)
    y4 = np.rot90(y2)
    y5 = np.rot90(y3)
    y6 = np.rot90(y4)
    y7 = np.rot90(y5)
    y8 = np.rot90(y6)
    kernels = [t1, t2, t3, t4, t5, t6, t7, t8, y1, y2, y3, y4, y5, y6, y7, y8]

    branch_pts_img = np.zeros(skel_img.shape[:2], dtype=int)

    # Store branch points
    for kernel in kernels:
        branch_pts_img = np.logical_or(
            cv2.morphologyEx(skel_img,
                             op=cv2.MORPH_HITMISS,
                             kernel=kernel,
                             borderType=cv2.BORDER_CONSTANT,
                             borderValue=0), branch_pts_img)

    # Switch type to uint8 rather than bool
    branch_pts_img = branch_pts_img.astype(np.uint8) * 255

    # Store debug
    debug = params.debug
    params.debug = None

    # Make debugging image
    if mask is None:
        dilated_skel = dilate(skel_img, params.line_thickness, 1)
        branch_plot = cv2.cvtColor(dilated_skel, cv2.COLOR_GRAY2RGB)
    else:
        # Make debugging image on mask
        mask_copy = mask.copy()
        branch_plot = cv2.cvtColor(mask_copy, cv2.COLOR_GRAY2RGB)
        skel_obj, skel_hier = find_objects(skel_img, skel_img)
        cv2.drawContours(branch_plot,
                         skel_obj,
                         -1, (150, 150, 150),
                         params.line_thickness,
                         lineType=8,
                         hierarchy=skel_hier)

    branch_objects, _ = find_objects(branch_pts_img, branch_pts_img)

    # Initialize lists of branch point coordinates and labels
    branch_list = []
    branch_labels = []
    for i, branch in enumerate(branch_objects):
        x, y = branch.ravel()[:2]
        coord = (int(x), int(y))
        branch_list.append(coord)
        branch_labels.append(i)
        cv2.circle(branch_plot, (x, y), params.line_thickness, (255, 0, 255),
                   -1)

    outputs.add_observation(
        variable='branch_pts',
        trait='list of branch-point coordinates identified from a skeleton',
        method='plantcv.plantcv.morphology.find_branch_pts',
        scale='pixels',
        datatype=list,
        value=branch_list,
        label=branch_labels)

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(
            branch_plot,
            os.path.join(params.debug_outdir,
                         str(params.device) + '_branch_pts.png'))
    elif params.debug == 'plot':
        plot_image(branch_plot)

    return branch_pts_img
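
A hypothetical usage sketch (not from the source), assuming a PlantCV 3.x-style API in which a binary mask has already been produced; the file name, channel, and threshold below are placeholders:

from plantcv import plantcv as pcv

img, path, filename = pcv.readimage("plant_image.png")    # placeholder file name
gray = pcv.rgb2gray_lab(rgb_img=img, channel="a")         # green-magenta channel
mask = pcv.threshold.binary(gray_img=gray, threshold=120,  # placeholder threshold
                            max_value=255, object_type="dark")
skeleton = pcv.morphology.skeletonize(mask=mask)
branch_pts = pcv.morphology.find_branch_pts(skel_img=skeleton, mask=mask)
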
Example #12
def analyze_bound_horizontal(img, obj, mask, line_position):
    """User-input boundary line tool

    Inputs:
    img             = RGB or grayscale image data for plotting
    obj             = single or grouped contour object
    mask            = Binary mask made from selected contours
    line_position   = position of boundary line (a value of 0 would draw the line through the top of the image)

    Returns:
    analysis_images = list of output images

    :param img: numpy.ndarray
    :param obj: list
    :param mask: numpy.ndarray
    :param line_position: int
    :return analysis_images: list
    """

    params.device += 1
    ori_img = np.copy(img)

    # Draw a horizontal line through the image at the user-specified height
    if len(np.shape(ori_img)) == 2:
        ori_img = cv2.cvtColor(ori_img, cv2.COLOR_GRAY2BGR)
    iy, ix, iz = np.shape(ori_img)
    size = (iy, ix)
    size1 = (iy, ix, 3)
    background = np.zeros(size, dtype=np.uint8)
    wback = (np.zeros(size1, dtype=np.uint8)) + 255
    x_coor = int(ix)
    y_coor = line_position
    rec_corner = int(iy - 2)
    rec_point1 = (1, rec_corner)
    rec_point2 = (x_coor - 2, y_coor - 2)
    cv2.rectangle(background, rec_point1, rec_point2, (255), 1)
    below_contour, below_hierarchy = cv2.findContours(
        background, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]

    x, y, width, height = cv2.boundingRect(obj)

    if y_coor - y <= 0:
        height_above_bound = 0
        height_below_bound = height
    elif y_coor - y > 0:
        height_1 = y_coor - y
        if height - height_1 <= 0:
            height_above_bound = height
            height_below_bound = 0
        else:
            height_above_bound = y_coor - y
            height_below_bound = height - height_above_bound

    below = []
    above = []
    mask_nonzerox, mask_nonzeroy = np.nonzero(mask)
    obj_points = np.vstack((mask_nonzeroy, mask_nonzerox))
    obj_points1 = np.transpose(obj_points)

    for i, c in enumerate(obj_points1):
        xy = tuple(c)
        pptest = cv2.pointPolygonTest(below_contour[0], xy, measureDist=False)
        if pptest == 1:
            below.append(xy)
            cv2.circle(ori_img, xy, 1, (155, 0, 255))
            cv2.circle(wback, xy, 1, (155, 0, 255))
        else:
            above.append(xy)
            cv2.circle(ori_img, xy, 1, (0, 255, 0))
            cv2.circle(wback, xy, 1, (0, 255, 0))
    above_bound_area = len(above)
    below_bound_area = len(below)
    percent_bound_area_above = (
        (float(above_bound_area)) /
        (float(above_bound_area + below_bound_area))) * 100
    percent_bound_area_below = (
        (float(below_bound_area)) /
        (float(above_bound_area + below_bound_area))) * 100

    analysis_images = []

    if above_bound_area or below_bound_area:
        point3 = (0, y_coor - 4)
        point4 = (x_coor, y_coor - 4)
        cv2.line(ori_img, point3, point4, (255, 0, 255), params.line_thickness)
        cv2.line(wback, point3, point4, (255, 0, 255), params.line_thickness)
        m = cv2.moments(mask, binaryImage=True)
        cmx, cmy = (m['m10'] / m['m00'], m['m01'] / m['m00'])
        if y_coor - y <= 0:
            cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height),
                     (0, 255, 0), params.line_thickness)
            cv2.line(wback, (int(cmx), y), (int(cmx), y + height), (0, 255, 0),
                     params.line_thickness)
        elif y_coor - y > 0:
            height_1 = y_coor - y
            if height - height_1 <= 0:
                cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height),
                         (255, 0, 0), params.line_thickness)
                cv2.line(wback, (int(cmx), y), (int(cmx), y + height),
                         (255, 0, 0), params.line_thickness)
            else:
                cv2.line(ori_img, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor - height_above_bound), (255, 0, 0),
                         params.line_thickness)
                cv2.line(ori_img, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor + height_below_bound), (0, 255, 0),
                         params.line_thickness)
                cv2.line(wback, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor - height_above_bound), (255, 0, 0),
                         params.line_thickness)
                cv2.line(wback, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor + height_below_bound), (0, 255, 0),
                         params.line_thickness)
        # Output image with boundary line, above/below bound area
        analysis_images.append(wback)
        analysis_images.append(ori_img)

    if params.debug is not None:
        point3 = (0, y_coor - 4)
        point4 = (x_coor, y_coor - 4)
        cv2.line(ori_img, point3, point4, (255, 0, 255), params.line_thickness)
        cv2.line(wback, point3, point4, (255, 0, 255), params.line_thickness)
        m = cv2.moments(mask, binaryImage=True)
        cmx, cmy = (m['m10'] / m['m00'], m['m01'] / m['m00'])
        if y_coor - y <= 0:
            cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height),
                     (0, 255, 0), params.line_thickness)
            cv2.line(wback, (int(cmx), y), (int(cmx), y + height), (0, 255, 0),
                     params.line_thickness)
        elif y_coor - y > 0:
            height_1 = y_coor - y
            if height - height_1 <= 0:
                cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height),
                         (255, 0, 0), params.line_thickness)
                cv2.line(wback, (int(cmx), y), (int(cmx), y + height),
                         (255, 0, 0), params.line_thickness)
            else:
                cv2.line(ori_img, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor - height_above_bound), (255, 0, 0),
                         params.line_thickness)
                cv2.line(ori_img, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor + height_below_bound), (0, 255, 0),
                         params.line_thickness)
                cv2.line(wback, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor - height_above_bound), (255, 0, 0),
                         params.line_thickness)
                cv2.line(wback, (int(cmx), y_coor - 2),
                         (int(cmx), y_coor + height_below_bound), (0, 255, 0),
                         params.line_thickness)
        if params.debug == 'print':
            print_image(
                wback,
                os.path.join(params.debug_outdir,
                             str(params.device) + '_boundary_on_white.jpg'))
            print_image(
                ori_img,
                os.path.join(params.debug_outdir,
                             str(params.device) + '_boundary_on_img.jpg'))
        if params.debug == 'plot':
            plot_image(wback)
            plot_image(ori_img)

    outputs.add_observation(variable='horizontal_reference_position',
                            trait='horizontal reference position',
                            method='plantcv.plantcv.analyze_bound_horizontal',
                            scale='none',
                            datatype=int,
                            value=line_position,
                            label='none')
    outputs.add_observation(variable='height_above_reference',
                            trait='height above reference',
                            method='plantcv.plantcv.analyze_bound_horizontal',
                            scale='pixels',
                            datatype=int,
                            value=height_above_bound,
                            label='pixels')
    outputs.add_observation(variable='height_below_reference',
                            trait='height below reference',
                            method='plantcv.plantcv.analyze_bound_horizontal',
                            scale='pixels',
                            datatype=int,
                            value=height_below_bound,
                            label='pixels')
    outputs.add_observation(variable='area_above_reference',
                            trait='area above reference',
                            method='plantcv.plantcv.analyze_bound_horizontal',
                            scale='pixels',
                            datatype=int,
                            value=above_bound_area,
                            label='pixels')
    outputs.add_observation(variable='percent_area_above_reference',
                            trait='percent area above reference',
                            method='plantcv.plantcv.analyze_bound_horizontal',
                            scale='none',
                            datatype=float,
                            value=percent_bound_area_above,
                            label='none')
    outputs.add_observation(variable='area_below_reference',
                            trait='area below reference',
                            method='plantcv.plantcv.analyze_bound_horizontal',
                            scale='pixels',
                            datatype=int,
                            value=below_bound_area,
                            label='pixels')
    outputs.add_observation(variable='percent_area_below_reference',
                            trait='percent area below reference',
                            method='plantcv.plantcv.analyze_bound_horizontal',
                            scale='none',
                            datatype=float,
                            value=percent_bound_area_below,
                            label='none')

    # Store images
    outputs.images.append(analysis_images)

    return analysis_images
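
The above/below classification hinges on cv2.pointPolygonTest against the rectangle drawn below the boundary line. A small self-contained sketch of that call pattern (synthetic rectangle and points, illustrative only):

import cv2
import numpy as np

# Rectangle standing in for the region below the boundary line (y from 50 to 100)
rect = np.array([[[0, 50]], [[100, 50]], [[100, 100]], [[0, 100]]], dtype=np.int32)
for pt in [(10.0, 60.0), (10.0, 40.0)]:
    inside = cv2.pointPolygonTest(rect, pt, measureDist=False)  # +1 inside, -1 outside
    print(pt, "below" if inside > 0 else "above")
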
Example #13
def check_cycles(skel_img):
    """ Check for cycles in a skeleton image
    Inputs:
    skel_img     = Skeletonized image

    Returns:
    cycle_img    = Image with cycles identified

    :param skel_img: numpy.ndarray
    :return cycle_img: numpy.ndarray
    """

    # Store debug
    debug = params.debug
    params.debug = None

    # Create the mask needed for cv2.floodFill, must be larger than the image
    h, w = skel_img.shape[:2]
    mask = np.zeros((h + 2, w + 2), np.uint8)

    # Copy the skeleton since cv2.floodFill will draw on it
    skel_copy = skel_img.copy()
    cv2.floodFill(skel_copy, mask=mask, seedPoint=(0, 0), newVal=255)

    # Invert so the holes are white and background black
    just_cycles = cv2.bitwise_not(skel_copy)

    # Erode slightly so that cv2.findContours doesn't think diagonal pixels are separate contours
    just_cycles = erode(just_cycles, 2, 1)

    # Use pcv.find_objects to turn blobs of holes into countable contours
    cycle_objects, cycle_hierarchies = find_objects(just_cycles, just_cycles)

    # Count the number of holes
    num_cycles = len(cycle_objects)

    # Make debugging image
    cycle_img = skel_img.copy()
    cycle_img = dilate(cycle_img, params.line_thickness, 1)
    cycle_img = cv2.cvtColor(cycle_img, cv2.COLOR_GRAY2RGB)
    rand_color = color_palette(num_cycles)
    for i, cnt in enumerate(cycle_objects):
        cv2.drawContours(cycle_img, cycle_objects, i, rand_color[i], params.line_thickness, lineType=8,
                         hierarchy=cycle_hierarchies)

    # Store Cycle Data
    outputs.add_observation(variable='num_cycles', trait='number of cycles',
                            method='plantcv.plantcv.morphology.check_cycles', scale='none', datatype=int,
                            value=num_cycles, label='none')

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(cycle_img, os.path.join(params.debug_outdir, str(params.device) + '_cycles.png'))
    elif params.debug == 'plot':
        plot_image(cycle_img)

    return cycle_img
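
The flood-fill trick above can be illustrated stand-alone: fill the background from a corner, invert, and whatever is still foreground must have been enclosed. A minimal sketch with a synthetic 9x9 loop:

import cv2
import numpy as np

skel = np.zeros((9, 9), np.uint8)
cv2.rectangle(skel, (2, 2), (6, 6), 255, 1)      # a closed loop = one cycle

filled = skel.copy()
fill_mask = np.zeros((skel.shape[0] + 2, skel.shape[1] + 2), np.uint8)
cv2.floodFill(filled, fill_mask, seedPoint=(0, 0), newVal=255)
holes = cv2.bitwise_not(filled)                  # enclosed pixels become white
contours, _ = cv2.findContours(holes, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
print(len(contours))                             # 1
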
Example #14
def analyze_thermal_values(thermal_array,
                           mask,
                           histplot=None,
                           label="default"):
    """This extracts the thermal values of each pixel writes the values out to
       a file. It can also print out a histogram plot of pixel intensity
       and a pseudocolor image of the plant.

    Inputs:
    thermal_array = numpy array of thermal values
    mask          = Binary mask made from selected contours
    histplot      = (deprecated) if True plots histogram of intensity values
    label        = optional label parameter, modifies the variable name of observations recorded

    Returns:
    analysis_image = output image

    :param thermal_array: numpy.ndarray
    :param mask: numpy.ndarray
    :param histplot: bool
    :param label: str
    :return analysis_image: ggplot
    """

    if histplot is not None:
        deprecation_warning(
            "'histplot' will be deprecated in a future version of PlantCV. "
            "This function creates a histogram by default.")

    # Store debug mode
    debug = params.debug

    # apply plant shaped mask to image and calculate statistics based on the masked image
    masked_thermal = thermal_array[np.where(mask > 0)]
    maxtemp = np.amax(masked_thermal)
    mintemp = np.amin(masked_thermal)
    avgtemp = np.average(masked_thermal)
    mediantemp = np.median(masked_thermal)

    # call the histogram function
    params.debug = None
    hist_fig, hist_data = histogram(thermal_array, mask=mask, hist_data=True)
    bin_labels = hist_data['pixel intensity'].tolist()
    hist_percent = hist_data['proportion of pixels (%)'].tolist()

    # Store data into outputs class
    outputs.add_observation(sample=label,
                            variable='max_temp',
                            trait='maximum temperature',
                            method='plantcv.plantcv.analyze_thermal_values',
                            scale='degrees',
                            datatype=float,
                            value=maxtemp,
                            label='degrees')
    outputs.add_observation(sample=label,
                            variable='min_temp',
                            trait='minimum temperature',
                            method='plantcv.plantcv.analyze_thermal_values',
                            scale='degrees',
                            datatype=float,
                            value=mintemp,
                            label='degrees')
    outputs.add_observation(sample=label,
                            variable='mean_temp',
                            trait='mean temperature',
                            method='plantcv.plantcv.analyze_thermal_values',
                            scale='degrees',
                            datatype=float,
                            value=avgtemp,
                            label='degrees')
    outputs.add_observation(sample=label,
                            variable='median_temp',
                            trait='median temperature',
                            method='plantcv.plantcv.analyze_thermal_values',
                            scale='degrees',
                            datatype=float,
                            value=mediantemp,
                            label='degrees')
    outputs.add_observation(sample=label,
                            variable='thermal_frequencies',
                            trait='thermal frequencies',
                            method='plantcv.plantcv.analyze_thermal_values',
                            scale='frequency',
                            datatype=list,
                            value=hist_percent,
                            label=bin_labels)
    # Restore user debug setting
    params.debug = debug

    # Label the histogram axes
    hist_fig = hist_fig + labs(x="Temperature C", y="Proportion of pixels (%)")

    # Print or plot histogram
    _debug(visual=hist_fig,
           filename=os.path.join(params.debug_outdir,
                                 str(params.device) + "_therm_histogram.png"))

    analysis_image = hist_fig
    # Store images
    outputs.images.append(analysis_image)

    return analysis_image
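
A small synthetic check (illustrative, not from the PlantCV docs) of the masked statistics computed above: indexing the thermal array with the nonzero mask pixels yields only plant temperatures.

import numpy as np

thermal = np.array([[20.0, 21.5, 30.2],
                    [19.8, 28.7, 29.9]])
mask = np.array([[0, 0, 255],
                 [0, 255, 255]], dtype=np.uint8)

plant_temps = thermal[np.where(mask > 0)]   # [30.2, 28.7, 29.9]
print(plant_temps.max(), plant_temps.min(), plant_temps.mean(), np.median(plant_temps))
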
Example #15
def analyze_nir_intensity(gray_img, mask, bins=256, histplot=False):
    """This function calculates the intensity of each pixel associated with the plant and writes the values out to
       a file. It can also print out a histogram plot of pixel intensity and a pseudocolor image of the plant.

    Inputs:
    gray_img     = 8- or 16-bit grayscale image data
    mask         = Binary mask made from selected contours
    bins         = number of classes to divide spectrum into
    histplot     = if True plots histogram of intensity values

    Returns:
    analysis_images = NIR histogram image

    :param gray_img: numpy.ndarray
    :param mask: numpy.ndarray
    :param bins: int
    :param histplot: bool
    :return analysis_images: plotnine ggplot
    """

    params.device += 1

    # apply plant shaped mask to image
    mask1 = binary_threshold(mask, 0, 255, 'light')
    mask1 = (mask1 / 255)
    # masked = np.multiply(gray_img, mask1)

    # calculate histogram
    if gray_img.dtype == 'uint16':
        maxval = 65536
    else:
        maxval = 256

    # Make a pseudo-RGB image
    rgbimg = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2BGR)

    # Calculate histogram
    hist_nir = [
        float(l[0])
        for l in cv2.calcHist([gray_img], [0], mask, [bins], [0, maxval])
    ]
    # Create list of bin labels
    bin_width = maxval / float(bins)
    b = 0
    bin_labels = [float(b)]
    for i in range(bins - 1):
        b += bin_width
        bin_labels.append(b)

    # make hist percentage for plotting
    pixels = cv2.countNonZero(mask1)
    hist_percent = [(p / float(pixels)) * 100 for p in hist_nir]

    # No longer returning a pseudocolored image
    # make mask to select the background
    # mask_inv = cv2.bitwise_not(mask)
    # img_back = cv2.bitwise_and(rgbimg, rgbimg, mask=mask_inv)
    # img_back1 = cv2.applyColorMap(img_back, colormap=1)

    # mask the background and color the plant with color scheme 'jet'
    # cplant = cv2.applyColorMap(rgbimg, colormap=2)
    # masked1 = apply_mask(cplant, mask, 'black')
    masked1 = cv2.bitwise_and(rgbimg, rgbimg, mask=mask)
    # cplant_back = cv2.add(masked1, img_back1)
    if params.debug is not None:
        if params.debug == "print":
            print_image(
                masked1,
                os.path.join(params.debug_outdir,
                             str(params.device) + "_masked_nir_plant.jpg"))
        if params.debug == "plot":
            plot_image(masked1)

    analysis_image = None

    if histplot is True:
        hist_x = hist_percent
        # bin_labels = np.arange(0, bins)
        dataset = pd.DataFrame({
            'Grayscale pixel intensity': bin_labels,
            'Proportion of pixels (%)': hist_x
        })
        fig_hist = (ggplot(data=dataset,
                           mapping=aes(x='Grayscale pixel intensity',
                                       y='Proportion of pixels (%)')) +
                    geom_line(color='red') +
                    scale_x_continuous(breaks=list(range(0, maxval, 25))))

        analysis_image = fig_hist
        if params.debug == "print":
            fig_hist.save(
                os.path.join(params.debug_outdir,
                             str(params.device) + '_nir_hist.png'))
        elif params.debug == "plot":
            print(fig_hist)

    outputs.add_observation(variable='nir_frequencies',
                            trait='near-infrared frequencies',
                            method='plantcv.plantcv.analyze_nir_intensity',
                            scale='frequency',
                            datatype=list,
                            value=hist_nir,
                            label=bin_labels)

    # Store images
    outputs.images.append(analysis_image)

    return analysis_image
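
The histogram arithmetic above follows a common cv2.calcHist pattern; a minimal sketch with synthetic 8-bit data (bin count and image size are arbitrary):

import cv2
import numpy as np

gray = np.random.randint(0, 256, (10, 10), dtype=np.uint8)
mask = np.full((10, 10), 255, dtype=np.uint8)
bins = 16
hist = cv2.calcHist([gray], [0], mask, [bins], [0, 256]).flatten()
percent = hist / cv2.countNonZero(mask) * 100   # convert counts to percentages
print(percent.sum())                            # ~100.0
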
def segment_euclidean_length(segmented_img, objects):
    """ Use segmented skeleton image to gather euclidean length measurements per segment

        Inputs:
        segmented_img = Segmented image to plot lengths on
        objects       = List of contours

        Returns:
        labeled_img      = Segmented debugging image with lengths labeled

        :param segmented_img: numpy.ndarray
        :param objects: list
        :return labeled_img: numpy.ndarray

        """
    # Store debug
    debug = params.debug
    params.debug = None

    x_list = []
    y_list = []
    segment_lengths = []
    rand_color = color_palette(len(objects))


    labeled_img = segmented_img.copy()

    for i, cnt in enumerate(objects):
        # Store coordinates for labels
        x_list.append(objects[i][0][0][0])
        y_list.append(objects[i][0][0][1])

        # Draw segments one by one to group segment tips together
        finding_tips_img = np.zeros(segmented_img.shape[:2], np.uint8)
        cv2.drawContours(finding_tips_img, objects, i, (255, 255, 255), 1, lineType=8)
        segment_tips = find_tips(finding_tips_img)
        tip_objects, tip_hierarchies = find_objects(segment_tips, segment_tips)
        points = []
        if not len(tip_objects) == 2:
            fatal_error("Too many tips found per segment, try pruning again")
        for t in tip_objects:
            # Gather pairs of coordinates
            x, y = t.ravel()
            coord = (x, y)
            points.append(coord)

        # Draw euclidean distance lines
        cv2.line(labeled_img, points[0], points[1], rand_color[i], 1)

        # Calculate euclidean distance between tips of each contour
        segment_lengths.append(euclidean(points[0], points[1]))

    segment_ids = []
    # Put labels of length
    for c, value in enumerate(segment_lengths):
        text = "{:.2f}".format(value)
        w = x_list[c]
        h = y_list[c]
        cv2.putText(img=labeled_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=params.text_size, color=(150, 150, 150), thickness=params.text_thickness)
        segment_label = "ID" + str(c)
        segment_ids.append(c)

    outputs.add_observation(variable='segment_eu_length', trait='segment euclidean length',
                            method='plantcv.plantcv.morphology.segment_euclidean_length', scale='pixels', datatype=list,
                            value=segment_lengths, label=segment_ids)

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(labeled_img, os.path.join(params.debug_outdir, str(params.device) + '_segment_eu_lengths.png'))
    elif params.debug == 'plot':
        plot_image(labeled_img)

    return labeled_img
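
The per-segment length recorded above is simply the straight-line distance between the two segment tips. Assuming the `euclidean` used in the function body is scipy.spatial.distance.euclidean, a quick check:

from scipy.spatial.distance import euclidean

tip_a, tip_b = (10, 4), (14, 7)
print(euclidean(tip_a, tip_b))  # 5.0
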
def landmark_reference_pt_dist(points_r, centroid_r, bline_r):
    """landmark_reference_pt_dist

    For each landmark point, compute the vertical, horizontal, and euclidean distances, and the angle,
    relative to the centroid and to the boundary-line reference point.

    Inputs:
    points_r   = a set of rescaled points (basically the output of the acute_vertex fxn after the scale_features fxn)
    centroid_r = a tuple that contains the rescaled centroid coordinates
    bline_r    = a tuple that contains the rescaled boundary line - centroid coordinates

    :param points_r: ndarray
    :param centroid_r: tuple
    :param bline_r: tuple
    """

    # scaled_img = np.zeros((1500,1500,3), np.uint8)
    # plotter = np.array(points_r)
    # plotter = plotter * 1000
    # for i in plotter:
    #  x,y = i.ravel()
    #  cv2.circle(scaled_img,(int(x) + 250, int(y) + 250),15,(255,255,255),-1)
    # cv2.circle(scaled_img,(int(cmx_scaled * 1000) + 250, int(cmy_scaled * 1000) + 250),25,(0,0,255), -1)
    # cv2.circle(scaled_img,(int(blx_scaled * 1000) + 250, int(bly_scaled * 1000) + 250),25,(0,255,0), -1)
    params.device += 1
    vert_dist_c = []
    hori_dist_c = []
    euc_dist_c = []
    angles_c = []
    cx, cy = centroid_r
    # Check to see if points are numerical or NA
    if not isinstance(cy, numbers.Number):
        return ('NA', 'NA'), ('NA', 'NA'), ('NA', 'NA'), ('NA', 'NA'), ('NA', 'NA'), ('NA', 'NA'), \
               ('NA', 'NA'), ('NA', 'NA')
    # Do this for centroid
    for pt in points_r:
        # Get coordinates from point
        x, y = pt
        # Get vertical distance and append to list
        v = y - cy
        # print "Here is the centroid vertical distance: " + str(v)
        vert_dist_c.append(v)
        # cv2.line(scaled_img, (int((x*1000)+250), int((cy*1000)+250)), (int((x*1000)+250), int((y*1000)+250)),
        #          (0,0,255), 5)
        # Get horizontal distance and append to list
        h = abs(x - cx)
        # print "Here is the centroid horizotnal distance: " + str(h)
        hori_dist_c.append(h)
        e = np.sqrt((cx - x) * (cx - x) + (cy - y) * (cy - y))
        # print "Here is the centroid euclidian distance: " + str(h)
        euc_dist_c.append(e)
        # cv2.line(scaled_img, (int((cx*1000)+250), int((cy*1000)+250)), (int((x*1000)+250),
        #                                                                 int((y*1000)+250)), (0,165,255), 5)
        # a = (h*h + v*v - e*e)/(2*h*v)
        a = (h * h + e * e - v * v) / (2 * h * e)
        # Used a random number generator to test whether either of these cases was possible; neither is
        # if a > 1:              # If float exceeds 1, prevent acos error and force to equal 1
        #     a = 1
        # elif a < -1:           # If float exceeds -1, prevent acos error and force to equal -1
        #     a = -1
        ang = abs(math.degrees(math.acos(a)))
        if v < 0:
            ang = ang * -1
        # print "Here is the centroid angle: " + str(ang)
        angles_c.append(ang)

    vert_ave_c = np.mean(vert_dist_c)
    hori_ave_c = np.mean(hori_dist_c)
    euc_ave_c = np.mean(euc_dist_c)
    ang_ave_c = np.mean(angles_c)
  
    vert_dist_b = []
    hori_dist_b = []
    euc_dist_b = []
    angles_b = []
    bx, by = bline_r
    # Do this for baseline
    for pt in points_r:
        # Get coordinates from point
        x, y = pt
        # Get vertical distance and append to list
        v = y - by
        # print "Here is the baseline vertical distance: " + str(v)
        vert_dist_b.append(v)
        # cv2.line(scaled_img, (int((x*1000)+250), int((by*1000)+250)), (int((x*1000)+250),
        #                                                                int((y*1000)+250)), (255,255,102), 5)
        # Get horizontal distance and append to list
        h = abs(x - bx)
        # print "Here is the baseline horizotnal distance: " + str(h)
        hori_dist_b.append(h)
        e = np.sqrt((bx - x) * (bx - x) + (by - y) * (by - y))
        # print "Here is the baseline euclidian distance: " + str(h)
        euc_dist_b.append(e)
        # cv2.line(scaled_img, (int((bx*1000)+250), int((by*1000)+250)), (int((x*1000)+250),
        #                                                                 int((y*1000)+250)), (255,178,102), 5)
        # a = (h*h + v*v - e*e)/(2*h*v)
        a = (h * h + e * e - v * v) / (2 * h * e)

        # Used a random number generator to test whether either of these cases was possible; neither is
        # if a > 1:              # If float exceeds 1, prevent acos error and force to equal 1
        #     a = 1
        # elif a < -1:           # If float exceeds -1, prevent acos error and force to equal -1
        #     a = -1
        ang = abs(math.degrees(math.acos(a)))
        if v < 0:
            ang = ang * -1
        # print "Here is the baseline angle: " + str(ang)
        angles_b.append(ang)

    vert_ave_b = np.mean(vert_dist_b)
    hori_ave_b = np.mean(hori_dist_b)
    euc_ave_b = np.mean(euc_dist_b)
    ang_ave_b = np.mean(angles_b)
    # cv2.line(scaled_img, (int(2), int((cy*1000)+250)), (int(1498), int((cy*1000)+250)), (0,215,255), 5)
    # cv2.line(scaled_img, (int(2), int((by*1000)+250)), (int(1498), int((by*1000)+250)), (255,0,0), 5)
    # cv2.circle(scaled_img,(int(cx * 1000) + 250, int(cy * 1000) + 250),25,(0,215,255), -1)
    # cv2.circle(scaled_img,(int(bx * 1000) + 250, int(by * 1000) + 250),25,(255,0,0), -1)
    # flipped_scaled = cv2.flip(scaled_img, 0)
    # cv2.imwrite('centroid_dist.png', flipped_scaled)

    outputs.add_observation(variable='vert_ave_c', trait='average vertical distance from centroid',
                            method='plantcv.plantcv.landmark_reference_pt_dist', scale='pixels', datatype=float,
                            value=vert_ave_c, label='pixels')
    outputs.add_observation(variable='hori_ave_c', trait='average horizontal distance from centroid',
                            method='plantcv.plantcv.landmark_reference_pt_dist', scale='pixels', datatype=float,
                            value=hori_ave_c, label='pixels')
    outputs.add_observation(variable='euc_ave_c', trait='average euclidean distance from centroid',
                            method='plantcv.plantcv.landmark_reference_pt_dist', scale='pixels', datatype=float,
                            value=euc_ave_c, label='pixels')
    outputs.add_observation(variable='ang_ave_c', trait='average angle between landmark point and centroid',
                            method='plantcv.plantcv.landmark_reference_pt_dist', scale='degrees', datatype=float,
                            value=ang_ave_c, label='degrees')
    outputs.add_observation(variable='vert_ave_b', trait='average vertical distance from baseline',
                            method='plantcv.plantcv.landmark_reference_pt_dist', scale='pixels', datatype=float,
                            value=vert_ave_b, label='pixels')
    outputs.add_observation(variable='hori_ave_b', trait='average horizontal distance from baseline',
                            method='plantcv.plantcv.landmark_reference_pt_dist', scale='pixels', datatype=float,
                            value=hori_ave_b, label='pixels')
    outputs.add_observation(variable='euc_ave_b', trait='average euclidean distance from baseline',
                            method='plantcv.plantcv.landmark_reference_pt_dist', scale='pixels', datatype=float,
                            value=euc_ave_b, label='pixels')
    outputs.add_observation(variable='ang_ave_b', trait='average angle between landmark point and baseline',
                            method='plantcv.plantcv.landmark_reference_pt_dist', scale='degrees', datatype=float,
                            value=ang_ave_b, label='degrees')
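
A worked numeric check (illustrative) of the law-of-cosines angle computed above: with h the horizontal offset and e the euclidean distance, a = (h*h + e*e - v*v) / (2*h*e) is the cosine of the angle at the reference point, and the sign follows the vertical offset.

import math

h, v = 3.0, -4.0                           # 3 px right of, 4 px above the reference (image y grows downward)
e = math.hypot(h, v)                       # 5.0
a = (h * h + e * e - v * v) / (2 * h * e)  # cosine of the angle at the reference point
ang = math.degrees(math.acos(a))
if v < 0:
    ang = -ang
print(ang)  # about -53.13 degrees
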
Example #18
def report_size_marker_area(img, roi_contour, roi_hierarchy, marker='define', objcolor='dark', thresh_channel=None,
                            thresh=None):
    """Detects a size marker in a specified region and reports its size and eccentricity

    Inputs:
    img             = An RGB or grayscale image to plot the marker object on
    roi_contour     = A region of interest contour (e.g. output from pcv.roi.rectangle or other methods)
    roi_hierarchy   = A region of interest contour hierarchy (e.g. output from pcv.roi.rectangle or other methods)
    marker          = 'define' or 'detect'. With 'define' the ROI itself is treated as the marker; with
                      'detect' the marker is detected within the ROI
    objcolor        = Object color is 'dark' or 'light' (is the marker darker or lighter than the background)
    thresh_channel  = 'h', 's', or 'v' for hue, saturation or value
    thresh          = Binary threshold value (integer)

    Returns:
    analysis_images = List of output images

    :param img: numpy.ndarray
    :param roi_contour: list
    :param roi_hierarchy: numpy.ndarray
    :param marker: str
    :param objcolor: str
    :param thresh_channel: str
    :param thresh: int
    :return: analysis_images: list
    """
    # Store debug
    debug = params.debug
    params.debug = None

    params.device += 1
    # Make a copy of the reference image
    ref_img = np.copy(img)
    # If the reference image is grayscale convert it to color
    if len(np.shape(ref_img)) == 2:
        ref_img = cv2.cvtColor(ref_img, cv2.COLOR_GRAY2BGR)

    # Marker components
    # If the marker type is "define" then the marker_mask and marker_contours are equal to the input ROI
    # Initialize a binary image
    roi_mask = np.zeros(np.shape(img)[:2], dtype=np.uint8)
    # Draw the filled ROI on the mask
    cv2.drawContours(roi_mask, roi_contour, -1, (255), -1)
    marker_mask = []
    marker_contour = []

    # If the marker type is "detect" then we will use the ROI to isolate marker contours from the input image
    if marker.upper() == 'DETECT':
        # We need to convert the input image into one of the HSV channels and then threshold it
        if thresh_channel is not None and thresh is not None:
            # Mask the input image
            masked = apply_mask(rgb_img=ref_img, mask=roi_mask, mask_color="black")
            # Convert the masked image to hue, saturation, or value
            marker_hsv = rgb2gray_hsv(rgb_img=masked, channel=thresh_channel)
            # Threshold the HSV image
            marker_bin = binary_threshold(gray_img=marker_hsv, threshold=thresh, max_value=255, object_type=objcolor)
            # Identify contours in the masked image
            contours, hierarchy = find_objects(img=ref_img, mask=marker_bin)
            # Filter marker contours using the input ROI
            kept_contours, kept_hierarchy, kept_mask, obj_area = roi_objects(img=ref_img, object_contour=contours,
                                                                             obj_hierarchy=hierarchy,
                                                                             roi_contour=roi_contour,
                                                                             roi_hierarchy=roi_hierarchy,
                                                                             roi_type="partial")
            # If more than one contour is detected, combine them into one
            # These become the marker contour and mask
            marker_contour, marker_mask = object_composition(img=ref_img, contours=kept_contours,
                                                             hierarchy=kept_hierarchy)
        else:
            fatal_error('thresh_channel and thresh must be defined in detect mode')
    elif marker.upper() == "DEFINE":
        # Identify contours in the masked image
        contours, hierarchy = find_objects(img=ref_img, mask=roi_mask)
        # If more than one contour is detected, combine them into one
        # These become the marker contour and mask
        marker_contour, marker_mask = object_composition(img=ref_img, contours=contours, hierarchy=hierarchy)
    else:
        fatal_error("marker must be either 'define' or 'detect' but {0} was provided.".format(marker))

    # Calculate the moments of the defined marker region
    m = cv2.moments(marker_mask, binaryImage=True)
    # Calculate the marker area
    marker_area = m['m00']

    # Fit a bounding ellipse to the marker
    center, axes, angle = cv2.fitEllipse(marker_contour)
    major_axis = np.argmax(axes)
    minor_axis = 1 - major_axis
    major_axis_length = axes[major_axis]
    minor_axis_length = axes[minor_axis]
    # Calculate the bounding ellipse eccentricity
    eccentricity = np.sqrt(1 - (axes[minor_axis] / axes[major_axis]) ** 2)

    # Make a list to store output images
    analysis_image = []
    cv2.drawContours(ref_img, marker_contour, -1, (255, 0, 0), 5)
    # out_file = os.path.splitext(filename)[0] + '_sizemarker.jpg'
    # print_image(ref_img, out_file)
    analysis_image.append(ref_img)

    # Reset debug mode
    params.debug = debug
    
    if params.debug == 'print':
        print_image(ref_img, os.path.join(params.debug_outdir, str(params.device) + '_marker_shape.png'))
    elif params.debug == 'plot':
        plot_image(ref_img)

    outputs.add_observation(variable='marker_area', trait='marker area',
                            method='plantcv.plantcv.report_size_marker_area', scale='pixels', datatype=int,
                            value=marker_area, label='pixels')
    outputs.add_observation(variable='marker_ellipse_major_axis', trait='marker ellipse major axis length',
                            method='plantcv.plantcv.report_size_marker_area', scale='pixels', datatype=int,
                            value=major_axis_length, label='pixels')
    outputs.add_observation(variable='marker_ellipse_minor_axis', trait='marker ellipse minor axis length',
                            method='plantcv.plantcv.report_size_marker_area', scale='pixels', datatype=int,
                            value=minor_axis_length, label='pixels')
    outputs.add_observation(variable='marker_ellipse_eccentricity', trait='marker ellipse eccentricity',
                            method='plantcv.plantcv.report_size_marker_area', scale='none', datatype=float,
                            value=eccentricity, label='none')

    # Store images
    outputs.images.append(analysis_image)

    return analysis_image
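
A quick numeric check (illustrative) of the bounding-ellipse eccentricity formula used above, e = sqrt(1 - (minor/major)**2):

import numpy as np

major, minor = 10.0, 6.0
print(np.sqrt(1 - (minor / major) ** 2))  # 0.8
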
def analyze_bound_vertical(img, obj, mask, line_position):
    """User-input boundary line tool

    Inputs:
    img             = RGB or grayscale image data for plotting
    obj             = single or grouped contour object
    mask            = Binary mask made from selected contours
    line_position   = position of boundary line (a value of 0 would draw the line through the left side of the image)

    Returns:
    analysis_images = output images

    :param img: numpy.ndarray
    :param obj: list
    :param mask: numpy.ndarray
    :param line_position: int
    :return analysis_images: list
    """

    params.device += 1
    ori_img = np.copy(img)

    # Draw a vertical line through the image at the user-specified position
    if len(np.shape(ori_img)) == 2:
        ori_img = cv2.cvtColor(ori_img, cv2.COLOR_GRAY2BGR)
    iy, ix, iz = np.shape(ori_img)
    size = (iy, ix)
    size1 = (iy, ix, 3)
    background = np.zeros(size, dtype=np.uint8)
    wback = (np.zeros(size1, dtype=np.uint8)) + 255
    x_coor = 0 + int(line_position)
    y_coor = int(iy)
    rec_point1 = (0, 0)
    rec_point2 = (x_coor, y_coor - 2)
    cv2.rectangle(background, rec_point1, rec_point2, (255), -1)
    right_contour, right_hierarchy = cv2.findContours(background, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]

    x, y, width, height = cv2.boundingRect(obj)

    if x_coor - x <= 0:
        width_left_bound = 0
        width_right_bound = width
    elif x_coor - x > 0:
        width_1 = x_coor - x
        if width - width_1 <= 0:
            width_left_bound = width
            width_right_bound = 0
        else:
            width_left_bound = x_coor - x
            width_right_bound = width - width_left_bound

    right = []
    left = []
    mask_nonzerox, mask_nonzeroy = np.nonzero(mask)
    obj_points = np.vstack((mask_nonzeroy, mask_nonzerox))
    obj_points1 = np.transpose(obj_points)

    for i, c in enumerate(obj_points1):
        xy = tuple(c)
        pptest = cv2.pointPolygonTest(right_contour[0], xy, measureDist=False)
        if pptest == 1:
            left.append(xy)
            cv2.circle(ori_img, xy, 1, (155, 0, 255))
            cv2.circle(wback, xy, 1, (155, 0, 255))
        else:
            right.append(xy)
            cv2.circle(ori_img, xy, 1, (0, 255, 0))
            cv2.circle(wback, xy, 1, (0, 255, 0))
    right_bound_area = len(right)
    left_bound_area = len(left)
    percent_bound_area_right = ((float(right_bound_area)) / (float(left_bound_area + right_bound_area))) * 100
    percent_bound_area_left = ((float(left_bound_area)) / (float(right_bound_area + left_bound_area))) * 100

    analysis_images = []

    if left_bound_area or right_bound_area:
        point3 = (x_coor+2, 0)
        point4 = (x_coor+2, y_coor)
        cv2.line(ori_img, point3, point4, (255, 0, 255), params.line_thickness)
        cv2.line(wback, point3, point4, (255, 0, 255), params.line_thickness)
        m = cv2.moments(mask, binaryImage=True)
        cmx, cmy = (m['m10'] / m['m00'], m['m01'] / m['m00'])
        if x_coor - x <= 0:
            cv2.line(ori_img, (x, int(cmy)), (x + width, int(cmy)), (0, 255, 0), params.line_thickness)
            cv2.line(wback, (x, int(cmy)), (x + width, int(cmy)), (0, 255, 0), params.line_thickness)
        elif x_coor - x > 0:
            width_1 = x_coor - x
            if width - width_1 <= 0:
                cv2.line(ori_img, (x, int(cmy)), (x + width, int(cmy)), (255, 0, 0), params.line_thickness)
                cv2.line(wback, (x, int(cmy)), (x + width, int(cmy)), (255, 0, 0), params.line_thickness)
            else:
                cv2.line(ori_img, (x_coor + 2, int(cmy)), (x_coor + width_left_bound, int(cmy)), (255, 0, 0),
                         params.line_thickness)
                cv2.line(ori_img, (x_coor + 2, int(cmy)), (x_coor - width_right_bound, int(cmy)), (0, 255, 0),
                         params.line_thickness)
                cv2.line(wback, (x_coor + 2, int(cmy)), (x_coor + width_left_bound, int(cmy)), (255, 0, 0),
                         params.line_thickness)
                cv2.line(wback, (x_coor + 2, int(cmy)), (x_coor - width_right_bound, int(cmy)), (0, 255, 0),
                         params.line_thickness)
        # Output images with boundary line
        analysis_images.append(wback)
        analysis_images.append(ori_img)

    if params.debug is not None:
        point3 = (x_coor+2, 0)
        point4 = (x_coor+2, y_coor)
        cv2.line(ori_img, point3, point4, (255, 0, 255), params.line_thickness)
        cv2.line(wback, point3, point4, (255, 0, 255), params.line_thickness)
        m = cv2.moments(mask, binaryImage=True)
        cmx, cmy = (m['m10'] / m['m00'], m['m01'] / m['m00'])
        if x_coor - x <= 0:
            cv2.line(ori_img, (x, int(cmy)), (x + width, int(cmy)), (0, 255, 0), params.line_thickness)
            cv2.line(wback, (x, int(cmy)), (x + width, int(cmy)), (0, 255, 0), params.line_thickness)
        elif x_coor - x > 0:
            width_1 = x_coor - x
            if width - width_1 <= 0:
                cv2.line(ori_img, (x, int(cmy)), (x + width, int(cmy)), (255, 0, 0), params.line_thickness)
                cv2.line(wback, (x, int(cmy)), (x + width, int(cmy)), (255, 0, 0), params.line_thickness)
            else:
                cv2.line(ori_img, (x_coor + 2, int(cmy)), (x_coor + width_left_bound, int(cmy)), (255, 0, 0),
                         params.line_thickness)
                cv2.line(ori_img, (x_coor + 2, int(cmy)), (x_coor - width_right_bound, int(cmy)), (0, 255, 0),
                         params.line_thickness)
                cv2.line(wback, (x_coor + 2, int(cmy)), (x_coor + width_left_bound, int(cmy)), (255, 0, 0),
                         params.line_thickness)
                cv2.line(wback, (x_coor + 2, int(cmy)), (x_coor - width_right_bound, int(cmy)), (0, 255, 0),
                         params.line_thickness)
        if params.debug == 'print':
            print_image(wback, os.path.join(params.debug_outdir, str(params.device) + '_boundary_on_white.jpg'))
            print_image(ori_img, os.path.join(params.debug_outdir, str(params.device) + '_boundary_on_img.jpg'))
        if params.debug == 'plot':
            plot_image(wback)
            plot_image(ori_img)

    outputs.add_observation(variable='vertical_reference_position', trait='vertical reference position',
                            method='plantcv.plantcv.analyze_bound_vertical', scale='none', datatype=int,
                            value=line_position, label='none')
    outputs.add_observation(variable='width_left_reference', trait='width left of reference',
                            method='plantcv.plantcv.analyze_bound_vertical', scale='pixels', datatype=int,
                            value=width_left_bound, label='pixels')
    outputs.add_observation(variable='width_right_reference', trait='width right of reference',
                            method='plantcv.plantcv.analyze_bound_vertical', scale='pixels', datatype=int,
                            value=width_right_bound, label='pixels')
    outputs.add_observation(variable='area_left_reference', trait='area left of reference',
                            method='plantcv.plantcv.analyze_bound_vertical', scale='pixels', datatype=int,
                            value=left_bound_area, label='pixels')
    outputs.add_observation(variable='percent_area_left_reference', trait='percent area left of reference',
                            method='plantcv.plantcv.analyze_bound_vertical', scale='none', datatype=float,
                            value=percent_bound_area_left, label='none')
    outputs.add_observation(variable='area_right_reference', trait='area right of reference',
                            method='plantcv.plantcv.analyze_bound_vertical', scale='pixels', datatype=int,
                            value=right_bound_area, label='pixels')
    outputs.add_observation(variable='percent_area_right_reference', trait='percent area right of reference',
                            method='plantcv.plantcv.analyze_bound_vertical', scale='none', datatype=float,
                            value=percent_bound_area_right, label='none')

    # Store images
    outputs.images.append(analysis_images)

    return analysis_images
Example #20
def fluor_fvfm(fdark, fmin, fmax, mask, bins=256):
    """Analyze PSII camera images.
    Inputs:
    fdark       = grayscale fdark image
    fmin        = grayscale fmin image
    fmax        = grayscale fmax image
    mask        = mask of plant (binary, single channel)
    bins        = number of bins (1 to 256 for 8-bit; 1 to 65,536 for 16-bit; default is 256)
    Returns:
    analysis_images = list of images (fv image and fvfm histogram image)
    :param fdark: numpy.ndarray
    :param fmin: numpy.ndarray
    :param fmax: numpy.ndarray
    :param mask: numpy.ndarray
    :param bins: int
    :return analysis_images: numpy.ndarray
    """

    # Auto-increment the device counter
    params.device += 1
    # Check that fdark, fmin, and fmax are grayscale (single channel)
    if not all(len(np.shape(i)) == 2 for i in [fdark, fmin, fmax]):
        fatal_error("The fdark, fmin, and fmax images must be grayscale images.")
    # # Check that fdark, fmin, and fmax are the same bit
    # if  not (all(i.dtype == "uint16" for i in [fdark, fmin, fmax]) or
    #         (all(i.dtype == "uint8" for i in [fdark, fmin, fmax]))):
    #     fatal_error("The fdark, fmin, and fmax images must all be the same bit depth.")
    # Check that fdark, fmin, and fmax are 16-bit images
    # if not all(i.dtype == "uint16" for i in [fdark, fmin, fmax]):
    #     fatal_error("The fdark, fmin, and fmax images must be 16-bit images.")

    # QC Fdark Image
    fdark_mask = cv2.bitwise_and(fdark, fdark, mask=mask)
    if np.amax(fdark_mask) > 2000:
        qc_fdark = False
    else:
        qc_fdark = True

    # Mask Fmin and Fmax Image
    fmin_mask = cv2.bitwise_and(fmin, fmin, mask=mask)
    fmax_mask = cv2.bitwise_and(fmax, fmax, mask=mask)

    # Calculate Fvariable, where Fv = Fmax - Fmin (masked)
    fv = np.subtract(fmax_mask, fmin_mask)

    # When Fmin is greater than Fmax, a negative value is returned.
    # Because the data type is unsigned integers, negative values roll over, resulting in nonsensical values
    # Wherever Fmin is greater than Fmax, set Fv to zero
    fv[np.where(fmax_mask < fmin_mask)] = 0
    analysis_images = []
    analysis_images.append(fv)

    # Calculate Fv/Fm (Fvariable / Fmax) where Fmax is greater than zero
    # By definition above, wherever Fmax is zero, Fvariable will also be zero
    # To calculate the divisions properly we need to change from uint16 to float64 data types
    fvfm = fv.astype(np.float64)
    fmax_flt = fmax_mask.astype(np.float64)
    fvfm[np.where(fmax_mask > 0)] /= fmax_flt[np.where(fmax_mask > 0)]

    # Calculate the median Fv/Fm value for non-zero pixels
    fvfm_median = np.median(fvfm[np.where(fvfm > 0)])

    # Calculate the histogram of Fv/Fm non-zero values
    fvfm_hist, fvfm_bins = np.histogram(fvfm[np.where(fvfm > 0)], bins, range=(0, 1))
    # fvfm_bins is a bins + 1 length list of bin endpoints, so we need to calculate bin midpoints so that
    # we have a one-to-one list of x (Fv/Fm) and y (frequency) values.
    # To do this we add half the bin width to each lower bin edge x-value
    midpoints = fvfm_bins[:-1] + 0.5 * np.diff(fvfm_bins)

    # Calculate which non-zero bin has the maximum Fv/Fm value
    max_bin = midpoints[np.argmax(fvfm_hist)]

    # Print F-variable image
    # print_image(fv, (os.path.splitext(filename)[0] + '_fv_img.png'))
    # analysis_images.append(['IMAGE', 'fv', os.path.splitext(filename)[0] + '_fv_img.png'])

    # Create Histogram Plot, if you change the bin number you might need to change binx so that it prints
    # an appropriate number of labels
    # Create a dataframe
    dataset = pd.DataFrame({'Plant Pixels': fvfm_hist, 'Fv/Fm': midpoints})
    # Make the histogram figure using plotnine
    fvfm_hist_fig = (ggplot(data=dataset, mapping=aes(x='Fv/Fm', y='Plant Pixels'))
                     + geom_line(color='green', show_legend=True)
                     + geom_label(label='Peak Bin Value: ' + str(max_bin),
                                  x=.15, y=205, size=8, color='green'))
    analysis_images.append(fvfm_hist_fig)

    # Changed histogram method over from matplotlib pyplot to plotnine
    # binx = int(bins / 50)
    # plt.plot(midpoints, fvfm_hist, color='green', label='Fv/Fm')
    # plt.xticks(list(midpoints[0::binx]), rotation='vertical', size='xx-small')
    # plt.legend()
    # ax = plt.subplot(111)
    # ax.set_ylabel('Plant Pixels')
    # ax.text(0.05, 0.95, ('Peak Bin Value: ' + str(max_bin)), transform=ax.transAxes, verticalalignment='top')
    # plt.grid()
    # plt.title('Fv/Fm of ' + os.path.splitext(filename)[0])
    # fig_name = (os.path.splitext(filename)[0] + '_fvfm_hist.svg')
    # plt.savefig(fig_name)
    # plt.clf()
    # analysis_images.append(['IMAGE', 'fvfm_hist', fig_name])

    # No longer pseudocolor the image, instead can be pseudocolored by pcv.pseudocolor

    if params.debug == 'print':
        print_image(fmin_mask, os.path.join(params.debug_outdir, str(params.device) + '_fmin_mask.png'))
        print_image(fmax_mask, os.path.join(params.debug_outdir, str(params.device) + '_fmax_mask.png'))
        print_image(fv, os.path.join(params.debug_outdir, str(params.device) + '_fv_convert.png'))
        fvfm_hist_fig.save(os.path.join(params.debug_outdir, str(params.device) + '_fv_hist.png'))
    elif params.debug == 'plot':
        plot_image(fmin_mask, cmap='gray')
        plot_image(fmax_mask, cmap='gray')
        plot_image(fv, cmap='gray')
        print(fvfm_hist_fig)

    outputs.add_observation(variable='fvfm_hist', trait='Fv/Fm frequencies',
                            method='plantcv.plantcv.fluor_fvfm', scale='none', datatype=list,
                            value=fvfm_hist.tolist(), label=np.around(midpoints, decimals=len(str(bins))).tolist())
    outputs.add_observation(variable='fvfm_hist_peak', trait='peak Fv/Fm value',
                            method='plantcv.plantcv.fluor_fvfm', scale='none', datatype=float,
                            value=float(max_bin), label='none')
    outputs.add_observation(variable='fvfm_median', trait='Fv/Fm median',
                            method='plantcv.plantcv.fluor_fvfm', scale='none', datatype=float,
                            value=float(np.around(fvfm_median, decimals=4)), label='none')
    outputs.add_observation(variable='fdark_passed_qc', trait='Fdark passed QC',
                            method='plantcv.plantcv.fluor_fvfm', scale='none', datatype=bool,
                            value=qc_fdark, label='none')

    # Store images
    outputs.images.append(analysis_images)

    return analysis_images
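A minimal standalone sketch (not part of the example above; the two-pixel arrays are made up) of why the unsigned-integer guard and the float conversion matter:

import numpy as np

# Two-pixel Fmax/Fmin arrays; the second pixel has Fmin > Fmax
fmax = np.array([[100, 50]], dtype=np.uint16)
fmin = np.array([[40, 60]], dtype=np.uint16)

fv = np.subtract(fmax, fmin)   # [[60, 65526]]: the negative difference wrapped around
fv[fmax < fmin] = 0            # [[60, 0]]: the guard restores a sensible value

# Masked division for Fv/Fm, mirroring the function above
fvfm = fv.astype(np.float64)
nonzero = fmax > 0
fvfm[nonzero] /= fmax.astype(np.float64)[nonzero]
print(fvfm)                    # [[0.6, 0.0]]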
Example No. 21
def analyze_color(rgb_img, mask, hist_plot_type=None):
    """Analyze the color properties of an image object
    Inputs:
    rgb_img          = RGB image data
    mask             = Binary mask made from selected contours
    hist_plot_type   = 'None', 'all', 'rgb','lab' or 'hsv'
    
    Returns:
    analysis_image   = histogram output
    
    :param rgb_img: numpy.ndarray
    :param mask: numpy.ndarray
    :param hist_plot_type: str
    :return analysis_images: list
    """

    params.device += 1

    if len(np.shape(rgb_img)) < 3:
        fatal_error("rgb_img must be an RGB image")

    # Mask the input image
    masked = cv2.bitwise_and(rgb_img, rgb_img, mask=mask)
    # Extract the blue, green, and red channels
    b, g, r = cv2.split(masked)
    # Convert the BGR image to LAB
    lab = cv2.cvtColor(masked, cv2.COLOR_BGR2LAB)
    # Extract the lightness, green-magenta, and blue-yellow channels
    l, m, y = cv2.split(lab)
    # Convert the BGR image to HSV
    hsv = cv2.cvtColor(masked, cv2.COLOR_BGR2HSV)
    # Extract the hue, saturation, and value channels
    h, s, v = cv2.split(hsv)

    # Color channel dictionary
    channels = {"b": b, "g": g, "r": r, "l": l, "m": m, "y": y, "h": h, "s": s, "v": v}

    # Histogram plot types
    hist_types = {"ALL": ("b", "g", "r", "l", "m", "y", "h", "s", "v"),
                  "RGB": ("b", "g", "r"),
                  "LAB": ("l", "m", "y"),
                  "HSV": ("h", "s", "v")}

    if hist_plot_type is not None and hist_plot_type.upper() not in hist_types:
        fatal_error("The histogram plot type was " + str(hist_plot_type) +
                    ', but can only be one of the following: None, "all", "rgb", "lab", or "hsv"!')
    # Store histograms, plotting colors, and plotting labels
    # Note: cv2.calcHist ranges are half-open, so [0, 256] is needed to include the value 255
    channel_info = {"b": ("blue", "blue"), "g": ("green", "forestgreen"), "r": ("red", "red"),
                    "l": ("lightness", "dimgray"), "m": ("green-magenta", "magenta"),
                    "y": ("blue-yellow", "yellow"), "h": ("hue", "blueviolet"),
                    "s": ("saturation", "cyan"), "v": ("value", "orange")}
    histograms = {}
    for channel, (label, graph_color) in channel_info.items():
        hist = cv2.calcHist([channels[channel]], [0], mask, [256], [0, 256])
        histograms[channel] = {"label": label, "graph_color": graph_color,
                               "hist": [float(count[0]) for count in hist]}

    # Create an array of bin labels for 8-bit data
    binval = np.arange(0, 256)

    analysis_images = []
    # Create a dataframe of bin labels and histogram data
    dataset = pd.DataFrame({'bins': binval, 'blue': histograms["b"]["hist"],
                            'green': histograms["g"]["hist"], 'red': histograms["r"]["hist"],
                            'lightness': histograms["l"]["hist"], 'green-magenta': histograms["m"]["hist"],
                            'blue-yellow': histograms["y"]["hist"], 'hue': histograms["h"]["hist"],
                            'saturation': histograms["s"]["hist"], 'value': histograms["v"]["hist"]})

    # Make the histogram figure using plotnine
    if hist_plot_type is not None:
        if hist_plot_type.upper() == 'RGB':
            df_rgb = pd.melt(dataset, id_vars=['bins'], value_vars=['blue', 'green', 'red'],
                             var_name='Color Channel', value_name='Pixels')
            hist_fig = (ggplot(df_rgb, aes(x='bins', y='Pixels', color='Color Channel'))
                        + geom_line()
                        + scale_x_continuous(breaks=list(range(0, 256, 25)))
                        + scale_color_manual(['blue', 'green', 'red'])
                        )
            analysis_images.append(hist_fig)

        elif hist_plot_type.upper() == 'LAB':
            df_lab = pd.melt(dataset, id_vars=['bins'],
                             value_vars=['lightness', 'green-magenta', 'blue-yellow'],
                             var_name='Color Channel', value_name='Pixels')
            hist_fig = (ggplot(df_lab, aes(x='bins', y='Pixels', color='Color Channel'))
                        + geom_line()
                        + scale_x_continuous(breaks=list(range(0, 256, 25)))
                        + scale_color_manual(['yellow', 'magenta', 'dimgray'])
                        )
            analysis_images.append(hist_fig)

        elif hist_plot_type.upper() == 'HSV':
            df_hsv = pd.melt(dataset, id_vars=['bins'],
                             value_vars=['hue', 'saturation', 'value'],
                             var_name='Color Channel', value_name='Pixels')
            hist_fig = (ggplot(df_hsv, aes(x='bins', y='Pixels', color='Color Channel'))
                        + geom_line()
                        + scale_x_continuous(breaks=list(range(0, 256, 25)))
                        + scale_color_manual(['blueviolet', 'cyan', 'orange'])
                        )
            analysis_images.append(hist_fig)

        elif hist_plot_type.upper() == 'ALL':
            s = pd.Series(['blue', 'green', 'red', 'lightness', 'green-magenta',
                           'blue-yellow', 'hue', 'saturation', 'value'], dtype="category")
            color_channels = ['blue', 'yellow', 'green', 'magenta', 'blueviolet',
                              'dimgray', 'red', 'cyan', 'orange']
            df_all = pd.melt(dataset, id_vars=['bins'], value_vars=s, var_name='Color Channel',
                             value_name='Pixels')
            hist_fig = (ggplot(df_all, aes(x='bins', y='Pixels', color='Color Channel'))
                        + geom_line()
                        + scale_x_continuous(breaks=list(range(0, 256, 25)))
                        + scale_color_manual(color_channels)
                        )
            analysis_images.append(hist_fig)

    # Hue values of zero are red, but zero is also assigned to pixels where hue is undefined
    # Hue is undefined for achromatic pixels (e.g., the black background left by masking)
    # Therefore, hue values of zero are excluded from the calculations below

    # Calculate the median hue value
    # The median is rescaled from the encoded 0-179 range to the 0-359 degree range
    hue_median = np.median(h[np.where(h > 0)]) * 2

    # Calculate the circular mean and standard deviation of the encoded hue values
    # The mean and standard-deviation are rescaled from the encoded 0-179 range to the 0-359 degree range
    hue_circular_mean = stats.circmean(h[np.where(h > 0)], high=179, low=0) * 2
    hue_circular_std = stats.circstd(h[np.where(h > 0)], high=179, low=0) * 2


    # Plot or print the histogram
    if hist_plot_type is not None:
        if params.debug == 'print':
            hist_fig.save(os.path.join(params.debug_outdir, str(params.device) + '_analyze_color_hist.png'))
        elif params.debug == 'plot':
            print(hist_fig)

    # Store into global measurements
    # RGB signal values are in an unsigned 8-bit scale of 0-255
    rgb_values = [i for i in range(0, 256)]
    # Hue values are on a 0-359 degree scale; each encoded bin spans 2 degrees, so label with the bin midpoint
    hue_values = [i * 2 + 1 for i in range(0, 180)]
    # Percentage values on a 0-100 scale (lightness, saturation, and value)
    percent_values = [round((i / 255) * 100, 2) for i in range(0, 256)]
    # Diverging values on a -128 to 127 scale (green-magenta and blue-yellow)
    diverging_values = [i for i in range(-128, 128)]
    outputs.add_observation(variable='blue_frequencies', trait='blue frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["b"]["hist"], label=rgb_values)
    outputs.add_observation(variable='green_frequencies', trait='green frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["g"]["hist"], label=rgb_values)
    outputs.add_observation(variable='red_frequencies', trait='red frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["r"]["hist"], label=rgb_values)
    outputs.add_observation(variable='lightness_frequencies', trait='lightness frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["l"]["hist"], label=percent_values)
    outputs.add_observation(variable='green-magenta_frequencies', trait='green-magenta frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["m"]["hist"], label=diverging_values)
    outputs.add_observation(variable='blue-yellow_frequencies', trait='blue-yellow frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["y"]["hist"], label=diverging_values)
    outputs.add_observation(variable='hue_frequencies', trait='hue frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["h"]["hist"], label=hue_values)
    outputs.add_observation(variable='saturation_frequencies', trait='saturation frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["s"]["hist"], label=percent_values)
    outputs.add_observation(variable='value_frequencies', trait='value frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["v"]["hist"], label=percent_values)
    outputs.add_observation(variable='hue_circular_mean', trait='hue circular mean',
                            method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
                            value=hue_circular_mean, label='degrees')
    outputs.add_observation(variable='hue_circular_std', trait='hue circular standard deviation',
                            method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
                            value=hue_circular_std, label='degrees')
    outputs.add_observation(variable='hue_median', trait='hue median',
                            method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
                            value=hue_median, label='degrees')

    # Store images
    outputs.images.append(analysis_images)

    return analysis_images
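Hue is periodic, so an arithmetic mean across the wrap-around point is misleading; a standalone sketch (made-up encoded hue values) of the circular statistics used above:

import numpy as np
from scipy import stats

# Encoded hue values (OpenCV 0-179 scale) straddling the wrap-around point
h = np.array([170, 175, 178, 2, 5])

naive_mean = np.mean(h) * 2                              # 212 degrees: wrong, ignores the wrap-around
circular_mean = stats.circmean(h, high=179, low=0) * 2   # ~355 degrees: correct
print(naive_mean, circular_mean)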
Example No. 22
def analyze_object(img, obj, mask):
    """Outputs numeric properties for an input object (contour or grouped contours).

    Inputs:
    img             = RGB or grayscale image data for plotting
    obj             = single or grouped contour object
    mask            = Binary image to use as mask

    Returns:
    analysis_images = list of output images

    :param img: numpy.ndarray
    :param obj: list
    :param mask: numpy.ndarray
    :return analysis_images: list
    """

    params.device += 1

    # Valid objects can only be analyzed if they have >= 5 vertices (cv2.fitEllipse requires at least 5 points)
    if len(obj) < 5:
        return None

    ori_img = np.copy(img)
    # Convert grayscale images to color
    if len(np.shape(ori_img)) == 2:
        ori_img = cv2.cvtColor(ori_img, cv2.COLOR_GRAY2BGR)

    if len(np.shape(img)) == 3:
        ix, iy, iz = np.shape(img)
    else:
        ix, iy = np.shape(img)
    size = ix, iy, 3
    size1 = ix, iy
    background = np.zeros(size, dtype=np.uint8)
    background1 = np.zeros(size1, dtype=np.uint8)
    background2 = np.zeros(size1, dtype=np.uint8)

    # Check if the object is touching the image boundaries (QC)
    in_bounds = within_frame(mask)

    # Convex Hull
    hull = cv2.convexHull(obj)
    hull_vertices = len(hull)
    # Moments
    m = cv2.moments(mask, binaryImage=True)
    # Properties
    # Area
    area = m['m00']

    if area:
        # Convex Hull area
        hull_area = cv2.contourArea(hull)
        # Solidity
        solidity = 1
        if int(hull_area) != 0:
            solidity = area / hull_area
        # Perimeter
        perimeter = cv2.arcLength(obj, closed=True)
        # Bounding rectangle: x and y of the top-left corner, plus width and height
        x, y, width, height = cv2.boundingRect(obj)
        # Centroid (center of mass x, center of mass y)
        cmx, cmy = (float(m['m10'] / m['m00']), float(m['m01'] / m['m00']))
        # Ellipse
        center, axes, angle = cv2.fitEllipse(obj)
        major_axis = np.argmax(axes)
        minor_axis = 1 - major_axis
        major_axis_length = float(axes[major_axis])
        minor_axis_length = float(axes[minor_axis])
        eccentricity = float(np.sqrt(1 - (axes[minor_axis] / axes[major_axis]) ** 2))

        # Longest Axis: line through center of mass and point on the convex hull that is furthest away
        cv2.circle(background, (int(cmx), int(cmy)), 4, (255, 255, 255), -1)
        center_p = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)
        ret, centerp_binary = cv2.threshold(center_p, 0, 255, cv2.THRESH_BINARY)
        centerpoint, cpoint_h = cv2.findContours(centerp_binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]

        dist = []
        vhull = np.vstack(hull)

        for i, c in enumerate(vhull):
            xy = tuple(c)
            pptest = cv2.pointPolygonTest(centerpoint[0], xy, measureDist=True)
            dist.append(pptest)

        abs_dist = np.absolute(dist)
        max_i = np.argmax(abs_dist)

        caliper_max_x, caliper_max_y = vhull[max_i]
        caliper_mid_x, caliper_mid_y = int(cmx), int(cmy)

        xdiff = float(caliper_max_x - caliper_mid_x)
        ydiff = float(caliper_max_y - caliper_mid_y)

        # Set default values
        slope = 1

        if xdiff != 0:
            slope = (float(ydiff / xdiff))
        b_line = caliper_mid_y - (slope * caliper_mid_x)

        if slope != 0:
            xintercept = int(-b_line / slope)
            xintercept1 = int((ix - b_line) / slope)
            if 0 <= xintercept <= iy and 0 <= xintercept1 <= iy:
                cv2.line(background1, (xintercept1, ix), (xintercept, 0), (255), params.line_thickness)
            elif xintercept < 0 or xintercept > iy or xintercept1 < 0 or xintercept1 > iy:
                # Used a random number generator to test if either of these cases were possible but neither is possible
                # if xintercept < 0 and 0 <= xintercept1 <= iy:
                #     yintercept = int(b_line)
                #     cv2.line(background1, (0, yintercept), (xintercept1, ix), (255), 5)
                # elif xintercept > iy and 0 <= xintercept1 <= iy:
                #     yintercept1 = int((slope * iy) + b_line)
                #     cv2.line(background1, (iy, yintercept1), (xintercept1, ix), (255), 5)
                # elif 0 <= xintercept <= iy and xintercept1 < 0:
                #     yintercept = int(b_line)
                #     cv2.line(background1, (0, yintercept), (xintercept, 0), (255), 5)
                # elif 0 <= xintercept <= iy and xintercept1 > iy:
                #     yintercept1 = int((slope * iy) + b_line)
                #     cv2.line(background1, (iy, yintercept1), (xintercept, 0), (255), 5)
                # else:
                yintercept = int(b_line)
                yintercept1 = int((slope * iy) + b_line)
                cv2.line(background1, (0, yintercept), (iy, yintercept1), (255), 5)
        else:
            cv2.line(background1, (iy, caliper_mid_y), (0, caliper_mid_y), (255), params.line_thickness)

        ret1, line_binary = cv2.threshold(background1, 0, 255, cv2.THRESH_BINARY)

        cv2.drawContours(background2, [hull], -1, (255), -1)
        ret2, hullp_binary = cv2.threshold(background2, 0, 255, cv2.THRESH_BINARY)

        caliper = cv2.multiply(line_binary, hullp_binary)

        caliper_y, caliper_x = np.array(caliper.nonzero())
        caliper_matrix = np.vstack((caliper_x, caliper_y))
        caliper_transpose = np.transpose(caliper_matrix)
        caliper_length = len(caliper_transpose)

        caliper_transpose1 = np.lexsort((caliper_y, caliper_x))
        caliper_transpose2 = [(caliper_x[i], caliper_y[i]) for i in caliper_transpose1]
        caliper_transpose = np.array(caliper_transpose2)


    analysis_images = []

    # Draw properties
    if area:
        cv2.drawContours(ori_img, obj, -1, (255, 0, 0), params.line_thickness)
        cv2.drawContours(ori_img, [hull], -1, (255, 0, 255), params.line_thickness)
        cv2.line(ori_img, (x, y), (x + width, y), (255, 0, 255), params.line_thickness)
        cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height), (255, 0, 255), params.line_thickness)
        cv2.line(ori_img, (tuple(caliper_transpose[caliper_length - 1])), (tuple(caliper_transpose[0])), (255, 0, 255),
                 params.line_thickness)
        cv2.circle(ori_img, (int(cmx), int(cmy)), 10, (255, 0, 255), params.line_thickness)
        # Output images with convex hull, extent x and y
        analysis_images.append(ori_img)
        analysis_images.append(mask)


    outputs.add_observation(variable='area', trait='area',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=area, label='pixels')
    outputs.add_observation(variable='convex_hull_area', trait='convex hull area',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=hull_area, label='pixels')
    outputs.add_observation(variable='solidity', trait='solidity',
                            method='plantcv.plantcv.analyze_object', scale='none', datatype=float,
                            value=solidity, label='none')
    outputs.add_observation(variable='perimeter', trait='perimeter',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=perimeter, label='pixels')
    outputs.add_observation(variable='width', trait='width',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=width, label='pixels')
    outputs.add_observation(variable='height', trait='height',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=height, label='pixels')
    outputs.add_observation(variable='longest_path', trait='longest path',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=caliper_length, label='pixels')
    outputs.add_observation(variable='center_of_mass', trait='center of mass',
                            method='plantcv.plantcv.analyze_object', scale='none', datatype=tuple,
                            value=(cmx, cmy), label='none')
    outputs.add_observation(variable='convex_hull_vertices', trait='convex hull vertices',
                            method='plantcv.plantcv.analyze_object', scale='none', datatype=int,
                            value=hull_vertices, label='none')
    outputs.add_observation(variable='object_in_frame', trait='object in frame',
                            method='plantcv.plantcv.analyze_object', scale='none', datatype=bool,
                            value=in_bounds, label='none')
    outputs.add_observation(variable='ellipse_center', trait='ellipse center',
                            method='plantcv.plantcv.analyze_object', scale='none', datatype=tuple,
                            value=(center[0], center[1]), label='none')
    outputs.add_observation(variable='ellipse_major_axis', trait='ellipse major axis length',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=major_axis_length, label='pixels')
    outputs.add_observation(variable='ellipse_minor_axis', trait='ellipse minor axis length',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=minor_axis_length, label='pixels')
    outputs.add_observation(variable='ellipse_angle', trait='ellipse major axis angle',
                            method='plantcv.plantcv.analyze_object', scale='degrees', datatype=float,
                            value=float(angle), label='degrees')
    outputs.add_observation(variable='ellipse_eccentricity', trait='ellipse eccentricity',
                            method='plantcv.plantcv.analyze_object', scale='none', datatype=float,
                            value=float(eccentricity), label='none')

    if params.debug is not None:
        cv2.drawContours(ori_img, obj, -1, (255, 0, 0), params.line_thickness)
        cv2.drawContours(ori_img, [hull], -1, (255, 0, 255), params.line_thickness)
        cv2.line(ori_img, (x, y), (x + width, y), (255, 0, 255), params.line_thickness)
        cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height), (255, 0, 255), params.line_thickness)
        cv2.circle(ori_img, (int(cmx), int(cmy)), 10, (255, 0, 255), params.line_thickness)
        cv2.line(ori_img, (tuple(caliper_transpose[caliper_length - 1])), (tuple(caliper_transpose[0])), (255, 0, 255),
                 params.line_thickness)
        if params.debug == 'print':
            print_image(ori_img, os.path.join(params.debug_outdir, str(params.device) + '_shapes.jpg'))
        elif params.debug == 'plot':
            if len(np.shape(img)) == 3:
                plot_image(ori_img)
            else:
                plot_image(ori_img, cmap='gray')

    # Store images
    outputs.images.append(analysis_images)
    return analysis_images
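The eccentricity computed above follows the standard ellipse relation e = sqrt(1 - (b/a)^2); a quick standalone check with made-up axis lengths:

import numpy as np

# Hypothetical (minor, major) axis lengths as returned by cv2.fitEllipse
axes = (40.0, 80.0)
minor, major = min(axes), max(axes)
eccentricity = np.sqrt(1 - (minor / major) ** 2)
print(eccentricity)  # ~0.866 for a 2:1 ellipse; 0.0 for a circle, approaching 1.0 for a line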
Example No. 23
def report_size_marker_area(img, roi_contour, roi_hierarchy, marker='define', objcolor='dark', thresh_channel=None,
                            thresh=None):
    """Detects a size marker in a specified region and reports its size and eccentricity

    Inputs:
    img             = An RGB or grayscale image to plot the marker object on
    roi_contour     = A region of interest contour (e.g. output from pcv.roi.rectangle or other methods)
    roi_hierarchy   = A region of interest contour hierarchy (e.g. output from pcv.roi.rectangle or other methods)
    marker          = 'define' or 'detect'. If define it means you set an area, if detect it means you want to
                      detect within an area
    objcolor        = Object color is 'dark' or 'light' (is the marker darker or lighter than the background)
    thresh_channel  = 'h', 's', or 'v' for hue, saturation or value
    thresh          = Binary threshold value (integer)

    Returns:
    analysis_images = List of output images

    :param img: numpy.ndarray
    :param roi_contour: list
    :param roi_hierarchy: numpy.ndarray
    :param marker: str
    :param objcolor: str
    :param thresh_channel: str
    :param thresh: int
    :return: analysis_images: list
    """

    params.device += 1
    # Make a copy of the reference image
    ref_img = np.copy(img)
    # If the reference image is grayscale convert it to color
    if len(np.shape(ref_img)) == 2:
        ref_img = cv2.cvtColor(ref_img, cv2.COLOR_GRAY2BGR)

    # Marker components
    # If the marker type is "define" then the marker_mask and marker_contours are equal to the input ROI
    # Initialize a binary image
    roi_mask = np.zeros(np.shape(img)[:2], dtype=np.uint8)
    # Draw the filled ROI on the mask
    cv2.drawContours(roi_mask, roi_contour, -1, (255), -1)
    marker_mask = []
    marker_contour = []

    # If the marker type is "detect" then we will use the ROI to isolate marker contours from the input image
    if marker.upper() == 'DETECT':
        # We need to convert the input image into one of the HSV channels and then threshold it
        if thresh_channel is not None and thresh is not None:
            # Mask the input image
            masked = apply_mask(rgb_img=ref_img, mask=roi_mask, mask_color="black")
            # Convert the masked image to hue, saturation, or value
            marker_hsv = rgb2gray_hsv(rgb_img=masked, channel=thresh_channel)
            # Threshold the HSV image
            marker_bin = binary_threshold(gray_img=marker_hsv, threshold=thresh, max_value=255, object_type=objcolor)
            # Identify contours in the masked image
            contours, hierarchy = find_objects(img=ref_img, mask=marker_bin)
            # Filter marker contours using the input ROI
            kept_contours, kept_hierarchy, kept_mask, obj_area = roi_objects(img=ref_img, object_contour=contours,
                                                                             obj_hierarchy=hierarchy,
                                                                             roi_contour=roi_contour,
                                                                             roi_hierarchy=roi_hierarchy,
                                                                             roi_type="partial")
            # If more than one contour is detected, combine them into one
            # These become the marker contour and mask
            marker_contour, marker_mask = object_composition(img=ref_img, contours=kept_contours,
                                                             hierarchy=kept_hierarchy)
        else:
            fatal_error('thresh_channel and thresh must be defined in detect mode')
    elif marker.upper() == "DEFINE":
        # Identify contours in the masked image
        contours, hierarchy = find_objects(img=ref_img, mask=roi_mask)
        # If more than one contour is detected, combine them into one
        # These become the marker contour and mask
        marker_contour, marker_mask = object_composition(img=ref_img, contours=contours, hierarchy=hierarchy)
    else:
        fatal_error("marker must be either 'define' or 'detect' but {0} was provided.".format(marker))

    # Calculate the moments of the defined marker region
    m = cv2.moments(marker_mask, binaryImage=True)
    # Calculate the marker area
    marker_area = m['m00']

    # Fit a bounding ellipse to the marker
    center, axes, angle = cv2.fitEllipse(marker_contour)
    major_axis = np.argmax(axes)
    minor_axis = 1 - major_axis
    major_axis_length = axes[major_axis]
    minor_axis_length = axes[minor_axis]
    # Calculate the bounding ellipse eccentricity
    eccentricity = np.sqrt(1 - (axes[minor_axis] / axes[major_axis]) ** 2)

    # Make a list to store output images
    analysis_image = []
    cv2.drawContours(ref_img, marker_contour, -1, (255, 0, 0), 5)
    analysis_image.append(ref_img)
    if params.debug == 'print':
        print_image(ref_img, os.path.join(params.debug_outdir, str(params.device) + '_marker_shape.png'))
    elif params.debug == 'plot':
        plot_image(ref_img)

    outputs.add_observation(variable='marker_area', trait='marker area',
                            method='plantcv.plantcv.report_size_marker_area', scale='pixels', datatype=int,
                            value=marker_area, label='pixels')
    outputs.add_observation(variable='marker_ellipse_major_axis', trait='marker ellipse major axis length',
                            method='plantcv.plantcv.report_size_marker_area', scale='pixels', datatype=int,
                            value=major_axis_length, label='pixels')
    outputs.add_observation(variable='marker_ellipse_minor_axis', trait='marker ellipse minor axis length',
                            method='plantcv.plantcv.report_size_marker_area', scale='pixels', datatype=int,
                            value=minor_axis_length, label='pixels')
    outputs.add_observation(variable='marker_ellipse_eccentricity', trait='marker ellipse eccentricity',
                            method='plantcv.plantcv.report_size_marker_area', scale='none', datatype=float,
                            value=eccentricity, label='none')

    # Store images
    outputs.images.append(analysis_image)

    return analysis_image
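A hedged usage sketch for the function above (the file name, ROI coordinates, and threshold value are made up; assumes a standard PlantCV 3.x install where pcv.readimage and pcv.roi.rectangle behave as shown):

from plantcv import plantcv as pcv

img, path, filename = pcv.readimage("marker_image.png")
# Place a rectangular ROI around the region expected to contain the size marker
roi_contour, roi_hierarchy = pcv.roi.rectangle(img=img, x=1000, y=500, h=200, w=200)
# Detect a light marker by thresholding the saturation channel at 120
marker_images = report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
                                        marker='detect', objcolor='light', thresh_channel='s', thresh=120)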
Example No. 24
def segment_tangent_angle(segmented_img, objects, size):
    """ Find 'tangent' angles in degrees of skeleton segments. Use `size` pixels on either end of
        each segment to find a linear regression line, and calculate angle between the two lines
        drawn per segment.

        Inputs:
        segmented_img  = Segmented image to plot slope lines and intersection angles on
        objects   = List of contours
        size      = Size of ends used to calculate "tangent" lines

        Returns:
        labeled_img    = Segmented debugging image with angles labeled

        :param segmented_img: numpy.ndarray
        :param objects: list
        :param size: int
        :return labeled_img: numpy.ndarray
        """
    # Store debug
    debug = params.debug
    params.debug = None

    labeled_img = segmented_img.copy()
    intersection_angles = []
    label_coord_x = []
    label_coord_y = []

    rand_color = color_palette(len(objects))

    for i, cnt in enumerate(objects):
        find_tangents = np.zeros(segmented_img.shape[:2], np.uint8)
        cv2.drawContours(find_tangents, objects, i, 255, 1, lineType=8)
        cv2.drawContours(labeled_img,
                         objects,
                         i,
                         rand_color[i],
                         params.line_thickness,
                         lineType=8)
        pruned_segment = _iterative_prune(find_tangents, size)
        segment_ends = find_tangents - pruned_segment
        segment_end_obj, segment_end_hierarchy = find_objects(
            segment_ends, segment_ends)
        slopes = []
        for j, obj in enumerate(segment_end_obj):
            # Find the x bounds within which the regression lines get drawn
            rect = cv2.minAreaRect(cnt)
            pts = cv2.boxPoints(rect)
            df = pd.DataFrame(pts, columns=('x', 'y'))
            x_max = int(df['x'].max())
            x_min = int(df['x'].min())

            # Fit a line to each segment end
            [vx, vy, x, y] = cv2.fitLine(obj, cv2.DIST_L2, 0, 0.01, 0.01)
            slope = -vy / vx
            left_list = int(((x - x_min) * slope) + y)
            right_list = int(((x - x_max) * slope) + y)
            slopes.append(slope)

            if slope > 1000000 or slope < -1000000:
                print("Slope of contour with ID#", i, "is", slope,
                      "and cannot be plotted.")
            else:
                # Draw slope lines
                cv2.line(labeled_img, (x_max - 1, right_list),
                         (x_min, left_list), rand_color[i], 1)

        if len(slopes) < 2:
            # If size*2>len(obj) then pruning will remove the segment completely, and
            # makes segment_end_objs contain just one contour.
            print("Size too large, contour with ID#", i,
                  "got pruned away completely.")
            intersection_angles.append("NA")
        else:
            # Calculate intersection angles
            slope1 = slopes[0][0]
            slope2 = slopes[1][0]
            intersection_angle = _slope_to_intesect_angle(slope1, slope2)
            intersection_angles.append(intersection_angle)

        # Store coordinates for labels
        label_coord_x.append(objects[i][0][0][0])
        label_coord_y.append(objects[i][0][0][1])

    segment_ids = []
    # Reset debug mode
    params.debug = debug

    for i, cnt in enumerate(objects):
        # Label slope lines
        w = label_coord_x[i]
        h = label_coord_y[i]
        if type(intersection_angles[i]) is str:
            text = "{}".format(intersection_angles[i])
        else:
            text = "{:.2f}".format(intersection_angles[i])
        cv2.putText(img=labeled_img,
                    text=text,
                    org=(w, h),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=params.text_size,
                    color=(150, 150, 150),
                    thickness=params.text_thickness)
        segment_ids.append(i)

    outputs.add_observation(
        variable='segment_tangent_angle',
        trait='segment tangent angle',
        method='plantcv.plantcv.morphology.segment_tangent_angle',
        scale='degrees',
        datatype=list,
        value=intersection_angles,
        label=segment_ids)

    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(
            labeled_img,
            os.path.join(params.debug_outdir,
                         str(params.device) + '_segment_tangent_angles.png'))
    elif params.debug == 'plot':
        plot_image(labeled_img)

    return labeled_img
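The private helper _slope_to_intesect_angle is not shown in this excerpt; a plausible standalone equivalent (an assumption, not PlantCV's actual code) using the standard two-line angle formula tan(theta) = |(m1 - m2) / (1 + m1*m2)|:

import math

def slope_to_intersect_angle(m1, m2):
    # Angle between two lines with slopes m1 and m2, in degrees
    denom = 1 + m1 * m2
    if denom == 0:
        return 90.0  # the lines are perpendicular
    return math.degrees(math.atan(abs((m1 - m2) / denom)))

print(slope_to_intersect_angle(1.0, 0.0))   # 45.0
print(slope_to_intersect_angle(2.0, -0.5))  # 90.0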