Example #1
# Assumed imports for this snippet: OpenCV, NumPy, pandas, matplotlib, PlantCV, and scikit-image filters.
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from plantcv import plantcv as pcv
from skimage import filters


def psIImask(img, mode='thresh'):
    # pcv.plot_image(img)
    if mode == 'thresh':

        # this entropy based technique seems to work well when algae is present
        algaethresh = filters.threshold_yen(image=img)
        threshy = pcv.threshold.binary(img, algaethresh, 255, 'light')
        # mask = pcv.dilate(threshy, 2, 1)
        mask = pcv.fill(threshy, 250)
        mask = pcv.erode(mask, 2, 1)
        mask = pcv.fill(mask, 100)
        final_mask = mask  # pcv.fill(mask, 270)

    elif isinstance(mode, pd.DataFrame):
        # mode is a dataframe of image metadata; use the row with the largest imageid
        curvedf = mode
        rownum = curvedf.imageid.values.argmax()
        imgdf = curvedf.iloc[[1, rownum]]
        fm = cv2.imread(imgdf.filename.iloc[0])
        fmp = cv2.imread(imgdf.filename.iloc[1])
        # NPQ = Fm/Fm' - 1, computed only where Fm' is nonzero
        npq = np.float32(np.divide(fm, fmp, where=fmp != 0) - 1)
        # mask out low-signal pixels before plotting
        npq = np.ma.array(npq, mask=fmp < 200)
        plt.imshow(npq)
        # pcv.plot_image(npq)

        final_mask = np.zeros_like(img)

    else:
        pcv.fatal_error(
            'mode must be "thresh" (default) or an object of class pd.DataFrame'
        )

    return final_mask
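
# A minimal usage sketch (an assumption, not part of the original listing): read a PSII
# fluorescence frame as grayscale and build a plant mask with the default Yen-threshold mode.
fluor_img, _, _ = pcv.readimage("data/psII/example_frame.png", mode='gray')  # hypothetical path
plant_mask = psIImask(fluor_img, mode='thresh')
pcv.plot_image(plant_mask)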
Example #2
def psIImask(img, mode='thresh'):
    ''' 
    Input:
    img = greyscale image
    mode = type of thresholding to perform. Currently only 'thresh' is available
    '''

    # pcv.plot_image(img)
    if mode == 'thresh':

        # this entropy based technique seems to work well when algae is present
        algaethresh = filters.threshold_yen(image=img)
        threshy = pcv.threshold.binary(img, algaethresh, 255, 'light')
        # mask = pcv.dilate(threshy, 2, 1)
        mask = pcv.fill(threshy, 150)
        mask = pcv.erode(mask, 2, 1)
        mask = pcv.fill(mask, 45)
        # mask = pcv.dilate(mask, 2,1)
        final_mask = mask  # pcv.fill(mask, 270)

    else:
        pcv.fatal_error(
            'mode must be "thresh" (default) or an object of class pd.DataFrame'
        )

    return final_mask
Example #3
def local_orientation_label_cost(labeled_lines,
                                 labeled_lines_num,
                                 intact_lines_num,
                                 max_orientation,
                                 max_response,
                                 theta,
                                 radius_constant=18):
    pixel_list = regionprops(np.transpose(labeled_lines))
    label_cost = np.zeros((labeled_lines_num + 1, 1))
    local_max_orientation = np.zeros((labeled_lines_num, 1))
    logical = labeled_lines > 0
    logical_double = logical.astype(np.double)
    # TODO decide number of iterations
    border_mask = np.logical_and(logical,
                                 np.logical_not(erode(logical_double, 3, 1)))
    border_mask = border_mask.astype(np.double)
    # area divided by perimeter
    sw = np.sum(logical_double) / np.sum(border_mask)
    se = int(round(sw * radius_constant))
    line_theta = np.zeros((labeled_lines_num, 1))
    for i in range(intact_lines_num, labeled_lines_num):
        x = pixel_list[i].coords
        try:
            pca = PCA()
            pca_res = pca.fit(x)
            pcav = pca_res.components_[0]
            line_theta[i] = math.atan(pcav[1] / pcav[0])
        except Exception as e:
            line_theta[i] = np.inf
            continue
        max_row_coord = np.amax(x, 0)[1] + se
        min_row_coord = max(np.amin(x, 0)[1] - se, 0)
        max_col_coord = np.amax(x, 0)[0] + se
        min_col_coord = max(np.amin(x, 0)[0] - se, 0)
        roi = labeled_lines[min_row_coord:max_row_coord,
                            min_col_coord:max_col_coord]
        logical_roi = roi == pixel_list[i].label
        # logical = labeled_lines == i + 1
        logical_double = logical_roi.astype(np.double)
        mask = np.full(labeled_lines.shape, False)
        # TODO number of iterations
        mask[min_row_coord:max_row_coord,
             min_col_coord:max_col_coord] = dilate(logical_double, se, 1)
        # mask = dilate(logical_double, se, 1)
        res = estimate_local_orientations(max_orientation, max_response, theta,
                                          mask)
        index = np.argmax(res[:, 1])
        local_max_orientation[i] = res[index, 0]
        label_cost[i] = 10 * np.exp(50 * (1 - abs(
            np.cos(math.radians(local_max_orientation[i]) - line_theta[i]))))
    return label_cost
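
# A small worked sketch of the label-cost term above (added for illustration, not part of
# the original listing): the cost 10 * exp(50 * (1 - |cos(delta)|)) stays near 10 when the
# locally estimated orientation agrees with the line's PCA orientation and explodes as the
# angular difference delta grows.
import math

for delta_deg in (0, 5, 15, 45, 90):
    delta = math.radians(delta_deg)
    cost = 10 * math.exp(50 * (1 - abs(math.cos(delta))))
    print("delta = {:>2} deg -> label cost {:.3g}".format(delta_deg, cost))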
Example #4
def main():
    # Create input arguments object
    args = options()

    # Set debug mode
    pcv.params.debug = args.debug

    # Open a single image
    img, imgpath, imgname = pcv.readimage(filename=args.image)

    # Visualize colorspaces
    all_cs = pcv.visualize.colorspaces(rgb_img=img)

    # Extract the Blue-Yellow ("b") channel from the LAB colorspace
    gray_img = pcv.rgb2gray_lab(rgb_img=img, channel="b")

    # Plot a histogram of pixel values for the Blue-Yellow ("b") channel.
    hist_plot = pcv.visualize.histogram(gray_img=gray_img)

    # Apply a binary threshold to the Blue-Yellow ("b") grayscale image.
    thresh_img = pcv.threshold.binary(gray_img=gray_img,
                                      threshold=140,
                                      max_value=255,
                                      object_type="light")

    # Apply a dilation with a 5x5 kernel and 3 iterations
    dil_img = pcv.dilate(gray_img=thresh_img, ksize=5, i=3)

    # Fill in small holes in the leaves
    closed_img = pcv.fill_holes(bin_img=dil_img)

    # Erode the plant pixels using a 5x5 kernel and 3 iterations
    er_img = pcv.erode(gray_img=closed_img, ksize=5, i=3)

    # Apply a Gaussian blur with a 5 x 5 kernel.
    blur_img = pcv.gaussian_blur(img=er_img, ksize=(5, 5))

    # Set pixel values less than 255 to 0
    blur_img[np.where(blur_img < 255)] = 0

    # Fill/remove objects less than 300 pixels in area
    cleaned = pcv.fill(bin_img=blur_img, size=300)

    # Create a circular ROI
    roi, roi_str = pcv.roi.circle(img=img, x=1725, y=1155, r=400)

    # Identify objects in the binary image
    cnts, cnts_str = pcv.find_objects(img=img, mask=cleaned)

    # Filter objects by region of interest
    plant_cnt, plant_str, plant_mask, plant_area = pcv.roi_objects(
        img=img,
        roi_contour=roi,
        roi_hierarchy=roi_str,
        object_contour=cnts,
        obj_hierarchy=cnts_str)

    # Combine objects into one
    plant, mask = pcv.object_composition(img=img,
                                         contours=plant_cnt,
                                         hierarchy=plant_str)

    # Measure size and shape properties
    shape_img = pcv.analyze_object(img=img, obj=plant, mask=mask)
    if args.writeimg:
        pcv.print_image(img=shape_img,
                        filename=os.path.join(args.outdir,
                                              "shapes_" + imgname))

    # Analyze color properties
    color_img = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type="hsv")
    if args.writeimg:
        pcv.print_image(img=color_img,
                        filename=os.path.join(args.outdir,
                                              "histogram_" + imgname))

    # Save the measurements to a file
    pcv.print_results(filename=args.result)
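
# A minimal sketch of the argument parser this workflow assumes. The flag names
# (--image, --debug, --writeimg, --outdir, --result) are inferred from how `args` is used
# above and are not part of the original listing; the workflow also assumes
# `from plantcv import plantcv as pcv`, `import numpy as np`, and `import os`.
import argparse

def options():
    parser = argparse.ArgumentParser(description="PlantCV single-plant workflow")
    parser.add_argument("--image", required=True, help="Input image file")
    parser.add_argument("--outdir", default=".", help="Directory for output images")
    parser.add_argument("--result", required=True, help="Results file")
    parser.add_argument("--writeimg", action="store_true", help="Save output images")
    parser.add_argument("--debug", default=None, help="Debug mode: None, 'print', or 'plot'")
    return parser.parse_args()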
Example #5
# Imports assumed by this snippet (adapted from plantcv.plantcv.morphology.check_cycles):
import os
import cv2
import numpy as np
from plantcv.plantcv import params, outputs, color_palette
from plantcv.plantcv import print_image, plot_image, find_objects, dilate, erode


def check_cycles(skel_img):
    """ Check for cycles in a skeleton image
    Inputs:
    skel_img     = Skeletonized image

    Returns:
    cycle_img    = Image with cycles identified

    :param skel_img: numpy.ndarray
    :return cycle_img: numpy.ndarray
    """

    # Store debug
    debug = params.debug
    params.debug = None

    # Create the mask needed for cv2.floodFill, must be larger than the image
    h, w = skel_img.shape[:2]
    mask = np.zeros((h + 2, w + 2), np.uint8)

    # Copy the skeleton since cv2.floodFill will draw on it
    skel_copy = skel_img.copy()
    cv2.floodFill(skel_copy, mask=mask, seedPoint=(0, 0), newVal=255)

    # Invert so the holes are white and background black
    just_cycles = cv2.bitwise_not(skel_copy)

    # Erode slightly so that cv2.findContours doesn't think diagonal pixels are separate contours
    just_cycles = erode(just_cycles, 2, 1)

    # Use pcv.find_objects to turn plots of holes into countable contours
    cycle_objects, cycle_hierarchies = find_objects(just_cycles, just_cycles)

    # Count the number of holes
    num_cycles = len(cycle_objects)

    # Make debugging image
    cycle_img = skel_img.copy()
    cycle_img = dilate(cycle_img, params.line_thickness, 1)
    cycle_img = cv2.cvtColor(cycle_img, cv2.COLOR_GRAY2RGB)
    if num_cycles > 0:
        rand_color = color_palette(num_cycles)
        for i, cnt in enumerate(cycle_objects):
            cv2.drawContours(cycle_img,
                             cycle_objects,
                             i,
                             rand_color[i],
                             params.line_thickness,
                             lineType=8,
                             hierarchy=cycle_hierarchies)

    # Store Cycle Data
    outputs.add_observation(variable='num_cycles',
                            trait='number of cycles',
                            method='plantcv.plantcv.morphology.check_cycles',
                            scale='none',
                            datatype=int,
                            value=num_cycles,
                            label='none')

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(
            cycle_img,
            os.path.join(params.debug_outdir,
                         str(params.device) + '_cycles.png'))
    elif params.debug == 'plot':
        plot_image(cycle_img)

    return cycle_img
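
# A minimal usage sketch (assumptions: PlantCV 3.x and an existing binary plant mask
# `bin_mask`); pcv.morphology.skeletonize and pcv.morphology.check_cycles are the public
# entry points that wrap the function above.
from plantcv import plantcv as pcv

skeleton = pcv.morphology.skeletonize(mask=bin_mask)
cycle_debug_img = pcv.morphology.check_cycles(skel_img=skeleton)
num_cycles = pcv.outputs.observations['num_cycles']['value']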
Example #6
    def get_coordinates(self):
        if self.debug:
            pcv.params.debug = 'print'  # set debug mode
            pcv.params.debug_outdir = './images/DracaenaVision/'  # set output directory

        blue_threshold = pcv.threshold.binary(gray_img=self.blue,
                                              threshold=50,
                                              max_value=255,
                                              object_type='dark')
        pcv.apply_mask(self.colour_image,
                       mask=blue_threshold,
                       mask_color='white')

        # Calculate moments of binary image
        moments = cv.moments(blue_threshold)

        # Calculate x,y coordinate of center
        self.centre_x = int(moments["m10"] / moments["m00"])
        self.centre_y = int(moments["m01"] / moments["m00"])

        # Put text and highlight the center
        cv.circle(self.colour_image, (self.centre_x, self.centre_y), 5,
                  (255, 255, 255), -1)
        cv.putText(self.colour_image, "Centre",
                   (self.centre_x - 25, self.centre_y - 25),
                   cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

        red_threshold = pcv.threshold.binary(gray_img=self.red,
                                             threshold=70,
                                             max_value=255,
                                             object_type='light')

        # Erode/Dilate the red threshold to remove noise
        red_threshold = pcv.erode(red_threshold, ksize=5, i=1)
        red_threshold = pcv.dilate(red_threshold, ksize=5, i=2)

        # Create mask of area that is of interest
        mask_pts = np.array(
            [[576, 415], [800, 420], [1105, 630], [720, 685], [285, 590]],
            np.int32).reshape((-1, 1, 2))
        area_of_interest = np.zeros(self.colour_image.shape[:2], np.uint8)
        cv.fillPoly(area_of_interest, [mask_pts], (255, 255, 255))

        red_threshold_in_area_of_interest = pcv.logical_and(
            area_of_interest, red_threshold)
        pcv.apply_mask(img=self.colour_image,
                       mask=red_threshold_in_area_of_interest,
                       mask_color='black')

        params = cv.SimpleBlobDetector_Params()
        # Unused filters
        params.filterByCircularity = False
        params.filterByConvexity = False
        params.filterByInertia = False
        # Area filter
        params.filterByArea = True
        params.maxArea = 50000
        params.minArea = 100
        # Colour filter
        params.filterByColor = True
        params.blobColor = 255
        # Misc options
        params.minDistBetweenBlobs = 100

        blob_detector = cv.SimpleBlobDetector_create(params)

        keypoints = blob_detector.detect(red_threshold, mask=area_of_interest)
        keypoints.sort(reverse=True, key=lambda kp: kp.size)

        now = datetime.now().strftime('%d-%m %H %M %S')

        im_with_keypoints = cv.drawKeypoints(
            self.colour_image, keypoints, np.array([]), (0, 0, 255),
            cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

        self.detection_image = im_with_keypoints.copy()
        if self.full_screen:
            self.detection_image = cv.resize(self.detection_image,
                                             (1920, 1080))
        cv.imshow(self.window_name, self.detection_image)
        cv.waitKey(1)

        cv.polylines(im_with_keypoints, [mask_pts],
                     True, (0, 255, 0),
                     thickness=3)
        cv.imwrite('./images/DracaenaVision/{} detections.png'.format(now),
                   im_with_keypoints)

        coordinates = []

        for keypoint in keypoints:
            x = keypoint.pt[0]
            y = keypoint.pt[1]

            # For each X and Y determine depth (z-coordinate)
            depth = -1
            step_size_to_centre_x = (self.centre_x - x) / 100
            step_size_to_centre_y = (self.centre_y - y) / 100

            for i in range(21):
                delta_x = step_size_to_centre_x * i
                delta_y = step_size_to_centre_y * i
                new_x = round(x + delta_x)
                new_y = round(y + delta_y)
                depth = self.depth_array[new_y, new_x]
                if depth < 0.55:
                    print('Depth found with delta ({},{})'.format(
                        delta_x, delta_y))
                    print('Depth of: {} at {} X, {} Y'.format(
                        depth, new_x, new_y))
                    break

            z = depth if depth < 0.55 or depth <= 0 else 0.55

            # Determine the angle at which the tool should be held towards the plant
            angle = 180 - math.degrees(
                math.atan2(y - self.centre_y, x - self.centre_x))

            coordinate = [x, y, z, angle]
            coordinates.append(coordinate)

        return coordinates
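
# A standalone sketch (an assumption, not part of the original class) of the depth-sampling
# idea used above: step from a keypoint toward the image centre in 1% increments of the
# distance and return the first depth reading below the 0.55 m cutoff.
def sample_depth_toward_centre(depth_array, x, y, centre_x, centre_y, cutoff=0.55, steps=20):
    step_x = (centre_x - x) / 100
    step_y = (centre_y - y) / 100
    new_x, new_y = int(round(x)), int(round(y))
    for i in range(steps + 1):
        new_x = int(round(x + step_x * i))
        new_y = int(round(y + step_y * i))
        depth = depth_array[new_y, new_x]
        if depth < cutoff:
            return depth, new_x, new_y
    return cutoff, new_x, new_y  # fall back to the cutoff if nothing closer was found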
Example #7
def main():
    args = options()  #create options object for argument parsing
    device = 0  #set device
    params.debug = args.debug  #set debug

    outfile = False
    if args.writeimg:
        outfile = os.path.join(args.outdir, os.path.basename(args.image)[:-4])

    # In[114]:

    img, path, filename = pcv.readimage(filename=args.image,
                                        debug=args.debug)  #read in image
    background = pcv.transform.load_matrix(
        args.npz)  #read in background mask image for subtraction

    # In[115]:

    device, mask = pcv.naive_bayes_classifier(
        img, args.pdf, device, args.debug)  #naive bayes on image

    #if args.writeimg:
    #   pcv.print_image(img=mask["94,104,47"], filename=outfile + "_nb_mask.png")

    # In[116]:

    new_mask = pcv.image_subtract(mask["94,104,47"],
                                  background)  #subtract background noise

    # In[117]:

    #image blurring using scipy median filter
    blurred_img = ndimage.median_filter(new_mask, (7, 1))
    blurred_img = ndimage.median_filter(blurred_img, (1, 7))
    device, cleaned = pcv.fill(np.copy(blurred_img), np.copy(blurred_img), 50,
                               0, args.debug)  #fill leftover noise

    # In[118]:

    #dilate and erode to repair plant breaks from background subtraction
    device, cleaned_dilated = pcv.dilate(cleaned, 6, 1, 0)
    device, cleaned = pcv.erode(cleaned_dilated, 6, 1, 0, args.debug)

    # In[119]:

    device, objects, obj_hierarchy = pcv.find_objects(
        img, cleaned, device, debug=args.debug)  #find objects using mask
    if "TM015" in args.image:
        h = 1620
    elif "TM016" in args.image:
        h = 1555
    else:
        h = 1320
    roi_contour, roi_hierarchy = pcv.roi.rectangle(x=570,
                                                   y=0,
                                                   h=h,
                                                   w=1900 - 550,
                                                   img=img)  #grab ROI

    # In[120]:

    #isolate plant objects within ROI
    device, roi_objects, hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img,
        'partial',
        roi_contour,
        roi_hierarchy,
        objects,
        obj_hierarchy,
        device,
        debug=args.debug)

    # Analyze only images with plants present.
    if len(roi_objects) > 0:
        # In[121]:

        # Object combine kept objects
        device, plant_contour, plant_mask = pcv.object_composition(
            img=img,
            contours=roi_objects,
            hierarchy=hierarchy,
            device=device,
            debug=args.debug)

        if args.writeimg:
            pcv.print_image(img=plant_mask, filename=outfile + "_mask.png")

        # In[122]:

        # Find shape properties, output shape image (optional)
        device, shape_header, shape_data, shape_img = pcv.analyze_object(
            img=img,
            imgname=args.image,
            obj=plant_contour,
            mask=plant_mask,
            device=device,
            debug=args.debug,
            filename=outfile + ".png")

        # In[123]:

        if "TM015" in args.image:
            line_position = 380
        elif "TM016" in args.image:
            line_position = 440
        else:
            line_position = 690

        # Shape properties relative to user boundary line (optional)
        device, boundary_header, boundary_data, boundary_img = pcv.analyze_bound_horizontal(
            img=img,
            obj=plant_contour,
            mask=plant_mask,
            line_position=line_position,
            device=device,
            debug=args.debug,
            filename=outfile + ".png")

        # In[124]:

        # Determine color properties: Histograms, Color Slices and Pseudocolored Images,
        # output color analyzed images (optional)
        device, color_header, color_data, color_img = pcv.analyze_color(
            img=img,
            imgname=args.image,
            mask=plant_mask,
            bins=256,
            device=device,
            debug=args.debug,
            hist_plot_type=None,
            pseudo_channel="v",
            pseudo_bkg="img",
            resolution=300,
            filename=outfile + ".png")

        # In[55]:

        # Output shape and color data
        result = open(args.result, "a")
        result.write('\t'.join(map(str, shape_header)) + "\n")
        result.write('\t'.join(map(str, shape_data)) + "\n")
        for row in shape_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.write('\t'.join(map(str, color_header)) + "\n")
        result.write('\t'.join(map(str, color_data)) + "\n")
        result.write('\t'.join(map(str, boundary_header)) + "\n")
        result.write('\t'.join(map(str, boundary_data)) + "\n")
        result.write('\t'.join(map(str, boundary_img)) + "\n")
        for row in color_img:
            result.write('\t'.join(map(str, row)) + "\n")
        result.close()
Example #8
def main():
    # Get options
    args = options()

    if args.debug:
        pcv.params.debug = args.debug  # set debug mode
        if args.debugdir:
            pcv.params.debug_outdir = args.debugdir  # set debug directory
            os.makedirs(args.debugdir, exist_ok=True)

    # pixel resolution in mm per pixel; see pixel_resolution.xlsx for the pixel-to-mm calibration curve
    pixelresolution = 0.052

    # The result file should exist if plantcv-workflow.py was run
    if os.path.exists(args.result):
        # Open the result file
        results = open(args.result, "r")
        # The result file would have image metadata in it from plantcv-workflow.py, read it into memory
        metadata = json.load(results)
        # Close the file
        results.close()
        # Delete the file, we will create new ones
        os.remove(args.result)
        plantbarcode = metadata['metadata']['plantbarcode']['value']
        print(plantbarcode,
              metadata['metadata']['timestamp']['value'],
              sep=' - ')

    else:
        # If the file did not exist (for testing), initialize metadata as an empty string
        metadata = "{}"
        regpat = re.compile(args.regex)
        plantbarcode = re.search(regpat, args.image).groups()[0]

    # read images and create mask
    img, _, fn = pcv.readimage(args.image)
    imagename = os.path.splitext(fn)[0]

    # create mask

    # taf=filters.try_all_threshold(s_img)
    ## remove background
    s_img = pcv.rgb2gray_hsv(img, 's')
    min_s = filters.threshold_minimum(s_img)
    thresh_s = pcv.threshold.binary(s_img, min_s, 255, 'light')
    rm_bkgrd = pcv.fill_holes(thresh_s)

    ## low greenness
    thresh_s = pcv.threshold.binary(s_img, min_s + 15, 255, 'dark')
    # taf = filters.try_all_threshold(s_img)
    c = pcv.logical_xor(rm_bkgrd, thresh_s)
    cinv = pcv.invert(c)
    cinv_f = pcv.fill(cinv, 500)
    cinv_f_c = pcv.closing(cinv_f, np.ones((5, 5)))
    cinv_f_c_e = pcv.erode(cinv_f_c, 2, 1)

    ## high greenness
    a_img = pcv.rgb2gray_lab(img, channel='a')
    # taf = filters.try_all_threshold(a_img)
    t_a = filters.threshold_isodata(a_img)
    thresh_a = pcv.threshold.binary(a_img, t_a, 255, 'dark')
    thresh_a = pcv.closing(thresh_a, np.ones((5, 5)))
    thresh_a_f = pcv.fill(thresh_a, 500)
    ## combined mask
    lor = pcv.logical_or(cinv_f_c_e, thresh_a_f)
    close = pcv.closing(lor, np.ones((2, 2)))
    fill = pcv.fill(close, 800)
    erode = pcv.erode(fill, 3, 1)
    fill2 = pcv.fill(erode, 1200)
    # dilate = pcv.dilate(fill2,2,2)
    mask = fill2

    final_mask = np.zeros_like(mask)

    # Compute greenness
    # split color channels
    b, g, r = cv2.split(img)
    # print green intensity
    # g_img = pcv.visualize.pseudocolor(g, cmap='Greens', background='white', min_value=0, max_value=255, mask=mask, axes=False)

    # convert color channels to uint16 so they can be added (sums exceed 255, the max of the original uint8 format)
    g = g.astype('uint16')
    r = r.astype('uint16')
    b = b.astype('uint16')
    denom = g + r + b

    # greenness index
    out_flt = np.zeros_like(denom, dtype='float32')
    # divide green by sum of channels to compute greenness index with values 0-1
    gi = np.divide(g,
                   denom,
                   out=out_flt,
                   where=np.logical_and(denom != 0, mask > 0))

    # find objects
    c, h = pcv.find_objects(img, mask)
    rc, rh = pcv.roi.multi(img, coord=[(1300, 900), (1300, 2400)], radius=350)
    # Turn off debug temporarily, otherwise there will be a lot of plots
    pcv.params.debug = None
    # Loop over each region of interest
    for i, rc_i in enumerate(rc):
        rh_i = rh[i]

        # Add ROI number to output. Before roi_objects so result has NA if no object.
        pcv.outputs.add_observation(variable='roi',
                                    trait='roi',
                                    method='roi',
                                    scale='int',
                                    datatype=int,
                                    value=i,
                                    label='#')

        roi_obj, hierarchy_obj, submask, obj_area = pcv.roi_objects(
            img,
            roi_contour=rc_i,
            roi_hierarchy=rh_i,
            object_contour=c,
            obj_hierarchy=h,
            roi_type='partial')

        if obj_area == 0:

            print('\t!!! No object found in ROI', str(i))
            pcv.outputs.add_observation(
                variable='plantarea',
                trait='plant area in sq mm',
                method='observations.area*pixelresolution^2',
                scale=pixelresolution,
                datatype="<class 'float'>",
                value=0,
                label='sq mm')

        else:

            # Combine multiple plant objects within an ROI together
            plant_object, plant_mask = pcv.object_composition(
                img=img, contours=roi_obj, hierarchy=hierarchy_obj)

            final_mask = pcv.image_add(final_mask, plant_mask)

            # Save greenness for individual ROI
            grnindex = np.mean(gi[np.where(plant_mask > 0)])
            pcv.outputs.add_observation(
                variable='greenness_index',
                trait='mean normalized greenness index',
                method='g/sum(b+g+r)',
                scale='[0,1]',
                datatype="<class 'float'>",
                value=float(grnindex),
                label='/1')

            # Analyze all colors
            hist = pcv.analyze_color(img, plant_mask, 'all')

            # Analyze the shape of the current plant
            shape_img = pcv.analyze_object(img, plant_object, plant_mask)
            plant_area = pcv.outputs.observations['area'][
                'value'] * pixelresolution**2
            pcv.outputs.add_observation(
                variable='plantarea',
                trait='plant area in sq mm',
                method='observations.area*pixelresolution^2',
                scale=pixelresolution,
                datatype="<class 'float'>",
                value=plant_area,
                label='sq mm')

        # end if-else

        # At this point we have observations for one plant
        # We can write these out to a unique results file
        # Here I will name the results file with the ROI ID combined with the original result filename
        basename, ext = os.path.splitext(args.result)
        filename = basename + "-roi" + str(i) + ext
        # Save the existing metadata to the new file
        with open(filename, "w") as r:
            json.dump(metadata, r)
        pcv.print_results(filename=filename)
        # The results are saved, now clear out the observations so the next loop adds new ones for the next plant
        pcv.outputs.clear()

        if args.writeimg and obj_area != 0:
            imgdir = os.path.join(args.outdir, 'shape_images', plantbarcode)
            os.makedirs(imgdir, exist_ok=True)
            pcv.print_image(
                shape_img,
                os.path.join(imgdir,
                             imagename + '-roi' + str(i) + '-shape.png'))

            imgdir = os.path.join(args.outdir, 'colorhist_images',
                                  plantbarcode)
            os.makedirs(imgdir, exist_ok=True)
            pcv.print_image(
                hist,
                os.path.join(imgdir,
                             imagename + '-roi' + str(i) + '-colorhist.png'))

# end roi loop

    if args.writeimg:
        # save grnness image of entire tray
        imgdir = os.path.join(args.outdir, 'pseudocolor_images', plantbarcode)
        os.makedirs(imgdir, exist_ok=True)
        gi_img = pcv.visualize.pseudocolor(gi,
                                           obj=None,
                                           mask=final_mask,
                                           cmap='viridis',
                                           axes=False,
                                           min_value=0.3,
                                           max_value=0.6,
                                           background='black',
                                           obj_padding=0)
        gi_img = add_scalebar(gi_img,
                              pixelresolution=pixelresolution,
                              barwidth=20,
                              barlocation='lower left')
        gi_img.set_size_inches(6, 6, forward=False)
        gi_img.savefig(os.path.join(imgdir, imagename + '-greenness.png'),
                       bbox_inches='tight')
        gi_img.clf()
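
# A hypothetical sketch of the add_scalebar helper used above (the real helper is not part of
# this listing): draw a bar of `barwidth` millimetres on the first axes of the pseudocolor
# figure, converting to pixels with the mm-per-pixel resolution.
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar

def add_scalebar(fig, pixelresolution, barwidth=20, barlocation='lower left'):
    ax = fig.axes[0]
    bar = AnchoredSizeBar(ax.transData,
                          barwidth / pixelresolution,  # bar length in pixels
                          '{} mm'.format(barwidth),
                          barlocation,
                          pad=0.5,
                          frameon=False,
                          color='white')
    ax.add_artist(bar)
    return fig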
Example #9
############################################
######    Perform Calculations    ##########
############################################

# combine leaf and label masks (mask_image_plant is assumed to come from an earlier
# naive Bayes plant-mask step in this workflow)
mask_image_label, path, filename = pcv.readimage(
    '1_naive_bayes_labels_mask.jpg')
mask_image = mask_image_plant + mask_image_label
device, mask_image = pcv.rgb2gray_lab(mask_image, 'l', device)

#clean the mask up
device, img_binary = pcv.binary_threshold(mask_image, 50, 255, 'light', device)
pcv.print_image(img_binary, 'img_binary.tif')
# Erode to remove soil; dilate later so that leaves are not lost (just in case)
device, blur_img = pcv.erode(img_binary, 3, 1, device, debug='print')

mask = np.copy(blur_img)
device, fill_image = pcv.fill(blur_img, mask, 100, device)
pcv.print_image(fill_image, 'fill_image.tif')
device, binary_image = pcv.median_blur(fill_image, 1, device)
pcv.print_image(binary_image, 'binary_image.tif')
device, dilate_image = pcv.dilate(fill_image, 3, 3, device)
masked_image = dilate_image

############################################
###########    Create Output   #############
############################################

#Print grid of images for QC
Example #10
def main_side():
    # Get options (this snippet assumes a global `args` options object, as in the other workflows above)
    pcv.params.debug = args.debug  # set debug mode
    pcv.params.debug_outdir = args.outdir  # set output directory

    # Read image (readimage mode defaults to native but if image is RGBA then specify mode='rgb')
    # Inputs:
    #   filename - Image file to be read in
    #   mode - Return mode of image; either 'native' (default), 'rgb', 'gray', or 'csv'
    filename = args.image
    img = cv2.imread(args.image, flags=0)
    #img = pcv.invert(img)
    path, img_name = os.path.split(args.image)
    img_bkgrd = cv2.imread("background.png", flags=0)
    #print(img)
    #print(img_bkgrd)
    bkg_sub_img = pcv.image_subtract(img_bkgrd, img)
    bkg_sub_thres_img, masked_img = pcv.threshold.custom_range(
        rgb_img=bkg_sub_img,
        lower_thresh=[50],
        upper_thresh=[255],
        channel='gray')
    # Laplace filtering (identify edges based on 2nd derivative)

    # Inputs:
    #   gray_img - Grayscale image data
    #   ksize - Aperture size used to calculate the second derivative filter,
    #           specifies the size of the kernel (must be an odd integer)
    #   scale - Scaling factor applied (multiplied) to computed Laplacian values
    #           (scale = 1 is unscaled)
    lp_img = pcv.laplace_filter(gray_img=img, ksize=1, scale=1)

    # Plot histogram of grayscale values
    pcv.visualize.histogram(gray_img=lp_img)

    # Laplacian image sharpening; this step enhances the darkness of the detected edges
    lp_shrp_img = pcv.image_subtract(gray_img1=img, gray_img2=lp_img)

    # Plot histogram of grayscale values, this helps to determine thresholding value
    pcv.visualize.histogram(gray_img=lp_shrp_img)
    # Sobel filtering
    # 1st derivative Sobel filtering along the horizontal axis (ksize = 1)

    # Inputs:
    #   gray_img - Grayscale image data
    #   dx - Derivative of x to analyze
    #   dy - Derivative of y to analyze
    #   ksize - Aperture size used to calculate 2nd derivative, specifies the size of the kernel and must be an odd integer
    # NOTE: Aperture size must be greater than the largest derivative (ksize > dx & ksize > dy)
    sbx_img = pcv.sobel_filter(gray_img=img, dx=1, dy=0, ksize=1)

    # 1st derivative Sobel filtering along the vertical axis (ksize = 1)
    sby_img = pcv.sobel_filter(gray_img=img, dx=0, dy=1, ksize=1)

    # Combine the effects of both x and y filters through matrix addition
    # This will capture edges identified within each plane and emphasize edges found in both images

    # Inputs:
    #   gray_img1 - Grayscale image data to be added to gray_img2
    #   gray_img2 - Grayscale image data to be added to gray_img1
    sb_img = pcv.image_add(gray_img1=sbx_img, gray_img2=sby_img)

    # Use a lowpass (blurring) filter to smooth sobel image

    # Inputs:
    #   gray_img - Grayscale image data
    #   ksize - Kernel size (integer or tuple), (ksize, ksize) box if integer input,
    #           (n, m) box if tuple input
    mblur_img = pcv.median_blur(gray_img=sb_img, ksize=1)

    # Inputs:
    #   gray_img - Grayscale image data
    mblur_invert_img = pcv.invert(gray_img=mblur_img)

    # combine the smoothed sobel image with the laplacian sharpened image
    # combines the best features of both methods as described in "Digital Image Processing" by Gonzalez and Woods pg. 169
    edge_shrp_img = pcv.image_add(gray_img1=mblur_invert_img,
                                  gray_img2=lp_shrp_img)

    # Perform thresholding to generate a binary image
    tr_es_img = pcv.threshold.binary(gray_img=edge_shrp_img,
                                     threshold=145,
                                     max_value=255,
                                     object_type='dark')

    # Do erosion with a 3x3 kernel (ksize=3)

    # Inputs:
    #   gray_img - Grayscale (usually binary) image data
    #   ksize - The size used to build a ksize x ksize
    #            matrix using np.ones. Must be greater than 1 to have an effect
    #   i - An integer for the number of iterations
    e1_img = pcv.erode(gray_img=tr_es_img, ksize=3, i=1)
    # Bring the two object identification approaches together.
    # Using a logical OR combine object identified by background subtraction and the object identified by derivative filter.

    # Inputs:
    #   bin_img1 - Binary image data to be compared in bin_img2
    #   bin_img2 - Binary image data to be compared in bin_img1
    comb_img = pcv.logical_or(bin_img1=e1_img, bin_img2=bkg_sub_thres_img)

    # Get masked image, Essentially identify pixels corresponding to plant and keep those.

    # Inputs:
    #   rgb_img - RGB image data
    #   mask - Binary mask image data
    #   mask_color - 'black' or 'white'
    masked_erd = pcv.apply_mask(rgb_img=img, mask=comb_img, mask_color='black')

    # Need to remove the edges of the image, we did that by generating a set of rectangles to mask the edges
    # img is (1280 X 960)
    # mask for the bottom of the image

    # Inputs:
    #   img - RGB or grayscale image data
    #   p1 - Point at the top left corner of the rectangle (tuple)
    #   p2 - Point at the bottom right corner of the rectangle (tuple)
    #   color 'black' (default), 'gray', or 'white'
    #
    masked1, box1_img, rect_contour1, hierarchy1 = pcv.rectangle_mask(img=img,
                                                                      p1=(500,
                                                                          875),
                                                                      p2=(720,
                                                                          960))
    # mask the edges
    masked2, box2_img, rect_contour2, hierarchy2 = pcv.rectangle_mask(img=img,
                                                                      p1=(1,
                                                                          1),
                                                                      p2=(1279,
                                                                          959))
    bx12_img = pcv.logical_or(bin_img1=box1_img, bin_img2=box2_img)
    inv_bx1234_img = bx12_img  # we don't invert here
    # inv_bx1234_img = pcv.invert(gray_img=bx12_img)

    edge_masked_img = pcv.apply_mask(rgb_img=masked_erd,
                                     mask=inv_bx1234_img,
                                     mask_color='black')
    #print("here we create a mask")
    mask, masked = pcv.threshold.custom_range(rgb_img=edge_masked_img,
                                              lower_thresh=[25],
                                              upper_thresh=[175],
                                              channel='gray')
    masked = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')
    #print("end")
    # Identify objects

    # Inputs:
    #   img - RGB or grayscale image data for plotting
    #   mask - Binary mask used for detecting contours
    id_objects, obj_hierarchy = pcv.find_objects(img=edge_masked_img,
                                                 mask=mask)

    # Define ROI

    # Inputs:
    #   img - RGB or grayscale image to plot the ROI on
    #   x - The x-coordinate of the upper left corner of the rectangle
    #   y - The y-coordinate of the upper left corner of the rectangle
    #   h - The height of the rectangle
    #   w - The width of the rectangle
    roi1, roi_hierarchy = pcv.roi.rectangle(img=edge_masked_img,
                                            x=100,
                                            y=100,
                                            h=800,
                                            w=1000)

    # Decide which objects to keep

    # Inputs:
    #    img            = img to display kept objects
    #    roi_contour    = contour of roi, output from any ROI function
    #    roi_hierarchy  = contour of roi, output from any ROI function
    #    object_contour = contours of objects, output from pcv.find_objects function
    #    obj_hierarchy  = hierarchy of objects, output from pcv.find_objects function
    #    roi_type       = 'partial' (default, for partially inside), 'cutto', or
    #    'largest' (keep only largest contour)
    with HiddenPrints():
        roi_objects, hierarchy5, kept_mask, obj_area = pcv.roi_objects(
            img=edge_masked_img,
            roi_contour=roi1,
            roi_hierarchy=roi_hierarchy,
            object_contour=id_objects,
            obj_hierarchy=obj_hierarchy,
            roi_type='largest')

    rgb_img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

    # Inputs:
    #   img - RGB or grayscale image data for plotting
    #   contours - Contour list
    #   hierarchy - Contour hierarchy array
    o, m = pcv.object_composition(img=rgb_img,
                                  contours=roi_objects,
                                  hierarchy=hierarchy5)

    ### Analysis ###

    outfile = False
    if args.writeimg:
        outfile = args.outdir + "/" + filename

    # Perform signal analysis

    # Inputs:
    #   img - RGB or grayscale image data
    #   obj- Single or grouped contour object
    #   mask - Binary image mask to use as mask for moments analysis
    shape_img = pcv.analyze_object(img=img, obj=o, mask=m)
    new_im = Image.fromarray(shape_img)
    new_im.save("output//" + args.filename + "shape_img_side.png")

    # Inputs:
    #   gray_img - 8 or 16-bit grayscale image data
    #   mask - Binary mask made from selected contours
    #   bins - Number of classes to divide the spectrum into
    #   histplot - If True, plots the histogram of intensity values
    nir_hist = pcv.analyze_nir_intensity(gray_img=img,
                                         mask=kept_mask,
                                         bins=256,
                                         histplot=True)

    # Pseudocolor the grayscale image to a colormap

    # Inputs:
    #     gray_img - Grayscale image data
    #     obj - Single or grouped contour object (optional), if provided the pseudocolored image gets cropped down to the region of interest.
    #     mask - Binary mask (optional)
    #     background - Background color/type. Options are "image" (gray_img), "white", or "black". A mask must be supplied.
    #     cmap - Colormap
    #     min_value - Minimum value for range of interest
    #     max_value - Maximum value for range of interest
    #     dpi - Dots per inch for image if printed out (optional, if dpi=None then the default is set to 100 dpi).
    #     axes - If False then the title, x-axis, and y-axis won't be displayed (default axes=True).
    #     colorbar - If False then the colorbar won't be displayed (default colorbar=True)
    pseudocolored_img = pcv.visualize.pseudocolor(gray_img=img,
                                                  mask=kept_mask,
                                                  cmap='viridis')

    # Perform shape analysis

    # Inputs:
    #   img - RGB or grayscale image data
    #   obj- Single or grouped contour object
    #   mask - Binary image mask to use as mask for moments analysis
    shape_imgs = pcv.analyze_object(img=rgb_img, obj=o, mask=m)

    # Write shape and nir data to results file
    pcv.print_results(filename=args.result)
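
# A hypothetical sketch of the HiddenPrints context manager used in main_side() above (it is
# not defined anywhere in this listing): temporarily redirect stdout so the chatty
# pcv.roi_objects call stays quiet.
import os
import sys

class HiddenPrints:
    def __enter__(self):
        self._original_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout.close()
        sys.stdout = self._original_stdout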
Example #11
# Assumed companion imports for this snippet: matplotlib and PlantCV.
import cv2
import matplotlib.pyplot as plt
from plantcv import plantcv as pcv

pcv.params.debug = 'plot'
plt.rcParams['figure.figsize'] = [12, 12]

visfn = "data/vis/A6-GoldStandard2_RGB-20190801T043947-VIS0-0.png"
psIIfn = "data/psII/A6-GoldStandard2_PSII-20190801T003230-PSII0-2.png"

visimg, bn, fn = pcv.readimage(visfn)
psIIimg, bn, fn = pcv.readimage(psIIfn)

vis_x = visimg.shape[1]
vis_y = visimg.shape[0]
psII_x = psIIimg.shape[1]
psII_y = psIIimg.shape[0]

masko = pcv.threshold.otsu(psIIimg, 255, 'light')
mask = pcv.erode(masko, 2, 2)
final_mask = mask

mask_shift_x = pcv.shift_img(final_mask, 14, 'left')
mask_shift_y = pcv.shift_img(mask_shift_x, 3, 'top')

# vis_mask = pcv.resize(final_mask, resize_x = vis_x/psII_x, resize_y=vis_y/psII_y)

vis_mask2 = cv2.resize(mask_shift_y, (vis_x, vis_y),
                       interpolation=cv2.INTER_CUBIC)

vis_masked = pcv.apply_mask(visimg, vis_mask2, 'black')

# vs_ws=pcv.watershed_segmentation(visimg,vis_mask2)
Example #12
def check_cycles(skel_img):
    """ Check for cycles in a skeleton image
    Inputs:
    skel_img     = Skeletonized image

    Returns:
    cycle_img    = Image with cycles identified

    :param skel_img: numpy.ndarray
    :return cycle_img: numpy.ndarray
    """

    # Store debug
    debug = params.debug
    params.debug = None

    # Create the mask needed for cv2.floodFill, must be larger than the image
    h, w = skel_img.shape[:2]
    mask = np.zeros((h + 2, w + 2), np.uint8)

    # Copy the skeleton since cv2.floodFill will draw on it
    skel_copy = skel_img.copy()
    cv2.floodFill(skel_copy, mask=mask, seedPoint=(0, 0), newVal=255)

    # Invert so the holes are white and background black
    just_cycles = cv2.bitwise_not(skel_copy)

    # Erode slightly so that cv2.findContours doesn't think diagonal pixels are separate contours
    just_cycles = erode(just_cycles, 2, 1)

    # Use pcv.find_objects to turn plots of holes into countable contours
    cycle_objects, cycle_hierarchies = find_objects(just_cycles, just_cycles)

    # Count the number of holes
    num_cycles = len(cycle_objects)

    # Make debugging image
    cycle_img = skel_img.copy()
    cycle_img = dilate(cycle_img, params.line_thickness, 1)
    cycle_img = cv2.cvtColor(cycle_img, cv2.COLOR_GRAY2RGB)
    rand_color = color_palette(num_cycles)
    for i, cnt in enumerate(cycle_objects):
        cv2.drawContours(cycle_img, cycle_objects, i, rand_color[i], params.line_thickness, lineType=8,
                         hierarchy=cycle_hierarchies)

    # Store Cycle Data
    outputs.add_observation(variable='num_cycles', trait='number of cycles',
                            method='plantcv.plantcv.morphology.check_cycles', scale='none', datatype=int,
                            value=num_cycles, label='none')

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(cycle_img, os.path.join(params.debug_outdir, str(params.device) + '_cycles.png'))
    elif params.debug == 'plot':
        plot_image(cycle_img)

    return cycle_img