Example #1
def vismask(img):

    a_img = pcv.rgb2gray_lab(img, channel='a')
    thresh_a = pcv.threshold.binary(a_img, 124, 255, 'dark')
    b_img = pcv.rgb2gray_lab(img, channel='b')
    thresh_b = pcv.threshold.binary(b_img, 127, 255, 'light')

    mask = pcv.logical_and(thresh_a, thresh_b)
    mask = pcv.fill(mask, 800)
    final_mask = pcv.dilate(mask, 2, 1)

    return final_mask
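# A minimal usage sketch for vismask (assumptions: pcv is imported as
# `from plantcv import plantcv as pcv`, and "plant.jpg" is a hypothetical RGB image file):
if __name__ == "__main__":
    rgb_img, img_path, img_filename = pcv.readimage("plant.jpg")
    plant_mask = vismask(rgb_img)
    # Apply the mask to the original image for a quick visual check
    plant_only = pcv.apply_mask(rgb_img, plant_mask, 'white')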
Example #2
def plant_cv(img):
    counter = 0
    debug = None

    counter, s = pcv.rgb2gray_hsv(img, 's', counter, debug)
    counter, s_thresh = pcv.binary_threshold(s, 145, 255, 'light', counter,
                                             debug)
    counter, s_mblur = pcv.median_blur(s_thresh, 5, counter, debug)

    # Convert RGB to LAB and extract the Blue channel
    counter, b = pcv.rgb2gray_lab(img, 'b', counter, debug)

    # Threshold the blue image
    counter, b_thresh = pcv.binary_threshold(b, 145, 255, 'light', counter,
                                             debug)
    counter, b_cnt = pcv.binary_threshold(b, 145, 255, 'light', counter, debug)
    # Join the thresholded saturation and blue-yellow images
    counter, bs = pcv.logical_or(s_mblur, b_cnt, counter, debug)
    counter, masked = pcv.apply_mask(img, bs, 'white', counter, debug)

    #----------------------------------------
    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    counter, masked_a = pcv.rgb2gray_lab(masked, 'a', counter, debug)
    counter, masked_b = pcv.rgb2gray_lab(masked, 'b', counter, debug)

    # Threshold the green-magenta and blue images
    counter, maskeda_thresh = pcv.binary_threshold(masked_a, 115, 255, 'dark',
                                                   counter, debug)
    counter, maskeda_thresh1 = pcv.binary_threshold(masked_a, 135, 255,
                                                    'light', counter, debug)
    counter, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light',
                                                   counter, debug)

    # Join the thresholded green-magenta and blue-yellow images (OR)
    counter, ab1 = pcv.logical_or(maskeda_thresh, maskedb_thresh, counter,
                                  debug)
    counter, ab = pcv.logical_or(maskeda_thresh1, ab1, counter, debug)
    counter, ab_cnt = pcv.logical_or(maskeda_thresh1, ab1, counter, debug)

    # Fill small objects
    counter, ab_fill = pcv.fill(ab, ab_cnt, 200, counter, debug)

    # Apply mask (for vis images, mask_color=white)
    counter, masked2 = pcv.apply_mask(masked, ab_fill, 'white', counter, debug)

    zeros = np.zeros(masked2.shape[:2], dtype="uint8")
    merged = cv2.merge([zeros, ab_fill, zeros])

    return merged, masked2
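# Note: the counter/debug arguments above follow the pre-3.0 PlantCV API. A rough sketch of the
# same opening step written against the 3.x keyword API used in the later examples (an
# illustration only, not a drop-in replacement for the whole function):
def plant_cv_v3_first_step(img):
    # Saturation channel, binary threshold, and median blur, as in plant_cv() above
    s = pcv.rgb2gray_hsv(rgb_img=img, channel='s')
    s_thresh = pcv.threshold.binary(gray_img=s, threshold=145, max_value=255, object_type='light')
    s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)
    return s_mblur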
Example #3
 def process_pot(self, pot_image):
     device = 0
     debug = None  # used by the pcv calls below
     updated_pot_image = self.threshold_green(pot_image)
     # plt.imshow(updated_pot_image)
     # plt.show()
     device, a = pcv.rgb2gray_lab(updated_pot_image, 'a', device)
     device, img_binary = pcv.binary_threshold(a, 127, 255, 'dark', device,
                                               None)
     # plt.imshow(img_binary)
     # plt.show()
     mask = np.copy(img_binary)
     device, fill_image = pcv.fill(img_binary, mask, 50, device)
     device, dilated = pcv.dilate(fill_image, 1, 1, device)
     device, id_objects, obj_hierarchy = pcv.find_objects(
         updated_pot_image, updated_pot_image, device)
     device, roi1, roi_hierarchy = pcv.define_roi(updated_pot_image,
                                                  'rectangle', device, None,
                                                  'default', debug, False)
     device, roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
         updated_pot_image, 'partial', roi1, roi_hierarchy, id_objects,
         obj_hierarchy, device, debug)
     device, obj, mask = pcv.object_composition(updated_pot_image,
                                                roi_objects, hierarchy3,
                                                device, debug)
     device, shape_header, shape_data, shape_img = pcv.analyze_object(
         updated_pot_image, "Example1", obj, mask, device, debug, False)
     print(shape_data[1])
Example #4
def main(path, imagename):
    args = {'names': 'names.txt', 'outdir': './output-images'}
    #Read image
    img1, path, filename = pcv.readimage(path + imagename, "native")
    #pcv.params.debug=args['debug']
    #img1 = pcv.white_balance(img,roi=(400,800,200,200))
    #img1 = cv2.resize(img1,(4000,2000))
    shift1 = pcv.shift_img(img1, 10, 'top')
    img1 = shift1
    a = pcv.rgb2gray_lab(img1, 'a')
    img_binary = pcv.threshold.binary(a, 120, 255, 'dark')
    fill_image = pcv.fill(img_binary, 10)
    dilated = pcv.dilate(fill_image, 1, 1)
    id_objects, obj_hierarchy = pcv.find_objects(img1, dilated)
    roi_contour, roi_hierarchy = pcv.roi.rectangle(4000, 2000, -2000, -4000,
                                                   img1)
    #print(roi_contour)
    roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img1, 'partial', roi_contour, roi_hierarchy, id_objects, obj_hierarchy)
    clusters_i, contours, hierarchies = pcv.cluster_contours(
        img1, roi_objects, roi_obj_hierarchy, 1, 4)
    # pcv.params.debug = "print"
    out = args['outdir']
    names = args['names']
    output_path = pcv.cluster_contour_splitimg(img1,
                                               clusters_i,
                                               contours,
                                               hierarchies,
                                               out,
                                               file=filename,
                                               filenames=names)
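# Hypothetical call sketch (assumptions: './images/' and 'plant1.jpg' are placeholder values,
# and 'names.txt' plus the './output-images' directory exist as the args dict above expects):
if __name__ == "__main__":
    main('./images/', 'plant1.jpg')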
Example #5
def generate_plant_mask_new(input_image):
    '''
    Generate a mask that segments the plant out of the input image

    Ref: https://plantcv.readthedocs.io/en/latest/vis_tutorial/

    Args:
        input_image: the input image
    Returns:
        mask: boolean numpy array of the same size as the input image that segments the plant
    '''

    # Get the saturation channel
    # hsv_image = rgb2hsv(input_image)
    # s = hsv_image[:,:,1]
    s = pcv.rgb2gray_hsv(rgb_img=input_image, channel='s')

    # threshold the saturation image
    s_thresh = s > 130

    # perform a median blur on the thresholded image
    # s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)
    s_cnt = pcv.median_blur(gray_img=s_thresh, ksize=15)

    # extract the LAB a and b channels and threshold them
    b = pcv.rgb2gray_lab(rgb_img=input_image, channel='b')
    a = pcv.rgb2gray_lab(rgb_img=input_image, channel='a')

    a_thresh = a <= 120
    b_thresh = b >= 105

    lab_mask = np.logical_and(a_thresh, b_thresh)

    lab_cnt = pcv.median_blur(gray_img=lab_mask, ksize=15)

    # join the two thresholded masks
    bs = np.logical_and(s_cnt, lab_cnt)

    # filling small holes
    res = np.ones(bs.shape, dtype=np.uint8) * 255
    res[bs] = 0
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))
    res = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)

    res = res == 0

    return res
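# Usage sketch (assumption: `rgb` is a hypothetical RGB NumPy array, e.g. from pcv.readimage).
# Because generate_plant_mask_new returns a boolean array rather than a 0/255 image,
# plain NumPy indexing is used here instead of pcv.apply_mask:
def apply_plant_mask(rgb):
    mask = generate_plant_mask_new(rgb)
    masked = rgb.copy()
    masked[~mask] = 0  # black out background pixels
    return masked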
Example #6
def main():
    # Get options
    args = options()

    debug = args.debug

    # Read image
    img, path, filename = pcv.readimage(args.image)

    # Pipeline step
    device = 0

    device, corrected_img = pcv.white_balance(device, img, debug,
                                              (500, 1000, 500, 500))
    img = corrected_img

    device, img_gray_sat = pcv.rgb2gray_lab(img, 'a', device, debug)

    device, img_binary = pcv.binary_threshold(img_gray_sat, 120, 255, 'dark',
                                              device, debug)

    mask = np.copy(img_binary)
    device, fill_image = pcv.fill(img_binary, mask, 300, device, debug)

    device, id_objects, obj_hierarchy = pcv.find_objects(
        img, fill_image, device, debug)

    device, roi, roi_hierarchy = pcv.define_roi(img, 'rectangle', device, None,
                                                'default', debug, True, 1800,
                                                1600, -1500, -500)

    device, roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img, 'partial', roi, roi_hierarchy, id_objects, obj_hierarchy, device,
        debug)

    device, obj, mask = pcv.object_composition(img, roi_objects,
                                               roi_obj_hierarchy, device,
                                               debug)

    outfile = os.path.join(args.outdir, filename)

    device, color_header, color_data, color_img = pcv.analyze_color(
        img, img, mask, 256, device, debug, None, 'v', 'img', 300, outfile)

    device, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, "img", obj, mask, device, debug, outfile)

    shapepath = outfile[:-4] + '_shapes.jpg'
    shapepic = cv2.imread(shapepath)
    # The mask is 0/255, so count non-zero pixels rather than summing raw values
    plantsize = "The plant is " + str(np.count_nonzero(mask)) + " pixels large"
    cv2.putText(shapepic, plantsize, (500, 500), cv2.FONT_HERSHEY_SIMPLEX, 5,
                (0, 255, 0), 10)
    pcv.print_image(shapepic, outfile[:-4] + '-out_shapes.jpg')
Example #7
def main():
    # Capture the IP cam image with OpenCV
    ## Not done yet: the code exists, but how to connect it to GCloud is still unknown, haha

    # Take the image already captured with OpenCV and process it in PlantCV
    path = 'Image test\capture (1).jpg'
    gmbTumbuhanRaw, path, filename = pcv.readimage(path, mode='native')

    # straighten the tilted image
    koreksiRot = pcv.rotate(gmbTumbuhanRaw, 2, True)
    gmbKoreksi = koreksiRot
    pcv.print_image(gmbKoreksi, 'Image test\Hasil\gambar_koreksi.jpg')

    # Set the white balance of the image
    # try to keep the image evenly lit (no shadows from anywhere!)
    # CHANGE the region of interest (roi) values according to the image size!!
    koreksiWhiteBal = pcv.white_balance(gmbTumbuhanRaw,
                                        roi=(2, 100, 1104, 1200))
    pcv.print_image(koreksiWhiteBal, 'Image test\Hasil\koreksi_white_bal.jpg')

    # adjust the image contrast so the plant stands out from the background colour
    # tip: the background should not be the same green as the plant
    kontrasBG = pcv.rgb2gray_lab(koreksiWhiteBal, channel='a')
    pcv.print_image(kontrasBG, 'Image test\Hasil\koreksi_kontras.jpg')

    # binary threshold the image
    # adjust the threshold as needed
    binthres = pcv.threshold.binary(gray_img=kontrasBG,
                                    threshold=115,
                                    max_value=255,
                                    object_type='dark')

    # remove noise by filling small objects
    resiksitik = pcv.fill(binthres, size=10)
    pcv.print_image(resiksitik, 'Image test\Hasil\\noiseFill.jpg')

    # smooth with dilation
    dilasi = pcv.dilate(resiksitik, ksize=12, i=1)

    # find the objects and set the ROI size
    id_objek, hirarki_objek = pcv.find_objects(gmbTumbuhanRaw, mask=dilasi)
    roi_contour, roi_hierarchy = pcv.roi.rectangle(img=gmbKoreksi,
                                                   x=20,
                                                   y=96,
                                                   h=1100,
                                                   w=680)

    # write out the image (just for debugging)
    roicontour = cv2.drawContours(gmbKoreksi, roi_contour, -1, (0, 0, 255), 3)
    pcv.print_image(roicontour, 'Image test\Hasil\\roicontour.jpg')
    """
Example #8
        # cv2.imshow('aligned', aligned_depth_color_image)

        # Validate that both frames are valid
        if not aligned_depth_frame or not color_frame:
            continue

        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        s = pcv.rgb2gray_hsv(color_image, 's')  # plantcv
        s_thresh = pcv.threshold.binary(s, 85, 255, 'light')  # plantcv
        s_mblur = pcv.median_blur(s_thresh, 5)
        s_cnt = pcv.median_blur(s_thresh, 5)
        # cv2.imshow('color - depth3', s_mblur)
        # Convert RGB to LAB and extract the Blue channel
        b = pcv.rgb2gray_lab(color_image, 'b')

        # Threshold the blue image
        b_thresh = pcv.threshold.binary(b, 160, 255, 'light')
        b_cnt = pcv.threshold.binary(b, 160, 255, 'light')

        # Fill small objects
        # b_fill = pcv.fill(b_thresh, 10)
        mask = pcv.naive_bayes_classifier(color_image, "naive_bayes_pdfs.txt")
        #histogram_low = np.sum(mask["plant"][int(360/2):, :], axis=0)
        #histogram_up = np.sum(mask["plant"][:int(360/2), :], axis=0)
        histogram = np.sum(mask["plant"][int(360 / 2):, :], axis=0)
        win_left_x = np.argmax(histogram)

        # ################################################################################################
        branch_x, branch_y, left_dot_x, left_dot_y, cv_rgb_sliding_windows = functions.sliding_windows(
Example #9
def main():
    # Initialize options
    args = options()
    # Set PlantCV debug mode to input debug method
    pcv.params.debug = args.debug

    # Use PlantCV to read in the input image. The function outputs an image as a NumPy array, the path to the file,
    # and the image filename
    img, path, filename = pcv.readimage(filename=args.image)

    # ## Segmentation

    # ### Saturation channel
    # Convert the RGB image to HSV colorspace and extract the saturation channel
    s = pcv.rgb2gray_hsv(rgb_img=img, channel='s')

    # Use a binary threshold to set an inflection value where all pixels in the grayscale saturation image below the
    # threshold get set to zero (pure black) and all pixels at or above the threshold get set to 255 (pure white)
    s_thresh = pcv.threshold.binary(gray_img=s, threshold=80, max_value=255, object_type='light')

    # ### Blue-yellow channel
    # Convert the RGB image to LAB colorspace and extract the blue-yellow channel
    b = pcv.rgb2gray_lab(rgb_img=img, channel='b')

    # Use a binary threshold to set an inflection value where all pixels in the grayscale blue-yellow image below the
    # threshold get set to zero (pure black) and all pixels at or above the threshold get set to 255 (pure white)
    b_thresh = pcv.threshold.binary(gray_img=b, threshold=134, max_value=255, object_type='light')

    # ### Green-magenta channel
    # Convert the RGB image to LAB colorspace and extract the green-magenta channel
    a = pcv.rgb2gray_lab(rgb_img=img, channel='a')

    # In the green-magenta image the plant pixels are darker than the background. Setting object_type="dark" will
    # invert the image first and then use a binary threshold to set an inflection value where all pixels in the
    # grayscale green-magenta image below the threshold get set to zero (pure black) and all pixels at or above the
    # threshold get set to 255 (pure white)
    a_thresh = pcv.threshold.binary(gray_img=a, threshold=122, max_value=255, object_type='dark')

    # Combine the binary images for the saturation and blue-yellow channels. The "or" operator returns a binary image
    # that is white when a pixel was white in either or both input images
    bs = pcv.logical_or(bin_img1=s_thresh, bin_img2=b_thresh)

    # Combine the binary images for the combined saturation and blue-yellow channels and the green-magenta channel.
    # The "or" operator returns a binary image that is white when a pixel was white in either or both input images
    bsa = pcv.logical_or(bin_img1=bs, bin_img2=a_thresh)

    # The combined binary image labels plant pixels well but the background still has pixels labeled as foreground.
    # Small white noise (salt) in the background can be removed by filtering white objects in the image by size and
    # setting a size threshold where smaller objects can be removed
    bsa_fill1 = pcv.fill(bin_img=bsa, size=15)  # Fill small noise

    # Before more stringent size filtering is done we want to connect plant parts that may still be disconnected from
    # the main plant. Use a dilation to expand the boundary of white regions. Ksize is the size of a box scanned
    # across the image and i is the number of times a scan is done
    bsa_fill2 = pcv.dilate(gray_img=bsa_fill1, ksize=3, i=3)

    # Remove small objects by size again but use a higher threshold
    bsa_fill3 = pcv.fill(bin_img=bsa_fill2, size=250)

    # Use the binary image to identify objects or connected components.
    id_objects, obj_hierarchy = pcv.find_objects(img=img, mask=bsa_fill3)

    # Because the background still contains pixels labeled as foreground, the object list contains background.
    # Because these images were collected in an automated system the plant is always centered in the image at the
    # same position each time. Define a region of interest (ROI) to set the area where we expect to find plant
    # pixels. PlantCV can make simple ROI shapes like rectangles, circles, etc. but here we use a custom ROI to fit a
    # polygon around the plant area
    roi_custom, roi_hier_custom = pcv.roi.custom(img=img, vertices=[[1085, 1560], [1395, 1560], [1395, 1685],
                                                                    [1890, 1744], [1890, 25], [600, 25], [615, 1744],
                                                                    [1085, 1685]])

    # Use the ROI to filter out objects found outside the ROI. When `roi_type = "cutto"` objects outside the ROI are
    # cropped out. The default `roi_type` is "partial" which allows objects to overlap the ROI and be retained
    roi_objects, hierarchy, kept_mask, obj_area = pcv.roi_objects(img=img, roi_contour=roi_custom,
                                                                  roi_hierarchy=roi_hier_custom,
                                                                  object_contour=id_objects,
                                                                  obj_hierarchy=obj_hierarchy, roi_type='cutto')

    # Filter remaining objects by size again to remove any remaining background objects
    filled_mask1 = pcv.fill(bin_img=kept_mask, size=350)

    # Use a closing operation to first dilate (expand) and then erode (shrink) the plant to fill in any additional
    # gaps in leaves or stems
    filled_mask2 = pcv.closing(gray_img=filled_mask1)

    # Remove holes or dark spot noise (pepper) in the plant binary image
    filled_mask3 = pcv.fill_holes(filled_mask2)

    # With the clean binary image identify the contour of the plant
    id_objects, obj_hierarchy = pcv.find_objects(img=img, mask=filled_mask3)

    # Because a plant or object of interest may be composed of multiple contours, it is required to combine all
    # remaining contours into a single contour before measurements can be done
    obj, mask = pcv.object_composition(img=img, contours=id_objects, hierarchy=obj_hierarchy)

    # ## Measurements
    # PlantCV has several built-in measurement or analysis methods. Here, basic measurements of size
    # and shape are done. Additional typical modules would include plant height (`pcv.analyze_bound_horizontal`) and
    # color (`pcv.analyze_color`)
    shape_img = pcv.analyze_object(img=img, obj=obj, mask=mask)

    # Save the shape image if requested
    if args.writeimg:
        outfile = os.path.join(args.outdir, filename[:-4] + "_shapes.png")
        pcv.print_image(img=shape_img, filename=outfile)

    # ## Morphology workflow

    # Update a few PlantCV parameters for plotting purposes
    pcv.params.text_size = 1.5
    pcv.params.text_thickness = 5
    pcv.params.line_thickness = 15

    # Convert the plant mask into a "skeletonized" image where each path along the stem and leaves are a single pixel
    # wide
    skel = pcv.morphology.skeletonize(mask=mask)

    # Sometimes wide parts of leaves or stems are skeletonized in the direction perpendicular to the main path. These
    # "barbs" or "spurs" can be removed by pruning the skeleton to remove small paths. Pruning will also separate the
    # individual path segments (leaves and stem parts)
    pruned, segmented_img, segment_objects = pcv.morphology.prune(skel_img=skel, size=30, mask=mask)
    pruned, segmented_img, segment_objects = pcv.morphology.prune(skel_img=pruned, size=3, mask=mask)

    # Leaf and stem segments above are separated but only into individual paths. We can sort the segments into stem
    # and leaf paths by identifying primary segments (stems; those that end in a branch point) and secondary segments
    # (leaves; those that begin at a branch point and end at a tip point)
    leaf_objects, other_objects = pcv.morphology.segment_sort(skel_img=pruned, objects=segment_objects, mask=mask)

    # Label the segment unique IDs
    segmented_img, labeled_id_img = pcv.morphology.segment_id(skel_img=pruned, objects=leaf_objects, mask=mask)

    # Measure leaf insertion angles. Measures the angle between a line fit through the stem paths and a line fit
    # through the first `size` points of each leaf path
    labeled_angle_img = pcv.morphology.segment_insertion_angle(skel_img=pruned, segmented_img=segmented_img,
                                                               leaf_objects=leaf_objects, stem_objects=other_objects,
                                                               size=22)

    # Save leaf angle image if requested
    if args.writeimg:
        outfile = os.path.join(args.outdir, filename[:-4] + "_leaf_insertion_angles.png")
        pcv.print_image(img=labeled_angle_img, filename=outfile)

    # ## Other potential morphological measurements
    # There are many other functions that extract data from within the
    # morphology sub-package of PlantCV. For our purposes, we are most interested in the relative angle between each
    # leaf and the stem, which we measure with `plantcv.morphology.segment_insertion_angle`. However, the following
    # cells show some of the other traits that we are able to measure from images that can be successfully sorted into
    # primary and secondary segments.

    # Segment the plant binary mask using the leaf and stem segments. Allows for the measurement of individual leaf
    # areas
    # filled_img = pcv.morphology.fill_segments(mask=mask, objects=leaf_objects)

    # Measure the path length of each leaf (geodesic distance)
    # labeled_img2 = pcv.morphology.segment_path_length(segmented_img=segmented_img, objects=leaf_objects)

    # Measure the straight-line, branch point to tip distance (Euclidean) for each leaf
    # labeled_img3 = pcv.morphology.segment_euclidean_length(segmented_img=segmented_img, objects=leaf_objects)

    # Measure the curvature of each leaf (Values closer to 1 indicate that a segment is a straight line while larger
    # values indicate the segment has more curvature)
    # labeled_img4 = pcv.morphology.segment_curvature(segmented_img=segmented_img, objects=leaf_objects)

    # Measure absolute leaf angles (angle of linear regression line fit to each leaf object) Note: negative values
    # signify leaves to the left of the stem, positive values signify leaves to the right of the stem
    # labeled_img5 = pcv.morphology.segment_angle(segmented_img=segmented_img, objects=leaf_objects)

    # Measure leaf curvature in degrees
    # labeled_img6 = pcv.morphology.segment_tangent_angle(segmented_img=segmented_img, objects=leaf_objects, size=35)

    # Measure stem characteristics like stem angle and length
    # stem_img = pcv.morphology.analyze_stem(rgb_img=img, stem_objects=other_objects)

    # Remove unneeded observations (hack)
    _ = pcv.outputs.observations.pop("tips")
    _ = pcv.outputs.observations.pop("branch_pts")
    angles = pcv.outputs.observations["segment_insertion_angle"]["value"]
    remove_indices = []
    for i, value in enumerate(angles):
        if value == "NA":
            remove_indices.append(i)
    remove_indices.sort(reverse=True)
    for i in remove_indices:
        _ = pcv.outputs.observations["segment_insertion_angle"]["value"].pop(i)

    # ## Save the results out to a file for downstream analysis
    pcv.print_results(filename=args.result)
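# The options() helper is not included in this example. A plausible minimal version, inferred
# from the attributes the workflow reads (image, debug, writeimg, outdir, result); the exact
# flag names are assumptions:
import argparse

def options():
    parser = argparse.ArgumentParser(description="PlantCV morphology workflow (sketch).")
    parser.add_argument("-i", "--image", help="Input image file.", required=True)
    parser.add_argument("-o", "--outdir", help="Directory for output images.", default=".")
    parser.add_argument("-r", "--result", help="Results file.", required=True)
    parser.add_argument("-w", "--writeimg", help="Write output images.", default=False, action="store_true")
    parser.add_argument("-D", "--debug", help="Debug mode: None, 'print', or 'plot'.", default=None)
    return parser.parse_args()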
Example #10
    def initcrop(imagePath):
        dire = dir
        path = dire + '/Classifyer_dump'
        try:
            os.makedirs(path)
        except OSError:
            pass
        image = cv2.imread(imagePath)
        blue_image = pcv.rgb2gray_lab(image, 'l')
        Gaussian_blue = cv2.adaptiveThreshold(blue_image, 255,
                                              cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                              cv2.THRESH_BINARY, 981,
                                              -1)  # 241 is good 981
        cv2.imwrite(os.path.join(path, "blue_test.png"), Gaussian_blue)
        fill = pcv.fill_holes(Gaussian_blue)
        fill_again = pcv.fill(fill, 100000)

        id_objects, obj_hierarchy = pcv.find_objects(
            img=image,
            mask=fill_again)  # lazy way to findContours and draw them

        roi1, roi_hierarchy = pcv.roi.rectangle(img=image,
                                                x=3000,
                                                y=1000,
                                                h=200,
                                                w=300)

        roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
            img=image,
            roi_contour=roi1,
            roi_hierarchy=roi_hierarchy,
            object_contour=id_objects,
            obj_hierarchy=obj_hierarchy,
            roi_type='partial')
        cv2.imwrite(os.path.join(path, "plate_mask.png"), kept_mask)

        mask = cv2.imread(os.path.join(path, "plate_mask.png"))
        result = image * (mask.astype(image.dtype))
        result = cv2.bitwise_not(result)
        cv2.imwrite(os.path.join(path, "AutoCrop.png"), result)

        output = cv2.connectedComponentsWithStats(kept_mask, connectivity=8)
        stats = output[2]
        left = (stats[1, cv2.CC_STAT_LEFT])
        # print(stats[1, cv2.CC_STAT_TOP])
        # print(stats[1, cv2.CC_STAT_HEIGHT])
        # exit(2)

        L, a, b = cv2.split(result)
        # cv2.imwrite("gray_scale.png", L)
        plate_threshold = cv2.adaptiveThreshold(b, 255,
                                                cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                                cv2.THRESH_BINARY, 87,
                                                -1)  # 867 is good 241
        cv2.imwrite(os.path.join(path, "plate_threshold.png"), plate_threshold)

        fill_again2 = pcv.fill(plate_threshold, 1000)

        cv2.imwrite(os.path.join(path, "fill_test.png"), fill_again2)
        # fill = pcv.fill_holes(fill_again2)
        # cv2.imwrite(os.path.join(path, "fill_test2.png"), fill)
        blur_image = pcv.median_blur(fill_again2, 10)
        nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(
            blur_image, connectivity=8)
        sizes = stats[1:, -1]
        nb_components = nb_components - 1
        min_size = 20000
        img2 = np.zeros((output.shape))
        for i in range(0, nb_components):
            if sizes[i] <= min_size:
                img2[output == i + 1] = 255
        cv2.imwrite(os.path.join(path, "remove_20000.png"),
                    img2)  # this can be made better to speed it up
        thresh_image = img2.astype(
            np.uint8)  # maybe crop to the roi below then do it
        thresh_image = pcv.fill_holes(thresh_image)
        cv2.imwrite("NEWTEST.jpg", thresh_image)
        id_objects, obj_hierarchy = pcv.find_objects(img=image,
                                                     mask=thresh_image)

        roi1, roi_hierarchy = pcv.roi.rectangle(img=image,
                                                x=(left + 380),
                                                y=750,
                                                h=175,
                                                w=100)
        try:
            where_cell = 0
            roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
                img=image,
                roi_contour=roi1,
                roi_hierarchy=roi_hierarchy,
                object_contour=id_objects,
                obj_hierarchy=obj_hierarchy,
                roi_type='partial')

            cv2.imwrite(os.path.join(path, "test_mask.png"), kept_mask)
            mask = cv2.imread(os.path.join(path, "test_mask.png"))
            result = image * (mask.astype(image.dtype))
            result = cv2.bitwise_not(result)
            cv2.imwrite(os.path.join(path, "TEST.png"), result)

            output = cv2.connectedComponentsWithStats(kept_mask,
                                                      connectivity=8)
            stats = output[2]
            centroids = output[3]
            centroids_x = (int(centroids[1][0]))
            centroids_y = (int(centroids[1][1]))
        except:
            where_cell = 1
            print("did this work?")
            roi1, roi_hierarchy = pcv.roi.rectangle(img=image,
                                                    x=(left + 380),
                                                    y=3200,
                                                    h=100,
                                                    w=100)
            roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
                img=image,
                roi_contour=roi1,
                roi_hierarchy=roi_hierarchy,
                object_contour=id_objects,
                obj_hierarchy=obj_hierarchy,
                roi_type='partial')
            cv2.imwrite(os.path.join(path, "test_mask.png"), kept_mask)
            mask = cv2.imread(os.path.join(path, "test_mask.png"))
            result = image * (mask.astype(image.dtype))
            result = cv2.bitwise_not(result)
            cv2.imwrite(os.path.join(path, "TEST.png"), result)

            output = cv2.connectedComponentsWithStats(kept_mask,
                                                      connectivity=8)
            stats = output[2]
            centroids = output[3]
            centroids_x = (int(centroids[1][0]))
            centroids_y = (int(centroids[1][1]))
        flag = 0

        # print(stats[1, cv2.CC_STAT_AREA])
        if ((stats[1, cv2.CC_STAT_AREA]) > 4000):
            flag = 30
        # print(centroids_x)
        # print(centroids_y)

        # print(centroids)
        if (where_cell == 0):
            left = (centroids_x - 70)
            right = (centroids_x + 3695 + flag)  # was 3715
            top = (centroids_y - 80)
            bottom = (centroids_y + 2462)
        if (where_cell == 1):
            left = (centroids_x - 70)
            right = (centroids_x + 3715 + flag)
            top = (centroids_y - 2480)
            bottom = (centroids_y + 62)

        # print(top)
        # print(bottom)
        image = Image.open(imagePath)
        img_crop = image.crop((left, top, right, bottom))
        # img_crop.show()
        img_crop.save(os.path.join(path, 'Cropped_full_yeast.png'))
        circle_me = cv2.imread(os.path.join(path, "Cropped_full_yeast.png"))
        cropped_img = cv2.imread(
            os.path.join(path, "Cropped_full_yeast.png"
                         ))  # changed from Yeast_Cluster.%d.png  %counter
        L, a, b = cv2.split(cropped_img)  # can do l a or b
        Gaussian_blue = cv2.adaptiveThreshold(b, 255,
                                              cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                              cv2.THRESH_BINARY, 241,
                                              -1)  # For liz's pictures 241
        cv2.imwrite(os.path.join(path, "blue_test.png"), Gaussian_blue)
        blur_image = pcv.median_blur(Gaussian_blue, 10)
        heavy_fill_blue = pcv.fill(blur_image, 1000)  # value 400
        hole_fill = pcv.fill_holes(heavy_fill_blue)
        cv2.imwrite(os.path.join(path, "Cropped_Threshold.png"), hole_fill)
Example #11
def main():

    # Get options
    pcv.params.debug = args.debug  #set debug mode
    pcv.params.debug_outdir = args.outdir  #set output directory

    # Read image (readimage mode defaults to native but if image is RGBA then specify mode='rgb')
    # Inputs:
    #   filename - Image file to be read in
    #   mode - Return mode of image; either 'native' (default), 'rgb', 'gray', or 'csv'
    img, path, filename = pcv.readimage(filename=args.image, mode='rgb')

    ### SELECTING THE PLANT

    ### Attempt 5: combining filters
    # Parameters
    hue_lower_tresh = 22  # 24
    hue_higher_tresh = 50  # 50
    saturation_lower_tresh = 138  # 140
    saturation_higher_tresh = 230  # 230
    value_lower_tresh = 120  # 125
    value_higher_tresh = 255  # 255
    # RGB color space
    green_lower_tresh = 105  # 110
    green_higher_tresh = 255  # 255
    red_lower_tresh = 22  # 24
    red_higher_thresh = 98  # 98
    blue_lower_tresh = 85  # 85
    blue_higher_tresh = 253  # 255
    # CIELAB color space
    #lab_blue_lower_tresh = 0            # Blue yellow channel
    #lab_blue_higher_tresh = 255

    s = pcv.rgb2gray_hsv(rgb_img=img, channel='h')
    mask, masked_image = pcv.threshold.custom_range(
        rgb_img=s,
        lower_thresh=[hue_lower_tresh],
        upper_thresh=[hue_higher_tresh],
        channel='gray')
    masked = pcv.apply_mask(rgb_img=img, mask=mask, mask_color='white')
    # Filtered on Hue
    s = pcv.rgb2gray_hsv(rgb_img=masked, channel='s')
    mask, masked_image = pcv.threshold.custom_range(
        rgb_img=s,
        lower_thresh=[saturation_lower_tresh],
        upper_thresh=[saturation_higher_tresh],
        channel='gray')
    masked = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')
    #filtered on saturation
    s = pcv.rgb2gray_hsv(rgb_img=masked, channel='v')
    mask, masked_image = pcv.threshold.custom_range(
        rgb_img=s,
        lower_thresh=[value_lower_tresh],
        upper_thresh=[value_higher_tresh],
        channel='gray')
    masked = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')
    #filtered on value
    mask, masked = pcv.threshold.custom_range(
        rgb_img=masked,
        lower_thresh=[0, green_lower_tresh, 0],
        upper_thresh=[255, green_higher_tresh, 255],
        channel='RGB')
    masked = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')
    #filtered on green
    mask, masked = pcv.threshold.custom_range(
        rgb_img=masked,
        lower_thresh=[red_lower_tresh, 0, 0],
        upper_thresh=[red_higher_thresh, 255, 255],
        channel='RGB')
    masked = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')
    #filtered on red
    mask_old, masked_old = pcv.threshold.custom_range(
        rgb_img=masked,
        lower_thresh=[0, 0, blue_lower_tresh],
        upper_thresh=[255, 255, blue_higher_tresh],
        channel='RGB')
    masked = pcv.apply_mask(rgb_img=masked_old,
                            mask=mask_old,
                            mask_color='white')
    #filtered on blue
    #b = pcv.rgb2gray_lab(rgb_img = masked, channel = 'b')   # Converting to CIELAB blue-yellow image
    #b_thresh =pcv.threshold.binary(gray_img = b, threshold=lab_blue_lower_tresh, max_value = lab_blue_higher_tresh)

    ###_____________________________________ Now to identify objects
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images
    maskeda_thresh = pcv.threshold.binary(
        gray_img=masked_a,
        threshold=125,  # original 115
        max_value=255,
        object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(
        gray_img=masked_a,
        threshold=140,  # original 135
        max_value=255,
        object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b,
                                          threshold=128,
                                          max_value=255,
                                          object_type='light')

    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)

    # Fill small objects
    # Inputs:
    #   bin_img - Binary image data
    #   size - Minimum object area size in pixels (must be an integer), and smaller objects will be filled
    ab = pcv.median_blur(gray_img=ab, ksize=3)
    ab_fill = pcv.fill(bin_img=ab, size=1000)
    #print("filled")
    # Apply mask (for VIS images, mask_color=white)
    masked2 = pcv.apply_mask(rgb_img=masked, mask=ab_fill, mask_color='white')
    # ID the objects
    id_objects, obj_hierarchy = pcv.find_objects(masked2, ab_fill)
    # Let's just take the largest
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2,
                                            x=0,
                                            y=0,
                                            h=960,
                                            w=1280)  # Currently hardcoded

    # Decide which objects to keep
    # Inputs:
    #    img            = img to display kept objects
    #    roi_contour    = contour of roi, output from any ROI function
    #    roi_hierarchy  = contour of roi, output from any ROI function
    #    object_contour = contours of objects, output from pcv.find_objects function
    #    obj_hierarchy  = hierarchy of objects, output from pcv.find_objects function
    #    roi_type       = 'partial' (default, for partially inside), 'cutto', or
    #    'largest' (keep only largest contour)
    with HiddenPrints():
        roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
            img=img,
            roi_contour=roi1,
            roi_hierarchy=roi_hierarchy,
            object_contour=id_objects,
            obj_hierarchy=obj_hierarchy,
            roi_type='partial')
    # Object combine kept objects
    # Inputs:
    #   img - RGB or grayscale image data for plotting
    #   contours - Contour list
    #   hierarchy - Contour hierarchy array
    obj, mask = pcv.object_composition(img=img,
                                       contours=roi_objects,
                                       hierarchy=hierarchy3)
    #print("final plant")
    new_im = Image.fromarray(masked2)
    new_im.save("output//" + args.filename + "last_masked.png")

    ##################_________________ Analysis

    outfile = args.outdir + "/" + filename
    # Here come all the analysis functions.
    # pcv.acute_vertex(img, obj, 30, 15, 100)

    color_img = pcv.analyze_color(rgb_img=img,
                                  mask=kept_mask,
                                  hist_plot_type=None)
    #new_im = Image.fromarray(color_img)
    #new_im.save(args.filename + "color_img.png")

    # Find shape properties, output shape image (optional)

    # Inputs:
    #   img - RGB or grayscale image data
    #   obj- Single or grouped contour object
    #   mask - Binary image mask to use as mask for moments analysis
    shape_img = pcv.analyze_object(img=img, obj=obj, mask=mask)
    new_im = Image.fromarray(shape_img)
    new_im.save("output//" + args.filename + "shape_img.png")
    # Shape properties relative to user boundary line (optional)

    # Inputs:
    #   img - RGB or grayscale image data
    #   obj - Single or grouped contour object
    #   mask - Binary mask of selected contours
    #   line_position - Position of boundary line (a value of 0 would draw a line
    #                   through the bottom of the image)
    boundary_img1 = pcv.analyze_bound_horizontal(img=img,
                                                 obj=obj,
                                                 mask=mask,
                                                 line_position=1680)
    new_im = Image.fromarray(boundary_img1)
    new_im.save("output//" + args.filename + "boundary_img.png")
    # Determine color properties: Histograms, Color Slices, output color analyzed histogram (optional)

    # Inputs:
    #   rgb_img - RGB image data
    #   mask - Binary mask of selected contours
    #   hist_plot_type - None (default), 'all', 'rgb', 'lab', or 'hsv'
    #                    This is the data to be printed to the SVG histogram file
    color_histogram = pcv.analyze_color(rgb_img=img,
                                        mask=kept_mask,
                                        hist_plot_type='all')
    #new_im = Image.fromarray(color_histogram)
    #new_im.save(args.filename + "color_histogram_img.png")

    # Pseudocolor the grayscale image

    # Inputs:
    #     gray_img - Grayscale image data
    #     obj - Single or grouped contour object (optional), if provided the pseudocolored image gets
    #           cropped down to the region of interest.
    #     mask - Binary mask (optional)
    #     background - Background color/type. Options are "image" (gray_img, default), "white", or "black". A mask
    #                  must be supplied.
    #     cmap - Colormap
    #     min_value - Minimum value for range of interest
    #     max_value - Maximum value for range of interest
    #     dpi - Dots per inch for image if printed out (optional, if dpi=None then the default is set to 100 dpi).
    #     axes - If False then the title, x-axis, and y-axis won't be displayed (default axes=True).
    #     colorbar - If False then the colorbar won't be displayed (default colorbar=True)
    pseudocolored_img = pcv.visualize.pseudocolor(gray_img=s,
                                                  mask=kept_mask,
                                                  cmap='jet')
    #new_im = Image.fromarray(pseudocolored_img)
    #new_im.save(args.filename + "pseudocolored.png")

    # Write shape and color data to results file
    pcv.print_results(filename=args.result)
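# HiddenPrints, used above to silence the roi_objects() output, is not defined in this snippet.
# A common implementation (an assumption here; requires `import os, sys`) is a context manager
# that temporarily redirects stdout to os.devnull:
class HiddenPrints:
    def __enter__(self):
        self._original_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout.close()
        sys.stdout = self._original_stdout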
Example #12
def generateMask(input, output, maskType=MASK_TYPES['BW']):
    pcv.params.debug = True  #set debug mode
    # pcv.params.debug_outdir="./output.txt" #set output directory

    # Read image (readimage mode defaults to native but if image is RGBA then specify mode='rgb')
    # Inputs:
    #   filename - Image file to be read in
    #   mode - Return mode of image; either 'native' (default), 'rgb', 'gray', 'envi', or 'csv'
    img, path, filename = pcv.readimage(filename=input, mode='rgb')

    s = pcv.rgb2gray_hsv(rgb_img=img, channel='s')

    # Threshold the saturation image
    s_thresh = pcv.threshold.binary(gray_img=s,
                                    threshold=85,
                                    max_value=255,
                                    object_type='light')

    # Median Blur
    s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)
    s_cnt = pcv.median_blur(gray_img=s_thresh, ksize=5)

    # Convert RGB to LAB and extract the Blue channel
    b = pcv.rgb2gray_lab(rgb_img=img, channel='b')

    # Threshold the blue image
    b_thresh = pcv.threshold.binary(gray_img=b,
                                    threshold=160,
                                    max_value=255,
                                    object_type='light')
    b_cnt = pcv.threshold.binary(gray_img=b,
                                 threshold=160,
                                 max_value=255,
                                 object_type='light')

    # Fill small objects
    # b_fill = pcv.fill(b_thresh, 10)

    # Join the thresholded saturation and blue-yellow images
    bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=b_cnt)

    # Apply Mask (for VIS images, mask_color=white)
    masked = pcv.apply_mask(img=img, mask=bs, mask_color='white')

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a,
                                          threshold=115,
                                          max_value=255,
                                          object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a,
                                           threshold=135,
                                           max_value=255,
                                           object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b,
                                          threshold=128,
                                          max_value=255,
                                          object_type='light')

    # Join the thresholded green-magenta and blue-yellow images (OR)
    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)
    if maskType == MASK_TYPES['BW']:
        pcv.print_image(ab, filename=output)
        return (True, None)

    # Fill small objects
    ab_fill = pcv.fill(bin_img=ab, size=200)

    # Apply mask (for VIS images, mask_color=white)
    masked2 = pcv.apply_mask(img=masked, mask=ab_fill, mask_color='black')
    if maskType == MASK_TYPES['COLORED']:
        pcv.print_image(masked2, filename=output)
        return (True, None)

    return (False, 'Unknown mask type.')
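# MASK_TYPES is referenced above but not defined in this snippet; it must exist before
# generateMask is defined, since it appears in the default argument. A plausible definition
# and call (both assumptions for illustration):
#
#     MASK_TYPES = {'BW': 'bw', 'COLORED': 'colored'}
#     ok, err = generateMask('input.jpg', 'mask.png', maskType=MASK_TYPES['COLORED'])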
    """
Example #13
def segmentation(imgW, imgNIR, shape):
    # VIS example from PlantCV with a few modifications

    # Higher value = more strict selection
    s_threshold = 165
    b_threshold = 200

    # Read image
    img = imread(imgW)
    #img = cvtColor(img, COLOR_BGR2RGB)
    imgNIR = imread(imgNIR)
    #imgNIR = cvtColor(imgNIR, COLOR_BGR2RGB)
    #img, path, img_filename = pcv.readimage(filename=imgW, mode="native")
    #imgNIR, pathNIR, imgNIR_filename = pcv.readimage(filename=imgNIR, mode="native")

    # Convert RGB to HSV and extract the saturation channel
    s = pcv.rgb2gray_hsv(rgb_img=img, channel='s')

    # Threshold the saturation image
    s_thresh = pcv.threshold.binary(gray_img=s, threshold=s_threshold, max_value=255, object_type='light')

    # Median Blur
    s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)
    s_cnt = pcv.median_blur(gray_img=s_thresh, ksize=5)

    # Convert RGB to LAB and extract the Blue channel
    b = pcv.rgb2gray_lab(rgb_img=img, channel='b')

    # Threshold the blue image ORIGINAL 160
    b_thresh = pcv.threshold.binary(gray_img=b, threshold=b_threshold, max_value=255, object_type='light')
    b_cnt = pcv.threshold.binary(gray_img=b, threshold=b_threshold, max_value=255, object_type='light')

    # Join the thresholded saturation and blue-yellow images
    bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=b_cnt)

    # Apply Mask (for VIS images, mask_color=white)
    masked = pcv.apply_mask(img=img, mask=bs, mask_color='white')

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images
    # 115
    # 135
    # 128
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a, threshold=115, max_value=255, object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a, threshold=135, max_value=255, object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b, threshold=128, max_value=255, object_type='light')

    # Join the thresholded green-magenta and blue-yellow images (OR)
    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)

    # Fill small objects
    ab_fill = pcv.fill(bin_img=ab, size=200)

    # Apply mask (for VIS images, mask_color=white)
    masked2 = pcv.apply_mask(img=masked, mask=ab_fill, mask_color='white')

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(img=masked2, mask=ab_fill)

    # Define ROI
    height = shape[0]
    width = shape[1]
    roi1, roi_hierarchy= pcv.roi.rectangle(img=masked2, x=0, y=0, h=height, w=width)

    # Decide which objects to keep
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img=img, roi_contour=roi1, 
                                                               roi_hierarchy=roi_hierarchy, 
                                                               object_contour=id_objects, 
                                                               obj_hierarchy=obj_hierarchy,
                                                               roi_type='partial')

    # Object combine kept objects
    obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)
    
    # Fill holes in the mask; this works well for living plants, less so for dead ones
    filled_mask = pcv.fill_holes(mask)

    final = pcv.apply_mask(img=imgNIR, mask=mask, mask_color='white')
    pcv.print_image(final, "./segment/segment-temp.png")
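# Hypothetical call sketch (file paths are placeholders; shape is (height, width), matching how
# it is unpacked above, and the ./segment directory is assumed to exist):
if __name__ == "__main__":
    segmentation("vis_image.png", "nir_image.png", shape=(960, 1280))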
Example #14
def main():
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)

    pcv.params.debug=args.debug #set debug mode

    # STEP 1: Check whether this is a night image. Some images in these datasets were captured
    # at night, even though nothing is visible in them. To make sure an image was not taken at
    # night, we check that it isn't mostly dark (0=black, 255=white).
    # If it is a night image, a fatal error is raised and the workflow stops.

    if np.average(img) < 50:
        pcv.fatal_error("Night Image")
    else:
        pass

    # STEP 2: Normalize the white color so you can later
    # compare color between images.
    # Inputs:
    #   img = image object, RGB colorspace
    #   roi = region for white reference, if none uses the whole image,
    #         otherwise (x position, y position, box width, box height)

    # white balance image based on white toughspot

    #img1 = pcv.white_balance(img=img,roi=(400,800,200,200))
    img1 = pcv.white_balance(img=img, mode='hist', roi=None)

    # STEP 3: Rotate the image
    # Inputs:
    #   img = image object, RGB color space
    #   rotation_deg = Rotation angle in degrees, can be negative, positive values 
    #                  will move counter-clockwise 
    #   crop = If True then image will be cropped to original image dimensions, if False
    #          the image size will be adjusted to accommodate new image dimensions 


    rotate_img = pcv.rotate(img=img1,rotation_deg=-1, crop=False)

    # STEP 4: Shift image. This step is important for clustering later on.
    # For this image it also allows you to push the green raspberry pi camera
    # out of the image. This step might not be necessary for all images.
    # The resulting image is the same size as the original.
    # Inputs:
    #   img    = image object
    #   number = integer, number of pixels to move image
    #   side   = direction to move from "top", "bottom", "right","left"

    shift1 = pcv.shift_img(img=img1, number=300, side='top')
    img1 = shift1

    # STEP 5: Convert image from RGB colorspace to LAB colorspace
    # Keep only the green-magenta channel (grayscale)
    # Inputs:
    #    img     = image object, RGB colorspace
    #    channel = color subchannel ('l' = lightness, 'a' = green-magenta , 'b' = blue-yellow)

    #a = pcv.rgb2gray_lab(img=img1, channel='a')
    a = pcv.rgb2gray_lab(rgb_img=img1, channel='a')

    # STEP 6: Set a binary threshold on the saturation channel image
    # Inputs:
    #    img         = img object, grayscale
    #    threshold   = threshold value (0-255)
    #    max_value   = value to apply above threshold (usually 255 = white)
    #    object_type = light or dark
    #       - If object is light then standard thresholding is done
    #       - If object is dark then inverse thresholding is done

    img_binary = pcv.threshold.binary(gray_img=a, threshold=120, max_value=255, object_type='dark')
    #img_binary = pcv.threshold.binary(gray_img=a, threshold=120, max_value=255, object_type='dark')
    #                                                   ^
    #                                                   |
    #                                     adjust this value

    # STEP 7: Fill in small objects (speckles)
    # Inputs:
    #    bin_img  = image object, binary. img will be returned after filling
    #    size = minimum object area size in pixels (integer)

    fill_image = pcv.fill(bin_img=img_binary, size=10)
    #                                          ^
    #                                          |
    #                           adjust this value

    # STEP 8: Dilate so that you don't lose leaves (just in case)
    # Inputs:
    #    img    = input image
    #    ksize  = kernel size
    #    i      = iterations, i.e. number of consecutive filtering passes

    #dilated = pcv.dilate(img=fill_image, ksize=1, i=1)
    dilated = pcv.dilate(gray_img=fill_image, ksize=2, i=1)

    # STEP 9: Find objects (contours: black-white boundaries)
    # Inputs:
    #    img  = image that the objects will be overlayed
    #    mask = what is used for object detection

    id_objects, obj_hierarchy = pcv.find_objects(img=img1, mask=dilated)
    #id_objects, obj_hierarchy = pcv.find_objects(gray_img, mask)

    # STEP 10: Define region of interest (ROI)
    # Inputs:
    #    img       = img to overlay roi
    #    x_adj     = adjust center along x axis
    #    y_adj     = adjust center along y axis
    #    h_adj     = adjust height
    #    w_adj     = adjust width
    # roi_contour, roi_hierarchy = pcv.roi.rectangle(img1, 10, 500, -10, -100)
    #                                                      ^                ^
    #                                                      |________________|
    #                                            adjust these four values

    roi_contour, roi_hierarchy = pcv.roi.rectangle(img=img1, x=200, y=190, h=2000, w=3000)

    # STEP 11: Keep objects that overlap with the ROI
    # Inputs:
    #    img            = img to display kept objects
    #    roi_contour    = contour of roi, output from any ROI function
    #    roi_hierarchy  = contour of roi, output from any ROI function
    #    object_contour = contours of objects, output from "Identifying Objects" function
    #    obj_hierarchy  = hierarchy of objects, output from "Identifying Objects" function
    #    roi_type       = 'partial' (default, for partially inside), 'cutto', or 'largest' (keep only largest contour)

    roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(img=img1, roi_contour=roi_contour, 
                                                                          roi_hierarchy=roi_hierarchy,
                                                                          object_contour=id_objects,
                                                                          obj_hierarchy=obj_hierarchy, 
                                                                          roi_type='partial')

    # STEP 12: This function takes an image with multiple contours and
    # clusters them based on user input of rows and columns

    # Inputs:
    #    img               = An RGB image
    #    roi_objects       = object contours in an image that are needed to be clustered.
    #    roi_obj_hierarchy = object hierarchy
    #    nrow              = number of rows to cluster (this should be the approximate number of desired rows in the entire image even if there isn't a literal row of plants)
    #    ncol              = number of columns to cluster (this should be the approximate number of desired columns in the entire image even if there isn't a literal column of plants)
    #    show_grid         = if True then a grid gets displayed in debug mode (default show_grid=False)

    clusters_i, contours, hierarchies = pcv.cluster_contours(img=img1, roi_objects=roi_objects, 
                                                             roi_obj_hierarchy=roi_obj_hierarchy, 
                                                             nrow=2, ncol=3)

    # STEP 13: This function takes clustered contours and splits them into multiple images,
    # also does a check to make sure that the number of inputted filenames matches the number
    # of clustered contours. If no filenames are given then the objects are just numbered
    # Inputs:
    #    img                     = ideally a masked RGB image.
    #    grouped_contour_indexes = output of cluster_contours, indexes of clusters of contours
    #    contours                = contours to cluster, output of cluster_contours
    #    hierarchy               = object hierarchy
    #    outdir                  = directory for output images
    #    file                    = the name of the input image to use as a base name , output of filename from read_image function
    #    filenames               = input txt file with list of filenames in order from top to bottom left to right (likely list of genotypes)

    # Set global debug behavior to None (default), "print" (to file), or "plot" (Jupyter Notebooks or X11)
    pcv.params.debug = "print"

    out = args.outdir
    names = args.names

    output_path, imgs, masks = pcv.cluster_contour_splitimg(rgb_img=img1, grouped_contour_indexes=clusters_i, 
                                                            contours=contours, hierarchy=hierarchies, 
                                                            outdir=out, file=filename, filenames=names)
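    # Hedged illustration (hypothetical file name and labels, not part of the
    # original workflow): the optional names file is plain text with one label
    # per clustered plant, ordered top to bottom, left to right, e.g.
    #
    #     genotype_names.txt
    #     ------------------
    #     WT
    #     mutant-1
    #     mutant-2
    #
    # If filenames=None, the split images are simply numbered instead.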
Example #15
0
    # Collect the cropped image
    cropped_img = image[int(roi[1]):int(roi[1] + roi[3]),
                        int(roi[0]):int(roi[0] + roi[2])]

    # Convert RGB to HSV and extract a single channel (here the hue channel)
    # The channel can be 'h', 's', or 'v' depending on the colour of the flower
    saturation_img = pcv.rgb2gray_hsv(cropped_img, 'h')

    # Threshold the grayscale image
    # Depending on the image, object_type can be 'light' or 'dark'
    # 'light' keeps pixels above the threshold as the object; 'dark' keeps pixels below it
    saturation_thresh = pcv.threshold.binary(saturation_img, 85, 255, 'light')

    # Apply median blur
    saturation_mblur = pcv.median_blur(saturation_thresh, 5)

    # Convert RGB to LAB and extract a single channel (here the lightness channel)
    # As with the HSV function, this can be 'l', 'a', or 'b' depending on the colour of the flower
    blue_channel_img = pcv.rgb2gray_lab(cropped_img, 'l')
    blue_channel_cnt = pcv.threshold.binary(blue_channel_img, 160, 255,
                                            'light')

    # Join the two thresholded channel images (logical OR)
    blue_saturated = pcv.logical_or(saturation_mblur, blue_channel_cnt)

    # Apply black colour mask
    masked = pcv.apply_mask(cropped_img, blue_saturated, 'black')

    # Save image
    cv2.imwrite('tmp/' + image, masked)
Example #16
0
#   sigma_y - Standard deviation in Y direction; if sigmaY is
#            None (default), sigmaY is taken to equal sigmaX
gaussian_img = pcv.gaussian_blur(img=s_thresh,
                                 ksize=(5, 5),
                                 sigma_x=0,
                                 sigma_y=None)
pcv.print_image(img=gaussian_img, filename="upload/output_imgs/Blur_img.jpg")

# In[8]:

# Convert RGB to LAB and extract the blue channel ('b')

# Input:
#   rgb_img - RGB image data
#   channel - Split by 'l' (lightness), 'a' (green-magenta), or 'b' (blue-yellow) channel
b = pcv.rgb2gray_lab(rgb_img=img, channel='b')

# Threshold the blue channel image
b_thresh = pcv.threshold.binary(gray_img=b,
                                threshold=160,
                                max_value=255,
                                object_type='light')
pcv.print_image(img=b_thresh, filename="upload/output_imgs/Extracted_img.jpg")
pcv.print_image(img=b, filename="upload/output_imgs/Gray_img.jpg")

# In[9]:

# Join the thresholded saturation and blue-yellow images with a logical OR operation

# Inputs:
#   bin_img1 - Binary image data to be compared to bin_img2
def main():
    # Get options
    args = options()

    if args.debug:
        pcv.params.debug = args.debug  # set debug mode
        if args.debugdir:
            pcv.params.debug_outdir = args.debugdir  # set debug directory
            os.makedirs(args.debugdir, exist_ok=True)

    # pixel_resolution
    # mm
    # see pixel_resolution.xlsx for calibration curve for pixel to mm translation
    pixelresolution = 0.052
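    # Worked example (illustrative numbers only): an object covering 10,000
    # pixels corresponds to roughly 10000 * 0.052**2 ≈ 27 sq mm, which is the
    # same conversion applied to 'plantarea' further down in this workflow.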

    # The result file should exist if plantcv-workflow.py was run
    if os.path.exists(args.result):
        # Open the result file
        results = open(args.result, "r")
        # The result file would have image metadata in it from plantcv-workflow.py, read it into memory
        metadata = json.load(results)
        # Close the file
        results.close()
        # Delete the file, we will create new ones
        os.remove(args.result)
        plantbarcode = metadata['metadata']['plantbarcode']['value']
        print(plantbarcode,
              metadata['metadata']['timestamp']['value'],
              sep=' - ')

    else:
        # If the file did not exist (for testing), initialize metadata as an empty JSON object string
        metadata = "{}"
        regpat = re.compile(args.regex)
        plantbarcode = re.search(regpat, args.image).groups()[0]

    # read images and create mask
    img, _, fn = pcv.readimage(args.image)
    imagename = os.path.splitext(fn)[0]

    # create mask

    # taf=filters.try_all_threshold(s_img)
    ## remove background
    s_img = pcv.rgb2gray_hsv(img, 's')
    min_s = filters.threshold_minimum(s_img)
    thresh_s = pcv.threshold.binary(s_img, min_s, 255, 'light')
    rm_bkgrd = pcv.fill_holes(thresh_s)

    ## low greenness
    thresh_s = pcv.threshold.binary(s_img, min_s + 15, 255, 'dark')
    # taf = filters.try_all_threshold(s_img)
    c = pcv.logical_xor(rm_bkgrd, thresh_s)
    cinv = pcv.invert(c)
    cinv_f = pcv.fill(cinv, 500)
    cinv_f_c = pcv.closing(cinv_f, np.ones((5, 5)))
    cinv_f_c_e = pcv.erode(cinv_f_c, 2, 1)

    ## high greenness
    a_img = pcv.rgb2gray_lab(img, channel='a')
    # taf = filters.try_all_threshold(a_img)
    t_a = filters.threshold_isodata(a_img)
    thresh_a = pcv.threshold.binary(a_img, t_a, 255, 'dark')
    thresh_a = pcv.closing(thresh_a, np.ones((5, 5)))
    thresh_a_f = pcv.fill(thresh_a, 500)
    ## combined mask
    lor = pcv.logical_or(cinv_f_c_e, thresh_a_f)
    close = pcv.closing(lor, np.ones((2, 2)))
    fill = pcv.fill(close, 800)
    erode = pcv.erode(fill, 3, 1)
    fill2 = pcv.fill(erode, 1200)
    # dilate = pcv.dilate(fill2,2,2)
    mask = fill2

    final_mask = np.zeros_like(mask)

    # Compute greenness
    # split color channels
    b, g, r = cv2.split(img)
    # print green intensity
    # g_img = pcv.visualize.pseudocolor(g, cmap='Greens', background='white', min_value=0, max_value=255, mask=mask, axes=False)

    # convert color channels to uint16 so we can add them (sums can exceed 255, the max of the original uint8 format)
    g = g.astype('uint16')
    r = r.astype('uint16')
    b = b.astype('uint16')
    denom = g + r + b

    # greenness index
    out_flt = np.zeros_like(denom, dtype='float32')
    # divide green by sum of channels to compute greenness index with values 0-1
    gi = np.divide(g,
                   denom,
                   out=out_flt,
                   where=np.logical_and(denom != 0, mask > 0))
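    # Quick sanity check on the index (illustrative pixel values): a pure green
    # pixel (r, g, b) = (0, 255, 0) gives gi = 255 / 255 = 1.0, a neutral grey
    # pixel (100, 100, 100) gives 100 / 300 ≈ 0.33, and masked-out or black
    # pixels keep the initialized value of 0.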

    # find objects
    c, h = pcv.find_objects(img, mask)
    rc, rh = pcv.roi.multi(img, coord=[(1300, 900), (1300, 2400)], radius=350)
    # Turn off debug temporarily, otherwise there will be a lot of plots
    pcv.params.debug = None
    # Loop over each region of interest
    for i, rc_i in enumerate(rc):
        rh_i = rh[i]

        # Add ROI number to output. Before roi_objects so result has NA if no object.
        pcv.outputs.add_observation(variable='roi',
                                    trait='roi',
                                    method='roi',
                                    scale='int',
                                    datatype=int,
                                    value=i,
                                    label='#')

        roi_obj, hierarchy_obj, submask, obj_area = pcv.roi_objects(
            img,
            roi_contour=rc_i,
            roi_hierarchy=rh_i,
            object_contour=c,
            obj_hierarchy=h,
            roi_type='partial')

        if obj_area == 0:

            print('\t!!! No object found in ROI', str(i))
            pcv.outputs.add_observation(
                variable='plantarea',
                trait='plant area in sq mm',
                method='observations.area*pixelresolution^2',
                scale=pixelresolution,
                datatype="<class 'float'>",
                value=0,
                label='sq mm')

        else:

            # Combine multiple plant objects within an ROI into a single object
            plant_object, plant_mask = pcv.object_composition(
                img=img, contours=roi_obj, hierarchy=hierarchy_obj)

            final_mask = pcv.image_add(final_mask, plant_mask)

            # Save greenness for individual ROI
            grnindex = np.mean(gi[np.where(plant_mask > 0)])
            pcv.outputs.add_observation(
                variable='greenness_index',
                trait='mean normalized greenness index',
                method='g/sum(b+g+r)',
                scale='[0,1]',
                datatype="<class 'float'>",
                value=float(grnindex),
                label='/1')

            # Analyze all colors
            hist = pcv.analyze_color(img, plant_mask, 'all')

            # Analyze the shape of the current plant
            shape_img = pcv.analyze_object(img, plant_object, plant_mask)
            plant_area = pcv.outputs.observations['area'][
                'value'] * pixelresolution**2
            pcv.outputs.add_observation(
                variable='plantarea',
                trait='plant area in sq mm',
                method='observations.area*pixelresolution^2',
                scale=pixelresolution,
                datatype="<class 'float'>",
                value=plant_area,
                label='sq mm')

        # end if-else

        # At this point we have observations for one plant
        # We can write these out to a unique results file
        # Here I will name the results file with the ROI ID combined with the original result filename
        basename, ext = os.path.splitext(args.result)
        filename = basename + "-roi" + str(i) + ext
        # Save the existing metadata to the new file
        with open(filename, "w") as r:
            json.dump(metadata, r)
        pcv.print_results(filename=filename)
        # The results are saved, now clear out the observations so the next loop adds new ones for the next plant
        pcv.outputs.clear()

        if args.writeimg and obj_area != 0:
            imgdir = os.path.join(args.outdir, 'shape_images', plantbarcode)
            os.makedirs(imgdir, exist_ok=True)
            pcv.print_image(
                shape_img,
                os.path.join(imgdir,
                             imagename + '-roi' + str(i) + '-shape.png'))

            imgdir = os.path.join(args.outdir, 'colorhist_images',
                                  plantbarcode)
            os.makedirs(imgdir, exist_ok=True)
            pcv.print_image(
                hist,
                os.path.join(imgdir,
                             imagename + '-roi' + str(i) + '-colorhist.png'))

# end roi loop

    if args.writeimg:
        # save greenness image of entire tray
        imgdir = os.path.join(args.outdir, 'pseudocolor_images', plantbarcode)
        os.makedirs(imgdir, exist_ok=True)
        gi_img = pcv.visualize.pseudocolor(gi,
                                           obj=None,
                                           mask=final_mask,
                                           cmap='viridis',
                                           axes=False,
                                           min_value=0.3,
                                           max_value=0.6,
                                           background='black',
                                           obj_padding=0)
        gi_img = add_scalebar(gi_img,
                              pixelresolution=pixelresolution,
                              barwidth=20,
                              barlocation='lower left')
        gi_img.set_size_inches(6, 6, forward=False)
        gi_img.savefig(os.path.join(imgdir, imagename + '-greenness.png'),
                       bbox_inches='tight')
        gi_img.clf()
Example #18
0
def main():
    # Get options
    args = options()

    debug = args.debug

    # Read image
    img, path, filename = pcv.readimage(args.image)

    # Pipeline step
    device = 0

    device, img1 = pcv.white_balance(device, img, debug, roi=(1000, 1000, 500, 500))

    device, a = pcv.rgb2gray_lab(img1, 'a', device, debug)

    device, img_binary = pcv.binary_threshold(a, 116, 255, 'dark', device, debug)

    mask = np.copy(img_binary)
    device, fill_image = pcv.fill(img_binary, mask, 300, device, debug)

    device, id_objects, obj_hierarchy = pcv.find_objects(img1, fill_image, device, debug)

    device, roi, roi_hierarchy = pcv.define_roi(img1, 'rectangle', device, None, 'default', debug, True,
                                                1800, 1600, -1500, -500)

    device, roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(img1, 'partial', roi, roi_hierarchy,
                                                                                  id_objects, obj_hierarchy, device,
                                                                                  debug)

    outfile = os.path.join(args.outdir, filename)

    device, color_header, color_data, color_img = pcv.analyze_color(img1, img1, kept_mask, 256, device, debug, None,
                                                                    'v', 'img', 300, outfile)

    device, masked = pcv.apply_mask(img1, kept_mask, 'white', device, debug)
    device, dilated = pcv.dilate(kept_mask, 10, 2, device, debug)
    device, plant_objects, plant_hierarchy = pcv.find_objects(img1, dilated, device, debug)

    img_copy = np.copy(img1)

    color = [(255, 0, 255), (0, 255, 0), (66, 134, 244), (255, 255, 0)]

    for i in range(0, len(plant_objects)):
        if len(plant_objects[i]) < 100:
            pass
        else:
            background = np.zeros((np.shape(img1)), np.uint8)
            cv2.drawContours(background, plant_objects, i, (255, 255, 255), -1, lineType=8, hierarchy=plant_hierarchy)
            device, grayimg = pcv.rgb2gray(background, device, debug)
            device, masked1 = pcv.apply_mask(masked, grayimg, 'white', device, debug)
            device, a1 = pcv.rgb2gray_lab(masked1, 'a', device, debug)
            device, img_binary1 = pcv.binary_threshold(a1, 116, 255, 'dark', device, debug)
            device, single_object, single_hierarchy = pcv.find_objects(masked1, img_binary1, device, debug)
            device, obj, mask = pcv.object_composition(img1, single_object, single_hierarchy, device, debug)
            device, shape_header, shape_data, shape_img = pcv.analyze_object(img, "img", obj, mask, device, debug)
            cv2.drawContours(img_copy, plant_objects, i, color[i], -1, lineType=8, hierarchy=plant_hierarchy)
            plantsize = "Plant matching this color is " + str(shape_data[1]) + " pixels large"
            cv2.putText(img_copy, plantsize, (500, (i + 1) * 300), cv2.FONT_HERSHEY_SIMPLEX, 5, color[i], 10)

    pcv.print_image(img_copy, os.path.join(args.outdir, "arabidopsis-out_shapes.jpg"))
Example #19
0
def mainPage(response):

    print(" ")
    print(
        "--------------------------- Main Page Refreshed! -------------------------------"
    )
    print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    print(" ")

    mode_selected_obj_global = mode_selected.objects.latest('date')
    devices_obj_global = devices.objects.latest('date')

    mode1_obj_global = mode1.objects.latest('date')
    mode2_obj_global = mode2.objects.latest('date')
    mode3_obj_global = mode3.objects.latest('date')
    mode4_obj_global = mode4.objects.latest('date')

    if response.POST.get('action') == 'setup':
        print(" ")
        print("~Initializing~")
        print(" ")
        print("Mode: " + str(mode_selected_obj_global.modeNumber))
        print("Grid: " + mode_selected_obj_global.grid)
        print(" ")
        print(" ")

        json = {'modeNumber': mode_selected_obj_global.modeNumber}

        return JsonResponse(json)

    # Create instances so you can insert into the database
    mode_selected_ = mode_selected()
    devices_ = devices()
    devices_2 = devices()
    sensors_ = sensors()
    mode1_vision_system_ = mode1_vision_system()
    mode2_vision_system_ = mode2_vision_system()
    mode3_vision_system_ = mode3_vision_system()
    mode4_vision_system_ = mode4_vision_system()

    if response.POST.get('action') == 'getSensorValues':
        print(" ")
        print("~Sensor Values Updated~")
        print(" ")

        # Start SPI connection
        spi = spidev.SpiDev()  # Created an object
        spi.open(0, 0)

        humidity, temperature = Adafruit_DHT.read_retry(DHT_SENSOR, DHT_PIN)
        humidity2, temperature2 = Adafruit_DHT.read_retry(
            DHT_SENSOR2, DHT_PIN2)

        def analogInput(channel):
            spi.max_speed_hz = 1350000
            adc = spi.xfer2([1, (8 + channel) << 4, 0])
            data = ((adc[1] & 3) << 8) + adc[2]
            return data
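        # The helper above reconstructs a 10-bit ADC reading (0-1023) from the
        # SPI response: the low 2 bits of adc[1] are the high bits and adc[2]
        # is the low byte (this matches a typical MCP3008-style hookup; the
        # exact chip is an assumption). interp() below rescales 0-1023 to a
        # 100-0 moisture percentage.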

        output = analogInput(0)  # Reading from CH0
        output = interp(output, [0, 1023], [100, 0])
        output = int(output)
        #print("Moistures", output)

        currentMoisture = output
        averageTemperature = (temperature + temperature2) / 2
        averageHumidity = (humidity + humidity2) / 2

        temperatureStatus = 'good'
        humidityStatus = 'good'
        soilMoistureStatus = 'good'

        temperatureStatusSummary = "Default"
        humidityStatusSummary = "Default"
        soilMoistureStatusSummary = "Default"

        if (averageTemperature > 26):
            temperatureStatus = 'high'  # Too High
        else:
            temperatureStatus = 'good'  # Good

        if (averageHumidity < 50):
            humidityStatus = 'low'  # Too Low
        elif (averageHumidity > 80):
            humidityStatus = 'high'  # Too High
        else:
            humidityStatus = 'good'  # Good

        if (currentMoisture >= 10 and currentMoisture <= 30):
            soilMoistureStatus = 'dry'
            # Dry
        elif (currentMoisture >= 31 and currentMoisture <= 70):
            soilMoistureStatus = 'moist'
            # Moist
        elif (currentMoisture >= 71):
            soilMoistureStatus = 'wet'
            # Wet

        if (temperatureStatus == 'high'):
            temperatureStatusSummary = 'Too High!'
        else:
            temperatureStatusSummary = 'Good'

        if (humidityStatus == 'high'):
            humidityStatusSummary = 'Too High!'
        elif (humidityStatus == 'low'):
            humidityStatusSummary = 'Too Low!'
        else:
            humidityStatusSummary = 'Good'

        if (soilMoistureStatus == 'dry'):
            soilMoistureStatus = 'Dry!'
            print(" ")
            print("~ (PIN 19) Watering System Activated~")
            print(" ")
            devices_.fansStatus = devices_obj_global.fansStatus
            devices_.lightsStatus = devices_obj_global.lightsStatus
            devices_.calibrationStatus = devices_obj_global.calibrationStatus
            devices_.waterStatus = 'On'
            devices_.seedStatus = devices_obj_global.seedStatus
            devices_.save()
            GPIO.output(19, GPIO.HIGH)
            sleep(1)
            GPIO.output(19, GPIO.LOW)
            print(" ")
            print("~ (PIN 19) Watering System Deactivated~")
            print(" ")
            devices_2.fansStatus = devices_obj_global.fansStatus
            devices_2.lightsStatus = devices_obj_global.lightsStatus
            devices_2.calibrationStatus = devices_obj_global.calibrationStatus
            devices_2.waterStatus = 'Off'
            devices_2.seedStatus = devices_obj_global.seedStatus
            devices_2.save()

        elif (soilMoistureStatus == 'moist'):
            soilMoistureStatus = 'Moist'
        elif (soilMoistureStatus == 'wet'):
            soilMoistureStatus = 'Wet!'

        print("Temp1: " + str(temperature))
        print("Hum1: " + str(humidity))
        print("Temp2: " + str(temperature2))
        print("Hum2: " + str(humidity2))
        print("Moisture: " + str(currentMoisture))
        print("Ave temp: " + str(round(averageTemperature, 2)))
        print("Ave humidity: " + str(round(averageHumidity, 0)))

        if (temperatureStatus == 'low' and humidityStatus == 'low'):
            print(" ")
            print("~Fans Deactivated~")
            print(" ")
            GPIO.output(20, GPIO.LOW)
            GPIO.output(16, GPIO.LOW)
            devices_.fansStatus = 'Off'
            devices_.lightsStatus = devices_obj_global.lightsStatus
            devices_.calibrationStatus = devices_obj_global.calibrationStatus
            devices_.waterStatus = devices_obj_global.waterStatus
            devices_.seedStatus = devices_obj_global.seedStatus
            devices_.save()
        elif (temperatureStatus == 'high' and humidityStatus == 'high'):
            print(" ")
            print("~Fans Activated~")
            print(" ")
            GPIO.output(20, GPIO.HIGH)
            GPIO.output(16, GPIO.HIGH)
            devices_.fansStatus = 'On'
            devices_.lightsStatus = devices_obj_global.lightsStatus
            devices_.calibrationStatus = devices_obj_global.calibrationStatus
            devices_.waterStatus = devices_obj_global.waterStatus
            devices_.seedStatus = devices_obj_global.seedStatus
            devices_.save()
        elif (temperatureStatus == 'low' and humidityStatus == 'high'):
            print(" ")
            print("~Fans Activated~")
            print(" ")
            GPIO.output(20, GPIO.HIGH)
            GPIO.output(16, GPIO.HIGH)
            devices_.fansStatus = 'On'
            devices_.lightsStatus = devices_obj_global.lightsStatus
            devices_.calibrationStatus = devices_obj_global.calibrationStatus
            devices_.waterStatus = devices_obj_global.waterStatus
            devices_.seedStatus = devices_obj_global.seedStatus
            devices_.save()
        elif (temperatureStatus == 'high' and humidityStatus == 'low'):
            print(" ")
            print("~Fans Activated~")
            print(" ")
            GPIO.output(20, GPIO.HIGH)
            GPIO.output(16, GPIO.HIGH)
            devices_.fansStatus = 'On'
            devices_.lightsStatus = devices_obj_global.lightsStatus
            devices_.calibrationStatus = devices_obj_global.calibrationStatus
            devices_.waterStatus = devices_obj_global.waterStatus
            devices_.seedStatus = devices_obj_global.seedStatus
            devices_.save()

        sensors_.temperature = round(averageTemperature, 2)
        sensors_.humidity = round(averageHumidity, 0)
        sensors_.moisture = currentMoisture
        sensors_.temperatureStatus = temperatureStatusSummary
        sensors_.humidityStatus = humidityStatusSummary
        sensors_.soilMoistureStatus = soilMoistureStatus
        sensors_.save()

        sensors_obj = sensors.objects.latest('date')
        mode_selected_obj_first = mode_selected.objects.first()
        mode_selected_obj = mode_selected.objects.latest('date')

        date1 = mode_selected_obj_first.date
        date2 = sensors_obj.date

        def numOfDays(date1, date2):
            return (date2 - date1).days

        mode_selected_.daysCounter = numOfDays(date1, date2)
        mode_selected_.grid = mode_selected_obj.grid
        mode_selected_.rows = mode_selected_obj.rows
        mode_selected_.columns = mode_selected_obj.columns
        mode_selected_.modeNumber = mode_selected_obj.modeNumber
        mode_selected_.save()

        mode_selected_obj_2 = mode_selected.objects.latest('date')

        json = {
            'daysCounter_json': str(mode_selected_obj_2.daysCounter),
            'date_json': str(datetime.now().strftime('%b. %d, %Y, %-I:%M %p')),
            'temperature_json': sensors_obj.temperature,
            'humidity_json': sensors_obj.humidity,
            'soilMoisture_json': sensors_obj.moisture,
            'temperatureStatus_json': sensors_obj.temperatureStatus,
            'humidityStatus_json': sensors_obj.humidityStatus,
            'soilMoistureStatus_json': sensors_obj.soilMoistureStatus,
        }

        return JsonResponse(json)

    if response.POST.get('action') == 'snapImage':
        mode_selected_obj = mode_selected.objects.latest('date')
        if (mode_selected_obj.modeNumber == 1):
            print(" ")
            print("~[ Mode 1 ] Vision System Starting~")
            print(" ")
            print(" ")

            getTime = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')

            class options:
                def __init__(self):
                    self.debug = "plot"
                    self.outdir = "./assets/gardenPics/"

            args = options()
            #pcv.params.debug = args.debug

            plant_area_list = []  #Plant area array for storage

            #img, path, filename = pcv.readimage(filename='./assets/gardenPics/' + getTime + '.jpg', modeNumber="native") # Read image to be used
            img, path, filename = pcv.readimage(
                filename='./assets/gardenPics/test.jpg',
                mode="native")  # Read image to be used

            # START of  Multi Plant Workflow https://plantcv.readthedocs.io/en/stable/multi-plant_tutorial/

            # STEP 1: Check if this is a night image
            # STEP 2: Normalize the white color so you can later compare color between images
            img1 = pcv.white_balance(img, roi=(600, 70, 20, 20))
            # STEP 3: Rotate the image so that plants line up with grid
            # STEP 4: Shift image
            # STEP 5: Convert image from RGB colorspace to LAB colorspace. Keep only the green-magenta channel (grayscale)
            a = pcv.rgb2gray_lab(rgb_img=img1, channel='a')
            # STEP 6: Set a binary threshold on the green-magenta ('a') channel image
            img_binary = pcv.threshold.binary(gray_img=a,
                                              threshold=119,
                                              max_value=255,
                                              object_type='dark')
            # STEP 7: Fill in small objects (speckles)
            fill_image = pcv.fill(bin_img=img_binary, size=100)
            # STEP 8: Dilate so that you don't lose leaves (just in case)
            dilated = pcv.dilate(gray_img=fill_image, ksize=2, i=1)
            # STEP 9: Find objects (contours: black-white boundaries)
            id_objects, obj_hierarchy = pcv.find_objects(img=img1,
                                                         mask=dilated)
            # STEP 10: Define region of interest (ROI)
            roi_contour, roi_hierarchy = pcv.roi.rectangle(img=img1,
                                                           x=100,
                                                           y=160,
                                                           h=390,
                                                           w=780)
            # STEP 11: Keep objects that overlap with the ROI
            roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
                img=img1,
                roi_contour=roi_contour,
                roi_hierarchy=roi_hierarchy,
                object_contour=id_objects,
                obj_hierarchy=obj_hierarchy,
                roi_type='partial')

            # END of Multi Plant Workflow

            # START of Create Multiple Regions of Interest (ROI) https://plantcv.readthedocs.io/en/stable/roi_multi/

            # Make a grid of ROIs
            roi1, roi_hier1 = pcv.roi.multi(img=img1,
                                            coord=(180, 260),
                                            radius=50,
                                            spacing=(150, 200),
                                            nrows=2,
                                            ncols=5)

            # Loop through and filter each plant, record the area
            for i in range(0, len(roi1)):
                roi = roi1[i]
                hierarchy = roi_hier1[i]
                # Find objects
                filtered_contours, filtered_hierarchy, filtered_mask, filtered_area = pcv.roi_objects(
                    img=img,
                    roi_type="partial",
                    roi_contour=roi,
                    roi_hierarchy=hierarchy,
                    object_contour=roi_objects,
                    obj_hierarchy=roi_obj_hierarchy)

                # Record the area
                plant_area_list.append(filtered_area)

                if (i < 10):
                    print(plant_area_list[i])

            # END of Create Multiple Regions of Interest (ROI)

            # Label area by plant ID, leftmost plant has id=0
            plant_area_labels = [i for i in range(0, len(plant_area_list))]

            #out = args.outdir
            # Create a new measurement
            pcv.outputs.add_observation(variable='plant_area',
                                        trait='plant area ',
                                        method='plantcv.plantcv.roi_objects',
                                        scale='pixels',
                                        datatype=list,
                                        value=plant_area_list,
                                        label=plant_area_labels)

            # Print areas to XML
            #pcv.print_results(filename="./assets/gardenPics/plant_area_results.xml")

            mode1_vision_system_.image = '../assets/gardenPics/' + getTime + '.jpg'
            mode1_vision_system_.plant1 = plant_area_list[0]
            mode1_vision_system_.plant2 = plant_area_list[1]
            mode1_vision_system_.plant3 = plant_area_list[2]
            mode1_vision_system_.plant4 = plant_area_list[3]
            mode1_vision_system_.plant5 = plant_area_list[4]
            mode1_vision_system_.plant6 = plant_area_list[5]
            mode1_vision_system_.plant7 = plant_area_list[6]
            mode1_vision_system_.plant8 = plant_area_list[7]
            mode1_vision_system_.plant9 = plant_area_list[8]
            mode1_vision_system_.plant10 = plant_area_list[9]
            mode1_vision_system_.save()

            mode1_visionSystem_obj_afterInsertion = mode1_vision_system.objects.latest(
                'date')
            mode_selected_obj_first = mode_selected.objects.first()
            mode_selected_obj = mode_selected.objects.latest('date')

            date1 = mode_selected_obj_first.date
            date2 = mode1_visionSystem_obj_afterInsertion.date

            def numOfDays(date1, date2):
                return (date2 - date1).days

            mode_selected_.daysCounter = numOfDays(date1, date2)
            mode_selected_.grid = mode_selected_obj.grid
            mode_selected_.rows = mode_selected_obj.rows
            mode_selected_.columns = mode_selected_obj.columns
            mode_selected_.modeNumber = mode_selected_obj.modeNumber
            mode_selected_.save()

            json = {
                'image_json':
                str(mode1_vision_system_.image),
                'cameraDateJSON':
                str(datetime.now().strftime('%b. %d, %Y, %-I:%M %p')),
                'daysCounter_json':
                str(numOfDays(date1, date2)),
                'plant1_json':
                mode1_vision_system_.plant1,
                'plant2_json':
                mode1_vision_system_.plant2,
                'plant3_json':
                mode1_vision_system_.plant3,
                'plant4_json':
                mode1_vision_system_.plant4,
                'plant5_json':
                mode1_vision_system_.plant5,
                'plant6_json':
                mode1_vision_system_.plant6,
                'plant7_json':
                mode1_vision_system_.plant7,
                'plant8_json':
                mode1_vision_system_.plant8,
                'plant9_json':
                mode1_vision_system_.plant9,
                'plant10_json':
                mode1_vision_system_.plant10
            }

            return JsonResponse(json)

        if (mode_selected_obj.modeNumber == 2):
            print(" ")
            print("~[ Mode 2 ] Vision System Starting~")
            print(" ")
            print(" ")

        if (mode_selected_obj.modeNumber == 3):
            print(" ")
            print("~[ Mode 3 ] Vision System Starting~")
            print(" ")
            print(" ")

        if (mode_selected_obj.modeNumber == 4):
            print(" ")
            print("~[ Mode 4 ] Vision System Starting~")
            print(" ")
            print(" ")

    if response.POST.get('action') == 'onMode1':

        print(" ")
        print("~Mode 1 Activated~")
        print(" ")

        GPIO.output(6, GPIO.LOW)
        GPIO.output(5, GPIO.LOW)

        mode_selected_.grid = mode1_obj_global.grid
        mode_selected_.rows = mode1_obj_global.rows
        mode_selected_.columns = mode1_obj_global.columns
        mode_selected_.modeNumber = mode1_obj_global.modeNumber
        mode_selected_.save()

        mode_selected_obj = mode_selected.objects.latest('date')

        json = {
            'grid_json': mode_selected_obj.grid,
            'mode_json': mode_selected_obj.modeNumber,
        }

        return JsonResponse(json)

    if response.POST.get('action') == 'onMode2':

        print(" ")
        print("~Mode 2 Activated~")
        print(" ")

        GPIO.output(6, GPIO.LOW)
        GPIO.output(5, GPIO.HIGH)

        mode_selected_.grid = mode2_obj_global.grid
        mode_selected_.rows = mode2_obj_global.rows
        mode_selected_.columns = mode2_obj_global.columns
        mode_selected_.modeNumber = mode2_obj_global.modeNumber
        mode_selected_.save()

        mode_selected_obj = mode_selected.objects.latest('date')

        json = {
            'grid_json': mode_selected_obj.grid,
            'mode_json': mode_selected_obj.modeNumber,
        }

        return JsonResponse(json)

    if response.POST.get('action') == 'onMode3':

        print(" ")
        print("~Mode 3 Activated~")
        print(" ")

        GPIO.output(6, GPIO.HIGH)
        GPIO.output(5, GPIO.LOW)

        mode_selected_.grid = mode3_obj_global.grid
        mode_selected_.rows = mode3_obj_global.rows
        mode_selected_.columns = mode3_obj_global.columns
        mode_selected_.modeNumber = mode3_obj_global.modeNumber
        mode_selected_.save()

        mode_selected_obj = mode_selected.objects.latest('date')

        json = {
            'grid_json': mode_selected_obj.grid,
            'mode_json': mode_selected_obj.modeNumber,
        }

        return JsonResponse(json)

    if response.POST.get('action') == 'onMode4':

        print(" ")
        print("~Mode 4 Activated~")
        print(" ")

        GPIO.output(6, GPIO.HIGH)
        GPIO.output(5, GPIO.HIGH)

        mode_selected_.grid = mode4_obj_global.grid
        mode_selected_.rows = mode4_obj_global.rows
        mode_selected_.columns = mode4_obj_global.columns
        mode_selected_.modeNumber = mode4_obj_global.modeNumber
        mode_selected_.save()

        mode_selected_obj = mode_selected.objects.latest('date')

        json = {
            'grid_json': mode_selected_obj.grid,
            'mode_json': mode_selected_obj.modeNumber,
        }

        return JsonResponse(json)

    if response.POST.get('action') == 'onCalibration':

        print(" ")
        print("~ (PIN 26) Calibration Activated~")
        print(" ")

        devices_.fansStatus = devices_obj_global.fansStatus
        devices_.lightsStatus = devices_obj_global.lightsStatus
        devices_.calibrationStatus = 'On'
        devices_.waterStatus = devices_obj_global.waterStatus
        devices_.seedStatus = devices_obj_global.seedStatus
        devices_.save()

        GPIO.output(26, GPIO.HIGH)
        sleep(1)
        GPIO.output(26, GPIO.LOW)

        print(" ")
        print("~ (PIN 26) Calibration Deactivated~")
        print(" ")

        devices_2.fansStatus = devices_obj_global.fansStatus
        devices_2.lightsStatus = devices_obj_global.lightsStatus
        devices_2.calibrationStatus = 'Off'
        devices_2.waterStatus = devices_obj_global.waterStatus
        devices_2.seedStatus = devices_obj_global.seedStatus
        devices_2.save()

    if response.POST.get('action') == 'onFan':

        print(" ")
        print("~Fans Activated~")
        print(" ")

        GPIO.output(20, GPIO.HIGH)
        GPIO.output(16, GPIO.HIGH)

        devices_.fansStatus = 'On'
        devices_.lightsStatus = devices_obj_global.lightsStatus
        devices_.calibrationStatus = devices_obj_global.calibrationStatus
        devices_.waterStatus = devices_obj_global.waterStatus
        devices_.seedStatus = devices_obj_global.seedStatus
        devices_.save()

    if response.POST.get('action') == 'offFan':

        print(" ")
        print("~Fans deactivated~")
        print(" ")

        GPIO.output(20, GPIO.LOW)
        GPIO.output(16, GPIO.LOW)

        devices_.fansStatus = 'Off'
        devices_.lightsStatus = devices_obj_global.lightsStatus
        devices_.calibrationStatus = devices_obj_global.calibrationStatus
        devices_.waterStatus = devices_obj_global.waterStatus
        devices_.seedStatus = devices_obj_global.seedStatus
        devices_.save()

    if response.POST.get('action') == 'onLights':

        print(" ")
        print("~Lights Activated~")
        print(" ")

        GPIO.output(21, GPIO.HIGH)

        devices_.fansStatus = devices_obj_global.fansStatus
        devices_.lightsStatus = 'On'
        devices_.calibrationStatus = devices_obj_global.calibrationStatus
        devices_.waterStatus = devices_obj_global.waterStatus
        devices_.seedStatus = devices_obj_global.seedStatus
        devices_.save()

    if response.POST.get('action') == 'offLights':

        print(" ")
        print("~Lights Deactivated~")
        print(" ")

        GPIO.output(21, GPIO.LOW)

        devices_.fansStatus = devices_obj_global.fansStatus
        devices_.lightsStatus = 'Off'
        devices_.calibrationStatus = devices_obj_global.calibrationStatus
        devices_.waterStatus = devices_obj_global.waterStatus
        devices_.seedStatus = devices_obj_global.seedStatus
        devices_.save()

    if response.POST.get('action') == 'onWater':

        print(" ")
        print("~ (PIN 19) Watering System Activated~")
        print(" ")

        devices_.fansStatus = devices_obj_global.fansStatus
        devices_.lightsStatus = devices_obj_global.lightsStatus
        devices_.calibrationStatus = devices_obj_global.calibrationStatus
        devices_.waterStatus = 'On'
        devices_.seedStatus = devices_obj_global.seedStatus
        devices_.save()

        GPIO.output(19, GPIO.HIGH)
        sleep(1)
        GPIO.output(19, GPIO.LOW)

        print(" ")
        print("~ (PIN 19) Watering System Deactivated~")
        print(" ")

        devices_.fansStatus = devices_obj_global.fansStatus
        devices_.lightsStatus = devices_obj_global.lightsStatus
        devices_.calibrationStatus = devices_obj_global.calibrationStatus
        devices_.waterStatus = 'Off'
        devices_.seedStatus = devices_obj_global.seedStatus
        devices_.save()

    if response.POST.get('action') == 'onSeed':

        print(" ")
        print("~ (PIN 13) Seeder Activated~")
        print(" ")

        devices_.fansStatus = devices_obj_global.fansStatus
        devices_.lightsStatus = devices_obj_global.lightsStatus
        devices_.calibrationStatus = devices_obj_global.calibrationStatus
        devices_.waterStatus = devices_obj_global.waterStatus
        devices_.seedStatus = 'On'
        devices_.save()

        GPIO.output(13, GPIO.HIGH)
        sleep(1)
        GPIO.output(13, GPIO.LOW)

        print(" ")
        print("~Seeder Deactivated~")
        print(" ")

        devices_.fansStatus = devices_obj_global.fansStatus
        devices_.lightsStatus = devices_obj_global.lightsStatus
        devices_.calibrationStatus = devices_obj_global.calibrationStatus
        devices_.waterStatus = devices_obj_global.waterStatus
        devices_.seedStatus = 'Off'
        devices_.save()

    if response.POST.get('action') == 'fullReset':

        print(" ")
        print("~Database Cleared~")
        print(" ")

        mode_selected.objects.all().delete()
        mode_selected_.daysCounter = 0
        mode_selected_.grid = mode1_obj_global.grid
        mode_selected_.rows = mode1_obj_global.rows
        mode_selected_.columns = mode1_obj_global.columns
        mode_selected_.modeNumber = mode1_obj_global.modeNumber
        mode_selected_.save()

        devices.objects.all().delete()
        devices_.calibrationStatus = 'Off'
        devices_.fansStatus = 'Off'
        devices_.lightsStatus = 'Off'
        devices_.waterStatus = 'Off'
        devices_.seedStatus = 'Off'
        devices_.save()

        sensors.objects.all().delete()
        sensors_.temperature = 0
        sensors_.humidity = 0
        sensors_.moisture = 0
        sensors_.temperatureStatus = "Good"
        sensors_.humidityStatus = "Good"
        sensors_.soilMoistureStatus = "Good"
        sensors_.save()

        mode1_vision_system.objects.all().delete()
        mode1_vision_system_.image = '../assets/background/rpiBG.gif'
        mode1_vision_system_.plant1 = 0
        mode1_vision_system_.plant2 = 0
        mode1_vision_system_.plant3 = 0
        mode1_vision_system_.plant4 = 0
        mode1_vision_system_.plant5 = 0
        mode1_vision_system_.plant6 = 0
        mode1_vision_system_.plant7 = 0
        mode1_vision_system_.plant8 = 0
        mode1_vision_system_.plant9 = 0
        mode1_vision_system_.plant10 = 0
        mode1_vision_system_.save()

        mode2_vision_system.objects.all().delete()
        mode2_vision_system_.image = '../assets/background/rpiBG.gif'
        mode2_vision_system_.plant1 = 0
        mode2_vision_system_.plant2 = 0
        mode2_vision_system_.plant3 = 0
        mode2_vision_system_.plant4 = 0
        mode2_vision_system_.plant5 = 0
        mode2_vision_system_.plant6 = 0
        mode2_vision_system_.plant7 = 0
        mode2_vision_system_.plant8 = 0
        mode2_vision_system_.save()

        mode3_vision_system.objects.all().delete()
        mode3_vision_system_.image = '../assets/background/rpiBG.gif'
        mode3_vision_system_.plant1 = 0
        mode3_vision_system_.plant2 = 0
        mode3_vision_system_.plant3 = 0
        mode3_vision_system_.plant4 = 0
        mode3_vision_system_.plant5 = 0
        mode3_vision_system_.plant6 = 0
        mode3_vision_system_.plant7 = 0
        mode3_vision_system_.plant8 = 0
        mode3_vision_system_.plant9 = 0
        mode3_vision_system_.plant10 = 0
        mode3_vision_system_.plant11 = 0
        mode3_vision_system_.plant12 = 0
        mode3_vision_system_.plant13 = 0
        mode3_vision_system_.plant14 = 0
        mode3_vision_system_.plant15 = 0
        mode3_vision_system_.plant16 = 0
        mode3_vision_system_.plant17 = 0
        mode3_vision_system_.plant18 = 0
        mode3_vision_system_.save()

        mode4_vision_system.objects.all().delete()
        mode4_vision_system_.image = '../assets/background/rpiBG.gif'
        mode4_vision_system_.plant1 = 0
        mode4_vision_system_.plant2 = 0
        mode4_vision_system_.plant3 = 0
        mode4_vision_system_.plant4 = 0
        mode4_vision_system_.plant5 = 0
        mode4_vision_system_.plant6 = 0
        mode4_vision_system_.plant7 = 0
        mode4_vision_system_.plant8 = 0
        mode4_vision_system_.plant9 = 0
        mode4_vision_system_.plant10 = 0
        mode4_vision_system_.plant11 = 0
        mode4_vision_system_.plant12 = 0
        mode4_vision_system_.save()

        mode_selected_obj = mode_selected.objects.latest('date')
        mode1_visionSystem_obj = mode1_vision_system.objects.latest('date')
        sensors_obj = sensors.objects.latest('date')
        devices_obj = devices.objects.latest('date')

        json = {
            'mode_json': mode_selected_obj.modeNumber,
            'grid_json': mode_selected_obj.grid,
            'startDate_json':
            str(datetime.now().strftime('%b. %d, %Y, %-I:%M %p')),
            'daysCounter_json': str(mode_selected_obj.daysCounter),
            'calibration_json': devices_obj.calibrationStatus,
            'fans_json': devices_obj.fansStatus,
            'lights_json': devices_obj.lightsStatus,
            'water_json': devices_obj.waterStatus,
            'seeder_json': devices_obj.seedStatus,
            'temperature_json': sensors_obj.temperature,
            'humidity_json': sensors_obj.humidity,
            'soilMoisture_json': sensors_obj.moisture,
            'temperatureStatus_json': sensors_obj.temperatureStatus,
            'humidityStatus_json': sensors_obj.humidityStatus,
            'soilMoistureStatus_json': sensors_obj.soilMoistureStatus,
            'image_json': str(mode1_visionSystem_obj.image),
            'plant1_json': mode1_visionSystem_obj.plant1,
            'plant2_json': mode1_visionSystem_obj.plant2,
            'plant3_json': mode1_visionSystem_obj.plant3,
            'plant4_json': mode1_visionSystem_obj.plant4,
            'plant5_json': mode1_visionSystem_obj.plant5,
            'plant6_json': mode1_visionSystem_obj.plant6,
            'plant7_json': mode1_visionSystem_obj.plant7,
            'plant8_json': mode1_visionSystem_obj.plant8,
            'plant9_json': mode1_visionSystem_obj.plant9,
            'plant10_json': mode1_visionSystem_obj.plant10,
        }

        return JsonResponse(json)

    sensors_obj_global = sensors.objects.latest('date')
    mode1_vision_system_obj_global = mode1_vision_system.objects.latest('date')
    mode2_vision_system_obj_global = mode2_vision_system.objects.latest('date')
    mode3_vision_system_obj_global = mode3_vision_system.objects.latest('date')
    mode4_vision_system_obj_global = mode4_vision_system.objects.latest('date')
    mode_selected_obj_global_first = mode_selected.objects.first()
    mode_selected_obj_global_2 = mode_selected.objects.latest('date')

    myObj = {
        'mode_selected_obj_global_first': mode_selected_obj_global_first,
        'mode_selected_obj_global_2': mode_selected_obj_global_2,
        'devices_obj_global': devices_obj_global,
        'sensors_obj_global': sensors_obj_global,
        'mode1_vision_system_obj_global': mode1_vision_system_obj_global,
        'mode2_vision_system_obj_global': mode2_vision_system_obj_global,
        'mode3_vision_system_obj_global': mode3_vision_system_obj_global,
        'mode4_vision_system_obj_global': mode4_vision_system_obj_global
    }

    return render(response, 'main.html', context=myObj)
device, mask = pcv.naive_bayes_classifier(rgb_img,
                                          pdf_file=sys.argv[2],
                                          device=0,
                                          debug="print")  #plantcv model output
mask_image_plant, path, filename = pcv.readimage(
    '1_naive_bayes_Plant_mask.jpg')

############################################
######    Perform Calculations    ##########
############################################

# combine leaf and labels
mask_image_label, path, filename = pcv.readimage(
    '1_naive_bayes_labels_mask.jpg')
mask_image = mask_image_plant + mask_image_label
device, mask_image = pcv.rgb2gray_lab(mask_image, 'l', device)

# Clean up the mask
device, img_binary = pcv.binary_threshold(mask_image, 50, 255, 'light', device)
pcv.print_image(img_binary, 'img_binary.tif')
device, blur_img = pcv.erode(
    img_binary, 3, 1, device, debug='print'
)  # Erode to remove soil speckles (the mask is dilated again below so leaves are not lost)

mask = np.copy(blur_img)
device, fill_image = pcv.fill(blur_img, mask, 100, device)
pcv.print_image(fill_image, 'fill_image.tif')
device, binary_image = pcv.median_blur(fill_image, 1, device)
pcv.print_image(binary_image, 'binary_image.tif')
device, dilate_image = pcv.dilate(fill_image, 3, 3, device)
masked_image = dilate_image
Example #21
0
def colorspaces(rgb_img, original_img=True):
    """ Visualize an RGB image in all potential colorspaces

    Inputs:
    rgb_img      = RGB image data
    original_img = Whether or not to include the original image in the debugging plot

    Returns:
    plotting_img = Plotting image containing the original image and L,A,B,H,S, and V colorspaces

    :param rgb_img: numpy.ndarray
    :param original_img: bool
    :return plotting_img: numpy.ndarray

    """

    if not len(np.shape(rgb_img)) == 3:
        fatal_error("Input image is not RGB!")

    # Store and disable debug mode
    debug = params.debug
    params.debug = None

    # Initialize grayscale images list, rgb images list, plotting coordinates
    colorspace_names = ["H", "S", "V", "L", "A", "B"]
    all_colorspaces = []
    labeled_imgs = []
    y = int(np.shape(rgb_img)[0] / 2)
    x = int(np.shape(rgb_img)[1] / 2)

    # Loop through and create grayscale imgs from each colorspace
    for i in range(0, 3):
        channel = colorspace_names[i]
        all_colorspaces.append(rgb2gray_hsv(rgb_img=rgb_img, channel=channel))
    for i in range(3, 6):
        channel = colorspace_names[i]
        all_colorspaces.append(rgb2gray_lab(rgb_img=rgb_img, channel=channel))

    # Plot labels of each colorspace on the corresponding img
    for i, colorspace in enumerate(all_colorspaces):
        converted_img = cv2.cvtColor(colorspace, cv2.COLOR_GRAY2RGB)
        labeled = cv2.putText(img=converted_img, text=colorspace_names[i], org=(x, y),
                              fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                              fontScale=params.text_size, color=(255, 0, 255), thickness=params.text_thickness)
        labeled_imgs.append(labeled)

    # Compile images together, including a larger version of the original image
    plotting_img = np.vstack([np.hstack([labeled_imgs[0], labeled_imgs[1], labeled_imgs[2]]),
                              np.hstack([labeled_imgs[3], labeled_imgs[4], labeled_imgs[5]])])

    # If original_img is True then also plot the original image with the rest of them
    if original_img:
        plotting_img = np.hstack([resize(img=rgb_img, resize_x=2, resize_y=2), plotting_img])
    plotting_img = resize(plotting_img,  resize_x=.5, resize_y=.5)

    # Reset debug mode
    params.debug = debug

    if params.debug == "print":
        # If debug is print, save the image to a file
        print_image(plotting_img, os.path.join(params.debug_outdir, str(params.device) + "_vis_colorspaces.png"))
    elif params.debug == "plot":
        # If debug is plot, print to the plotting device
        plot_image(plotting_img)

    return plotting_img
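# Minimal usage sketch for the helper above (the image path is illustrative and
# assumes the caller has plantcv imported as pcv):
# img, path, fname = pcv.readimage("plant.jpg")
# cs_plot = colorspaces(rgb_img=img, original_img=True)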
def plantCVProcess(img, x, y, w, h):

    # Convert RGB to HSV and extract the saturation channel
    s = pcv.rgb2gray_hsv(rgb_img=img, channel='s')

    # Threshold the saturation image
    s_thresh = pcv.threshold.binary(gray_img=s, threshold=85, max_value=255, object_type='light')

    # Median Blur
    s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)
    s_cnt = pcv.median_blur(gray_img=s_thresh, ksize=5)

    # Convert RGB to LAB and extract the Blue channel
    b = pcv.rgb2gray_lab(rgb_img=img, channel='b')

    # Threshold the blue image
    b_thresh = pcv.threshold.binary(gray_img=b, threshold=160, max_value=255, object_type='light')
    b_cnt = pcv.threshold.binary(gray_img=b, threshold=160, max_value=255, object_type='light')

    # Fill small objects
    # b_fill = pcv.fill(b_thresh, 10)

    # Join the thresholded saturation and blue-yellow images
    bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=b_cnt)

    # Apply Mask (for VIS images, mask_color=white)
    masked = pcv.apply_mask(img=img, mask=bs, mask_color='white')

    # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a, threshold=115, max_value=255, object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a, threshold=135, max_value=255, object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b, threshold=128, max_value=255, object_type='light')

    # Join the thresholded saturation and blue-yellow images (OR)
    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)

    # Fill small objects
    ab_fill = pcv.fill(bin_img=ab, size=200)

    # Apply mask (for VIS images, mask_color=white)
    masked2 = pcv.apply_mask(img=masked, mask=ab_fill, mask_color='white')

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(img=masked2, mask=ab_fill)

    # Define ROI
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2, x=x, y=y, h=h, w=w)

    # Decide which objects to keep
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img=img, roi_contour=roi1,
                                                               roi_hierarchy=roi_hierarchy,
                                                               object_contour=id_objects,
                                                               obj_hierarchy=obj_hierarchy,
                                                               roi_type='partial')

    # Object combine kept objects
    obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)

    ############### Analysis ################

    # Find shape properties, output shape image (optional)
    shape_imgs = pcv.analyze_object(img=img, obj=obj, mask=mask)

    # Shape properties relative to user boundary line (optional)
    boundary_img1 = pcv.analyze_bound_horizontal(img=img, obj=obj, mask=mask, line_position=1680)

    # Determine color properties: Histograms, Color Slices, output color analyzed histogram (optional)
    color_histogram = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='all')

    # Pseudocolor the grayscale image
    pseudocolored_img = pcv.visualize.pseudocolor(gray_img=s, mask=mask, cmap='jet')

    return print_results()
def main():

    # Set variables
    args = options()
    pcv.params.debug = args.debug

    # Read and rotate image
    img, path, filename = pcv.readimage(filename=args.image)
    img = pcv.rotate(img, -90, False)

    # Create mask from LAB b channel
    l = pcv.rgb2gray_lab(rgb_img=img, channel='b')
    l_thresh = pcv.threshold.binary(gray_img=l,
                                    threshold=115,
                                    max_value=255,
                                    object_type='dark')
    l_mblur = pcv.median_blur(gray_img=l_thresh, ksize=5)

    # Apply mask to image
    masked = pcv.apply_mask(img=img, mask=l_mblur, mask_color='white')
    ab_fill = pcv.fill(bin_img=l_mblur, size=50)

    # Extract plant object from image
    id_objects, obj_hierarchy = pcv.find_objects(img=img, mask=ab_fill)
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked,
                                            x=150,
                                            y=270,
                                            h=100,
                                            w=100)
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img=img,
        roi_contour=roi1,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy,
        roi_type='partial')
    obj, mask = pcv.object_composition(img=img,
                                       contours=roi_objects,
                                       hierarchy=hierarchy3)

    ############### Analysis ################

    # Analyze shape properties
    analysis_image = pcv.analyze_object(img=img, obj=obj, mask=mask)
    boundary_image2 = pcv.analyze_bound_horizontal(img=img,
                                                   obj=obj,
                                                   mask=mask,
                                                   line_position=370)

    # Analyze colour properties
    color_histogram = pcv.analyze_color(rgb_img=img,
                                        mask=kept_mask,
                                        hist_plot_type='all')

    # Analyze shape independent of size
    top_x, bottom_x, center_v_x = pcv.x_axis_pseudolandmarks(img=img,
                                                             obj=obj,
                                                             mask=mask)
    top_y, bottom_y, center_v_y = pcv.y_axis_pseudolandmarks(img=img,
                                                             obj=obj,
                                                             mask=mask)

    # Print results
    pcv.print_results(filename='{}'.format(args.result))
    pcv.print_image(img=color_histogram,
                    filename='{}_color_hist.jpg'.format(args.outdir))
    pcv.print_image(img=kept_mask, filename='{}_mask.jpg'.format(args.outdir))

if traynum == 2:
    pixelres = 1 / 226 * 25.4  # ~0.11 mm/pixel (convert pixels/inch to mm/pixel)
elif traynum == 3:
    pixelres = 1 / 200 * 25.4  # ~0.13
elif traynum == 4:
    pixelres = 1 / 215 * 25.4  # ~0.12
elif traynum == 5:
    pixelres = 1 / 213 * 25.4  # ~0.12
elif traynum == 6:
    pixelres = 1 / 195 * 25.4  # ~0.13
elif traynum == 7:
    pixelres = 1 / 202 * 25.4  # ~0.13

img, _, _ = pcv.readimage('diy_data/rgb/tray' + str(traynum) + '.png')
imga = pcv.rgb2gray_lab(img, 'a')
thresh = pcv.threshold.binary(imga, 115, 255, 'dark')
mask = pcv.fill(bin_img=thresh, size=200)
c_wt, h_wt = pcv.roi.rectangle(img, x=1200, y=300, h=250, w=800)

id_objects, obj_hierarchy = pcv.find_objects(img,mask)
roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img=img, roi_contour=c_wt,
                                                                roi_hierarchy=h_wt,
                                                                object_contour=id_objects,
                                                                obj_hierarchy=obj_hierarchy,
                                                                roi_type='partial')

obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)
shape_img = pcv.analyze_object(img=img, obj=obj, mask=mask)

pcv.outputs.observations['area']['value']/2 * pixelres * pixelres
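
# For reference, a hypothetical refactor (not part of the original snippet): the
# per-tray calibration above can be kept in a lookup table and the pixel-to-mm^2
# conversion made explicit. Tray numbers and pixels-per-inch values are copied
# from the if/elif chain above; PX_PER_INCH and area_mm2 are illustrative names.
PX_PER_INCH = {2: 226, 3: 200, 4: 215, 5: 213, 6: 195, 7: 202}
pixelres = 25.4 / PX_PER_INCH[traynum]  # mm per pixel
area_px = pcv.outputs.observations['area']['value']
area_mm2 = area_px / 2 * pixelres ** 2  # square pixels -> mm^2 (halved, as in the expression above)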
Example #25
def main():
    # Get options
    args = options()

    # Set variables
    device = 0
    pcv.params.debug = args.debug
    img_file = args.image

    # Read image
    img, path, filename = pcv.readimage(filename=img_file, mode='rgb')

    # Process saturation channel from HSV colour space
    s = pcv.rgb2gray_hsv(rgb_img=img, channel='s')
    lp_s = pcv.laplace_filter(s, 1, 1)
    shrp_s = pcv.image_subtract(s, lp_s)
    s_eq = pcv.hist_equalization(shrp_s)
    s_thresh = pcv.threshold.binary(gray_img=s_eq,
                                    threshold=215,
                                    max_value=255,
                                    object_type='light')
    s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=5)

    # Process green-magenta (a) channel from LAB colour space
    a = pcv.rgb2gray_lab(rgb_img=img, channel='a')
    a_lp = pcv.laplace_filter(a, 1, 1)
    a_shrp = pcv.image_subtract(a, a_lp)
    a_thresh = pcv.threshold.otsu(a_shrp, 255, object_type='dark')

    # Create and apply mask
    bs = pcv.logical_or(bin_img1=s_mblur, bin_img2=a_thresh)
    filled = pcv.fill_holes(bs)
    masked = pcv.apply_mask(img=img, mask=filled, mask_color='white')

    # Extract colour channels from masked image
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')

    # Threshold the green-magenta and blue images
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a,
                                          threshold=115,
                                          max_value=255,
                                          object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a,
                                           threshold=140,
                                           max_value=255,
                                           object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b,
                                          threshold=128,
                                          max_value=255,
                                          object_type='light')

    # Join the thresholded green-magenta and blue-yellow images (OR)
    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)

    # Produce and apply a mask
    opened_ab = pcv.opening(gray_img=ab)
    ab_fill = pcv.fill(bin_img=ab, size=200)
    closed_ab = pcv.closing(gray_img=ab_fill)
    masked2 = pcv.apply_mask(img=masked, mask=ab_fill, mask_color='white')

    # Identify objects
    id_objects, obj_hierarchy = pcv.find_objects(img=masked2, mask=ab_fill)

    # Define region of interest (ROI)
    roi1, roi_hierarchy = pcv.roi.rectangle(img=masked2,
                                            x=250,
                                            y=100,
                                            h=200,
                                            w=200)

    # Decide what objects to keep
    roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(
        img=img,
        roi_contour=roi1,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy,
        roi_type='partial')

    # Object combine kept objects
    obj, mask = pcv.object_composition(img=img,
                                       contours=roi_objects,
                                       hierarchy=hierarchy3)

    ############### Analysis ################

    outfile = False
    if args.writeimg:
        outfile = args.outdir + "/" + filename

    # Analyze the plant
    analysis_image = pcv.analyze_object(img=img, obj=obj, mask=mask)
    color_histogram = pcv.analyze_color(rgb_img=img,
                                        mask=kept_mask,
                                        hist_plot_type='all')
    top_x, bottom_x, center_v_x = pcv.x_axis_pseudolandmarks(img=img,
                                                             obj=obj,
                                                             mask=mask)
    top_y, bottom_y, center_v_y = pcv.y_axis_pseudolandmarks(img=img,
                                                             obj=obj,
                                                             mask=mask)

    # Print results of the analysis
    pcv.print_results(filename=args.result)
    pcv.output_mask(img,
                    kept_mask,
                    filename,
                    outdir=args.outdir,
                    mask_only=True)

def draw_plot(x_start, y_start, x_end, y_end, reference_file, save_file):
    """
    Uses PlantCV (citation below) to count the green pixels (chlorophyll) in the
    wells containing plants, arranged in a 4x6 grid on the selected tray.

    Outputs
    -------
    A csv file containing the green pixel count for each well of the tray that
    contains a plant.

    Parameters
    ----------
    x_start : int
        x coordinate of the top-left corner of the user selection
    y_start : int
        y coordinate of the top-left corner of the user selection
    x_end : int
        x coordinate of the bottom-right corner of the user selection
    y_end : int
        y coordinate of the bottom-right corner of the user selection
    reference_file : str
        A txt file containing the names of each well of the tray
    save_file : str
        A csv file to output the green pixel count for each well of the tray

    Citation
    --------
    Fahlgren N, Feldman M, Gehan MA, Wilson MS, Shyu C, Bryant DW, Hill ST,
    McEntee CJ, Warnasooriya SN, Kumar I, Ficor T, Turnipseed S, Gilbert KB,
    Brutnell TP, Carrington JC, Mockler TC, Baxter I. (2015) A versatile
    phenotyping system and analytics platform reveals diverse temporal responses
    to water availability in Setaria. Molecular Plant 8: 1520-1535.
    http://doi.org/10.1016/j.molp.2015.06.005

    Website Link
    ------------
    https://plantcv.readthedocs.io/en/stable/
    """

    # Rescale the x,y selection from the resized preview image back to raw image coordinates so the pixel counts are accurate
    x_start = x_start * img_width / dim[0]
    y_start = y_start * img_height / dim[1]
    x_end = x_end * img_width / dim[0]
    y_end = y_end * img_height / dim[1]

    # Crop raw image to selection window
    cropped = pcv.crop(img,
                       x=int(x_start),
                       y=int(y_start),
                       h=int(y_end - y_start),
                       w=int(x_end - x_start))

    # Debug code to display cropped image. Uncomment to see cropped window
    #cropbytes = cv.imencode('.png', cropped)[1].tobytes()
    #graph.DrawImage(data=cropbytes, location=(0, 0))

    # Utilize plantcv code to count green pixels within selection window
    # For further information see : https://plantcv.readthedocs.io/en/latest/multi-plant_tutorial/
    img1 = pcv.white_balance(img=cropped, roi=(0, 0, 50, 50))
    a = pcv.rgb2gray_lab(rgb_img=img1, channel='a')
    img_binary = pcv.threshold.binary(gray_img=a,
                                      threshold=115,
                                      max_value=255,
                                      object_type='dark')
    fill_image = pcv.fill(bin_img=img_binary, size=80)
    dilated = pcv.dilate(gray_img=fill_image, ksize=2, i=1)
    id_objects, obj_hierarchy = pcv.find_objects(img=img1, mask=dilated)
    roi_contour, roi_hierarchy = pcv.roi.rectangle(img=img1,
                                                   x=0,
                                                   y=0,
                                                   h=int(y_end - y_start),
                                                   w=int(x_end - x_start))
    roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img=img1,
        roi_contour=roi_contour,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy,
        roi_type='partial')
    clusters_i, contours, hierarchies = pcv.cluster_contours(
        img=img1,
        roi_objects=roi_objects,
        roi_obj_hierarchy=roi_obj_hierarchy,
        nrow=4,
        ncol=6,
        show_grid=True)
    output_path, imgs, masks = pcv.cluster_contour_splitimg(
        img1,
        grouped_contour_indexes=clusters_i,
        contours=contours,
        hierarchy=hierarchies,
        file=filename,
        filenames=reference_file)

    # Save green pixel count for each well of the tray to a csv file using the reference file to name each well
    results = []
    for f in range(len(imgs)):
        color_histogram = pcv.analyze_color(rgb_img=imgs[f],
                                            mask=kept_mask,
                                            hist_plot_type='rgb')

        # Access the green channel frequencies stored by analyze_color
        green_freqs = pcv.outputs.observations['green_frequencies']['value']

        # Integrate the green frequency histogram as the well's green pixel measure
        result = [output_path[f].split('_')[1], np.trapz(green_freqs)]
        results.append(result)

    with open(save_file, "w", newline="") as fil:
        writer = csv.writer(fil)
        writer.writerows(results)
        sg.Popup('Finished Analysis! Please see the .csv file for results!')
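
# Hypothetical usage of draw_plot (not part of the original snippet). It assumes
# the module-level globals referenced inside the function (img, dim, img_width,
# img_height, filename) have already been set by the surrounding GUI code, and
# that the coordinates come from the user's selection on the resized preview;
# the well-name file and output path below are illustrative only.
# draw_plot(x_start=120, y_start=80, x_end=760, y_end=520,
#           reference_file="tray1_wells.txt",
#           save_file="tray1_green_pixels.csv")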
Example #27
def main():
    # Create input arguments object
    args = options()

    # Set debug mode
    pcv.params.debug = args.debug

    # Open a single image
    img, imgpath, imgname = pcv.readimage(filename=args.image)

    # Visualize colorspaces
    all_cs = pcv.visualize.colorspaces(rgb_img=img)

    # Extract the Blue-Yellow ("b") channel from the LAB colorspace
    gray_img = pcv.rgb2gray_lab(rgb_img=img, channel="b")

    # Plot a histogram of pixel values for the Blue-Yellow ("b") channel.
    hist_plot = pcv.visualize.histogram(gray_img=gray_img)

    # Apply a binary threshold to the Blue-Yellow ("b") grayscale image.
    thresh_img = pcv.threshold.binary(gray_img=gray_img,
                                      threshold=140,
                                      max_value=255,
                                      object_type="light")

    # Apply a dilation with a 5x5 kernel and 3 iterations
    dil_img = pcv.dilate(gray_img=thresh_img, ksize=5, i=3)

    # Fill in small holes in the leaves
    closed_img = pcv.fill_holes(bin_img=dil_img)

    # Erode the plant pixels using a 5x5 kernel and 3 iterations
    er_img = pcv.erode(gray_img=closed_img, ksize=5, i=3)

    # Apply a Gaussian blur with a 5 x 5 kernel.
    blur_img = pcv.gaussian_blur(img=er_img, ksize=(5, 5))

    # Set pixel values less than 255 to 0
    blur_img[np.where(blur_img < 255)] = 0

    # Fill/remove objects less than 300 pixels in area
    cleaned = pcv.fill(bin_img=blur_img, size=300)

    # Create a circular ROI
    roi, roi_str = pcv.roi.circle(img=img, x=1725, y=1155, r=400)

    # Identify objects in the binary image
    cnts, cnts_str = pcv.find_objects(img=img, mask=cleaned)

    # Filter objects by region of interest
    plant_cnt, plant_str, plant_mask, plant_area = pcv.roi_objects(
        img=img,
        roi_contour=roi,
        roi_hierarchy=roi_str,
        object_contour=cnts,
        obj_hierarchy=cnts_str)

    # Combine objects into one
    plant, mask = pcv.object_composition(img=img,
                                         contours=plant_cnt,
                                         hierarchy=plant_str)

    # Measure size and shape properties
    shape_img = pcv.analyze_object(img=img, obj=plant, mask=mask)
    if args.writeimg:
        pcv.print_image(img=shape_img,
                        filename=os.path.join(args.outdir,
                                              "shapes_" + imgname))

    # Analyze color properties
    color_img = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type="hsv")
    if args.writeimg:
        pcv.print_image(img=color_img,
                        filename=os.path.join(args.outdir,
                                              "histogram_" + imgname))

    # Save the measurements to a file
    pcv.print_results(filename=args.result)
Example #28
        'Q4_size': ()
    })
writer = pd.ExcelWriter("Demo.xlsx", engine='openpyxl')
df.to_excel(writer, index=False, header=True, startcol=0)
writer.save()
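
# Note (assumption about newer pandas versions): ExcelWriter.save() was removed
# in pandas 2.x; the equivalent context-manager form, which saves on exit, is:
# with pd.ExcelWriter("Demo.xlsx", engine="openpyxl") as writer:
#     df.to_excel(writer, index=False, header=True, startcol=0)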

img = Image.open("DemoImage.JPG")
# img.show() shows image

# crop box inputs are (left, top, right, bottom)
img_crop = img.crop((1875, 730, 5680, 3260))
# img_crop.show() shows cropped image
img_crop.save("Cropped_plate.png")

img_crop = cv2.imread("Cropped_plate.png")  # reads in the saved img
filter_image = pcv.rgb2gray_lab(
    img_crop, 'b')  # convert to grayscale using the LAB blue-yellow (b) channel
Threshold = cv2.adaptiveThreshold(filter_image, 255,
                                  cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                  cv2.THRESH_BINARY, 241,
                                  -1)  # Thresholds based on a 241 block size
cv2.imwrite("Threshold.png", Threshold)  # Saves threshold
Threshold = pcv.fill(Threshold,
                     400)  # removes small white spots left by threshold
cv2.imwrite("Final_threshold.png",
            Threshold)  # saves threshold with fill changes

# Now that we have the threshold, crop out the clusters so we can collect data from each cluster of 4 cells

dire = os.getcwd()
path = dire + '/photo_dump'
try:
Example #29
                                threshold=122,
                                max_value=255,
                                object_type='dark')

# In[110]:

# Set Median Blur
#Input box size "ksize"
s_mblur = pcv.median_blur(gray_img=s_thresh, ksize=2)
s_cnt = pcv.median_blur(gray_img=s_thresh, ksize=2)

# In[111]:

# Convert RGB to LAB and extract the blue-yellow (b) channel
# Then threshold the image
b = pcv.rgb2gray_lab(rgb_img=img1, channel='b')
b_thresh = pcv.threshold.binary(gray_img=b,
                                threshold=135,
                                max_value=255,
                                object_type='light')

#Setting threshold continued
b_cnt = pcv.threshold.binary(gray_img=b,
                             threshold=135,
                             max_value=255,
                             object_type='light')

# In[112]:

# Combine the two binary images (logical AND)
bs = pcv.logical_and(bin_img1=s_mblur, bin_img2=b_cnt)

def test(true_positive_file, test_parameters):
    hue_lower_tresh = test_parameters[0]
    hue_higher_tresh = test_parameters[1]
    saturation_lower_tresh = test_parameters[2]
    saturation_higher_tresh = test_parameters[3]
    value_lower_tresh = test_parameters[4]
    value_higher_tresh = test_parameters[5]
    green_lower_tresh = test_parameters[6]
    green_higher_tresh = test_parameters[7]
    red_lower_tresh = test_parameters[8]
    red_higher_thresh = test_parameters[9]
    blue_lower_tresh = test_parameters[10]
    blue_higher_tresh = test_parameters[11]
    blur_k = test_parameters[12]
    fill_k = test_parameters[13]
    
    class args:
        # image = "C:\\Users\\RensD\\OneDrive\\studie\\Master\\The_big_project\\top_perspective\\0214_2018-03-07 08.55 - 26_cam9.png"
        image = true_positive_file
        outdir = "C:\\Users\\RensD\\OneDrive\\studie\\Master\\The_big_project\\top_perspective\\output"
        debug = debug_setting
        result = "results.txt"
    # Get options
    pcv.params.debug=args.debug #set debug mode
    pcv.params.debug_outdir=args.outdir #set output directory

    # Read image (readimage mode defaults to native but if image is RGBA then specify mode='rgb')
    # Inputs:
    #   filename - Image file to be read in 
    #   mode - Return mode of image; either 'native' (default), 'rgb', 'gray', or 'csv'
    img, path, filename = pcv.readimage(filename=args.image, mode='rgb')
    
    s = pcv.rgb2gray_hsv(rgb_img=img, channel='h')
    mask, masked_image = pcv.threshold.custom_range(rgb_img=s, lower_thresh=[hue_lower_tresh], upper_thresh=[hue_higher_tresh], channel='gray')
    masked = pcv.apply_mask(rgb_img=img, mask = mask, mask_color = 'white')
    #print("filtered on hue")
    s = pcv.rgb2gray_hsv(rgb_img=masked, channel='s')
    mask, masked_image = pcv.threshold.custom_range(rgb_img=s, lower_thresh=[saturation_lower_tresh], upper_thresh=[saturation_higher_tresh], channel='gray')
    masked = pcv.apply_mask(rgb_img=masked, mask = mask, mask_color = 'white')
    #print("filtered on saturation")
    s = pcv.rgb2gray_hsv(rgb_img=masked, channel='v')
    mask, masked_image = pcv.threshold.custom_range(rgb_img=s, lower_thresh=[value_lower_tresh], upper_thresh=[value_higher_tresh], channel='gray')
    masked = pcv.apply_mask(rgb_img=masked, mask = mask, mask_color = 'white')
    #print("filtered on value")
    mask, masked = pcv.threshold.custom_range(rgb_img=masked, lower_thresh=[0,green_lower_tresh,0], upper_thresh=[255,green_higher_tresh,255], channel='RGB')
    masked = pcv.apply_mask(rgb_img=masked, mask = mask, mask_color = 'white')
    #print("filtered on green")
    mask, masked = pcv.threshold.custom_range(rgb_img=masked, lower_thresh=[red_lower_tresh,0,0], upper_thresh=[red_higher_thresh,255,255], channel='RGB')
    masked = pcv.apply_mask(rgb_img=masked, mask = mask, mask_color = 'white')
    #print("filtered on red")
    mask_old, masked_old = pcv.threshold.custom_range(rgb_img=masked, lower_thresh=[0,0,blue_lower_tresh], upper_thresh=[255,255,blue_higher_tresh], channel='RGB')
    masked = pcv.apply_mask(rgb_img=masked_old, mask = mask_old, mask_color = 'white')
    #print("filtered on blue")
    ###____________________________________ Blur to minimize noise
    try:
        s_mblur = pcv.median_blur(gray_img = masked_old, ksize = blur_k)
        s = pcv.rgb2gray_hsv(rgb_img=s_mblur, channel='v')
        mask, masked_image = pcv.threshold.custom_range(rgb_img=s, lower_thresh=[0], upper_thresh=[254], channel='gray')
    except:
        print("failed blur step")
    try:
        mask = pcv.fill(mask, fill_k)
    except:
        pass
    masked = pcv.apply_mask(rgb_img=masked, mask = mask, mask_color = 'white')


    ###_____________________________________ Now to identify objects
    masked_a = pcv.rgb2gray_lab(rgb_img=masked, channel='a')
    masked_b = pcv.rgb2gray_lab(rgb_img=masked, channel='b')
    
     # Threshold the green-magenta and blue images
    maskeda_thresh = pcv.threshold.binary(gray_img=masked_a, threshold=115, 
                                      max_value=255, object_type='dark')
    maskeda_thresh1 = pcv.threshold.binary(gray_img=masked_a, threshold=135, 
                                           max_value=255, object_type='light')
    maskedb_thresh = pcv.threshold.binary(gray_img=masked_b, threshold=128, 
                                          max_value=255, object_type='light')
    
    ab1 = pcv.logical_or(bin_img1=maskeda_thresh, bin_img2=maskedb_thresh)
    ab = pcv.logical_or(bin_img1=maskeda_thresh1, bin_img2=ab1)
    
    # Fill small objects
    # Inputs: 
    #   bin_img - Binary image data 
    #   size - Minimum object area size in pixels (must be an integer), and smaller objects will be filled
    ab_fill = pcv.fill(bin_img=ab, size=200)
    #print("filled")
    # Apply mask (for VIS images, mask_color=white)
    masked2 = pcv.apply_mask(rgb_img=masked, mask=ab_fill, mask_color='white')
    
    id_objects, obj_hierarchy = pcv.find_objects(masked, ab_fill)
    # Let's just take the largest
    roi1, roi_hierarchy= pcv.roi.rectangle(img=masked, x=0, y=0, h=960, w=1280)  # Currently hardcoded
    with HiddenPrints():
        roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img=img, roi_contour=roi1, 
                                                                       roi_hierarchy=roi_hierarchy, 
                                                                       object_contour=id_objects, 
                                                                       obj_hierarchy=obj_hierarchy,
                                                                       roi_type=roi_type)
    obj, mask = pcv.object_composition(img=img, contours=roi_objects, hierarchy=hierarchy3)
    
    if use_mask:
        return mask
    else:
        masked2 = pcv.apply_mask(rgb_img=masked, mask=mask, mask_color='white')
        return masked2
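
# Hypothetical call to test() (not part of the original snippet). The 14-element
# parameter vector is unpacked, in order, as: hue low/high, saturation low/high,
# value low/high, green low/high, red low/high, blue low/high, median-blur ksize,
# and fill size. The threshold values below are illustrative only, and the
# globals the function relies on (debug_setting, roi_type, use_mask, HiddenPrints)
# must be defined elsewhere in the script.
# params = [30, 90, 60, 255, 40, 250, 60, 255, 0, 200, 0, 200, 5, 200]
# result = test("true_positives/plant_top_view.png", params)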