Esempio n. 1
0
def main():
    """Segment a single plant and save an annotated shape image.

    Legacy PlantCV pipeline: white-balance, LAB 'a'-channel threshold,
    de-speckle, keep objects inside an ROI, merge them into one plant,
    run color/shape analyses, then stamp the plant's pixel area onto
    the saved shape image.
    """
    # Parse command-line options
    args = options()
    dbg = args.debug

    # Load the input image
    img, img_path, img_name = pcv.readimage(args.image)

    # The legacy PlantCV API threads a step counter ("device") through
    # every call.
    dev = 0

    # Color-correct against a white reference region
    dev, balanced = pcv.white_balance(dev, img, dbg, (500, 1000, 500, 500))
    img = balanced

    # LAB green-magenta channel separates plant from background
    dev, gray_a = pcv.rgb2gray_lab(img, 'a', dev, dbg)

    # Plant pixels are dark in the 'a' channel -> inverse threshold
    dev, binary = pcv.binary_threshold(gray_a, 120, 255, 'dark', dev, dbg)

    # Remove speckles smaller than 300 px
    binary_copy = np.copy(binary)
    dev, filled = pcv.fill(binary, binary_copy, 300, dev, dbg)

    # Contours of candidate objects
    dev, objects, hierarchy = pcv.find_objects(img, filled, dev, dbg)

    # Rectangular region of interest
    dev, roi, roi_hier = pcv.define_roi(img, 'rectangle', dev, None,
                                        'default', dbg, True, 1800, 1600,
                                        -1500, -500)

    # Keep only objects overlapping the ROI
    (dev, kept_objects, kept_hier,
     kept_mask, area) = pcv.roi_objects(img, 'partial', roi, roi_hier,
                                        objects, hierarchy, dev, dbg)

    # Merge the kept contours into a single plant object + mask
    dev, plant, plant_mask = pcv.object_composition(img, kept_objects,
                                                    kept_hier, dev, dbg)

    outfile = os.path.join(args.outdir, img_name)

    # Color and shape analyses write result images next to outfile
    dev, color_header, color_data, color_img = pcv.analyze_color(
        img, img, plant_mask, 256, dev, dbg, None, 'v', 'img', 300, outfile)
    dev, shape_header, shape_data, shape_img = pcv.analyze_object(
        img, "img", plant, plant_mask, dev, dbg, outfile)

    # Annotate the saved shape image with the plant's pixel area
    shape_path = outfile[:-4] + '_shapes.jpg'
    shape_pic = cv2.imread(shape_path)
    size_text = "The plant is " + str(np.sum(plant_mask)) + " pixels large"
    cv2.putText(shape_pic, size_text, (500, 500), cv2.FONT_HERSHEY_SIMPLEX,
                5, (0, 255, 0), 10)
    pcv.print_image(shape_pic, outfile[:-4] + '-out_shapes.jpg')
Esempio n. 2
0
def main():
    """Segment a plant photo with PlantCV and save each intermediate image.

    FIX: the Windows-style output paths are now raw strings. The original
    literals mixed un-escaped backslashes (e.g. '\\H', '\\k' -- invalid
    escape sequences, deprecated in Python) with doubled ones; raw strings
    produce byte-identical runtime values without the warnings.
    """
    # TODO: capture the frame from the IP camera with OpenCV and upload it
    # to gcloud (capture code exists; the cloud hookup is still unresolved).

    # Load the previously captured frame for processing in PlantCV.
    path = r'Image test\capture (1).jpg'
    raw_img, path, filename = pcv.readimage(path, mode='native')

    # Straighten the tilted photo (rotate 2 degrees, crop to size).
    rotated = pcv.rotate(raw_img, 2, True)
    corrected = rotated
    pcv.print_image(corrected, r'Image test\Hasil\gambar_koreksi.jpg')

    # White-balance the image. Works best on evenly lit photos with no
    # shadows. ADJUST the reference ROI to match the image dimensions!
    balanced = pcv.white_balance(raw_img,
                                 roi=(2, 100, 1104, 1200))
    pcv.print_image(balanced, r'Image test\Hasil\koreksi_white_bal.jpg')

    # LAB green-magenta channel so the plant contrasts with the
    # background (tip: avoid green backgrounds).
    gray_a = pcv.rgb2gray_lab(balanced, channel='a')
    pcv.print_image(gray_a, r'Image test\Hasil\koreksi_kontras.jpg')

    # Binary threshold; tune the threshold value for your lighting.
    bin_img = pcv.threshold.binary(gray_img=gray_a,
                                   threshold=115,
                                   max_value=255,
                                   object_type='dark')

    # Remove small noise specks.
    despeckled = pcv.fill(bin_img, size=10)
    pcv.print_image(despeckled, r'Image test\Hasil\noiseFill.jpg')

    # Smooth the mask with dilation.
    dilated = pcv.dilate(despeckled, ksize=12, i=1)

    # Locate objects and define the region of interest.
    obj_ids, obj_hierarchy = pcv.find_objects(raw_img, mask=dilated)
    roi_contour, roi_hierarchy = pcv.roi.rectangle(img=corrected,
                                                   x=20,
                                                   y=96,
                                                   h=1100,
                                                   w=680)

    # Save an image with the ROI contour drawn (debugging aid only).
    roi_drawn = cv2.drawContours(corrected, roi_contour, -1, (0, 0, 255), 3)
    pcv.print_image(roi_drawn, r'Image test\Hasil\roicontour.jpg')
    """
Esempio n. 3
0
# Guard: night images are mostly black; abort the workflow rather than
# analyzing an empty frame (pixel scale: 0 = black, 255 = white).
if np.average(img) < 50:
    pcv.fatal_error("Night Image")
else:
    pass

# Normalize the white color so you can later
# compare color between images.

# Inputs:
#   img = image object, RGB color space
#   roi = region for white reference, if none uses the whole image,
#         otherwise (x position, y position, box width, box height)

# white balance image based on white toughspot
# NOTE(review): `crop_img` is defined earlier in the notebook, outside
# this fragment -- presumably a cropped copy of `img`; confirm upstream.
img1 = pcv.white_balance(crop_img, roi=(0, 0, 200, 20))

# In[109]:

# Convert RGB to HSV and extract the saturation channel
# Then set threshold for saturation
s = pcv.rgb2gray_hsv(rgb_img=img1, channel='s')
# object_type='dark' performs inverse thresholding: pixels whose
# saturation is BELOW 122 become foreground (255) in the mask.
s_thresh = pcv.threshold.binary(gray_img=s,
                                threshold=122,
                                max_value=255,
                                object_type='dark')

# In[110]:

# Set Median Blur
#Input box size "ksize"
Esempio n. 4
0
def main():
    """Multi-plant PlantCV workflow: segment, cluster, and split plants.

    Reads the image named by the CLI options, rejects night images,
    white-balances and shifts the image, segments plants via a LAB
    green-magenta threshold, keeps objects inside a rectangular ROI,
    clusters them into a 2x3 grid, and writes one split image per
    cluster to args.outdir.
    """
    # Get options
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)

    pcv.params.debug=args.debug #set debug mode

    # STEP 1: Check if this is a night image, for some of these dataset's images were captured
    # at night, even if nothing is visible. To make sure that images are not taken at
    # night we check that the image isn't mostly dark (0=black, 255=white).
    # if it is a night image it throws a fatal error and stops the workflow.

    if np.average(img) < 50:
        pcv.fatal_error("Night Image")
    else:
        pass

    # STEP 2: Normalize the white color so you can later
    # compare color between images.
    # Inputs:
    #   img = image object, RGB colorspace
    #   roi = region for white reference, if none uses the whole image,
    #         otherwise (x position, y position, box width, box height)

    # white balance image based on white toughspot

    #img1 = pcv.white_balance(img=img,roi=(400,800,200,200))
    img1 = pcv.white_balance(img=img, mode='hist', roi=None)

    # STEP 3: Rotate the image
    # Inputs:
    #   img = image object, RGB color space
    #   rotation_deg = Rotation angle in degrees, can be negative, positive values 
    #                  will move counter-clockwise 
    #   crop = If True then image will be cropped to original image dimensions, if False
    #          the image size will be adjusted to accommodate new image dimensions 

    # NOTE(review): rotate_img is never used below -- STEP 4 shifts img1,
    # not the rotated image. Confirm whether the rotation was meant to
    # feed the rest of the pipeline.
    rotate_img = pcv.rotate(img=img1,rotation_deg=-1, crop=False)

    # STEP 4: Shift image. This step is important for clustering later on.
    # For this image it also allows you to push the green raspberry pi camera
    # out of the image. This step might not be necessary for all images.
    # The resulting image is the same size as the original.
    # Inputs:
    #   img    = image object
    #   number = integer, number of pixels to move image
    #   side   = direction to move from "top", "bottom", "right","left"

    shift1 = pcv.shift_img(img=img1, number=300, side='top')
    img1 = shift1

    # STEP 5: Convert image from RGB colorspace to LAB colorspace
    # Keep only the green-magenta channel (grayscale)
    # Inputs:
    #    img     = image object, RGB colorspace
    #    channel = color subchannel ('l' = lightness, 'a' = green-magenta , 'b' = blue-yellow)

    #a = pcv.rgb2gray_lab(img=img1, channel='a')
    a = pcv.rgb2gray_lab(rgb_img=img1, channel='a')

    # STEP 6: Set a binary threshold on the saturation channel image
    # Inputs:
    #    img         = img object, grayscale
    #    threshold   = threshold value (0-255)
    #    max_value   = value to apply above threshold (usually 255 = white)
    #    object_type = light or dark
    #       - If object is light then standard thresholding is done
    #       - If object is dark then inverse thresholding is done

    img_binary = pcv.threshold.binary(gray_img=a, threshold=120, max_value=255, object_type='dark')
    #img_binary = pcv.threshold.binary(gray_img=a, threshold=120, max_value=255, object_type'dark')
    #                                                   ^
    #                                                   |
    #                                     adjust this value

    # STEP 7: Fill in small objects (speckles)
    # Inputs:
    #    bin_img  = image object, binary. img will be returned after filling
    #    size = minimum object area size in pixels (integer)

    fill_image = pcv.fill(bin_img=img_binary, size=10)
    #                                          ^
    #                                          |
    #                           adjust this value

    # STEP 8: Dilate so that you don't lose leaves (just in case)
    # Inputs:
    #    img    = input image
    #    ksize  = kernel size
    #    i      = iterations, i.e. number of consecutive filtering passes

    #dilated = pcv.dilate(img=fill_image, ksize=1, i=1)
    dilated = pcv.dilate(gray_img=fill_image, ksize=2, i=1)

    # STEP 9: Find objects (contours: black-white boundaries)
    # Inputs:
    #    img  = image that the objects will be overlayed
    #    mask = what is used for object detection

    id_objects, obj_hierarchy = pcv.find_objects(img=img1, mask=dilated)
    #id_objects, obj_hierarchy = pcv.find_objects(gray_img, mask)

    # STEP 10: Define region of interest (ROI)
    # Inputs:
    #    img       = img to overlay roi
    #    x_adj     = adjust center along x axis
    #    y_adj     = adjust center along y axis
    #    h_adj     = adjust height
    #    w_adj     = adjust width
    # roi_contour, roi_hierarchy = pcv.roi.rectangle(img1, 10, 500, -10, -100)
    #                                                      ^                ^
    #                                                      |________________|
    #                                            adjust these four values

    roi_contour, roi_hierarchy = pcv.roi.rectangle(img=img1, x=200, y=190, h=2000, w=3000)

    # STEP 11: Keep objects that overlap with the ROI
    # Inputs:
    #    img            = img to display kept objects
    #    roi_contour    = contour of roi, output from any ROI function
    #    roi_hierarchy  = contour of roi, output from any ROI function
    #    object_contour = contours of objects, output from "Identifying Objects" function
    #    obj_hierarchy  = hierarchy of objects, output from "Identifying Objects" function
    #    roi_type       = 'partial' (default, for partially inside), 'cutto', or 'largest' (keep only largest contour)

    roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(img=img1, roi_contour=roi_contour, 
                                                                          roi_hierarchy=roi_hierarchy,
                                                                          object_contour=id_objects,
                                                                          obj_hierarchy=obj_hierarchy, 
                                                                          roi_type='partial')

    # STEP 12: This function take a image with multiple contours and
    # clusters them based on user input of rows and columns

    # Inputs:
    #    img               = An RGB image
    #    roi_objects       = object contours in an image that are needed to be clustered.
    #    roi_obj_hierarchy = object hierarchy
    #    nrow              = number of rows to cluster (this should be the approximate  number of desired rows in the entire image even if there isn't a literal row of plants)
    #    ncol              = number of columns to cluster (this should be the approximate number of desired columns in the entire image even if there isn't a literal row of plants)
    #    show_grid         = if True then a grid gets displayed in debug mode (default show_grid=False)

    clusters_i, contours, hierarchies = pcv.cluster_contours(img=img1, roi_objects=roi_objects, 
                                                             roi_obj_hierarchy=roi_obj_hierarchy, 
                                                             nrow=2, ncol=3)

    # STEP 13: This function takes clustered contours and splits them into multiple images,
    # also does a check to make sure that the number of inputted filenames matches the number
    # of clustered contours. If no filenames are given then the objects are just numbered
    # Inputs:
    #    img                     = ideally a masked RGB image.
    #    grouped_contour_indexes = output of cluster_contours, indexes of clusters of contours
    #    contours                = contours to cluster, output of cluster_contours
    #    hierarchy               = object hierarchy
    #    outdir                  = directory for output images
    #    file                    = the name of the input image to use as a base name , output of filename from read_image function
    #    filenames               = input txt file with list of filenames in order from top to bottom left to right (likely list of genotypes)

    # Set global debug behavior to None (default), "print" (to file), or "plot" (Jupyter Notebooks or X11)
    # NOTE(review): this overrides the CLI debug setting for the rest of
    # the workflow.
    pcv.params.debug = "print"

    out = args.outdir
    names = args.names

    output_path, imgs, masks = pcv.cluster_contour_splitimg(rgb_img=img1, grouped_contour_indexes=clusters_i, 
                                                            contours=contours, hierarchy=hierarchies, 
                                                            outdir=out, file=filename, filenames=names)
def main():
    """Count seeds in an image and highlight the largest one.

    Legacy PlantCV pipeline: white-balance, saturation threshold,
    de-speckle, keep objects inside an ROI, measure each outermost seed
    contour, then save a "confetti" overview plus an image annotating
    the seed count and the largest seed's pixel area.
    """
    # Parse command-line options
    args = options()
    dbg = args.debug

    # Load the input image
    img, img_path, img_name = pcv.readimage(args.image)

    # Legacy API step counter
    dev = 0

    # Color-correct against a white reference region
    dev, balanced = pcv.white_balance(dev, img, dbg, (100, 100, 1000, 1000))
    img = balanced

    # HSV saturation channel: seeds are strongly saturated
    dev, sat = pcv.rgb2gray_hsv(balanced, 's', dev, dbg)

    # Seeds are bright ('light') in the saturation channel
    dev, binary = pcv.binary_threshold(sat, 70, 255, 'light', dev, dbg)

    # Remove speckles below 300 px (a copy is passed as the working image)
    binary_copy = np.copy(binary)
    dev, filled = pcv.fill(binary_copy, binary, 300, dev, dbg)

    # Detect seed contours
    dev, seed_objects, seed_hierarchy = pcv.find_objects(img, filled, dev,
                                                         dbg)

    # Rectangular region of interest
    dev, roi, roi_hier = pcv.define_roi(img, 'rectangle', dev, None,
                                        'default', dbg, True, 1500, 1000,
                                        -1000, -500)

    # Keep only seeds overlapping the ROI
    (dev, kept_objects, kept_hier,
     kept_mask, area) = pcv.roi_objects(img, 'partial', roi, roi_hier,
                                        seed_objects, seed_hierarchy, dev,
                                        dbg)

    # Draw every kept seed in a random color ("confetti" overview image)
    confetti = np.copy(img)
    for idx in range(len(kept_objects)):
        rand_color = pcv.color_palette(1)
        cv2.drawContours(confetti, kept_objects, idx, rand_color[0], -1,
                         lineType=8, hierarchy=kept_hier)
    pcv.print_image(
        confetti,
        os.path.join(args.outdir, img_name[:-4]) + "-seed-confetti.jpg")

    shape_header = []  # table header returned by analyze_object
    table = []         # per-seed pixel-area measurements
    for idx in range(len(kept_objects)):
        # Only measure outermost contours (hierarchy parent == -1)
        if kept_hier[0][idx][3] != -1:
            continue

        # Combine this single contour into one object + mask
        dev, obj, mask = pcv.object_composition(
            img, [kept_objects[idx]], np.array([[kept_hier[0][idx]]]),
            dev, None)
        if obj is None:
            continue

        # Shape analysis; shape_data[1] holds the object's pixel area
        dev, shape_header, shape_data, shape_img = pcv.analyze_object(
            img, "img", obj, mask, dev, None)
        if shape_data is not None:
            table.append(shape_data[1])

    # Outline the largest seed and annotate the image
    data_array = np.array(table)
    maxval = np.argmax(data_array)
    maxseed = np.copy(img)
    cv2.drawContours(maxseed, kept_objects, maxval, (0, 255, 0), 10)

    imgtext = "This image has " + str(len(data_array)) + " seeds"
    sizeseed = "The largest seed is in green and is " + str(
        data_array[maxval]) + " pixels"
    cv2.putText(maxseed, imgtext, (500, 300), cv2.FONT_HERSHEY_SIMPLEX, 5,
                (0, 0, 0), 10)
    cv2.putText(maxseed, sizeseed, (500, 600), cv2.FONT_HERSHEY_SIMPLEX, 5,
                (0, 0, 0), 10)
    pcv.print_image(maxseed,
                    os.path.join(args.outdir, img_name[:-4]) + "-maxseed.jpg")
def draw_plot(x_start, y_start, x_end, y_end, reference_file, save_file):
    """Count green (chlorophyll) pixels per well of a 4x6 tray selection.

    Uses PlantCV (Fahlgren et al. 2015, Molecular Plant 8:1520-1535,
    http://doi.org/10.1016/j.molp.2015.06.005; docs:
    https://plantcv.readthedocs.io/en/stable/) to segment plants inside
    the user-selected tray region, split it into a 4x6 grid of wells,
    and write one green-pixel measure per plant-containing well to a
    CSV file.

    Parameters
    ----------
    x_start, y_start : int
        Top-left corner of the user selection (display coordinates).
    x_end, y_end : int
        Bottom-right corner of the user selection (display coordinates).
    reference_file : str
        Text file listing the name of each well of the tray.
    save_file : str
        Path of the CSV file that receives the per-well pixel counts.
    """
    # Map the selection from resized-display coordinates back to the raw
    # image so the pixel counts come from full resolution.
    x_start = x_start * img_width / dim[0]
    y_start = y_start * img_height / dim[1]
    x_end = x_end * img_width / dim[0]
    y_end = y_end * img_height / dim[1]

    sel_h = int(y_end - y_start)
    sel_w = int(x_end - x_start)

    # Crop the raw image to the selection window.
    window = pcv.crop(img,
                      x=int(x_start),
                      y=int(y_start),
                      h=sel_h,
                      w=sel_w)

    # Debug aid -- uncomment to render the cropped window in the GUI.
    #cropbytes = cv.imencode('.png', window)[1].tobytes()
    #graph.DrawImage(data=cropbytes, location=(0, 0))

    # Segment green plant material; see the PlantCV multi-plant tutorial:
    # https://plantcv.readthedocs.io/en/latest/multi-plant_tutorial/
    balanced = pcv.white_balance(img=window, roi=(0, 0, 50, 50))
    chan_a = pcv.rgb2gray_lab(rgb_img=balanced, channel='a')
    bin_img = pcv.threshold.binary(gray_img=chan_a,
                                   threshold=115,
                                   max_value=255,
                                   object_type='dark')
    despeckled = pcv.fill(bin_img=bin_img, size=80)
    grown = pcv.dilate(gray_img=despeckled, ksize=2, i=1)
    objs, obj_hier = pcv.find_objects(img=balanced, mask=grown)
    roi_contour, roi_hier = pcv.roi.rectangle(img=balanced,
                                              x=0,
                                              y=0,
                                              h=sel_h,
                                              w=sel_w)
    kept_objs, kept_hier, kept_mask, _obj_area = pcv.roi_objects(
        img=balanced,
        roi_contour=roi_contour,
        roi_hierarchy=roi_hier,
        object_contour=objs,
        obj_hierarchy=obj_hier,
        roi_type='partial')

    # Cluster contours into the 4x6 well grid and split one image per well.
    cluster_idx, cluster_contours, cluster_hier = pcv.cluster_contours(
        img=balanced,
        roi_objects=kept_objs,
        roi_obj_hierarchy=kept_hier,
        nrow=4,
        ncol=6,
        show_grid=True)
    out_paths, well_imgs, _masks = pcv.cluster_contour_splitimg(
        balanced,
        grouped_contour_indexes=cluster_idx,
        contours=cluster_contours,
        hierarchy=cluster_hier,
        file=filename,
        filenames=reference_file)

    # Measure the green pixels of each well image and pair the value with
    # the well name parsed from the split-image path.
    rows = []
    for idx, well_img in enumerate(well_imgs):
        pcv.analyze_color(rgb_img=well_img,
                          mask=kept_mask,
                          hist_plot_type='rgb')

        # analyze_color stores its measurements in pcv.outputs.
        green_freqs = pcv.outputs.observations['green_frequencies']['value']

        rows.append([out_paths[idx].split('_')[1], np.trapz(green_freqs)])

    # Write one row per well, named via the reference file.
    with open(save_file, "w", newline="") as out_csv:
        csv.writer(out_csv).writerows(rows)
        sg.Popup('Finished Analysis! Please see the .csv file for results!')
Esempio n. 7
0
def main():
    """Measure several plants in one image, coloring and labeling each.

    Legacy PlantCV pipeline. Each sufficiently large plant contour is
    isolated with its own mask, measured with analyze_object, and drawn
    onto a copy of the image in a distinct color with its pixel area.

    FIX: the palette lookup now cycles (color[i % len(color)]); the
    original indexed color[i] directly and raised IndexError whenever
    more than four plant contours passed the size filter.
    """
    # Get options
    args = options()

    debug = args.debug

    # Read image
    img, path, filename = pcv.readimage(args.image)

    # Legacy API step counter
    device = 0

    # White balance against a reference region
    device, img1 = pcv.white_balance(device, img, debug, roi=(1000, 1000, 500, 500))

    # LAB green-magenta channel; plants are dark -> inverse threshold
    device, a = pcv.rgb2gray_lab(img1, 'a', device, debug)

    device, img_binary = pcv.binary_threshold(a, 116, 255, 'dark', device, debug)

    # Fill speckles smaller than 300 px
    mask = np.copy(img_binary)
    device, fill_image = pcv.fill(img_binary, mask, 300, device, debug)

    # Find objects and keep those overlapping a rectangular ROI
    device, id_objects, obj_hierarchy = pcv.find_objects(img1, fill_image, device, debug)

    device, roi, roi_hierarchy = pcv.define_roi(img1, 'rectangle', device, None, 'default', debug, True,
                                                1800, 1600, -1500, -500)

    device, roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(img1, 'partial', roi, roi_hierarchy,
                                                                                  id_objects, obj_hierarchy, device,
                                                                                  debug)

    outfile = os.path.join(args.outdir, filename)

    # Whole-image color analysis
    device, color_header, color_data, color_img = pcv.analyze_color(img1, img1, kept_mask, 256, device, debug, None,
                                                                    'v', 'img', 300, outfile)

    # Mask the plants, dilate, and re-find the individual plant contours
    device, masked = pcv.apply_mask(img1, kept_mask, 'white', device, debug)
    device, dilated = pcv.dilate(kept_mask, 10, 2, device, debug)
    device, plant_objects, plant_hierarchy = pcv.find_objects(img1, dilated, device, debug)

    img_copy = np.copy(img1)

    # Palette for distinguishing plants; cycled when there are more
    # plants than colors.
    color = [(255, 0, 255), (0, 255, 0), (66, 134, 244), (255, 255, 0)]

    for i in range(0, len(plant_objects)):
        # Skip tiny contours (noise)
        if len(plant_objects[i]) < 100:
            continue

        plant_color = color[i % len(color)]

        # Build a mask containing only this plant's contour
        background = np.zeros((np.shape(img1)), np.uint8)
        cv2.drawContours(background, plant_objects, i, (255, 255, 255), -1, lineType=8, hierarchy=plant_hierarchy)
        device, grayimg = pcv.rgb2gray(background, device, debug)
        device, masked1 = pcv.apply_mask(masked, grayimg, 'white', device, debug)

        # Re-threshold the isolated plant and measure its shape
        device, a1 = pcv.rgb2gray_lab(masked1, 'a', device, debug)
        device, img_binary1 = pcv.binary_threshold(a1, 116, 255, 'dark', device, debug)
        device, single_object, single_hierarchy = pcv.find_objects(masked1, img_binary1, device, debug)
        device, obj, mask = pcv.object_composition(img1, single_object, single_hierarchy, device, debug)
        device, shape_header, shape_data, shape_img = pcv.analyze_object(img, "img", obj, mask, device, debug)

        # Draw the plant and annotate its area (shape_data[1] = pixel area)
        cv2.drawContours(img_copy, plant_objects, i, plant_color, -1, lineType=8, hierarchy=plant_hierarchy)
        plantsize = "Plant matching this color is " + str(shape_data[1]) + " pixels large"
        cv2.putText(img_copy, plantsize, (500, (i + 1) * 300), cv2.FONT_HERSHEY_SIMPLEX, 5, plant_color, 10)

    pcv.print_image(img_copy, os.path.join(args.outdir, "arabidopsis-out_shapes.jpg"))
Esempio n. 8
0
def mainPage(response):

    print(" ")
    print(
        "--------------------------- Main Page Refreshed! -------------------------------"
    )
    print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    print(" ")

    mode_selected_obj_global = mode_selected.objects.latest('date')
    devices_obj_global = devices.objects.latest('date')

    mode1_obj_global = mode1.objects.latest('date')
    mode2_obj_global = mode2.objects.latest('date')
    mode3_obj_global = mode3.objects.latest('date')
    mode4_obj_global = mode4.objects.latest('date')

    if response.POST.get('action') == 'setup':
        print(" ")
        print("~Initializing~")
        print(" ")
        print("Mode: " + str(mode_selected_obj_global.modeNumber))
        print("Grid: " + mode_selected_obj_global.grid)
        print(" ")
        print(" ")

        json = {'modeNumber': mode_selected_obj_global.modeNumber}

        return JsonResponse(json)

    # Create instances so you can insert into the database
    mode_selected_ = mode_selected()
    devices_ = devices()
    devices_2 = devices()
    sensors_ = sensors()
    mode1_vision_system_ = mode1_vision_system()
    mode2_vision_system_ = mode2_vision_system()
    mode3_vision_system_ = mode3_vision_system()
    mode4_vision_system_ = mode4_vision_system()

    if response.POST.get('action') == 'getSensorValues':
        print(" ")
        print("~Sensor Values Updated~")
        print(" ")

        # Start SPI connection
        spi = spidev.SpiDev()  # Created an object
        spi.open(0, 0)

        humidity, temperature = Adafruit_DHT.read_retry(DHT_SENSOR, DHT_PIN)
        humidity2, temperature2 = Adafruit_DHT.read_retry(
            DHT_SENSOR2, DHT_PIN2)

        def analogInput(channel):
            spi.max_speed_hz = 1350000
            adc = spi.xfer2([1, (8 + channel) << 4, 0])
            data = ((adc[1] & 3) << 8) + adc[2]
            return data

        output = analogInput(0)  # Reading from CH0
        output = interp(output, [0, 1023], [100, 0])
        output = int(output)
        #print("Moistures", output)

        currentMoisture = output
        averageTemperature = (temperature + temperature2) / 2
        averageHumidity = (humidity + humidity2) / 2

        temperatureStatus = 'good'
        humidityStatus = 'good'
        soilMoistureStatus = 'good'

        temperatureStatusSummary = "Default"
        humidityStatusSummary = "Default"
        soilMoistureStatusSummary = "Default"

        if (averageTemperature > 26):
            temperatureStatus = 'high'  # Too High
        else:
            temperatureStatus = 'good'  # Good

        if (averageHumidity < 50):
            humidityStatus = 'low'  # Too Low
        elif (averageHumidity > 80):
            humidityStatus = 'high'  # Too High
        else:
            temperatureStatus = 11  # Good

        if (currentMoisture >= 10 and currentMoisture <= 30):
            soilMoistureStatus = 'dry'
            # Dry
        elif (currentMoisture >= 31 and currentMoisture <= 70):
            soilMoistureStatus = 'moist'
            # Moist
        elif (currentMoisture >= 71):
            soilMoistureStatus = 'wet'
            # Wet

        if (temperatureStatus == 'high'):
            temperatureStatusSummary = 'Too High!'
        else:
            temperatureStatusSummary = 'Good'

        if (humidityStatus == 'high'):
            humidityStatusSummary = 'Too High!'
        elif (humidityStatus == 'low'):
            humidityStatusSummary = 'Too Low!'
        else:
            humidityStatusSummary = 'Good'

        if (soilMoistureStatus == 'dry'):
            soilMoistureStatus = 'Dry!'
            print(" ")
            print("~ (PIN 19) Watering System Activated~")
            print(" ")
            devices_.fansStatus = devices_obj_global.fansStatus
            devices_.lightsStatus = devices_obj_global.lightsStatus
            devices_.calibrationStatus = devices_obj_global.calibrationStatus
            devices_.waterStatus = 'On'
            devices_.seedStatus = devices_obj_global.seedStatus
            devices_.save()
            GPIO.output(19, GPIO.HIGH)
            sleep(1)
            GPIO.output(19, GPIO.LOW)
            print(" ")
            print("~ (PIN 19) Watering System Deactivated~")
            print(" ")
            devices_2.fansStatus = devices_obj_global.fansStatus
            devices_2.lightsStatus = devices_obj_global.lightsStatus
            devices_2.calibrationStatus = devices_obj_global.calibrationStatus
            devices_2.waterStatus = 'Off'
            devices_2.seedStatus = devices_obj_global.seedStatus
            devices_2.save()

        elif (soilMoistureStatus == 'moist'):
            soilMoistureStatus = 'Moist'
        elif (soilMoistureStatus == 'wet'):
            soilMoistureStatus = 'Wet!'

        print("Temp1: " + str(temperature))
        print("Hum1: " + str(humidity))
        print("Temp2: " + str(temperature2))
        print("Hum2: " + str(humidity2))
        print("Moisture: " + str(currentMoisture))
        print("Ave temp: " + str(round(averageTemperature, 2)))
        print("Ave humidity: " + str(round(averageHumidity, 0)))

        if (temperatureStatus == 'low' and humidityStatus == 'low'):
            print(" ")
            print("~Fans Deactivated~")
            print(" ")
            GPIO.output(20, GPIO.LOW)
            GPIO.output(16, GPIO.LOW)
            devices_.fansStatus = 'Off'
            devices_.lightsStatus = devices_obj_global.lightsStatus
            devices_.calibrationStatus = devices_obj_global.calibrationStatus
            devices_.waterStatus = devices_obj_global.waterStatus
            devices_.seedStatus = devices_obj_global.seedStatus
            devices_.save()
        elif (temperatureStatus == 'high' and humidityStatus == 'high'):
            print(" ")
            print("~Fans Activated~")
            print(" ")
            GPIO.output(20, GPIO.HIGH)
            GPIO.output(16, GPIO.HIGH)
            devices_.fansStatus = 'On'
            devices_.lightsStatus = devices_obj_global.lightsStatus
            devices_.calibrationStatus = devices_obj_global.calibrationStatus
            devices_.waterStatus = devices_obj_global.waterStatus
            devices_.seedStatus = devices_obj_global.seedStatus
            devices_.save()
        elif (temperatureStatus == 'low' and humidityStatus == 'high'):
            print(" ")
            print("~Fans Activated~")
            print(" ")
            GPIO.output(20, GPIO.HIGH)
            GPIO.output(16, GPIO.HIGH)
            devices_.fansStatus = 'On'
            devices_.lightsStatus = devices_obj_global.lightsStatus
            devices_.calibrationStatus = devices_obj_global.calibrationStatus
            devices_.waterStatus = devices_obj_global.waterStatus
            devices_.seedStatus = devices_obj_global.seedStatus
            devices_.save()
        elif (temperatureStatus == 'high' and humidityStatus == 'low'):
            print(" ")
            print("~Fans Activated~")
            print(" ")
            GPIO.output(20, GPIO.HIGH)
            GPIO.output(16, GPIO.HIGH)
            devices_.fansStatus = 'On'
            devices_.lightsStatus = devices_obj_global.lightsStatus
            devices_.calibrationStatus = devices_obj_global.calibrationStatus
            devices_.waterStatus = devices_obj_global.waterStatus
            devices_.seedStatus = devices_obj_global.seedStatus
            devices_.save()

        sensors_.temperature = round(averageTemperature, 2)
        sensors_.humidity = round(averageHumidity, 0)
        sensors_.moisture = currentMoisture
        sensors_.temperatureStatus = temperatureStatusSummary
        sensors_.humidityStatus = humidityStatusSummary
        sensors_.soilMoistureStatus = soilMoistureStatus
        sensors_.save()

        sensors_obj = sensors.objects.latest('date')
        mode_selected_obj_first = mode_selected.objects.first()
        mode_selected_obj = mode_selected.objects.latest('date')

        date1 = mode_selected_obj_first.date
        date2 = sensors_obj.date

        def numOfDays(date1, date2):
            """Return the number of whole days elapsed from *date1* to *date2*."""
            elapsed = date2 - date1
            return elapsed.days

        mode_selected_.daysCounter = numOfDays(date1, date2)
        mode_selected_.grid = mode_selected_obj.grid
        mode_selected_.rows = mode_selected_obj.rows
        mode_selected_.columns = mode_selected_obj.columns
        mode_selected_.modeNumber = mode_selected_obj.modeNumber
        mode_selected_.save()

        mode_selected_obj_2 = mode_selected.objects.latest('date')

        json = {
            'daysCounter_json': str(mode_selected_obj_2.daysCounter),
            'date_json': str(datetime.now().strftime('%b. %d, %Y, %-I:%M %p')),
            'temperature_json': sensors_obj.temperature,
            'humidity_json': sensors_obj.humidity,
            'soilMoisture_json': sensors_obj.moisture,
            'temperatureStatus_json': sensors_obj.temperatureStatus,
            'humidityStatus_json': sensors_obj.humidityStatus,
            'soilMoistureStatus_json': sensors_obj.soilMoistureStatus,
        }

        return JsonResponse(json)

    if response.POST.get('action') == 'snapImage':
        mode_selected_obj = mode_selected.objects.latest('date')
        if (mode_selected_obj.modeNumber == 1):
            print(" ")
            print("~[ Mode 1 ] Vision System Starting~")
            print(" ")
            print(" ")

            getTime = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')

            class options:
                """Lightweight stand-in for the CLI-argument object the
                PlantCV workflow expects (debug mode + output directory)."""

                def __init__(self):
                    # "plot" renders intermediate workflow images inline.
                    self.debug = "plot"
                    # Directory where garden snapshots are written.
                    self.outdir = "./assets/gardenPics/"

            args = options()
            #pcv.params.debug = args.debug

            plant_area_list = []  #Plant area array for storage

            #img, path, filename = pcv.readimage(filename='./assets/gardenPics/' + getTime + '.jpg', modeNumber="native") # Read image to be used
            img, path, filename = pcv.readimage(
                filename='./assets/gardenPics/test.jpg',
                mode="native")  # Read image to be used

            # START of  Multi Plant Workflow https://plantcv.readthedocs.io/en/stable/multi-plant_tutorial/

            # STEP 1: Check if this is a night image
            # STEP 2: Normalize the white color so you can later
            img1 = pcv.white_balance(img, roi=(600, 70, 20, 20))
            # STEP 3: Rotate the image so that plants line up with grid
            # STEP 4: Shift image
            # STEP 5: Convert image from RGB colorspace to LAB colorspace Keep only the green-magenta channel (grayscale)
            a = pcv.rgb2gray_lab(rgb_img=img1, channel='a')
            # STEP 6: Set a binary threshold on the saturation channel image
            img_binary = pcv.threshold.binary(gray_img=a,
                                              threshold=119,
                                              max_value=255,
                                              object_type='dark')
            # STEP 7: Fill in small objects (speckles)
            fill_image = pcv.fill(bin_img=img_binary, size=100)
            # STEP 8: Dilate so that you don't lose leaves (just in case)
            dilated = pcv.dilate(gray_img=fill_image, ksize=2, i=1)
            # STEP 9: Find objects (contours: black-white boundaries)
            id_objects, obj_hierarchy = pcv.find_objects(img=img1,
                                                         mask=dilated)
            # STEP 10: Define region of interest (ROI)
            roi_contour, roi_hierarchy = pcv.roi.rectangle(img=img1,
                                                           x=100,
                                                           y=160,
                                                           h=390,
                                                           w=780)
            # STEP 11: Keep objects that overlap with the ROI
            roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
                img=img1,
                roi_contour=roi_contour,
                roi_hierarchy=roi_hierarchy,
                object_contour=id_objects,
                obj_hierarchy=obj_hierarchy,
                roi_type='partial')

            # END of Multi Plant Workflow

            # START of Create Multiple Regions of Interest (ROI) https://plantcv.readthedocs.io/en/stable/roi_multi/

            # Make a grid of ROIs
            roi1, roi_hier1 = pcv.roi.multi(img=img1,
                                            coord=(180, 260),
                                            radius=50,
                                            spacing=(150, 200),
                                            nrows=2,
                                            ncols=5)

            # Loop through and filter each plant, record the area
            for i in range(0, len(roi1)):
                roi = roi1[i]
                hierarchy = roi_hier1[i]
                # Find objects
                filtered_contours, filtered_hierarchy, filtered_mask, filtered_area = pcv.roi_objects(
                    img=img,
                    roi_type="partial",
                    roi_contour=roi,
                    roi_hierarchy=hierarchy,
                    object_contour=roi_objects,
                    obj_hierarchy=roi_obj_hierarchy)

                # Record the area
                plant_area_list.append(filtered_area)

                if (i < 10):
                    print(plant_area_list[i])

            # END of Create Multiple Regions of Interest (ROI)

            # Label area by plant ID, leftmost plant has id=0
            plant_area_labels = [i for i in range(0, len(plant_area_list))]

            #out = args.outdir
            # Create a new measurement
            pcv.outputs.add_observation(variable='plant_area',
                                        trait='plant area ',
                                        method='plantcv.plantcv.roi_objects',
                                        scale='pixels',
                                        datatype=list,
                                        value=plant_area_list,
                                        label=plant_area_labels)

            # Print areas to XML
            #pcv.print_results(filename="./assets/gardenPics/plant_area_results.xml")

            mode1_vision_system_.image = '../assets/gardenPics/' + getTime + '.jpg'
            mode1_vision_system_.plant1 = plant_area_list[0]
            mode1_vision_system_.plant2 = plant_area_list[1]
            mode1_vision_system_.plant3 = plant_area_list[2]
            mode1_vision_system_.plant4 = plant_area_list[3]
            mode1_vision_system_.plant5 = plant_area_list[4]
            mode1_vision_system_.plant6 = plant_area_list[5]
            mode1_vision_system_.plant7 = plant_area_list[6]
            mode1_vision_system_.plant8 = plant_area_list[7]
            mode1_vision_system_.plant9 = plant_area_list[8]
            mode1_vision_system_.plant10 = plant_area_list[9]
            mode1_vision_system_.save()

            mode1_visionSystem_obj_afterInsertion = mode1_vision_system.objects.latest(
                'date')
            mode_selected_obj_first = mode_selected.objects.first()
            mode_selected_obj = mode_selected.objects.latest('date')

            date1 = mode_selected_obj_first.date
            date2 = mode1_visionSystem_obj_afterInsertion.date

            def numOfDays(date1, date2):
                """Whole days from *date1* to *date2* (negative if reversed)."""
                delta = date2 - date1
                return delta.days

            mode_selected_.daysCounter = numOfDays(date1, date2)
            mode_selected_.grid = mode_selected_obj.grid
            mode_selected_.rows = mode_selected_obj.rows
            mode_selected_.columns = mode_selected_obj.columns
            mode_selected_.modeNumber = mode_selected_obj.modeNumber
            mode_selected_.save()

            json = {
                'image_json':
                str(mode1_vision_system_.image),
                'cameraDateJSON':
                str(datetime.now().strftime('%b. %d, %Y, %-I:%M %p')),
                'daysCounter_json':
                str(numOfDays(date1, date2)),
                'plant1_json':
                mode1_vision_system_.plant1,
                'plant2_json':
                mode1_vision_system_.plant2,
                'plant3_json':
                mode1_vision_system_.plant3,
                'plant4_json':
                mode1_vision_system_.plant4,
                'plant5_json':
                mode1_vision_system_.plant5,
                'plant6_json':
                mode1_vision_system_.plant6,
                'plant7_json':
                mode1_vision_system_.plant7,
                'plant8_json':
                mode1_vision_system_.plant8,
                'plant9_json':
                mode1_vision_system_.plant9,
                'plant10_json':
                mode1_vision_system_.plant10
            }

            return JsonResponse(json)

        if (mode_selected_obj.modeNumber == 2):
            print(" ")
            print("~[ Mode 2 ] Vision System Starting~")
            print(" ")
            print(" ")

        if (mode_selected_obj.modeNumber == 3):
            print(" ")
            print("~[ Mode 3 ] Vision System Starting~")
            print(" ")
            print(" ")

        if (mode_selected_obj.modeNumber == 4):
            print(" ")
            print("~[ Mode 4 ] Vision System Starting~")
            print(" ")
            print(" ")

    if response.POST.get('action') == 'onMode1':

        print(" ")
        print("~Mode 1 Activated~")
        print(" ")

        GPIO.output(6, GPIO.LOW)
        GPIO.output(5, GPIO.LOW)

        mode_selected_.grid = mode1_obj_global.grid
        mode_selected_.rows = mode1_obj_global.rows
        mode_selected_.columns = mode1_obj_global.columns
        mode_selected_.modeNumber = mode1_obj_global.modeNumber
        mode_selected_.save()

        mode_selected_obj = mode_selected.objects.latest('date')

        json = {
            'grid_json': mode_selected_obj.grid,
            'mode_json': mode_selected_obj.modeNumber,
        }

        return JsonResponse(json)

    if response.POST.get('action') == 'onMode2':

        print(" ")
        print("~Mode 2 Activated~")
        print(" ")

        GPIO.output(6, GPIO.LOW)
        GPIO.output(5, GPIO.HIGH)

        mode_selected_.grid = mode2_obj_global.grid
        mode_selected_.rows = mode2_obj_global.rows
        mode_selected_.columns = mode2_obj_global.columns
        mode_selected_.modeNumber = mode2_obj_global.modeNumber
        mode_selected_.save()

        mode_selected_obj = mode_selected.objects.latest('date')

        json = {
            'grid_json': mode_selected_obj.grid,
            'mode_json': mode_selected_obj.modeNumber,
        }

        return JsonResponse(json)

    if response.POST.get('action') == 'onMode3':

        print(" ")
        print("~Mode 3 Activated~")
        print(" ")

        GPIO.output(6, GPIO.HIGH)
        GPIO.output(5, GPIO.LOW)

        mode_selected_.grid = mode3_obj_global.grid
        mode_selected_.rows = mode3_obj_global.rows
        mode_selected_.columns = mode3_obj_global.columns
        mode_selected_.modeNumber = mode3_obj_global.modeNumber
        mode_selected_.save()

        mode_selected_obj = mode_selected.objects.latest('date')

        json = {
            'grid_json': mode_selected_obj.grid,
            'mode_json': mode_selected_obj.modeNumber,
        }

        return JsonResponse(json)

    if response.POST.get('action') == 'onMode4':

        print(" ")
        print("~Mode 4 Activated~")
        print(" ")

        GPIO.output(6, GPIO.HIGH)
        GPIO.output(5, GPIO.HIGH)

        mode_selected_.grid = mode4_obj_global.grid
        mode_selected_.rows = mode4_obj_global.rows
        mode_selected_.columns = mode4_obj_global.columns
        mode_selected_.modeNumber = mode4_obj_global.modeNumber
        mode_selected_.save()

        mode_selected_obj = mode_selected.objects.latest('date')

        json = {
            'grid_json': mode_selected_obj.grid,
            'mode_json': mode_selected_obj.modeNumber,
        }

        return JsonResponse(json)

    if response.POST.get('action') == 'onCalibration':

        print(" ")
        print("~ (PIN 26) Calibration Activated~")
        print(" ")

        devices_.fansStatus = devices_obj_global.fansStatus
        devices_.lightsStatus = devices_obj_global.lightsStatus
        devices_.calibrationStatus = 'On'
        devices_.waterStatus = devices_obj_global.waterStatus
        devices_.seedStatus = devices_obj_global.seedStatus
        devices_.save()

        GPIO.output(26, GPIO.HIGH)
        sleep(1)
        GPIO.output(26, GPIO.LOW)

        print(" ")
        print("~ (PIN 26) Calibration Deactivated~")
        print(" ")

        devices_2.fansStatus = devices_obj_global.fansStatus
        devices_2.lightsStatus = devices_obj_global.lightsStatus
        devices_2.calibrationStatus = 'Off'
        devices_2.waterStatus = devices_obj_global.waterStatus
        devices_2.seedStatus = devices_obj_global.seedStatus
        devices_2.save()

    if response.POST.get('action') == 'onFan':

        print(" ")
        print("~Fans Activated~")
        print(" ")

        GPIO.output(20, GPIO.HIGH)
        GPIO.output(16, GPIO.HIGH)

        devices_.fansStatus = 'On'
        devices_.lightsStatus = devices_obj_global.lightsStatus
        devices_.calibrationStatus = devices_obj_global.calibrationStatus
        devices_.waterStatus = devices_obj_global.waterStatus
        devices_.seedStatus = devices_obj_global.seedStatus
        devices_.save()

    if response.POST.get('action') == 'offFan':

        print(" ")
        print("~Fans deactivated~")
        print(" ")

        GPIO.output(20, GPIO.LOW)
        GPIO.output(16, GPIO.LOW)

        devices_.fansStatus = 'Off'
        devices_.lightsStatus = devices_obj_global.lightsStatus
        devices_.calibrationStatus = devices_obj_global.calibrationStatus
        devices_.waterStatus = devices_obj_global.waterStatus
        devices_.seedStatus = devices_obj_global.seedStatus
        devices_.save()

    if response.POST.get('action') == 'onLights':

        print(" ")
        print("~Lights Activated~")
        print(" ")

        GPIO.output(21, GPIO.HIGH)

        devices_.fansStatus = devices_obj_global.fansStatus
        devices_.lightsStatus = 'On'
        devices_.calibrationStatus = devices_obj_global.calibrationStatus
        devices_.waterStatus = devices_obj_global.waterStatus
        devices_.seedStatus = devices_obj_global.seedStatus
        devices_.save()

    if response.POST.get('action') == 'offLights':

        print(" ")
        print("~Lights Deactivated~")
        print(" ")

        GPIO.output(21, GPIO.LOW)

        devices_.fansStatus = devices_obj_global.fansStatus
        devices_.lightsStatus = 'Off'
        devices_.calibrationStatus = devices_obj_global.calibrationStatus
        devices_.waterStatus = devices_obj_global.waterStatus
        devices_.seedStatus = devices_obj_global.seedStatus
        devices_.save()

    if response.POST.get('action') == 'onWater':

        print(" ")
        print("~ (PIN 19) Watering System Activated~")
        print(" ")

        devices_.fansStatus = devices_obj_global.fansStatus
        devices_.lightsStatus = devices_obj_global.lightsStatus
        devices_.calibrationStatus = devices_obj_global.calibrationStatus
        devices_.waterStatus = 'On'
        devices_.seedStatus = devices_obj_global.seedStatus
        devices_.save()

        GPIO.output(19, GPIO.HIGH)
        sleep(1)
        GPIO.output(19, GPIO.LOW)

        print(" ")
        print("~ (PIN 19) Watering System Deactivated~")
        print(" ")

        devices_.fansStatus = devices_obj_global.fansStatus
        devices_.lightsStatus = devices_obj_global.lightsStatus
        devices_.calibrationStatus = devices_obj_global.calibrationStatus
        devices_.waterStatus = 'Off'
        devices_.seedStatus = devices_obj_global.seedStatus
        devices_.save()

    if response.POST.get('action') == 'onSeed':

        print(" ")
        print("~ (PIN 13) Seeder Activated~")
        print(" ")

        devices_.fansStatus = devices_obj_global.fansStatus
        devices_.lightsStatus = devices_obj_global.lightsStatus
        devices_.calibrationStatus = devices_obj_global.calibrationStatus
        devices_.waterStatus = devices_obj_global.waterStatus
        devices_.seedStatus = 'On'
        devices_.save()

        GPIO.output(13, GPIO.HIGH)
        sleep(1)
        GPIO.output(13, GPIO.LOW)

        print(" ")
        print("~Seeder Deactivated~")
        print(" ")

        devices_.fansStatus = devices_obj_global.fansStatus
        devices_.lightsStatus = devices_obj_global.lightsStatus
        devices_.calibrationStatus = devices_obj_global.calibrationStatus
        devices_.waterStatus = devices_obj_global.waterStatus
        devices_.seedStatus = 'Off'
        devices_.save()

    if response.POST.get('action') == 'fullReset':

        print(" ")
        print("~Database Cleared~")
        print(" ")

        mode_selected.objects.all().delete()
        mode_selected_.daysCounter = 0
        mode_selected_.grid = mode1_obj_global.grid
        mode_selected_.rows = mode1_obj_global.rows
        mode_selected_.columns = mode1_obj_global.columns
        mode_selected_.modeNumber = mode1_obj_global.modeNumber
        mode_selected_.save()

        devices.objects.all().delete()
        devices_.calibrationStatus = 'Off'
        devices_.fansStatus = 'Off'
        devices_.lightsStatus = 'Off'
        devices_.waterStatus = 'Off'
        devices_.seedStatus = 'Off'
        devices_.save()

        sensors.objects.all().delete()
        sensors_.temperature = 0
        sensors_.humidity = 0
        sensors_.moisture = 0
        sensors_.temperatureStatus = "Good"
        sensors_.humidityStatus = "Good"
        sensors_.soilMoistureStatus = "Good"
        sensors_.save()

        mode1_vision_system.objects.all().delete()
        mode1_vision_system_.image = '../assets/background/rpiBG.gif'
        mode1_vision_system_.plant1 = 0
        mode1_vision_system_.plant2 = 0
        mode1_vision_system_.plant3 = 0
        mode1_vision_system_.plant4 = 0
        mode1_vision_system_.plant5 = 0
        mode1_vision_system_.plant6 = 0
        mode1_vision_system_.plant7 = 0
        mode1_vision_system_.plant8 = 0
        mode1_vision_system_.plant9 = 0
        mode1_vision_system_.plant10 = 0
        mode1_vision_system_.save()

        mode2_vision_system.objects.all().delete()
        mode2_vision_system_.image = '../assets/background/rpiBG.gif'
        mode2_vision_system_.plant1 = 0
        mode2_vision_system_.plant2 = 0
        mode2_vision_system_.plant3 = 0
        mode2_vision_system_.plant4 = 0
        mode2_vision_system_.plant5 = 0
        mode2_vision_system_.plant6 = 0
        mode2_vision_system_.plant7 = 0
        mode2_vision_system_.plant8 = 8
        mode2_vision_system_.save()

        mode3_vision_system.objects.all().delete()
        mode3_vision_system_.image = '../assets/background/rpiBG.gif'
        mode3_vision_system_.plant1 = 0
        mode3_vision_system_.plant2 = 0
        mode3_vision_system_.plant3 = 0
        mode3_vision_system_.plant4 = 0
        mode3_vision_system_.plant5 = 0
        mode3_vision_system_.plant6 = 0
        mode3_vision_system_.plant7 = 0
        mode3_vision_system_.plant8 = 0
        mode3_vision_system_.plant9 = 0
        mode3_vision_system_.plant10 = 0
        mode3_vision_system_.plant11 = 0
        mode3_vision_system_.plant12 = 0
        mode3_vision_system_.plant13 = 0
        mode3_vision_system_.plant14 = 0
        mode3_vision_system_.plant15 = 0
        mode3_vision_system_.plant16 = 0
        mode3_vision_system_.plant17 = 0
        mode3_vision_system_.plant18 = 18
        mode3_vision_system_.save()

        mode4_vision_system.objects.all().delete()
        mode4_vision_system_.image = '../assets/background/rpiBG.gif'
        mode4_vision_system_.plant1 = 0
        mode4_vision_system_.plant2 = 0
        mode4_vision_system_.plant3 = 0
        mode4_vision_system_.plant4 = 0
        mode4_vision_system_.plant5 = 0
        mode4_vision_system_.plant6 = 0
        mode4_vision_system_.plant7 = 0
        mode4_vision_system_.plant8 = 0
        mode4_vision_system_.plant9 = 0
        mode4_vision_system_.plant10 = 0
        mode4_vision_system_.plant11 = 0
        mode4_vision_system_.plant12 = 12
        mode4_vision_system_.save()

        mode_selected_obj = mode_selected.objects.latest('date')
        mode1_visionSystem_obj = mode1_vision_system.objects.latest('date')
        sensors_obj = sensors.objects.latest('date')
        devices_obj = devices.objects.latest('date')

        json = {
            'mode_json': mode_selected_obj.modeNumber,
            'grid_json': mode_selected_obj.grid,
            'startDate_json':
            str(datetime.now().strftime('%b. %d, %Y, %-I:%M %p')),
            'daysCounter_json': str(mode_selected_obj.daysCounter),
            'calibration_json': devices_obj.calibrationStatus,
            'fans_json': devices_obj.fansStatus,
            'lights_json': devices_obj.lightsStatus,
            'water_json': devices_obj.waterStatus,
            'seeder_json': devices_obj.seedStatus,
            'temperature_json': sensors_obj.temperature,
            'humidity_json': sensors_obj.humidity,
            'soilMoisture_json': sensors_obj.moisture,
            'temperatureStatus_json': sensors_obj.temperatureStatus,
            'humidityStatus_json': sensors_obj.humidityStatus,
            'soilMoistureStatus_json': sensors_obj.soilMoistureStatus,
            'image_json': str(mode1_visionSystem_obj.image),
            'plant1_json': mode1_visionSystem_obj.plant1,
            'plant2_json': mode1_visionSystem_obj.plant2,
            'plant3_json': mode1_visionSystem_obj.plant3,
            'plant4_json': mode1_visionSystem_obj.plant4,
            'plant5_json': mode1_visionSystem_obj.plant5,
            'plant6_json': mode1_visionSystem_obj.plant6,
            'plant7_json': mode1_visionSystem_obj.plant7,
            'plant8_json': mode1_visionSystem_obj.plant8,
            'plant9_json': mode1_visionSystem_obj.plant9,
            'plant10_json': mode1_visionSystem_obj.plant10,
        }

        return JsonResponse(json)

    sensors_obj_global = sensors.objects.latest('date')
    mode1_vision_system_obj_global = mode1_vision_system.objects.latest('date')
    mode2_vision_system_obj_global = mode2_vision_system.objects.latest('date')
    mode3_vision_system_obj_global = mode3_vision_system.objects.latest('date')
    mode4_vision_system_obj_global = mode4_vision_system.objects.latest('date')
    mode_selected_obj_global_first = mode_selected.objects.first()
    mode_selected_obj_global_2 = mode_selected.objects.latest('date')

    myObj = {
        'mode_selected_obj_global_first': mode_selected_obj_global_first,
        'mode_selected_obj_global_2': mode_selected_obj_global_2,
        'devices_obj_global': devices_obj_global,
        'sensors_obj_global': sensors_obj_global,
        'mode1_vision_system_obj_global': mode1_vision_system_obj_global,
        'mode2_vision_system_obj_global': mode2_vision_system_obj_global,
        'mode3_vision_system_obj_global': mode3_vision_system_obj_global,
        'mode4_vision_system_obj_global': mode4_vision_system_obj_global
    }

    return render(response, 'main.html', context=myObj)
# Guard against night captures: a frame that is mostly dark (mean pixel
# intensity below 50 on the 0-255 scale) is assumed to have been taken
# in darkness and aborts the workflow.
if np.average(img) < 50:
    pcv.fatal_error("Night Image")
else:
    pass

# Normalize the white color so you can later
# compare color between images.

# Inputs:
#   img = image object, RGB color space
#   roi = region for white reference, if none uses the whole image,
#         otherwise (x position, y position, box width, box height)

# White-balance the image based on a white "toughspot" reference patch
# sampled from a 20x20 px box at (x=52, y=100).
img1 = pcv.white_balance(img, roi=(52, 100, 20, 20))

# Inputs:
#   img = image object, RGB color space
#   rotation_deg = Rotation angle in degrees, can be negative, positive values
#                  will move counter-clockwise
#   crop = If True then image will be cropped to original image dimensions, if False
#          the image size will be adjusted to accommodate new image dimensions
# NOTE(review): rotate_img is never referenced below — the workflow continues
# from img1, so this 1-degree rotation has no effect on later steps. Confirm intent.
rotate_img = pcv.rotate(img=img1, rotation_deg=-1, crop=False)

# Inputs:
#   img    = image object
#   number = integer, number of pixels to move image
#   side   = direction to move from "top", "bottom", "right","left"
shift1 = pcv.shift_img(img=img1, number=40, side='top')
img1 = shift1  # downstream steps use the shifted image
# Esempio n. 10
def main():
    """Single-image PlantCV workflow for a tray photo.

    Reads a fixed input image, rejects night shots, straightens and
    white-balances the frame, thresholds the LAB green-magenta channel,
    keeps objects inside a rectangular ROI, clusters them into a
    4-row x 3-column grid, and writes annotated result images under
    'Image test/Result'.
    """
    # Raw strings avoid invalid escape sequences in Windows-style
    # backslash paths (SyntaxWarning since Python 3.6, slated to become
    # an error); the runtime bytes are unchanged.
    path = r'Image test\capture (2).jpg'
    imgraw, path, img_filename = pcv.readimage(path, mode='native')

    # Mean pixel intensity (0-255); a mostly-dark frame is a night image.
    nilaiTerang = np.average(imgraw)
    if nilaiTerang < 50:
        pcv.fatal_error("Night Image")

    # Straighten the frame by -3 degrees, cropping back to the original
    # dimensions.
    rotateimg = pcv.rotate(imgraw, -3, True)
    imgraw = rotateimg

    # White-balance over the whole image (no reference ROI supplied).
    bersih1 = pcv.white_balance(imgraw)

    # LAB 'a' (green-magenta) channel: vegetation shows up as dark pixels.
    hitamputih = pcv.rgb2gray_lab(bersih1, channel='a')

    # Binarize: keep pixels darker than the threshold (the plants).
    img_binary = pcv.threshold.binary(hitamputih,
                                      threshold=110,
                                      max_value=255,
                                      object_type='dark')

    # Remove speckles, then dilate so thin leaf edges are not lost.
    fill_image = pcv.fill(bin_img=img_binary, size=10)
    dilated = pcv.dilate(gray_img=fill_image, ksize=6, i=1)

    # Detect contours and restrict them to the tray region of interest.
    id_objects, obj_hierarchy = pcv.find_objects(img=imgraw, mask=dilated)
    roi_contour, roi_hierarchy = pcv.roi.rectangle(img=imgraw,
                                                   x=280,
                                                   y=96,
                                                   h=1104,
                                                   w=1246)
    print(type(roi_contour))
    print(type(roi_hierarchy))
    print(roi_hierarchy)
    print(roi_contour)
    # Draw the ROI outline in red directly onto imgraw (modifies it in place).
    cv2.drawContours(imgraw, roi_contour, -1, (0, 0, 255), 3)

    roi_obj, hier, kept_mask, obj_area = pcv.roi_objects(
        img=imgraw,
        roi_contour=roi_contour,
        roi_hierarchy=roi_hierarchy,
        object_contour=id_objects,
        obj_hierarchy=obj_hierarchy,
        roi_type='partial')

    # Cluster the kept contours into a 4-row x 3-column planting grid.
    cnt_i, contours, hierarchies = pcv.cluster_contours(img=imgraw,
                                                        roi_objects=roi_obj,
                                                        roi_obj_hierarchy=hier,
                                                        nrow=4,
                                                        ncol=3)
    clustered_image = pcv.visualize.clustered_contours(
        img=imgraw,
        grouped_contour_indices=cnt_i,
        roi_objects=roi_obj,
        roi_obj_hierarchy=hier)

    # Combine all kept contours into one object and record shape traits.
    obj, mask = pcv.object_composition(imgraw, roi_obj, hier)
    hasil = pcv.analyze_object(imgraw, obj, mask)

    # Persist intermediate and final images (raw strings for the paths).
    pcv.print_image(imgraw, r'Image test\Result\wel.jpg')
    pcv.print_image(clustered_image, r'Image test\Result\clustred.jpg')
    pcv.print_image(hitamputih, r'Image test\Result\Bersihe.jpg')
    pcv.print_image(dilated, r'Image test\Result\dilated.jpg')
    pcv.print_image(hasil, r'Image test\Result\hasil.jpg')

    # Report the measured plant-area observation(s).
    plantHasil = pcv.outputs.observations['area']
    data1 = pcv.outputs.observations['area']['value']
    print(data1)
    print(plantHasil)
def main():
    """PlantCV multi-plant workflow for a tray photographed with a
    ColorChecker Passport: white-balance, mask out the color card and
    stake, segment plants, cluster them into a 3x3 grid, and split each
    cluster into its own image under ``args.outdir``.
    """
    # Get options (CLI args: image path, debug mode, output directory).
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)

    pcv.params.debug = args.debug  # set debug mode (None, "print", or "plot")

    # STEP 1: white balance (for comparison across images)
    # inputs:
    #   img = image object, RGB colorspace
    #   roi = region for white reference (position of ColorChecker Passport)
    img1 = pcv.white_balance(img, roi=(910, 3555, 30, 30))

    # STEP 2: Mask out color card and stake
    # inputs:
    #   img = grayscale image ('a' channel)
    #   p1 = (x,y) coordinates for top left corner of rectangle
    #   p2 = (x,y) coordinates for bottom right corner of rectangle
    #   color = color to make the mask (white here to match background)
    # NOTE(review): only `masked`/`masked2` are used below; the binary,
    # contours and hierarchy outputs of both calls are overwritten/ignored.
    masked, binary, contours, hierarchy = pcv.rectangle_mask(img1, (0, 2000),
                                                             (1300, 4000),
                                                             color="white")
    masked2, binary, contours, hierarchy = pcv.rectangle_mask(masked,
                                                              (0, 3600),
                                                              (4000, 4000),
                                                              color="white")

    # STEP 3: Convert from RGB colorspace to LAB colorspace
    # inputs:
    #   img = image object, RGB colorspace
    #   channel = color subchannel ('l' = lightness, 'a' = green-magenta, 'b' = blue-yellow)
    # NOTE(review): the original comment said "keep green-magenta channel (a)"
    # but the code requests 'l' (lightness) — confirm which channel is intended.
    a = pcv.rgb2gray_lab(masked2, 'l')

    # STEP 4: Set a binary threshold on the saturation channel image
    # inputs:
    #   img = img object, grayscale
    #   threshold = threshold value (0-255) - need to adjust this
    #   max_value = value to apply above threshold (255 = white)
    #   object_type = light or dark
    img_binary = pcv.threshold.binary(a, 118, 255, object_type="dark")

    # STEP 5: Apply median blur to binary image (reduces noise)
    # inputs:
    #   img = img object, binary
    #   ksize = kernel size for the blur
    blur_image = pcv.median_blur(img_binary, 10)

    # STEP 6: Fill small objects (speckles)
    # inputs:
    #   img = img object, binary
    #   size = minimum object area size in pixels
    fill_image1 = pcv.fill(blur_image, 150000)

    # STEP 7: Invert image to fill gaps
    # inputs:
    #   img = img object, binary
    inv_image = pcv.invert(fill_image1)
    # rerun fill on inverted image to remove small holes inside objects
    inv_fill = pcv.fill(inv_image, 25000)
    # invert image again to restore the original foreground polarity
    fill_image = pcv.invert(inv_fill)

    # STEP 8: Dilate to avoid losing detail
    # inputs:
    #   img = img object, binary
    #   ksize = kernel size
    #   i = iterations (number of consecutive filtering passes)
    dilated = pcv.dilate(fill_image, 2, 1)

    # STEP 9: Find objects (contours: black-white boundaries)
    # inputs:
    #   img = img object, RGB colorspace
    #   mask = binary image used for object detection
    id_objects, obj_hierarchy = pcv.find_objects(img1, dilated)

    # STEP 10: Define region of interest (ROI)
    # inputs:
    #   img = img object to overlay ROI
    #   x = x-coordinate of upper left corner for rectangle
    #   y = y-coordinate of upper left corner for rectangle
    #   h = height of rectangle
    #   w = width of rectangle
    roi_contour, roi_hierarchy = pcv.roi.rectangle(img=img1,
                                                   x=20,
                                                   y=10,
                                                   h=3000,
                                                   w=3000)

    # STEP 11: Keep objects that overlap with the ROI
    # inputs:
    #   img = img where selected objects will be displayed
    #   roi_type = options are 'cutto', 'partial' (objects are partially inside roi), or 'largest' (keep only the biggest one)
    #   roi_contour = contour of roi, output from 'view and adjust roi' function (STEP 10)
    #   roi_hierarchy = contour of roi, output from 'view and adjust roi' function (STEP 10)
    #   object_contour = contours of objects, output from 'identifying objects' function (STEP 9)
    #   obj_hierarchy = hierarchy of objects, output from 'identifying objects' function (STEP 9)
    roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img1, 'partial', roi_contour, roi_hierarchy, id_objects, obj_hierarchy)

    # STEP 12: Cluster multiple contours in an image based on user input of rows/columns
    # inputs:
    #   img = img object (RGB colorspace)
    #   roi_objects = object contours in an image that will be clustered (output from STEP 11)
    #   roi_obj_hierarchy = object hierarchy (also from STEP 11)
    #   nrow = number of rows for clustering (desired rows in image even if no leaf present in all)
    #   ncol = number of columns to cluster (desired columns in image even if no leaf present in all)
    clusters_i, contours, hierarchies = pcv.cluster_contours(
        img1, roi_objects, roi_obj_hierarchy, 3, 3)

    # STEP 13: select and split clustered contours to export into multiple images
    # also checks if number of inputted filenames matches number of clustered contours
    # if no filenames, objects are numbered in order
    # inputs:
    #   img = masked RGB image
    #   grouped_contour_indexes = indexes of clustered contours, output of 'cluster_contours' (STEP 12)
    #   contours = contours of cluster, output of 'cluster_contours' (STEP 12)
    #   hierarchy = object hierarchy (from STEP 12)
    #   outdir = directory to export output images
    #   file = name of input image to use as basename (uses filename from 'readimage')
    #   filenames = (optional) txt file with list of filenames ordered from top to bottom/left to right

    # Disable debug output for the export step so only the split images are written.
    # Set global debug behavior to None (default), "print" (to file), or "plot" (Jupyter Notebooks or X11)
    pcv.params.debug = None
    out = args.outdir
    # names = args.namesout = "./"

    output_path, imgs, masks = pcv.cluster_contour_splitimg(img1,
                                                            clusters_i,
                                                            contours,
                                                            hierarchies,
                                                            out,
                                                            file=filename,
                                                            filenames=None)
# Esempio n. 12
def main():
    """Run a PlantCV (legacy v2-style, device-counter API) image-analysis
    pipeline: white-balance the input photo, segment plant material via the
    LAB 'a' (green-magenta) channel, keep objects inside a rectangular ROI,
    cluster them into a grid, and split each cluster out to its own image.

    NOTE(review): this function relies on several names that must be defined
    at module level outside this chunk — `options()`, `white_balance_roi`,
    `darkness_threshold`, `minimum_object_area_pixels`,
    `total_region_of_interest`, `expected_number_of_rows`,
    `expected_number_of_columns` — confirm they exist before running.
    """
    # Get options
    # NOTE(review): `options()` is presumably an argparse wrapper defined
    # elsewhere in the file; expected to provide .image, .debug, .outdir,
    # .names — verify against its definition.
    args = options()

    # Read image
    img, path, filename = pcv.readimage(args.image)

    debug = args.debug

    # Pipeline step
    # device counts pipeline steps; each legacy pcv call returns it incremented.
    device = 0

    # Step 1: Check if this is a night image, for some of these datasets images were captured
    # at night, even if nothing is visible. To make sure that images are not taken at
    # night we check that the image isn't mostly dark (0=black, 255=white).
    # if it is a night image it throws a fatal error and stops the pipeline.

    # Mean pixel intensity below 50 is treated as "captured in the dark".
    if np.average(img) < 50:
        pcv.fatal_error("Night Image")
    else:
        pass

    # Step 2: Normalize the white color so you can later
    # compare color between images.
    # Inputs:
    # device = device number. Used to count steps in the workflow
    # img = image object, RGB colorspace
    # debug = None, print, or plot. Print = save to file, Plot = print to screen.
    # roi = region for white reference, if none uses the whole image,
    # otherwise (x position, y position, box width, box height)

    #white balance image based on white toughspot
    # NOTE(review): `white_balance_roi` is assumed to be an (x, y, w, h)
    # tuple defined at module level — confirm.
    device, img1 = pcv.white_balance(device, img, debug, roi=white_balance_roi)
    # img1 = img

    # Step 3: Rotate the image

    # device, rotate_img = pcv.rotate(img1, -1, device, debug)

    #Step 4: Shift image. This step is important for clustering later on.
    # For this image it also allows you to push the green raspberry pi camera
    # out of the image. This step might not be necessary for all images.
    # The resulting image is the same size as the original.
    # Input:
    # img = image object
    # device = device number. Used to count steps in the workflow
    # number = integer, number of pixels to move image
    # side = direction to move from "top", "bottom", "right","left"
    # debug = None, print, or plot. Print = save to file, Plot = print to screen.

    # device, shift1 = pcv.shift_img(img1, device, 300, 'top', debug)
    # img1 = shift1

    # STEP 5: Convert image from RGB colorspace to LAB colorspace
    # Keep only the green-magenta channel (grayscale)
    # Inputs:
    #    img     = image object, RGB colorspace
    #    channel = color subchannel (l = lightness, a = green-magenta , b = blue-yellow)
    #    device  = device number. Used to count steps in the workflow
    #    debug   = None, print, or plot. Print = save to file, Plot = print to screen.
    device, a = pcv.rgb2gray_lab(img1, 'a', device, debug)

    # STEP 6: Set a binary threshold on the Saturation channel image
    # Inputs:
    #    img         = img object, grayscale
    #    threshold   = threshold value (0-255)
    #    maxValue    = value to apply above threshold (usually 255 = white)
    #    object_type = light or dark
    #                  - If object is light then standard thresholding is done
    #                  - If object is dark then inverse thresholding is done
    #    device      = device number. Used to count steps in the pipeline
    #    debug       = None, print, or plot. Print = save to file, Plot = print to screen.
    # 'dark' = inverse threshold: plants are dark (low 'a' values) relative
    # to background in the green-magenta channel.
    # NOTE(review): `darkness_threshold` is assumed to be a module-level
    # tuning constant (0-255) — confirm.
    device, img_binary = pcv.binary_threshold(a, darkness_threshold, 255,
                                              'dark', device, debug)
    #                                            ^
    #                                            |
    #                                           adjust this value

    # STEP 7: Fill in small objects (speckles)
    # Inputs:
    #    img    = image object, grayscale. img will be returned after filling
    #    mask   = image object, grayscale. This image will be used to identify contours
    #    size   = minimum object area size in pixels (integer)
    #    device = device number. Used to count steps in the pipeline
    #    debug  = None, print, or plot. Print = save to file, Plot = print to screen.
    # Copy because pcv.fill mutates the mask argument in the legacy API.
    mask = np.copy(img_binary)
    # NOTE(review): `minimum_object_area_pixels` is assumed to be a
    # module-level tuning constant — confirm.
    device, fill_image = pcv.fill(img_binary, mask, minimum_object_area_pixels,
                                  device, debug)
    #                                               ^
    #                                               |
    #                                               adjust this value

    # STEP 8: Dilate so that you don't lose leaves (just in case)
    # Inputs:
    #    img     = input image
    #    kernel  = integer
    #    i       = interations, i.e. number of consecutive filtering passes
    #    device  = device number. Used to count steps in the pipeline
    #    debug   = None, print, or plot. Print = save to file, Plot = print to screen.

    device, dilated = pcv.dilate(fill_image, 1, 1, device, debug)

    # STEP 9: Find objects (contours: black-white boundaries)
    # Inputs:
    #    img       = image that the objects will be overlayed
    #    mask      = what is used for object detection
    #    device    = device number.  Used to count steps in the pipeline
    #    debug     = None, print, or plot. Print = save to file, Plot = print to screen.
    device, id_objects, obj_hierarchy = pcv.find_objects(
        img1, dilated, device, debug)

    # STEP 10: Define region of interest (ROI)
    # Inputs:
    #    img       = img to overlay roi
    #    roi       = default (None) or user input ROI image, object area should be white and background should be black,
    #                has not been optimized for more than one ROI
    #    roi_input = type of file roi_base is, either 'binary', 'rgb', or 'default' (no ROI inputted)
    #    shape     = desired shape of final roi, either 'rectangle' or 'circle', if  user inputs rectangular roi but chooses
    #                'circle' for shape then a circle is fitted around rectangular roi (and vice versa)
    #    device    = device number.  Used to count steps in the pipeline
    #    debug     = None, print, or plot. Print = save to file, Plot = print to screen.
    #    adjust    = either 'True' or 'False', if 'True' allows user to adjust ROI
    #    x_adj     = adjust center along x axis
    #    y_adj     = adjust center along y axis
    #    w_adj     = adjust width
    #    h_adj     = adjust height
    # x=0, y=560, h=4040-560, w=3456
    # NOTE(review): this line uses the newer pcv.roi.* API while the rest of
    # the pipeline uses the legacy device-based API — mixing the two may
    # indicate a partial migration; confirm both APIs exist in the installed
    # PlantCV version. `total_region_of_interest` is assumed to be a dict of
    # rectangle kwargs (x, y, h, w) defined at module level.
    roi_contour, roi_hierarchy = pcv.roi.rectangle(**total_region_of_interest,
                                                   img=img1)
    # device, roi, roi_hierarchy = pcv.define_roi(img1, 'rectangle', device, None, 'default', debug, False,
    #                                             0, 0, 0, 0)
    #                                            ^                ^
    #                                            |________________|
    #                                            adjust these four values

    # STEP 11: Keep objects that overlap with the ROI
    # Inputs:
    #    img            = img to display kept objects
    #    roi_type       = 'cutto' or 'partial' (for partially inside)
    #    roi_contour    = contour of roi, output from "View and Ajust ROI" function
    #    roi_hierarchy  = contour of roi, output from "View and Ajust ROI" function
    #    object_contour = contours of objects, output from "Identifying Objects" fuction
    #    obj_hierarchy  = hierarchy of objects, output from "Identifying Objects" fuction
    #    device         = device number.  Used to count steps in the pipeline
    #    debug          = None, print, or plot. Print = save to file, Plot = print to screen.
    device, roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
        img1, 'partial', roi_contour, roi_hierarchy, id_objects, obj_hierarchy,
        device, debug)

    # print(obj_area)

    #Step 12: This function take a image with multiple contours and
    # clusters them based on user input of rows and columns

    #Inputs:
    #    img - An RGB image array
    #    roi_objects - object contours in an image that are needed to be clustered.
    #    nrow - number of rows to cluster (this should be the approximate  number of desired rows in the entire image (even if there isn't a literal row of plants)
    #    ncol - number of columns to cluster (this should be the approximate number of desired columns in the entire image (even if there isn't a literal row of plants)
    #    file -  output of filename from read_image function
    #    filenames - input txt file with list of filenames in order from top to bottom left to right
    #    debug - print debugging images

    # NOTE(review): `expected_number_of_rows` / `expected_number_of_columns`
    # are assumed to be module-level grid-layout constants — confirm.
    device, clusters_i, contours = pcv.cluster_contours(
        device, img1, roi_objects, expected_number_of_rows,
        expected_number_of_columns, debug)

    # print(contours)

    #Step 13:This function takes clustered contours and splits them into multiple images,
    #also does a check to make sure that the number of inputted filenames matches the number
    #of clustered contours. If no filenames are given then the objects are just numbered

    #Inputs:
    #    img - ideally a masked RGB image.
    #    grouped_contour_indexes - output of cluster_contours, indexes of clusters of contours
    #    contours - contours to cluster, output of cluster_contours
    #    filenames - input txt file with list of filenames in order from top to bottom left to right (likely list of genotypes)
    #    debug - print debugging images

    out = args.outdir
    # NOTE(review): assumes the options() parser defines a --names argument;
    # verify, otherwise this raises AttributeError.
    names = args.names
    device, output_path = pcv.cluster_contour_splitimg(device,
                                                       img1,
                                                       clusters_i,
                                                       contours,
                                                       out,
                                                       file=filename,
                                                       filenames=names,
                                                       debug=debug)