Example #1
def image_avg(fundf):
    # Predefine some variables
    global c, h, roi_c, roi_h

    # Get the filename for minimum and maximum fluorescence
    fn_min = fundf.query('frame == "Fo" or frame == "Fp"').filename.values[0]
    fn_max = fundf.query('frame == "Fm" or frame == "Fmp"').filename.values[0]

    # Get the parameter name that links these 2 frames
    param_name = fundf['parameter'].iloc[0]

    # Create a new output filename that combines existing filename with parameter
    outfn = os.path.splitext(os.path.basename(fn_max))[0]
    outfn_split = outfn.split('_')
    # outfn_split[2] = datetime.strptime(fundf.jobdate.values[0],'%Y-%m-%d').strftime('%Y%m%d')
    outfn_split[2] = fundf.jobdate.dt.strftime('%Y%m%d').values[0]
    basefn = "-".join(outfn_split[0:-1])
    outfn_split[-1] = param_name
    outfn = "-".join(outfn_split)
    print(outfn)

    # Make some directories based on sample id to keep output organized
    sampleid = outfn_split[0]
    fmaxdir = os.path.join(fluordir, sampleid)
    os.makedirs(fmaxdir, exist_ok=True)

    # If debug mode is 'print', create a specific debug dir for each pim file
    if pcv.params.debug == 'print':
        debug_outdir = os.path.join(debugdir, outfn)
        os.makedirs(debug_outdir, exist_ok=True)
        pcv.params.debug_outdir = debug_outdir

    # read images and create mask from max fluorescence
    # read image as is. only gray values in PSII images
    imgmin, _, _ = pcv.readimage(fn_min)
    img, _, _ = pcv.readimage(fn_max)
    fdark = np.zeros_like(img)
    out_flt = fdark.astype('float32')  # <- needs to be float32 for imwrite

    if param_name == 'FvFm':
        # create mask
        mask = createmasks.psIImask(img)
   
        # find objects and setup roi
        c, h = pcv.find_objects(img, mask)
        roi_c, roi_h = pcv.roi.multi(img, 
                                    coord=(250, 350), 
                                    radius=200, 
                                    spacing=(0, 0), 
                                    ncols=1, 
                                    nrows=1)

        # setup individual roi plant masks
        newmask = np.zeros_like(mask)

        # compute fv/fm and save to file
        Fv, hist_fvfm = pcv.fluor_fvfm(
            fdark=fdark, fmin=imgmin, fmax=img, mask=mask, bins=128)
        YII = np.divide(Fv, img, out=out_flt.copy(),
                        where=np.logical_and(mask > 0, img > 0))
        cv2.imwrite(os.path.join(fmaxdir, outfn + '_fvfm.tif'), YII)

        # NPQ is 0
        NPQ = np.zeros_like(YII)

        # print Fm
        cv2.imwrite(os.path.join(fmaxdir, outfn + '_fmax.tif'), img)
        # NPQ will always be an array of 0s

    else:  # compute YII and NPQ if parameter is other than FvFm
        # use cv2 to read the image because pcv.readimage would save it as input_image.png, overwriting img
        newmask = cv2.imread(os.path.join(
            maskdir, basefn + '-FvFm_mask.png'), -1)

        # compute YII
        Fvp, hist_yii = pcv.fluor_fvfm(
            fdark, fmin=imgmin, fmax=img, mask=newmask, bins=128)
        # Make sure to initialize with out=; using where= alone leaves uninitialized (random) values at the False pixels and you will get a strange result. newmask comes from Fm rather than Fm', so the two can differ:
        # newmask == 0, img > 0 -> False: pixel is not part of the plant but fluorescence was detected.
        # newmask > 0, img <= 0 -> False: pixel is part of the plant in Fm but no fluorescence was detected <- this is the likely culprit because pcv.apply_mask doesn't always solve the issue.
        YII = np.divide(Fvp, img, 
                        out=out_flt.copy(),
                        where=np.logical_and(newmask > 0, img > 0))
        cv2.imwrite(os.path.join(fmaxdir, outfn + '_yii.tif'), YII)

        # compute NPQ
        Fm = cv2.imread(os.path.join(fmaxdir, basefn + '-FvFm_fmax.tif'), -1)
        NPQ = np.divide(Fm, img, 
                        out=out_flt.copy(),
                        where=np.logical_and(newmask > 0, img > 0))
        NPQ = np.subtract(NPQ, 1, out=out_flt.copy(),
                          where=np.logical_and(NPQ >= 1, newmask > 0))
        cv2.imwrite(os.path.join(fmaxdir, outfn + '_npq.tif'), NPQ)
    # end if-else Fv/Fm

    # Make as many copies of incoming dataframe as there are ROIs so all results can be saved
    outdf = fundf.copy()
    for i in range(0, len(roi_c)-1):
        outdf = outdf.append(fundf)
    outdf.imageid = outdf.imageid.astype('uint8')

    # Initialize lists to store variables for each ROI and iterate through each plant
    frame_avg = []
    yii_avg = []
    yii_std = []
    npq_avg = []
    npq_std = []
    plantarea = []
    ithroi = []
    inbounds = []
    for i, rc in enumerate(roi_c):
        # Store iteration Number
        ithroi.append(int(i))
        ithroi.append(int(i))  # append twice so each image has a value.
        # extract ith hierarchy
        rh = roi_h[i]

        # Filter objects based on being in the defined ROI
        roi_obj, hierarchy_obj, submask, obj_area = pcv.roi_objects(
        img, 
        roi_contour=rc, 
        roi_hierarchy=rh, 
        object_contour=c, 
        obj_hierarchy=h, 
        roi_type='partial')
                
        if obj_area == 0:
            print('!!! No plant detected in ROI ', str(i))

            frame_avg.append(np.nan)
            frame_avg.append(np.nan)
            yii_avg.append(np.nan)
            yii_avg.append(np.nan)
            yii_std.append(np.nan)
            yii_std.append(np.nan)
            npq_avg.append(np.nan)
            npq_avg.append(np.nan)
            npq_std.append(np.nan)
            npq_std.append(np.nan)
            inbounds.append(np.nan)
            inbounds.append(np.nan)

        else:

            # Combine multiple plant objects within an roi together
            plant_contour, plant_mask = pcv.object_composition(
                img=img, contours=roi_obj, hierarchy=hierarchy_obj)

            #combine plant masks after roi filter
            if param_name == 'FvFm':
                newmask = pcv.image_add(newmask, plant_mask)

            # Calc mean and std dev of fluorescence, YII, and NPQ and save to list
            frame_avg.append(masked_stats.mean(imgmin, plant_mask))
            frame_avg.append(masked_stats.mean(img, plant_mask))
            # need double because there are two images per loop
            yii_avg.append(masked_stats.mean(YII, plant_mask))
            yii_avg.append(masked_stats.mean(YII, plant_mask))
            yii_std.append(masked_stats.std(YII, plant_mask))
            yii_std.append(masked_stats.std(YII, plant_mask))
            npq_avg.append(masked_stats.mean(NPQ, plant_mask))
            npq_avg.append(masked_stats.mean(NPQ, plant_mask))
            npq_std.append(masked_stats.std(NPQ, plant_mask))
            npq_std.append(masked_stats.std(NPQ, plant_mask))

            # Check if plant is completely within the frame of the image
            inbounds.append(pcv.within_frame(plant_mask))
            inbounds.append(pcv.within_frame(plant_mask))

        # end if-else
    # end roi loop

    # save mask of all plants to file after roi filter
    if param_name == 'FvFm':
        pcv.print_image(newmask, os.path.join(maskdir, outfn + '_mask.png'))

    # Output a pseudocolor of NPQ and YII for each induction period for each image
    imgdir = os.path.join(outdir, 'pseudocolor_images', sampleid)
    os.makedirs(imgdir, exist_ok=True)
    npq_img = pcv.visualize.pseudocolor(NPQ,
                                        obj=None,
                                        mask=newmask,
                                        cmap='inferno',
                                        axes=False,
                                        min_value=0,
                                        max_value=2.5,
                                        background='black',
                                        obj_padding=0)
    npq_img = add_scalebar.add_scalebar(npq_img,
                                        pixelresolution=pixelresolution,
                                        barwidth=20,
                                        barlocation='lower left')
    # If you change the output size and resolution you will need to adjust the timelapse video script
    npq_img.set_size_inches(6, 6, forward=False)
    npq_img.savefig(os.path.join(imgdir, outfn + '_NPQ.png'),
                    bbox_inches='tight',
                    dpi=150)
    npq_img.clf()

    yii_img = pcv.visualize.pseudocolor(YII,
                                        obj=None,
                                        mask=newmask,
                                        cmap=custom_colormaps.get_cmap(
                                            'imagingwin'),
                                        axes=False,
                                        min_value=0,
                                        max_value=1,
                                        background='black',
                                        obj_padding=0)
    yii_img = add_scalebar.add_scalebar(yii_img,
                                        pixelresolution=pixelresolution,
                                        barwidth=20,
                                        barlocation='lower left')
    yii_img.set_size_inches(6, 6, forward=False)
    yii_img.savefig(os.path.join(imgdir, outfn + '_YII.png'),
                    bbox_inches='tight',
                    dpi=150)
    yii_img.clf()

    # check YII values for uniqueness between all ROI. nonunique ROI suggests the plants grew into each other and can no longer be reliably separated in image processing.
    # a single value isn't always robust. I think because there are small independent objects that fall in one roi but not the other that change the object within the roi slightly.
    # also note, I originally designed this for trays of 2 pots. It will not detect if e.g. 2 out of 9 plants grow into each other
    rounded_avg = [round(n, 3) for n in yii_avg]
    rounded_std = [round(n, 3) for n in yii_std]
    if len(roi_c) > 1:
        isunique = not (rounded_avg.count(rounded_avg[0]) == len(yii_avg) and
                        rounded_std.count(rounded_std[0]) == len(yii_std))
    else:
        isunique = True
        
    # save all values to outgoing dataframe
    outdf['roi'] = ithroi
    outdf['frame_avg'] = frame_avg
    outdf['yii_avg'] = yii_avg
    outdf['npq_avg'] = npq_avg
    outdf['yii_std'] = yii_std
    outdf['npq_std'] = npq_std
    outdf['obj_in_frame'] = inbounds
    outdf['unique_roi'] = isunique

    return (outdf)
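
The comments in Example #1 stress initializing the np.divide output with out= when using where=: with where= alone, NumPy leaves the False positions of the result uninitialized. Below is a minimal standalone NumPy sketch of that pitfall (the toy arrays are illustrative and not part of the pipeline above):

import numpy as np

fv = np.array([[10., 0.], [5., 20.]])
fm = np.array([[20., 0.], [10., 40.]])
mask = np.array([[255, 0], [255, 255]], dtype=np.uint8)

# Without out=, pixels where the condition is False are left uninitialized (garbage values).
bad = np.divide(fv, fm, where=np.logical_and(mask > 0, fm > 0))

# With out=, False pixels keep the value the buffer was initialized with (0 here),
# so the background stays well defined and the array is safe to write to disk.
good = np.divide(fv, fm,
                 out=np.zeros_like(fv, dtype='float32'),
                 where=np.logical_and(mask > 0, fm > 0))

print(bad)   # masked-out entry may contain an arbitrary value
print(good)  # masked-out entry is exactly 0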
Example #2
def image_avg(fundf):
    # don't understand why the imports suddenly need to be inside the function
    # import cv2 as cv2
    # import numpy as np
    # import pandas as pd
    # import os
    # from matplotlib import pyplot as plt
    # from skimage import filters
    # from skimage import morphology
    # from skimage import segmentation

    # Predefine some variables
    global c, h, roi_c, roi_h, ilegend, mask_Fm, fn_Fm

    # Get the filename for minimum and maximum fluorescence
    fn_min = fundf.query('frame == "Fo" or frame == "Fp"').filename.values[0]
    fn_max = fundf.query('frame == "Fm" or frame == "Fmp"').filename.values[0]

    # Get the parameter name that links these 2 frames
    param_name = fundf['parameter'].iloc[0]

    # Create a new output filename that combines existing filename with parameter
    outfn = os.path.splitext(os.path.basename(fn_max))[0]
    outfn_split = outfn.split('-')
    # outfn_split[2] = datetime.strptime(fundf.jobdate.values[0],'%Y-%m-%d').strftime('%Y%m%d')
    outfn_split[2] = fundf.jobdate.dt.strftime('%Y%m%d').values[0]
    basefn = "-".join(outfn_split[0:-1])
    outfn_split[-1] = param_name
    outfn = "-".join(outfn_split)
    print(outfn)

    # Make some directories based on sample id to keep output organized
    plantbarcode = outfn_split[0]
    fmaxdir = os.path.join(fluordir, plantbarcode)
    os.makedirs(fmaxdir, exist_ok=True)

    # If debug mode is 'print', create a specific debug dir for each pim file
    if pcv.params.debug == 'print':
        debug_outdir = os.path.join(debugdir, outfn)
        os.makedirs(debug_outdir, exist_ok=True)
        pcv.params.debug_outdir = debug_outdir

    # read images and create mask from max fluorescence
    # read image as is. only gray values in PSII images
    imgmin, _, _ = pcv.readimage(fn_min)
    img, _, _ = pcv.readimage(fn_max)
    fdark = np.zeros_like(img)
    out_flt = fdark.astype('float32')  # <- needs to be float32 for imwrite

    if param_name == 'FvFm':
        # save max fluorescence filename
        fn_Fm = fn_max

        # create mask
        # #create black mask over lower half of image to threshold upper plant only
        # img_half, _, _, _ = pcv.rectangle_mask(img, p1=(0,321), p2=(480,640))
        # # mask1 = pcv.threshold.otsu(img_half,255)
        # algaethresh = filters.threshold_otsu(image=img_half)
        # mask0 = pcv.threshold.binary(img_half, algaethresh, 255, 'light')

        # # create black mask over upper half of image to threshold lower plant only
        # img_half, _, _, _ = pcv.rectangle_mask(img, p1=(0, 0), p2=(480, 319), color='black')
        # # mask0 = pcv.threshold.otsu(img_half,255)
        # algaethresh = filters.threshold_otsu(image=img_half)
        # mask1 = pcv.threshold.binary(img_half, algaethresh, 255, 'light')

        # mask = pcv.logical_xor(mask0, mask1)
        # # mask = pcv.dilate(mask, 2, 1)
        # mask = pcv.fill(mask, 350)
        # mask = pcv.erode(mask, 2, 2)

        # mask = pcv.erode(mask, 2, 1)
        # mask = pcv.fill(mask, 100)

        # otsuT = filters.threshold_otsu(img)
        # # sigma=(k-1)/6. This is because the length for 99 percentile of gaussian pdf is 6sigma.
        # k = int(2 * np.ceil(3 * otsuT) + 1)
        # gb = pcv.gaussian_blur(img, ksize = (k,k), sigma_x = otsuT)
        # mask = img >= gb + 10
        # pcv.plot_image(mask)

        # local_otsu = filters.rank.otsu(img, pcv.get_kernel((9,9), 'rectangle'))#morphology.disk(2))
        # thresh_image = img >= local_otsu

        #_------>
        elevation_map = filters.sobel(img)
        # pcv.plot_image(elevation_map)
        thresh = filters.threshold_otsu(image=img)
        # thresh = 50

        markers = np.zeros_like(img, dtype='uint8')
        markers[img > thresh + 8] = 2
        markers[img <= thresh + 8] = 1
        # pcv.plot_image(markers,cmap=plt.cm.nipy_spectral)

        mask = segmentation.watershed(elevation_map, markers)
        mask = mask.astype(np.uint8)
        # pcv.plot_image(mask)

        mask[mask == 1] = 0
        mask[mask == 2] = 1
        # pcv.plot_image(mask, cmap=plt.cm.nipy_spectral)

        # mask = pcv.erode(mask, 2, 1)
        mask = pcv.fill(mask, 100)
        # pcv.plot_image(mask, cmap=plt.cm.nipy_spectral)
        # <-----------
        roi_c, roi_h = pcv.roi.multi(img,
                                     coord=(250, 200),
                                     radius=70,
                                     spacing=(0, 220),
                                     ncols=1,
                                     nrows=2)

        if len(np.unique(mask)) == 1:
            c = []
            YII = mask
            NPQ = mask
            newmask = mask
        else:
            # find objects and setup roi
            c, h = pcv.find_objects(img, mask)

            # setup individual roi plant masks
            newmask = np.zeros_like(mask)

            # compute fv/fm and save to file
            YII, hist_fvfm = pcv.photosynthesis.analyze_fvfm(fdark=fdark,
                                                             fmin=imgmin,
                                                             fmax=img,
                                                             mask=mask,
                                                             bins=128)
            # YII = np.divide(Fv,
            #                 img,
            #                 out=out_flt.copy(),
            #                 where=np.logical_and(mask > 0, img > 0))

            # NPQ is 0
            NPQ = np.zeros_like(YII)

        # cv2.imwrite(os.path.join(fmaxdir, outfn + '-fvfm.tif'), YII)
        # print Fm - will need this later
        # cv2.imwrite(os.path.join(fmaxdir, outfn + '-fmax.tif'), img)
        # NPQ will always be an array of 0s

    else:  # compute YII and NPQ if parameter is other than FvFm
        newmask = mask_Fm
        # use cv2 to read the image because pcv.readimage would save it as input_image.png, overwriting img
        # newmask = cv2.imread(os.path.join(maskdir, basefn + '-FvFm-mask.png'),-1)
        if len(np.unique(newmask)) == 1:
            YII = np.zeros_like(newmask)
            NPQ = np.zeros_like(newmask)

        else:
            # compute YII
            YII, hist_yii = pcv.photosynthesis.analyze_fvfm(fdark,
                                                            fmin=imgmin,
                                                            fmax=img,
                                                            mask=newmask,
                                                            bins=128)
            # Make sure to initialize with out=; using where= alone leaves uninitialized (random) values at the False pixels and you will get a strange result. newmask comes from Fm rather than Fm', so the two can differ:
            # newmask == 0, img > 0 -> False: pixel is not part of the plant but fluorescence was detected.
            # newmask > 0, img <= 0 -> False: pixel is part of the plant in Fm but no fluorescence was detected <- this is the likely culprit because pcv.apply_mask doesn't always solve the issue.
            # YII = np.divide(Fvp,
            #                 img,
            #                 out=out_flt.copy(),
            #                 where=np.logical_and(newmask > 0, img > 0))

            # compute NPQ
            # Fm = cv2.imread(os.path.join(fmaxdir, basefn + '-FvFm-fmax.tif'), -1)
            Fm = cv2.imread(fn_Fm, -1)
            NPQ = np.divide(Fm,
                            img,
                            out=out_flt.copy(),
                            where=np.logical_and(newmask > 0, img > 0))
            NPQ = np.subtract(NPQ,
                              1,
                              out=out_flt.copy(),
                              where=np.logical_and(NPQ >= 1, newmask > 0))

        # cv2.imwrite(os.path.join(fmaxdir, outfn + '-yii.tif'), YII)
        # cv2.imwrite(os.path.join(fmaxdir, outfn + '-npq.tif'), NPQ)

    # end if-else Fv/Fm

    # Make as many copies of incoming dataframe as there are ROIs so all results can be saved
    outdf = fundf.copy()
    for i in range(0, len(roi_c) - 1):
        outdf = outdf.append(fundf)
    outdf.frameid = outdf.frameid.astype('uint8')

    # Initialize lists to store variables for each ROI and iterate through each plant
    frame_avg = []
    yii_avg = []
    yii_std = []
    npq_avg = []
    npq_std = []
    plantarea = []
    ithroi = []
    inbounds = []
    if len(c) == 0:

        for i, rc in enumerate(roi_c):
            # each variable needs to be stored 2 x #roi
            frame_avg.append(np.nan)
            frame_avg.append(np.nan)
            yii_avg.append(np.nan)
            yii_avg.append(np.nan)
            yii_std.append(np.nan)
            yii_std.append(np.nan)
            npq_avg.append(np.nan)
            npq_avg.append(np.nan)
            npq_std.append(np.nan)
            npq_std.append(np.nan)
            inbounds.append(False)
            inbounds.append(False)
            plantarea.append(0)
            plantarea.append(0)
            # Store iteration Number even if there are no objects in image
            ithroi.append(int(i))
            ithroi.append(int(i))  # append twice so each image has a value.

    else:
        for i, rc in enumerate(roi_c):
            # Store iteration Number
            ithroi.append(int(i))
            ithroi.append(int(i))  # append twice so each image has a value.
            # extract ith hierarchy
            rh = roi_h[i]

            # Filter objects based on being in the defined ROI
            roi_obj, hierarchy_obj, submask, obj_area = pcv.roi_objects(
                img,
                roi_contour=rc,
                roi_hierarchy=rh,
                object_contour=c,
                obj_hierarchy=h,
                roi_type='partial')

            if obj_area == 0:
                print('!!! No plant detected in ROI ', str(i))

                frame_avg.append(np.nan)
                frame_avg.append(np.nan)
                yii_avg.append(np.nan)
                yii_avg.append(np.nan)
                yii_std.append(np.nan)
                yii_std.append(np.nan)
                npq_avg.append(np.nan)
                npq_avg.append(np.nan)
                npq_std.append(np.nan)
                npq_std.append(np.nan)
                inbounds.append(False)
                inbounds.append(False)
                plantarea.append(0)
                plantarea.append(0)

            else:

                # Combine multiple plant objects within an roi together
                plant_contour, plant_mask = pcv.object_composition(
                    img=img, contours=roi_obj, hierarchy=hierarchy_obj)

                #combine plant masks after roi filter
                if param_name == 'FvFm':
                    newmask = pcv.image_add(newmask, plant_mask)

                # Calc mean and std dev of fluorescence, YII, and NPQ and save to list
                frame_avg.append(cppc.utils.mean(imgmin, plant_mask))
                frame_avg.append(cppc.utils.mean(img, plant_mask))
                # need double because there are two images per loop
                yii_avg.append(cppc.utils.mean(YII, plant_mask))
                yii_avg.append(cppc.utils.mean(YII, plant_mask))
                yii_std.append(cppc.utils.std(YII, plant_mask))
                yii_std.append(cppc.utils.std(YII, plant_mask))
                npq_avg.append(cppc.utils.mean(NPQ, plant_mask))
                npq_avg.append(cppc.utils.mean(NPQ, plant_mask))
                npq_std.append(cppc.utils.std(NPQ, plant_mask))
                npq_std.append(cppc.utils.std(NPQ, plant_mask))
                plantarea.append(obj_area * cppc.pixelresolution**2)
                plantarea.append(obj_area * cppc.pixelresolution**2)

                # Check if plant is completely within the frame of the image
                inbounds.append(pcv.within_frame(plant_mask))
                inbounds.append(pcv.within_frame(plant_mask))

                # Output a pseudocolor of NPQ and YII for each induction period for each image
                imgdir = os.path.join(outdir, 'pseudocolor_images')
                outfn_roi = outfn + '-roi' + str(i)
                os.makedirs(imgdir, exist_ok=True)
                npq_img = pcv.visualize.pseudocolor(NPQ,
                                                    obj=None,
                                                    mask=plant_mask,
                                                    cmap='inferno',
                                                    axes=False,
                                                    min_value=0,
                                                    max_value=2.5,
                                                    background='black',
                                                    obj_padding=0)
                npq_img = cppc.viz.add_scalebar(
                    npq_img,
                    pixelresolution=cppc.pixelresolution,
                    barwidth=10,
                    barlabel='1 cm',
                    barlocation='lower left')
                # If you change the output size and resolution you will need to adjust the timelapse video script
                npq_img.set_size_inches(6, 6, forward=False)
                npq_img.savefig(
                    os.path.join(imgdir, outfn_roi + '-NPQ.png'),
                    bbox_inches='tight',
                    dpi=100)  #100 is default for matplotlib/plantcv
                if ilegend == 1:  #only need to print legend once
                    npq_img.savefig(os.path.join(imgdir, 'npq_legend.pdf'),
                                    bbox_inches='tight')
                npq_img.clf()

                yii_img = pcv.visualize.pseudocolor(
                    YII,
                    obj=None,
                    mask=plant_mask,
                    cmap='gist_rainbow',  # was custom_colormaps.get_cmap('imagingwin')
                    axes=False,
                    min_value=0,
                    max_value=1,
                    background='black',
                    obj_padding=0)
                yii_img = cppc.viz.add_scalebar(
                    yii_img,
                    pixelresolution=cppc.pixelresolution,
                    barwidth=10,
                    barlabel='1 cm',
                    barlocation='lower left')
                yii_img.set_size_inches(6, 6, forward=False)
                yii_img.savefig(os.path.join(imgdir, outfn_roi + '-YII.png'),
                                bbox_inches='tight',
                                dpi=100)
                if ilegend == 1:  # print legend once and increment ilegend to stop in future iterations
                    yii_img.savefig(os.path.join(imgdir, 'yii_legend.pdf'),
                                    bbox_inches='tight')
                    ilegend = ilegend + 1
                yii_img.clf()

            # end if-else

        # end roi loop

    # end if there are objects from roi filter

    # save mask of all plants to file after roi filter
    if param_name == 'FvFm':
        mask_Fm = newmask.copy()
        # pcv.print_image(newmask, os.path.join(maskdir, outfn + '-mask.png'))

    # check YII values for uniqueness between all ROI. nonunique ROI suggests the plants grew into each other and can no longer be reliably separated in image processing.
    # a single value isn't always robust. I think because there are small independent objects that fall in one roi but not the other that change the object within the roi slightly.
    # also note, I originally designed this for trays of 2 pots. It will not detect if e.g. 2 out of 9 plants grow into each other
    rounded_avg = [round(n, 3) for n in yii_avg]
    rounded_std = [round(n, 3) for n in yii_std]
    if len(roi_c) > 1:
        isunique = not (rounded_avg.count(rounded_avg[0]) == len(yii_avg)
                        and rounded_std.count(rounded_std[0]) == len(yii_std))
    else:
        isunique = True

    # save all values to outgoing dataframe
    outdf['roi'] = ithroi
    outdf['frame_avg'] = frame_avg
    outdf['yii_avg'] = yii_avg
    outdf['npq_avg'] = npq_avg
    outdf['yii_std'] = yii_std
    outdf['npq_std'] = npq_std
    outdf['obj_in_frame'] = inbounds
    outdf['unique_roi'] = isunique

    return (outdf)
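
Example #2 builds its FvFm mask inline with scikit-image instead of createmasks.psIImask: a Sobel edge map serves as the watershed elevation surface and an Otsu threshold (plus a small offset) seeds the foreground/background markers. A minimal self-contained sketch of that approach; the helper name watershed_plant_mask and its parameters are illustrative, and small speckles are removed with skimage.morphology.remove_small_objects rather than pcv.fill:

import numpy as np
from skimage import filters, morphology, segmentation

def watershed_plant_mask(img, offset=8, min_size=100):
    # Edge magnitude serves as the "elevation map" for the watershed
    elevation_map = filters.sobel(img)
    # A global Otsu threshold (plus a small offset) seeds the markers
    thresh = filters.threshold_otsu(image=img)
    markers = np.zeros_like(img, dtype='uint8')
    markers[img > thresh + offset] = 2   # probable plant
    markers[img <= thresh + offset] = 1  # probable background
    # Flood the elevation map from the markers and keep the plant label
    labels = segmentation.watershed(elevation_map, markers)
    plant = labels == 2
    # Drop small speckles (the pipeline above uses pcv.fill(mask, 100) for this)
    plant = morphology.remove_small_objects(plant, min_size=min_size)
    return plant.astype(np.uint8) * 255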
Example #3
def analyze_object(img, obj, mask):
    """Outputs numeric properties for an input object (contour or grouped contours).

    Inputs:
    img             = RGB or grayscale image data for plotting
    obj             = single or grouped contour object
    mask            = Binary image to use as mask

    Returns:
    analysis_images = list of output images

    :param img: numpy.ndarray
    :param obj: list
    :param mask: numpy.ndarray
    :return analysis_images: list
    """

    params.device += 1

    # Valid objects can only be analyzed if they have >= 5 vertices
    if len(obj) < 5:
        return None, None, None

    ori_img = np.copy(img)
    # Convert grayscale images to color
    if len(np.shape(ori_img)) == 2:
        ori_img = cv2.cvtColor(ori_img, cv2.COLOR_GRAY2BGR)

    if len(np.shape(img)) == 3:
        ix, iy, iz = np.shape(img)
    else:
        ix, iy = np.shape(img)
    size = ix, iy, 3
    size1 = ix, iy
    background = np.zeros(size, dtype=np.uint8)
    background1 = np.zeros(size1, dtype=np.uint8)
    background2 = np.zeros(size1, dtype=np.uint8)

    # Check if object is touching image boundaries (QC)
    in_bounds = within_frame(mask)

    # Convex Hull
    hull = cv2.convexHull(obj)
    hull_vertices = len(hull)
    # Moments
    #  m = cv2.moments(obj)
    m = cv2.moments(mask, binaryImage=True)
    # Properties
    # Area
    area = m['m00']

    if area:
        # Convex Hull area
        hull_area = cv2.contourArea(hull)
        # Solidity
        solidity = 1
        if int(hull_area) != 0:
            solidity = area / hull_area
        # Perimeter
        perimeter = cv2.arcLength(obj, closed=True)
        # x and y position (top-left corner of bounding box) and extent x (width) and extent y (height)
        x, y, width, height = cv2.boundingRect(obj)
        # Centroid (center of mass x, center of mass y)
        cmx, cmy = (float(m['m10'] / m['m00']), float(m['m01'] / m['m00']))
        # Ellipse
        center, axes, angle = cv2.fitEllipse(obj)
        major_axis = np.argmax(axes)
        minor_axis = 1 - major_axis
        major_axis_length = float(axes[major_axis])
        minor_axis_length = float(axes[minor_axis])
        eccentricity = float(
            np.sqrt(1 - (axes[minor_axis] / axes[major_axis])**2))

        # Longest Axis: line through center of mass and point on the convex hull that is furthest away
        cv2.circle(background, (int(cmx), int(cmy)), 4, (255, 255, 255), -1)
        center_p = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)
        ret, centerp_binary = cv2.threshold(center_p, 0, 255,
                                            cv2.THRESH_BINARY)
        centerpoint, cpoint_h = cv2.findContours(centerp_binary, cv2.RETR_TREE,
                                                 cv2.CHAIN_APPROX_NONE)[-2:]

        dist = []
        vhull = np.vstack(hull)

        for i, c in enumerate(vhull):
            xy = tuple(c)
            pptest = cv2.pointPolygonTest(centerpoint[0], xy, measureDist=True)
            dist.append(pptest)

        abs_dist = np.absolute(dist)
        max_i = np.argmax(abs_dist)

        caliper_max_x, caliper_max_y = list(tuple(vhull[max_i]))
        caliper_mid_x, caliper_mid_y = [int(cmx), int(cmy)]

        xdiff = float(caliper_max_x - caliper_mid_x)
        ydiff = float(caliper_max_y - caliper_mid_y)

        # Set default values
        slope = 1

        if xdiff != 0:
            slope = (float(ydiff / xdiff))
        b_line = caliper_mid_y - (slope * caliper_mid_x)

        if slope != 0:
            xintercept = int(-b_line / slope)
            xintercept1 = int((ix - b_line) / slope)
            if 0 <= xintercept <= iy and 0 <= xintercept1 <= iy:
                cv2.line(background1, (xintercept1, ix), (xintercept, 0),
                         (255), params.line_thickness)
            elif xintercept < 0 or xintercept > iy or xintercept1 < 0 or xintercept1 > iy:
                # Used a random number generator to test if either of these cases were possible but neither is possible
                # if xintercept < 0 and 0 <= xintercept1 <= iy:
                #     yintercept = int(b_line)
                #     cv2.line(background1, (0, yintercept), (xintercept1, ix), (255), 5)
                # elif xintercept > iy and 0 <= xintercept1 <= iy:
                #     yintercept1 = int((slope * iy) + b_line)
                #     cv2.line(background1, (iy, yintercept1), (xintercept1, ix), (255), 5)
                # elif 0 <= xintercept <= iy and xintercept1 < 0:
                #     yintercept = int(b_line)
                #     cv2.line(background1, (0, yintercept), (xintercept, 0), (255), 5)
                # elif 0 <= xintercept <= iy and xintercept1 > iy:
                #     yintercept1 = int((slope * iy) + b_line)
                #     cv2.line(background1, (iy, yintercept1), (xintercept, 0), (255), 5)
                # else:
                yintercept = int(b_line)
                yintercept1 = int((slope * iy) + b_line)
                cv2.line(background1, (0, yintercept), (iy, yintercept1),
                         (255), 5)
        else:
            cv2.line(background1, (iy, caliper_mid_y), (0, caliper_mid_y),
                     (255), params.line_thickness)

        ret1, line_binary = cv2.threshold(background1, 0, 255,
                                          cv2.THRESH_BINARY)
        # print_image(line_binary,(str(device)+'_caliperfit.png'))

        cv2.drawContours(background2, [hull], -1, (255), -1)
        ret2, hullp_binary = cv2.threshold(background2, 0, 255,
                                           cv2.THRESH_BINARY)
        # print_image(hullp_binary,(str(device)+'_hull.png'))

        caliper = cv2.multiply(line_binary, hullp_binary)
        # print_image(caliper,(str(device)+'_caliperlength.png'))

        caliper_y, caliper_x = np.array(caliper.nonzero())
        caliper_matrix = np.vstack((caliper_x, caliper_y))
        caliper_transpose = np.transpose(caliper_matrix)
        caliper_length = len(caliper_transpose)

        caliper_transpose1 = np.lexsort((caliper_y, caliper_x))
        caliper_transpose2 = [(caliper_x[i], caliper_y[i])
                              for i in caliper_transpose1]
        caliper_transpose = np.array(caliper_transpose2)

    # else:
    #  hull_area, solidity, perimeter, width, height, cmx, cmy = 'ND', 'ND', 'ND', 'ND', 'ND', 'ND', 'ND'

    analysis_images = []

    # Draw properties
    if area:
        cv2.drawContours(ori_img, obj, -1, (255, 0, 0), params.line_thickness)
        cv2.drawContours(ori_img, [hull], -1, (255, 0, 255),
                         params.line_thickness)
        cv2.line(ori_img, (x, y), (x + width, y), (255, 0, 255),
                 params.line_thickness)
        cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height), (255, 0, 255),
                 params.line_thickness)
        cv2.line(ori_img, (tuple(caliper_transpose[caliper_length - 1])),
                 (tuple(caliper_transpose[0])), (255, 0, 255),
                 params.line_thickness)
        cv2.circle(ori_img, (int(cmx), int(cmy)), 10, (255, 0, 255),
                   params.line_thickness)
        # Output images with convex hull, extent x and y
        # out_file = os.path.splitext(filename)[0] + '_shapes.jpg'
        # out_file1 = os.path.splitext(filename)[0] + '_mask.jpg'

        # print_image(ori_img, out_file)
        analysis_images.append(ori_img)

        # print_image(mask, out_file1)
        analysis_images.append(mask)

    else:
        pass

    outputs.add_observation(variable='area',
                            trait='area',
                            method='plantcv.plantcv.analyze_object',
                            scale='pixels',
                            datatype=int,
                            value=area,
                            label='pixels')
    outputs.add_observation(variable='convex_hull_area',
                            trait='convex hull area',
                            method='plantcv.plantcv.analyze_object',
                            scale='pixels',
                            datatype=int,
                            value=hull_area,
                            label='pixels')
    outputs.add_observation(variable='solidity',
                            trait='solidity',
                            method='plantcv.plantcv.analyze_object',
                            scale='none',
                            datatype=float,
                            value=solidity,
                            label='none')
    outputs.add_observation(variable='perimeter',
                            trait='perimeter',
                            method='plantcv.plantcv.analyze_object',
                            scale='pixels',
                            datatype=int,
                            value=perimeter,
                            label='pixels')
    outputs.add_observation(variable='width',
                            trait='width',
                            method='plantcv.plantcv.analyze_object',
                            scale='pixels',
                            datatype=int,
                            value=width,
                            label='pixels')
    outputs.add_observation(variable='height',
                            trait='height',
                            method='plantcv.plantcv.analyze_object',
                            scale='pixels',
                            datatype=int,
                            value=height,
                            label='pixels')
    outputs.add_observation(variable='longest_path',
                            trait='longest path',
                            method='plantcv.plantcv.analyze_object',
                            scale='pixels',
                            datatype=int,
                            value=caliper_length,
                            label='pixels')
    outputs.add_observation(variable='center_of_mass',
                            trait='center of mass',
                            method='plantcv.plantcv.analyze_object',
                            scale='none',
                            datatype=tuple,
                            value=(cmx, cmy),
                            label='none')
    outputs.add_observation(variable='convex_hull_vertices',
                            trait='convex hull vertices',
                            method='plantcv.plantcv.analyze_object',
                            scale='none',
                            datatype=int,
                            value=hull_vertices,
                            label='none')
    outputs.add_observation(variable='object_in_frame',
                            trait='object in frame',
                            method='plantcv.plantcv.analyze_object',
                            scale='none',
                            datatype=bool,
                            value=in_bounds,
                            label='none')
    outputs.add_observation(variable='ellipse_center',
                            trait='ellipse center',
                            method='plantcv.plantcv.analyze_object',
                            scale='none',
                            datatype=tuple,
                            value=(center[0], center[1]),
                            label='none')
    outputs.add_observation(variable='ellipse_major_axis',
                            trait='ellipse major axis length',
                            method='plantcv.plantcv.analyze_object',
                            scale='pixels',
                            datatype=int,
                            value=major_axis_length,
                            label='pixels')
    outputs.add_observation(variable='ellipse_minor_axis',
                            trait='ellipse minor axis length',
                            method='plantcv.plantcv.analyze_object',
                            scale='pixels',
                            datatype=int,
                            value=minor_axis_length,
                            label='pixels')
    outputs.add_observation(variable='ellipse_angle',
                            trait='ellipse major axis angle',
                            method='plantcv.plantcv.analyze_object',
                            scale='degrees',
                            datatype=float,
                            value=float(angle),
                            label='degrees')
    outputs.add_observation(variable='ellipse_eccentricity',
                            trait='ellipse eccentricity',
                            method='plantcv.plantcv.analyze_object',
                            scale='none',
                            datatype=float,
                            value=float(eccentricity),
                            label='none')

    if params.debug is not None:
        cv2.drawContours(ori_img, obj, -1, (255, 0, 0), params.line_thickness)
        cv2.drawContours(ori_img, [hull], -1, (255, 0, 255),
                         params.line_thickness)
        cv2.line(ori_img, (x, y), (x + width, y), (255, 0, 255),
                 params.line_thickness)
        cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height), (255, 0, 255),
                 params.line_thickness)
        cv2.circle(ori_img, (int(cmx), int(cmy)), 10, (255, 0, 255),
                   params.line_thickness)
        cv2.line(ori_img, (tuple(caliper_transpose[caliper_length - 1])),
                 (tuple(caliper_transpose[0])), (255, 0, 255),
                 params.line_thickness)
        if params.debug == 'print':
            print_image(
                ori_img,
                os.path.join(params.debug_outdir,
                             str(params.device) + '_shapes.jpg'))
        elif params.debug == 'plot':
            if len(np.shape(img)) == 3:
                plot_image(ori_img)
            else:
                plot_image(ori_img, cmap='gray')

    # Store images
    outputs.images.append(analysis_images)
    return analysis_images
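
Several of the traits analyze_object reports come from the image moments of the binary mask (area, centroid) and from cv2.fitEllipse on the contour (major/minor axis lengths, eccentricity). A compact standalone sketch of just that part, run on a synthetic elliptical blob (the test mask and printout are illustrative, not part of the PlantCV function):

import cv2
import numpy as np

# Synthetic binary mask with a single elliptical blob
mask = np.zeros((200, 300), dtype=np.uint8)
cv2.ellipse(mask, (150, 100), (80, 40), 30, 0, 360, 255, -1)

# Object contour, as find_objects/object_composition would provide upstream
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
obj = contours[0]

# Area and centroid from the moments of the binary mask
m = cv2.moments(mask, binaryImage=True)
area = m['m00']
cmx, cmy = m['m10'] / m['m00'], m['m01'] / m['m00']

# Ellipse fit: center, (axis lengths), rotation angle
center, axes, angle = cv2.fitEllipse(obj)
major_axis_length, minor_axis_length = max(axes), min(axes)
eccentricity = np.sqrt(1 - (minor_axis_length / major_axis_length) ** 2)

print(area, (cmx, cmy), major_axis_length, minor_axis_length, round(eccentricity, 3))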
Example #4
                frame_avg.append(cppc.utils.mean(imgmin, plant_mask))
                frame_avg.append(cppc.utils.mean(img, plant_mask))
                # need double because there are two images per loop
                yii_avg.append(cppc.utils.mean(YII, plant_mask))
                yii_avg.append(cppc.utils.mean(YII, plant_mask))
                yii_std.append(cppc.utils.std(YII, plant_mask))
                yii_std.append(cppc.utils.std(YII, plant_mask))
                npq_avg.append(cppc.utils.mean(NPQ, plant_mask))
                npq_avg.append(cppc.utils.mean(NPQ, plant_mask))
                npq_std.append(cppc.utils.std(NPQ, plant_mask))
                npq_std.append(cppc.utils.std(NPQ, plant_mask))
                plantarea.append(obj_area * cppc.pixelresolution**2)
                plantarea.append(obj_area * cppc.pixelresolution**2)

                # Check if plant is completely within the frame of the image
                inbounds.append(pcv.within_frame(plant_mask))
                inbounds.append(pcv.within_frame(plant_mask))

                # Output a pseudocolor of NPQ and YII for each induction period for each image
                imgdir = os.path.join(outdir, 'pseudocolor_images')
                outfn_roi = outfn + '-roi' + str(i)
                os.makedirs(imgdir, exist_ok=True)
                npq_img = pcv.visualize.pseudocolor(NPQ,
                                                    obj=None,
                                                    mask=plant_mask,
                                                    cmap='inferno',
                                                    axes=False,
                                                    min_value=0,
                                                    max_value=2.5,
                                                    background='black',
                                                    obj_padding=0)
roi_objects, roi_obj_hierarchy, kept_mask, obj_area = pcv.roi_objects(
    img=img1,
    roi_contour=roi_contour,
    roi_hierarchy=roi_hierarchy,
    object_contour=id_objects,
    obj_hierarchy=obj_hierarchy,
    roi_type='partial')

# In[25]:

# Check if all of the plants fall completely within the bounds of an image
# or if it touches the edge. Used for QC.

# Inputs:
#   mask = Binary mask
in_bounds = pcv.within_frame(mask=kept_mask)

# In[29]:

# This function takes an image with multiple contours and
# clusters them based on user input of rows and columns

# Inputs:
#    img               = An RGB or grayscale image
#    roi_objects       = object contours in an image that are needed to be clustered.
#    roi_obj_hierarchy = object hierarchy
#    nrow              = number of rows to cluster (this should be the approximate number of
#                        desired rows in the entire image even if there isn't a literal row of plants)
#    ncol              = number of columns to cluster (this should be the approximate number of
#                        desired columns in the entire image even if there isn't a literal column of plants)
#    show_grid         = if True then the grid is drawn on the image, default show_grid=False
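
The comments above describe the inputs to PlantCV's contour-clustering step but the fragment stops before the call itself. A hedged sketch of what that call typically looks like, reusing the names already defined in this fragment (img1, roi_objects, roi_obj_hierarchy); the grid dimensions are illustrative and the exact keyword and return names may differ between PlantCV versions:

clusters_i, contours, hierarchies = pcv.cluster_contours(img=img1,
                                                         roi_objects=roi_objects,
                                                         roi_obj_hierarchy=roi_obj_hierarchy,
                                                         nrow=2,
                                                         ncol=2,
                                                         show_grid=True)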
Example #6
def image_avg(fundf):
    global c, h, roi_c, roi_h

    fn_min = fundf.filename.iloc[0]
    fn_max = fundf.filename.iloc[1]
    param_name = fundf['parameter'].iloc[0]

    outfn = os.path.splitext(os.path.basename(fn_max))[0]
    outfn_split = outfn.split('-')
    basefn = "-".join(outfn_split[0:-1])
    outfn_split[-1] = param_name
    outfn = "-".join(outfn_split)
    print(outfn)

    sampleid = outfn_split[2]
    fmaxdir = os.path.join(fluordir, sampleid)
    os.makedirs(fmaxdir, exist_ok=True)

    if pcv.params.debug == 'print':
        debug_outdir = os.path.join(debugdir, outfn)
        if not os.path.exists(debug_outdir):
            os.makedirs(debug_outdir)
        pcv.params.debug_outdir = debug_outdir

    # read images and create mask from max fluorescence
    # read image as is. only gray values in PSII images
    imgmin, _, _ = pcv.readimage(fn_min)
    img, _, _ = pcv.readimage(fn_max)
    fdark = np.zeros_like(img)
    out_flt = fdark.astype('float32')  # <- needs to be float32 for imwrite

    if param_name == 'FvFm':
        # create mask
        mask = createmasks.psIImask(img)

        # find objects and setup roi
        c, h = pcv.find_objects(img, mask)
        roi_c, roi_h = pcv.roi.multi(img,
                                     coord=(240, 180),
                                     radius=30,
                                     spacing=(150, 150),
                                     ncols=2,
                                     nrows=2)

        #setup individual roi plant masks
        newmask = np.zeros_like(mask)

        # compute fvfm
        YII, hist_fvfm = pcv.photosynthesis.analyze_yii(fdark=fdark,
                                                        fmin=imgmin,
                                                        fmax=img,
                                                        mask=mask,
                                                        bins=128,
                                                        parameter='Fv/Fm')
        cv2.imwrite(os.path.join(fmaxdir, outfn + '_fvfm.tif'),
                    YII.astype('float32'))

        NPQ = np.zeros_like(YII)

        # print Fm
        cv2.imwrite(os.path.join(fmaxdir, outfn + '_fmax.tif'), img)
        # NPQ will always be an array of 0s

    else:
        # use cv2 to read the image because pcv.readimage would save it as input_image.png, overwriting img
        newmask = cv2.imread(os.path.join(maskdir, basefn + '-FvFm_mask.png'),
                             -1)

        # compute YII
        YII, hist_yii = pcv.photosynthesis.analyze_yii(fdark=fdark,
                                                       fmin=imgmin,
                                                       fmax=img,
                                                       mask=newmask,
                                                       bins=64,
                                                       parameter=param_name)
        cv2.imwrite(os.path.join(fmaxdir, outfn + '_yii.tif'),
                    YII.astype('float32'))

        # compute NPQ
        Fm = cv2.imread(os.path.join(fmaxdir, basefn + '-FvFm_fmax.tif'), -1)
        NPQ, hist_npq = pcv.photosynthesis.analyze_npq(fm=Fm,
                                                       fmax=img,
                                                       mask=newmask,
                                                       bins=64)
        cv2.imwrite(os.path.join(fmaxdir, outfn + '_npq.tif'),
                    NPQ.astype('float32'))

    # Make as many copies of incoming dataframe as there are ROIs
    outdf = fundf.copy()
    for i in range(0, len(roi_c) - 1):
        outdf = outdf.append(fundf)
    outdf.imageid = outdf.imageid.astype('uint8')

    # Initialize lists to store variables for each ROI and iterate
    frame_avg = []
    yii_avg = []
    yii_std = []
    npq_avg = []
    npq_std = []
    plantarea = []
    ithroi = []
    inbounds = []
    for i, rc in enumerate(roi_c):
        # Store iteration Number
        ithroi.append(int(i))
        ithroi.append(int(i))  # append twice so each image has a value.
        # extract ith hierarchy
        rh = roi_h[i]

        # Filter objects based on being in the ROI
        try:
            roi_obj, hierarchy_obj, submask, obj_area = pcv.roi_objects(
                img,
                roi_contour=rc,
                roi_hierarchy=rh,
                object_contour=c,
                obj_hierarchy=h,
                roi_type='partial')
        except RuntimeError as err:
            print('!!!', err)

            frame_avg.append(np.nan)
            frame_avg.append(np.nan)
            yii_avg.append(np.nan)
            yii_avg.append(np.nan)
            yii_std.append(np.nan)
            yii_std.append(np.nan)
            npq_avg.append(np.nan)
            npq_avg.append(np.nan)
            npq_std.append(np.nan)
            npq_std.append(np.nan)
            inbounds.append(np.nan)
            inbounds.append(np.nan)
            plantarea.append(0)
            plantarea.append(0)

        else:

            # Combine multiple plant objects within an roi together
            plant_contour, plant_mask = pcv.object_composition(
                img=img, contours=roi_obj, hierarchy=hierarchy_obj)

            #combine plant masks after roi filter
            if param_name == 'FvFm':
                newmask = pcv.image_add(newmask, plant_mask)

            frame_avg.append(pcv.masked_stats.mean(imgmin, plant_mask))
            frame_avg.append(pcv.masked_stats.mean(img, plant_mask))
            # need double because there are two images per loop
            yii_avg.append(pcv.masked_stats.mean(YII, plant_mask))
            yii_avg.append(pcv.masked_stats.mean(YII, plant_mask))
            yii_std.append(pcv.masked_stats.std(YII, plant_mask))
            yii_std.append(pcv.masked_stats.std(YII, plant_mask))
            npq_avg.append(pcv.masked_stats.mean(NPQ, plant_mask))
            npq_avg.append(pcv.masked_stats.mean(NPQ, plant_mask))
            npq_std.append(pcv.masked_stats.std(NPQ, plant_mask))
            npq_std.append(pcv.masked_stats.std(NPQ, plant_mask))
            inbounds.append(pcv.within_frame(plant_mask))
            inbounds.append(pcv.within_frame(plant_mask))
            plantarea.append(obj_area * pixelresolution**2.)
            plantarea.append(obj_area * pixelresolution**2.)

            # with open(os.path.join(outdir, outfn + '_roi' + str(i) + '.txt'), 'w') as f:
            #     for item in yii_avg:
            #         f.write("%s\n" % item)

            #setup pseudocolor image size
            hgt, wdth = np.shape(newmask)
            figframe = 1
            if len(roi_c) == 2:
                if i == 0:
                    p1 = (int(0), int(0))
                    p2 = (int(hgt), int(hgt))
                elif i == 1:
                    p1 = (int(wdth - hgt), int(0))
                    p2 = (int(wdth), int(hgt))
            elif len(roi_c) == 1:
                cutwidth = (wdth - hgt) / 2
                p1 = (int(cutwidth), int(0))
                p2 = (int(cutwidth + hgt), int(hgt))
            else:
                figframe = None

            if figframe is not None:
                _, _, figframe, _ = pcv.rectangle_mask(plant_mask,
                                                       p1,
                                                       p2,
                                                       color='white')
                figframe = figframe[0]

            # print pseudocolor
            imgdir = os.path.join(outdir, 'pseudocolor_images', sampleid,
                                  'roi' + str(i))
            if param_name == 'FvFm':
                imgdir = os.path.join(imgdir, 'fvfm')
                os.makedirs(imgdir, exist_ok=True)
            else:
                imgdir = os.path.join(imgdir, 'IndC')
                os.makedirs(imgdir, exist_ok=True)
                npq_img = pcv.visualize.pseudocolor(NPQ,
                                                    obj=figframe,
                                                    mask=plant_mask,
                                                    cmap='inferno',
                                                    axes=False,
                                                    min_value=0,
                                                    max_value=2.5,
                                                    background='black',
                                                    obj_padding=0)
                npq_img = add_scalebar.add_scalebar(
                    npq_img,
                    pixelresolution=pixelresolution,
                    barwidth=20,
                    barlocation='lower right')
                npq_img.savefig(os.path.join(
                    imgdir, outfn + '_roi' + str(i) + '_NPQ.png'),
                                bbox_inches='tight')
                npq_img.clf()

            yii_img = pcv.visualize.pseudocolor(
                YII,
                obj=figframe,
                mask=plant_mask,
                cmap=custom_colormaps.get_cmap('imagingwin'),
                axes=False,
                min_value=0,
                max_value=1,
                background='black',
                obj_padding=0)
            yii_img = add_scalebar.add_scalebar(
                yii_img,
                pixelresolution=pixelresolution,
                barwidth=20,
                barlocation='lower right')
            yii_img.savefig(os.path.join(imgdir,
                                         outfn + '_roi' + str(i) + '_YII.png'),
                            bbox_inches='tight')
            yii_img.clf()
        # end try-except-else
    # end roi loop

    # save mask of all plants to file after roi filter
    if param_name == 'FvFm':
        pcv.print_image(newmask, os.path.join(maskdir, outfn + '_mask.png'))

    # save pseudocolor of all plants in image
    imgdir = os.path.join(outdir, 'pseudocolor_images', sampleid)
    npq_img = pcv.visualize.pseudocolor(NPQ,
                                        obj=None,
                                        mask=newmask,
                                        cmap='inferno',
                                        axes=False,
                                        min_value=0,
                                        max_value=2.5,
                                        background='black',
                                        obj_padding=0)
    npq_img = add_scalebar.add_scalebar(npq_img,
                                        pixelresolution=pixelresolution,
                                        barwidth=20,
                                        barlocation='lower right')
    npq_img.savefig(os.path.join(imgdir, outfn + '_NPQ.png'),
                    bbox_inches='tight')
    npq_img.clf()

    yii_img = pcv.visualize.pseudocolor(
        YII,
        obj=None,
        mask=newmask,
        cmap=custom_colormaps.get_cmap('imagingwin'),
        axes=False,
        min_value=0,
        max_value=1,
        background='black',
        obj_padding=0)
    yii_img = add_scalebar.add_scalebar(yii_img,
                                        pixelresolution=pixelresolution,
                                        barwidth=20,
                                        barlocation='lower right')
    yii_img.savefig(os.path.join(imgdir, outfn + '_YII.png'),
                    bbox_inches='tight')
    yii_img.clf()

    # check yii values for uniqueness
    # a single value isn't always robust. I think because there are small independent objects that fall in one roi but not the other that change the object within the roi slightly.
    rounded_avg = [round(n, 3) for n in yii_avg]
    rounded_std = [round(n, 3) for n in yii_std]
    isunique = not (rounded_avg.count(rounded_avg[0]) == len(yii_avg)
                    and rounded_std.count(rounded_std[0]) == len(yii_std))

    # save all values to outgoing dataframe
    outdf['roi'] = ithroi
    outdf['frame_avg'] = frame_avg
    outdf['yii_avg'] = yii_avg
    outdf['npq_avg'] = npq_avg
    outdf['yii_std'] = yii_std
    outdf['npq_std'] = npq_std
    outdf['plantarea'] = plantarea
    outdf['obj_in_frame'] = inbounds
    outdf['unique_roi'] = isunique

    return (outdf)
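
Example #6 delegates the NPQ computation to pcv.photosynthesis.analyze_npq, whereas Examples #1 and #2 compute it by hand from the dark-adapted Fm frame and the light-adapted Fm' frame as NPQ = Fm/Fm' - 1. A minimal NumPy sketch of that manual form, consistent with the masking used in the earlier examples (the helper name npq_manual is illustrative):

import numpy as np

def npq_manual(fm, fmp, mask):
    # NPQ = Fm / Fm' - 1, evaluated only on plant pixels with Fm' > 0;
    # both steps write into pre-initialized zero buffers so the background stays 0.
    ratio = np.divide(fm, fmp,
                      out=np.zeros_like(fm, dtype='float32'),
                      where=np.logical_and(mask > 0, fmp > 0))
    npq = np.subtract(ratio, 1,
                      out=np.zeros_like(ratio),
                      where=np.logical_and(ratio >= 1, mask > 0))
    return npq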
Example #7
def analyze_object(img, obj, mask):
    """Outputs numeric properties for an input object (contour or grouped contours).

    Inputs:
    img             = RGB or grayscale image data for plotting
    obj             = single or grouped contour object
    mask            = Binary image to use as mask

    Returns:
    analysis_images = list of output images

    :param img: numpy.ndarray
    :param obj: list
    :param mask: numpy.ndarray
    :return analysis_images: list
    """

    params.device += 1

    # Valid objects can only be analyzed if they have >= 5 vertices
    if len(obj) < 5:
        return None

    ori_img = np.copy(img)
    # Convert grayscale images to color
    if len(np.shape(ori_img)) == 2:
        ori_img = cv2.cvtColor(ori_img, cv2.COLOR_GRAY2BGR)

    if len(np.shape(img)) == 3:
        ix, iy, iz = np.shape(img)
    else:
        ix, iy = np.shape(img)
    size = ix, iy, 3
    size1 = ix, iy
    background = np.zeros(size, dtype=np.uint8)
    background1 = np.zeros(size1, dtype=np.uint8)
    background2 = np.zeros(size1, dtype=np.uint8)

    # Check if the object is touching the image boundaries (QC)
    in_bounds = within_frame(mask)

    # Convex Hull
    hull = cv2.convexHull(obj)
    hull_vertices = len(hull)
    # Moments
    #  m = cv2.moments(obj)
    m = cv2.moments(mask, binaryImage=True)
    # Properties
    # Area
    area = m['m00']

    if area:
        # Convex Hull area
        hull_area = cv2.contourArea(hull)
        # Solidity
        solidity = 1
        if int(hull_area) != 0:
            solidity = area / hull_area
        # Perimeter
        perimeter = cv2.arcLength(obj, closed=True)
        # x and y of the bounding box top-left corner, plus extent x (width) and extent y (height)
        x, y, width, height = cv2.boundingRect(obj)
        # Centroid (center of mass x, center of mass y)
        cmx, cmy = (float(m['m10'] / m['m00']), float(m['m01'] / m['m00']))
        # Ellipse
        center, axes, angle = cv2.fitEllipse(obj)
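        # cv2.fitEllipse returns the axes as the (width, height) of the rotated bounding
        # rectangle, in no guaranteed order, so the larger one is taken as the major axis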
        major_axis = np.argmax(axes)
        minor_axis = 1 - major_axis
        major_axis_length = float(axes[major_axis])
        minor_axis_length = float(axes[minor_axis])
        eccentricity = float(np.sqrt(1 - (axes[minor_axis] / axes[major_axis]) ** 2))

        # Longest Axis: line through center of mass and point on the convex hull that is furthest away
        cv2.circle(background, (int(cmx), int(cmy)), 4, (255, 255, 255), -1)
        center_p = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)
        ret, centerp_binary = cv2.threshold(center_p, 0, 255, cv2.THRESH_BINARY)
        centerpoint, cpoint_h = cv2.findContours(centerp_binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]

        dist = []
        vhull = np.vstack(hull)

        for i, c in enumerate(vhull):
            xy = tuple(c)
            pptest = cv2.pointPolygonTest(centerpoint[0], xy, measureDist=True)
            dist.append(pptest)

        abs_dist = np.absolute(dist)
        max_i = np.argmax(abs_dist)

        caliper_max_x, caliper_max_y = list(tuple(vhull[max_i]))
        caliper_mid_x, caliper_mid_y = [int(cmx), int(cmy)]

        xdiff = float(caliper_max_x - caliper_mid_x)
        ydiff = float(caliper_max_y - caliper_mid_y)

        # Set default values
        slope = 1

        if xdiff != 0:
            slope = (float(ydiff / xdiff))
        b_line = caliper_mid_y - (slope * caliper_mid_x)

        if slope != 0:
            xintercept = int(-b_line / slope)
            xintercept1 = int((ix - b_line) / slope)
            if 0 <= xintercept <= iy and 0 <= xintercept1 <= iy:
                cv2.line(background1, (xintercept1, ix), (xintercept, 0), (255), params.line_thickness)
            elif xintercept < 0 or xintercept > iy or xintercept1 < 0 or xintercept1 > iy:
                # Used a random number generator to test whether either of these cases was possible; neither is
                # if xintercept < 0 and 0 <= xintercept1 <= iy:
                #     yintercept = int(b_line)
                #     cv2.line(background1, (0, yintercept), (xintercept1, ix), (255), 5)
                # elif xintercept > iy and 0 <= xintercept1 <= iy:
                #     yintercept1 = int((slope * iy) + b_line)
                #     cv2.line(background1, (iy, yintercept1), (xintercept1, ix), (255), 5)
                # elif 0 <= xintercept <= iy and xintercept1 < 0:
                #     yintercept = int(b_line)
                #     cv2.line(background1, (0, yintercept), (xintercept, 0), (255), 5)
                # elif 0 <= xintercept <= iy and xintercept1 > iy:
                #     yintercept1 = int((slope * iy) + b_line)
                #     cv2.line(background1, (iy, yintercept1), (xintercept, 0), (255), 5)
                # else:
                yintercept = int(b_line)
                yintercept1 = int((slope * iy) + b_line)
                cv2.line(background1, (0, yintercept), (iy, yintercept1), (255), 5)
        else:
            cv2.line(background1, (iy, caliper_mid_y), (0, caliper_mid_y), (255), params.line_thickness)

        ret1, line_binary = cv2.threshold(background1, 0, 255, cv2.THRESH_BINARY)
        # print_image(line_binary,(str(device)+'_caliperfit.png'))

        cv2.drawContours(background2, [hull], -1, (255), -1)
        ret2, hullp_binary = cv2.threshold(background2, 0, 255, cv2.THRESH_BINARY)
        # print_image(hullp_binary,(str(device)+'_hull.png'))

        caliper = cv2.multiply(line_binary, hullp_binary)
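        # line_binary and hullp_binary are 0/255 masks, so the product is nonzero only
        # where the caliper line crosses the filled hull; those pixels trace the longest path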
        # print_image(caliper,(str(device)+'_caliperlength.png'))

        caliper_y, caliper_x = np.array(caliper.nonzero())
        caliper_matrix = np.vstack((caliper_x, caliper_y))
        caliper_transpose = np.transpose(caliper_matrix)
        caliper_length = len(caliper_transpose)

        caliper_transpose1 = np.lexsort((caliper_y, caliper_x))
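        # lexsort orders the caliper pixels by x (then y); after re-indexing below, the first
        # and last rows are the two endpoints used to draw the longest-path line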
        caliper_transpose2 = [(caliper_x[i], caliper_y[i]) for i in caliper_transpose1]
        caliper_transpose = np.array(caliper_transpose2)

    # else:
    #  hull_area, solidity, perimeter, width, height, cmx, cmy = 'ND', 'ND', 'ND', 'ND', 'ND', 'ND', 'ND'

    analysis_images = []

    # Draw properties
    if area:
        cv2.drawContours(ori_img, obj, -1, (255, 0, 0), params.line_thickness)
        cv2.drawContours(ori_img, [hull], -1, (255, 0, 255), params.line_thickness)
        cv2.line(ori_img, (x, y), (x + width, y), (255, 0, 255), params.line_thickness)
        cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height), (255, 0, 255), params.line_thickness)
        cv2.line(ori_img, (tuple(caliper_transpose[caliper_length - 1])), (tuple(caliper_transpose[0])), (255, 0, 255),
                 params.line_thickness)
        cv2.circle(ori_img, (int(cmx), int(cmy)), 10, (255, 0, 255), params.line_thickness)
        # Output images with convex hull, extent x and y
        # out_file = os.path.splitext(filename)[0] + '_shapes.jpg'
        # out_file1 = os.path.splitext(filename)[0] + '_mask.jpg'

        # print_image(ori_img, out_file)
        analysis_images.append(ori_img)

        # print_image(mask, out_file1)
        analysis_images.append(mask)

    else:
        pass

    outputs.add_observation(variable='area', trait='area',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=area, label='pixels')
    outputs.add_observation(variable='convex_hull_area', trait='convex hull area',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=hull_area, label='pixels')
    outputs.add_observation(variable='solidity', trait='solidity',
                            method='plantcv.plantcv.analyze_object', scale='none', datatype=float,
                            value=solidity, label='none')
    outputs.add_observation(variable='perimeter', trait='perimeter',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=perimeter, label='pixels')
    outputs.add_observation(variable='width', trait='width',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=width, label='pixels')
    outputs.add_observation(variable='height', trait='height',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=height, label='pixels')
    outputs.add_observation(variable='longest_path', trait='longest path',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=caliper_length, label='pixels')
    outputs.add_observation(variable='center_of_mass', trait='center of mass',
                            method='plantcv.plantcv.analyze_object', scale='none', datatype=tuple,
                            value=(cmx, cmy), label='none')
    outputs.add_observation(variable='convex_hull_vertices', trait='convex hull vertices',
                            method='plantcv.plantcv.analyze_object', scale='none', datatype=int,
                            value=hull_vertices, label='none')
    outputs.add_observation(variable='object_in_frame', trait='object in frame',
                            method='plantcv.plantcv.analyze_object', scale='none', datatype=bool,
                            value=in_bounds, label='none')
    outputs.add_observation(variable='ellipse_center', trait='ellipse center',
                            method='plantcv.plantcv.analyze_object', scale='none', datatype=tuple,
                            value=(center[0], center[1]), label='none')
    outputs.add_observation(variable='ellipse_major_axis', trait='ellipse major axis length',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=major_axis_length, label='pixels')
    outputs.add_observation(variable='ellipse_minor_axis', trait='ellipse minor axis length',
                            method='plantcv.plantcv.analyze_object', scale='pixels', datatype=int,
                            value=minor_axis_length, label='pixels')
    outputs.add_observation(variable='ellipse_angle', trait='ellipse major axis angle',
                            method='plantcv.plantcv.analyze_object', scale='degrees', datatype=float,
                            value=float(angle), label='degrees')
    outputs.add_observation(variable='ellipse_eccentricity', trait='ellipse eccentricity',
                            method='plantcv.plantcv.analyze_object', scale='none', datatype=float,
                            value=float(eccentricity), label='none')

    if params.debug is not None:
        cv2.drawContours(ori_img, obj, -1, (255, 0, 0), params.line_thickness)
        cv2.drawContours(ori_img, [hull], -1, (255, 0, 255), params.line_thickness)
        cv2.line(ori_img, (x, y), (x + width, y), (255, 0, 255), params.line_thickness)
        cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height), (255, 0, 255), params.line_thickness)
        cv2.circle(ori_img, (int(cmx), int(cmy)), 10, (255, 0, 255), params.line_thickness)
        cv2.line(ori_img, (tuple(caliper_transpose[caliper_length - 1])), (tuple(caliper_transpose[0])), (255, 0, 255),
                 params.line_thickness)
        if params.debug == 'print':
            print_image(ori_img, os.path.join(params.debug_outdir, str(params.device) + '_shapes.jpg'))
        elif params.debug == 'plot':
            if len(np.shape(img)) == 3:
                plot_image(ori_img)
            else:
                plot_image(ori_img, cmap='gray')

    # Store images
    outputs.images.append(analysis_images)
    return analysis_images
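
A minimal sketch of how analyze_object is typically reached in a PlantCV 3.x style workflow. The pcv.* calls mirror the public API used elsewhere in these examples; the input file, threshold value, and exact keyword names are assumptions and may differ between PlantCV versions.

from plantcv import plantcv as pcv

# Hypothetical input image and threshold; substitute your own data.
img, path, fname = pcv.readimage('plant.png')
gray = pcv.rgb2gray_lab(rgb_img=img, channel='a')
mask = pcv.threshold.binary(gray_img=gray, threshold=120, max_value=255, object_type='dark')

# Find contours in the mask and merge them into one object before analysis.
objects, hierarchy = pcv.find_objects(img=img, mask=mask)
obj, combined_mask = pcv.object_composition(img=img, contours=objects, hierarchy=hierarchy)

# analyze_object stores the shape traits shown above in pcv.outputs and returns
# the annotated analysis image(s).
shape_img = pcv.analyze_object(img=img, obj=obj, mask=combined_mask)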