Code Example #1
File: data.py Project: figure002/nbclassify
    def __get_shape_outline(self, args, bin_mask):
        """Executes :meth:`features.shape_outline`."""
        if self.bin_mask is None:
            raise ValueError("Binary mask cannot be None")

        k = getattr(args, 'k', 15)

        # Obtain contours (all points) from the mask.
        contour = ft.get_largest_contour(bin_mask.copy(), cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_NONE)
        if contour is None:
            raise ValueError("No contour found for binary image")

        # Get the outline.
        outline = ft.shape_outline(contour, k)

        # Compute the deltas for the horizontal and vertical point pairs.
        shape = []
        for x, y in outline:
            delta_x = x[0] - x[1]
            delta_y = y[0] - y[1]
            shape.append(delta_x)
            shape.append(delta_y)
        shape = np.array(shape, dtype=float)

        # Normalize the features if a scaler is set.
        if self.scaler:
            shape = self.scaler.fit_transform(shape)

        return shape
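
A note on the `ft.` helpers: they come from the imgpheno `features` module, imported as `ft` in these projects. For readers without that package, the sketch below shows roughly what a largest-contour helper could look like in plain OpenCV; the function name and return contract are assumptions inferred from how the examples call `ft.get_largest_contour` (it appears to return the single largest external contour, or None when the mask contains none), not the actual imgpheno implementation.

import cv2

def get_largest_contour_sketch(bin_mask, mode=cv2.RETR_EXTERNAL,
                               method=cv2.CHAIN_APPROX_NONE):
    """Hypothetical stand-in: return the largest contour in a binary mask."""
    # cv2.findContours returns (contours, hierarchy) in OpenCV 2.x/4.x and
    # (image, contours, hierarchy) in OpenCV 3.x; pick the right element.
    result = cv2.findContours(bin_mask.copy(), mode, method)
    contours = result[0] if len(result) == 2 else result[1]
    if not contours:
        return None
    return max(contours, key=cv2.contourArea)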
Code Example #2
File: shape360-color.py Project: sdikby/imgpheno
def process_image(args, path):
    global bin_mask, intersects, rotation, img, img_src, center

    img = cv2.imread(path)
    if img is None or img.size == 0:
        logging.info("Failed to read %s" % path)
        return

    logging.info("Processing %s..." % path)

    # Scale the image down if its perimeter exceeds the maximum (if set).
    img = common.scale_max_perimeter(img, args.max_size)
    img_src = img.copy()

    # Perform segmentation
    logging.info("- Segmenting...")
    mask = common.grabcut(img, args.iters, None, args.margin)
    bin_mask = np.where((mask == cv2.GC_FGD) + (mask == cv2.GC_PR_FGD), 255,
                        0).astype('uint8')

    # Obtain contours (all points) from the mask.
    contour = ft.get_largest_contour(bin_mask, cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_NONE)

    # Fit an ellipse on the contour to get the rotation angle.
    box = cv2.fitEllipse(contour)
    rotation = int(box[2])

    # Get the shape360 feature.
    logging.info("- Obtaining shape...")
    intersects, center = ft.shape_360(contour, rotation)

    logging.info("- Done")

    draw_axis()
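
The `bin_mask` line above turns the GrabCut label mask into a plain black-and-white mask: pixels labelled definite foreground (`cv2.GC_FGD`) or probable foreground (`cv2.GC_PR_FGD`) become 255, everything else 0; adding the two boolean arrays acts as a logical OR. A slightly more explicit sketch of the same conversion:

import cv2
import numpy as np

def grabcut_mask_to_binary(mask):
    # White (255) where GrabCut marked definite or probable foreground,
    # black (0) elsewhere; logical_or spells out what the '+' above does.
    fg = np.logical_or(mask == cv2.GC_FGD, mask == cv2.GC_PR_FGD)
    return np.where(fg, 255, 0).astype('uint8')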
Code Example #3
File: shape360.py Project: xieyanfu/imgpheno
def process_image(args, path):
    global intersects, rotation, img, img_src, center

    img = cv2.imread(path)
    if img is None or img.size == 0:
        logging.info("Failed to read %s" % path)
        return

    logging.info("Processing %s..." % path)

    # Scale the image down if its perimeter exceeds the maximum (if set).
    img = common.scale_max_perimeter(img, args.max_size)
    img_src = img.copy()

    # Perform segmentation
    logging.info("- Segmenting...")
    mask = common.grabcut(img, args.iters, None, args.margin)
    bin_mask = np.where((mask == cv2.GC_FGD) + (mask == cv2.GC_PR_FGD), 255, 0).astype("uint8")

    # Obtain contours (all points) from the mask.
    contour = ft.get_largest_contour(bin_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    # Fit an ellipse on the contour to get the rotation angle.
    box = cv2.fitEllipse(contour)
    rotation = int(box[2])

    # Get the shape360 feature.
    logging.info("- Obtaining shape...")
    intersects, center = ft.shape_360(contour, rotation)

    logging.info("- Done")

    draw_axis()
Code Example #4
File: shape-outline.py Project: sdikby/imgpheno
def process_image(args, path):
    global img, img_src, outline, box

    img = cv2.imread(path)
    if img is None or img.size == 0:
        logging.error("Failed to read %s" % path)
        exit(1)

    logging.info("Processing %s..." % path)

    # Scale the image down if its perimeter exceeds the maximum (if set).
    img = common.scale_max_perimeter(img, args.max_size)
    img_src = img.copy()

    # Perform segmentation.
    logging.info("- Segmenting...")
    mask = common.grabcut(img, args.iters, None, args.margin)
    bin_mask = np.where((mask == cv2.GC_FGD) + (mask == cv2.GC_PR_FGD), 255,
                        0).astype('uint8')

    # Obtain contours (all points) from the mask.
    contour = ft.get_largest_contour(bin_mask.copy(), cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_NONE)

    # Get bounding rectangle of the largest contour.
    box = cv2.boundingRect(contour)

    # Get the outline.
    logging.info("- Obtaining shape...")
    outline = ft.shape_outline(contour, args.k)

    # And draw it.
    logging.info("- Done")
    draw_outline(0, outline, args.k)
Code Example #5
File: data.py Project: xieyanfu/nbclassify
    def __get_shape_outline(self, args, bin_mask):
        """Executes :meth:`features.shape_outline`."""
        if self.bin_mask is None:
            raise ValueError("Binary mask cannot be None")

        k = getattr(args, 'k', 15)

        # Obtain contours (all points) from the mask.
        contour = ft.get_largest_contour(bin_mask.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_NONE)
        if contour is None:
            raise ValueError("No contour found for binary image")

        # Get the outline.
        outline = ft.shape_outline(contour, k)

        # Compute the deltas for the horizontal and vertical point pairs.
        shape = []
        for x, y in outline:
            delta_x = x[0] - x[1]
            delta_y = y[0] - y[1]
            shape.append(delta_x)
            shape.append(delta_y)
        shape = np.array(shape, dtype=float)

        # Normalize the features if a scaler is set.
        if self.scaler:
            shape = self.scaler.fit_transform(shape)

        return shape
Code Example #6
File: color-bgr-means.py Project: naturalis/imgpheno
def process_image(args, path):
    global img, img_src, outline, box, bin_mask

    img = cv2.imread(path)
    if img is None or img.size == 0:
        logging.error("Failed to read %s" % path)
        exit(1)

    logging.info("Processing %s..." % path)

    # Scale the image down if its perimeter exceeds the maximum (if set).
    img = common.scale_max_perimeter(img, args.max_size)
    img_src = img.copy()

    # Perform segmentation.
    logging.info("- Segmenting...")
    mask = common.grabcut(img, args.iters, None, args.margin)
    bin_mask = np.where((mask==cv2.GC_FGD) + (mask==cv2.GC_PR_FGD),
        255, 0).astype('uint8')

    # Obtain contours (all points) from the mask.
    contour = ft.get_largest_contour(bin_mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_NONE)

    # Get bounding rectangle of the largest contour.
    props = ft.contour_properties([contour], 'BoundingRect')
    box = props[0]['BoundingRect']

    # And draw it.
    logging.info("- Done")
    draw_sections(0, args.bins)
Code Example #7
File: train.py Project: naturalis/imgpheno
    def get_shape_outline(self, args, bin_mask):
        if self.bin_mask is None:
            raise ValueError("Binary mask cannot be None")

        k = getattr(args, 'k', 15)

        # Obtain contours (all points) from the mask.
        contour = ft.get_largest_contour(bin_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        if contour is None:
            raise ValueError("No contour found for binary image")

        # Get the outline.
        outline = ft.shape_outline(contour, k)

        # Compute the deltas for the horizontal and vertical point pairs.
        shape = []
        for x, y in outline:
            delta_x = x[0] - x[1]
            delta_y = y[0] - y[1]
            shape.append(delta_x)
            shape.append(delta_y)

        # Normalize results.
        shape = np.array(shape, dtype=np.float32)
        shape = cv2.normalize(shape, None, -1, 1, cv2.NORM_MINMAX)

        return shape.ravel()
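
Unlike the scaler-based variants of this method, this one rescales with OpenCV's min-max normalization: `cv2.normalize(shape, None, -1, 1, cv2.NORM_MINMAX)` maps the smallest delta to -1 and the largest to 1. For comparison, a NumPy sketch of the same rescaling (assuming the vector is not constant, which would make the denominator zero):

import numpy as np

def minmax_to_unit_range(x):
    # Linear rescale so that min(x) -> -1 and max(x) -> 1, mirroring
    # cv2.normalize(x, None, -1, 1, cv2.NORM_MINMAX) for non-constant x.
    x = np.asarray(x, dtype=np.float32)
    return (x - x.min()) / (x.max() - x.min()) * 2.0 - 1.0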
Code Example #8
File: train.py Project: naturalis/imgpheno
    def _preprocess(self):
        if self.img is None:
            raise ValueError("No image loaded")

        if 'preprocess' not in self.params:
            return

        # Scale the image down if its perimeter exceeds the maximum (if set).
        perim = sum(self.img.shape[:2])
        max_perim = getattr(self.params.preprocess, 'maximum_perimeter', None)
        if max_perim and perim > max_perim:
            logging.info("Scaling down...")
            rf = float(max_perim) / perim
            self.img = cv2.resize(self.img, None, fx=rf, fy=rf)

        # Perform color enhancement.
        color_enhancement = getattr(self.params.preprocess, 'color_enhancement', None)
        if color_enhancement:
            for method, args in vars(color_enhancement).iteritems():
                if method == 'naik_murthy_linear':
                    logging.info("Color enhancement...")
                    self.img = ft.naik_murthy_linear(self.img)
                else:
                    raise ValueError("Unknown color enhancement method '%s'" % method)

        # Perform segmentation.
        segmentation = getattr(self.params.preprocess, 'segmentation', None)
        if segmentation:
            logging.info("Segmenting...")
            iterations = getattr(segmentation, 'iterations', 5)
            margin = getattr(segmentation, 'margin', 1)
            output_folder = getattr(segmentation, 'output_folder', None)

            # Create a binary mask for the largest contour.
            self.mask = common.grabcut(self.img, iterations, None, margin)
            self.bin_mask = np.where((self.mask==cv2.GC_FGD) + (self.mask==cv2.GC_PR_FGD), 255, 0).astype('uint8')
            contour = ft.get_largest_contour(self.bin_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            if contour is None:
                raise ValueError("No contour found for binary image")
            self.bin_mask = np.zeros(self.img.shape[:2], dtype=np.uint8)
            cv2.drawContours(self.bin_mask, [contour], 0, 255, -1)

            # Save the masked image to the output folder.
            if output_folder and os.path.isdir(output_folder):
                img_masked = cv2.bitwise_and(self.img, self.img, mask=self.bin_mask)
                fname = os.path.basename(self.path)
                out_path = os.path.join(output_folder, fname)
                cv2.imwrite(out_path, img_masked)
Code Example #9
File: train.py Project: naturalis/imgpheno
    def get_color_bgr_means(self, src, args, bin_mask=None):
        if self.bin_mask is None:
            raise ValueError("Binary mask cannot be None")

        # Get the contours from the mask.
        contour = ft.get_largest_contour(bin_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if contour is None:
            raise ValueError("No contour found for binary image")

        # Create a masked image.
        img = cv2.bitwise_and(src, src, mask=bin_mask)

        bins = getattr(args, 'bins', 20)
        output = ft.color_bgr_means(img, contour, bins)

        # Normalize data to range -1 .. 1
        return output * 2.0 / 255 - 1
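
The return statement rescales the BGR means from the fixed 0..255 intensity range onto -1..1 with `output * 2.0 / 255 - 1`, so 0 maps to -1, 127.5 to 0 and 255 to 1. A quick endpoint check:

import numpy as np

# Endpoint check for the fixed 0..255 -> -1..1 rescaling used above.
output = np.array([0.0, 127.5, 255.0])
print(output * 2.0 / 255 - 1)   # [-1.  0.  1.]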
Code Example #10
def create_mask(img, args, what_roi):
    """
    Create a binary mask for an image.

    Expects an image, the parsed command-line arguments and a string
    indicating which kind of ROI to use.

    A binary mask is created for the image. If there is no (valid) ROI
    given, the whole image will be in foreground, otherwise only the
    ROI will be in foreground.

    The masked image and a contour of the ROI will be returned.
    """
    mask = np.zeros(img.shape[:2], np.uint8)
    if what_roi == "Fractions":
        roi = split_roi(args.roi_frac, True)
        mask[int(img.shape[0] * roi[2]):int(img.shape[0] * roi[3]),
             int(img.shape[1] * roi[0]):int(img.shape[1] *
                                            roi[1])] = cv2.GC_FGD
    elif what_roi == "Pixels":
        roi = split_roi(args.roi_pix, False)
        mask[roi[1]:roi[1] + roi[3], roi[0]:roi[0] + roi[2]] = cv2.GC_FGD
    else:
        mask[0:img.shape[0], 0:img.shape[1]] = cv2.GC_FGD

    # Create a binary mask. Foreground is made white, background black.
    bin_mask = np.where((mask == cv2.GC_FGD) + (mask == cv2.GC_PR_FGD), 255,
                        0).astype('uint8')

    # Create a binary mask for the largest contour.
    contour = ft.get_largest_contour(bin_mask, cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_SIMPLE)

    # Merge the binary mask with the image.
    img_masked = cv2.bitwise_and(img, img, mask=bin_mask)

    return img_masked, contour
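
A minimal usage sketch for `create_mask`. The ROI string formats expected by `split_roi` are not shown in this snippet, so the call below uses the fallback branch (any `what_roi` other than "Fractions" or "Pixels"), which marks the whole image as foreground; the argument object and its attribute values are hypothetical.

import argparse
import numpy as np

# Hypothetical arguments; roi_frac/roi_pix are ignored in the fallback branch.
args = argparse.Namespace(roi_frac=None, roi_pix=None)
img = np.full((120, 160, 3), 200, dtype=np.uint8)  # dummy grey image

img_masked, contour = create_mask(img, args, "Whole")
# With the whole image marked as foreground, the masked image equals the
# input and the contour traces the image border.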
Code Example #11
File: data.py Project: xieyanfu/nbclassify
    def __get_color_bgr_means(self, src, args, bin_mask=None):
        """Executes :meth:`features.color_bgr_means`."""
        if self.bin_mask is None:
            raise ValueError("Binary mask cannot be None")

        # Get the contours from the mask.
        contour = ft.get_largest_contour(bin_mask.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        if contour is None:
            raise ValueError("No contour found for binary image")

        # Create a masked image.
        img = cv2.bitwise_and(src, src, mask=bin_mask)

        bins = getattr(args, 'bins', 20)
        hor_means, ver_means = ft.color_bgr_means(img, contour, bins)
        output = np.append(hor_means, ver_means).astype(float)

        # Normalize the features if a scaler is set.
        if self.scaler:
            self.scaler.fit([0.0, 255.0])
            output = self.scaler.transform( output )

        return output
Code Example #12
File: data.py Project: figure002/nbclassify
    def __get_color_bgr_means(self, src, args, bin_mask=None):
        """Executes :meth:`features.color_bgr_means`."""
        if self.bin_mask is None:
            raise ValueError("Binary mask cannot be None")

        # Get the contours from the mask.
        contour = ft.get_largest_contour(bin_mask.copy(), cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
        if contour is None:
            raise ValueError("No contour found for binary image")

        # Create a masked image.
        img = cv2.bitwise_and(src, src, mask=bin_mask)

        bins = getattr(args, 'bins', 20)
        hor_means, ver_means = ft.color_bgr_means(img, contour, bins)
        output = np.append(hor_means, ver_means).astype(float)

        # Normalize the features if a scaler is set.
        if self.scaler:
            self.scaler.fit([0.0, 255.0])
            output = self.scaler.transform(output)

        return output
Code Example #13
def create_mask(img, args, what_roi):
    """
    Create a binary mask for an image.

    Expects an image, the parsed command-line arguments and a string
    indicating which kind of ROI to use.

    A binary mask is created for the image. If there is no (valid) ROI
    given, the whole image will be in foreground, otherwise only the
    ROI will be in foreground.

    The masked image and a contour of the ROI will be returned.
    """
    mask = np.zeros(img.shape[:2], np.uint8)
    if what_roi == "Fractions":
        roi = split_roi(args.roi_frac, True)
        mask[int(img.shape[0] * roi[2]):
             int(img.shape[0] * roi[3]),
             int(img.shape[1] * roi[0]):
             int(img.shape[1] * roi[1])] = cv2.GC_FGD
    elif what_roi == "Pixels":
        roi = split_roi(args.roi_pix, False)
        mask[roi[1]: roi[1] + roi[3], roi[0]: roi[0] + roi[2]] = cv2.GC_FGD
    else:
        mask[0: img.shape[0], 0:img.shape[1]] = cv2.GC_FGD

    # Create a binary mask. Foreground is made white, background black.
    bin_mask = np.where((mask == cv2.GC_FGD) + (mask == cv2.GC_PR_FGD), 255, 0).astype('uint8')

    # Create a binary mask for the largest contour.
    contour = ft.get_largest_contour(bin_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Merge the binary mask with the image.
    img_masked = cv2.bitwise_and(img, img, mask=bin_mask)

    return img_masked, contour
Code Example #14
File: timeit_shape360.py Project: naturalis/imgpheno
def test1(contour):
    intersects, center = shape_360_v1(contour, 0)

def test2(contour):
    """Calculate spot distances on run time."""
    intersects, center = shape_360_v2(contour, 0)

if __name__ == "__main__":
    path = "../examples/images/erycina/1.jpg"
    maxdim = 500
    runs = 2

    img = cv2.imread(path)
    if img is None or img.size == 0:
        sys.stderr.write("Cannot open %s (no such file)\n" % path)
        exit()

    max_px = max(img.shape[:2])
    if max_px > maxdim:
        rf = float(maxdim) / max_px
        img = cv2.resize(img, None, fx=rf, fy=rf)

    mask = grabcut(img, 5)
    bin_mask = np.where((mask==cv2.GC_FGD) + (mask==cv2.GC_PR_FGD), 255, 0).astype('uint8')
    contour = ft.get_largest_contour(bin_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    t = timeit.Timer("test1(contour)", "from __main__ import test1, contour")
    print "shape_360_v1: %f seconds" % (t.timeit(runs) / runs)

    t = timeit.Timer("test2(contour)", "from __main__ import test2, contour")
    print "shape_360_v2: %f seconds" % (t.timeit(runs) / runs)
Code Example #15
File: data.py Project: xieyanfu/nbclassify
    def __get_shape_360(self, args, bin_mask):
        """Executes :meth:`features.shape_360`."""
        if self.bin_mask is None:
            raise ValueError("Binary mask cannot be None")

        rotation = getattr(args, 'rotation', 0)
        step = getattr(args, 'step', 1)
        t = getattr(args, 't', 8)
        output_functions = getattr(args, 'output_functions', {'mean_sd': True})

        # Get the largest contour from the binary mask.
        contour = ft.get_largest_contour(bin_mask, cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_NONE)
        if contour is None:
            raise ValueError("No contour found for binary image")

        # Set the rotation.
        if rotation == 'FIT_ELLIPSE':
            box = cv2.fitEllipse(contour)
            rotation = int(box[2])
        if not 0 <= rotation <= 179:
            raise ValueError("Rotation must be in the range 0 to 179, "\
                "found %s" % rotation)

        # Extract shape feature.
        intersects, center = ft.shape_360(contour, rotation, step, t)

        # Create a masked image.
        if 'color_histograms' in output_functions:
            img_masked = cv2.bitwise_and(self.img, self.img, mask=bin_mask)

        # Run the output function for each angle.
        means = []
        sds = []
        histograms = []
        for angle in range(0, 360, step):
            for f_name, f_args in vars(output_functions).iteritems():
                # Mean distance + standard deviation.
                if f_name == 'mean_sd':
                    distances = []
                    for p in intersects[angle]:
                        d = ft.point_dist(center, p)
                        distances.append(d)

                    if len(distances) == 0:
                        mean = 0
                        sd = 0
                    else:
                        mean = np.mean(distances, dtype=np.float32)
                        if len(distances) > 1:
                            sd = np.std(distances, ddof=1, dtype=np.float32)
                        else:
                            sd = 0

                    means.append(mean)
                    sds.append(sd)

                # Color histograms.
                if f_name == 'color_histograms':
                    # Get a line from the center to the outer intersection point.
                    line = None
                    if intersects[angle]:
                        line = ft.extreme_points([center] + intersects[angle])

                    # Create a mask for the line, where the line is foreground.
                    line_mask = np.zeros(self.img.shape[:2], dtype=np.uint8)
                    if line is not None:
                        cv2.line(line_mask, tuple(line[0]), tuple(line[1]),
                            255, 1)

                    # Create histogram from masked + line masked image.
                    hists = self.__get_color_histograms(img_masked, f_args,
                        line_mask)
                    histograms.append(hists)

        means = np.array(means, dtype=float)
        sds = np.array(sds, dtype=float)

        # Normalize the features if a scaler is set.
        if self.scaler and 'mean_sd' in output_functions:
            means = self.scaler.fit_transform(means)
            sds = self.scaler.fit_transform(sds)

        # Group the means+sds together.
        means_sds = np.array(zip(means, sds)).flatten()

        return np.append(means_sds, histograms)
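
The returned vector interleaves the per-angle means and standard deviations before any histograms are appended, so with the default one-degree step the first 720 values are ordered mean(0°), sd(0°), mean(1°), sd(1°), and so on. A tiny illustration of the zip-and-flatten interleaving (list() added so the sketch also runs under Python 3):

import numpy as np

means = [10.0, 11.0, 12.0]
sds = [0.5, 0.6, 0.7]
means_sds = np.array(list(zip(means, sds))).flatten()
print(means_sds)   # [10.   0.5  11.   0.6  12.   0.7]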
Code Example #16
File: segment.py Project: sdikby/imgpheno
def main():
    print __doc__

    parser = argparse.ArgumentParser(description='Test image segmentation')
    parser.add_argument('image', metavar='FILE', help='Input image')
    parser.add_argument('--iters',
                        metavar='N',
                        type=int,
                        default=5,
                        help="The number of grabCut iterations. Default is 5.")
    parser.add_argument(
        '--margin',
        metavar='N',
        type=int,
        default=1,
        help=
        "The margin of the foreground rectangle from the edges. Default is 1.")
    parser.add_argument(
        '--max-size',
        metavar='N',
        type=float,
        help=
        "Scale the input image down if its perimeter exceeds N. Default is no scaling."
    )
    parser.add_argument(
        '--algo',
        metavar='simple|grabcut',
        type=str,
        choices=['simple', 'grabcut'],
        default='grabcut',
        help="The segmentation algorithm to use, either 'simple' or 'grabcut'."
    )
    parser.add_argument(
        '--roi',
        metavar='x,y,w,h',
        type=str,
        help="Region Of Interest, expressed as X,Y,Width,Height in pixel units."
    )
    args = parser.parse_args()

    img = cv2.imread(args.image)
    if img is None or img.size == 0:
        sys.stderr.write("Failed to read %s\n" % args.image)
        return -1

    sys.stderr.write("Processing %s...\n" % args.image)

    # Scale the image down if its perimeter exceeds the maximum (if set).
    img = common.scale_max_perimeter(img, args.max_size)

    # Process region of interest argument
    roi = None
    if args.roi is not None:
        roi = args.roi.split(',')
        roi[0] = int(roi[0])
        roi[1] = int(roi[1])
        roi[2] = int(roi[2])
        roi[3] = int(roi[3])

    # Perform segmentation.
    if args.algo == 'grabcut':
        mask = common.grabcut(img, args.iters, roi, args.margin)
    else:
        mask = common.simple(img, roi)

    # Create a binary mask. Foreground is made white, background black.
    bin_mask = np.where((mask == cv2.GC_FGD) + (mask == cv2.GC_PR_FGD), 255,
                        0).astype('uint8')

    # Create a binary mask for the largest contour.
    contour = ft.get_largest_contour(bin_mask, cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_SIMPLE)
    mask_contour = np.zeros(bin_mask.shape, dtype=np.uint8)
    cv2.drawContours(mask_contour, [contour], 0, 255, -1)
    cv2.drawContours(img, [contour], 0, common.COLOR['green'], 1)

    # Merge the binary mask with the image.
    img_masked = cv2.bitwise_and(img, img, mask=bin_mask)
    img_masked_contour = cv2.bitwise_and(img, img, mask=mask_contour)

    # Display the image in a window.
    cv2.namedWindow('image')
    cv2.imshow('image', img_masked)

    while True:
        k = cv2.waitKey(0) & 0xFF

        if k == ord('o'):
            cv2.imshow('image', img)
        elif k == ord('s'):
            cv2.imshow('image', img_masked)
        elif k == ord('l'):
            cv2.imshow('image', img_masked_contour)
        elif k == ord('q'):
            break

    cv2.destroyAllWindows()

    return 0
Code Example #17
File: data.py Project: figure002/nbclassify
    def __preprocess(self):
        """Perform preprocessing steps as specified in the configurations.

        Preprocessing steps may be:

        * Resizing
        * Color correction
        * Segmentation or cropping

        This method is executed by :meth:`make`.
        """
        if self.img is None:
            raise RuntimeError("No image is loaded")
        if 'preprocess' not in self.config:
            return

        # Scale the image down if its perimeter (width+height) exceeds the
        # maximum. If a ROI is set, use the perimeter of the ROI instead, or
        # else we might end up with a very small ROI.
        if self.roi:
            perim = sum(self.roi[2:4])
        else:
            perim = sum(self.img.shape[:2])

        rf = 1.0
        max_perim = getattr(self.config.preprocess, 'maximum_perimeter', None)
        if max_perim and perim > max_perim:
            logging.info("Scaling down...")
            rf = float(max_perim) / perim
            self.img = cv2.resize(self.img, None, fx=rf, fy=rf)

        # Account for the resizing factor if a ROI is set.
        if self.roi:
            self.roi = [int(x * rf) for x in self.roi]
            self.roi = tuple(self.roi)

        # Perform color enhancement.
        color_enhancement = getattr(self.config.preprocess,
                                    'color_enhancement', None)
        if color_enhancement:
            for method, args in vars(color_enhancement).iteritems():
                if method == 'naik_murthy_linear':
                    logging.info("Color enhancement...")
                    self.img = ft.naik_murthy_linear(self.img)
                else:
                    raise ConfigurationError("Unknown color enhancement "\
                        "method '%s'" % method)

        # Perform segmentation.
        try:
            segmentation = self.config.preprocess.segmentation.grabcut
        except:
            segmentation = {}

        if segmentation:
            logging.info("Segmenting...")
            iters = getattr(segmentation, 'iters', 5)
            margin = getattr(segmentation, 'margin', 1)
            output_folder = getattr(segmentation, 'output_folder', None)

            # Get the main contour.
            self.mask = self.__grabcut(self.img, iters, self.roi, margin)
            self.bin_mask = np.where((self.mask==cv2.GC_FGD) + \
                (self.mask==cv2.GC_PR_FGD), 255, 0).astype('uint8')
            contour = ft.get_largest_contour(self.bin_mask, cv2.RETR_EXTERNAL,
                                             cv2.CHAIN_APPROX_SIMPLE)
            if contour is None:
                raise ValueError("No contour found for binary image")

            # Create a binary mask of the main contour.
            self.bin_mask = np.zeros(self.img.shape[:2], dtype=np.uint8)
            cv2.drawContours(self.bin_mask, [contour], 0, 255, -1)

            # Save the masked image to the output folder.
            if output_folder:
                img_masked = cv2.bitwise_and(self.img,
                                             self.img,
                                             mask=self.bin_mask)

                out_path = os.path.join(output_folder, self.path)
                out_dir = os.path.dirname(out_path)
                if not os.path.isdir(out_dir):
                    os.makedirs(out_dir)

                cv2.imwrite(out_path, img_masked)
        else:
            # Crop image instead of segmenting.
            try:
                crop = self.config.preprocess.crop
            except:
                crop = {}

            if crop:
                logging.info("Cropping image...")
                roi_pix = getattr(crop, 'roi_pix', None)
                roi_frac = getattr(crop, 'roi_frac', None)
                if roi_pix:
                    # roi_pix is like (x, y, w, h) in pixel units.
                    if len(roi_pix) != 4:
                        raise ValueError(
                            "roi_pix must be a list of four integers.")
                    for x in roi_pix:
                        if not (isinstance(x, int) and x >= 0):
                            raise ValueError(
                                "roi_pix must be a (x, y, w, h) tuple "
                                "of integers.")
                    self.roi = roi_pix
                elif roi_frac:
                    # roi_frac is like (x1, x2, y1, y2) in fractions
                    # of total img size.
                    if len(roi_frac) != 4:
                        raise ValueError(
                            "roi_frac must be a list of four floats.")
                    for x in roi_frac:
                        if not 0 <= x <= 1:
                            raise ValueError(
                                "roi_frac must be a (x1, x2, y1, y2) tuple, "
                                "where the values are floats between 0 and 1.")
                    if not (roi_frac[0] < roi_frac[1]
                            and roi_frac[2] < roi_frac[3]):
                        raise ValueError(
                            "roi_frac must be a (x1, x2, y1, y2) tuple, "
                            "where x1 < x2 and y1 < y2.")
                    # Make ROI like (x, y, w, h).
                    self.roi = (int(self.img.shape[1] * roi_frac[0]),
                                int(self.img.shape[0] * roi_frac[2]),
                                int(self.img.shape[1] * roi_frac[1]) -
                                int(self.img.shape[1] * roi_frac[0]),
                                int(self.img.shape[0] * roi_frac[3]) -
                                int(self.img.shape[0] * roi_frac[2]))
                else:
                    logging.warning("No ROI for cropping found. Proceed "
                                    "without cropping.")
                    self.roi = (0, 0, self.img.shape[1], self.img.shape[0])

                # Crop image to given ROI.
                self.img = self.img[self.roi[1]:self.roi[1] + self.roi[3],
                                    self.roi[0]:self.roi[0] + self.roi[2]]
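
The `roi_frac` branch converts fractional bounds (x1, x2, y1, y2) into a pixel-unit (x, y, w, h) rectangle using the image width (`shape[1]`) and height (`shape[0]`). A worked example with assumed numbers, just to make the coordinate order concrete:

# Assumed inputs, for illustration only.
img_shape = (600, 800, 3)           # height 600, width 800
roi_frac = (0.1, 0.9, 0.2, 0.8)     # (x1, x2, y1, y2) as fractions

x = int(img_shape[1] * roi_frac[0])      # 80
y = int(img_shape[0] * roi_frac[2])      # 120
w = int(img_shape[1] * roi_frac[1]) - x  # 720 - 80 = 640
h = int(img_shape[0] * roi_frac[3]) - y  # 480 - 120 = 360
roi = (x, y, w, h)                       # (80, 120, 640, 360)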
Code Example #18
File: data.py Project: naturalis/nbclassify
    def __preprocess(self):
        """Perform preprocessing steps as specified in the configurations.

        Preprocessing steps may be:

        * Resizing
        * Color correction
        * Segmentation or cropping

        This method is executed by :meth:`make`.
        """
        if self.img is None:
            raise RuntimeError("No image is loaded")
        if 'preprocess' not in self.config:
            return

        # Scale the image down if its perimeter (width+height) exceeds the
        # maximum. If a ROI is set, use the perimeter of the ROI instead, or
        # else we might end up with a very small ROI.
        if self.roi:
            perim = sum(self.roi[2:4])
        else:
            perim = sum(self.img.shape[:2])

        rf = 1.0
        max_perim = getattr(self.config.preprocess, 'maximum_perimeter', None)
        if max_perim and perim > max_perim:
            logging.info("Scaling down...")
            rf = float(max_perim) / perim
            self.img = cv2.resize(self.img, None, fx=rf, fy=rf)

        # Account for the resizing factor if a ROI is set.
        if self.roi:
            self.roi = [int(x*rf) for x in self.roi]
            self.roi = tuple(self.roi)

        # Perform color enhancement.
        color_enhancement = getattr(self.config.preprocess,
            'color_enhancement', None)
        if color_enhancement:
            for method, args in vars(color_enhancement).iteritems():
                if method == 'naik_murthy_linear':
                    logging.info("Color enhancement...")
                    self.img = ft.naik_murthy_linear(self.img)
                else:
                    raise ConfigurationError("Unknown color enhancement "\
                        "method '%s'" % method)

        # Perform segmentation.
        try:
            segmentation = self.config.preprocess.segmentation.grabcut
        except:
            segmentation = {}

        if segmentation:
            logging.info("Segmenting...")
            iters = getattr(segmentation, 'iters', 5)
            margin = getattr(segmentation, 'margin', 1)
            output_folder = getattr(segmentation, 'output_folder', None)

            # Get the main contour.
            self.mask = self.__grabcut(self.img, iters, self.roi, margin)
            self.bin_mask = np.where((self.mask==cv2.GC_FGD) + \
                (self.mask==cv2.GC_PR_FGD), 255, 0).astype('uint8')
            contour = ft.get_largest_contour(self.bin_mask, cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)
            if contour is None:
                raise ValueError("No contour found for binary image")

            # Create a binary mask of the main contour.
            self.bin_mask = np.zeros(self.img.shape[:2], dtype=np.uint8)
            cv2.drawContours(self.bin_mask, [contour], 0, 255, -1)

            # Save the masked image to the output folder.
            if output_folder:
                img_masked = cv2.bitwise_and(self.img, self.img,
                    mask=self.bin_mask)

                out_path = os.path.join(output_folder, self.path)
                out_dir = os.path.dirname(out_path)
                if not os.path.isdir(out_dir):
                    os.makedirs(out_dir)

                cv2.imwrite(out_path, img_masked)
        else:
            # Crop image instead of segmenting.
            try:
                crop = self.config.preprocess.crop
            except:
                crop = {}

            if crop:
                logging.info("Cropping image...")
                roi_pix = getattr(crop, 'roi_pix', None)
                roi_frac = getattr(crop, 'roi_frac', None)
                if roi_pix:
                    # roi_pix is like (x, y, w, h) in pixel units.
                    if len(roi_pix) != 4:
                        raise ValueError(
                            "roi_pix must be a list of four integers.")
                    for x in roi_pix:
                        if not (isinstance(x, int) and x >= 0):
                            raise ValueError(
                                "roi_pix must be a (x, y, w, h) tuple "
                                "of integers.")
                    self.roi = roi_pix
                elif roi_frac:
                    # roi_frac is like (x1, x2, y1, y2) in fractions
                    # of total img size.
                    if len(roi_frac) != 4:
                        raise ValueError(
                            "roi_frac must be a list of four floats.")
                    for x in roi_frac:
                        if not 0 <= x <= 1:
                            raise ValueError(
                                "roi_frac must be a (x1, x2, y1, y2) tuple, "
                                "where the values are floats between 0 and 1.")
                    if not (roi_frac[0] < roi_frac[1] and
                            roi_frac[2] < roi_frac[3]):
                        raise ValueError(
                            "roi_frac must be a (x1, x2, y1, y2) tuple, "
                            "where x1 < x2 and y1 < y2.")
                    # Make ROI like (x, y, w, h).
                    self.roi = (int(self.img.shape[1] * roi_frac[0]),
                                int(self.img.shape[0] * roi_frac[2]),
                                int(self.img.shape[1] * roi_frac[1]) -
                                int(self.img.shape[1] * roi_frac[0]),
                                int(self.img.shape[0] * roi_frac[3]) -
                                int(self.img.shape[0] * roi_frac[2]))
                else:
                    logging.warning("No ROI for cropping found. Proceed "
                                    "without cropping.")
                    self.roi = (0, 0, self.img.shape[1], self.img.shape[0])

                # Crop image to given ROI.
                self.img = self.img[self.roi[1]: self.roi[1] + self.roi[3],
                                    self.roi[0]: self.roi[0] + self.roi[2]]
Code Example #19
File: segment.py Project: naturalis/imgpheno
def main():
    print __doc__

    parser = argparse.ArgumentParser(description='Test image segmentation')
    parser.add_argument('image', metavar='FILE', help='Input image')
    parser.add_argument('--iters', metavar='N', type=int, default=5, help="The number of grabCut iterations. Default is 5.")
    parser.add_argument('--margin', metavar='N', type=int, default=1, help="The margin of the foreground rectangle from the edges. Default is 1.")
    parser.add_argument('--max-size', metavar='N', type=float, help="Scale the input image down if its perimeter exceeds N. Default is no scaling.")
    parser.add_argument('--algo', metavar='simple|grabcut', type=str, choices=['simple', 'grabcut'], default='grabcut', help="The segmentation algorithm to use, either 'simple' or 'grabcut'.")
    parser.add_argument('--roi', metavar='x,y,w,h', type=str, help="Region Of Interest, expressed as X,Y,Width,Height in pixel units.")
    args = parser.parse_args()

    img = cv2.imread(args.image)
    if img is None or img.size == 0:
        sys.stderr.write("Failed to read %s\n" % args.image)
        return -1

    sys.stderr.write("Processing %s...\n" % args.image)

    # Scale the image down if its perimeter exceeds the maximum (if set).
    img = common.scale_max_perimeter(img, args.max_size)

    # Process region of interest argument
    roi = None
    if args.roi is not None:
        roi = args.roi.split(',')
        roi[0] = int(roi[0])
        roi[1] = int(roi[1])
        roi[2] = int(roi[2])
        roi[3] = int(roi[3])

    # Perform segmentation.
    if args.algo == 'grabcut':
        mask = common.grabcut(img, args.iters, roi, args.margin)
    else:
        mask = common.simple(img, roi)

    # Create a binary mask. Foreground is made white, background black.
    bin_mask = np.where((mask==cv2.GC_FGD) + (mask==cv2.GC_PR_FGD), 255, 0).astype('uint8')

    # Create a binary mask for the largest contour.
    contour = ft.get_largest_contour(bin_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    mask_contour = np.zeros(bin_mask.shape, dtype=np.uint8)
    cv2.drawContours(mask_contour, [contour], 0, 255, -1)
    cv2.drawContours(img, [contour], 0, common.COLOR['green'], 1)

    # Merge the binary mask with the image.
    img_masked = cv2.bitwise_and(img, img, mask=bin_mask)
    img_masked_contour = cv2.bitwise_and(img, img, mask=mask_contour)

    # Display the image in a window.
    cv2.namedWindow('image')
    cv2.imshow('image', img_masked)

    while True:
        k = cv2.waitKey(0) & 0xFF

        if k == ord('o'):
            cv2.imshow('image', img)
        elif k == ord('s'):
            cv2.imshow('image', img_masked)
        elif k == ord('l'):
            cv2.imshow('image', img_masked_contour)
        elif k == ord('q'):
            break

    cv2.destroyAllWindows()

    return 0
Code Example #20
File: data.py Project: figure002/nbclassify
    def __get_shape_360(self, args, bin_mask):
        """Executes :meth:`features.shape_360`."""
        if self.bin_mask is None:
            raise ValueError("Binary mask cannot be None")

        rotation = getattr(args, 'rotation', 0)
        step = getattr(args, 'step', 1)
        t = getattr(args, 't', 8)
        output_functions = getattr(args, 'output_functions', {'mean_sd': True})

        # Get the largest contour from the binary mask.
        contour = ft.get_largest_contour(bin_mask, cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_NONE)
        if contour is None:
            raise ValueError("No contour found for binary image")

        # Set the rotation.
        if rotation == 'FIT_ELLIPSE':
            box = cv2.fitEllipse(contour)
            rotation = int(box[2])
        if not 0 <= rotation <= 179:
            raise ValueError("Rotation must be in the range 0 to 179, "\
                "found %s" % rotation)

        # Extract shape feature.
        intersects, center = ft.shape_360(contour, rotation, step, t)

        # Create a masked image.
        if 'color_histograms' in output_functions:
            img_masked = cv2.bitwise_and(self.img, self.img, mask=bin_mask)

        # Run the output function for each angle.
        means = []
        sds = []
        histograms = []
        for angle in range(0, 360, step):
            for f_name, f_args in vars(output_functions).iteritems():
                # Mean distance + standard deviation.
                if f_name == 'mean_sd':
                    distances = []
                    for p in intersects[angle]:
                        d = ft.point_dist(center, p)
                        distances.append(d)

                    if len(distances) == 0:
                        mean = 0
                        sd = 0
                    else:
                        mean = np.mean(distances, dtype=np.float32)
                        if len(distances) > 1:
                            sd = np.std(distances, ddof=1, dtype=np.float32)
                        else:
                            sd = 0

                    means.append(mean)
                    sds.append(sd)

                # Color histograms.
                if f_name == 'color_histograms':
                    # Get a line from the center to the outer intersection point.
                    line = None
                    if intersects[angle]:
                        line = ft.extreme_points([center] + intersects[angle])

                    # Create a mask for the line, where the line is foreground.
                    line_mask = np.zeros(self.img.shape[:2], dtype=np.uint8)
                    if line is not None:
                        cv2.line(line_mask, tuple(line[0]), tuple(line[1]),
                                 255, 1)

                    # Create histogram from masked + line masked image.
                    hists = self.__get_color_histograms(
                        img_masked, f_args, line_mask)
                    histograms.append(hists)

        means = np.array(means, dtype=float)
        sds = np.array(sds, dtype=float)

        # Normalize the features if a scaler is set.
        if self.scaler and 'mean_sd' in output_functions:
            means = self.scaler.fit_transform(means)
            sds = self.scaler.fit_transform(sds)

        # Group the means+sds together.
        means_sds = np.array(zip(means, sds)).flatten()

        return np.append(means_sds, histograms)
Code Example #21
File: data.py Project: xieyanfu/nbclassify
    def __preprocess(self):
        """Perform preprocessing steps as specified in the configurations.

        Preprocessing steps may be:

        * Resizing
        * Color correction
        * Segmentation

        This method is executed by :meth:`make`.
        """
        if self.img is None:
            raise RuntimeError("No image is loaded")
        if 'preprocess' not in self.config:
            return

        # Scale the image down if its perimeter (width+height) exceeds the
        # maximum. If a ROI is set, use the perimeter of the ROI instead, or
        # else we might end up with a very small ROI.
        if self.roi:
            perim = sum(self.roi[2:4])
        else:
            perim = sum(self.img.shape[:2])

        rf = 1.0
        max_perim = getattr(self.config.preprocess, 'maximum_perimeter', None)
        if max_perim and perim > max_perim:
            logging.info("Scaling down...")
            rf = float(max_perim) / perim
            self.img = cv2.resize(self.img, None, fx=rf, fy=rf)

        # Account for the resizing factor if a ROI is set.
        if self.roi:
            self.roi = [int(x*rf) for x in self.roi]
            self.roi = tuple(self.roi)

        # Perform color enhancement.
        color_enhancement = getattr(self.config.preprocess,
            'color_enhancement', None)
        if color_enhancement:
            for method, args in vars(color_enhancement).iteritems():
                if method == 'naik_murthy_linear':
                    logging.info("Color enhancement...")
                    self.img = ft.naik_murthy_linear(self.img)
                else:
                    raise ConfigurationError("Unknown color enhancement "\
                        "method '%s'" % method)

        # Perform segmentation.
        try:
            segmentation = self.config.preprocess.segmentation.grabcut
        except:
            segmentation = {}

        if segmentation:
            logging.info("Segmenting...")
            iters = getattr(segmentation, 'iters', 5)
            margin = getattr(segmentation, 'margin', 1)
            output_folder = getattr(segmentation, 'output_folder', None)

            # Get the main contour.
            self.mask = self.__grabcut(self.img, iters, self.roi, margin)
            self.bin_mask = np.where((self.mask==cv2.GC_FGD) + \
                (self.mask==cv2.GC_PR_FGD), 255, 0).astype('uint8')
            contour = ft.get_largest_contour(self.bin_mask, cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)
            if contour is None:
                raise ValueError("No contour found for binary image")

            # Create a binary mask of the main contour.
            self.bin_mask = np.zeros(self.img.shape[:2], dtype=np.uint8)
            cv2.drawContours(self.bin_mask, [contour], 0, 255, -1)

            # Save the masked image to the output folder.
            if output_folder:
                img_masked = cv2.bitwise_and(self.img, self.img,
                    mask=self.bin_mask)

                out_path = os.path.join(output_folder, self.path)
                out_dir = os.path.dirname(out_path)
                if not os.path.isdir(out_dir):
                    os.makedirs(out_dir)

                cv2.imwrite(out_path, img_masked)
Code Example #22
File: timeit_shape360.py Project: sdikby/imgpheno
    """Calculate spot distances on run time."""
    intersects, center = shape_360_v2(contour, 0)


if __name__ == "__main__":
    path = "../examples/images/erycina/1.jpg"
    maxdim = 500
    runs = 2

    img = cv2.imread(path)
    if img is None or img.size == 0:
        sys.stderr.write("Cannot open %s (no such file)\n" % path)
        exit()

    max_px = max(img.shape[:2])
    if max_px > maxdim:
        rf = float(maxdim) / max_px
        img = cv2.resize(img, None, fx=rf, fy=rf)

    mask = grabcut(img, 5)
    bin_mask = np.where((mask == cv2.GC_FGD) + (mask == cv2.GC_PR_FGD), 255,
                        0).astype('uint8')
    contour = ft.get_largest_contour(bin_mask, cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_NONE)

    t = timeit.Timer("test1(contour)", "from __main__ import test1, contour")
    print "shape_360_v1: %f seconds" % (t.timeit(runs) / runs)

    t = timeit.Timer("test2(contour)", "from __main__ import test2, contour")
    print "shape_360_v2: %f seconds" % (t.timeit(runs) / runs)