Code example #1
import os
import pickle

import cv2

# the HOG descriptor class used below comes from the project's own descriptors module
def main(im_path, desc_name):
    print('[INFO] Preparing to extract features for images in \'' + im_path + '\'')

    # track HOG feature vectors and corresponding images
    features = {}

    # image dimensions
    width = 128
    height = 64

    # feature descriptor
    print('[INFO] Using the ' + desc_name.upper() + ' feature descriptor')
    if desc_name == 'hog':
        descriptor = HOG()

    # evaluate image files
    print('[INFO] Processing images and computing features')
    for filename in os.listdir(im_path):
        if not filename.endswith('.jpg'):
            continue

        # load the image directly as grayscale
        im = cv2.imread(os.path.join(im_path, filename), cv2.IMREAD_GRAYSCALE)

        # resize image
        im = cv2.resize(im, (width,height))

        # binarize using Otsu's method
        im = cv2.threshold(im, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
        im[im == 255] = 1

        # thin using Zhang and Suen's method
        #im = skeletonize(im)
        #im = im.astype(np.uint8)

        # compute features
        v = descriptor.compute(im)
        features[filename] = v

    # save data
    print('[INFO] Saving features and corresponding image name to \'features/' + desc_name + '_features.pickle\'')
    with open('./features/' + desc_name + '_features.pickle', 'wb') as handle:
        pickle.dump(features, handle)
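The HOG class instantiated above is not included in the snippet. A minimal sketch of what such a descriptor wrapper could look like, assuming it wraps scikit-image's feature.hog; the class body, parameter names, and defaults here are illustrative assumptions, not the original implementation:

from skimage import feature

class HOG:
    def __init__(self, orientations=9, pixels_per_cell=(8, 8),
                 cells_per_block=(2, 2)):
        # store the HOG parameters used for every image (assumed defaults)
        self.orientations = orientations
        self.pixels_per_cell = pixels_per_cell
        self.cells_per_block = cells_per_block

    def compute(self, image):
        # return a flattened HOG feature vector for a single grayscale image
        return feature.hog(image,
                           orientations=self.orientations,
                           pixels_per_cell=self.pixels_per_cell,
                           cells_per_block=self.cells_per_block,
                           block_norm="L2-Hys",
                           feature_vector=True)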
Code example #2
import argparse
import random

import progressbar
from imutils import paths

# Conf and HOG used below come from the project's own configuration and descriptor modules

# construct the argument parser and parse the command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c",
                "--conf",
                required=True,
                help="path to the configuration file")
args = vars(ap.parse_args())

# load the configuration file
conf = Conf(args["conf"])

# initialize the HOG descriptor along with the list of data and labels
hog = HOG(orientations=conf["orientations"],
          pixelsPerCell=tuple(conf["pixels_per_cell"]),
          cellsPerBlock=tuple(conf["cells_per_block"]),
          normalize=conf["normalize"],
          block_norm="L1")
data = []
labels = []

# grab the set of ground-truth images and select a percentage of them for training
trnPaths = list(paths.list_images(conf["image_dataset"]))
trnPaths = random.sample(trnPaths,
                         int(len(trnPaths) * conf["percent_gt_images"]))
print("[INFO] describing training ROIs...")

# setup the progress bar
widgets = [
    "Extracting: ",
    progressbar.Percentage(), " ",
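The Conf object used above and in the next snippet is loaded from a configuration file, but its implementation is not shown. A minimal sketch, assuming the configuration is a JSON file exposed through dictionary-style lookups; the class body is an assumption, not the original code:

import json

class Conf:
    def __init__(self, conf_path):
        # parse the JSON configuration file from disk (assumed format)
        with open(conf_path) as f:
            self.data = json.load(f)

    def __getitem__(self, key):
        # allow conf["key"] access, as used in the snippets
        return self.data[key]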
Code example #3
                help="path to the configuration file")
ap.add_argument("-i",
                "--image",
                required=True,
                help="path to the image to be classified")
args = vars(ap.parse_args())

# load the configuration file
conf = Conf(args["conf"])

# load the classifier, then initialize the Histogram of Oriented Gradients descriptor
# and the object detector
model = pickle.loads(open(conf["classifier_path"], "rb").read())
hog = HOG(orientations=conf["orientations"],
          pixelsPerCell=tuple(conf["pixels_per_cell"]),
          cellsPerBlock=tuple(conf["cells_per_block"]),
          normalize=conf["normalize"],
          block_norm="L1")
od = ObjectDetector(model, hog)

# load the image, resize it, and convert it to grayscale
image = cv2.imread(args["image"])
image = imutils.resize(image, width=min(260, image.shape[1]))
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# detect objects in the image
(boxes, probs) = od.detect(gray,
                           conf["window_dim"],
                           winStep=conf["window_step"],
                           pyramidScale=conf["pyramid_scale"],
                           minProb=conf["min_probability"])
Code example #4
import cv2
import imutils

# HOG used below is the project's own feature-extraction helper
def HOG_extractor(image_path, features):
    image = cv2.imread(image_path)
    image = imutils.resize(image, width=1024)
    hog = HOG(image, None)
    # store the image path alongside its feature vector
    features.append((image_path, hog))
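A small usage sketch for the helper above, looping over a hypothetical directory of .jpg images (the directory name and the availability of the HOG helper are assumptions):

import os

features = []
image_dir = "images"  # hypothetical input directory
for name in os.listdir(image_dir):
    if name.lower().endswith(".jpg"):
        HOG_extractor(os.path.join(image_dir, name), features)
print("[INFO] extracted features for {} images".format(len(features)))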