# Example #1
# 0
# NOTE(review): script fragment — `ap` (an argparse.ArgumentParser), the
# imports (cv2, random, imutils, paths) and the RootSIFT, DetectAndDescribe
# and FeatureIndexer classes are all defined above this excerpt.

# command-line arguments; argparse converts the dashes in the long option
# names to underscores, hence the args["features_db"]-style keys used below
ap.add_argument("-f", "--features-db", required=True,
  help="Path to where the features database will be stored")
ap.add_argument("-a", "--approx-images", type=int, default=250,
  help="Approximate # of images in the dataset")
ap.add_argument("-b", "--max-buffer-size", type=int, default=50000,
  help="Maximum buffer size for # of features to be stored in memory")
args = vars(ap.parse_args())

# initialize the keypoint detector, local invariant descriptor, and the
# detect-and-describe pipeline
# NOTE(review): cv2.FeatureDetector_create is the OpenCV 2.4.x factory API
# and was removed in OpenCV 3.x — confirm the pinned OpenCV version
detector = cv2.FeatureDetector_create("GFTT")
descriptor = RootSIFT()
dad = DetectAndDescribe(detector, descriptor)

# initialize the feature indexer; it buffers up to max_buffer_size features
# in memory before flushing them to the features database on disk
fi = FeatureIndexer(args["features_db"], estNumImages=args["approx_images"],
  maxBufferSize=args["max_buffer_size"], verbose=True)

# grab the image paths and randomly shuffle them
imagePaths = list(paths.list_images(args["dataset"]))
random.shuffle(imagePaths)

# loop over the images in the dataset (the loop body is truncated here —
# the describe/index steps continue past this excerpt)
for (i, imagePath) in enumerate(imagePaths):
    # show progress every 10 images
    if i > 0 and i % 10 == 0:
        fi._debug("processed {} images".format(i), msgType="[PROGRESS]")
    # load the image, resize it to a fixed width, and convert to grayscale
    image = cv2.imread(imagePath)
    image = imutils.resize(image, width=320)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Example #2
# 0
# NOTE(review): script fragment — `ap`, FeatureDetector_create,
# DescriptorExtractor_create, DetectAndDescribe, FeatureIndexer, `paths`
# and `random` are all defined above this excerpt.

# which local invariant descriptor extractor to use (an OpenCV factory name)
ap.add_argument("-e", "--extractor", default="BRISK")
args = vars(ap.parse_args())

# initialize the keypoint detector, local invariant descriptor, and the
# detect-and-describe pipeline from the command-line choices
detector = FeatureDetector_create(args["feature_detector"])
descriptor = DescriptorExtractor_create(args["extractor"])
dad = DetectAndDescribe(detector, descriptor)

print("Using {} feature detector".format(args["feature_detector"]))
print("Using {} descriptor extractor".format(args["extractor"]))

# initialize the feature indexer, then grab the image paths and randomly shuffle
# them
fi = FeatureIndexer(args["features_db"],
                    estNumImages=args["approx_images"],
                    maxBufferSize=args["max_buffer_size"],
                    verbose=True)
imagePaths = list(paths.list_images(args["dataset"]))
random.shuffle(imagePaths)

# loop over the images in the dataset (body truncated in this excerpt)
for (i, imagePath) in enumerate(imagePaths):
    # progress reporting intentionally disabled; re-indent the _debug call
    # under the `if` before re-enabling it
    #if i > 0 and i % 10 == 0:
    #fi._debug("processed {} images".format(i), msgType="[PROGRESS]")

    # extract the filename and image class from the image path and use it to
    # construct the unique image ID ("class:filename")
    # NOTE(review): splitting on "/" is POSIX-only — verify this never runs
    # on Windows paths
    p = imagePath.split("/")
    imageID = "{}:{}".format(p[-2], p[-1])
# Example #3
# 0
import argparse
import cv2
import glob
import imutils
import os

# NOTE(review): script fragment — FeatureIndexer must be imported above this
# excerpt, and the describe/index steps of the loop are truncated below.

# parse command-line arguments
# FIX(review): three of the four options reused the flag "-d", which makes
# argparse raise ArgumentError at startup; give each option a unique flag
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
    help="Path to the directory that contains the images to be indexed")
ap.add_argument("-f", "--features_db", required=True,
    help="Path to where feature database will be stored")
ap.add_argument("-a", "--approx_images", type=int, default=500,
    help="Approximate # of images in the dataset")
ap.add_argument("-b", "--max_buffer_size", type=int, default=5000,
    help="Maximum buffer size for # of features to be stored in memory")
args = vars(ap.parse_args())

# initialize the keypoint detector / local invariant descriptor
detector = cv2.xfeatures2d.SIFT_create()

# initialize the feature indexer
# FIX(review): the key was misspelled "feature_db" (a KeyError at runtime);
# the option defined above is "features_db"
fi = FeatureIndexer(args["features_db"], estNumImages=args["approx_images"],
    maxBufferSize=args["max_buffer_size"])

# loop over the images in the dataset
# FIX(review): glob.glob("") matches nothing and --dataset was never used;
# glob the dataset directory instead
for (i, imagePath) in enumerate(glob.glob(os.path.join(args["dataset"], "*"))):
    # show progress every 10 images
    # FIX(review): "i % 10 = 0" was a SyntaxError; use the equality operator
    if i > 0 and i % 10 == 0:
        fi._debug("processed {} images".format(i), msgType="[PROGRESS]")

    # FIX(review): the pre-processing below was nested inside the progress
    # branch, so only every 10th image would have been loaded — dedent it so
    # every image is processed
    # extract the filename from the image path, then load the image itself,
    # resize it to a fixed width, and convert it to grayscale
    filename = os.path.basename(imagePath)
    image = cv2.imread(imagePath)
    image = imutils.resize(image, width=320)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # describe the image (continues past this excerpt)
# Example #4
# 0
# NOTE(review): script fragment — scrapedDataReader, ROW_REFERENCE, ROW_TITLE,
# featuresDb, appendToFile, approxImages, maxBufferSize, dataset, IMAGE_WIDTH,
# FeatureIndexer, paths, cv2 and imutils are all defined above this excerpt,
# and the loop body is truncated below.

# materialize the CSV reader so its rows can be traversed
scrapedData = list(scrapedDataReader)
scrapedDataDict = {}

# [12-11-2016]
# loop over the CSV rows and build a dictionary of additional scraped data,
# keyed by filename; the value is a single "#"-separated string (a workaround
# for storing structured values in HDF5)
for row in scrapedData:
    # index of the first character after the last backslash, i.e. the start
    # of the filename component of the (Windows-style) path in row[0]
    index = row[0].rfind("\\") + 1
    # FIX(review): rfind("\\") + 1 already points at the filename's first
    # character; the original sliced row[0][index + 1:], silently dropping
    # that first character and corrupting every dictionary key (compare the
    # single "+ 1" used for `filename` in the loop below)
    scrapedDataDict[row[0][index:]] = (
        str(row[ROW_REFERENCE]) + "#" + str(row[ROW_TITLE]))

# initialize the feature indexer
# [26-12-2016 +++]
fi = FeatureIndexer(featuresDb, appendToFile, estNumImages=approxImages,
    maxBufferSize=maxBufferSize, verbose=True)
# [26-12-2016 ---]

# loop over the images in the dataset (body truncated in this excerpt)
for (i, imagePath) in enumerate(paths.list_images(dataset)):
    # show progress every 10 images
    if i > 0 and i % 10 == 0:
        fi._debug("processed {} images".format(i), msgType="[PROGRESS]")

    # extract the image filename (i.e. the unique image ID) from the image
    # path, then load the image, resize it, and convert it to grayscale
    filename = imagePath[imagePath.rfind("\\") + 1:]
    image = cv2.imread(imagePath)
    image = imutils.resize(image, width=IMAGE_WIDTH)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Example #5
# 0
# NOTE(review): script fragment — detector, descriptor, DetectAndDescribe,
# FeatureIndexer, args, paths and random are defined above this excerpt, and
# the loop body is truncated below.

# build the keypoint-detection / description pipeline
dad = DetectAndDescribe(detector, descriptor)
"""
Lines 25-27 set up our keypoint detection and local invariant descriptor 
pipeline. In our CBIR lessons, we used the Fast Hessian (i.e. SURF) 
keypoint detector, but here we’ll use the GFTT detector instead. It’s 
very common to use either the GFTT or Harris detector when using the BOVW 
model for classification; however, you should perform experiments 
evaluating each keypoint detector and go with the detector that obtained
the best accuracy. In order to describe the region surrounding each 
keypoint, we’ll use the RootSIFT descriptor, which will produce a 
128-dim feature vector for each keypoint region.
"""

# initialize the feature indexer
fi = FeatureIndexer(args['features_db'],
                    estNumImages=args['approx_images'],
                    maxBufferSize=args['max_buffer_size'],
                    verbose=True)

# grab the image paths and randomly shuffle them
imagePaths = list(paths.list_images(args["dataset"]))
random.shuffle(imagePaths)

# loop over the images in the dataset
for (i, imagePath) in enumerate(imagePaths):
    # show progress every 10 images
    # FIX(review): the helper is named `_debug` on FeatureIndexer (see every
    # other call site in this codebase); `fi.debug(...)` would raise
    # AttributeError the first time the branch is taken
    if i > 0 and i % 10 == 0:
        fi._debug("processed {} images".format(i), msgType='[PROGRESS]')

    # extract the image filename (i.e. the unique image ID) from the image
    # path, then load the image itself
    filename = imagePath[imagePath.rfind("/") + 1:]