Example #1
import cv2
import numpy as np


def extract_features(vocab_path, image_and_features_paths):
    with open(vocab_path, "rb") as f:
        vocab = np.load(f)

    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    # FLANN_INDEX_LSH = 6
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    # index_params = dict(algorithm=FLANN_INDEX_LSH,
    #                     table_number=20,
    #                     key_size=10,
    #                     multi_probe_level=2)
    search_params = dict(checks=50)  # or pass empty dictionary
    matcher = cv2.FlannBasedMatcher(index_params, search_params)
    # OpenCV 2.x API; on OpenCV 3/4 use cv2.xfeatures2d.SIFT_create()
    # (or cv2.SIFT_create() in recent builds) for both roles.
    detector = cv2.SIFT()
    extractor = cv2.DescriptorExtractor_create("SIFT")
    bow_de = cv2.BOWImgDescriptorExtractor(extractor, matcher)
    bow_de.setVocabulary(vocab)

    for image_path, feature_path in image_and_features_paths:
        screenshot = cv2.imread(image_path)

        keypoints = detector.detect(screenshot)
        descriptor = bow_de.compute(screenshot, keypoints)

        # np.save writes binary data, so the file must be opened in "wb" mode.
        with open(feature_path, "wb") as f:
            np.save(f, descriptor)
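For context, the vocabulary loaded from vocab_path above is typically built beforehand by clustering SIFT descriptors from a set of training images. Below is a minimal sketch of that step under the same OpenCV 2.x API; the function name, cluster count, and paths are illustrative assumptions, not part of the original example.

import cv2
import numpy as np


def build_vocabulary(image_paths, vocab_path, clusters=64):
    # Collect SIFT descriptors from every training image and cluster them
    # into a visual-word vocabulary (illustrative sketch, OpenCV 2.x API).
    detector = cv2.SIFT()
    trainer = cv2.BOWKMeansTrainer(clusters)
    for image_path in image_paths:
        image = cv2.imread(image_path)
        keypoints, descriptors = detector.detectAndCompute(image, None)
        if descriptors is not None:
            trainer.add(descriptors)
    vocab = trainer.cluster()  # clusters x 128 float32 array
    with open(vocab_path, "wb") as f:
        np.save(f, vocab)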
Example #2
    def __init__(self, type, image_set, trained_hash, testing_hash, trained_directory):
        self.testing_hash = testing_hash
        self.trained_directory = trained_directory

        if trained_hash is not None:
            self._hash = trained_hash + "_" + image_set[0].hash

        super().__init__(type, image_set)
        self.size = 0.01
        self.bowsize = 4096

        if self.type is FeatureType.FREAK:
            self.size = 0.001

        matcher = self.initMatcher(type)
        self.initFeature(type)

        self.dictionary = None
        self.bow_extract = cv2.BOWImgDescriptorExtractor(self.extractor, matcher)
        self.bow_train = cv2.BOWKMeansTrainer(self.bowsize)

        #print(testing_hash)
        try:
            if self.trained_directory is not None:
                dictfile = "cache/" + str(self.name) + "/" + self.trained_directory + "/" + trained_hash + "_dict.npy"
            else:
                dictfile = self.directory + self.hash + "_dict.npy"
            logging.debug(dictfile)
            self.dictionary = np.load(dictfile)
            #logging.debug("Loaded dict cache.")

        except FileNotFoundError:
            logging.debug("Could not load dict cache file for this feature.")
Example #3
import cv2


def computeBOWDictionary(BOW):
    # BOW is expected to be a cv2.BOWKMeansTrainer already filled with
    # descriptors; cluster() runs k-means to build the visual vocabulary.
    dictionary = BOW.cluster()
    sift2 = cv2.xfeatures2d.SIFT_create()
    BOWDiction = cv2.BOWImgDescriptorExtractor(sift2,
                                               cv2.BFMatcher(cv2.NORM_L2))
    BOWDiction.setVocabulary(dictionary)
    return BOWDiction
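A possible way to drive this helper end to end; the image paths and cluster count here are placeholders, not from the original:

import cv2

# Populate a trainer with SIFT descriptors from a few training images.
sift = cv2.xfeatures2d.SIFT_create()
trainer = cv2.BOWKMeansTrainer(50)
for image_path in ["train_01.jpg", "train_02.jpg"]:  # placeholder paths
    gray = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    keypoints, descriptors = sift.detectAndCompute(gray, None)
    if descriptors is not None:
        trainer.add(descriptors)

bow_diction = computeBOWDictionary(trainer)

# Histogram of visual words for a new image.
query = cv2.imread("query.jpg", cv2.IMREAD_GRAYSCALE)
histogram = bow_diction.compute(query, sift.detect(query))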
Example #4
def pen_detector():
    pos, neg = "pos_", "neg_"
    detect, extract = get_extract_detect()
    matcher = get_flann_matcher()
    print("building BOWKMEANSTraniner...")
    bow_kmeans_trainer = cv2.BOWKMeansTrainer(27)
    extract_bow = cv2.BOWImgDescriptorExtractor(extract, matcher)

    print("adding features to trainer")
    for i in range(4):
        print(i)
        bow_kmeans_trainer.add(extract_sift(path(pos, i), extract, detect))
        bow_kmeans_trainer.add(extract_sift(path(neg, i), extract, detect))

    voc = bow_kmeans_trainer.cluster()
    extract_bow.setVocabulary(voc)

    traindata, trainlabels = [], []
    print("adding to train data")
    for i in range(7):
        print(i+1)
        traindata.extend(bow_features(cv2.imread(path(pos, i), 0), extract_bow, detect))
        trainlabels.append(1)
        traindata.extend(bow_features(cv2.imread(path(neg, i), 0), extract_bow, detect))
        trainlabels.append(-1)

    svm = cv2.ml.SVM_create()
    svm.setType(cv2.ml.SVM_C_SVC)
    svm.setGamma(0.4)
    svm.setC(60)
    svm.setKernel(cv2.ml.SVM_LINEAR)

    svm.train(np.array(traindata), cv2.ml.ROW_SAMPLE, np.array(trainlabels))
    return svm, extract_bow
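The returned pair can then be used to score new images. A hedged sketch of that step; predict_pen, the grayscale read, and passing detect in separately are assumptions, not part of the original:

def predict_pen(svm, extract_bow, detect, image_path):
    # Build the BOW histogram for one image and classify it with the SVM.
    gray = cv2.imread(image_path, 0)
    descriptor = extract_bow.compute(gray, detect.detect(gray))
    _, result = svm.predict(np.array(descriptor, dtype=np.float32))
    return result[0][0]  # 1.0 for the positive ("pen") class, -1.0 otherwise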
Example #5
    def __init__(self, threshold, cluster_num):

        self.image_paths = []
        self.image_keypoints = {}
        self.surf = cv2.xfeatures2d.SURF_create(threshold)
        self.bow_kmeans = cv2.BOWKMeansTrainer(cluster_num)
        self.bow_extractor = cv2.BOWImgDescriptorExtractor(self.surf, cv2.BFMatcher(cv2.NORM_L2))
        self.empty_histogram = [0.0] * cluster_num
        self.count = 0
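The methods that populate and use these attributes are not part of the snippet; a rough sketch of what they might look like, with add_image and build_vocabulary as assumed names:

    def add_image(self, image_path):
        # Remember the keypoints per image and feed the SURF descriptors
        # into the k-means trainer (assumed companion method).
        gray = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        keypoints, descriptors = self.surf.detectAndCompute(gray, None)
        self.image_paths.append(image_path)
        self.image_keypoints[image_path] = keypoints
        if descriptors is not None:
            self.bow_kmeans.add(descriptors)
            self.count += 1

    def build_vocabulary(self):
        # Cluster the collected descriptors and hand the vocabulary to the
        # BOW extractor so it can compute per-image histograms.
        vocabulary = self.bow_kmeans.cluster()
        self.bow_extractor.setVocabulary(vocabulary)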