Code Example #1
File: features.py Project: henryzord/ImageRetrieval
def compare_histogram(a_hist, b_hist, method=cv2.HISTCMP_CHISQR):
	"""
	Compares two histograms, whether they represent greyscale images
		or colored ones.
	:param a_hist: image A histogram.
	:param b_hist: image B histogram.
	:param method: Comparison method. May be one of the following:

		HISTCMP_CORREL - Correlation

		HISTCMP_CHISQR - Chi-Square

		HISTCMP_INTERSECT - Intersection

		HISTCMP_BHATTACHARYYA - Bhattacharyya distance

		HISTCMP_HELLINGER - Synonym for HISTCMP_BHATTACHARYYA

		Please refer to OpenCV documentation for further details.
	:return: The result of the comparison between two histograms.
	"""
	if isinstance(a_hist, list) and isinstance(b_hist, list):
		diff = []
		for i, channel in enumerate(CHANNELS):
			diff += [cv2.compareHist(a_hist[i], b_hist[i], method=method)]
	else:
		diff = cv2.compareHist(a_hist, b_hist, method=method)

	return np.mean(diff)
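A usage sketch (an assumption, not from the project: CHANNELS is taken to be the three BGR channel indices):

import cv2

# hypothetical usage of compare_histogram; assumes CHANNELS == [0, 1, 2]
a = cv2.imread("a.png")
b = cv2.imread("b.png")
a_hist = [cv2.calcHist([a], [c], None, [256], [0, 256]) for c in (0, 1, 2)]
b_hist = [cv2.calcHist([b], [c], None, [256], [0, 256]) for c in (0, 1, 2)]
print(compare_histogram(a_hist, b_hist, method=cv2.HISTCMP_CHISQR))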
Code Example #2
File: image.py Project: jiaqianghuai/image_match_web
def hist_similarity(image_1, image_2):
    """color hist based image similarity
    
    @param image_1: np.array(the first input image)
    @param image_2: np.array(the second input image)
    @return similarity: float(correlation value in [-1, 1]; the bigger the more similar)
    """
    if image_1.ndim == 2 and image_2.ndim == 2:
        hist_1 = cv2.calcHist([image_1], [0], None, [256], [0.0, 255.0])
        hist_2 = cv2.calcHist([image_2], [0], None, [256], [0.0, 255.0])
        similarity = cv2.compareHist(hist_1, hist_2, cv2.cv.CV_COMP_CORREL)
    elif image_1.ndim == 3 and image_2.ndim == 3:
        """R,G,B split"""
        b_1, g_1, r_1 = cv2.split(image_1)
        b_2, g_2, r_2 = cv2.split(image_2)
        hist_b_1 = cv2.calcHist([b_1], [0], None, [256], [0.0, 255.0])
        hist_g_1 = cv2.calcHist([g_1], [0], None, [256], [0.0, 255.0])
        hist_r_1 = cv2.calcHist([r_1], [0], None, [256], [0.0, 255.0])
        hist_b_2 = cv2.calcHist([b_2], [0], None, [256], [0.0, 255.0])
        hist_g_2 = cv2.calcHist([g_2], [0], None, [256], [0.0, 255.0])
        hist_r_2 = cv2.calcHist([r_2], [0], None, [256], [0.0, 255.0])
        similarity_b = cv2.compareHist(hist_b_1,hist_b_2,cv2.cv.CV_COMP_CORREL)
        similarity_g = cv2.compareHist(hist_g_1,hist_g_2,cv2.cv.CV_COMP_CORREL)
        similarity_r = cv2.compareHist(hist_r_1,hist_r_2,cv2.cv.CV_COMP_CORREL)
        sum_bgr = similarity_b + similarity_g + similarity_r
        similarity = sum_bgr/3.
    else:
        # mixed case: convert the color image to grayscale before comparing
        gray_1 = cv2.cvtColor(image_1, cv2.cv.CV_RGB2GRAY) if image_1.ndim == 3 else image_1
        gray_2 = cv2.cvtColor(image_2, cv2.cv.CV_RGB2GRAY) if image_2.ndim == 3 else image_2
        hist_1 = cv2.calcHist([gray_1], [0], None, [256], [0.0, 255.0])
        hist_2 = cv2.calcHist([gray_2], [0], None, [256], [0.0, 255.0])
        similarity = cv2.compareHist(hist_1, hist_2, cv2.cv.CV_COMP_CORREL)
    return similarity
Code Example #3
    def compute_surrounding_color_contrast(self, img, rect, container_rect, layers=2):
        """
        Measure of the dissimilarity of a window to its immediate surrounding area based on chi-squared
        distances of LAB space histogram. See "Measuring the objectness of image windows" Alexe et al.

        :type img: ndarray
        :type rect: tuple
        :type container_rect: tuple
        :type layers: int
        :rtype : ndarray

        :param img: Source image. This is not a cropped ROI image.
        :param rect: ROI (x, y, w, h) to compute the color contrast against surroundings.
        :param container_rect: Container border rectangle (x, y, w, h) pixels outside this will be ignored.
        :param layers: number of surrounding layers to compute contrasts.
        :return: Color contrast feature vector of shape (layers+1, ). The last cell contains the contrast
                   between the ROI and the entire image.
        """

        # chi-squared distances between window image and its surroundings
        cc_feature_vector = np.zeros(layers + 1)

        rect_img = crop_image(img, rect)
        window_hist = compute_lab_histogram(rect_img)

        prev_rect = rect
        for level in range(layers):
            expanded_rect = expand_rect(prev_rect, radius_increase=1.5)

            # crop parts outside the container rect
            expanded_rect = get_intersecting_rect2(expanded_rect, container_rect)
            expanded_rect_img = crop_image(img, expanded_rect)

            # mask of only the immediate surrounding area
            ew, eh = expanded_rect_img.shape[1], expanded_rect_img.shape[0]
            mask = get_mask((ew, eh), expanded_rect, prev_rect)

            # LAB histogram of the surrounding area
            surrounding_hist = compute_lab_histogram(expanded_rect_img, mask=mask)

            # method=1 is the same as cv.CV_COMP_CHISQR for computing the chi-squared distance
            cc_feature_vector[level] = cv2.compareHist(window_hist, surrounding_hist, method=1)

            prev_rect = expanded_rect

        # cache the background histogram to save computation time
        img_id = id(img)
        if img_id not in self.lab_hist_cache:
            rect_img = crop_image(img, container_rect)
            background_hist = compute_lab_histogram(rect_img)
            self.lab_hist_cache[img_id] = background_hist
        else:
            background_hist = self.lab_hist_cache[img_id]

        # color contrast between the window image and the entire source image
        background_hist_dist = cv2.compareHist(window_hist, background_hist, method=1)
        cc_feature_vector[layers] = background_hist_dist

        return cc_feature_vector
Code Example #4
def calcColorHistogramSimilarity(img1, img2):
    h_b1, h_g1, h_r1 = calcRGBHistogram(img1)
    h_b2, h_g2, h_r2 = calcRGBHistogram(img2)

    bSimilarity = cv2.compareHist(h_b1, h_b2, cv2.cv.CV_COMP_CORREL)
    gSimilarity = cv2.compareHist(h_g1, h_g2, cv2.cv.CV_COMP_CORREL)
    rSimilarity = cv2.compareHist(h_r1, h_r2, cv2.cv.CV_COMP_CORREL)

    return (bSimilarity + gSimilarity + rSimilarity)/3.0
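The helper calcRGBHistogram is not shown in this snippet; a minimal sketch of what it presumably does (an assumption: one 256-bin histogram per BGR channel):

import cv2

def calcRGBHistogram(img):
    # assumed helper: one 256-bin histogram per B, G, R channel
    b, g, r = cv2.split(img)
    h_b = cv2.calcHist([b], [0], None, [256], [0, 256])
    h_g = cv2.calcHist([g], [0], None, [256], [0, 256])
    h_r = cv2.calcHist([r], [0], None, [256], [0, 256])
    return h_b, h_g, h_r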
Code Example #5
File: ColorHistCorrel.py Project: Rigi/CBIR
    def get_distance(self, query_hist, ref_hist, w=[1.0, 1.0, 1.0]):
        result = dict()

        r0, v0 = query_hist.features.items()[0]
        for r1, v1 in ref_hist.features.items():
            result[r1] = cv2.compareHist(v0[0], v1[0], cv2.cv.CV_COMP_CORREL) * w[0] + \
                    cv2.compareHist(v0[1], v1[1], cv2.cv.CV_COMP_CORREL) * w[1] + \
                    cv2.compareHist(v0[2], v1[2], cv2.cv.CV_COMP_CORREL) * w[2]

        return result
Code Example #6
File: detector.py Project: Beaverworks-W3/racecar_12
	def processImg(self,img):
		print("processing")
		hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
		contourList = []
		maskG = cv2.inRange(hsv, np.array([35,100,100]), np.array([70, 255, 255])) 
		maskG = self.blur(maskG)      
		maskR = cv2.inRange(hsv, np.array([0,100,100]), np.array([15, 255, 255]))   
		maskR = self.blur(maskR)
		maskB = cv2.inRange(hsv, np.array([90,150,150]), np.array([130, 255, 255])) 
		maskB = self.blur(maskB)  
		maskY = cv2.inRange(hsv, np.array([23,100,160]), np.array([30, 255, 255]))  
		maskY = self.blur(maskY) 
		maskP1 = cv2.inRange(hsv, np.array([0,50,230]), np.array([10, 150, 255]))  
		maskP2 = cv2.inRange(hsv, np.array([150,50,230]), np.array([180,150,255]))  
		maskP = maskP1+maskP2 
		maskP = self.blur(maskP)
		contoursG = cv2.findContours(maskG, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
		contoursR = cv2.findContours(maskR, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
		contoursB = cv2.findContours(maskB, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
		contoursY = cv2.findContours(maskY, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
		contoursP = cv2.findContours(maskP, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
		self.contourAppend(contourList,contoursG,"green")
		self.contourAppend(contourList,contoursR,"red")
		self.contourAppend(contourList,contoursB,"blue")
		self.contourAppend(contourList,contoursY,"yellow")
		self.contourAppend(contourList,contoursP,"pink")
		if len(contourList)>0:
			biggest = self.findBiggest(contourList)
		else:
			biggest = None
		if biggest is not None:
			print(biggest.text)
			#print(biggest.contour)
			cv2.drawContours(img, biggest.contour, -1, (0, 255, 0), 3)
			#cv2.imshow("oooo",img)
			#cv2.waitKey(0)
			if biggest.text != "pink":
				self.saveImg(img,biggest.text)
			elif biggest.text == "lol":
				x,y,w,h = cv2.boundingRect(biggest.contour)
				sliced = hsv[x:x+w,y:y+h,:]
				hsvTest = cv2.calcHist(sliced,[0,1],None,[180,256],ranges)
				racecarVal = cv2.compareHist(hsvTest,self.racecar,cv2.cv.CV_COMP_CORREL)
				ariVal = cv2.compareHist(hsvTest,self.ari,cv2.cv.CV_COMP_CORREL)
				sertacVal = cv2.compareHist(hsvTest,self.sertac,cv2.cv.CV_COMP_CORREL)
				catVal = cv2.compareHist(hsvTest,self.cat,cv2.cv.CV_COMP_CORREL)
				maxVal = max(racecarVal,ariVal,sertacVal,catVal)
				if maxVal == racecarVal:
					self.saveImg(img,"racecar")
				elif maxVal == ariVal:
					self.saveImg(img,"ari")
				elif maxVal == sertacVal:
					self.saveImg(img,"sertac")
				else:
					self.saveImg(img,"cat")
Code Example #7
    def returnHistogramComparison(self, hist_1, hist_2, method='intersection'):
        """Return the comparison value of two histograms.

        Comparing a histogram with itself returns 1.
        @param hist_1
        @param hist_2
        @param method the comparison method.
            intersection: (default) the histogram intersection (Swain, Ballard)
            correlation, chisqr and bhattacharyya are also supported.
        """
        if cv2.__version__.split(".")[0] == '3':
            if(method=="intersection"):
                comparison = cv2.compareHist(hist_1, hist_2, cv2.HISTCMP_INTERSECT)
            elif(method=="correlation"):
                comparison = cv2.compareHist(hist_1, hist_2, cv2.HISTCMP_CORREL)
            elif(method=="chisqr"):
                comparison = cv2.compareHist(hist_1, hist_2, cv2.HISTCMP_CHISQR)
            elif(method=="bhattacharyya"):
                comparison = cv2.compareHist(hist_1, hist_2, cv2.HISTCMP_BHATTACHARYYA)
            else:
                raise ValueError('[DEEPGAZE] color_classification.py: the method specified ' + str(method) + ' is not supported.')
        else:
            if(method=="intersection"):
                comparison = cv2.compareHist(hist_1, hist_2, cv2.cv.CV_COMP_INTERSECT)
            elif(method=="correlation"):
                comparison = cv2.compareHist(hist_1, hist_2, cv2.cv.CV_COMP_CORREL)
            elif(method=="chisqr"):
                comparison = cv2.compareHist(hist_1, hist_2, cv2.cv.CV_COMP_CHISQR)
            elif(method=="bhattacharyya"):
                comparison = cv2.compareHist(hist_1, hist_2, cv2.cv.CV_COMP_BHATTACHARYYA)
            else:
                raise ValueError('[DEEPGAZE] color_classification.py: the method specified ' + str(method) + ' is not supported.')
        return comparison
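The if/elif ladder above can also be written as a lookup table; a minimal sketch for OpenCV 3+ only, using the same documented cv2.HISTCMP_* constants (the function name here is hypothetical):

import cv2

# assumed table-driven variant of the dispatch above (OpenCV 3+ only)
HIST_METHODS = {
    "intersection": cv2.HISTCMP_INTERSECT,
    "correlation": cv2.HISTCMP_CORREL,
    "chisqr": cv2.HISTCMP_CHISQR,
    "bhattacharyya": cv2.HISTCMP_BHATTACHARYYA,
}

def compare_histograms(hist_1, hist_2, method="intersection"):
    if method not in HIST_METHODS:
        raise ValueError("the method specified " + str(method) + " is not supported.")
    return cv2.compareHist(hist_1, hist_2, HIST_METHODS[method])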
Code Example #8
def hist_measure(all_tf, subsampled_tf, plot_fig = False):
    sorted_all_tf = sorted(all_tf.items(), key=operator.itemgetter(1)) # list of (term, count) tuples sorted by count
 
    # create subsampled histogram
    sub_hist = []
    all_hist = []
    for item in sorted_all_tf:
        all_hist += [item[1]]
        if item[0] in subsampled_tf:
            sub_hist += [subsampled_tf[item[0]]]
        else:
            sub_hist += [0]

    # correlation
    all_array = np.array(all_hist)
    sub_array = np.array(sub_hist)
    all_array = all_array / (np.sum(all_array) * 1.0)
    sub_array = sub_array / (np.sum(sub_array) * 1.0)
  
    all_hist_cv = all_array.ravel().astype('float32') 
    sub_hist_cv = sub_array.ravel().astype('float32') 
     
    correlation_score =  cv2.compareHist(all_hist_cv, sub_hist_cv, cv2.cv.CV_COMP_CORREL)
    chisq_dist = cv2.compareHist(all_hist_cv, sub_hist_cv, cv2.cv.CV_COMP_CHISQR)
    l1_dist = L1_dist(all_array, sub_array)
    cos_sim = cos_similarity(all_array, sub_array)
    '''
    print 'Correlation (higher-> similar) :', cv2.compareHist(all_hist_cv, sub_hist_cv, cv2.cv.CV_COMP_CORREL)
    print 'Chi-square (smaller-> similar) :', cv2.compareHist(all_hist_cv, sub_hist_cv, cv2.cv.CV_COMP_CHISQR)
    print 'L1 distance (smaller -> similar):', L1_dist(all_array, sub_array)
    '''
    if plot_fig:
        ax = plt.subplot(2,1,1)
        plt.plot(range(len(all_hist)), all_array, 'o-r', label = 'Nonsubsampled') 
        plt.plot(range(len(sub_hist)), sub_array, 'x-b', label = 'Subsampled') 
        plt.plot(range(len(all_hist)), abs(all_array - sub_array), 'o-k', label= 'L1 dist')
        plt.legend()
        plt.xlabel('Word Index')
        plt.ylabel('Word Count (#)')
        ax.set_title('Histogram')
        
        ax = plt.subplot(2,1,2)
        plt.plot(sub_hist, all_hist, 'o')
        plt.xlabel('Subsampled Word Count (#)')
        plt.ylabel('Nonsubsampled Word Count (#)')
        ax.set_title('Correlation')
        plt.show()

    return l1_dist, correlation_score, chisq_dist, cos_sim
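L1_dist and cos_similarity are referenced but not defined in this snippet; plausible implementations (assumptions, not the project's actual code):

import numpy as np

def L1_dist(a, b):
    # assumed helper: L1 distance between two normalized histograms
    return np.sum(np.abs(a - b))

def cos_similarity(a, b):
    # assumed helper: cosine similarity between two histograms
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))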
Code Example #9
File: histogram.py Project: FilippoC/pke
def get_shots(video, threshold):
    if len(video) == 0:
        return [], []
    if len(video) == 1:
        return [video], []

    shots = [Video(frames = [video[0]])]
    function = []
    
    for i in xrange(1, len(video)):
        previous = video[i - 1].getHistogram()
        current = video[i].getHistogram()

        t = 0
        for ti in range(len(current)):
            t += cv2.compareHist(previous[ti], current[ti], cv.CV_COMP_CORREL)
        t = t / len(current)

        function.append(t)

        if t < threshold:
            shots.append(Video(frames = [video[i]]))
        else:
            shots[-1].addFrame(video[i])

    return shots, function
Code Example #10
    def object_comparisson(self, roi):
        roi_hist = self.calculate_histogram(roi)

        # Take the object's histogram to compare against
        obj_hist = self.saved_object_comparisson()

        return cv2.compareHist(roi_hist, obj_hist, cv2.cv.CV_COMP_BHATTACHARYYA)
Code Example #11
File: Features.py Project: akash0x53/slise
 def __eq__(self,another_histo):
     difference=cv2.compareHist(self.hist[0],another_histo.hist[0],cv2.cv.CV_COMP_CORREL)

     difference*=100
     return difference>=__THRESH_VAL__
Code Example #12
File: similarity.py Project: bbcdli/xuexi
def similarness(image1,image2):
    """
Return the correlation distance be1tween the histograms. This is 'normalized' so that
1 is a perfect match while -1 is a complete mismatch and 0 is no match.
"""
    # Open and resize images to 200x200
    i1 = Image.open(image1).resize((200,200))
    i2 = Image.open(image2).resize((200,200))

    # Get histogram and separate into RGB channels
    i1hist = numpy.array(i1.histogram()).astype('float32')
    i1r, i1b, i1g = i1hist[0:256], i1hist[256:256*2], i1hist[256*2:]
    # Re-bin each 256-bin channel histogram down to 16 bins (48 bins in total)
    i1rh = numpy.array([sum(i1r[i*16:16*(i+1)]) for i in range(16)]).astype('float32')
    i1bh = numpy.array([sum(i1b[i*16:16*(i+1)]) for i in range(16)]).astype('float32')
    i1gh = numpy.array([sum(i1g[i*16:16*(i+1)]) for i in range(16)]).astype('float32')
    # Combine all the channels back into one array
    i1histbin = numpy.ravel([i1rh, i1bh, i1gh]).astype('float32')

    # Same steps for the second image
    i2hist = numpy.array(i2.histogram()).astype('float32')
    i2r, i2b, i2g = i2hist[0:256], i2hist[256:256*2], i2hist[256*2:]
    i2rh = numpy.array([sum(i2r[i*16:16*(i+1)]) for i in range(16)]).astype('float32')
    i2bh = numpy.array([sum(i2b[i*16:16*(i+1)]) for i in range(16)]).astype('float32')
    i2gh = numpy.array([sum(i2g[i*16:16*(i+1)]) for i in range(16)]).astype('float32')
    i2histbin = numpy.ravel([i2rh, i2bh, i2gh]).astype('float32')

    return cv2.compareHist(i1histbin, i2histbin, 0)
Code Example #13
File: shot_redundancy.py Project: FilippoC/pke
def get_post(shots, n):
    px = 10
    py = 10
    threshold = 0.4
    nb_parts = 30

    for i in range(2, len(shots)):
        first = shots[i][0].getPartHistograms(px,py)
        
        for m in range(n):
            j = i - 2 - m
            if j < 0:
                break

            last = shots[j][-1].getPartHistograms(px, py)

            
            t = 0
            for ti in range(len(first)):
                v = cv2.compareHist(last[ti], first[ti], cv.CV_COMP_CORREL)
                if v < threshold:
                    t += 1

            if t > nb_parts:
                if len(shots[i]) > 1:
                    shots[i] = shots[i][1:]
                else:
                    if len(shots[j]) == 1:
                        shots[i] = []
                break

    return shots
Code Example #14
def extraerVecinos(imagen1,capitales):
  listaImagenes=[]
  try:
     imagenes = devolverImagenesCapitales(imagen1,capitales)
     img1 = cv2.imread( 'Fotos/'+imagen1 );
     v = cv2.calcHist([img1], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256]) # Compute the image histogram
     v = v.flatten()
     hist1 = v / sum(v)
     dictSumas ={}
     for imagen2 in imagenes:
        if not imagen2==imagen1:
          try:
            img2 = cv2.imread( 'Fotos/'+imagen2);
            v = cv2.calcHist([img2], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256]) # Compute the image histogram
            v = v.flatten()
            hist2 = v / sum(v)
            d = cv2.compareHist( hist1, hist2, cv2.cv.CV_COMP_INTERSECT) # Compute image similarity
            dictSumas[imagen2] = d             
          except: 
             print "Error"          
     dictSumas = dictSumas.items()
     dictSumas.sort(lambda x,y:cmp(y[1], x[1]))
     i=0
     while i<K: # Return the K most similar images.
        listaImagenes.append(dictSumas[i][0]) 
        i=i+1 
  except: 
        print "Error"
  return listaImagenes
Code Example #15
File: mean_shift.py Project: DakotaNelson/robo-games
 def calculate_weights(self):
     """Iterates over x and y starting at the frame radius by dx and dy
     until the window size. At each 'coordinate', computes an object around
     it, computes a histogram, and calculates a weight by comparing that
     histogram to the original frame. Subtracts the normalized distance
     from the object of the original frame, and creates a list of all of
     these new weights, saved as a numpy array.
     """
     weights = []
     for x in range(frame.rad, frame.window_x, frame.dx):
         for y in range(frame.rad, frame.window_y, frame.dy):
             obj = new_frame.create_general_object(x,y)
             hist = new_frame.create_general_hist(obj)
             # compare histograms to find weight
             weight = cv2.compareHist(frame.hist, hist, method=cv2.cv.CV_COMP_CORREL)
             # find distance away from old point, and normalize by max distance
             max_distance = float(self.find_hypotenuse(frame.window_x, frame.window_y))
             distance = self.find_hypotenuse(x-frame.x, y-frame.y) / max_distance
             # subtract normalized distance from weight
             weight = weight - distance
             # make sure no weights are negative
             if weight < 0:
                 weight = 0
             # append weights to array
             weights.append(weight)
     self.weights = np.array(weights)
Code Example #16
File: flann_index.py Project: AlonePar/image_space
def run(query, k=10, mode='bruteforce'):
    if query.startswith('['):
        vec = json.loads(query)
        vec = np.array(vec, dtype=np.float32).reshape(1, 512)
    elif query in image_map:
        vec = image_map[query].reshape(1, 512)
    else:
        return {'error': 'Could not find data for image: ' + query}

    images = []
    if mode == 'bruteforce':
        hist = vec.reshape(512)
        dists = []
        for (i, file) in enumerate(image_files):
            dist = cv2.compareHist(hist, image_map[file], cv2.cv.CV_COMP_INTERSECT)
            dists.append((dist, i))

        top = sorted(dists, reverse=True)[:int(k)]

        for distance, index in top:
            images.append({
                'id': image_files[index],
                'features': image_map[image_files[index]].tolist(),
                'distance': distance
            })

    return images
Code Example #17
File: face_detection.py Project: smart-cam/smart-cam
    def __get_uniq_faces_curr_frame(self, frame_id, faces_roi_hists_prev, faces_roi_hists):
        faces_prev = len(faces_roi_hists_prev)
        faces_curr = len(faces_roi_hists)
        logger.info("[{0}] Face Similarity: Prev: {1}, Curr: {2}".format(frame_id, faces_prev, faces_curr))
        # First Time
        if faces_prev == 0:
            return faces_curr

        # Current frame has more faces than prev frame
        # if faces_curr > faces_prev:
        #    return faces_curr - faces_prev

        uniq_faces_curr_frame = 0

        # Perform Image Histogram Similarity
        # For each histogram in current frame
        for rh1 in faces_roi_hists:
            match_found = False
            # For each histogram in previous frame
            for rh2 in faces_roi_hists_prev:
                # print "\nrh1 {0}: {1}".format(type(rh1),np.shape(rh1))
                # print "\nrh2 {0}: {1}".format(type(rh2),np.shape(rh2))
                corr = cv2.compareHist(rh1, rh2, cv2.HISTCMP_CORREL)
                logger.info("[{0}] Similarity Metrics: {1}".format(frame_id, corr))
                if corr >= 0.35:
                    # Match Found, can break the loop for this histogram in current frame
                    match_found = True
                    break
            # Add to unique face count, if no match found for this histogram in current frame
            if not match_found:
                uniq_faces_curr_frame += 1

        logger.info("[{0}] Total Unique Faces in Current Frame: {1}".format(frame_id, uniq_faces_curr_frame))
        return uniq_faces_curr_frame
Code Example #18
File: searcher.py Project: backman-git/fooder-server
	def similarity(self, f1,f2):
		totalD=0
		
		for hist1,hist2 in zip(f1,f2):
			totalD+=cv2.compareHist(hist1,hist2,0 )
			
		return totalD
Code Example #19
def sliding_window(image,r1,r2,step,roihist):
    test_path = "/home/sarbajit/PyCharm_Scripts/test/green_pad_same_name_new/final_rotated2/"
    item = image
    if item.endswith(".png") or item.endswith(".PNG"):
        x = test_path+item
        target2 = cv2.imread(x)
        target = target2[90:150, 90:520]
        (winW, winH) = (50, 30)
        for (x, y, window) in sliding_window_test(target,r1,r2,stepSize=step, windowSize=(winW, winH)):
            # if the window does not meet our desired window size, ignore it
            if window.shape[0] != winH or window.shape[1] != winW:
                continue
            #this section does the histogram backprojected matching window by window.
            hsvt = cv2.cvtColor(window, cv2.COLOR_BGR2HSV)
            inputImage = cv2.calcHist([hsvt], [0, 1], None, [180, 256], [0, 180, 0, 256])
            cv2.normalize(roihist, roihist, 0, 255, cv2.NORM_MINMAX)
            dst = cv2.calcBackProject([hsvt], [0, 1], roihist, [0, 180, 0, 256], 1)
            match = cv2.compareHist(roihist, inputImage, method=0)
            print(match)
            #the match is printed to see the difference and jumps when the window moves through the landing pad

            # THIS IS WHERE YOU WOULD PROCESS YOUR WINDOW,AND DO THE NECESSARY STEPS

            # we'll just draw the window and show the results
            clone = target.copy()
            cv2.rectangle(clone, (x, y), (x + winW, y + winH), (0, 255, 0), 2)
            cv2.imshow("window", clone)
            cv2.waitKey(1)
            time.sleep(1)

# sliding_window('2015-08-06_06-27-48.png',28,58,30)
Code Example #20
File: bof_search.py Project: neoden/bof
def main():
    dict_path = TEMP_DIR + sys.argv[1]
    dictionary = pickle.load(open(dict_path, 'rb'))

    index_path = TEMP_DIR + sys.argv[2]
    index = pickle.load(open(index_path, 'rb'))

    image_path = IMAGES_DIR + sys.argv[3]
    image_des = describe(image_path, dictionary)

    result_path = TEMP_DIR + sys.argv[4]
    clear_directory(result_path)

    result = []

    for path, des in index.items():
        likelyhood = cv2.compareHist(image_des, des, cv2.cv.CV_COMP_CHISQR)
        result.append((likelyhood, path))

    best_match = sorted(result, key=lambda x: x[0])[:NUM_MATCHES]
    for rank, i in enumerate(best_match):
        likelyhood, path = i
        os.symlink(path, result_path + '/{}.jpeg'.format(rank))

    pprint.pprint(best_match)
Code Example #21
File: sort.py Project: stonezuohui/faceswap
    def sort_hist_dissim(self):
        """ Sort by histigram of face dissimilarity """
        input_dir = self.args.input_dir

        logger.info("Sorting by histogram dissimilarity...")

        img_list = [
            [img,
             cv2.calcHist([cv2.imread(img)], [0], None, [256], [0, 256]), 0]
            for img in
            tqdm(self.find_images(input_dir), desc="Loading", file=sys.stdout)
        ]

        img_list_len = len(img_list)
        for i in tqdm(range(0, img_list_len), desc="Sorting", file=sys.stdout):
            score_total = 0
            for j in range(0, img_list_len):
                if i == j:
                    continue
                score_total += cv2.compareHist(img_list[i][1],
                                               img_list[j][1],
                                               cv2.HISTCMP_BHATTACHARYYA)

            img_list[i][2] = score_total

        logger.info("Sorting...")
        img_list = sorted(img_list, key=operator.itemgetter(2), reverse=True)

        return img_list
Code Example #22
File: treXton.py Project: Pold87/treXton
def match_histograms(query_histogram, location_histogram, weights=None):

    """
    Match query histogram with location histogram and return the
    distance. To do this, it needs a distance measurement.
    """
    
    # TODO: I could use the distance function as a parameter


    #dist = np.linalg.norm(query_histogram - location_histogram)
    #dist = spatial.distance.cosine(query_histogram, location_histogram)

    dist = cv2.compareHist(np.float32(query_histogram), np.float32(location_histogram), 4)  # method 4 = cv2.HISTCMP_CHISQR_ALT
    #dist = JSD(np.float32(query_histogram), np.float32(location_histogram))
    #_, dist = stats.ks_2samp(query_histogram, location_histogram)
    #dist = -dist
 #   dist = cv2.EMD(np.float32(query_histogram), np.float32(location_histogram), 3)
    #dist=  stats.entropy(np.float32(query_histogram), np.float32(location_histogram))

    #f = np.float64([1] * len(query_histogram))
    #s = np.float64([1] * len(query_histogram))
    
    #dist = emd.emd(np.float64(query_histogram), np.float64(location_histogram), f, s)
    
    return dist
Code Example #23
File: selectors.py Project: davtoh/RRtools
def hist_comp(imlist, loadfunc=None, method="correlation"):
    """
    Histogram comparison

    :param imlist: list of paths to images or arrays
    :param loadfunc: optional function used to load an image from a path
    :param method: comparison method name, looked up in hist_map
    :return: list of (score, image) pairs sorted by score
    """
    # http://www.pyimagesearch.com/2014/07/14/3-ways-compare-histograms-using-opencv-python/
    #image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # assert(len(fns)>=2) # no images to compare. There must be 2 or more
    def helper(im, loadfunc=loadfunc):
        if isinstance(im, basestring):
            if loadfunc is None:
                def loadfunc(im):
                    return cv2.imread(im)
            im = loadfunc(im)  # read BGR image
        return im
    method, reverse = hist_map[method]
    comp, comparison = None, []
    for i, im in enumerate(imlist):  # for each image get data
        hist = cv2.calcHist([helper(im)], [0, 1, 2], None, [
                            8, 8, 8], [0, 256, 0, 256, 0, 256])
        hist = cv2.normalize(hist).flatten()
        if i == 0:
            comp = hist
        comparison.append((cv2.compareHist(comp, hist, method), im))

    comparison.sort(key=lambda x: x[0], reverse=reverse)  # sort comparisons
    return comparison
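hist_map is imported from elsewhere in the project; it presumably maps a method name to an (OpenCV flag, sort-descending?) pair, along these lines (an assumption, not the project's actual table):

import cv2

# assumed shape of hist_map: name -> (compareHist flag, reverse sort?)
hist_map = {
    "correlation": (cv2.HISTCMP_CORREL, True),        # larger = more similar
    "chi-squared": (cv2.HISTCMP_CHISQR, False),       # smaller = more similar
    "intersection": (cv2.HISTCMP_INTERSECT, True),    # larger = more similar
    "bhattacharyya": (cv2.HISTCMP_BHATTACHARYYA, False),
}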
Code Example #24
File: sort.py Project: stonezuohui/faceswap
    def sort_hist(self):
        """ Sort by histogram of face similarity """
        input_dir = self.args.input_dir

        logger.info("Sorting by histogram similarity...")

        img_list = [
            [img, cv2.calcHist([cv2.imread(img)], [0], None, [256], [0, 256])]
            for img in
            tqdm(self.find_images(input_dir), desc="Loading", file=sys.stdout)
        ]

        img_list_len = len(img_list)
        for i in tqdm(range(0, img_list_len - 1), desc="Sorting",
                      file=sys.stdout):
            min_score = float("inf")
            j_min_score = i + 1
            for j in range(i + 1, len(img_list)):
                score = cv2.compareHist(img_list[i][1],
                                        img_list[j][1],
                                        cv2.HISTCMP_BHATTACHARYYA)
                if score < min_score:
                    min_score = score
                    j_min_score = j
            (img_list[i + 1],
             img_list[j_min_score]) = (img_list[j_min_score],
                                       img_list[i + 1])
        return img_list
Code Example #25
File: example.py Project: openstax/test-automation
    def Harris_Corner(self):
        self.threshold = 0.999999999999
        temp_i = self.image_i.copy()
        temp1_i = self.image_i.copy()
        gray_i = cv2.cvtColor(temp_i, cv2.COLOR_BGR2GRAY)
        gray_i = numpy.float32(gray_i)
        dst_i = cv2.cornerHarris(gray_i, 2, 3, 0.025)
        dst_i = cv2.dilate(dst_i, None)
        # Threshold for an optimal value, it may vary depending on the image.
        temp_i[dst_i < 0.01 * dst_i.max()] = [0, 0, 0]
        temp1_i[dst_i > 0.01 * dst_i.max()] = [0, 0, 255]
        hist_i = cv2.calcHist([temp_i], [0], None, [256], [0, 256])
        temp_j = self.image_j.copy()
        temp1_j = self.image_j.copy()
        gray_j = cv2.cvtColor(temp_j, cv2.COLOR_BGR2GRAY)
        gray_j = numpy.float32(gray_j)
        dst_j = cv2.cornerHarris(gray_j, 2, 3, 0.025)
        dst_j = cv2.dilate(dst_j, None)
        # Threshold for an optimal value, it may vary depending on the image.
        temp_j[dst_j < 0.01 * dst_j.max()] = [0, 0, 0]
        temp1_j[dst_j > 0.01 * dst_j.max()] = [0, 0, 255]
        hist_j = cv2.calcHist([temp_j], [0], None, [256], [0, 256])

        self.measure = cv2.compareHist(hist_i, hist_j, cv.CV_COMP_CORREL)
        self.assertGreater(self.measure, self.threshold)

        print(self.measure)
Code Example #26
File: fitness.py Project: gablank/UNIK4690
def histogram_compare(img, params):
    resolution = 100
    h1 = cv2.calcHist([img], [0], params.fg_mask, [resolution], [0, 1.0])
    h2 = cv2.calcHist([img], [0], params.bg_mask, [resolution], [0, 1.0])
    cv2.normalize(h1, h1, alpha=1, norm_type=1)
    cv2.normalize(h2, h2, alpha=1, norm_type=1)
    return cv2.compareHist(h1, h2, 0)
Code Example #27
def show_ten_quantized_closest(frame_data,frame_block_dict,target_frame_number, description ):
    target_frame_block_hist_dict = frame_block_dict[target_frame_number]
    top_ten_frames = list()

    print("Comparing frames...")
    for keyA in frame_block_dict:
        if keyA == target_frame_number:  #dont compare the frame against itself
            continue
        else:
            frame_score = float(0)
            for keyB in frame_block_dict[keyA]:
                block_hist = frame_block_dict[keyA][keyB[0],keyB[1]]
                frame_score += cv2.compareHist(target_frame_block_hist_dict[keyB[0],keyB[1]],block_hist, 2) #Intersection compare

        top_ten_frames.append((keyA, frame_score))

    top_ten_frames.sort(key=lambda tup: tup[1])  # sorts in place
    top_ten_frames.reverse()

    top_ten_frame_indexes = list((x[0] for x in top_ten_frames))    # just need the frame number, not the diff, so we strip that out
    top_ten_frame_values = list((x[1] for x in top_ten_frames))
    for i in range(0,10):
        # For each of those frame numbers, display the image in a window with the number
        index = top_ten_frame_indexes[i]
        score = top_ten_frame_values[i]
        rgb_target = cv2.cvtColor(frame_data[index-1].astype(np.uint8), cv2.COLOR_GRAY2BGR)
        frame_description = description + ' #' + str(i + 1) + ': frame ' + str(index) + '. Score: ' + str(score)
        print(frame_description)
        cv2.imshow(frame_description, rgb_target)
        cv2.waitKey(0)

    cv2.destroyAllWindows()
    return
Code Example #28
    def do_comparison(self, out):
        alsum = sorted(self.method_composites['alSum'].items(), key=lambda x: x[0])
        random = sorted(self.method_composites['random'].items(), key=lambda x: x[0])
        temporal = sorted(self.method_composites['temporalInterval'].items(), key=lambda x: x[0])
        for ((asite, acomp), (rsite, rcomp)), (tsite, tcomp) in zip(zip(alsum, random), temporal):
            print(acomp, rcomp, tcomp)

            aHist = gethistogram(cv2.imread(self.path + acomp, cv2.IMREAD_COLOR))
            rHist = gethistogram(cv2.imread(self.path + rcomp, cv2.IMREAD_COLOR))
            tHist = gethistogram(cv2.imread(self.path + tcomp, cv2.IMREAD_COLOR))
            arSim = math.fabs(cv2.compareHist(aHist, rHist, cv2.HISTCMP_CORREL))
            atSim = math.fabs(cv2.compareHist(aHist, tHist, cv2.HISTCMP_CORREL))
            out.write("%s,%f,%f\n" % (asite, arSim, atSim))
            del aHist
            del rHist
            del tHist
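gethistogram is not shown in this snippet; a minimal sketch consistent with how its output is fed to cv2.compareHist (a hypothetical helper, 8 bins per BGR channel assumed):

import cv2

def gethistogram(img):
    # assumed helper: flattened, normalized 3-D BGR histogram
    hist = cv2.calcHist([img], [0, 1, 2], None, [8, 8, 8],
                        [0, 256, 0, 256, 0, 256])
    return cv2.normalize(hist, hist).flatten()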
Code Example #29
	def Compare_similarity(self, refBox, cropBox):
		# Compute the similarity between two ROIs
		# Param 	refBox 	: ROI for reference
		# Param 	cropBox : ROI to compare against
		H1 = cv2.normalize(cv2.calcHist([refBox],  [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])).flatten()
		H2 = cv2.normalize(cv2.calcHist([cropBox], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])).flatten()
		return cv2.compareHist(H1, H2, cv2.cv.CV_COMP_CORREL)
Code Example #30
File: image_proc.py Project: patoliya/falco_server
def getMatches():
    init()
    results = {}
    reverse = False

    # # if we are using the correlation or intersection
    # # method, then sort the results in reverse order
    # if methodName in ("Correlation", "Intersection"):
    #     reverse = True

    # loop over the index
    for (k, hist) in index.items():
        # compute the distance between the two histograms
        # using the method and update the results dictionary
        d = cv2.compareHist(index["doge.png"], hist, method)
        results[k] = d

    # sort the results
    results = sorted([(v, k) for (k, v) in results.items()], reverse=reverse)

    print("the method is :----" + methodName)

    sorted_list = []
    # loop over the results
    for (i, (v, k)) in enumerate(results):
        # show the result
        sorted_list.append(str(k))
        # print sorted_list(i)
        print("name : " + str(k) + " value :" + str(v))
    return sorted_list[2:]
Code Example #31
File: Q3Support.py Project: tavein/Recognition
 def matches(self,frame,rects):
     cal=lambda x:Match.calHist(frame,x)
     hists=list(map(cal,rects))
     print(list(map(lambda x:cv2.compareHist(x,self.hist,cv2.HISTCMP_BHATTACHARYYA),hists)))
Code Example #32
File: main.py Project: Tossy0423/A2S4RM
def main():
    """
        Description:
        ----------

        Parameter:
        ----------
        * None

        Return:
        ----------
        * None
    """

    # ===== Load the standard histogram data ===== #
    _img_hist_stand_red = cv2.imread("./hist_standard/R-1.jpg", 1)
    _img_hist_stand_red_hsv = cv2.cvtColor(_img_hist_stand_red,
                                           cv2.COLOR_BGR2HSV)
    _img_hist_stand_red_h = cv2.calcHist([_img_hist_stand_red_hsv], [0], None,
                                         [180], [0, 180])
    _img_hist_stand_red_h_w, _img_hist_stand_red_h_h, _img_hist_stand_red_h_ch = _img_hist_stand_red.shape[:3]

    # plt.hist(_img_hist_stand_red_h.ravel(), 256, [0, 256])
    # plt.hist(_img_hist_stand_red_h, 256, [0, 256])
    # plt.show()

    # cv2.namedWindow("hist", cv2.WINDOW_NORMAL)
    # cv2.imshow("hist", _img_hist_stand_red)

    # ===== Load the image ===== #
    _img_src = cv2.imread("./img_data/temp1.jpg", 1)
    print("size={}".format(_img_src.shape))
    # Resize the image
    _img_src = cv2.resize(_img_src, (1280, 720))

    while True:

        time_start = time.time()

        # cv2.namedWindow("_img_src", cv2.WINDOW_NORMAL)
        # cv2.imshow("_img_src", _img_src)

        # ===== Convert to grayscale ===== #
        """
            Look for regions brighter than a fixed threshold
        """
        # Convert from the BGR color space to gray
        _img_gray = cv2.cvtColor(_img_src, cv2.COLOR_BGR2GRAY)
        # cv2.namedWindow("_img_gray", cv2.WINDOW_NORMAL)
        # cv2.imshow("_img_gray", _img_gray)

        ret, _img_thresh = cv2.threshold(_img_gray, 220, 255,
                                         cv2.THRESH_TOZERO)
        # cv2.namedWindow("_img_thresh", cv2.WINDOW_NORMAL)
        # cv2.imshow("_img_thresh", _img_thresh)

        # ===== Morphology ===== #
        _ERODE_KERNEL = np.ones((3, 3), np.uint8)
        """
        _img_erode_dst = cv2.erode(img_thresh, _ERODE_KERNEL, iterations = 1)    
        cv2.namedWindow("_img_erode_dst", cv2.WINDOW_NORMAL)
        cv2.imshow("_img_erode_dst", _img_erode_dst)
        """
        _img_opening_dst = cv2.morphologyEx(_img_thresh, cv2.MORPH_OPEN,
                                            _ERODE_KERNEL)
        cv2.namedWindow("_img_opening", cv2.WINDOW_NORMAL)
        cv2.imshow("_img_opening", _img_opening_dst)

        # ===== Labeling ===== #
        """
            - centroid coordinates
            - size
        """
        _debug_src = _img_src.copy()

        num, _data = ELP.expansion_labeling_prcessing(_img_opening_dst,
                                                      [50, 2000], [0.0, 1.0],
                                                      [False, False],
                                                      _debug_src)
        print(num)

        # ===== Histogram intersection ===== #
        """
            - Crop the bright regions found by the labeling step.
            - For each region, compare its histogram against the base histogram
              of the enemy team's color.
        """

        img_trim = _img_src.copy()
        img_hist_dst = _img_src.copy()

        img_armer_trim = [0 for i in range(0, num)]
        # print(img_armer_trim)

        _hist_object = np.zeros((num, 1), dtype=np.float64)

        hist_armer_trim = [0 for i in range(0, num)]

        for i in range(0, num):

            # trim the region
            img_armer_trim[i] = img_trim[int(_data[i][5]):int(_data[i][5] +
                                                              _data[i][7]),
                                         int(_data[i][4]):int(_data[i][4] +
                                                              _data[i][6])]

            # resize
            img_armer_trim[i] = cv2.resize(
                img_armer_trim[i],
                (_img_hist_stand_red_h_w, _img_hist_stand_red_h_h))
            # print("hist w={}, h={}".format(_img_hist_stand_red_h_w, _img_hist_stand_red_h_h))
            # print("hist w={}, h={}".format(_img_hist_stand_red_h_w, _img_hist_stand_red_h_h))

            # convert to hsv
            img_armer_trim[i] = cv2.cvtColor(img_armer_trim[i],
                                             cv2.COLOR_BGR2HSV)

            # get hist
            hist_armer_trim[i] = cv2.calcHist([img_armer_trim[i]], [0], None,
                                              [180], [0, 180])

            # compare hist
            _hist_object[i] = cv2.compareHist(hist_armer_trim[i],
                                              _img_hist_stand_red_h,
                                              cv2.HISTCMP_CORREL)
            print("detected object{}={}".format(i, _hist_object[i]))

            if (0.05 <= _hist_object[i]):
                img_hist_dst = cv2.rectangle(
                    img_hist_dst, (int(_data[i][4]), int(_data[i][5])),
                    (int(_data[i][4] + _data[i][6]),
                     int(_data[i][5] + _data[i][7])), (0, 255, 0), 3)

            cv2.namedWindow("img_hist_dst", cv2.WINDOW_NORMAL)
            cv2.imshow("img_hist_dst", img_hist_dst)

        # ===== Aiming at the armor plate ===== #
        """
            - To find the LEDs on both sides of the armor plate, look for regions with
              the same shape (aspect ratio, etc.) separated by a fixed pixel distance.
            - If a pair satisfies those conditions, aim at the centroid between them.
        """

        # ===== Get the opponent's robot number ===== #
        """
            - Once the armor plate is located, crop just the plate and read its number,
                either with OCR or template matching (the latter is probably faster).
        """

        # time.sleep(1.0)

        time_end = time.time()

        print("[ProcessTime] {}[s], {}[FPS]".format(
            time_end - time_start, 1 / (time_end - time_start)))

        # ===== key Event ===== #
        _key = cv2.waitKey(1)

        if (_key):
            if (_key == ord("q")):
                print("finish")
                break

    cv2.destroyAllWindows()
Code Example #33
def upload():
    shutil.rmtree(SAVE_DIR)
    os.mkdir(SAVE_DIR)

    # if request.method == 'POST':
    # name = request.form.get('names')
    name = request.form['names']

    name = str(name)
    valid_names = ('beige', 'black', 'blue', 'brown', 'check', 'gray', 'green',
                   'multi_tone', 'pink', 'red', 'Polka_dot', 'yellow', 'actress')
    SUB_DIR = name + '/' if name in valid_names else 'unicro/'

    # # Handle the case where no file was submitted
    # if 'file' not in request.files:
    #     flash('No file found','failed')
    #     return redirect(request.url)
    # file = request.files['image']
    #             # Check the file
    # if file and allowed_file(file.filename):
    # Remove dangerous characters (sanitize)
    # filename = secure_filename(file.filename)
    # Read as an image
    stream = request.files['image'].stream
    img_array = np.asarray(bytearray(stream.read()), dtype=np.uint8)
    img = cv2.imdecode(img_array, 1)
    img_size = (200, 200)
    channels = (0, 1, 2)
    mask = None
    hist_size = 256
    ranges = (0, 256)
    ret = {}

    if SUB_DIR != 'actress/':
        target_img = img
        target_img = cv2.resize(target_img, img_size)
        # if target_img and allowed_file(target_img.filename):
        #     filename = secure_filename(target_img.filename)
        #     target_img.save(os.path.join('/uploads', filename))
        #     img_url = '/uploads/' + filename

        comparing_files = os.listdir(IMG_DIR + SUB_DIR)

        if len(comparing_files) == 0:
            sys.exit(1)

        for comparing_file in comparing_files:
            if comparing_file == '.DS_Store':
                continue

            tmp = []
            if not comparing_file.endswith(('.png', '.jpg', '.jpeg')):
                continue

            for channel in channels:
                target_hist = cv2.calcHist([target_img], [channel], mask,
                                           [hist_size], ranges)
                comparing_img = cv2.imread(IMG_DIR + SUB_DIR + comparing_file)

                comparing_img = cv2.resize(comparing_img, img_size)

                # calc hist of comparing image
                comparing_hist = cv2.calcHist([comparing_img], [channel], mask,
                                              [hist_size], ranges)

                # compare hist
                tmp.append(cv2.compareHist(target_hist, comparing_hist, 0))

            # mean hist
            ret[comparing_file] = mean(tmp)

            # sort

    #####################################

    if SUB_DIR == 'actress/':

        # if img and allowed_file(img.filename):
        #     filename = secure_filename(img.filename)
        #     img.save(os.path.join('/uploads', filename))
        #     img_url = '/uploads/' + filename

        target_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        target_img = cv2.resize(target_img, img_size)

        bf = cv2.BFMatcher(cv2.NORM_HAMMING)
        # detector = cv2.ORB_create()
        detector = cv2.AKAZE_create()
        (_, target_des) = detector.detectAndCompute(target_img, None)

        comparing_files = os.listdir(IMG_DIR + SUB_DIR)

        for comparing_file in comparing_files:
            if comparing_file == '.DS_Store':
                continue

            comparing_img_path = IMG_DIR + SUB_DIR + comparing_file
            try:
                comparing_img = cv2.imread(comparing_img_path,
                                           cv2.IMREAD_GRAYSCALE)
                comparing_img = cv2.resize(comparing_img, img_size)
                (_, comparing_des) = detector.detectAndCompute(
                    comparing_img, None)
                matches = bf.match(target_des, comparing_des)
                dist = [m.distance for m in matches]
                score = sum(dist) / len(dist)
                if score <= 1:
                    score = 1
                score = 100.0 / score
            except cv2.error:
                score = 100000

            ret[comparing_file] = score

    ############################################################

    dic_sorted = sorted(ret.items(), reverse=True, key=lambda x: x[1])[:3]
    estimated_d = []
    for file in dic_sorted:
        img_path = IMG_DIR + SUB_DIR + file[0]
        img = cv2.imread(img_path)
        # cv2.imshow('image',img)
        # Save
        dt_now = datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")
        save_path = os.path.join(SAVE_DIR, dt_now + ".jpeg")
        cv2.imwrite(save_path, img)
        estimated_d.append(file[1])
    f_imgs = os.listdir(SAVE_DIR)
    if '.DS_Store' in f_imgs:
        f_imgs.remove('.DS_Store')
    exists_img = sorted(f_imgs)[-3:]

    # Save the file
    # file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
    # Redirect to the post-upload page
    return render_template('index.html',
                           names=names,
                           data=zip(exists_img, estimated_d))
Code Example #34
File: metrics.py Project: CrisMD/OpenCV
                        channels,
                        None,
                        histSize,
                        ranges,
                        accumulate=False)
cv.normalize(hist_test, hist_test, alpha=0, beta=1, norm_type=cv.NORM_MINMAX)

# 0 Correlation
# 1 Chi-Square
# 2 Intersection
# 3 Bhattacharyya distance

for compare_method in range(4):
    comparisons = []
    for hist in images_hist:
        comp = cv.compareHist(hist_test, hist, compare_method)
        comparisons.append(comp)

    if compare_method == 3:
        should_rev = False
    else:
        should_rev = True

    matches = [
        im for (c, im) in sorted(zip(comparisons, images),
                                 key=lambda pair: pair[0],
                                 reverse=should_rev)
    ]

    for i in range(2):
        im = matches[i]
Code Example #35
def compare_two_hist(hist1, hist2, md="Correlation"):
    if md not in OPENCV_METHODS:
        md = "Correlation"
    d = cv2.compareHist(hist1, hist2, OPENCV_METHODS[md])
    return d
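OPENCV_METHODS is defined elsewhere; given the default md="Correlation", it presumably maps display names to cv2.HISTCMP_* flags, e.g. (an assumption):

import cv2

# assumed contents of the OPENCV_METHODS lookup used above
OPENCV_METHODS = {
    "Correlation": cv2.HISTCMP_CORREL,
    "Chi-Squared": cv2.HISTCMP_CHISQR,
    "Intersection": cv2.HISTCMP_INTERSECT,
    "Bhattacharyya": cv2.HISTCMP_BHATTACHARYYA,
}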
Code Example #36
def to_dict_w_hists(data_dict, keys, data_zip):

    i = 0
    while i < len(keys):

        data_dict[keys[i]]['name'] = data_zip[i][0]

        data_dict[keys[i]]['arrays'] = {}

        data_dict[keys[i]]['arrays']['full'] = {}
        data_dict[keys[i]]['arrays']['full']['array'] = data_zip[i][1]
        data_dict[keys[i]]['arrays']['full']['numpy hist'] = np.histogram(
            data_zip[i][1])
        data_dict[keys[i]]['arrays']['full']['cv2 hist'] = np_hist_to_cv(
            np.histogram(data_zip[i][1]))

        data_dict[keys[i]]['MSE'] = round(data_zip[i][2], 2)
        data_dict[keys[i]]['SSIM'] = round(data_zip[i][3], 2)

        data_dict[keys[i]]['arrays']['top left'] = {}
        data_dict[keys[i]]['arrays']['top left']['array'] = data_zip[i][4]
        data_dict[keys[i]]['arrays']['top left']['numpy hist'] = np.histogram(
            data_zip[i][4])
        data_dict[keys[i]]['arrays']['top left']['cv2 hist'] = np_hist_to_cv(
            np.histogram(data_zip[i][4]))

        data_dict[keys[i]]['arrays']['top right'] = {}
        data_dict[keys[i]]['arrays']['top right']['array'] = data_zip[i][5]
        data_dict[keys[i]]['arrays']['top right']['numpy hist'] = np.histogram(
            data_zip[i][5])
        data_dict[keys[i]]['arrays']['top right']['cv2 hist'] = np_hist_to_cv(
            np.histogram(data_zip[i][5]))

        data_dict[keys[i]]['arrays']['low left'] = {}
        data_dict[keys[i]]['arrays']['low left']['array'] = data_zip[i][6]
        data_dict[keys[i]]['arrays']['low left']['numpy hist'] = np.histogram(
            data_zip[i][6])
        data_dict[keys[i]]['arrays']['low left']['cv2 hist'] = np_hist_to_cv(
            np.histogram(data_zip[i][6]))

        data_dict[keys[i]]['arrays']['low right'] = {}
        data_dict[keys[i]]['arrays']['low right']['array'] = data_zip[i][7]
        data_dict[keys[i]]['arrays']['low right']['numpy hist'] = np.histogram(
            data_zip[i][7])
        data_dict[keys[i]]['arrays']['low right']['cv2 hist'] = np_hist_to_cv(
            np.histogram(data_zip[i][7]))

        data_dict[keys[i]]['IMSE'] = round(data_zip[i][8], 2)
        data_dict[keys[i]]['IMSE Map'] = data_zip[i][9]
        data_dict[keys[i]]['MSE Map'] = data_zip[i][10]
        data_dict[keys[i]]['SSIM Map'] = data_zip[i][11]
        data_dict[keys[i]]['CW SSIM'] = data_zip[i][12]
        data_dict[keys[i]]['CW SSIM Map'] = data_zip[i][13]
        data_dict[keys[i]]['Mag. Map'] = data_zip[i][14]
        data_dict[keys[i]]['Frequency Transects'] = data_zip[i][15]
        data_dict[keys[i]]['DCT Map'] = data_zip[i][16]
        data_dict[keys[i]]['DCT Curve'] = data_zip[i][17]

        # Histogram Comparisons

        # Bhattacharyya
        data_dict[keys[i]]['Bhattacharyya Full'] = round(
            cv2.compareHist(data_dict[keys[i]]['arrays']['full']['cv2 hist'],
                            data_dict[keys[0]]['arrays']['full']['cv2 hist'],
                            cv2.cv.CV_COMP_BHATTACHARYYA), 2)

        data_dict[keys[i]]['Bhattacharyya UL'] = round(
            cv2.compareHist(
                data_dict[keys[i]]['arrays']['top left']['cv2 hist'],
                data_dict[keys[0]]['arrays']['top left']['cv2 hist'],
                cv2.cv.CV_COMP_BHATTACHARYYA), 2)

        data_dict[keys[i]]['Bhattacharyya UR'] = round(
            cv2.compareHist(
                data_dict[keys[i]]['arrays']['top right']['cv2 hist'],
                data_dict[keys[0]]['arrays']['top right']['cv2 hist'],
                cv2.cv.CV_COMP_BHATTACHARYYA), 2)

        data_dict[keys[i]]['Bhattacharyya LL'] = round(
            cv2.compareHist(
                data_dict[keys[i]]['arrays']['low left']['cv2 hist'],
                data_dict[keys[0]]['arrays']['low left']['cv2 hist'],
                cv2.cv.CV_COMP_BHATTACHARYYA), 2)

        data_dict[keys[i]]['Bhattacharyya LR'] = round(
            cv2.compareHist(
                data_dict[keys[i]]['arrays']['low right']['cv2 hist'],
                data_dict[keys[0]]['arrays']['low right']['cv2 hist'],
                cv2.cv.CV_COMP_BHATTACHARYYA), 2)

        # Chi Square
        data_dict[keys[i]]['Chi Square Full'] = round(
            cv2.compareHist(data_dict[keys[i]]['arrays']['full']['cv2 hist'],
                            data_dict[keys[0]]['arrays']['full']['cv2 hist'],
                            cv2.cv.CV_COMP_CHISQR), 2)

        data_dict[keys[i]]['Chi Square UL'] = round(
            cv2.compareHist(
                data_dict[keys[i]]['arrays']['top left']['cv2 hist'],
                data_dict[keys[0]]['arrays']['top left']['cv2 hist'],
                cv2.cv.CV_COMP_CHISQR), 2)

        data_dict[keys[i]]['Chi Square UR'] = round(
            cv2.compareHist(
                data_dict[keys[i]]['arrays']['top right']['cv2 hist'],
                data_dict[keys[0]]['arrays']['top right']['cv2 hist'],
                cv2.cv.CV_COMP_CHISQR), 2)

        data_dict[keys[i]]['Chi Square LL'] = round(
            cv2.compareHist(
                data_dict[keys[i]]['arrays']['low left']['cv2 hist'],
                data_dict[keys[0]]['arrays']['low left']['cv2 hist'],
                cv2.cv.CV_COMP_CHISQR), 2)

        data_dict[keys[i]]['Chi Square LR'] = round(
            cv2.compareHist(
                data_dict[keys[i]]['arrays']['low right']['cv2 hist'],
                data_dict[keys[0]]['arrays']['low right']['cv2 hist'],
                cv2.cv.CV_COMP_CHISQR), 2)

        # Correlation
        data_dict[keys[i]]['Correlation Full'] = round(
            cv2.compareHist(data_dict[keys[i]]['arrays']['full']['cv2 hist'],
                            data_dict[keys[0]]['arrays']['full']['cv2 hist'],
                            cv2.cv.CV_COMP_CORREL), 2)

        data_dict[keys[i]]['Correlation UL'] = round(
            cv2.compareHist(
                data_dict[keys[i]]['arrays']['top left']['cv2 hist'],
                data_dict[keys[0]]['arrays']['top left']['cv2 hist'],
                cv2.cv.CV_COMP_CORREL), 2)

        data_dict[keys[i]]['Correlation UR'] = round(
            cv2.compareHist(
                data_dict[keys[i]]['arrays']['top right']['cv2 hist'],
                data_dict[keys[0]]['arrays']['top right']['cv2 hist'],
                cv2.cv.CV_COMP_CORREL), 2)

        data_dict[keys[i]]['Correlation LL'] = round(
            cv2.compareHist(
                data_dict[keys[i]]['arrays']['low left']['cv2 hist'],
                data_dict[keys[0]]['arrays']['low left']['cv2 hist'],
                cv2.cv.CV_COMP_CORREL), 2)

        data_dict[keys[i]]['Correlation LR'] = round(
            cv2.compareHist(
                data_dict[keys[i]]['arrays']['low right']['cv2 hist'],
                data_dict[keys[0]]['arrays']['low right']['cv2 hist'],
                cv2.cv.CV_COMP_CORREL), 2)

        i = i + 1
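np_hist_to_cv is referenced but not defined here; presumably it converts an np.histogram result into the float32 array cv2.compareHist expects. A minimal sketch under that assumption:

import numpy as np

def np_hist_to_cv(np_hist):
    # assumed helper: take the (counts, bin_edges) pair returned by
    # np.histogram and return a float32 column vector for cv2.compareHist
    counts, _bin_edges = np_hist
    return counts.astype(np.float32).reshape(-1, 1)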
Code Example #37
    def updateHistfaces(self,current_hist):
        color = ('b','g','r')
        curs=0
        if self.choose_face1:

            if len(self.histmoyface1)>0:
                for rgbHist in self.histmoyface1:
                    for i,col in enumerate(color):
                        curs+= cv2.compareHist(current_hist[i], rgbHist[i], cv2.HISTCMP_CORREL)
                curs=curs/len(self.histmoyface1)
                print("Distance nouvel histo par rapport à histo face1 : ",curs)
                if curs>2.8 :
                    if len(self.histmoyface1)<3:
                        self.histmoyface1.append(current_hist)
                        self.index_moyface1+=1
                    else :
                        self.histmoyface1[(self.index_moyface1)%3]=current_hist
                        self.index_moyface1+=1
                    print("Update face1")
                else :
                    if len(self.histmoyface2)<3:
                        self.histmoyface2.append(current_hist)
                        self.index_moyface2+=1
                    else :
                        self.histmoyface2[(self.index_moyface2)%3]=current_hist
                        self.index_moyface2+=1
                    self.choose_face1=False
                    print("Update face2")
            else :
                if len(self.histmoyface1)<3:
                    self.histmoyface1.append(current_hist)
                    self.index_moyface1+=1
                else :
                    self.histmoyface1[(self.index_moyface1)%3]=current_hist
                    self.index_moyface1+=1
                print("Update face1")

        else :
            if len(self.histmoyface2)>0:
                for rgbHist in self.histmoyface2:
                    for i,col in enumerate(color):
                        curs+= cv2.compareHist(current_hist[i], rgbHist[i], cv2.HISTCMP_CORREL)
                curs=curs/len(self.histmoyface2)
                print("Distance nouvel histo par rapport à histo face2: ",curs)
                if curs>2.8 :
                    if len(self.histmoyface2)<3:
                        self.histmoyface2.append(current_hist)
                        self.index_moyface2+=1
                    else :
                        self.histmoyface2[(self.index_moyface2)%3]=current_hist
                        self.index_moyface2+=1
                    print("Update face2")
                else :
                    if len(self.histmoyface1)<3:
                        self.histmoyface1.append(current_hist)
                        self.index_moyface1+=1
                    else :
                        self.histmoyface1[(self.index_moyface1)%3]=current_hist
                        self.index_moyface1+=1
                    self.choose_face1=True
                    print("Update face1")
            else :

                    print("Bug")
コード例 #38
0
ファイル: Hist_Flow.py プロジェクト: icaresakr/TP2_ROB317
    Hist = Hist_p
    flow = cv2.calcOpticalFlowFarneback(
        prvs,
        next,
        None,
        pyr_scale=0.5,  # pyramid downscale factor
        levels=3,  # number of pyramid levels
        winsize=15,  # averaging window size for the polynomial coefficients
        iterations=3,  # number of iterations per level
        poly_n=7,  # neighborhood size for the polynomial approximation
        poly_sigma=1.5,  # Gaussian std dev used to compute the derivatives
        flags=0)

    Hist_p = ComputeHistFlow(flow)
    corr.append(cv2.compareHist(Hist, Hist_p,
                                cv2.HISTCMP_CORREL))  # metric = correlation
    plot_correlation(corr, seuil)

    cv2.namedWindow('Video frames')
    cv2.moveWindow('Video frames', 500, 0)
    cv2.imshow('Video frames', frame2)

    k = cv2.waitKey(15) & 0xff
    if k == 27:
        break
    elif k == ord('s'):
        cv2.imwrite('Frame_%04d.png' % index, frame2)
        cv2.imwrite('Hist%04d.png' % index, Hist_p)

    prvs = next
    ret, frame2 = cap.read()
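
# ComputeHistFlow is called above but not shown in this fragment; a minimal
# sketch, assuming it builds a normalized 2D histogram over flow angle and
# magnitude (the bin counts and max_mag below are illustrative assumptions):
import numpy as np

def ComputeHistFlow(flow, ang_bins=32, mag_bins=32, max_mag=32.0):
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])  # magnitude, angle (radians)
    hist, _, _ = np.histogram2d(ang.ravel(), mag.ravel(),
                                bins=[ang_bins, mag_bins],
                                range=[[0, 2 * np.pi], [0, max_mag]])
    hist = hist.astype(np.float32)
    return cv2.normalize(hist, hist)  # normalize so compareHist is scale-invariant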
コード例 #39
0
def mature_check(image,human_string,draw_mask):

    red_flag = 0

    if human_string == "broccoli":
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
        threshold_low = np.array([20, 30, 20], dtype = "uint8")
        threshold_high = np.array([80, 255, 255], dtype = "uint8")
        reference_img = cv2.imread("reference/reference1.jpg")
    elif human_string == "cabbage":
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
        threshold_low = np.array([20, 40, 30], dtype = "uint8")
        threshold_high = np.array([70, 255, 255], dtype = "uint8")
        reference_img = cv2.imread("reference/reference2.jpg")
    elif human_string == "cauliflower":
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
        threshold_low = np.array([0, 0, 30], dtype = "uint8")
        threshold_high = np.array([180, 50, 255], dtype = "uint8")
        reference_img = cv2.imread("reference/reference3.jpg")
    elif human_string == "red capsicum":
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (13, 13))
        threshold_low = np.array([0, 60, 75], dtype = "uint8")
        threshold_high = np.array([10, 255, 255], dtype = "uint8")
        reference_img = cv2.imread("reference/reference4.jpg")
        red_flag = 1
        
    #process the reference image
    reference_blur = cv2.GaussianBlur(reference_img, (15, 15), 2)
    reference_hsv = cv2.cvtColor(reference_blur, cv2.COLOR_BGR2HSV)

    if red_flag == 1:
        mask1 = cv2.inRange(reference_hsv, threshold_low, threshold_high)

        #set second set of threshold values 
        threshold_low = np.array([170, 60, 75], dtype = "uint8")
        threshold_high = np.array([180, 255, 255], dtype = "uint8")

        #get final mask
        mask2 = cv2.inRange(reference_hsv, threshold_low, threshold_high)
        reference_mask = cv2.bitwise_or(mask1, mask2, mask = None) 
        
    else:    
        reference_mask = cv2.inRange(reference_hsv, threshold_low, threshold_high)

    reference_mask = cv2.morphologyEx(reference_mask, cv2.MORPH_OPEN, kernel)
    reference_masked = cv2.bitwise_and(reference_img,reference_img, mask = reference_mask)

    #set the mask for input image
    input_masked = cv2.bitwise_and(image,image, mask = draw_mask)

    #compare H-S histograms in HSV space (the [0,180] range is the hue channel,
    #so both images must be converted to HSV before calcHist)
    input_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    hist1 = cv2.calcHist([input_hsv], [0,1], draw_mask, [180,256], [0,180, 0,256])
    hist2 = cv2.calcHist([reference_hsv], [0,1], reference_mask, [180,256], [0,180, 0,256])

    val = cv2.compareHist(hist1,hist2,cv2.HISTCMP_CORREL)

    plot_histogram(reference_img, "Histogram for Reference Image", mask = reference_mask)
    plot_histogram(image, "Histogram for Input Image", mask = draw_mask)
 
    #clamp negative correlation to zero
    if val < 0:
        val = 0

    return val
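
#plot_histogram is used above but not defined in this fragment; a minimal
#sketch, assuming it draws per-channel histograms of the masked region
#(matplotlib is assumed to be available):
import matplotlib.pyplot as plt

def plot_histogram(img, title, mask=None):
    plt.figure()
    plt.title(title)
    for ch, color in enumerate(('b', 'g', 'r')):
        hist = cv2.calcHist([img], [ch], mask, [256], [0, 256])
        plt.plot(hist, color=color)
    plt.xlim([0, 256])
    plt.show()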
コード例 #40
0
    index[filename] = hist

OPENCV_METHODS = (("Correlation", cv2.HISTCMP_CORREL),
                  ("Chi-Squared", cv2.HISTCMP_CHISQR), ("Intersection",
                                                        cv2.HISTCMP_INTERSECT),
                  ("Hellinger", cv2.HISTCMP_BHATTACHARYYA))

for (methodName, method) in OPENCV_METHODS:
    results = {}
    reverse = False

    if methodName in ("Correlation", "Intersection"):
        reverse = True

    for (k, hist) in index.items():
        d = cv2.compareHist(index["doge.png"], hist, method)
        results[k] = d

    results = sorted([(v, k) for (k, v) in results.items()], reverse=reverse)

    fig = plt.figure("Query")
    ax = fig.add_subplot(1, 1, 1)
    ax.imshow(images["doge.png"])
    plt.axis("off")

    fig = plt.figure("Results: %s" % (methodName))
    fig.suptitle(methodName, fontsize=20)

    for (i, (v, k)) in enumerate(results):
        ax = fig.add_subplot(1, len(images), i + 1)
        ax.set_title("%s: %.2f" % (k, v))
        ax.imshow(images[k])
        plt.axis("off")
コード例 #41
0
def main():
    
    # --- ARGUMENT PARSER  AND FILEPATHS ---
    
    # Initialise argument parser
    ap = argparse.ArgumentParser()
    
    # Input options for path to images
    ap.add_argument("-d", "--directory", help = "Path to directory of images", 
                    required = False, default = "../data/flowers")
    
    # Input option for target image name
    ap.add_argument("-t", "--target_img", help = "Filename of the target image", 
                    required = False, default = "image_0001.jpg")

    # Extract inputs
    args = vars(ap.parse_args())
    img_dir = args["directory"]
    target_img = args["target_img"]
    
    # --- IMAGE SEARCH ---
    
    # Print message
    print(f"\n[INFO] Initialising image search for {target_img} using color histograms.")
    
    # Get filepath to target image
    target_path = os.path.join(img_dir, target_img)
    # Get file paths to all images in directory 
    img_paths = get_paths(img_dir)
    # Remove target path from all image paths
    img_paths.remove(target_path)
    
    # Create empty target data frame for distances
    distances_df = pd.DataFrame(columns=["filename", "chisquare_distance"])
    
    # Get histogram of target image
    target_hist = get_histogram(target_path)

    # Get histogram for all other images and compare to target histogram 
    for img_path in tqdm(img_paths):
        # Get the name of the image
        img_name = os.path.split(img_path)[1]
        # Get the histogram of the image
        img_hist = get_histogram(img_path)
        # Calculate the distance of the image by comparing the target and image histogram
        distance = round(cv2.compareHist(target_hist, img_hist, cv2.HISTCMP_CHISQR), 2)
        # Append filename and distance to dataframe (DataFrame.append was
        # removed in pandas 2.0, so use pd.concat instead)
        distances_df = pd.concat([distances_df,
                                  pd.DataFrame([{"filename": img_name,
                                                 "chisquare_distance": distance}])],
                                 ignore_index = True)

        
    # Sort data frame by distance and reset index
    distances_df = distances_df.sort_values("chisquare_distance").reset_index(drop=True)
    
    # --- OUTPUT ---
    
    # Prepare output directory 
    out_dir = os.path.join("..", "out")
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    
    # Save data frame in output directory, using target image for filename
    out_df = os.path.join(out_dir, f"{os.path.splitext(target_img)[0]}_hist.csv")
    distances_df.to_csv(out_df)
    
    # Save plot with similar images in output directory, using target image for filename
    out_plot = os.path.join(out_dir, f"{os.path.splitext(target_img)[0]}_hist_top3.png")
    plot_similar(img_dir, target_img, distances_df, out_plot)

    # Print message, and print closest image to target image
    print(f"\n[INFO] Output is saved in {out_dir}, the closest image to {os.path.splitext(target_img)[0]} is:")
    print(distances_df.iloc[0])
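
# get_histogram is defined elsewhere in this project; a minimal sketch,
# assuming it returns a flattened color histogram compatible with
# HISTCMP_CHISQR (the bin counts below are illustrative):
def get_histogram(img_path, bins=(8, 8, 8)):
    img = cv2.imread(img_path)
    hist = cv2.calcHist([img], [0, 1, 2], None, list(bins),
                        [0, 256, 0, 256, 0, 256])
    return cv2.normalize(hist, hist).flatten()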
コード例 #42
0
x,y,z=frame1.shape

nextimYuv = cv2.cvtColor(frame2, cv2.COLOR_BGR2YUV)
index = 1
imYuv = cv2.cvtColor(frame1, cv2.COLOR_BGR2YUV)  # convert to YUV
y = []
while(ret):

    # Grayscale (luma) histograms, normalized; the upper range bound is
    # exclusive, so [0, 256] covers the full 0-255 range
    hist = cv2.calcHist([imYuv], [0], None, [32], [0, 256])
    hist = cv2.normalize(hist, hist)
    hist1 = cv2.calcHist([nextimYuv], [0], None, [32], [0, 256])
    hist1 = cv2.normalize(hist1, hist1)

    # Correlation between consecutive histograms
    y.append(cv2.compareHist(hist, hist1, cv2.HISTCMP_CORREL))

    # Plot the correlation curve
    fig2, ax2 = plt.subplots()
    ax2.plot(y)
    plt.xlabel("Index")
    plt.ylabel("Correlation between frames")
    plt.draw()
    fig2.canvas.draw()
    # np.fromstring is deprecated; frombuffer reads the rendered canvas directly
    imgt = np.frombuffer(fig2.canvas.tostring_rgb(), dtype=np.uint8)
    imgt = imgt.reshape(fig2.canvas.get_width_height()[::-1] + (3,))
    imgt = cv2.cvtColor(imgt, cv2.COLOR_RGB2BGR)
    cv2.imshow("Correlation between frames", imgt)
コード例 #43
0
    def callback_humanlist(self, human_list_msg, cv_image, openpose_depth_img,
                           camera_transform, time_):

        if len(human_list_msg.human_list) == 0:
            return

        depth_img = openpose_depth_img
        frame = cv_image
        found_target = False
        pos_list = []

        for human in human_list_msg.human_list:

            # clothes area
            main_points = [1, 2, 5, 8]
            x = []
            y = []

            for idx in main_points:
                if human.body_key_points_with_prob[idx] != 0:
                    if human.body_key_points_with_prob[
                            idx].x != 0 and human.body_key_points_with_prob[
                                idx].y != 0:
                        x.append(human.body_key_points_with_prob[idx].x)
                        y.append(human.body_key_points_with_prob[idx].y)

            if len(x) != 4:
                continue

            #(top_, right_, bottom_, left_) = (int(min(y)), int(max(x)), int(max(y)), int(min(x)))
            (top, right, bottom, left) = (int(
                (3 * min(y) + max(y)) / 4), int(
                    (min(x) + 3 * max(x)) / 4), int((min(y) + 3 * max(y)) / 4),
                                          int((3 * min(x) + max(x)) / 4))
            #print((top, right, bottom, left))

            diagonal_size = sqrt(
                pow((right - left), 2) + pow((top - bottom), 2))
            print(diagonal_size)

            # ignore invalid, too-small person detections from OpenPose
            if diagonal_size < self.valid_size_of_person:
                continue

            col = int(round((left + right) / 2))
            row = int(round((bottom + top) / 2))
            clothes_block = frame[top:bottom, left:right]
            depth_arr = depth_img[top:bottom, left:right]

            cv2.rectangle(frame, (left, bottom), (right, top), (0, 0, 255), 2)
            hist = cv2.calcHist([clothes_block], [0, 1, 2], None, [8, 8, 8],
                                [0, 256, 0, 256, 0, 256])
            hist = cv2.normalize(hist, hist).flatten()

            # face area
            use_face_area = False
            face_points = [1, 17, 18]
            f_x = []
            f_y = []

            for idx in face_points:
                if human.body_key_points_with_prob[idx] != 0:
                    if human.body_key_points_with_prob[
                            idx].x != 0 and human.body_key_points_with_prob[
                                idx].y != 0:
                        f_x.append(human.body_key_points_with_prob[idx].x)
                        f_y.append(human.body_key_points_with_prob[idx].y)

            if len(f_x) == 3:
                use_face_area = True

            pose_transformed = None
            f_pose_transformed = None

            if use_face_area:
                (top, right, bottom, left) = (int(min(f_y)), int(max(f_x)),
                                              int(max(f_y)), int(min(f_x)))

                f_col = int(round((left + right) / 2))
                f_row = int(round((bottom + top) / 2))
                face_block = frame[top:bottom, left:right]
                f_depth_arr = depth_img[top:bottom, left:right]

                # pick the depth at (self.face_depth_arr_position) of the sorted
                # depths, since an obstacle in front of the person can occlude the region.
                depth_list = f_depth_arr[f_depth_arr != 0].tolist()

                if len(depth_list) == 0:
                    continue
                else:
                    sorted_depth_list = sorted(depth_list, reverse=False)
                    selected_idx = int(
                        len(sorted_depth_list) * self.face_depth_arr_position)
                    selected_depth = sorted_depth_list[selected_idx]

                    cv2.rectangle(frame, (left, bottom), (right, top),
                                  (0, 0, 255), 2)

                    # personPose : [-y, x, z] by camera_link frame unit : mm
                    # data_to_send.data = rs2.rs2_deproject_pixel_to_point(self.intrinsics, [col, row], selected_depth) # [col, row]
                    rs2_pose = rs2.rs2_deproject_pixel_to_point(
                        self.intrinsics, [f_col, f_row],
                        selected_depth)  # [col, row]

                    pose_stamped = PoseStamped()

                    pose_stamped.header.frame_id = 'camera_link'
                    # personPose : [-y, x, z] unit : mm
                    pose_stamped.pose.position.x = rs2_pose[1] / 1000
                    pose_stamped.pose.position.y = -rs2_pose[0] / 1000
                    pose_stamped.pose.position.z = rs2_pose[2] / 1000
                    pose_stamped.pose.orientation.z = 0.0
                    pose_stamped.pose.orientation.w = 1.0
                    pose_stamped.header.stamp = rospy.Time.now()

                    f_pose_transformed = tf2_geometry_msgs.do_transform_pose(
                        pose_stamped, camera_transform)

                    pos_list.append(f_pose_transformed)

            else:

                # pick the depth at (self.clothes_depth_arr_position) of the sorted
                # depths, since an obstacle in front of the person can occlude the clothes.
                depth_list = depth_arr[depth_arr != 0].tolist()

                if len(depth_list) == 0:
                    continue

                sorted_depth_list = sorted(depth_list, reverse=False)
                selected_idx = int(
                    len(sorted_depth_list) * self.clothes_depth_arr_position)
                selected_depth = sorted_depth_list[selected_idx]

                # personPose : [-y, x, z] by camera_link frame unit : mm
                # data_to_send.data = rs2.rs2_deproject_pixel_to_point(self.intrinsics, [col, row], selected_depth) # [col, row]
                rs2_pose = rs2.rs2_deproject_pixel_to_point(
                    self.intrinsics, [col, row], selected_depth)  # [col, row]

                pose_stamped = PoseStamped()

                pose_stamped.header.frame_id = 'camera_link'
                # personPose : [-y, x, z] unit : mm
                pose_stamped.pose.position.x = rs2_pose[1] / 1000
                pose_stamped.pose.position.y = -rs2_pose[0] / 1000
                pose_stamped.pose.position.z = rs2_pose[2] / 1000
                pose_stamped.pose.orientation.z = 0.0
                pose_stamped.pose.orientation.w = 1.0
                pose_stamped.header.stamp = rospy.Time.now()

                pose_transformed = tf2_geometry_msgs.do_transform_pose(
                    pose_stamped, camera_transform)

                pos_list.append(pose_transformed)

            print('--------------------')

            for idx, val in enumerate(self.person_clothes_hists):

                name = self.person_names[idx]
                if name.split('_')[0] == person_name:
                    similarity = cv2.compareHist(hist, val, cv2.HISTCMP_INTERSECT)

                    if similarity > self.clothes_similarity:
                        found_target = True

                        #print(name)
                        #print(similarity)
                        #print('col : ', col, 'row : ', row)
                        #print(selected_depth)
                        #print('data_to_send : ', data_to_send)

                        send_pose_transformed = None

                        if use_face_area:
                            send_pose_transformed = f_pose_transformed
                        else:
                            send_pose_transformed = pose_transformed

                        #print('pose_transformed : ', send_pose_transformed)

                        self.personPose_pub.publish(send_pose_transformed)
                        self.last_person_pose = [send_pose_transformed, time_]
                        self.unauth_case_cnt = self.init_unauth_case_num
                        break

            if found_target:
                break

        # when we can't find target by clothes histogram, then we try to check all person's position.
        # if specific position is near from last target position, then, we can use this as new target position.

        if (not found_target) and (self.last_person_pose is not None) and (
                self.unauth_case_cnt != 0):

            last_time = self.last_person_pose[1]
            time_interval = time_ - last_time

            if time_interval.to_sec() < self.valid_time_from_last_person:

                if len(pos_list) != 0:

                    last_x = self.last_person_pose[0].pose.position.x
                    last_y = self.last_person_pose[0].pose.position.y

                    min_vel = None
                    min_idx = None

                    for idx, pos in enumerate(pos_list):

                        pos_x = pos.pose.position.x
                        pos_y = pos.pose.position.y

                        inc_x = pos_x - last_x
                        inc_y = pos_y - last_y

                        euclidean_distance = sqrt(
                            pow((inc_x), 2) + pow((inc_y), 2))
                        velocity = euclidean_distance / time_interval.to_sec()

                        if min_vel is None:
                            min_vel = velocity
                            min_idx = idx
                        elif velocity < min_vel:
                            min_vel = velocity
                            min_idx = idx

                    if min_vel < self.valid_vel:
                        self.personPose_pub.publish(pos_list[min_idx])
                        self.last_person_pose = [pos_list[min_idx], time_]
                        self.unauth_case_cnt -= 1

        self.frame = frame
コード例 #44
0
#Chi-square distance is one of the distance measures that can be used as a measure of dissimilarity between two histograms
#maximum distance allowed
maxDist = 200
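
#For reference, a minimal sketch of what cv2.HISTCMP_CHISQR computes:
#d(H1, H2) = sum over bins of (H1 - H2)^2 / H1. The function name below is
#illustrative, not part of the original script.
import numpy as np

def chi_square(h1, h2):
    h1 = h1.ravel().astype(np.float64)
    h2 = h2.ravel().astype(np.float64)
    nz = h1 != 0  # OpenCV skips bins where the first histogram is zero
    return float(np.sum((h1[nz] - h2[nz]) ** 2 / h1[nz]))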


while True:
    #return_value = 0 for some error or 1 for successful reading
    #image is the captured frame
    return_value, image = camera.read()
    if return_value:
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        cv2.imshow("image",  gray_image)

        # calcHist expects a list of images
        H1 = cv2.calcHist([gray_image], [0], None, [256], [0,256])
        H2 = cv2.calcHist([gray_image], [0], None, [256], [0,256])
        comp = cv2.compareHist(H1, H2, cv2.HISTCMP_CHISQR)
        
        height, width = gray_image.shape
        while comp >= maxDist:
            for x in range(height):
                gray_image[x,0] = 255
                gray_image[x, width-1] = 255
            
            for y in range(width):
                gray_image[0,y] = 255                    
                gray_image[height-1, y] = 255
            
            cv2.imshow("image",  gray_image)
            
            H1 = cv2.calcHist([gray_image], [0], None, [256], [0,256])
            H2 = cv2.calcHist([gray_image], [0], None, [256], [0,256])
コード例 #45
0
                allFaces.append(
                    Face(f_hist, f_hsv, (x, y - 10, w, h + 10),
                         frame[y - 10:y + h, x:x + w]))
            # ========================================================================
            if len(speakers) == 0:
                for face in allFaces:
                    roll += 1
                    face.identity = roll
                    speakers.append(Person(face.f_image, face.f_window, roll))
            else:
                for face in allFaces:
                    d = 0
                    coeff = 1
                    for speaker in speakers:
                        d = cv2.compareHist(face.f_hist, speaker.hist,
                                            cv2.HISTCMP_BHATTACHARYYA)
                        if coeff > d and d < 0.4:
                            coeff = d
                            face.identity = speaker.roll
                            i.append(speaker.roll)

                    for speaker in speakers:
                        if speaker.roll == face.identity:
                            speaker.image = face.f_image
                            speaker.hist = face.f_hist
                            speaker.hsv = face.f_hsv
                            speaker.active = True

                    # if still no face has matched
                    if face.identity == -1:
                        roll += 1
コード例 #46
0
    def processQueryImage(self):
        paramList = list()
        with open(self.paramTxt) as f:
            for line in f:
                paramList.append(int(line.strip()))
        print(paramList)
        for queryImage in self.queryDict.iterkeys():
            img = cv2.imread(queryImage)
            imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            # radius = 3
            # noPoints = 8 * radius
            radius = paramList[0]
            noPoints = paramList[1] * radius
            lbp = local_binary_pattern(imgGray,
                                       noPoints,
                                       radius,
                                       method='uniform')
            # Calculate the histogram of LBP codes
            # (scipy.stats.itemfreq is deprecated; np.unique gives the same counts)
            _, counts = np.unique(lbp.ravel(), return_counts=True)
            # normalize the histogram
            queryHist = counts / float(counts.sum())

            results = []

            for index, trainedHist in enumerate(self.lbpHistogram):
                # Chi-square distance metric: a lower score means a better match
                score = cv2.compareHist(
                    np.array(trainedHist, dtype=np.float32),
                    np.array(queryHist, dtype=np.float32), cv2.HISTCMP_CHISQR)
                results.append((self.addrImg[index], round(score, 3)))

            #data = [('abc', 121),('qwe', 231),('pop', 148), ('gfh',221)]
            #sorted(data, key=lambda x:x[1])
            #sorted(data, key=lambda x:x[1], reverse=True)
            results = sorted(results, key=lambda score: score[1])

            self.results_all[queryImage] = results
            print("Displaying scores for {} ** \n".format(queryImage))
            for k in self.materialCode.keys():
                if k in queryImage:
                    # print(k, end=" ")
                    self.trueList.append(self.materialCode.get(k))
            for k in self.materialCode.keys():
                if k in results[0][0]:
                    # print(k, end=" ")
                    self.predList.append(self.materialCode.get(k))
            for image, score in results:
                print("{} has score {}".format(image, score))
            # font = cv2.FONT_HERSHEY_SIMPLEX
            # cv2.putText(img, 'Bitumin', (10, 450), font, 1, (255, 255, 255), 2)
            # cv2.putText(img, 'Gravel', (10, 450), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
            #################################################### Code added to show in matplot lib
            # plt.axis("off")
            # plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
            # # plt.imshow(img)
            # plt.show()
            ######################################################
            cv2.imshow("query", img)
            ################
            # print(results)
            # print("SASAASAS" + results[0][0])
            # test = cv2.imread(results[0][0])
            # if not test is None:
            #     w, h = test.shape[:2]
            #     print(w)
            #     print(h)
            #     cv2.imshow(str(results[0][1]), cv2.imread(results[0][0]))
            #     cv2.waitKey()
            #     cv2.destroyAllWindows()
            # else:
            #     print("Not read")
            ################

            tmpImg = cv2.imread(results[0][0])
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(tmpImg, 'Bitumin', (10, 450), font, 1, (255, 255, 255),
                        2)

            # plt.axis("off")
            # plt.imshow(cv2.cvtColor(tmpImg, cv2.COLOR_BGR2RGB))
            # plt.imshow(tmpImg)
            # plt.show()

            cv2.imshow(str(results[0][1]), tmpImg)
            cv2.waitKey()
            cv2.destroyAllWindows()
コード例 #47
0
 #    ref.append(all_images[i_mod][0])
 #    wm_masks_ref.append(all_labels[0][0]) # although it's the same for all timepoints
 #curr_ref = ref[i_mod] #Always first timepoint of first patient as reference
 curr_ref = all_ref[i_mod]
 curr_ref_mask = all_ref_labels[0]
 #curr_ref_mask = wm_masks_ref[i_mod]
 #save_image(curr_ref, jp(path_write, pat, modalities[i_mod] + "_norm_" + str(0+1).zfill(2)))
 #histograms_aligned[i_mod][0] = histograms[i_mod][0] # First timepoint as reference(does not change)
 for i_tp in range(
         0, num_tp):  # for each timepoint starting at the second one
     curr_img = all_images[i_mod][i_tp]
     curr_wm_mask = all_labels[0][i_tp]
     f = lambda x: cv2.compareHist(
         np.histogram(curr_ref[curr_ref_mask > 0].ravel(),
                      256, [0, 1],
                      density=True)[0].astype(np.float32),
         np.histogram((x[0] * curr_img[curr_wm_mask > 0.5]).ravel(),
                      256, [0, 1],
         density=True)[0].astype(np.float32), cv2.HISTCMP_CHISQR)  # 1 == HISTCMP_CHISQR
     # Optimize Chi-Square metric
     xopt = fmin(func=f, x0=[0.5])
     curr_img_new = np.clip(xopt[0] * curr_img, 0, 1)
     save_image(
         curr_img_new,
         jp(
             path_write, pat, modalities[i_mod] + "_norm_" +
             str(i_tp + 1).zfill(2) + ".nii.gz"))
     all_images_aligned[i_mod][i_tp] = curr_img_new
     histograms_aligned[i_mod][i_tp] = np.histogram(
         curr_img_new[curr_img_new > 0].ravel(),
         256, [0, 1],
コード例 #48
0
#! /usr/bin/python3 -u

# Returns the similarity of 2 images.

import cv2, sys

i1 = cv2.imread(sys.argv[1], 0)
i2 = cv2.imread(sys.argv[2], 0)

channels = [0]
histSize = [256]
mask = None
ranges = [0, 256]
h1 = cv2.calcHist([i1], channels, mask, histSize, ranges)
h2 = cv2.calcHist([i2], channels, mask, histSize, ranges)

method = cv2.HISTCMP_CORREL
print(cv2.compareHist(h1, h2, method))
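
# Example invocation (file names are illustrative):
#   ./similarity.py a.png b.png
# HISTCMP_CORREL prints 1.0 for identical grayscale histograms and lower
# values as the histograms diverge.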
コード例 #49
0
            img = cv2.imread(os.path.join(test_path, file),
                             cv2.IMREAD_GRAYSCALE)
            height = img.shape[1]
            if USE_STEP:
                step = int(nameValue[5])
                origin = img[y + height:y + step + height, x:x + step]
                imgOut = img[y:y + step, x:x + step]
            else:
                origin = img[height:, :]
                imgOut = img[:height, :]
            originHist = cv2.calcHist([origin], [0], None, [hist_size],
                                      [0.0, 255.0])
            imgHist = cv2.calcHist([imgOut], [0], None, [hist_size],
                                   [0.0, 255.0])

            dist1 = cv2.compareHist(originHist, imgHist, cv2.HISTCMP_CORREL)
            dist2 = cv2.compareHist(originHist, imgHist, cv2.HISTCMP_CHISQR)
            dist3 = cv2.compareHist(originHist, imgHist, cv2.HISTCMP_INTERSECT)
            dist4 = cv2.compareHist(originHist, imgHist,
                                    cv2.HISTCMP_BHATTACHARYYA)
            dist5 = cv2.compareHist(originHist, imgHist, cv2.HISTCMP_HELLINGER)
            # compare_psnr/compare_ssim were removed from skimage.measure
            # (use skimage.metrics in scikit-image >= 0.18)
            psnr = skimage.metrics.peak_signal_noise_ratio(origin, imgOut)
            ssim = skimage.metrics.structural_similarity(origin, imgOut)
            fid = calFID(origin, imgOut, sess)
            mutual = mr.mutual_info_score(np.reshape(origin, -1),
                                          np.reshape(imgOut, -1))
            outFile.write(','.join(map(str, [file, dist1, dist2, dist3, dist4,
                                             dist5, ssim, psnr, fid, mutual]))
                          + '\n')
コード例 #50
0
def histogram_comparator(X, Y):
    n_rows = Y.shape[1]
    n_cols = Y.shape[2]

    row_wnd = int(np.floor(X.shape[0] / n_rows))
    col_wnd = int(np.floor(X.shape[1] / n_cols))

    mse = []
    c = 0
    # X = calculate_spatial_histogram(X, n_rows, n_cols, CONFIG['n_hist_bins'])
    # idxs = np.where(X > 0)
    # print(X.shape, Y.shape)
    # diff = Y - X
    # diff = np.moveaxis(diff, 0, len(diff.shape) - 1)
    #
    # diff = diff[idxs]
    #
    # # Manhattan
    # # res = np.sum(np.abs(res), axis=(0,1,2,3,4))
    #
    # print(diff.shape)
    #
    # res = -np.sum(diff**2 / diff, axis=(0))
    #
    # X = X.reshape()
    # res = np.reshape(res, (CONFIG['n_hist_bins']**3 * n_rows * n_cols, res.shape[-1:][0]))
    # mse = res

    # BHATTACHARYYA
    for x in range(n_rows):
        for y in range(n_cols):
            h = calculate_histogram(X[
                                x * row_wnd: (x + 1) * row_wnd,
                                y * col_wnd: (y + 1) * col_wnd
                                ], n_bins=CONFIG['n_hist_bins']).astype(np.float16)

            r = []
            if np.sum(h) > 0:
                if X.shape[2] == 3:
                    for i in range(Y.shape[0]):
                        a = h
                        b = Y[i, x, y]
                        d = cv2.compareHist(a.astype(np.float32), b.astype(np.float32), cv2.HISTCMP_BHATTACHARYYA)
                        r.append(d)
                else:
                    idxs = np.where(h > 0)
                    for i in range(Y.shape[0]):
                        a = h[idxs]
                        b = Y[i, x, y][idxs]
                        d = cv2.compareHist(a.astype(np.float32), b.astype(np.float32), cv2.HISTCMP_BHATTACHARYYA)
                        r.append(d)
                c += 1
            else:
                # r = [np.inf] * Y.shape[0]
                r = [0] * Y.shape[0]
            mse.append(r)


    # mse = np.array(mse)
    # mean = np.mean(mse[np.where(mse!=np.inf)])
    #
    # for i in range(n_rows * n_cols):
    #     if np.any(mse[i] == np.inf):
    #         print("replacing")
    #         mse[i, :] = mean
    # mse = np.mean(mse, axis=0)

    mse = np.sum(np.array(mse), axis=0)
    return mse
コード例 #51
0
            sample['avc3'] = float(average_color[2])

            # area / rect
            sample['extent'] = sample['m00'] / (h * w)

            #min_rect / rect
            (min_x, min_y), (min_w, min_h), min_theta = cv2.minAreaRect(c)
            sample['rect_extent'] = (min_w * min_h) / (h * w)

            #print(sample['m00'], min_w*min_h, h*w)
            #sys.exit(0)

            hist = cv2.calcHist([crop_img], [0], None, [256], [0, 256])

            sample['hist_correl'] = cv2.compareHist(
                hist / np.linalg.norm(hist), average_normed_hist,
                cv2.HISTCMP_CORREL)
            sample['hist_chisqr'] = cv2.compareHist(
                hist / np.linalg.norm(hist), average_normed_hist,
                cv2.HISTCMP_CHISQR)
            sample['hist_bhatt'] = cv2.compareHist(hist / np.linalg.norm(hist),
                                                   average_normed_hist,
                                                   cv2.HISTCMP_BHATTACHARYYA)
            sample['hist_inter'] = cv2.compareHist(hist / np.linalg.norm(hist),
                                                   average_normed_hist,
                                                   cv2.HISTCMP_INTERSECT)

            # 'white_balance'
            #print(int(hist[-1])/sample['m00'])
            sample['white_balance'] = int(hist[-1]) / (h * w)
コード例 #52
0
def calc_hist_score(hist1, hist2):
    scores = []
    for channel1, channel2 in zip(hist1, hist2):
        score = cv2.compareHist(channel1, channel2, cv2.HISTCMP_BHATTACHARYYA)
        scores.append(score)
    return np.mean(scores)
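
# Usage sketch: hist1/hist2 are per-channel lists of cv2.calcHist outputs;
# HISTCMP_BHATTACHARYYA returns 0.0 for identical distributions and values
# near 1.0 for disjoint ones, so a lower mean score means more similar images.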
コード例 #53
0
def main():
    path = 'paintings/'
    orb = cv2.ORB_create(1000, 1.2)
    #dictionary for holding the histograms
    index = {}
    #dictionary for holding the RGB images
    images = {}
    #dictionary for holding the ORB descriptors
    bagdex = {}
    #dictionary for holding the ORB keypoints
    bagdex_kp = {}
    #dictionary for holding greyscale images
    greyges = {}
    for filename in os.listdir(path):
        if filename.endswith('.jpg'):
            print(filename)
            image = cv2.imread(path + filename)

            #histogram data
            images[filename] = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            hist = cv2.calcHist([image], [0, 1, 2], None, [8, 8, 8],
                                [0, 256, 0, 256, 0, 256])
            hist = cv2.normalize(hist, hist).flatten()
            index[filename] = hist

            #ORB feature descriptor data
            image = cv2.imread(path + filename, cv2.IMREAD_GRAYSCALE)
            greyges[filename] = image
            orb = cv2.ORB_create(250, 1.2)
            kp, des = orb.detectAndCompute(greyges[filename], None)
            bagdex[filename] = des
            bagdex_kp[filename] = kp

    active = True
    while (active):
        query_filepath = input("Please enter filepath of query image: \n")

        ind = query_filepath.find('/')
        query_filename = query_filepath
        if ind != -1:
            query_filename = query_filepath[ind + 1:]

        # Create histogram for query image
        query_image = cv2.imread(query_filepath)
        images[query_filename] = cv2.cvtColor(query_image, cv2.COLOR_BGR2RGB)
        query_hist = cv2.calcHist([query_image], [0, 1, 2], None, [8, 8, 8],
                                  [0, 256, 0, 256, 0, 256])
        query_hist = cv2.normalize(query_hist, query_hist).flatten()
        index[query_filename] = query_hist
        results = {}

        # Create ORB feature descriptor for query image
        query_bagage = cv2.imread(query_filepath, cv2.IMREAD_GRAYSCALE)
        greyges[query_filename] = query_bagage
        query_kp, query_des = orb.detectAndCompute(greyges[query_filename],
                                                   None)
        bagdex[query_filename] = query_des
        bagdex_kp[query_filename] = query_kp

        bagsults = {}

        # initialize the comparison method for histogram
        methodName = "Hellinger"
        method = cv2.HISTCMP_BHATTACHARYYA

        for (k, hist) in index.items():
            # compute the distance between the two histograms
            # using the method and update the results dictionary
            d = cv2.compareHist(index[query_filename], hist, method)
            results[k] = d

        for (k, des) in bagdex.items():
            d = 0
            bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
            matches = bf.match(bagdex[query_filename], des)
            matches = sorted(matches, key=lambda val: val.distance)

            for match in matches[:10]:
                d += match.distance
            bagsults[k] = d

        # normalize the results
        hist_sum = 0
        bag_sum = 0
        for (name, value) in results.items():
            hist_sum += results[name]
            bag_sum += bagsults[name]
        for (name, value) in results.items():
            results[name] = value / float(hist_sum)
            bagsults[name] = bagsults[name] / float(bag_sum)

        total_results = {}
        for (name, value) in results.items():
            total_results[name] = value + bagsults[name]

        # sort the results
        total_results = sorted([(v, k) for (k, v) in total_results.items()],
                               reverse=False)
        results = sorted([(v, k) for (k, v) in results.items()], reverse=False)
        bagsults = sorted([(v, k) for (k, v) in bagsults.items()],
                          reverse=False)

        # initialize the results figure
        fig = plt.figure("Results: ")

        # loop over the results
        n = 0
        for (i, (v, k)) in enumerate(bagsults):
            if n > 9:
                break
            n += 1
            # show the result
            ax = fig.add_subplot(1, 10, i + 1)
            ax.set_title("%s: %.2f" % (k, v))
            plt.imshow(images[k])
            plt.axis("off")

            # show the matching lines drawn instead
            #out = drawMatches(query_bagage, query_kp, greyges[k], bagdex_kp[k], matches[:10])

        #downloads the image into our own database
        #copyfile(query_filepath, 'paintings/copy_' + query_filename)

        # show the results
        plt.show()
コード例 #54
0
def checkliver():  # analyze the user's image
    print(user_file)  # path of the file the user selected
    p_var2.set(5)
    progress_bar2.update()
    image = cv2.imread(user_file)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = image.reshape((image.shape[0] * image.shape[1], 3))
    k = 30  # number of colors to extract
    clt = KMeans(n_clusters=k)
    clt.fit(image)
    hist = centroid_histogram(clt)
    colorSort = clt.cluster_centers_
    bar = plot_colors(hist, colorSort)
    global photo_userimgcheck  # must be global so the check image survives this function
    usercheckimg = Image.fromarray(bar)  # convert the NumPy array to an image
    photo_userimgcheck = ImageTk.PhotoImage(image=usercheckimg)  # wrap the bar as a PIL image for Tk
    userimg_bar.config(image=photo_userimgcheck, width=580, height=70)
    # image analysis code
    img_original = bar  # original
    img1 = cv2.imread(checkliver1)  # stage 1
    img2 = cv2.imread(checkliver2)  # stage 2
    img3 = cv2.imread(checkliver3)  # stage 3
    img4 = cv2.imread(checkliver4)  # stage 4
    p_var2.set(90)
    progress_bar2.update()

    # normalize the original
    img_original = cv2.cvtColor(img_original, cv2.COLOR_BGR2HSV)
    img_original = cv2.calcHist([img_original], [0, 1], None, [180, 256], [0, 180, 0, 256])
    img_original = cv2.normalize(img_original, None, 0, 1, cv2.NORM_MINMAX)

    imgs = [img1, img2, img3, img4]  # normalize only the stage reference images
    hists = []
    for i, img in enumerate(imgs):
        # 1. convert each image to HSV
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        # 2. compute the histogram over the H and S channels
        hist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
        # 3. normalize to the 0-1 range
        cv2.normalize(hist, hist, 0, 1, cv2.NORM_MINMAX)
        hists.append(hist)

    #query = hists[0]  # the original image

    correl_result = []
    chisqr_result = []
    intersect_result = []
    bhattacharyya_result = []

    correl = cv2.HISTCMP_CORREL
    chisqr = cv2.HISTCMP_CHISQR
    intersect = cv2.HISTCMP_INTERSECT  # ret = ret / np.sum(query); intersection is normalized to 1 by dividing by the query sum
    bhattacharyya = cv2.HISTCMP_BHATTACHARYYA
    listbox.insert(0, 'Selected image: ' + filenm[1])
    # correlation analysis
    for hist in hists:
        c_rs = cv2.compareHist(img_original, hist, correl)
        correl_result.append(c_rs)

    correl_result = np.array(correl_result)
    correl_similar = find_nearest(correl_result, 1.0)  # find the value closest to 1.0
    correl_result = correl_result.tolist()
    correl_similar_index = correl_result.index(correl_similar)  # index of the most similar image
    #print('This image is stage', correl_similar_index + 1)
    listbox.insert(1, 'Correlation analysis: this image is stage ' + str(correl_similar_index + 1) + '.')

    # chi-square
    for hist in hists:
        ch_rs = cv2.compareHist(img_original, hist, chisqr)
        chisqr_result.append(ch_rs)

    chisqr_result = np.array(chisqr_result)
    chisqr_similar = find_nearest(chisqr_result, 0)  # find the value closest to 0
    chisqr_result = chisqr_result.tolist()
    chisqr_similar_index = chisqr_result.index(chisqr_similar)  # index of the most similar image
    #print('This image is stage', chisqr_similar_index + 1)
    listbox.insert(2, 'Chi-square analysis: this image is stage ' + str(chisqr_similar_index + 1) + '.')

    # intersection
    for hist in hists:
        intersect_rs = cv2.compareHist(img_original, hist, intersect)
        intersect_result.append(intersect_rs / np.sum(img_original))

    intersect_result = np.array(intersect_result)
    intersect_similar = find_nearest(intersect_result, 1.0)  # find the value closest to 1.0
    intersect_result = intersect_result.tolist()
    intersect_similar_index = intersect_result.index(intersect_similar)  # index of the most similar image
    #print('This image is stage', intersect_similar_index + 1)
    listbox.insert(3, 'Intersection analysis: this image is stage ' + str(intersect_similar_index + 1) + '.')

    # Bhattacharyya distance
    for hist in hists:
        bhattacharyya_rs = cv2.compareHist(img_original, hist, bhattacharyya)
        bhattacharyya_result.append(bhattacharyya_rs)

    bhattacharyya_result = np.array(bhattacharyya_result)
    bhattacharyya_similar = find_nearest(bhattacharyya_result, 0)  # find the value closest to 0
    bhattacharyya_result = bhattacharyya_result.tolist()
    bhattacharyya_similar_index = bhattacharyya_result.index(bhattacharyya_similar)  # index of the most similar image
    #print('This image is stage', bhattacharyya_similar_index + 1)
    listbox.insert(4, 'Bhattacharyya distance analysis: this image is stage ' + str(bhattacharyya_similar_index + 1) + '.')

    # report the combined result
    total_list = [correl_similar_index, chisqr_similar_index, intersect_similar_index, bhattacharyya_similar_index]
    total_list = np.array(total_list)
    total_list_count = np.bincount(total_list)  # vote counts per stage
    print(total_list_count)
    total_list_max = np.max(total_list_count)  # highest vote count
    print(total_list_max)
    total_list_count = total_list_count.tolist()
    total_list_index = total_list_count.index(total_list_max)
    print(total_list_index)

    if total_list_max == 2:
        print('An accurate assessment is not possible. Please choose a different image to analyze.')
        listbox.insert(5, 'An accurate assessment is not possible. Please choose a different image to analyze.')
        listbox.insert(6, '======================================================================================')
    else:
        print('The result is stage', total_list_index + 1)
        listbox.insert(5, 'This image is stage ' + str(total_list_index + 1) + '.')
        listbox.insert(6, '======================================================================================')
    # display section
    userimg = Label(userimg_frame, image=logo)
    p_var2.set(100)
    progress_bar2.update()
コード例 #55
0
ファイル: sort.py プロジェクト: yrczxx/faceswap
 def get_avg_score_hist(img1, references):
     scores = []
     for img2 in references:
         score = cv2.compareHist(img1, img2, cv2.HISTCMP_BHATTACHARYYA)
         scores.append(score)
     return sum(scores) / len(scores)
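
 # Usage sketch: img1 and each entry of references are cv2.calcHist outputs;
 # the mean Bhattacharyya distance is lower for more similar face histograms.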
コード例 #56
0
ファイル: camera.py プロジェクト: LiuDaveLiu/ibllib
    def check_position(self,
                       hist_thresh=(75, 80),
                       pos_thresh=(10, 15),
                       metric=cv2.TM_CCOEFF_NORMED,
                       display=False,
                       test=False,
                       roi=None,
                       pct_thresh=True):
        """Check camera is positioned correctly
        For the template matching zero-normalized cross-correlation (default) should be more
        robust to exposure (which we're not checking here).  The L2 norm (TM_SQDIFF) should
        also work.

        If display is True, the template ROI (pink, hatched) is plotted over a video frame,
        along with the threshold regions (green, solid).  The histogram correlations are plotted,
        and the full histogram is plotted for one of the sample frames and the reference frame.

        :param hist_thresh: The minimum histogram cross-correlation threshold to pass (0-1).
        :param pos_thresh: The maximum number of pixels off that the template matcher may be off
         by. If two values are provided, the lower threshold is treated as a warning boundary.
        :param metric: The metric to use for template matching.
        :param display: If true, the results are plotted
        :param test: If true, use a reference frame instead of the frames in frame_samples.
        :param roi: A tuple of indices for the face template in the form ((y1, y2), (x1, x2))
        :param pct_thresh: If true, the thresholds are treated as percentages
        """
        if not test and self.data['frame_samples'] is None:
            return 'NOT_SET'
        refs = self.load_reference_frames(self.side)
        # ensure iterable
        pos_thresh = np.sort(np.array(pos_thresh))
        hist_thresh = np.sort(np.array(hist_thresh))

        # Method 1: compareHist
        ref_h = cv2.calcHist([refs[0]], [0], None, [256], [0, 256])
        frames = refs if test else self.data['frame_samples']
        hists = [cv2.calcHist([x], [0], None, [256], [0, 256]) for x in frames]
        corr = np.array([
            cv2.compareHist(test_h, ref_h, cv2.HISTCMP_CORREL)
            for test_h in hists
        ])
        if pct_thresh:
            corr *= 100
        hist_passed = [np.all(corr > x) for x in hist_thresh]

        # Method 2:
        top_left, roi, template = self.find_face(roi=roi,
                                                 test=test,
                                                 metric=metric,
                                                 refs=refs)
        (y1, y2), (x1, x2) = roi
        err = (x1, y1) - np.median(np.array(top_left), axis=0)
        h, w = frames[0].shape[:2]

        if pct_thresh:  # Threshold as percent
            # t_x, t_y = pct_thresh
            err_pct = [(abs(x) / y) * 100 for x, y in zip(err, (h, w))]
            face_passed = [all(err_pct < x) for x in pos_thresh]
        else:
            face_passed = [np.all(np.abs(err) < x) for x in pos_thresh]

        if display:
            plt.figure()
            # Plot frame with template overlay
            img = frames[0]
            ax0 = plt.subplot(221)
            ax0.imshow(img, cmap='gray', vmin=0, vmax=255)
            bounds = (x1 - err[0], x2 - err[0], y2 - err[1], y1 - err[1])
            ax0.imshow(template, cmap='gray', alpha=0.5, extent=bounds)
            if pct_thresh:
                for c, thresh in zip(('green', 'yellow'), pos_thresh):
                    t_y = (h / 100) * thresh
                    t_x = (w / 100) * thresh
                    xy = (x1 - t_x, y1 - t_y)
                    ax0.add_patch(
                        Rectangle(xy,
                                  x2 - x1 + (t_x * 2),
                                  y2 - y1 + (t_y * 2),
                                  fill=True,
                                  facecolor=c,
                                  lw=0,
                                  alpha=0.05))
            else:
                for c, thresh in zip(('green', 'yellow'), pos_thresh):
                    xy = (x1 - thresh, y1 - thresh)
                    ax0.add_patch(
                        Rectangle(xy,
                                  x2 - x1 + (thresh * 2),
                                  y2 - y1 + (thresh * 2),
                                  fill=True,
                                  facecolor=c,
                                  lw=0,
                                  alpha=0.05))
            xy = (x1 - err[0], y1 - err[1])
            ax0.add_patch(
                Rectangle(xy,
                          x2 - x1,
                          y2 - y1,
                          edgecolor='pink',
                          fill=False,
                          hatch='//',
                          lw=1))
            ax0.set(xlim=(0, img.shape[1]), ylim=(img.shape[0], 0))
            ax0.set_axis_off()
            # Plot the image histograms
            ax1 = plt.subplot(212)
            ax1.plot(ref_h[5:-1], label='reference frame')
            ax1.plot(np.array(hists).mean(axis=0)[5:-1], label='mean frame')
            ax1.set_xlim([0, 256])
            plt.legend()
            # Plot the correlations for each sample frame
            ax2 = plt.subplot(222)
            ax2.plot(corr, label='hist correlation')
            ax2.axhline(hist_thresh[0],
                        0,
                        self.n_samples,
                        linestyle=':',
                        color='r',
                        label='fail threshold')
            ax2.axhline(hist_thresh[1],
                        0,
                        self.n_samples,
                        linestyle=':',
                        color='g',
                        label='pass threshold')
            ax2.set(xlabel='Sample Frame #', ylabel='Hist correlation')
            plt.legend()
            plt.suptitle('Check position')
            plt.show()

        pass_map = {i: s for i, s in enumerate(('FAIL', 'WARNING', 'PASS'))}
        face_aligned = pass_map[sum(face_passed)]
        hist_correlates = pass_map[sum(hist_passed)]

        return self.overall_outcome([face_aligned, hist_correlates])
コード例 #57
0
                        )  # create gray scale histogram, removing black color

                    tot_pixel = histWB.sum()  # calculate not black pixel
                    histDetection_perc[0] = (histBlue / tot_pixel
                                             )  # normalize hist
                    histDetection_perc[1] = (histGreen / tot_pixel
                                             )  # normalize hist
                    histDetection_perc[2] = (histRed / tot_pixel
                                             )  # normalize hist

                    if (num_detection_squad_1 != 0):
                        hist_squad_1_perc = normalizeHist(
                            hist_squad_1, num_detection_squad_1
                        )  # normalize and mediates hist
                        compHist_1 = cv2.compareHist(
                            np.float32(hist_squad_1_perc),
                            np.float32(histDetection_perc),
                            cv2.HISTCMP_BHATTACHARYYA)  # compare hist
                    if (num_detection_squad_2 != 0):
                        hist_squad_2_perc = normalizeHist(
                            hist_squad_2, num_detection_squad_2
                        )  # normalize and mediates hist
                        compHist_2 = cv2.compareHist(
                            np.float32(hist_squad_2_perc),
                            np.float32(histDetection_perc),
                            cv2.HISTCMP_BHATTACHARYYA)  # compare hist

                    if (num_detection_referee != 0):
                        hist_referee_perc = normalizeHist(
                            hist_referee, num_detection_referee
                        )  # normalize and mediates hist
                        compHist_3 = cv2.compareHist(
コード例 #58
0
ファイル: new_finger_print.py プロジェクト: trendiguru/core
def spaciograms_distance_rating(spaciogram_1, spaciogram_2, rank):
    '''
    :param spaciogram_1:
    :param spaciogram_2:
    :param rank:
    :return:
    '''
    ############ CHECKS ############
    # check if spaciogram_1.shape == spaciogram_2.shape:
    rating = []
    # spaciogram_1 = np.array(spaciogram_1)
    # spaciogram_2 = np.array(spaciogram_2)
    # if spaciogram_1.shape != spaciogram_2.shape is False:
    #     print 'Error: the dimensions of spaciogram_1 and spaciogram_2 are not equal! \n' \
    #           'shapes are: 1st - ' + str(spaciogram_1.shape) + '\n' \
    #           'shapes are: 2nd - ' + str(spaciogram_2.shape)
    #     return rating
    if rank < 1 or rank > 3:
        print('Error: only 3 ranks, rank = 1, 2 or 3!')
        return rating
    # # Define number of rows (overall bin count):
    # numRows = spaciogram_1.size
    # dims = len(spaciogram_1.shape)
    # bins_per_dim = len(spaciogram_1)
    # signature_1 = np.zeros([numRows, dims+1]) #cv2.CreateMat(numRows, dims, cv2.CV_32FC1)
    # print signature_1.shape
    # signature_2 = signature_1 #cv2.CreateMat(numRows, dims, cv2.CV_32FC1)
    # sigrature_index = 0
    # # fill signature_natures:
    # # TODO: for production optimize this, use Numpy (reshape?)
    # for d1 in range(0, bins_per_dim - 1):
    #     for d2 in range(0, bins_per_dim - 1):
    #         for d3 in range(0, bins_per_dim - 1):
    #             for d4 in range(0, bins_per_dim - 1):
    #                 for d5 in range(0, bins_per_dim - 1):
    #                     # signature 1:
    #                     signature_1[sigrature_index, :] = [spaciogram_1[d1, d2, d3, d4, d5], d1, d2, d3, d4, d5]
    #                     # bin_val = cv2.QueryHistValue_2D(spaciogram_1, d1, d2, d3, d4, d5)
    #                     # cv.Set2D(signature_1, sigrature_index, 0, bin_val) #bin value
    #                     # cv.Set2D(signature_1, sigrature_index, 1, d1)  #coord1
    #                     # cv.Set2D(signature_1, sigrature_index, 2, d2) #coord2
    #                     # cv.Set2D(signature_1, sigrature_index, 3, d3)  #coord3
    #                     # cv.Set2D(signature_1, sigrature_index, 4, d4) #coord4
    #                     # cv.Set2D(signature_1, sigrature_index, 5, d5)  #coord5
    #                     # signature 2:
    #                     signature_2[sigrature_index, :] = [spaciogram_2[d1, d2, d3, d4, d5], d1, d2, d3, d4, d5]
    #                     # bin_val2 = cv2.QueryHistValue_2D(spaciogram_2, d1, d2, d3, d4, d5)
    #                     # cv.Set2D(signature_2, sigrature_index, 0, bin_val2) #bin value
    #                     # cv.Set2D(signature_2, sigrature_index, 1, d1)  #coord1
    #                     # cv.Set2D(signature_2, sigrature_index, 2, d2) #coord2
    #                     # cv.Set2D(signature_2, sigrature_index, 3, d3)  #coord3
    #                     # cv.Set2D(signature_2, sigrature_index, 4, d4) #coord4
    #                     # cv.Set2D(signature_2, sigrature_index, 5, d5)  #coord5
    #                     sigrature_index += 1
    #                     print spaciogram_1[d1, d2, d3, d4, d5]
    # signature_1 = np.zeros([spaciogram_1.size / len(spaciogram_1),  len(spaciogram_1)])
    # sigrature_index = 0
    # # print len(spaciogram_1)
    # for dim in spaciogram_1:
    #     signature_1[:, sigrature_index] = dim.flatten()
    #     sigrature_index += 1
    #
    # signature_2 = np.zeros([spaciogram_2.size / len(spaciogram_1),  len(spaciogram_2)])
    # sigrature_index = 0
    # for dim in spaciogram_2:
    #     signature_2[:, sigrature_index] = dim.flatten()
    #     sigrature_index += 1

    # signature_1 = np.reshape(spaciogram_1, (spaciogram_1[0].size, len(spaciogram_1)))
    # signature_2 = np.reshape(spaciogram_2, (spaciogram_2[0].size, len(spaciogram_2)))

    method = cv2.HISTCMP_CHISQR
    # HISTCMP_CORREL Correlation
    # HISTCMP_CHISQR Chi-Square
    # HISTCMP_INTERSECT Intersection
    # HISTCMP_BHATTACHARYYA Bhattacharyya distance
    # HISTCMP_HELLINGER Synonym for HISTCMP_BHATTACHARYYA
    # HISTCMP_CHISQR_ALT
    # HISTCMP_KL_DIV

    if rank != 3:
        rating = cv2.compareHist(spaciogram_1, spaciogram_2, method)
    # elif rank == 2:
    #     rating = cv2.compareHist(np.array(spaciogram_1[1]).astype('float32'),
    #                              np.array(spaciogram_2[1]).astype('float32'), method)
    elif rank == 3:
        rating = 0.0
        for i in range(2, len(spaciogram_1)):
            rating += cv2.compareHist(spaciogram_1[i], spaciogram_2[i], method)
    else:
        rating = []

    # rating = emd(signature_1, signature_2)
    return rating
コード例 #59
0
    def detect_video(self, yolo):
        from PIL import Image, ImageFont, ImageDraw
        #Start ROS node
        pub, pub_flag = start_node()
        accum_time = 0
        curr_fps = 0
        fps = "FPS: ??"
        prev_time = timer()

        while True:
            if self.ret:
                frame = self.frames[0]
                depth_frame = self.frames[1]
                if (type(frame) is int)or(type(depth_frame) is int):
                    print("!!!CAUTION!!! type of frame is int")
                    continue
                image = Image.fromarray(frame)
                image, bottle, person, right, left, bottom, top, right2, left2, bottom2, top2 = yolo.detect_image(image, pub)

                result = np.asarray(image)
                curr_time = timer()
                exec_time = curr_time - prev_time
                prev_time = curr_time
                accum_time = accum_time + exec_time
                curr_fps = curr_fps + 1
                if accum_time > 1:
                    accum_time = accum_time - 1
                    fps = "FPS: " + str(curr_fps)
                    curr_fps = 0
                cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale=0.50, color=(255, 0, 0), thickness=2)
                cv2.imshow("result", result)

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

                if (not bottle) or (not person):
                    continue

            # ------------------------------Tracking-----------------------------------
                # tracker_types = ['BOOSTING', 'MIL','KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
                # tracker_type = tracker_types[7]
                tracker = cv2.TrackerCSRT_create()
                tracker2 = cv2.TrackerCSRT_create()

                # setup initial location of the tracking windows from the detected boxes
                r,h,ci,w = top, bottom-top, left, right-left  # row, height, column, width of the bottle box
                import matplotlib.pyplot as plt  # only needed for the commented-out histogram plots
                # frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
                # per-channel reference histograms of the detected bottle box (used by the drift check below)
                frame_b, frame_g, frame_r = frame[:,:,0], frame[:,:,1], frame[:,:,2]
                hist_b = cv2.calcHist([frame_b[top:bottom, left:right]],[0],None,[256],[0,256])
                hist_g = cv2.calcHist([frame_g[top:bottom, left:right]],[0],None,[256],[0,256])
                hist_r = cv2.calcHist([frame_r[top:bottom, left:right]],[0],None,[256],[0,256])
                cv2.normalize(hist_b, hist_b,0,255,cv2.NORM_MINMAX)
                cv2.normalize(hist_g, hist_g,0,255,cv2.NORM_MINMAX)
                cv2.normalize(hist_r, hist_r,0,255,cv2.NORM_MINMAX)
                # plt.plot(hist_r, color='r', label="r")
                # plt.plot(hist_g, color='g', label="g")
                # plt.plot(hist_b, color='b', label="b")
                # plt.show()
                track_window = (ci, r, w, h)
                r2,h2,ci2,w2 = top2, bottom2-top2, left2, right2-left2  # row, height, column, width of the person box
                track_window2 = (ci2, r2, w2, h2)
                cv2.imwrite('bottledetect.jpg', frame[r:r+h, ci:ci+w])
                cv2.imwrite('persondetect.jpg', frame[r2:r2+h2, ci2:ci2+w2])

                # set up the ROI for tracking
                roi = frame[r:r+h, ci:ci+w]
                hsv_roi =  cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
                mask = cv2.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
                roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
                cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)

                # Setup the termination criteria: either 10 iterations or movement by at least 1 pt
                # (meanshift leftover; not used by the CSRT trackers below)
                term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )

                ok = tracker.init(frame, track_window)
                ok2 = tracker2.init(frame, track_window2)

                track_thing = 0 #bottle
                pts = Point()
                pts2 = Point()
                untrack = 0

                while(1):
                    if self.ret:
                        frame = self.frames[0]
                        depth_frame = self.frames[1]
                        depth = depth_frame

                        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
                        dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)  # meanshift leftover; unused

                        # update the CSRT trackers to get the new window locations
                        print(track_window2)
                        ok, track_window = tracker.update(frame)
                        x,y,w,h = track_window

                        ok2, track_window2 = tracker2.update(frame)
                        x2,y2,w2,h2 = track_window2

                        # Draw it on image
                        img2 = cv2.rectangle(frame, (x,y), (x+w,y+h), 255,2)
                        if not track_thing:
                            img2 = cv2.rectangle(img2, (x2,y2), (x2+w2,y2+h2), 255,2)
                        else:
                            img2 = cv2.rectangle(img2, (x2, y2), (x2+w2, y2+h2),(0, 0, 255), 2)
                        cv2.imshow('Tracking',img2)

                        # https://www.intelrealsense.com/wp-content/uploads/2020/06/Intel-RealSense-D400-Series-Datasheet-June-2020.pdf
                        # average valid depth (in meters) over a 3x3 patch at the bottle window center
                        total, cnt = 0, 0
                        for i in range(3):
                            for j in range(3):
                                # dep = depth[j+y+h//2, i+x+w//2]*0.001
                                dep = depth[np.minimum((j+y+h//2), 479), np.minimum((i+x+w//2), 639)]*0.001
                                if (dep)!=0:
                                    total += dep
                                    cnt += 1
                        if cnt!=0:
                            worldz = total/cnt
                        else:
                            worldz = 0

                        # same 3x3 depth averaging for the person window
                        total2, cnt2 = 0, 0
                        for i in range(3):
                            for j in range(3):
                                dep2 = depth[np.minimum((j+y2+h2//2), 479), np.minimum((i+x2+w2//2), 639)]*0.001
                                if dep2!=0:
                                    total2 += dep2
                                    cnt2 += 1
                        if cnt2!=0:
                            worldz2 = total2/cnt2
                        else:
                            worldz2 = 0

                        print('worldz', worldz)
                        print('worldz2', worldz2)
                        if (worldz == 0) or (worldz2 == 0):
                            worldx, worldy = 0, 0
                            pts.x, pts.y, pts.z = 0.0, 0.0, 0.0
                            worldx2, worldy2 = 0, 0
                            pts2.x, pts2.y, pts2.z = 0.0, 0.0, 0.0
                        else:
                            # focal length ≈ 1.9 mm (1.88 mm in the code below), baseline between the depth cameras ≈ 5 cm, pixel size = 3 µm
                            # (a standalone sketch of this conversion follows this method)
                            if (track_thing==0):
                                # bottle tracking
                                # disparity in pixels: baseline * focal_length / (pixel_size * depth)
                                u_ud = (0.05*1.88*10**(-3))/(3*10**(-6)*worldz)
                                print('u_ud', u_ud)
                                # the -0.3*u_ud term accounts for the physical offset between the depth and color cameras
                                # these coordinates are relative to the left depth camera viewing the object
                                worldx = 0.05*(x+w//2 - (img2.shape[1]//2) - 0.3*u_ud)/u_ud
                                worldy = 0.05*((img2.shape[0]//2) - (y+h))/u_ud
                                print('x,y,z = ', worldx, worldy, worldz)
                                pts.y, pts.z, pts.x = float(worldx), float(worldy), float(worldz)

                            else:
                                # person tracking
                                u_ud = (0.05*1.88*10**(-3))/(3*10**(-6)*worldz2)
                                print('u_ud', u_ud)
                                worldx2 = 0.05*(x2+w2//2 - (img2.shape[1]//2) - 0.3*u_ud)/u_ud
                                worldy2 = 0.05*((img2.shape[0]//2) - (y2+h2))/u_ud
                                print('x2,y2,z2 = ', worldx2, worldy2, worldz2)
                                pts2.y, pts2.z, pts2.x = float(worldx2), float(worldy2), float(worldz2)

                        print("track_thing = ", track_thing)

                        # current per-channel histograms of the tracked bottle window (for the drift check below)
                        frame_b, frame_g, frame_r = frame[:,:,0], frame[:,:,1], frame[:,:,2]
                        hist_b2 = cv2.calcHist([frame_b[y: y+h, x: x+w]],[0],None,[256],[0,256])
                        hist_g2 = cv2.calcHist([frame_g[y: y+h, x: x+w]],[0],None,[256],[0,256])
                        hist_r2 = cv2.calcHist([frame_r[y: y+h, x: x+w]],[0],None,[256],[0,256])
                        # plt.plot(hist_r2, color='r', label="r")
                        # plt.plot(hist_g2, color='g', label="g")
                        # plt.plot(hist_b2, color='b', label="b")
                        # plt.show()

                        cv2.normalize(hist_b2, hist_b2,0,255,cv2.NORM_MINMAX)
                        cv2.normalize(hist_g2, hist_g2,0,255,cv2.NORM_MINMAX)
                        cv2.normalize(hist_r2, hist_r2,0,255,cv2.NORM_MINMAX)
                        corr_b = cv2.compareHist(hist_b, hist_b2, cv2.HISTCMP_CORREL)
                        corr_g = cv2.compareHist(hist_g, hist_g2, cv2.HISTCMP_CORREL)
                        corr_r = cv2.compareHist(hist_r, hist_r2, cv2.HISTCMP_CORREL)
                        print('compareHist(b)', corr_b)
                        print('compareHist(g)', corr_g)
                        print('compareHist(r)', corr_r)
                        print('track_window = ', track_window)
                        # lost track: any channel correlation at or below 0.3, or an empty window from either tracker
                        if ((corr_b <= 0.3) or (corr_g <= 0.3) or (corr_r <= 0.3)
                                or (track_window == (0, 0, 0, 0)) or (track_window2 == (0, 0, 0, 0))):
                            untrack += 1
                            print("untrack = ", untrack)
                            if untrack>=30:
                                print("Tracking lost!\n")
                                break
                        if ((worldy<=-0.5) and (not track_thing)):
                            print("Littering detected!\n")
                            track_thing = 1  # switch to tracking the person

                        if track_thing==0:
                            tracking_point = pts
                            flag = 0 #bottle
                        else:
                            tracking_point = pts2
                            flag = 1 #person
                        pub.publish(tracking_point)
                        pub_flag.publish(flag)

                        k = cv2.waitKey(60) & 0xff
                        if k == 27:
                            break

                    else:
                        break

        yolo.close_session()
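
The world-coordinate math inside detect_video can be read more easily as a standalone function. Below is a hedged sketch under the same assumed constants (5 cm baseline, 1.88 mm focal length, 3 µm pixels, 640x480 frames); pixel_to_world is a made-up name, not part of the original class:

def pixel_to_world(px, py, depth_m, img_w=640, img_h=480,
                   baseline=0.05, focal=1.88e-3, pixel_size=3e-6):
    """Back-project an image pixel plus a depth reading to metric coordinates
    (a sketch of the expressions used above, not a calibrated camera model)."""
    if depth_m == 0:
        return 0.0, 0.0, 0.0  # no valid depth reading
    u_ud = (baseline * focal) / (pixel_size * depth_m)  # disparity in pixels
    # the 0.3*u_ud term mirrors the color-to-depth camera offset used above
    world_x = baseline * (px - img_w // 2 - 0.3 * u_ud) / u_ud
    world_y = baseline * (img_h // 2 - py) / u_ud
    return world_x, world_y, depth_m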
コード例 #60
 def score_images(self, im1, im2, method="orb"):
     """Score the similarity between two images according to different methods.
        im1 = framed photo ; im2 = db_im (the "orb" branch treats im2 as a file path)"""
     # assumed imports for this snippet: cv2, numpy as np, scipy.spatial, logging,
     # and `from skimage.metrics import structural_similarity as ssim`
     score = 0
     if method == "ssim":
         im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
         im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
         score = ssim(cv2.resize(im1, (im2.shape[1], im2.shape[0]), interpolation = cv2.INTER_AREA), im2, multichannel=False)
     if method == "hist_inter":
         #crop_zone = (20,35,203,171)
         #im1 = im1[20:203, 35:171]
         #im2 = im2[20:203, 35:171]
         # photo_hist = cv2.calcHist([cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)], [0], None, [256], [0,256])
         # gray_card_im = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
         # image_hist = cv2.calcHist([gray_card_im], [0], None, [256], [0,256])
         score = 0
         # per-channel histogram intersection, summed over the channels
         # (assumes im1 and im2 are color images with the same channel count)
         for i in range(im1.shape[-1]):
             photo_hist = cv2.calcHist([im1], [i], None, [256], [0,256])
             image_hist = cv2.calcHist([im2], [i], None, [256], [0,256])
             score += cv2.compareHist(photo_hist, image_hist, method = cv2.HISTCMP_INTERSECT)
     if method == "cor":
         im1 = cv2.resize(im1, (im2.shape[1], im2.shape[0]))
         # im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
         # im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
         # with equal-size inputs this returns a 1x1 array holding the normalized correlation coefficient
         score = cv2.matchTemplate(im2, im1, cv2.TM_CCOEFF_NORMED)
         # score = self.correlation_coefficient(im1, im2)
     if method == "diff":
         im1 = cv2.resize(im1, (im2.shape[1], im2.shape[0]))
         # note: uint8 subtraction wraps around, so this effectively counts differing pixels
         diff = im1 - im2
         flat = np.array(diff).flatten()
         numchange = np.count_nonzero(flat)
         score = 100 * float(numchange) / float(len(flat))
     if method == "hog":
         im1 = cv2.resize(im1, (im2.shape[1], im2.shape[0]))
         im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
         im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
         # H1 = feature.hog(im1, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2), transform_sqrt=True)
         # H2 = feature.hog(im2, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2), transform_sqrt=True)
         # score = cv2.compareHist(np.float32(H1), np.float32(H2), method = cv2.HISTCMP_BHATTACHARYYA)
         im1 = np.float32(im1) / 255.0
         im2 = np.float32(im2) / 255.0
         # Calculate gradient 
         im1_gx = cv2.Sobel(im1, cv2.CV_32F, 1, 0, ksize=1)
         im1_gy = cv2.Sobel(im1, cv2.CV_32F, 0, 1, ksize=1)
         im2_gx = cv2.Sobel(im2, cv2.CV_32F, 1, 0, ksize=1)
         im2_gy = cv2.Sobel(im2, cv2.CV_32F, 0, 1, ksize=1)
         # Calculate gradient magnitude and direction (in degrees)
         mag1, angle1 = cv2.cartToPolar(im1_gx, im1_gy, angleInDegrees=True)
         mag2, angle2 = cv2.cartToPolar(im2_gx, im2_gy, angleInDegrees=True)
         # Compute correlation between gradient angles via cosine distance
         # (h, w) = angle1.shape[:2]
         # print(h)
         # print(w)
         score = np.nanmin(1 - scipy.spatial.distance.cdist(angle1, angle2, "cosine"))
         # score = self.ccoeff_normed(angle1, angle2)
     if method == "orb":
         # See https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html
         im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
         im2 = cv2.imread(im2, 0)  # in this branch im2 is a file path, read as grayscale
         #im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
         # Initiate ORB detector
         orb = cv2.ORB_create()
         # find the keypoints and descriptors with ORB
         kp1, des1 = orb.detectAndCompute(im1,None)
         kp2, des2 = orb.detectAndCompute(im2,None)
         logging.info(des1.shape)
         logging.info(des2.shape)
         logging.info(type(des1))
         logging.info(type(des2))
         # create BFMatcher object
         bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
         # Match descriptors.
         matches = bf.match(des1,des2)
         matches = sorted(matches, key = lambda x:x.distance)
         # lower is better here: sum of the 20 smallest match distances
         score = sum(m.distance for m in matches[:20])
     return score
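
A quick usage sketch for score_images under assumed inputs; Matcher, query.jpg, and card.jpg are hypothetical stand-ins, since the snippet only shows the method itself:

import cv2

matcher = Matcher()  # hypothetical class that owns score_images
photo = cv2.imread("query.jpg")  # framed photo (BGR), hypothetical file
card = cv2.imread("card.jpg")    # database image (BGR), hypothetical file

print(matcher.score_images(photo, card, method="hist_inter"))  # higher = more similar
print(matcher.score_images(photo, card, method="ssim"))        # in [-1, 1], higher = more similar
# the "orb" branch expects a file path for im2, and lower scores mean closer matches
print(matcher.score_images(photo, "card.jpg", method="orb"))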