Example #1
def ApproxPupilPos(grayNormIm, gamma=10):
  # grayNormIm is expected to be grayscale, normalized to [0, 1]
  weight = np.power((1 - grayNormIm), gamma)
  pCum = cv2.sumElems(grayNormIm * weight)
  wCum = cv2.sumElems(weight)
  weightAvg = pCum[0] / wCum[0]
  # take the 100 pixels darkest relative to the weighted average
  subArr = (grayNormIm - weightAvg).flatten()
  closest = np.argsort(subArr)[:100]
  rows = closest // grayNormIm.shape[1]
  cols = closest % grayNormIm.shape[1]
  closest2dInd = [(r, c) for r, c in zip(rows, cols)]
  # choose the candidate with the smallest mean distance to all others
  pupDist = distance.squareform(distance.pdist(closest2dInd))
  sumDist = np.sum(pupDist, 0) / (pupDist.shape[0] - 1)
  distPos = np.nonzero(np.min(sumDist) == sumDist)
  pos = closest2dInd[distPos[0][0]]
  return pos
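A minimal usage sketch for the function above (the file name is hypothetical; the snippet assumes a grayscale image normalized to [0, 1] and the imports shown):

import cv2
import numpy as np
from scipy.spatial import distance

gray = cv2.imread('eye.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input image
grayNorm = gray.astype(np.float32) / 255.0          # normalize to [0, 1]
row, col = ApproxPupilPos(grayNorm)
print('approximate pupil position (row, col):', row, col)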
Example #2
def Pyramid(img):
	YUV = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
	YUV = cv2.resize(YUV, (40, 40))
	Y, U, V = cv2.split(YUV)
	gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
	img = cv2.resize(gray, (26, 26))
	# [-1, 0, 1] gradient kernels, vertical and horizontal
	kernel1 = np.array([[-1], [0], [1]], np.float32)
	kernel2 = np.array([[-1, 0, 1]], np.float32)
	dst = cv2.filter2D(img, cv2.CV_16S, kernel1)
	dstv1 = np.int16(dst)
	dstv2 = cv2.pow(dstv1, 2)
	dst = cv2.filter2D(img, cv2.CV_16S, kernel2)
	dsth1 = np.int16(dst)
	dsth2 = cv2.pow(dsth1, 2)
	# gradient magnitude
	dst1 = np.float32(dsth2 + dstv2)
	dstfinal = cv2.sqrt(dst1).astype(np.uint8)
	finalh = dsth1
	finalv = dstv1
	finalm = dstfinal
	# quantize gradient direction into bins 0-7 from the signs of the
	# vertical/horizontal responses and which magnitude dominates
	UporDown = (finalv > 0).astype(int)
	LeftorRight = 2 * (finalh > 0).astype(int)
	absh = np.float32(np.abs(finalh))
	absv = np.float32(np.abs(finalv)) * 1.732
	high = 4 * (absv > absh).astype(int)
	out = high + LeftorRight + UporDown
	features = [np.zeros(out.shape[:2], np.uint8) for _ in range(6)]
	for x in range(out.shape[0]):
		for y in range(out.shape[1]):
			z = out[x][y]
			if z == 4 or z == 6:
				features[4][x][y] = finalm[x][y]
			elif z == 5 or z == 7:
				features[5][x][y] = finalm[x][y]
			else:
				features[z][x][y] = finalm[x][y]
	lastFeatures = []
	for feat in features:
		# normalize each feature map by its mean element value
		tote = cv2.sumElems(feat)[0] / feat.size
		if tote != 0:
			feat = feat / tote
		lastFeatures.append(feat)
	return lastFeatures
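A hedged usage sketch (the input file is hypothetical; the function expects a BGR image and returns six orientation-binned gradient maps):

import cv2
import numpy as np

img = cv2.imread('face.png')  # hypothetical BGR input
features = Pyramid(img)       # six 26x26 orientation feature maps
for i, f in enumerate(features):
    print(i, cv2.sumElems(np.float32(f))[0])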
Example #3
def ApproxPupilPos(grayNormIm, ellipsePos, brightness=4, gamma=10):
  AVGG = cv2.mean(grayNormIm)[0]
  normCoeff = 0.5
  # brightness normalization, currently disabled:
  #imNorm = grayNormIm * (normCoeff / AVGG) ** brightness
  imNorm = grayNormIm
  weight = np.power((1 - imNorm), gamma)
  pCum = cv2.sumElems(imNorm * weight)
  wCum = cv2.sumElems(weight)
  weightAvg = pCum[0] / wCum[0]
  subArr = (imNorm - weightAvg).flatten()
  closest = np.argsort(subArr)[:100]
  rows = closest // imNorm.shape[1]
  cols = closest % imNorm.shape[1]
  closest2dInd = [(r, c) for r, c in zip(rows, cols)]
  pupDist = distance.squareform(distance.pdist(closest2dInd))
  sumDist = np.sum(pupDist, 0) / (pupDist.shape[0] - 1)
  distPos = np.nonzero(np.min(sumDist) == sumDist)
  pos = np.uint16(closest2dInd[distPos[0][0]])
  # snap to a previously fitted ellipse, currently disabled:
  #if ellipsePos is not None:
  #  posEllDist = np.sqrt(np.sum((np.asarray(pos) - np.asarray((ellipsePos[0][1], ellipsePos[0][0])))**2))
  #  if posEllDist < np.min(np.asarray(ellipsePos[1])):
  #    pos = (np.round(ellipsePos[0][1]), np.round(ellipsePos[0][0]))
  return (pos, imNorm)
Example #4
    def compute_boxscore(self, boxsize=17):
        """
        Counting the number of dark pixels in the image gives a clue as to
        whether the box is checked or not. But sometimes the sponsor writes
        around the box, which inflates the number of dark pixels. This is
        why we start by cropping precisely around the box. Then we compute
        the Canny edge and Canny curve images and merge them.
        :param boxsize:
        :return:
        """
        # Negative image
        img = 255 - np.copy(self.img)

        # detect the box
        left, right, top, bottom = self._box_coordinates(
            img, squarsize=boxsize)

        # Detect the line borders with the Canny edge detector
        canny1 = cv2.Canny(img, 20, 20)
        # Detect the line itself with the Canny curve detector
        canny2 = self._canny_curve_detector(
            img, low_thresh=20, high_thresh=20)
        # Merge the two Canny images by keeping the maximum for each pixel
        canny = cv2.max(canny1, canny2)
        # Crop around the box
        canny = canny[top:bottom, left:right]
        self.canny = canny
        # Compute the normalized sum of the cropped image
        count = cv2.sumElems(canny)[0] / 255
        return count
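The counting idea in isolation, as a minimal sketch (the file name and crop bounds are made up; `_box_coordinates` and `_canny_curve_detector` are class internals not shown in this snippet):

import cv2

img = 255 - cv2.imread('checkbox.png', cv2.IMREAD_GRAYSCALE)  # negative image, hypothetical file
canny = cv2.Canny(img, 20, 20)
crop = canny[10:27, 10:27]           # made-up box coordinates
count = cv2.sumElems(crop)[0] / 255  # number of edge pixels inside the box
print(count)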
Example #5
def main():
    parser  = argparse.ArgumentParser()
    parser.add_argument("--verbosity", "-v", help="Increase output verbosity", action="store_true")
    parser.add_argument("--show", "-s", help="Show the video diff on screen", action="store_true")
    parser.add_argument("--plot", "-p", help="Show a plot of the frame diffs and detected shot changes", action="store_true")
    parser.add_argument("filename", help="The video file to process")
    args = parser.parse_args()

    cap = cv2.VideoCapture(args.filename)

    #cap = cv2.VideoCapture('../../www/assets/miners.mp4')
    #cap = cv2.VideoCapture('../../www/assets/gayrights.mp4')
    #cap = cv2.VideoCapture('../../www/assets/sculpture.mp4')

    prev_frame = None

    diffs = []
    frame_counter = 0
    while(True):
        ret, frame = cap.read()
        
        if not ret: 
            break
        if prev_frame is None:
            prev_frame = frame.copy()

        diff = cv2.absdiff(frame, prev_frame)
        change = (sum(cv2.sumElems(diff))/(diff.size * 3.0))/255.0
        diffs.append(change)


        if args.verbosity:
            debug_data = {"frame":frame_counter, "diff":change}
            print(json.dumps(debug_data))

        if args.show:
            cv2.imshow('frame',diff)
            k = cv2.waitKey(30) & 0xff
            if k == 27:
                break

        prev_frame = frame
        frame_counter += 1
        
    cap.release()
    cv2.destroyAllWindows()

    peaks_index = [0]
    peaks_index += list(detect_peaks(diffs, mpd=30, threshold=0.01))
    peaks_index.append(len(diffs)-1)
    peaks = [0]*len(diffs)
    for p in peaks_index:
        peaks[p] = 0.1

    print json.dumps({"shotchange_frames":peaks_index, "frame_diffs":list(diffs)})

    if args.plot:
        plt.plot(diffs)
        plt.plot(peaks)
        plt.show()
Example #6
def diffImg(t0, t1, t2):
    d1 = cv2.absdiff(t2, t1)
    d2 = cv2.absdiff(t1, t0)
    result = cv2.bitwise_and(d1, d2)
    (value, result) = cv2.threshold(result, threshold, 255, cv2.THRESH_BINARY)
    scalar = cv2.sumElems(result)
    return scalar
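A sketch of driving the three-frame difference above from a camera (`threshold` is the module-level value the snippet relies on; 25 is an arbitrary assumption):

import cv2

threshold = 25  # assumed global used by diffImg
cap = cv2.VideoCapture(0)
grab = lambda: cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2GRAY)
t0, t1, t2 = grab(), grab(), grab()
for _ in range(100):
    print('motion score:', diffImg(t0, t1, t2)[0])
    t0, t1, t2 = t1, t2, grab()
cap.release()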
Example #7
def calculate_particle_area(image, x_pos, y_pos, width, height):
    total = 0
    sub_rows = image[y_pos:y_pos+height + 1]
    for row in sub_rows:
        sub_cols = row[x_pos:x_pos + width + 1]
        total += cv2.sumElems(sub_cols)[0]
    return total
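The per-row loop above can be collapsed into a single sub-matrix sum; a sketch of the equivalent call:

def calculate_particle_area_fast(image, x_pos, y_pos, width, height):
    # one sumElems over the whole sub-rectangle gives the same total
    return cv2.sumElems(image[y_pos:y_pos + height + 1, x_pos:x_pos + width + 1])[0]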
Example #8
    def get(self, im):
        # Init the first image.
        if self.flag is False:
            self.first = im
            self.flag = True
            return None

        max_diff = 1000.0
        diff_x = None
        diff_y = None
        for x in range(0, WIDTH, 5):
            for y in range(HEIGHT-1, -1, -5):
                cropped_im    = im[y:y+5, x:x+5]
                cropped_first = self.first[y:y+5, x:x+5]
                current_diff = cv2.mean(cv2.sumElems(cv2.absdiff(cropped_first, cropped_im)))[0]
                if max_diff < current_diff:
                    max_diff = current_diff
                    diff_x = WIDTH-x
                    diff_y = HEIGHT-y

            # Early return - if found a local maximum in the current row, no
            # need to search in the other rows.
            if diff_x is not None:
                return (diff_x, diff_y)
                
        #print("Diff: " + str(current_diff))

        if diff_x is not None:
            return (diff_x, diff_y)
        else:
            return None
Example #9
def pre_frame(frame):
  global use_which_learn
  
  f = cv2.blur(frame, (3,3))
  hsv = cv2.split(cv2.cvtColor(f, cv2.COLOR_BGR2HSV))
  gray = cv2.cvtColor(f, cv2.COLOR_BGR2GRAY)
  
  if use_which_learn is None:
    light = diff_g.background_diff(gray)
    dark = diff_dark_g.background_diff(gray)
    
    ld_mask = cv2.imread('light_dark_mask.jpg')
    ld_mask = cv2.cvtColor(ld_mask,cv2.COLOR_BGR2GRAY)
    ret, ld_mask = cv2.threshold(ld_mask, 40, 255, cv2.THRESH_BINARY)
    light = cv2.bitwise_and(light, ld_mask)
    dark =  cv2.bitwise_and(dark , ld_mask)
    
    # print "light", cv2.sumElems(light)
    # print "dark", cv2.sumElems(dark)
    
    light_ret = cv2.sumElems(light)
    dark_ret = cv2.sumElems(dark)
    
    if light_ret[0] > dark_ret[0]:
      print("============= night ==============")
      use_which_learn = 2
    else:
      print("============= day ==============")
      use_which_learn = 1
    
  if use_which_learn == 1:    
    mask_h = diff_h.background_diff(hsv[0])
    mask_h = cv2.bitwise_and(mask_h, green_mask)
    # cv2.imshow('frame2', mask_h)
    
    mask_g = diff_g.background_diff(gray)
    # cv2.imshow('frame3', mask_g)
  else:
    mask_h = diff_dark_h.background_diff(hsv[0])
    mask_h = cv2.bitwise_and(mask_h, green_mask)
    # cv2.imshow('frame2', mask_h)
    
    mask_g = diff_dark_g.background_diff(gray)
    # cv2.imshow('frame3', mask_g)
  
  return  cv2.bitwise_or(mask_g, mask_h)
Example #10
def centroid(a):
	h, w = a.shape
	key = "%d %d" % a.shape
	if key not in centroid_mat_cache:
		x = np.arange(0, w, dtype = np.float32) - w / 2.0 + 0.5
		y = np.arange(0, h, dtype = np.float32) - h / 2.0 + 0.5
		centroid_mat_x, centroid_mat_y = np.meshgrid(x, y)
		centroid_mat_cache[key] = (centroid_mat_x, centroid_mat_y)
	else:
		(centroid_mat_x, centroid_mat_y) = centroid_mat_cache[key]
		
	s = np.sum(a)
	if s == 0.0:
		return 0, 0
	x = cv2.sumElems(cv2.multiply(a, centroid_mat_x, dtype=cv2.CV_32FC1))[0] / s
	y = cv2.sumElems(cv2.multiply(a, centroid_mat_y, dtype=cv2.CV_32FC1))[0] / s
	return x, y
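A small self-contained check of the helper (it relies on a module-level `centroid_mat_cache` dict, initialized here):

import cv2
import numpy as np

centroid_mat_cache = {}  # cache assumed by centroid()
a = np.zeros((5, 5), np.float32)
a[1, 3] = 1.0
print(centroid(a))  # offset from the image center: (1.0, -1.0)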
Example #11
def isThereMotion(t0, t1):
  d1 = cv2.absdiff(t0, t1)
  total = cv2.sumElems(d1)[0]  # avoid shadowing the built-in sum()
  global avg
  avg = avg*0.8 + total*0.2
  if total > avg*1.05: print('ding!')
  print(total, avg)
  return total
Example #12
def plot_8(array):
    names = ["up", "ne", "lr", "se", "dn", "sw", "rl", "nw"]
    for n in range(1, 9):
        print("n is:", n)
        print("sum is:", cv2.sumElems(array[n-1]))
        plt.subplot(2, 4, n)
        plt.imshow(array[n-1], interpolation="none")
        plt.title(names[n-1])
    plt.show()
Example #13
def isDoorOpen(img):
    global template
    global isOpen
    global openThresh
    global closeThresh
    tmp = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    tmp[:,:,2] = 0
    total = cv2.sumElems(cv2.sumElems(cv2.absdiff(tmp,template)))
    #print "Total: " + str(total[0])
    if total[0] > openThresh:
        #print "Open"
        isOpen = True
        return True
    #print "Closed"
    if total[0] <= closeThresh:
        isOpen = False
        return False
    return isOpen
Example #14
def isDark(frame):
	global darknessThreshold, width, height
	# sum the pixels of each channel of the frame
	# return True for darkness if every channel sum is below the threshold
	frameSum = cv2.sumElems(frame)
	if (frameSum[0] < darknessThreshold and frameSum[1] < darknessThreshold and frameSum[2] < darknessThreshold):
		return True
	else:
		return False
Example #15
def imgColorida(img):
    # Sum the elements of each channel
    img = cv2.sumElems(img)

    # If the B, G and R channel sums are all equal, the image is not colored
    if not (img[0] == img[1] == img[2]):
        return True
    else:
        return False
Example #16
def get_text_in_image_by_comparision(image, labeled_images_folder_param):
	
	print(type(image))
	print(image)
	
	if image is None:
		raise Exception('Input search image is None')
	
	labeled_images_folder = labeled_images_folder_param
	
	#compare image to all images in the labeled_images folder and return the name of the best match

	image = resize_to_square_image(image)

	if show_images:
		cv2.imshow("image", image)

	#compare letter to all images
	dirs = os.listdir(labeled_images_folder)
	
	results = {}
	
	for filename in dirs:

		compare_image = cv2.imread(os.path.join(labeled_images_folder, filename), cv2.IMREAD_GRAYSCALE)
		if(compare_image is None):
			raise Exception('Compare image is None')
		
		compare_image = resize_to_square_image(compare_image)
	 
		compare_matrix = cv2.absdiff(compare_image, image)

		results[filename] = cv2.sumElems(compare_matrix)
		
	results = collections.OrderedDict(sorted(results.items(), key=lambda t: t[1], reverse=False))

	for filename, distance in results.items():
		print(filename, distance)

	best_match = next(iter(results))  # first key of the sorted OrderedDict
	
	if(debug):
		best_match_image = cv2.imread(os.path.join(labeled_images_folder, best_match), cv2.IMREAD_GRAYSCALE)
		
		best_match_image = resize_to_square_image(best_match_image)
		if show_images:
			cv2.imshow("best_match_image", best_match_image)
	
	
	#remove the filename extension
	split_best_match = best_match.split('.')
	
	best_match = split_best_match[0]
	
	print "Best match:",best_match
	return best_match
Example #17
	def _getMotion(self):
		if not self._ready():
			return None
		d1 = cv2.absdiff(self.__image1, self.__image0)
		d2 = cv2.absdiff(self.__image2, self.__image0)
		result = cv2.bitwise_and(d1, d2)
		(value, result) = cv2.threshold(result, self._THRESHOLD, 255, cv2.THRESH_BINARY)
		scalar = cv2.sumElems(result)
		print(' - scalar:', scalar[0], scalar)
		return scalar
Example #18
def diff(f0, f1, f2):
	global total
	d1 = cv2.absdiff(f2, f1)
	d2 = cv2.absdiff(f1, f0)
	overlap = cv2.bitwise_and(d1, d2)

	# binary threshold(src, thresh, maxval, type)
	# changing thresh val will affect total
	ret, thresh = cv2.threshold(overlap, 20, 255, cv2.THRESH_BINARY)
	total = total + cv2.sumElems(thresh)[0]
	return thresh
Example #19
def procesarImagen(binary) : 
	print()
	#img = cv2.imread("/home/msuarez/code/celulas/ENSAYO1/magneto/t1.jpg",1)
	nparr = np.frombuffer(binary, np.uint8)
	img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
	img = img[55:425,243:430]
	
	img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
	
	
	plt.subplot(131),plt.imshow(img),plt.title('Original')
	plt.xticks([]), plt.yticks([])
	
	kernel = np.ones((5,5),np.uint8)
	e = (1,1,1,1)
	while e > (0,0,0,0):
		'''img2 = cv2.dilate(img,kernel,iterations = 1)
		img3 = cv2.max(cv2.erode(img2,kernel,iterations = 1),img)'''
		img3 = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
		d = cv2.absdiff(img,img3)
		e = cv2.sumElems(d)
		img = img3
	
	plt.subplot(132),plt.imshow(img),plt.title('Paso 1')
	plt.xticks([]), plt.yticks([])
		
	e = (1,1,1,1)
	while e > (0,0,0,0):
		'''img2 = cv2.erode(img,kernel,iterations = 1)
		img3 = cv2.min(cv2.dilate(img2,kernel,iterations = 1),img)'''
		img3 = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)	
		d = cv2.absdiff(img,img3)
		e = cv2.sumElems(d)
		img = img3
	
	plt.subplot(133),plt.imshow(img),plt.title('Final')
	plt.xticks([]), plt.yticks([])
	# plt.show()
	binary2 = cv2.imencode('.jpg', img)[1].tobytes()
	return binary2
Example #20
 def image_callback(self, msg):
     self.image_sub.unregister()
     try:
         cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
         sum_of_pixels = max(cv.sumElems(cv_image))
     except:
         try:
             cv_image = self.bridge.imgmsg_to_cv(msg, "bgr8")
             sum_of_pixels = max(cv.Sum(cv_image))
         except CvBridgeError as e:
             rospy.logerr("[%s] failed to convert image to cv", self.__class__.__name__)
             return
Example #21
def grayworld(image):
    b, g, r = cv2.split(image)
    imsum = cv2.sumElems(image)
    print(imsum)
    illum = [i / (np.shape(image)[0] * np.shape(image)[1]) for i in imsum]
    scale = (illum[0] + illum[1] + illum[2]) / 3
    print(illum)
    r = (r * scale) * (1 / illum[2])
    g = (g * scale) * (1 / illum[1])
    b = (b * scale) * (1 / illum[0])
    gryw = cv2.merge((b, g, r))
    return gryw
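Usage sketch (the file names are hypothetical; the result is clipped back to uint8 for saving):

import cv2
import numpy as np

img = cv2.imread('scene.jpg')  # hypothetical input
balanced = grayworld(img)      # scale each channel toward the gray-world average
cv2.imwrite('scene_gw.jpg', np.uint8(np.clip(balanced, 0, 255)))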
Example #22
def main():
    img = cv2.imread('data/whitebalance.jpg')

    show(img)

    center = img[1000 : 1200, 400 : 600] #y,x
    before = cv2.sumElems(center)
    scale = np.double(max(before[0:3])) / before[0:3]
    
    img = np.uint8(img * scale)

    show(img)
Example #23
def compute_PSNR(imagename1, imagename2):
	img1 = cv2.imread(imagename1)
	img2 = cv2.imread(imagename2)
	height, width = img1.shape[:2]

	s1 = cv2.absdiff(img1, img2)
	s1 = np.float32(s1)
	s1 = cv2.multiply(s1, s1)
	S = cv2.sumElems(s1)
	sse = S[0] + S[1] + S[2]
	mse = sse / (3 * height * width)
	psnr = 10.0 * math.log10((255 * 255) / mse)
	return "{0:.4f}".format(psnr)
Example #24
    def calculate_fitness_individual(self, individual):
        #calculate the pixel difference between the resultant image and the real image
     
        pixels = self.height * self.width
        maxfitness = 256

        img = self.draw_circles(individual)
        subtract = cv2.absdiff(self.image, img) 
        fitness1 = cv2.sumElems(subtract)
        #print fitness1
        fitness2 = fitness1[0] + fitness1[1] + fitness1[2]
        error = fitness2/pixels
        fitness2 = maxfitness - error
        
        return fitness2
Example #25
def PSNR(I2):
        s1 = cv2.absdiff(I1, I2)
        s1 = np.float32(s1)
        s1 = cv2.multiply(s1, s1)

        s = cv2.sumElems(s1)
        sse = s[0] + s[1] + s[2]

        if (sse <= 1e-10):
             return 0
        else:
             mse = sse/(len(I1.shape) * I1.shape[0]*I1.shape[1])
             psnr = 10*math.log((255*255)/mse, 10)		
             return psnr
Example #26
 def detect_motion(self,img):
     if self.lastImg is None:
         self.lastImg = img
         return False
     diff = cv2.absdiff(self.lastImg,img)
     self.lastImg = img
     kernel = np.ones((5,5),np.uint8)
     diff = cv2.medianBlur(diff,5)
     diff = cv2.morphologyEx(diff, cv2.MORPH_OPEN, kernel)
     diff = cv2.morphologyEx(diff, cv2.MORPH_CLOSE, kernel)
     diff = cv2.threshold(diff,10,255,cv2.THRESH_BINARY)
     r=(cv2.sumElems(diff[1]))[0]
     if r > Config.DETECTION_THRESHOLD:
         logging.info("Motion Detected %d"%(self.motionDetected))
         return True
     return False
Example #27
def calcPSNR(i1, i2):
    s1 = cv2.absdiff(i1, i2)
    s1 = np.float32(s1)
    s1 = cv2.multiply(s1, s1)
    s = cv2.sumElems(s1)

    sse = s[0] + s[1] + s[2]
    print(s[0], s[1], s[2])
    if (sse <= 1e-10):
        return 0
    else:
        print(i1.shape[0], i1.shape[1])
        # len(i1.shape) is 3 for a color image, so the SSE is averaged over all channels
        mse = sse / (len(i1.shape) * i1.shape[0] * i1.shape[1])
        psnr = 10 * math.log((255 * 255) / mse, 10)
    return psnr
Example #28
    def check_motion(self):
        pixel_sum, _, _, _ = cv2.sumElems(self.current_image)
        #print pixel_sum
        if (pixel_sum > self.threshold):
            sleep(1.0)
            self.current_f_rgb = self.cam.read()[1]
            cv2.imwrite('image.png', self.current_f_rgb)
            imgur_img = self.imgur_client.upload_from_path('image.png')
            link = imgur_img['link']
            body_text = "INTRUDER! " + link
            message = self.client.messages.create(body=body_text,
                    to=self.test_no,
                    from_="your twilio number")

            return True
        else:
            return False
Example #29
    def _convert(self, video):
        if not hasattr(video, "frame_index"):
            frame_index = range(video.n_frames)
        else:
            frame_index = video.frame_index

        if not hasattr(video, "history"):
            history = pd.DataFrame(columns=["filter", "value", "n_frames"])
        else:
            history = video.history.copy()

        if self.every is not None:
            new_idx = range(video.n_frames)[:: self.every]
            history.loc[history.shape[0]] = ["every", self.every, len(new_idx)]
        elif self.hertz is not None:
            interval = int(video.fps / self.hertz)
            new_idx = range(video.n_frames)[::interval]
            history.loc[history.shape[0]] = ["hertz", self.hertz, len(new_idx)]
        elif self.top_n is not None:
            import cv2

            diffs = []
            for i, img in enumerate(video.frames):
                if i == 0:
                    last = img
                    continue
                diffs.append(sum(cv2.sumElems(cv2.absdiff(last, img))))
                last = img
            new_idx = sorted(range(len(diffs)), key=lambda i: diffs[i], reverse=True)[: self.top_n]
            history.loc[history.shape[0]] = ["top_n", self.top_n, len(new_idx)]

        frame_index = sorted(list(set(frame_index).intersection(new_idx)))

        # Construct new VideoFrameStim for each frame index
        onsets = [frame_num * (1.0 / video.fps) for frame_num in frame_index]
        elements = []
        for i, f in enumerate(frame_index):
            if f != frame_index[-1]:
                dur = onsets[i + 1] - onsets[i]
            else:
                dur = (len(video.frames) / video.fps) - onsets[i]

            elem = VideoFrameStim(video=video, frame_num=f, duration=dur)
            elements.append(elem)

        return DerivedVideoStim(filename=video.filename, elements=elements, frame_index=frame_index, history=history)
Example #30
def findDot(image, squareSize, stepSize):
    shape = image.shape
    cols = shape[1]
    rows = shape[0]

    maxRow = 0
    maxCol = 0
    maxVal = 0

    for col in range(0, cols, stepSize):
        for row in range(0, rows, stepSize):
            sumElems = cv2.sumElems(image[row:(row + squareSize), col:(col + squareSize)])[0]
            if sumElems > maxVal:
                maxRow = row
                maxCol = col
                maxVal = sumElems

    return (maxCol, maxRow, maxVal)
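Usage sketch (file name, square size, and step are arbitrary; any single-channel image works):

import cv2

img = cv2.imread('dots.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input
col, row, val = findDot(img, 20, 5)
print('brightest square at', (col, row), 'sum =', val)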
Example #31
img = cv2.imread('../../Images/DJI_0006.JPG')
#showImage(img)

# cvt to RGB instead of BGR
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# Exercise 1 - find pixels saturated in all color channels and in every single channel
r = img[:, :, 0]
g = img[:, :, 1]
b = img[:, :, 2]
r_saturated = r[:, :] > 254
g_saturated = g[:, :] > 254
b_saturated = b[:, :] > 254

# count numbers of overexposed elements
r_values = cv2.sumElems(1.0 * r_saturated)
g_values = cv2.sumElems(1.0 * g_saturated)
b_values = cv2.sumElems(1.0 * b_saturated)
#total_values = cv2.sumElems(1.0*)

print("r oversatuated: %d" % r_values[0])
print("g oversatuated: %d" % g_values[0])
print("b oversatuated: %d" % b_values[0])

#compare_original_and_its_color_channels(img)
thresImg = threshold(img, 190)
#compare_original_and_segmented_image(img, thresImg)

#convert to hsv
imgHSV = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
Example #32
    return content


# capture frames from the camera
for frame in camera.capture_continuous(rawCapture,
                                       format="bgr",
                                       use_video_port=True):
    # grab the raw NumPy array representing the image, then initialize the timestamp
    # and occupied/unoccupied text
    image = frame.array
    # Do a bunch of processing

    SAD = None

    if previousImage is not None:
        SAD = sum(cv2.sumElems(cv2.absdiff(previousImage, image)))

    # if we have enough history, check for changes
    if len(absDiffHistory) > 18:
        prevLast = absDiffHistory[-1]
        absDiffHistory.append(SAD)
        stddev = numpy.std(absDiffHistory)
        mean = sum(absDiffHistory) / len(absDiffHistory)
        threshold = prevLast * -1.0 + mean * 2.0 + stddev * 2.0
        if threshold < SAD:
            file_name = '/var/tmp/{0}.jpg'.format(current_idx % max_idx)
            cv2.imwrite(file_name, image)
            current_idx += 1
            print("Over threshold and not blur! ", file_name)

    elif SAD is not None:
Example #34
import cv2
import time
video_capture = cv2.VideoCapture(1)
video_capture.set(3, 1280)
video_capture.set(4, 720)
video_capture.set(10, 0.6)
ret, frame_old = video_capture.read()
i = 0
j = 0
while True:
    time.sleep(0.5)
    ret, frame = video_capture.read()
    diffimg = cv2.absdiff(frame, frame_old)  # simply difference the old and new frames
    d_s = cv2.sumElems(diffimg)
    d = (d_s[0] + d_s[1] + d_s[2]) / (1280 * 720)
    frame_old = frame
    print(d)
    if i > 30:  # the camera needs the first 5-10 frames to settle, so skip them
        if d > 1:  # trigger threshold
            cv2.imwrite("base2/" + str(j) + ".jpg", frame)
            j = j + 1
    else:
        i = i + 1
Example #35
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.BackgroundSubtractorMOG(5000, 16, 0.80)
while(1):
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    bgSum = cv2.sumElems(fgmask)
    #if bgSum[0] > 10000:
        #print("FACE!")	
    #else:
        #print("NO FACE")
    cv2.imshow('frame',fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
Example #36
    def vidcap(self):
        while (self.cap.isOpened()):
            # Capture frame-by-frame, edge detection
            _, self.frame = self.cap.read()
            frame = cv2.resize(self.frame, (1024, 800))
            frame = frame[300:550, 550:700]
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            kernel = (7, 7)
            blur = cv2.GaussianBlur(frame, kernel, 0)
            edges = cv2.Canny(blur, 80, 200)
            closing = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)

            #Identifying edges as points(x and y coordinates) and packing into a list
            indices = np.where(closing != [0])
            coordinates = list(zip(indices[1], indices[0]))
            num = len(coordinates)

            #Separating coordinates into top and bottom edge
            bot_cor = coordinates[:int(num / 2)]
            top_cor = coordinates[-int(num / 2):]

            #Converting into arrays, sorting
            a, b = np.array(top_cor), np.array(bot_cor)
            a, b = a[a[:, 0].argsort()], b[b[:, 0].argsort()]

            #Approximation with a 3rd degree polynomial
            min_a_x, max_a_x = np.min(a[:, 0]), np.max(a[:, 0])
            new_a_x = np.linspace(min_a_x, max_a_x, 1000)
            a_coefs = np.polyfit(a[:, 0], a[:, 1], 3)
            new_a_y = np.polyval(a_coefs, new_a_x)

            min_b_x, max_b_x = np.min(b[:, 0]), np.max(b[:, 0])
            new_b_x = np.linspace(min_b_x, max_b_x, 1000)
            b_coefs = np.polyfit(b[:, 0], b[:, 1], 3)
            new_b_y = np.polyval(b_coefs, new_b_x)

            #Defining the center line
            midx = [
                np.average([new_a_x[i], new_b_x[i]], axis=0)
                for i in range(1000)
            ]
            midy = [
                np.average([new_a_y[i], new_b_y[i]], axis=0)
                for i in range(1000)
            ]

            #Identifying the coordinates of the centerline, packing into a list
            coords = list(zip(midx, midy))
            points = list(np.int_(coords))

            #Drawing a center line as a series of points (circles)
            for point in points:
                cv2.circle(frame, tuple(point), 1, (255, 255, 255), -1)

            for point in points:
                cv2.circle(closing, tuple(point), 1, (255, 255, 255), -1)

            #Dividing closing by 255 to get 0's and 1's, performing
            #an accumulate addition for each column.
            a = np.add.accumulate(closing / 255, 0)
            #Clipping values: anything greater than 2 becomes 2
            a = np.clip(a, 0, 2)
            #Performing a modulo, to get areas alternating with 0 or 1; then multiplying by 255
            a = a % 2 * 255
            #Converting to uint8
            mask1 = cv2.convertScaleAbs(a)

            #Flipping the array to get a second mask
            a = np.add.accumulate(np.flip(closing, 0) / 255, 0)
            a = np.clip(a, 0, 2)
            a = a % 2 * 255
            mask2 = cv2.convertScaleAbs(np.flip(a, 0))

            #Summing the intensities of pixels in each mask
            sums = [0, 0]
            sums[0] = s1 = cv2.sumElems(cv2.bitwise_and(frame, mask1))
            sums[1] = s2 = cv2.sumElems(cv2.bitwise_and(frame, mask2))

            cv2.imshow('masked1', cv2.bitwise_and(frame, mask1))
            cv2.imshow('masked2', cv2.bitwise_and(frame, mask2))
            cv2.imshow('mask1', mask1)
            cv2.imshow('mask2', mask2)

            # Display the resulting frame
            cv2.imshow('frame', frame)
            cv2.imshow('closing', closing)

            if cv2.waitKey(5) & 0xFF == ord('q'):

                self.cap.release()
                cv2.destroyAllWindows()
Example #37
def horz_proj(img):
    height, width = img.shape
    h_proj = []
    for i in range(height):
        h_proj.append(cv2.sumElems(img[i])[0])
    return h_proj
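The same horizontal projection as one vectorized call, for comparison:

import numpy as np

def horz_proj_fast(img):
    # row-wise sums; equivalent to cv2.sumElems on each row
    return np.sum(img, axis=1, dtype=np.float64).tolist()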
Example #38
def lucy_richardson_deconv(img, num_iterations, sigmag):
    """" Lucy-Richardson Deconvolution Function
    // input-1 img: NxM matrix image
    // input-2 num_iterations: number of iterations
    // input-3 sigma: sigma of point spread function (PSF)
    // output result: deconvolution result
    """

    epsilon = 2.2204e-16
    win_size = 8 * sigmag + 1  # Window size of PSF

    # Initializations Numpy
    j1 = img.copy()
    j2 = img.copy()
    w_i = img.copy()
    im_r = img.copy()

    t1 = np.zeros(img.shape, dtype=np.float32)
    t2 = np.zeros(img.shape, dtype=np.float32)
    tmp1 = np.zeros(img.shape, dtype=np.float32)
    tmp2 = np.zeros(img.shape, dtype=np.float32)
    # size = (w, h, channels), grayscale -> channels = 1

    # Lucy - Rich.Deconvolution CORE
    lambda_ = 0
    for j in range(1, num_iterations):
        # gotta clean this up, maybe a warmup before the for-loop
        if j > 1:
            # calculation of lambda
            # https://docs.opencv.org/2.4/modules/core/doc/operations_on_arrays.html#multiply
            tmp1 = t1 * t2
            tmp2 = t2 * t2

            # https://docs.opencv.org/2.4/modules/core/doc/operations_on_arrays.html#sum
            lambda_ = cv2.sumElems(tmp1)[0] / (cv2.sumElems(tmp2)[0] + epsilon)

        # y = j1 + (lambda_ * (j1 - j2))
        y = j1 + np.multiply(lambda_, np.subtract(j1, j2))

        y[(y < 0)] = 0

        # applying Gaussian filter
        re_blurred = cv2.GaussianBlur(y, (int(win_size), int(win_size)),
                                      sigmag)
        re_blurred[(re_blurred <= 0)] = epsilon

        cv2.divide(w_i, re_blurred, im_r, 1,
                   cv2.CV_64F)  # couldn't get numpys divide to work yet
        im_r = im_r + epsilon

        # applying Gaussian filter
        im_r = cv2.GaussianBlur(im_r, (int(win_size), int(win_size)), sigmag)

        j2 = j1.copy()
        # print(f"{y.dtype}, {im_r.dtype}")
        j1 = y * im_r

        t2 = t1.copy()
        t1 = j1 - y

    result = j1.copy()
    return result
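A hedged usage sketch (the input file is hypothetical; sigmag=2.0 gives an odd Gaussian window of 8*2+1=17, as cv2.GaussianBlur requires, and float64 input keeps the cv2.divide call with CV_64F writing into a matching buffer):

import cv2
import numpy as np

img = cv2.imread('blurred.png', cv2.IMREAD_GRAYSCALE).astype(np.float64)
result = lucy_richardson_deconv(img, num_iterations=10, sigmag=2.0)
cv2.imwrite('deconvolved.png', np.clip(result, 0, 255).astype(np.uint8))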
Example #39
# convert it to binary
imgray[imgray >= 255] = 1


# read rgb image
im_rgb = cv.imread('circularimage.bmp')
plt.figure(2)
plt.imshow(im_rgb)


# convert single channel imgray image to three channel for masking
imgray3 = np.zeros_like(im_rgb)
imgray3[:,:,0] = imgray
imgray3[:,:,1] = imgray
imgray3[:,:,2] = imgray


# mask rgb image
croppedCircularImage= cv.multiply(im_rgb,imgray3)
plt.figure(3)
plt.imshow(croppedCircularImage)

# calculate average R channel value
pixelpointsCV2 = cv.findNonZero(imgray)
numberofnonzeroelements= pixelpointsCV2.shape[0]

sumvalues = cv.sumElems(croppedCircularImage[:,:,2])[0]

average = sumvalues / numberofnonzeroelements
Example #40
    def run(self):
        global wt_1, wt, isQFull

        wt = self.wt  # 0 is no motion, 1 is motion at t
        wt_1 = self.wt_1  # 0 is no motion, 1 is motion at t_1
        Kt = self.Kt  # Counter
        K1 = self.K1  # 0.7 * M

        Np = self.Np
        K2_up = self.K2_up
        K2_down = self.K2_down

        while 1:
            # cv2.imshow("origDepth", imutils.resize(get_depth(pts), height=320))
            isQFull = self.imageQ.full()
            if isQFull is False:
                self.imageQ.put(self.get_depth())
                continue

            motionMat = np.zeros(self.get_depth().shape)
            for i in range(Np - 2, -1, -1):
                # noinspection PyTypeChecker
                motionMat += cv2.absdiff(self.qlist()[Np - 1], self.qlist()[i])

            Tm = 0.00015 * 65535
            motionBin = np.zeros(motionMat.shape)
            motionBin[motionMat >= Tm] = 1

            Cmt, _, _, _ = cv2.sumElems(motionBin)
            # print(Cmt)

            if wt == 0:
                # to detect when motion starts
                if Cmt > K1 and Kt < K2_up:
                    Kt += 1
                elif Cmt < K1 and 0 < Kt < K2_up:
                    Kt -= 1
                    if Kt < 0:
                        Kt = 0
                elif Kt == K2_up:
                    wt = 1
                    Kt = 0
            elif wt == 1:
                if Cmt < K1 and Kt < K2_down:
                    Kt += 1
                elif Cmt > K1 and 0 < Kt < K2_down:
                    Kt -= 1
                    if Kt < 0:
                        Kt = 0
                elif Kt == K2_down:
                    if wt_1 == 1:
                        wt = 0
                        Kt = 0
                    elif wt_1 == 0:
                        wt = 1
                        Kt = 0
                elif wt_1 == 0 and Kt < K2_down:
                    wt = 0
                    Kt = 0
                elif wt_1 == 1 and Kt < K2_down:
                    wt = 1
                    Kt = 0

            self.imageQ.get()
            self.imageQ.put(self.get_depth())

            wt_1 = wt
Example #41
def data_generator(index, state, cf_data_num_h, writer, raw_in_data,
                   label_in_data, data_path, benchmark_name):
    '''read as gray'''
    raw_im = imread(raw_in_data, 0)
    label_im = imread(label_in_data, 0)
    '''get whole image info'''
    height, width = label_im.shape  # raw_im.shape

    print("Raw image info: ", height, width)
    '''crop size & num of generate image'''

    cf_crop_size = 256

    detected = 0
    count = 0
    '''determine how many hotspots in the cropped image can be labeled as positive sample '''
    threshold = 255

    while cf_data_num_h > 0:  # or cf_data_num_nh >= 0:
        if index == 4:
            cf_crop_size = 128

        # NOTE: we use left half as training data
        if benchmark_name == 'iccad16':
            # randint's upper bound is exclusive, so +1 keeps the old
            # (inclusive) random_integers behavior
            random_x = abs(
                np.random.randint(0, width // 2 + 1) - cf_crop_size)
        else:
            random_x = abs(np.random.randint(0, width + 1) - cf_crop_size)

        random_y = abs(np.random.randint(0, height + 1) - cf_crop_size)
        bottom_right_x = random_x + cf_crop_size
        bottom_right_y = random_y + cf_crop_size
        crop_data = raw_im[random_y:bottom_right_y, random_x:bottom_right_x]
        crop_label = label_im[random_y:bottom_right_y, random_x:bottom_right_x]
        sum_label = int(cv2.sumElems(crop_label)[0])
        '''
        crop the label 4 with small size and upsample to 128 -> 256
        * INTER_NEAREST, make sure the hotspot pixel value is 255
        '''
        if index == 4:
            cf_crop_size = 256
            crop_data = cv2.resize(crop_data, (cf_crop_size, cf_crop_size),
                                   interpolation=cv2.INTER_NEAREST)
            crop_label = cv2.resize(crop_label, (cf_crop_size, cf_crop_size),
                                    interpolation=cv2.INTER_NEAREST)

        ismiddle = 0  # check whether in the middle or not
        delta = 34
        mask_type = 'png'

        if index == 4 and benchmark_name == 'iccad16':
            delta = 68

        object_id = index - 1
        object_name = 'hotspot_' + str(object_id)

        xmin = []
        ymin = []
        xmax = []
        ymax = []
        classes = []
        classes_text = []
        masks = []

        if sum_label != 0 and sum_label >= threshold and cf_data_num_h > 0:
            # find hotspot
            detected = 1
            h, w = crop_data.shape

            if h == cf_crop_size and w == cf_crop_size:
                # cv2.imwrite(data_path + str(index) + '_' + str(count) + '.png',
                #             crop_data)
                # cv2.imwrite(
                #     data_path + str(index) + '_' + str(count) + '_label.png',
                #     crop_label)
                '''generate bounding box'''
                for j in range(cf_crop_size - 4):
                    for i in range(cf_crop_size - 4):
                        if (index != 4 and crop_label[j, i] == 255) or (
                                index == 4 and np.sum(
                                    crop_label[j:j + 256 // 128, i:i +
                                               256 // 128]) == 2 * 2 * 255):
                            ismiddle = 1
                            box_min_x = i - delta
                            box_min_y = j - delta
                            box_max_x = i + delta
                            box_max_y = j + delta

                            if box_min_x < 0:
                                box_min_x = 1

                            if box_min_y < 0:
                                box_min_y = 1

                            if box_max_x >= cf_crop_size:
                                box_max_x = cf_crop_size - 1

                            if box_max_y >= cf_crop_size:
                                box_max_y = cf_crop_size - 1

                            xmin.append(float(box_min_x) / float(cf_crop_size))
                            ymin.append(float(box_min_y) / float(cf_crop_size))
                            xmax.append(float(box_max_x) / float(cf_crop_size))
                            ymax.append(float(box_max_y) / float(cf_crop_size))
                            classes_text.append(object_name.encode('utf8'))
                            classes.append(object_id)

                if ismiddle:
                    img_path = data_path + \
                        str(index) + '_' + str(count) + '.png'
                    label_path = data_path + \
                        str(index) + '_' + str(count) + '_label.png'
                    mask_path = data_path + \
                        str(index) + '_' + str(count) + '_mask.png'

                    cv2.imwrite(img_path, crop_data)
                    cv2.imwrite(label_path, crop_label)
                    cv2.imwrite(mask_path, crop_label)

                    filename = str(index) + '_' + str(count) + '.png'
                    count += 1
                    cf_data_num_h -= 1
                    with tf.gfile.GFile(img_path, 'rb') as fid:
                        encoded_png = fid.read()
                    encoded_png_io = io.BytesIO(encoded_png)
                    image = Image.open(encoded_png_io)
                    if image.format != 'PNG':
                        raise ValueError('Image format not PNG')
                    key = hashlib.sha256(encoded_png).hexdigest()

                    with tf.gfile.GFile(mask_path, 'rb') as fid:
                        encoded_mask_png = fid.read()
                    encoded_png_mask_io = io.BytesIO(encoded_mask_png)
                    mask = Image.open(encoded_png_mask_io)
                    if mask.format != 'PNG':
                        raise ValueError('Mask format not PNG')

                    mask_np = np.asarray(mask)
                    mask_remapped = (mask_np != 2).astype(np.uint8)
                    masks.append(mask_remapped)

                    encoded_mask_png_list = []
                    for mask in masks:
                        img = Image.fromarray(mask)
                        output = io.BytesIO()
                        img.save(output, format='PNG')
                        encoded_mask_png_list.append(output.getvalue())

                    example = tf.train.Example(features=tf.train.Features(
                        feature={
                            'image/height':
                            dataset_util.int64_feature(h),
                            'image/width':
                            dataset_util.int64_feature(w),
                            'image/filename':
                            dataset_util.bytes_feature(filename.encode(
                                'utf8')),
                            'image/source_id':
                            dataset_util.bytes_feature(filename.encode(
                                'utf8')),
                            'image/key/sha256':
                            dataset_util.bytes_feature(key.encode('utf8')),
                            'image/encoded':
                            dataset_util.bytes_feature(encoded_png),
                            'image/format':
                            dataset_util.bytes_feature('png'.encode('utf8')),
                            'image/object/bbox/xmin':
                            dataset_util.float_list_feature(xmin),
                            'image/object/bbox/xmax':
                            dataset_util.float_list_feature(xmax),
                            'image/object/bbox/ymin':
                            dataset_util.float_list_feature(ymin),
                            'image/object/bbox/ymax':
                            dataset_util.float_list_feature(ymax),
                            'image/object/class/text':
                            dataset_util.bytes_list_feature(classes_text),
                            'image/object/class/label':
                            dataset_util.int64_list_feature(classes),
                        }))

                    writer.write(example.SerializeToString())
Example #42
    #cv2.imshow("origDepth", imutils.resize(get_depth(pts), height=320))
    a = time.time()
    if imageQ.full() is False:
        imageQ.put(get_depth(pts))
        continue

    motionMat = np.zeros(get_depth(pts).shape)
    for i in range(Np - 2, -1, -1):
        # noinspection PyTypeChecker
        motionMat += cv2.absdiff(qlist(imageQ)[Np - 1], qlist(imageQ)[i])

    Tm = 0.00015 * 65535
    motionBin = np.zeros(motionMat.shape)
    motionBin[motionMat >= Tm] = 1

    Cmt, _, _, _ = cv2.sumElems(motionBin)
    print(Cmt)

    if wt == 0:
        # to detect when motion starts
        if Cmt > K1 and Kt < K2:
            Kt += 1
        elif Cmt < K1 and 0 < Kt < K2:
            Kt -= 1
            if Kt < 0:
                Kt = 0
        elif Kt == K2:
            wt = 1
            Kt = 0
    elif wt == 1:
        if Cmt < K1 and Kt < K2:
Example #43
yolo = yolo.YOLO(**kwargs)

while (1):
    ret, frame = video.read()
    subframe = cv2.subtract(frame, avgframe)
    grayscaled = cv2.cvtColor(subframe, cv2.COLOR_BGR2GRAY)
    retval2, th1 = cv2.threshold(grayscaled, 35, 255, cv2.THRESH_BINARY)
    avgframe = cv2.addWeighted(frame, 0.1, avgframe, 0.9, 0.0)

    if show:
        cv2.imshow('Frame', frame)
        cv2.imshow('Treshold diff', th1)

    th1 = th1 / 255
    h, w = th1.shape  # shape is (rows, cols) = (height, width)
    coverage = cv2.sumElems(th1)[0] / (w * h)  # fraction of changed pixels; avoids shadowing sum()
    print("SUM:", coverage, w, h)
    if coverage > 0.001:
        fconv = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(fconv)
        out_boxes, out_scores, out_classes = yolo.detect_image_boxes(image)
        detect_name = ''
        detect_class = ''
        max_score = 0
        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = yolo.class_names[c]
            score = out_scores[i]
            if score > max_score:
                max_score = score
                detect_name = predicted_class + "-" + str(score) + "-"
                detect_class = predicted_class
Example #44
def test_pair(transform, jpg, mat, out):
    """ 
        jpg = filename
        mat = filename
    """

    data = scipy.io.loadmat(mat)
    regions = data['regions'].flatten()
    max_type = 0
    for r in regions:
        max_type = max(max_type, r['type'][0][0][0][0])
    r_vals = {}

    for t in np.arange(max_type):
        r_vals[t + 1] = np.array([], 'float32')

    g_vals = copy.deepcopy(r_vals)
    b_vals = copy.deepcopy(r_vals)
    h_vals = copy.deepcopy(r_vals)
    s_vals = copy.deepcopy(r_vals)
    v_vals = copy.deepcopy(r_vals)

    result_stats = {
        'average_abs_err': [],
        'total_pixels': 0,
        'total_error': 0,
        'total_regions': 0,
        'r_vals': r_vals,
        'g_vals': g_vals,
        'b_vals': b_vals,
        'h_vals': h_vals,
        's_vals': s_vals,
        'v_vals': v_vals
    }
    for r in regions:
        logger.info('region')
        x = r['x'][0][0].flatten()
        y = r['y'][0][0].flatten()
        mask = r['mask'][0][0]
        mask3 = cv2.merge([mask, mask, mask])
        print('x', x)
        print('y', y)
        print('mask shape', mask.shape)
        # type is a 1-based / matlab-based index into the list of annotated
        # region types (road, white, yellow, red, or whatever was annotated)
        print('type', r['type'][0][0][0][0])
        # color is [r, g, b] with components between 0 and 1
        print('color', r['color'][0])
        t = r['type'][0][0][0][0]
        # print 'guy look here'
        region_color = r['color'][0]
        region_color = region_color[0][0]
        rval = region_color[0] * 255.
        gval = region_color[1] * 255.
        bval = region_color[2] * 255.
        image = image_cv_from_jpg_fn(jpg)
        transformed = transform(image)
        [b2, g2, r2] = cv2.split(transformed)
        thsv = cv2.cvtColor(transformed, cv2.COLOR_BGR2HSV)
        [h2, s2, v2] = cv2.split(thsv)
        r2_ = r2[mask.nonzero()]
        g2_ = g2[mask.nonzero()]
        b2_ = b2[mask.nonzero()]
        h2_ = h2[mask.nonzero()]
        s2_ = s2[mask.nonzero()]
        v2_ = v2[mask.nonzero()]
        # ipython_if_guy()
        result_stats['r_vals'][t] = np.concatenate(
            (result_stats['r_vals'][t], r2_), 0)
        result_stats['g_vals'][t] = np.concatenate(
            (result_stats['g_vals'][t], g2_), 0)
        result_stats['b_vals'][t] = np.concatenate(
            (result_stats['b_vals'][t], b2_), 0)
        result_stats['h_vals'][t] = np.concatenate(
            (result_stats['h_vals'][t], h2_), 0)
        result_stats['s_vals'][t] = np.concatenate(
            (result_stats['s_vals'][t], s2_), 0)
        result_stats['v_vals'][t] = np.concatenate(
            (result_stats['v_vals'][t], v2_), 0)
        absdiff_img = cv2.absdiff(transformed, np.array([bval, gval, rval,
                                                         0.]))
        masked_diff = cv2.multiply(np.array(absdiff_img, 'float32'),
                                   np.array(mask3, 'float32'))
        num_pixels = cv2.sumElems(mask)[0]
        region_error = cv2.sumElems(cv2.sumElems(masked_diff))[0]
        avg_abs_err = region_error / (num_pixels + 1.)
        print('Average abs. error', avg_abs_err)
        result_stats['average_abs_err'].append(avg_abs_err)
        result_stats[
            'total_pixels'] = result_stats['total_pixels'] + num_pixels
        result_stats[
            'total_error'] = result_stats['total_error'] + region_error
        result_stats['total_regions'] = result_stats['total_regions'] + 1
        # XXX: to finish
    return result_stats
Example #45
def vert_proj(img):
    height, width = img.shape
    v_proj = []
    for i in range(width):
        v_proj.append(cv2.sumElems(img[:, i])[0])
    return v_proj
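As with the horizontal projection in Example #37, the column projection can be one vectorized call:

import numpy as np

def vert_proj_fast(img):
    # column-wise sums; equivalent to cv2.sumElems on each column
    return np.sum(img, axis=0, dtype=np.float64).tolist()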
Example #46
print(tempImageSummary)
# prints the (height, width) tuple of the reference image: (413, 433)
key = cv2.waitKey(0)
min_val = 10000000000  # avoid shadowing the built-in min()
# for y in range(209 - 98 + 1):
#     for x in range(113 - 54 + 1):
for y in range(413 - 125 + 1):
    for x in range(433 - 118 + 1):
        crop_img = imgTemplate[y:y + 125, x:x + 118]
        # cv2.imshow("cropped", crop_img)
        # cv2.waitKey(0)
        delta_frame = cv2.multiply(crop_img, imgReference)
        # delta_frame = cv2.absdiff(crop_img, imgReference)
        # print(y,x)
        cv2.imshow("Ghost Frame", delta_frame)
        intensity_delta = cv2.sumElems(delta_frame)[0]
        # print(intensity_delta)
        if intensity_delta < min_val:
            min_val = intensity_delta
            print("Current Min", min_val)
            print(y, x)

        if intensity_delta == 0.0:
            print("It's a Match!")
            break

        # print(delta_frame)
        # print(cv2.sumElems(delta_frame))
        key = cv2.waitKey(1)

# for i in range(0,413):
Example #47
    def findPolygon(self, image, originalimg):
        topFix = 770
        bottomFix = 790
        leftFix = 296
        rightFix = 306
        sideStep = 10
        top = topFix
        bottom = bottomFix
        left = leftFix
        right = rightFix
        points = numpy.array([[346, 800]])
        ary = numpy.zeros((15, 10))
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
        for y in range(0, 15):
            for x in range(0, 10):
                #Select the image region to examine
                roi = image[top:bottom, left:right]
                #Sum all pixels in this region: summe[0] == 0 means all black
                summe = cv2.sumElems(roi)
                #print summe[0]
                if (summe[0] <= 10.0):
                    cv2.rectangle(originalimg, (left, bottom), (right, top),
                                  (0, 255, 0), 0)  # outline only
                    #points.append([bottom, right])
                    #points = numpy.concatenate((points, ([[right-((right-left)/2),bottom-((bottom-top)/2)]])))
                    ary[y, x] = 0
                else:
                    cv2.rectangle(originalimg, (left, bottom), (right, top),
                                  (0, 0, 255), -1)  # filled
                    ary[y, x] = 1
                left = left + sideStep
                right = right + sideStep
            left = leftFix
            right = rightFix
            top = top - 20
            bottom = bottom - 20

        #cv2.imshow("Ohne Linie", originalimg)
        #pass

        top = topFix
        bottom = bottomFix
        left = leftFix
        right = rightFix
        center = 346
        centerTop = 346.0
        for y in range(0, 15):
            sumItemsLeft = 0
            sumItemsRight = 0
            found = False
            left = leftFix + (sideStep * 4)
            right = rightFix + (sideStep * 4)
            for x in range(4, -1, -1):
                if ary[y, x] == 1:
                    found = True
                if found == True:
                    ary[y, x] = 1
                    sumItemsLeft += 1
                    #cv2.rectangle(originalimg,(left,bottom),(right,top),(0,0,255),-1)
                left = left - sideStep
                right = right - sideStep
            found = False
            left = leftFix + (sideStep * 5)
            right = rightFix + (sideStep * 5)
            for x in range(5, 10):
                if ary[y, x] == 1:
                    found = True
                if found == True:
                    ary[y, x] = 1
                    sumItemsRight += 1
                    #cv2.rectangle(originalimg,(left,bottom),(right,top),(0,0,255),-1)
                left = left + sideStep
                right = right + sideStep
            centerTop += (sumItemsLeft - sumItemsRight) / 2.0
            points = numpy.concatenate((points, ([[
                346 + int((sumItemsLeft - sumItemsRight) / 2.0 * sideStep),
                bottom - ((bottom - top) / 2)
            ]])))
            cv2.circle(originalimg,
                       (int(centerTop), bottom - ((bottom - top) / 2)), 4,
                       (128, 128, 128), -1)
            cv2.circle(originalimg, (346 + int(
                (sumItemsLeft - sumItemsRight) / 2.0 * sideStep), bottom -
                                     ((bottom - top) / 2)), 4, (255, 255, 255),
                       -1)
            left = leftFix
            right = rightFix
            top = top - 20
            bottom = bottom - 20
            if (y == 0):
                center = centerTop

        direction = cv2.fitLine(points, cv2.DIST_L1, 0, 0.01, 0.01)
        x1 = direction[2]
        x2 = 346 + (direction[0] * -200)
        y1 = direction[3]
        y2 = 800 - numpy.abs(direction[1] * -200)
        print(x1, y1, x2, y2)
        cv2.line(originalimg, (x1, y1), (x2, y2), (255, 255, 0), 4)
        #cv2.line(originalimg, (int(center), 800), (int(centerTop), 700), (255,255,0),2)
        cv2.imshow("Mit Linie", originalimg)

        return originalimg
Example #48
                frame = imutils.resize(frame, width=int(newfw))
        except Exception as e:
            print("Exception at cap.read: ", e)
            cv2.waitKey(0)
            exit()

        # EXTRACT ROI from frame
        frameROI = frame[roi[0]:roi[1], roi[2]:roi[3]]
        # frameROI = imutils.resize(frame[roi[0]:roi[1], roi[2]:roi[3]], height=640)

        # DIFF gray, fixed-size frames
        smallGray = plateUtils.toGrayscale(frame, width=480)
        frameDiff = 1000000
        try:
            diffFrame = cv2.absdiff(smallGray, previousFrame)
            frameDiff = cv2.sumElems(diffFrame)[0]
        except Exception as e:
            print("EXCEPTION", e)
        # STORE current frame for frame diff
        previousFrame = smallGray.copy()

        # SKIP if scene is stationary
        if (frameDiff < 5000):
            skippedFrameCounter += 1
        else:
            # READ PLATES each processPerNFrames
            if counter % processPerNFrames == 0:
                plates, plateLocations = pe.loop(frameROI.copy())
                for pl in plateLocations:
                    cv2.drawContours(frame, [pl], -1, (0, 255, 0), 3)
                mergeFrameReadings(plates)