Example #1
0
import cv2
import numpy as np
from math import pi

def generateIntegralChannels(image):
	channels = [] # list of array
	img = image
	colored = len(img.shape) > 2
	if not colored: 
		return
	img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
	if colored:
		img_luv = cv2.cvtColor(img, cv2.COLOR_BGR2LUV)
	x_der = cv2.Sobel(img_gray, cv2.CV_32F, 1, 0)
	y_der = cv2.Sobel(img_gray, cv2.CV_32F, 0, 1)
	mag, angle = cv2.cartToPolar(x_der, y_der)


	no_rows = mag.shape[0]
	no_cols = mag.shape[1]

	hist = []
	for i in range(6):
		hist.append(np.zeros((no_rows, no_cols)))

	for row in range(no_rows):
		for col in range(no_cols):
			ang = angle[row, col] * 180 / pi
			ang1 = ang
			if ang >= 180 and ang < 360:
				ang -= 180
			elif ang < 0:
				ang += 180
			elif ang >=  360:
				ang = 0

			ind = int(ang/30)


			try:
				hist[ind][row, col] = mag[row, col]
			except IndexError:
				print(mag.shape)
				print(np.asarray(hist).shape)
				print(row, col, ind, ang, ang1)

	for i in range(3):
		channels.append(cv2.integral(img_luv[:,:,i]))
	channels.append(cv2.integral(mag))
	for i in range(len(hist)):
		hist[i] = cv2.integral(hist[i])
	channels += hist
	return channels
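A minimal usage sketch for the function above (the image path is a placeholder; any color image read as BGR works, since grayscale input returns None):

import cv2

img = cv2.imread("sample.jpg")  # hypothetical path to a color (BGR) image
channels = generateIntegralChannels(img)
# 3 LUV channels + 1 gradient magnitude + 6 orientation histograms = 10 integral images
print(len(channels))       # -> 10
print(channels[0].shape)   # each integral image is (H + 1, W + 1)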
Example #2
0
def motion_detection(frames, location_list, block_size):

    history = len(frames)  # number of training frames
    bs = cv2.createBackgroundSubtractorKNN(detectShadows=True)  # background subtractor with shadow detection
    bs.setHistory(history)
    i = 0
    num = -history
    while i < history:
        fg_mask = bs.apply(frames[num])  # get the foreground mask
        num += 1
        i += 1

    th = cv2.threshold(fg_mask.copy(), 244, 255, cv2.THRESH_BINARY)[1]
    th = cv2.erode(th,
                   cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)),
                   iterations=2)
    dilated = cv2.dilate(th,
                         cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 3)),
                         iterations=2)

    int_diff = cv2.integral(dilated)

    result = list()
    for pt in iter(location_list):
        xx, yy, _bz, _bz = pt
        t11 = int_diff[xx, yy]
        t22 = int_diff[xx + block_size, yy + block_size]
        t12 = int_diff[xx, yy + block_size]
        t21 = int_diff[xx + block_size, yy]
        block_diff = t11 + t22 - t12 - t21
        if block_diff > 0:
            result.append((xx, yy, block_size, block_size))
    return result
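The block test above relies on the standard integral-image identity: the sum over any axis-aligned block takes just four lookups. A small self-contained check of that identity:

import cv2
import numpy as np

a = np.arange(16, dtype=np.uint8).reshape(4, 4)
ii = cv2.integral(a)  # shape (5, 5); the first row and column are zeros
x, y, bs = 1, 1, 2    # top-left corner and block size
block_sum = ii[x, y] + ii[x + bs, y + bs] - ii[x, y + bs] - ii[x + bs, y]
assert block_sum == a[x:x + bs, y:y + bs].sum()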
Example #3
0
    def generate_negative(self, image, bbox, num):
        img = self.image_transformation(image.copy())
        self.integral_img = cv2.integral(img)
        i = 0
        ind = 4000

        while ind < len(self.init_windows) and i < num:
            if self.init_windows[ind] != "switch":
                box = self.init_windows[ind:ind + 4]
                ind += 4
                if self.mean_filter(box):
                    width = box[2] - box[0]
                    height = box[3] - box[1]
                    bbox_new = [bbox[0] - width, bbox[1] - height, bbox[2], bbox[3]]
                    outside = box[0] > bbox_new[2] or box[0] < bbox_new[0] \
                              or box[1] > bbox_new[3] or box[1] < bbox_new[1]
                    inside = box[2] < 640 and box[3] < 120
                    if outside and inside:
                        i += 1
                        self.train(box, image, 0, 0)
                        cv2.rectangle(self.viz, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
                        cv2.imshow("Image", self.viz)
                        cv2.waitKey(1)
            else:
                ind += 1
                continue
        print "Number of Negatives: %i" % (num), "windows"
Example #4
0
def averaging(image, distances):
    k = 2
    hist = cv2.equalizeHist(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
    result = np.empty(hist.shape, dtype=np.float32)
    integral = cv2.integral(hist)

    for i in range(result.shape[0]):
        for j in range(result.shape[1]):
            radius = k * distances[i, j] // 2
            if radius == 0:
                result[i, j] = hist[i, j]
            else:

                def clamp(value, a, b):
                    if value < a:
                        return a
                    elif value > b:
                        return b
                    else:
                        return value

                x1 = int(clamp(i - radius, 0, integral.shape[0] - 1))
                x2 = int(clamp(i + radius, 0, integral.shape[0] - 1))
                y1 = int(clamp(j - radius, 0, integral.shape[1] - 1))
                y2 = int(clamp(j + radius, 0, integral.shape[1] - 1))
                color = int(
                    (integral[x2, y2] - integral[x2, y1] - integral[x1, y2] +
                     integral[x1, y1]) / (x2 - x1) / (y2 - y1))
                result[i, j] = color if color > 0 else 1

    cv2.imwrite('results/averaging.png', result)

    return result
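A hedged usage sketch for averaging (the input path and the uniform distance map are illustrative assumptions; note that the function writes to results/averaging.png, so that directory must exist):

import cv2
import numpy as np

image = cv2.imread("input.png")            # hypothetical input image
distances = np.full(image.shape[:2], 4.0)  # per-pixel window radii; uniform here for simplicity
blurred = averaging(image, distances)      # adaptive box blur driven by the integral image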
Example #5
0
def buildDescMat(gx, gy, descinfo):
    nbin = descinfo.nBins - 1 if descinfo.isHof else descinfo.nBins
    ndims = descinfo.nBins
    anglebase = nbin / 360.0
    index = 0
    # Python Calculate gradient magnitude and direction ( in degrees )
    mag, angle = cv2.cartToPolar(gx, gy, angleInDegrees=True)
    desc = np.zeros([gx.shape[0] + 1, gx.shape[1] + 1, ndims])
    for i in range(gx.shape[0]):  # going through rows
        for j in range(gx.shape[1]):  # going through columns
            # ensure not invalid(nan or inf)
            mag[i, j] = np.nan_to_num(mag[i, j])
            # for zero bin of HOF
            if descinfo.isHof and (mag[i, j] <= min_flow):  # min_flow: assumed module-level threshold
                bin0 = nbin  # zero bin is the last one, index 8
                mag0 = 1.0
                bin1 = 0
                mag1 = 0
            else:
                fbin = anglebase * (angle[i, j] % 360.)
                bin0 = int(np.floor(fbin))
                bin1 = int((bin0 + 1) % nbin)
                mag1 = (fbin - bin0) * mag[i, j]
                mag0 = mag[i, j] - mag1
                if np.isnan(mag0) or np.isnan(mag1):
                    print(i, j)
            desc[i][j][bin0] = mag0
            desc[i][j][bin1] = mag1
    desc = cv2.integral(desc)
    return desc
Example #6
0
def adaptative_thresholding(self, path, threshold):
    ImageIn = path
    gray = cv2.cvtColor(ImageIn, cv2.COLOR_BGR2GRAY)
    orignrows, origncols = gray.shape
    M = int(np.floor(orignrows / 16) + 1)
    N = int(np.floor(origncols / 16) + 1)
    Mextend = round(M / 2) - 1
    Nextend = round(N / 2) - 1
    aux = cv2.copyMakeBorder(gray,
                             top=Mextend,
                             bottom=Mextend,
                             left=Nextend,
                             right=Nextend,
                             borderType=cv2.BORDER_REFLECT)
    windows = np.zeros((M, N), np.int32)
    imageIntegral = cv2.integral(aux, windows, -1)
    nrows, ncols = imageIntegral.shape
    result = np.zeros((orignrows, origncols))
    for i in range(nrows - M):
        for j in range(ncols - N):
            result[i, j] = imageIntegral[i + M, j + N] - imageIntegral[i, j + N] + imageIntegral[i, j] - \
                            imageIntegral[
                                i + M, j]
    binar = np.ones((orignrows, origncols), dtype=bool)
    graymult = (gray).astype('float64') * M * N
    binar[graymult <= result * (100.0 - threshold) / 100.0] = False
    binar = (255 * binar).astype(np.uint8)

    return binar
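A usage sketch: despite its name, the path parameter is an already-decoded BGR image, and self is never used by the body, so the method can be exercised directly (file names are placeholders; the module-level cv2/numpy imports are assumed):

import cv2

img = cv2.imread("document.png")                 # hypothetical scanned page
binary = adaptative_thresholding(None, img, 25)  # 'self' is ignored by the body
cv2.imwrite("binary.png", binary)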
Example #7
0
def process_image(img, templ, out_dir):
    height, width, depth = img.shape
    int_image = cv2.integral(img)

    h_lines = find_horizontal_lines(img, int_image)
    count = 1

    if (len(h_lines) > 1):
        prev = h_lines[0]
        scene = 1
        for li in range(1, len(h_lines)):
            current = h_lines[li]
            v_lines = find_vertical_lines(img, prev[3], current[1], int_image)

            if (len(v_lines) > 1):
                prev_x = v_lines[0][2]
                for vli in range(1, len(v_lines)):
                    vl = v_lines[vli]
                    x1, y1, x2, y2 = vl

                    crop = Image.fromarray(img[y1:y2, prev_x:x1, ::-1])
                    crop.save(out_dir / templ.format(scene=scene))

                    prev_x = x2
                    scene = scene + 1

            prev = current
Example #8
0
File: cv.py Project: Saufaer/CV_works
def fil(img):
    dist = dis_s(img)
    h, w = img.shape[0:2]
    k = 0.75
    Int = cv2.integral(contr(img))

    fil = np.zeros((h, w), np.uint8)

    for x in range(h):
        for y in range(w):
            r = min(int(k * dist[x, y]), 5)
            sh = h - 1
            sw = w - 1
            fx = x + r + 1
            fy = y + r + 1
            sx = x - r
            sy = y - r
            sbordX = Borders(sx, 0, sh)
            sbordY = Borders(sy, 0, sw)
            fbordX = Borders(fx, 0, sh)
            fbordY = Borders(fy, 0, sw)
            ch = Int[sbordX, sbordY] + Int[fbordX, fbordY] - Int[
                sbordX, fbordY] - Int[fbordX, sbordY]
            fil[x, y] = ch / ((1 + 2 * r)**2)
    return fil
Example #9
0
def get_integral_image(image):
    '''
    Compute the integral image of the input image.
    @image: np.ndarray of input image
    @return: np.ndarray of integral image
    '''
    return cv2.integral(image)
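For reference, cv2.integral pads one zero row and one zero column, so the output is one larger than the input in each dimension:

import cv2
import numpy as np

img = np.ones((3, 3), dtype=np.uint8)
ii = get_integral_image(img)
print(ii.shape)    # (4, 4)
print(ii[0])       # the first row is all zeros
print(ii[-1, -1])  # bottom-right entry is the sum of the whole image -> 9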
Example #10
0
    def __find_ROI(self, frame):
        '''
        Sliding window that finds an initial good seed for the pupil based
        on image integrals (pupil is usually dark)

        If no pupil center has been successfully found before, then 
        it assumes that the pupil is probably in the center region of the image
        '''
        h = int(frame.shape[0] / 3)
        w = int(frame.shape[1] / 3)
        hfs = int(h / 4)
        wfs = int(w / 4)
        if self.center is None:
            self.center = np.array([frame.shape[0] // 2, frame.shape[1] // 2])
        self.bbox_size['min'], self.bbox_size['max'] = w // 3, w * 1.3
        self.pupil_size['min'], self.pupil_size['max'] = w // 6, w // 2
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        minval = sys.maxsize
        bbox = None
        for y in range(self.grid_v):
            for x in range(self.grid_v):
                crop = gray[y * hfs:y * hfs + h, x * wfs:x * wfs + w]
                mpoint = np.array([y * hfs + h // 2, x * wfs + w // 2])
                integral = cv2.integral(crop)
                weight = self.__get_weight(mpoint)
                val = integral[-1, -1] * weight
                if val < minval:
                    minval = val
                    bbox = (x * wfs, y * hfs, w, h)
        #cv2.rectangle(frame, bbox, (255,120,120), 2, 1)
        return bbox
Example #11
0
def testing():
    imageName = "test_im/test0.png"
    im = ~cv2.imread(imageName, 0)
    integral = cv2.integral(im)
    print(integral)
    feature1 = haar_one(im, integral)
    print(feature1)
Example #12
0
	def Compute(self, flow):
		# Split xy flow
		x_flow, y_flow = cv2.split(flow)
		
		# Split xy flow to magnitude and angle 
		flow_mag = numpy.sqrt(x_flow*x_flow + y_flow*y_flow)
		flow_ang_rad = numpy.arctan2(y_flow, x_flow)
		
		# Calc flow histogram bins
		bin_origin = flow_ang_rad * (self.BIN_NUM - 1)/(2*numpy.pi)

		# Extract min flow and replace magnitude and bin
		min_flow_mask = (flow_mag <= self.MIN_FLOW_THRESH)
		flow_mag[min_flow_mask] = 1.0
		bin_origin[min_flow_mask] = self.BIN_NUM - 1

		# Split two adjacent bins
		bin_floor = numpy.floor(bin_origin).astype(numpy.int64)
		bin_ceil = numpy.ceil(bin_origin).astype(numpy.int64) % (self.BIN_NUM - 1)

		# Calc adjacent bins mask
		bin_masks_floor = [(bin_floor == i).astype(numpy.int64) for i in range(self.BIN_NUM)]
		bin_masks_ceil = [(bin_ceil == i).astype(numpy.int64) for i in range(self.BIN_NUM)]
		
		# Split the magnitude to two adjacent bins
		mag_floor = (bin_origin - bin_floor)*flow_mag
		mag_ceil = (flow_mag - mag_floor)
	
		# Add adjacent magnitudes and convert integral features
		features = [(mag_floor*mask_floor) + (mag_ceil*mask_ceil) for(mask_floor, mask_ceil) in zip(bin_masks_floor, bin_masks_ceil)]
		integral_features = [cv2.integral(feature) for feature in features]

		return integral_features
Example #13
0
    def compute_response_map(self):
        top, bottom, left, right = self.ext_len
        patch = np.lib.pad(self.color_map, ((top, bottom - 1), (left, right - 1)), 'constant', constant_values=0)
        bg_w, bg_h = self.bg_box[2], self.bg_box[3]

        # compute response map
        # patch = patch.astype(np.float64) / 255

        SAT = cv2.integral(patch)
        self.response_map = SAT[:bg_h, :bg_w] + SAT[-bg_h:, -bg_w:] - SAT[:bg_h, -bg_w:] - SAT[-bg_h:, :bg_w]
        # self.response_map = self.response_map / (self.tg_box[2] * self.tg_box[3])
        # self.response_map = normalize_255(self.response_map)

        # h,w = self.response_map.shape
        # for i in xrange(h):
        #     for j in xrange(w):
        #         print self.response_map[i,j],
        #     print
        # exit()

        # compute pred_cpos
        idxs = np.where(self.response_map == self.response_map.max())
        # print  idxs[1][0], idxs[0][0]

        pred_cpos = (self.bg_box[0] + idxs[1][0], self.bg_box[1] + idxs[0][0])

        return pred_cpos
Example #14
0
def testHaarFeatureCalculation():
    test = np.array([[5, 2, 5, 2], [3, 6, 3, 6], [5, 2, 5, 2], [3, 6, 3, 6]])
    test2 = np.array([[5, 2], [3, 6]])
    # expected integral of `test`, for reference:
    # [[0, 0, 0, 0, 0], [0, 5, 7, 12, 14], [0, 8, 16, 24, 32],
    #  [0, 13, 23, 36, 46], [0, 16, 32, 48, 64]]

    integral = cv2.integral(test.astype(np.float32)).astype(
        np.float32).reshape((5, 5, 1))
    feature = np.array([0, 0, 1, 1, 0.25, 0.25, 0.5, 0.5]).reshape(1, 8)
    print(calculateValues(integral, feature))
    print("--------------")
    integral = cv2.integral(test2.astype(np.float32)).astype(
        np.float32).reshape((3, 3, 1))

    feature = np.array([0, 0, 1, 1, 0.5, 0.5, 0.5, 0.5]).reshape(1, 8)
    print(calculateValues(integral, feature))
Example #15
0
def hw1(original, filename):

    original = cv2.imread(original)
    image = cv2.imread(filename)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    img1 = gray.copy()
    # median_filter(gray, img1)
    thresh, contours = find_text_area(img1, original)
    integral_image = cv2.integral(img1)

    i = 1
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        cropped = thresh[y:y + h, x:x + w]
        if cv2.contourArea(cnt) > 3000:
            print("---- Region ", i, ": ----")
            calc_text_area(cropped)
            print("Bounding Box Area (px): ", cv2.contourArea(cnt))
            word_count(cropped)
            integral_gray(integral_image, x, y, w, h)
            i += 1

    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', original)
    cv2.waitKey(0)
Example #16
0
    def Compute(self, gray):
        # Calc sobel filter
        x_edge = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=1)
        y_edge = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=1)

        # Split xyedge to magnitude and angle
        edge_mag = numpy.sqrt(x_edge * x_edge + y_edge * y_edge)
        edge_ang_rad = numpy.arctan2(y_edge, x_edge)

        # Calc two adjacent bins
        bin_origin = edge_ang_rad * self.BIN_NUM / (2 * numpy.pi)
        bin_floor = numpy.floor(bin_origin).astype(numpy.int64)
        bin_ceil = numpy.ceil(bin_origin).astype(numpy.int64) % self.BIN_NUM

        # Calc adjacent bins mask
        bin_masks_floor = [(bin_floor == i).astype(numpy.int64)
                           for i in range(self.BIN_NUM)]
        bin_masks_ceil = [(bin_ceil == i).astype(numpy.int64)
                          for i in range(self.BIN_NUM)]

        # Split the magnitude to two adjacent bins
        mag_floor = (bin_origin - bin_floor) * edge_mag
        mag_ceil = (edge_mag - mag_floor)

        # Add adjacent magnitudes and convert integral features
        features = [(mag_floor * mask_floor) + (mag_ceil * mask_ceil)
                    for (mask_floor,
                         mask_ceil) in zip(bin_masks_floor, bin_masks_ceil)]
        integral_features = [cv2.integral(feature) for feature in features]

        return integral_features
Example #17
0
def filter_with_dark(pp_image, whole_diff):
    # 1. sliding window to get the picture cropped
    # using 64 x 64 windows with stride 48
    # in fact we have to do the crop; when the size is not a multiple of 64, the remainder is ignored

	nrow, ncol = pp_image.shape 
	acc_whole_diff = cv.integral(whole_diff)
	snrow = nrow - sq_size + 1
	sncol = ncol - sq_size + 1
	row = 0 
	crops = []
	while row < snrow:
        #
		col = 0
		while col < sncol:
			if black_area_checker(row, col, acc_whole_diff):
				crops.append(pp_image[row:row+sq_size, col:col+sq_size])
			col += stride 
		# after 
		row += stride
	
	# 2. using small kernel to pre-filter out some parts
	pre_f_crops = []
	for crop_img in crops:
		if pre_filter5(crop_img):
			pre_f_crops.append(crop_img)
    
	# print(len(crops), len(pre_f_crops))	

    # 3. feed into CNN as a whole group # ignored 
	return pre_f_crops
Example #18
0
 def __init__(self, image):
     ret, self.thresh_mask = cv2.threshold(image, 127, 255, 0)
     self.image = self.thresh_mask // 128
     self.image = 1 - self.image
     cv2.sumElems(self.image)
     self.sum_image = cv2.integral(self.image)
     self.contour = None
Example #19
0
def get_center_likelihood(likelihood_map, sz):
    h, w = likelihood_map.shape[:2]
    n1 = h - sz[1] + 1
    n2 = w - sz[0] + 1
    sat = cv2.integral(likelihood_map)
    i, j = np.arange(n1), np.arange(n2)
    i, j = np.meshgrid(i, j)
    sat1 = sat[i, j]
    sat2 = np.roll(sat, -sz[1], axis=0)
    sat2 = np.roll(sat2, -sz[0], axis=1)
    sat2 = sat2[i, j]
    sat3 = np.roll(sat, -sz[1], axis=0)
    sat3 = sat3[i, j]
    sat4 = np.roll(sat, -sz[0], axis=1)
    sat4 = sat4[i, j]
    center_likelihood = ((sat1 + sat2 - sat3 - sat4) / (sz[0] * sz[1])).T

    def fillzeros(im, sz):
        res = np.zeros((sz[1], sz[0]))
        msz = ((sz[0] - im.shape[1]) // 2, (sz[1] - im.shape[0]) // 2)
        res[msz[1]:msz[1] + im.shape[0], msz[0]:msz[0] + im.shape[1]] = im
        return res

    center_likelihood = fillzeros(center_likelihood, (w, h))
    return center_likelihood
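A usage sketch with a synthetic likelihood map (shapes are illustrative; note that sz is given as (width, height)):

import numpy as np

likelihood_map = np.random.rand(64, 48).astype(np.float32)
cl = get_center_likelihood(likelihood_map, (16, 16))
print(cl.shape)  # zero-padded back to the input size: (64, 48)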
Example #20
0
    def IntegrateImage(self, img):
        im = deepcopy(img)

        intCalc = cv2.integral(im)  # integral calculation by cv2
        integVector = np.zeros(
            img.shape)  # allows values to be larger than 255

        # calc. histogram for integral image and return
        #hist,bins = np.histogram(np.asarray(integIm).ravel(),256,[0,256])

        #integVector = np.array(integIm).flatten() # doesnt work as needed
        integVector[:] = intCalc[
            1:, 1:]  # drop the integral's zero first row and column
        integVector = integVector.reshape(img.size)  # flatten (reshape is not in-place)
        #integVector.append(np.asarray(hist))

        #integralIm = deepcopy(img)

        height, width = np.array(img).shape
        ''' THIS MANUALLY PERFORMS INTEGRAL, IT TAKES TOO MUCH TIME
        # used to hold sum of intensities up until pixel
        sum = 0
        # loop to set area
        for j in range(0, height):
            for i in range (0, width):
                # loop to sum up area
                for b in range (0,j + 1):
                    for a in range (0, i + 1):
                        sum += img[b][a]
                sum = 0
                integralIm[j][i] = sum
        '''
        # return the flattened integral image, NOT a histogram
        return integVector
Example #21
0
def BoxFilter(img, d):
    s = d * d
    imgCopy = np.array(img) / 255
    res = np.array(imgCopy)
    [height, width, colors] = imgCopy.shape
    if (d > 1) and (d < height) and (d < width):
        imgCopy = cv2.integral(imgCopy)[1:, 1:, :]
        d = d // 2
        for y in range(height):
            for x in range(width):
                left = x - d
                right = x + d
                top = y - d
                bottom = y + d
                if left < 0:
                    left = 0
                if right + 1 > width:
                    right = width - 1
                if top < 0:
                    top = 0
                if bottom + 1 > height:
                    bottom = height - 1
                s = (right - left) * (bottom - top)
                res[y, x] = (imgCopy[top, left] + imgCopy[bottom, right] - imgCopy[bottom, left] - imgCopy[top, right]) / s
    return res
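A usage sketch (the input must be a 3-channel image, since the function unpacks three shape values; sizes are illustrative):

import numpy as np

img = (np.random.rand(20, 20, 3) * 255).astype(np.uint8)
smoothed = BoxFilter(img, 5)  # 5x5 box mean over the [0, 1]-scaled image
print(smoothed.shape)         # (20, 20, 3)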
Example #22
0
    def calculate_integral_HOG(self, image):
        # differentiate in the X and Y directions
        xsobel = np.zeros(image.shape)
        filters.sobel(image, 1, xsobel)  # axis=1, i.e. the x direction

        ysobel = np.zeros(image.shape)
        filters.sobel(image, 0, ysobel)  # axis=0, i.e. the y direction

        # pre-allocate one image per orientation bin
        bins = np.zeros((N_BIN, image.shape[0], image.shape[1]))

        # convert the X/Y derivative images to gradient magnitude and direction
        Imag, Iang = cv2.cartToPolar(xsobel, ysobel, None, None, True)  # outputs are magnitude, angle
        # map directions into [0, 180) (181-360 are folded onto 0-180; direction across the x-axis is not distinguished)
        Iang = (Iang > 180) * (Iang - 180) + (Iang <= 180) * Iang
        Iang[Iang == 360] = 0
        Iang[Iang == 180] = 0
        # prepare to quantize directions to [0, 1, ..., 8] (still float at this point)
        Iang /= THETA
        # vote into the orientation bins, weighted by gradient magnitude
        for ind in range(N_BIN):
            bins[ind] += (np.int8(Iang) == ind) * Imag

        # build one integral image per orientation bin
        """ !this calculation is suspect! """
        integrals = np.array([cv2.integral(bins[i]) for i in range(N_BIN)])

        return integrals
Example #23
0
def dark_channl(frames, location_list, block_size):
    r, g, b = cv2.split(frames[-1])
    min_img = cv2.min(r, cv2.min(g, b))
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
    dc_img = cv2.erode(min_img, kernel)

    ret, thresh1 = cv2.threshold(dc_img, 170, 255, cv2.THRESH_BINARY)

    cv2.imshow("dark_channl", thresh1)

    int_diff = cv2.integral(thresh1)
    # This is a key parameter. Change this value can control motion_block number.
    # threshold = block_size * block_size / 2
    # threshold = 400
    result = list()
    for pt in iter(location_list):
        xx, yy, _bz, _bz = pt
        t11 = int_diff[xx, yy]
        t22 = int_diff[xx + block_size, yy + block_size]
        t12 = int_diff[xx, yy + block_size]
        t21 = int_diff[xx + block_size, yy]
        block_diff = t11 + t22 - t12 - t21
        if block_diff > 0:
            result.append((xx, yy, block_size, block_size))
    return result
Example #24
0
def extract_faces(img, casecades, haars):
    selected_haars = []
    for casecade in casecades:
        this_haars = []
        for item in casecade:
            this_haars.append(item['num'])
        selected_haars.append(this_haars)
    new_img = img
    for i in range(10):
        for j in range(new_img.shape[0] - IMG_SIZE):
            for k in range(new_img.shape[1] - IMG_SIZE):
                croped_img = new_img[j:j + IMG_SIZE, k:k + IMG_SIZE]
                integral_img = cv2.integral(croped_img)
                face_flag = True
                for index, casecade in enumerate(casecades):
                    haar_scores = []
                    for selected_haar in selected_haars[index]:
                        haar_scores.append(_cal_haar(integral_img, haars[selected_haar]))
                    if calculate_adaboost(haar_scores, casecade):
                        face_flag = False
                        break
                if face_flag:
                    face_num = len(os.listdir(os.path.join(BASE_DIR, 'faces'))) + 1
                    cv2.imwrite(os.path.join(BASE_DIR, 'faces', '%d.png' % face_num), croped_img)

        new_img = cv2.resize(new_img, (int(new_img.shape[0] * 0.7), int(new_img.shape[1] * 0.7)))
Example #25
0
def main():
    # reading image
    image = cv2.imread(path_to_image)
    # changing color space
    im = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    plt.figure(1)
    plt.title('Original image')
    plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    # computation of integral image
    i_im = cv2.integral(im)
    n_bins = 16
    hist = cv2.calcHist([im], [0, 1, 2], None, [n_bins, n_bins, n_bins],
                        [0, 100, -128, 127, -128, 127])
    hill_climb = Hill_climbing(hist, n_bins)
    # applying hill climbing algorithm to find k
    k = hill_climb.find_peaks()
    # computation of saliency maps
    V, region_idxes = saliency_means_per_region(im, k)
    threshold = np.median(V) - 0.1
    # segmenting the image
    seg_image = segmented_image(image, V, threshold, region_idxes)
    # color space revert
    seg_image = cv2.cvtColor(seg_image, cv2.COLOR_RGB2BGR)
    # displaying the image
    plt.figure(2)
    plt.title('Segmented image')
    plt.imshow(seg_image)
    plt.show()
Example #26
0
def computeMotionblur( img ):
    """
    Compute IQA (Image Quality Assessment) of iris image stored in a folder
    and listed by a correlated text file
    Motion Blur Degree
    Written by Yunlong Wang 2020.09.15
    Rewritten in Python by Hongda Liu 2021.04.13
    """
    integralImg = cv2.integral(img)
    [row,col] = img.shape
    countPtNum = 0
    blurscoreSum = 0
    for r in range(2, row - 6):
        for c in range(2, col - 6, 4):
            down = integralImg[r-1, c+6] + integralImg[r-2, c-2] - integralImg[r-1, c-2] - integralImg[r-2, c+6]
            up = integralImg[r, c+6] + integralImg[r-1, c-2] - integralImg[r, c-2] - integralImg[r-1, c+6]
            blurscoreSum = blurscoreSum + (up - down) * (up - down)
            countPtNum = countPtNum + 1
    if blurscoreSum < 0:
        BlurScore = 100
    else:
        # BlurScore = math.floor(math.sqrt(blurscoreSum/countPtNum)/8)
        BlurScore = math.floor(math.sqrt(blurscoreSum / countPtNum))
    if BlurScore >= 100:
        BlurScore = 99
    return BlurScore
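A usage sketch (the file name is a placeholder; the input must be single-channel because the body unpacks img.shape into two values, and the module-level cv2/math imports are assumed):

import cv2

img = cv2.imread("iris.png", cv2.IMREAD_GRAYSCALE)  # hypothetical iris image
print(computeMotionblur(img))                       # integer score, clamped to at most 99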
Example #27
0
File: rtc.py Project: kkew3/dolphins-mc
    def process_frame(self, frame: np.ndarray, object_box: Rect) -> Rect:
        # predict
        self._detect_box = sample_rect(frame.shape[:2], object_box,
                                       self.search_window_size)
        integral_frame = cv2.integral(frame).astype(np.float32)
        self._detect_feature = compute_feature(integral_frame, self._harr,
                                               self._detect_box)
        radio_max, radio_max_index = compute_radio_classifier(
            self._pos_mean, self._pos_std, self._neg_mean, self._neg_std,
            self._detect_feature)
        new_object_box = self._detect_box[radio_max_index]

        # update
        self._sample_positive_box = sample_rect(
            frame.shape[:2], new_object_box, 1.0 * self.radical_scope_positive,
            0.0, 1000000)
        self._sample_negative_box = sample_rect(
            frame.shape[:2], new_object_box, 1.5 * self.search_window_size,
            4.0 + self.radical_scope_positive, 100)
        self._sample_positive_features = compute_feature(
            integral_frame, self._harr, self._sample_positive_box)
        self._sample_negative_features = compute_feature(
            integral_frame, self._harr, self._sample_negative_box)
        self._pos_mean, self._pos_std = update_gaussian_classifier(
            self._sample_positive_features, self._pos_mean, self._pos_std,
            self.lr)
        self._neg_mean, self._neg_std = update_gaussian_classifier(
            self._sample_negative_features, self._neg_mean, self._neg_std,
            self.lr)
        return new_object_box
Example #28
0
def compute_aggregation(costs0, window_size, mode='mean'):
    """
    Compute aggregation for an input disparity map.

    Parameters
    ----------
    costs0 : numpy.ndarray
        Input disparity map.
    window_size : int
        Window size to be used to compute aggregation. Must be an odd value!
    mode : string (optional, default=mean)
        Choose between: mean OR median. Criterion to be used to compute the aggregation.

    Returns
    -------
    costs : numpy.ndarray
        Disparity map with aggregation.

    """
    assert mode in ('mean', 'median'), 'Invalid mode to compute costs with aggregation!'
    assert window_size % 2 != 0, 'Window size should be an odd value!'

    if mode == 'mean':
        # if to use mean mode, calc the integral image for each disparity value
        integral = np.stack([cv2.integral(costs0[..., i])[1:, 1:] for i in range(costs0.shape[-1])], axis=-1)
        costs0 = np.copy(integral)
    costs0 = np.float32(costs0)
    
    # use aux function to compute aggregation in threads
    return __compute_aggregation(costs0, window_size, mode)
Example #29
0
def extract_saliency(img, scale):
    """
    Computes saliency map for a specific scale
    Arguments : 
    image -- numpy array, of shape (n_H, n_W, n_C)
    scale -- integer, representing the scale.
    Outputs :
    saliency_map -- saliency map for a specific scale, of shape (n_H, n_W)
    """
    n_H, n_W, n_channel = img.shape
    W_R2 = scale
    N2 = W_R2**2
    integral_img = cv2.integral(img)
    saliency_map = np.zeros((n_H, n_W))
    for i in range(n_H):
        for j in range(n_W):
            #Window limits in integral image
            mini_x, mini_y, maxi_x, maxi_y, N = get_limits(
                i, j, n_H, n_W, W_R2)
            #Sum of window pixels in original image
            tmp = 1.0*(integral_img[mini_x,mini_y,:] + integral_img[maxi_x,maxi_y,:]\
                       - (integral_img[mini_x,maxi_y,:] \
                          + integral_img[maxi_x,mini_y,:]))/N
            #Distance between R1 and R2
            saliency_map[i, j] = np.linalg.norm(img[i, j, :] - np.array(tmp))
    return saliency_map
Example #30
0
def generatepatchs(img, base_size, factor):
    # Compute the gradients as a proxy of the contextual cues.
    img_gray = rgb2gray(img)
    whole_grad = np.abs(cv2.Sobel(img_gray, cv2.CV_64F, 0, 1, ksize=3)) + \
                 np.abs(cv2.Sobel(img_gray, cv2.CV_64F, 1, 0, ksize=3))

    threshold = whole_grad[whole_grad > 0].mean()
    whole_grad[whole_grad < threshold] = 0

    # We use the integral image to speed-up the evaluation of the amount of gradients for each patch.
    gf = whole_grad.sum() / len(whole_grad.reshape(-1))
    grad_integral_image = cv2.integral(whole_grad)

    # Variables are selected such that the initial patch size would be the receptive field size
    # and the stride is set to 1/3 of the receptive field size.
    blsize = int(round(base_size / 2))
    stride = int(round(blsize * 0.75))

    # Get initial Grid
    patch_bound_list = applyGridpatch(blsize, stride, img, [0, 0, 0, 0])

    # Refine initial Grid of patches by discarding the flat (in terms of gradients of the rgb image) ones. Refine
    # each patch size to ensure that there will be enough depth cues for the network to generate a consistent depth map.
    print("Selecting patchs ...")
    patch_bound_list = adaptiveselection(grad_integral_image, patch_bound_list,
                                         gf, factor)

    # Sort the patch list to make sure the merging operation will be done with the correct order: starting from biggest
    # patch
    patchset = sorted(patch_bound_list.items(),
                      key=lambda x: getitem(x[1], 'size'),
                      reverse=True)
    return patchset
Example #31
0
def density_of_a_rect(img, window_width):
    #    #binary image(0/1)
    #    img = np.array([[1, 0, 0, 1, 0],
    #                   [0, 0, 0, 1, 0],
    #                   [0, 1, 0, 1, 0],
    #                   [1, 1, 0, 1, 0],
    #                   [1, 0, 0, 0, 1]], dtype='uint8')
    #     window_width = 3#the width of rectangle window, assumes to be an odd number
    #    #now assumes the rectangle is square

    img_width = img.shape[0]
    img_height = img.shape[1]

    #pad first so that pixels at the edges still work correctly
    pad_sz = math.floor(window_width / 2)
    pad_img = np.pad(img, pad_sz)

    prev_img = cv2.integral(pad_img)
    #summing all the previous pixels.
    #Previous here means all the pixels above and to the left of that pixel (inclusive of that pixel)

    #the sum of all the pixels in the rectangular window
    #  Sum = Bottom right + top left - top right - bottom left
    prev_img_width = prev_img.shape[0]
    prev_img_height = prev_img.shape[1]
    bottom_right = np.copy(prev_img[(prev_img_width - img_width):,
                                    (prev_img_height - img_height):])
    top_left = np.copy(prev_img[0:img_width, 0:img_height])
    top_right = np.copy(prev_img[0:img_width, (prev_img_height - img_height):])
    bottom_left = np.copy(prev_img[(prev_img_width - img_width):,
                                   0:img_height])
    sum_img = bottom_right + top_left - top_right - bottom_left
    density_img = sum_img / (window_width * window_width)
    return (density_img)
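A quick check using the binary image from the comment block (assuming the cv2, numpy and math imports used by the function are present at module level):

import numpy as np

img = np.array([[1, 0, 0, 1, 0],
                [0, 0, 0, 1, 0],
                [0, 1, 0, 1, 0],
                [1, 1, 0, 1, 0],
                [1, 0, 0, 0, 1]], dtype='uint8')
dens = density_of_a_rect(img, 3)
print(dens.shape)  # same size as the input: (5, 5)
print(dens[2, 2])  # fraction of set pixels in the 3x3 window centred at (2, 2)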
Example #32
0
File: eye_spot.py Project: jonieva/EyeSpot
    def preprocess_images(self):
        """
        :return:
        """
        for image in os.listdir(self.training_images_path):
            print("Preproccesing image " + image)
            image = sitk.ReadImage(os.path.join(self.training_images_path, image))
            # Get the Green channel
            image_array = sitk.GetArrayFromImage(image)[:,:,1]
            # Get mean and standard deviation in local patches of size s=200
            # To that end, we convolve the image with filters m (mean) and var (variance)
            s = 101
            # v = np.ones((s,s), np.double) / (s**2)
            # m2 = signal.convolve2d(image_array, v, "same")
            util = Matplotlib_Util()
            dest = np.zeros(image_array.shape)
            i = cv.integral(image_array)
            i = i[:-1, :-1]
            m = np.zeros(image_array.shape, int)
            m = i[s:, s:]   + i[:(i.shape[0] - s), :(i.shape[1] - s)] \
                            - i[:(i.shape[0] - s), s:] - i[s:, :(i.shape[1]-s)]
            m[:s,:s] = i[:s, :s]
            util.plota(m)
            # Variance
            #var2 = signal.convolve2d(image_array**2, v, "same")
            #var2 = var - m2**2
            # var = i[s:, s:]-m[s:, s:]**2 \
            #         + (i[:(i.shape[0] - s), :(i.shape[1] - s)] - m[:(i.shape[0] - s), :(i.shape[1] - s)]**2) \
            #         - (i[:(i.shape[0] - s), s:] - m[:(i.shape[0] - s), s:]**2) \
            #         - (i[s:, :(i.shape[1]-s)] - m[s:, :(i.shape[1]-s)]**2)
            i2 = cv.integral(image_array**2, dest)
            i2 = i2[:-1, :-1]
            var = np.zeros(image_array.shape)
            var = i2[s:, s:] + i2[:(i2.shape[0] - s), :(i2.shape[1] - s)] \
                            - i2[:(i2.shape[0] - s), s:] - i2[s:, :(i2.shape[1]-s)]
            var = var - m**2
            var[:s,:s] = i2[:s, :s]
            util.plota(var)
            # Avoid discontinuities
            var[var == 0] = 1
            # Distance for every neighbourhood
            d = np.abs((image_array-m) / var**2)
            # Background set to those pixels whose distance is lower than a threshold
            t = 1
            Ib = d < t

            util.plota(Ib)
Example #33
0
def imgCB(msg):
    global bridge

    start = time.time()
    try:
        cv_image = bridge.imgmsg_to_cv2(msg.image)
    except CvBridgeError as e:
        print(e)

    (rows,cols) = cv_image.shape
        
    integral = cv2.integral(cv_image)


    score_img = np.zeros((1,cols,1), np.float32)
    x = 0
    maxScore = 0
    for c in range(cols):
        score = (integral[rows, c+1] - integral[rows,c])/rows
        if (score > maxScore):
            maxScore = score
            x = c
        elif score < 0:
            score = 0
        score_img[0,c] = score

    [mean, std] = cv2.meanStdDev(score_img)
    _, thresh = cv2.threshold(score_img, mean+2.5*std, 500000, cv2.THRESH_TOZERO)

    kernel = np.ones((1,15),np.float32)
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
    thresh = thresh / thresh.max() * 255
    thresh = thresh.astype(np.uint8)

    im2, contours,hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)


    img2.publish(msg.image)

    if len(contours) != 0:
        #find the biggest area
        c = max(contours, key = cv2.contourArea)

        x,y,w,h = cv2.boundingRect(c)
        
        if w > 15:

            disparity = (integral[rows * 3 // 4, x + w] - integral[rows * 1 // 4, x + w] - integral[rows * 3 // 4, x] + integral[rows * 1 // 4, x]) * 2 / rows / w
            depth = msg.f * msg.T / disparity

            img.publish(bridge.cv2_to_imgmsg(thresh))
            head = Header()
            head.stamp = rospy.Time.now()
            center = Point(depth, (x+w/2) - cols/2, 0)
            pub.publish(head, "pole", w, rows, center)
            return
    
    # else
    img.publish(bridge.cv2_to_imgmsg(score_img))
Example #34
0
def convert_labels_to_integrals(label_mask, num_vals):
    masks = []
    print(label_mask.shape)
    for x in range(num_vals):
        m = np.asfarray(label_mask == x)
        m = cv2.integral(m)
        masks.append(m)
    return np.ascontiguousarray(np.swapaxes(np.swapaxes(np.array(masks), 0, 2), 0, 1))
Example #35
0
File: __init__.py Project: bwhite/imseg
def convert_all_probs_to_integrals(all_probs):
    """
    Args:
        all_probs: 
    """
    masks = []
    for x in range(all_probs.shape[2]):
        m = np.ascontiguousarray(all_probs[:, :, x], dtype=np.float64)
        masks.append(cv2.integral(m))
    return np.ascontiguousarray(np.dstack(masks))
Example #36
0
def find_match(image, temp, cands) :
	result = []
	st = cv2.integral(temp)[-1, -1]
	for x, y, w, h in cands :
		cand = image[x : x + w, y : y + h]
		sc = cv2.integral(cand)[-1, -1]
		match = numpy.empty_like(cand)
		cv2.filter2D(cand, -1, temp, match, (-1, -1), 0, cv2.BORDER_CONSTANT)
		maximum = numpy.max(match)
		quality = maximum / numpy.sqrt(sc * st)
		if quality > .60 :
			for ddx in range(w) :
				for ddy in range(h) :
					if abs(match[ddx, ddy] - maximum) < 0.1 :
						dx = ddx
						dy = ddy
						break
			result.append((dx + x - temp.shape[0] / 2, dy + y - temp.shape[1] / 2))
	return result
Example #37
0
    def getIntegralHistogram(self, nbins):
        if hasattr(self, 'gray'):
            gray = self.gray
        else:
            gray = cv2.cvtColor(self.bgr, cv2.COLOR_BGR2GRAY)
            self.g = cv2.integral(gray)

        cv2.equalizeHist(gray, gray)

        sx = cv2.Sobel(gray, cv2.CV_32F, 1, 0, ksize=3)
        sy = cv2.Sobel(gray, cv2.CV_32F, 0, 1, ksize=3)

        bins = []
        for i in range(0, nbins):
            bins.append(numpy.zeros(gray.shape, dtype=float))

        magnitudes = numpy.zeros(gray.shape, dtype=float)

        for i in range(0, nbins):
            self.integrals.append(numpy.zeros((gray.shape[0] + 1, gray.shape[1] + 1), float))

        binStep = 180 / nbins

        for y in range(0, gray.shape[0]):
            for x in range(0, gray.shape[1]):
                if sx[y, x] == 0:
                    temp_gradient = ((math.atan(sy[y, x] / (sx[y, x] + 0.00001))) * (180 / math.pi)) + 90
                else:
                    temp_gradient = ((math.atan(sy[y, x] / sx[y, x])) * (180 / math.pi)) + 90
                magnitudes[y, x] = math.sqrt((sx[y, x] * sx[y, x]) + (sy[y, x] * sy[y, x]))

                for i in range(0, nbins):
                    if temp_gradient <= binStep * i:
                        bins[i-1][y, x] = magnitudes[y, x]
                        break

        for i in range(0, nbins):
            self.integrals[i] = cv2.integral(bins[i])
        self.magnitudes = cv2.integral(magnitudes)

        # todo - create and return integral image for gradient magnitude

        return self.integrals, self.magnitudes
Example #38
0
    def scaled_window_object_detector(self, in_img, scale_factor=1.1, min_neighbors=3, min_size=(30,30)):
        """This object detector is based on a scaled detector window. It scales the detector window instead of
        scaling the image; a detector-window pyramid is constructed instead of an image pyramid.
        """
        v_stride = 1
        h_stride = 1
        objs = []
        # convert to gray scale if the image is color 
        if(len(in_img.shape) == 3):
            gray_img = cv2.cvtColor(in_img, cv2.COLOR_BGR2GRAY)
        else:
            gray_img = in_img

        img_height = gray_img.shape[0]
        img_width = gray_img.shape[1]
        cur_win_width = self.win_width 
        cur_win_height = self.win_height

        # compute integral image. just one time process
        ii_img = cv2.integral(gray_img)
        print(ii_img.dtype)
        # initial scale 1 . ie. original detector size is used
        scale = 1.0

        # upscale the detector window and detect objects until window_size becomes more than one
        # of the image dimension
        while(cur_win_width < img_width and cur_win_height < img_height):
            # max possible window top left corner positions.
            x_max = img_width - cur_win_width + 1
            y_max = img_height - cur_win_height + 1
            print ('current scale = {:f}'.format(scale))
            print('Detector height = {:d}, Detector width = {:d}'.format(cur_win_height, cur_win_width))
            for row in range(0, y_max, v_stride):
                for col in range(0, x_max, h_stride):
                    #print row, col
                    # detect if the current window contains any objects
                    win_pass = self._evaluate_window_scaled(col, row, scale, ii_img)
                    # record the window if it passes
                    if(win_pass):
                        objs.append(tuple([int(col),
                                     int(row),
                                     int(cur_win_width),
                                     int(cur_win_height)]))
 
            # upscale the detector window
            scale *= scale_factor
            cur_win_width = int(self.win_width*scale)
            cur_win_height = int(self.win_height*scale)
            # perform new detections on the rescaled image.

        print('No of boxes before NMS = {:d}'.format(len(objs)))
        # perform NMS 
        objs = box_nms(objs, 0.2)
        print('No of boxes after NMS = {:d}'.format(len(objs)))
        return objs
Example #39
0
 def isMoving():
     integral = cv2.integral(flow)
     totalFlow = integral[height - 1, width - 1]
 
     totalFlow = totalFlow / area
     norm = totalFlow[0] * totalFlow[0] + totalFlow[1] * totalFlow[1]
     norm = math.sqrt(norm) + 1.0 # 1.0 : relaxation parameter
     totalFlow = totalFlow / norm
     if (totalFlow[0] > 0.5):
         return True
     return False
Example #40
0
    def detect_objects(self, in_img, scale_factor=1.1, min_neighbors=3, min_size=(30,30), max_size=()):
        """Detect objects using the LBP cascade classifier present in the given grayscale image.
        This has similar functionality as that of cv2.detectMultiScale() method
        """
        v_stride = 1
        h_stride = 1
        objs = []
        # convert to gray scale if the image is color 
        if(len(in_img.shape) == 3):
            gray_img = cv2.cvtColor(in_img, cv2.COLOR_BGR2GRAY)
        else:
            gray_img = in_img

        org_height = gray_img.shape[0]
        org_width = gray_img.shape[1]
        cur_width = org_width
        cur_height = org_height
        win_width = self.win_width
        win_height = self.win_height

        # initial scale 1 as we process  original image
        scale = 1.0
        # downscale image and detect objects until one of the image dimension
        # becomes less  than the window size
        while(cur_width > (win_width+1) and cur_height > (win_height+1)):
            # max possible window top left corner positions.
            x_max = cur_width - win_width + 1
            y_max = cur_height - win_height + 1
            # compute integral image
            ii_img = cv2.integral(gray_img)
            print ('current scale = {:f}'.format(scale))
            for row in range(0, y_max, v_stride):
                for col in range(0, x_max, h_stride):
                    # detect if the current window contains any objects
                    win_pass = self._evaluate_window(col, row, ii_img)
                    # record the window if it passes
                    if(win_pass):
                        objs.append(tuple([int(col*scale),
                                     int(row*scale),
                                     int(scale*win_width),
                                     int(scale*win_height)]))
 
            # down scale the image
            cur_width = int(cur_width/scale_factor)
            cur_height = int(cur_height/scale_factor)
            scale *= scale_factor
            gray_img = cv2.resize(gray_img, dsize=(cur_width, cur_height), interpolation=cv2.INTER_LINEAR)
            # perform new detections on the rescaled image.

        print('No of boxes before NMS = {:d}'.format(len(objs)))
        # perform NMS 
        objs = box_nms(objs, 0.2)
        print('No of boxes after NMS = {:d}'.format(len(objs)))
        return objs
Example #41
0
 def __init__(self, original):        
     oh, ow = original.shape
     self.__original_width = ow
     self.__original_height = oh
     offset = IntegralImage.OFFSET
     
     extended = imgtools.ndarray_symmetric_ext(original, offset, offset)
     h, w = extended.shape
     self.__integral = cv2.integral(extended)[1:h, 1:w]
     
     self.__haar_x_resp = {}
     self.__haar_y_resp = {}       
Example #42
0
File: activate.py Project: Daiver/jff
def detect(img, coordSteps, scale2DSteps, size, clfs, alphas):
    res = []
    for frame, rect in getSlidingWindows(img, coordSteps, scale2DSteps):
        if min(frame.shape) <= 0:
            continue
        #print frame.shape, frame.dtype, rect, size
        frameSized = cv2.resize(frame, size)
        frameInt = cv2.integral(frameSized)
        ans = adaboost.predict(clfs, alphas, frameInt)
        if ans == 1:
            res.append(rect)
    return res
Example #43
0
def find_cand(gray) :
	kernel = charKernel()
	filtered = numpy.empty_like(gray)
	cv2.filter2D(gray, -1, kernel, filtered, (-1, -1), 0, cv2.BORDER_CONSTANT)
	ss = cv2.integral(gray)
	result = []
	cset = set()
	for loc in localmax_loc(filtered) :
		r = local_find(ss, loc)
		if r not in cset :
			result.append(r)
			cset.add(r)
	return result
Example #44
0
File: __init__.py Project: bwhite/imseg
def convert_labels_probs_to_integrals(label_mask, max_probs, num_vals):
    """
    Args:
        label_mask:
        max_probs:
        num_vals: 
    """
    masks = []
    for x in range(num_vals):
        m = np.asfarray(label_mask == x) * max_probs
        m = cv2.integral(m)
        masks.append(m)
    return np.ascontiguousarray(np.dstack(masks))
Example #45
0
File: __init__.py Project: bwhite/imseg
def convert_labels_to_integrals(label_mask, num_vals):
    """
    Args:
        label_mask: 
        num_vals: 
    """
    masks = []
    print(label_mask.shape)
    for x in range(num_vals):
        m = np.asfarray(label_mask == x)
        m = cv2.integral(m)
        masks.append(m)
    out = np.dstack(masks)
    return np.ascontiguousarray(out)
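A usage sketch with a synthetic label mask (the sizes and number of labels are assumptions):

import numpy as np

label_mask = np.random.randint(0, 3, size=(10, 10))
integrals = convert_labels_to_integrals(label_mask, 3)
print(integrals.shape)  # (11, 11, 3): one integral image per label value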
Example #46
0
File: ct.py Project: tsaith/vision
    def update_distr(self, frame, object_box):

        # Obtain the positive and negative samples
        boxes_pos = self.sample_boxes(frame, object_box, self._radius_pos, 0, 1000000)
        boxes_neg = self.sample_boxes(frame, object_box, self._radius_search*1.5, self._radius_pos+4, 100)

        # Integral image
        integral_image = cv2.integral(frame, sdepth=cv2.CV_64F)

        feature_values_pos = cy_get_feature_values(boxes_pos, self._features, self._feature_weights, integral_image)
        feature_values_neg = cy_get_feature_values(boxes_neg, self._features, self._feature_weights, integral_image)

        self.update_classifier(self._mu_pos, self._sigma_pos, feature_values_pos, self._learn_rate)
        self.update_classifier(self._mu_neg, self._sigma_neg, feature_values_neg, self._learn_rate)
Example #47
0
    def update_features(self, img_array, index, use_memory):

        if use_memory and (index in self.feats_dictionary):
            self.integral_img, self.avg_rc, self.avg_gc, self.avg_bc, self.avg_rc_h, \
            self.avg_gc_h, self.avg_bc_h, self.gauss1rc, self.gauss1gc, self.gauss1bc,\
            self.gauss35rc,self.gauss35gc, self.gauss35bc, self.log2rc, self.log2gc, self.log2bc,\
            self.log35rc, self.log35gc, self.log35bc = self.feats_dictionary[index]
            return

        img_array = mirror_borders(img_array, self.patch_size // 2)

        # integral image
        self.integral_img = cv2.integral(img_array[:,:,0])

        # average image red and green channel patch size
        self.avg_rc = cv2.blur(img_array[:,:,0], (self.patch_size, self.patch_size))
        self.avg_gc = cv2.blur(img_array[:,:,1], (self.patch_size, self.patch_size))
        self.avg_bc = cv2.blur(img_array[:,:,2], (self.patch_size, self.patch_size))

        # average images all three channels
        self.avg_rc_h = cv2.blur(img_array[:,:,0], (self.patch_size//2, self.patch_size//2))
        self.avg_gc_h = cv2.blur(img_array[:,:,1], (self.patch_size//2, self.patch_size//2))
        self.avg_bc_h = cv2.blur(img_array[:,:,2], (self.patch_size//2, self.patch_size//2))

        # gaussian smoothed, sigma 1
        self.gauss1rc = nd.gaussian_filter(img_array[:,:,0], 1)
        self.gauss1gc = nd.gaussian_filter(img_array[:,:,1], 1)
        self.gauss1bc = nd.gaussian_filter(img_array[:,:,2], 1)

        # gaussian smoothed sigma 3.5
        self.gauss35rc = nd.gaussian_filter(img_array[:, :, 0], 3.5)
        self.gauss35gc = nd.gaussian_filter(img_array[:, :, 1], 3.5)
        self.gauss35bc = nd.gaussian_filter(img_array[:, :, 2], 3.5)

        # laplace of gaussian sigma 2 (all three channels)
        self.log2rc = nd.gaussian_laplace(img_array[:,:,0], 2)
        self.log2gc = nd.gaussian_laplace(img_array[:,:,1], 2)
        self.log2bc = nd.gaussian_laplace(img_array[:,:,2], 2)

        # laplace of gaussian sigma 3.5
        self.log35rc = nd.gaussian_laplace(img_array[:,:,0], 3.5)
        self.log35gc = nd.gaussian_laplace(img_array[:,:,1], 3.5)
        self.log35bc = nd.gaussian_laplace(img_array[:,:,2], 3.5)

        if use_memory:
            # add the computed features to the dictionary
            self.feats_dictionary[index] = self.integral_img, self.avg_rc, self.avg_gc, self.avg_bc, self.avg_rc_h,\
                self.avg_gc_h, self.avg_bc_h, self.gauss1rc, self.gauss1gc, self.gauss1bc, self.gauss35rc, self.gauss35gc,\
                self.gauss35bc, self.log2rc, self.log2gc, self.log2bc, self.log35rc, self.log35gc, self.log35bc
Example #48
0
    def __init__(self, image, needIntegral=True, needGray=True):
        height, width, channels = image.shape

        self.originalImage = image

        if needGray:
            self.gray_image = []
            if channels == 1:
                self.gray_image.append(image)
            elif channels == 3:
                self.gray_image.append(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))

        if needIntegral:
            self.integralImages = []
            for i in range(len(self.gray_image)):
                self.integralImages.append(cv2.integral(self.gray_image[i]))
Example #49
0
File: ct.py Project: tsaith/vision
    def process_frame(self, frame, object_box):
        """
        Process the frame.
        """

        # Update the object box
        detect_boxes = self.sample_boxes(frame, object_box, self._radius_search, 0, 1000000)
        self._integral_image = cv2.integral(frame, sdepth=cv2.CV_64F)
        feature_values = cy_get_feature_values(detect_boxes, self._features, self._feature_weights, self._integral_image)

        index_ratio_max, _ = cy_ratio_classifier(self._mu_pos, self._sigma_pos, self._mu_neg, self._sigma_neg, feature_values)
        object_box = detect_boxes[index_ratio_max]

        # Update distribution
        self.update_distr(frame, object_box)

        return object_box
Example #50
0
File: filter.py Project: hunse/fast-stereo
def _choose_fovea(cost, fovea_shape, n_disp):
    if tuple(fovea_shape) == (0, 0):
        return (0, 0)

    # this is copied from bryanfilter
    fm, fn = fovea_shape
    icost = cv2.integral(cost)
    fcost = -np.inf * np.ones_like(icost)
    fcostr = fcost[:-fm, :-fn]
    fcostr[:] = icost[fm:, fn:]
    fcostr -= icost[:-fm, fn:]
    fcostr -= icost[fm:, :-fn]
    fcostr += icost[:-fm, :-fn]
    fcostr[:, :n_disp] = -np.inf  # need space left of fovea for disparity

    fovea_ij = np.unravel_index(np.argmax(fcost), fcost.shape)
    return fovea_ij
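A usage sketch with a random cost image (shape, fovea size and n_disp are illustrative; the module's cv2/numpy imports are assumed):

import numpy as np

cost = np.random.rand(32, 40).astype(np.float32)
print(_choose_fovea(cost, (8, 8), n_disp=4))  # (i, j) of the best-scoring 8x8 fovea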
Example #51
0
def get_acf_sum(img_data):
    '''Get sum of 4 * 4 matrix in a channel.

    Parameters
    ----------
    img_data: np.ndarray
      Data of image.
    '''
    length, width = img_data.shape
    s_length, s_width = ceil(length / 4.), ceil(width / 4.)
    sum_data = np.zeros((s_length, s_width), dtype=np.float32)
    fix_img_data = np.zeros((s_length * 4, s_width * 4),
                            dtype=np.float32)
    for x in range(0, length):
        fix_img_data[x, :width] = img_data[x, :]
    integral_data = cv2.integral(fix_img_data)
    for x in range(0, length, 4):
        for y in range(0, width, 4):
            # 4x4 block sum from the four integral-image corners
            sum_data[x // 4, y // 4] = (integral_data[x + 4, y + 4] -
                                        integral_data[x, y + 4] -
                                        integral_data[x + 4, y] +
                                        integral_data[x, y])
    return sum_data
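A usage sketch (dimensions are illustrative; the function zero-pads trailing rows/columns up to a multiple of 4 before summing):

import numpy as np

data = np.random.rand(9, 10).astype(np.float32)
print(get_acf_sum(data).shape)  # (ceil(9/4), ceil(10/4)) -> (3, 3)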
Example #52
0
def detectaM(imag,sx,sy):
    somaf = cv2.integral(imag)
    h,w = imag.shape
    px = w // sx
    py = h // sy
    sqv = []
    bmax = float(w*h/(sx*sy))*255.0
    for ix in range(0,sx):
        for iy in range(0,sy):
            x1 = ix * px
            y1 = iy * py
            x2 = (ix + 1) * px -1
            y2 = (iy + 1) * py -1
            v = somaf[y2,x2]-somaf[y2,x1]-somaf[y1,x2]+somaf[y1,x1]
            sqv.append(v)
            #print ix,':',iy,'->',v
            pp = float(v)/bmax
            if pp > 0.1:
                return True
            #    cv2.imshow('blackmask',imag)
    return False
Example #53
0
def detectaM(imag,sx,sy):
    somaf = cv2.integral(imag)
    h,w = imag.shape
    px = w // sx
    py = h // sy
    sqv = []
    bmax = float(w*h/(sx*sy))*255.0
    resp = False
    xmin = w
    ymin = h
    xmax = 0
    ymax = 0
    for ix in range(0,sx):
        for iy in range(0,sy):
            x1 = ix * px
            y1 = iy * py
            x2 = (ix + 1) * px 
            y2 = (iy + 1) * py 
            v = somaf[y2,x2]-somaf[y2,x1]-somaf[y1,x2]+somaf[y1,x1]
            sqv.append(v)
            pp = float(v)/bmax
            #print ix,':',iy,'->',v," per:",pp
            if pp > 0.1:
                #print ix,':',iy,'(',bmax,')->',v," per:",pp
                xmin = x1 if x1 < xmin else xmin
                ymin = y1 if y1 < ymin else ymin
                xmax = x2 if x2 > xmax else xmax
                ymax = y2 if y2 > ymax else ymax
                #return True
                resp = True
            #    cv2.imshow('blackmask',imag)
    if resp:
        xmin = xmin - px if xmin >= px else xmin
        ymin = ymin - py if ymin >= py else ymin
        xmax = xmax + px if (xmax + px) <= w else xmax
        ymax = ymax + py if (ymax + py) <= h else ymax

        print(xmin, ymin, xmax, ymax)
    return resp,np.array([xmin,ymin,xmax,ymax])
Example #54
0
File: imag.py Project: Strilanc/Qubery
def integrate_rows(frame):
    """
    Computes the row-wise integral of an image. The row-wise integral changes each entry to the partial sum of all
    elements in the row up to the given entry. For example, [[1,2,3],[2,3,4]] becomes [[1,3,6],[2,5,9]].

    :param frame: The image to integrate row-wise.

    >>> (integrate_rows(np.array([[1, 2, 3], \
                                  [2, 3, 4]], dtype=np.float32)) \
           == np.array([[1, 3, 6], \
                        [2, 5, 9]])).all()
    True
    >>> (integrate_rows(np.array([[1, 2, 3, 4], \
                                  [5, 6, 7, 8], \
                                  [9, 10, 11, 12]], dtype=np.float32)) \
           == np.array([[1, 3, 6, 10], \
                        [5, 11, 18, 26], \
                        [9, 19, 30, 42]])).all()
    True
    """
    integral = cv2.integral(frame)
    reduced = integral - np.roll(integral, +1, axis=0)
    return reduced[1:, 1:]
Example #55
0
File: ImageIO.py Project: zo7/face-rec
def loadFaceDetectionImages(directory, label):
    '''
    Loads base face detection images from a given directory.

    Args:
        directory (str): The directory where face detection images are.
        label (int): The label that should be assigned to each image.
    Returns:
        list[Image], the loaded images.
    '''
    image_data = []
    for file in os.listdir(directory):
        if file.endswith('.pgm'):
            image = cv2.imread(os.path.join(directory, file), cv2.IMREAD_GRAYSCALE)
            integral_image = cv2.integral(image)

            img_data_tuple = list()
            img_data_tuple.append(integral_image) # integral image
            img_data_tuple.append(label) # label
            img_data_tuple.append(0.0) # weight

            image_data.append(img_data_tuple)
    return image_data
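The third slot of each tuple is a placeholder weight. A common boosting setup initializes weights uniformly over the training set; a small sketch under that assumption (the directory names are hypothetical, not from the original project):

faces = loadFaceDetectionImages('faces/', 1)
others = loadFaceDetectionImages('non_faces/', 0)
training = faces + others
for entry in training:  # entry = [integral_image, label, weight]
    entry[2] = 1.0 / len(training)  # uniform initial weights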
Example #56
    def geometricmoments(gray):
        """computes the 0-3rd geometric moments of a 2-dimensional grayscale 
        image and returns a dictionary. Grayscale image pixel values are 
        normalized to [0,1] 
        >> Note: See dictionary definition for dictionary keys (ms_keys)
        >> (numpy.array) -> dictionary
        >> input: 
            gray := 2D numpy array of size M,N
        >> output: 
            moments:= 10-element dictionary, 0 to 3rd geometric moments 'pq'; 
                      p+q = moment order
        """
        # *********** THE DICTIONARY THAT WILL CONTAIN THE MOMENTS!
        moments = {}
        ms_list = [[1,0],[0,1],[2,0],[1,1],[0,2],[3,0],[2,1],[1,2],[0,3]]
        ms_keys = ['m10','m01','m20','m11', 'm02', 'm30', 'm21','m12', 'm03']
        # =============== Using the OpenCV expression for geometric moments (see OpenCV docs):
        # Image dimensions:
        M,N = gray.shape
        # Zeroth moment = max(integral image):
        integral_image = cv2.integral(gray)
        moments['m00'] = np.max(integral_image)
        # The remaining moments:
        k = 0 #index for the dictionary keys
        for l in ms_list:
            p,q = l
            m =  raw_moments(gray,p,q)
#            for xi in np.arange(0,M):
#                xp = xi**p
#                for yj in np.arange(0,N):
#                    m += xp * (yj**q) * (gray[xi,yj])
#                # end yj
#            #end xi
            moments[ms_keys[k]] = m
            k += 1
        #end l
        return moments
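A small usage sketch (ours, not from the original file): the image centroid follows directly from the zeroth and first-order moments. Note that in this code the p exponent runs over the first (row) axis, per the commented-out reference loop above.

m = geometricmoments(gray)
x_bar = m['m10'] / m['m00']  # centroid along the first (row) axis
y_bar = m['m01'] / m['m00']  # centroid along the second (column) axis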
Example #57
File: tests.py Project: Daiver/jff
    def test_stump01(self):
        a = np.array([
            [1, 1, 1, 1],
            [1, 1, 1, 1],
            [2, 2, 2, 2],
            [2, 2, 2, 2],
            ], dtype=np.float32)
        aInt = cv2.integral(a)
        rect = [0, 0, 4, 4]
        negR, posR = haar.haarHorizLine(rect[0], rect[1], rect[2], rect[3])
        val = haar.computeHaarFeature(aInt, negR, posR)
        self.assertTrue(abs(val - 8.0) < 0.00001)

        stump = dec_stump.Stump('hor', rect, 6, 1)
        ans = stump.predict(aInt)
        self.assertEqual(ans, 1)

        stump = dec_stump.Stump('hor', rect, 9, 1)
        ans = stump.predict(aInt)
        self.assertEqual(ans, 0)

        stump = dec_stump.Stump('hor', rect, 6, -1)
        ans = stump.predict(aInt)
        self.assertEqual(ans, 0)
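The three assertions are consistent with the usual threshold/polarity decision stump; a sketch of that rule (assumed, since dec_stump itself is not shown here):

def stump_predict(value, threshold, polarity):
    # predict 1 iff the feature response lies on the polarity side of the threshold
    return 1 if polarity * value > polarity * threshold else 0

assert stump_predict(8.0, 6, 1) == 1   # matches the first stump above
assert stump_predict(8.0, 9, 1) == 0
assert stump_predict(8.0, 6, -1) == 0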
Example #58
    def detect(self,frame,user_roi,visualize=False):
        u_r = user_roi
        if self.window_should_open:
            self.open_window((frame.img.shape[1],frame.img.shape[0]))
        if self.window_should_close:
            self.close_window()

        if self._window:
            debug_img = np.zeros(frame.img.shape,frame.img.dtype)


        #get the user_roi
        img = frame.img
        r_img = img[u_r.view]
        #        bias_field = preproc.EstimateBias(r_img)
        # r_img = preproc.Unbias(r_img, bias_field)
        r_img = preproc.GaussBlur(r_img)
        r_img = preproc.RobustRescale(r_img)
        frame.img[u_r.view] = r_img
        gray_img = cv2.cvtColor(r_img,cv2.COLOR_BGR2GRAY)


        # coarse pupil detection

        if self.coarse_detection.value:
            integral = cv2.integral(gray_img)
            integral =  np.array(integral,dtype=c_float)
            x,y,w,response = eye_filter(integral,self.coarse_filter_min,self.coarse_filter_max)
            p_r = Roi(gray_img.shape)
            if w>0:
                p_r.set((y,x,y+w,x+w))
            else:
                p_r.set((0,0,-1,-1))
        else:
            p_r = Roi(gray_img.shape)
            p_r.set((0,0,None,None))
            w = img.shape[0]/2

        coarse_pupil_width = w/2.
        padding = coarse_pupil_width/4.
        pupil_img = gray_img[p_r.view]



        # binary thresholding of pupil dark areas
        hist = cv2.calcHist([pupil_img],[0],None,[256],[0,256]) #(images, channels, mask, histSize, ranges[, hist[, accumulate]])
        bins = np.arange(hist.shape[0])
        spikes = bins[hist[:,0]>40] # every intensity seen in more than 40 pixels
        if spikes.shape[0] >0:
            lowest_spike = spikes.min()
            highest_spike = spikes.max()
        else:
            lowest_spike = 200
            highest_spike = 255

        offset = self.intensity_range.value
        spectral_offset = 5
        if visualize:
            # display the histogram
            sx,sy = 100,1
            colors = ((0,0,255),(255,0,0),(255,255,0),(255,255,255))
            h,w,chan = img.shape
            hist *= 1./hist.max()  # normalize for display

            for i,h in zip(bins,hist[:,0]):
                c = colors[1]
                cv2.line(img,(w,int(i*sy)),(w-int(h*sx),int(i*sy)),c)
            cv2.line(img,(w,int(lowest_spike*sy)),(int(w-.5*sx),int(lowest_spike*sy)),colors[0])
            cv2.line(img,(w,int((lowest_spike+offset)*sy)),(int(w-.5*sx),int((lowest_spike+offset)*sy)),colors[2])
            cv2.line(img,(w,int((highest_spike)*sy)),(int(w-.5*sx),int((highest_spike)*sy)),colors[0])
            cv2.line(img,(w,int((highest_spike- spectral_offset )*sy)),(int(w-.5*sx),int((highest_spike - spectral_offset)*sy)),colors[3])

        # create dark and spectral glint masks
        self.bin_thresh.value = lowest_spike
        binary_img = bin_thresholding(pupil_img,image_upper=lowest_spike + offset)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7))
        cv2.dilate(binary_img, kernel,binary_img, iterations=2)
        spec_mask = bin_thresholding(pupil_img, image_upper=highest_spike - spectral_offset)
        cv2.erode(spec_mask, kernel,spec_mask, iterations=1)

        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9,9))

        #open operation to remove eye lashes
        pupil_img = cv2.morphologyEx(pupil_img, cv2.MORPH_OPEN, kernel)

        if self.blur > 1:
            pupil_img = cv2.medianBlur(pupil_img,self.blur.value)

        edges = cv2.Canny(pupil_img,
                            self.canny_thresh,
                            self.canny_thresh*self.canny_ratio,
                            apertureSize= self.canny_aperture)


        # remove edges in areas not dark enough and where the glint is (spectral reflection from the IR LEDs)
        edges = cv2.min(edges, spec_mask)
        edges = cv2.min(edges,binary_img)

        overlay =  img[u_r.view][p_r.view]
        if visualize:
            b,g,r = overlay[:,:,0],overlay[:,:,1],overlay[:,:,2]
            g[:] = cv2.max(g,edges)
            b[:] = cv2.max(b,binary_img)
            b[:] = cv2.min(b,spec_mask)

            # draw a frame around the automatic pupil ROI in overlay.
            overlay[::2,0] = 255 #yeay numpy broadcasting
            overlay[::2,-1]= 255
            overlay[0,::2] = 255
            overlay[-1,::2]= 255
            # draw a frame around the area we require the pupil center to be.
            overlay[padding:-padding:4,padding] = 255
            overlay[padding:-padding:4,-padding]= 255
            overlay[padding,padding:-padding:4] = 255
            overlay[-padding,padding:-padding:4]= 255

        if visualize:
            c = (100.,frame.img.shape[0]-100.)
            e_max = ((c),(self.pupil_max.value,self.pupil_max.value),0)
            e_recent = ((c),(self.target_size.value,self.target_size.value),0)
            e_min = ((c),(self.pupil_min.value,self.pupil_min.value),0)
            cv2.ellipse(frame.img,e_min,(0,0,255),1)
            cv2.ellipse(frame.img,e_recent,(0,255,0),1)
            cv2.ellipse(frame.img,e_max,(0,0,255),1)

        #get raw edge pix for later
        raw_edges = cv2.findNonZero(edges)

        def ellipse_true_support(e,raw_edges):
            a,b = e[1][0]/2.,e[1][1]/2. # major minor radii of candidate ellipse
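            # Ramanujan's approximation: C ~ pi*(3*(a+b) - sqrt((3a+b)*(a+3b)))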
            ellipse_circumference = np.pi*abs(3*(a+b)-np.sqrt(10*a*b+3*(a**2+b**2)))
            distances = dist_pts_ellipse(e,raw_edges)
            support_pixels = raw_edges[distances<=1.3]
            # support_ratio = support_pixel.shape[0]/ellipse_circumference
            return support_pixels,ellipse_circumference

        # if we had a good ellipse before, let's see if it is still a good first guess:
        if self.strong_prior:
            e = p_r.sub_vector(u_r.sub_vector(self.strong_prior[0])),self.strong_prior[1],self.strong_prior[2]

            self.strong_prior = None
            if raw_edges is not None:
                support_pixels,ellipse_circumference = ellipse_true_support(e,raw_edges)
                support_ratio =  support_pixels.shape[0]/ellipse_circumference
                if support_ratio >= self.strong_perimeter_ratio_range[0]:
                    refit_e = cv2.fitEllipse(support_pixels)
                    if self._window:
                        cv2.ellipse(debug_img,e,(255,100,100),thickness=4)
                        cv2.ellipse(debug_img,refit_e,(0,0,255),thickness=1)
                    e = refit_e
                    self.strong_prior = u_r.add_vector(p_r.add_vector(e[0])),e[1],e[2]
                    goodness = min(1.,support_ratio)
                    pupil_ellipse = {}
                    pupil_ellipse['confidence'] = goodness
                    pupil_ellipse['ellipse'] = e
                    pupil_ellipse['roi_center'] = e[0]
                    pupil_ellipse['major'] = max(e[1])
                    pupil_ellipse['minor'] = min(e[1])
                    pupil_ellipse['apparent_pupil_size'] = max(e[1])
                    pupil_ellipse['axes'] = e[1]
                    pupil_ellipse['angle'] = e[2]
                    e_img_center =u_r.add_vector(p_r.add_vector(e[0]))
                    norm_center = normalize(e_img_center,(frame.img.shape[1], frame.img.shape[0]),flip_y=True)
                    pupil_ellipse['norm_pupil'] = norm_center
                    pupil_ellipse['center'] = e_img_center
                    pupil_ellipse['timestamp'] = frame.timestamp

                    self.target_size.value = max(e[1])

                    self.confidence.value = goodness
                    self.confidence_hist.append(goodness)
                    self.confidence_hist[:-200]=[]
                    if self._window:
                        #draw a little animation of confidence
                        cv2.putText(debug_img, 'good',(410,debug_img.shape[0]-100), cv2.FONT_HERSHEY_SIMPLEX,0.3,(255,100,100))
                        cv2.putText(debug_img, 'threshold',(410,debug_img.shape[0]-int(self.final_perimeter_ratio_range[0]*100)), cv2.FONT_HERSHEY_SIMPLEX,0.3,(255,100,100))
                        cv2.putText(debug_img, 'no detection',(410,debug_img.shape[0]-10), cv2.FONT_HERSHEY_SIMPLEX,0.3,(255,100,100))
                        lines = np.array([[[2*x,debug_img.shape[0]-int(100*y)],[2*x,debug_img.shape[0]]] for x,y in enumerate(self.confidence_hist)])
                        cv2.polylines(debug_img,lines,isClosed=False,color=(255,100,100))
                        self.gl_display_in_window(debug_img)
                    return pupil_ellipse





        # from edges to contours
        contours, hierarchy = cv2.findContours(edges,
                                            mode=cv2.RETR_LIST,
                                            method=cv2.CHAIN_APPROX_NONE,offset=(0,0)) #TC89_KCOS
        # contours is a list of arrays like array([[[108, 290]],[[111, 290]]], dtype=int32), shape = (number of points, 1, 2)

        ### first we want to filter out the bad stuff
        # too short
        good_contours = [c for c in contours if c.shape[0]>self.min_contour_size.value]
        # now we learn things about each contour by looking at the curvature.
        # For this we need to simplify the contour so that pt to pt angles become more meaningful
        aprox_contours = [cv2.approxPolyDP(c,epsilon=1.5,closed=False) for c in good_contours]

        if self._window:
            x_shift = coarse_pupil_width*2
            color = zip(range(0,250,15),range(0,255,15)[::-1],range(230,250))
        split_contours = []
        for c in aprox_contours:
            curvature = GetAnglesPolyline(c)
            # we split whenever there is a real kink (abs(curvature)<right angle) or a change in the general direction
            kink_idx = find_kink_and_dir_change(curvature,80)
            segs = split_at_corner_index(c,kink_idx)

            #TODO: split at sharp inward turns
            for s in segs:
                if s.shape[0]>2:
                    split_contours.append(s)
                    if self._window:
                        c = color.pop(0)
                        color.append(c)
                        s = s.copy()
                        s[:,:,0] += debug_img.shape[1]-coarse_pupil_width*2
                        # s[:,:,0] += x_shift
                        # x_shift += 5
                        cv2.polylines(debug_img,[s],isClosed=False,color=map(lambda x: x,c),thickness = 1,lineType=4)#cv2.CV_AA

        split_contours.sort(key=lambda x:-x.shape[0])
        # print [x.shape[0]for x in split_contours]
        if len(split_contours) == 0:
            # not a single useful segment found -> no pupil found
            self.confidence.value = 0
            self.confidence_hist.append(0)
            if self._window:
                self.gl_display_in_window(debug_img)
            return {'timestamp':frame.timestamp,'norm_pupil':None}


        # removing stubs makes combinatorial search feasible
        split_contours = [c for c in split_contours if c.shape[0]>3]

        def ellipse_filter(e):
            in_center = padding < e[0][1] < pupil_img.shape[0]-padding and padding < e[0][0] < pupil_img.shape[1]-padding
            if in_center:
                is_round = min(e[1])/max(e[1]) >= self.min_ratio
                if is_round:
                    right_size = self.pupil_min.value <= max(e[1]) <= self.pupil_max.value
                    if right_size:
                        return True
            return False

        def ellipse_on_blue(e):
            center_on_dark = binary_img[e[0][1],e[0][0]]
            return bool(center_on_dark)

        def ellipse_support_ratio(e,contours):
            a,b = e[1][0]/2.,e[1][1]/2. # major minor radii of candidate ellipse
            ellipse_area =  np.pi*a*b
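            # circumference via Ramanujan's approximation, as in ellipse_true_support above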
            ellipse_circumference = np.pi*abs(3*(a+b)-np.sqrt(10*a*b+3*(a**2+b**2)))
            actual_area = cv2.contourArea(cv2.convexHull(np.concatenate(contours)))
            actual_contour_length = sum([cv2.arcLength(c,closed=False) for c in contours])
            area_ratio = actual_area / ellipse_area
            perimeter_ratio = actual_contour_length / ellipse_circumference #we assume here that the contour lies close to the ellipse boundary
            return perimeter_ratio,area_ratio


        def final_fitting(c,edges):
            #use the real edge pixels to fit, not the approximated contours
            support_mask = np.zeros(edges.shape,edges.dtype)
            cv2.polylines(support_mask,c,isClosed=False,color=(255,255,255),thickness=2)
            # draw into the support mask with thickness 2
            new_edges = cv2.min(edges, support_mask)
            new_contours = cv2.findNonZero(new_edges)
            if self._window:
                new_edges[new_edges!=0] = 255
                overlay[:,:,1] = cv2.max(overlay[:,:,1], new_edges)
                overlay[:,:,2] = cv2.max(overlay[:,:,2], new_edges)
            new_e = cv2.fitEllipse(new_contours)
            return new_e,new_contours


        # finding potential candidates for ellipse seeds that describe the pupil.
        strong_seed_contours = []
        weak_seed_contours = []
        for idx, c in enumerate(split_contours):
            if c.shape[0] >=5:
                e = cv2.fitEllipse(c)
                # is this ellipse a plausible candidate for a pupil?
                if ellipse_filter(e):
                    distances = dist_pts_ellipse(e,c)
                    fit_variance = np.sum(distances**2)/float(distances.shape[0])
                    if fit_variance <= self.inital_ellipse_fit_threshhold:
                        # how much ellipse is supported by this contour?
                        perimeter_ratio,area_ratio = ellipse_support_ratio(e,[c])
                        # logger.debug('Ellipse no %s with perimeter_ratio: %s , area_ratio: %s'%(idx,perimeter_ratio,area_ratio))
                        if self.strong_perimeter_ratio_range[0]<= perimeter_ratio <= self.strong_perimeter_ratio_range[1] and self.strong_area_ratio_range[0]<= area_ratio <= self.strong_area_ratio_range[1]:
                            strong_seed_contours.append(idx)
                            if self._window:
                                cv2.polylines(debug_img,[c],isClosed=False,color=(255,100,100),thickness=4)
                                e = (e[0][0]+debug_img.shape[1]-coarse_pupil_width*4,e[0][1]),e[1],e[2]
                                cv2.ellipse(debug_img,e,color=(255,100,100),thickness=3)
                        else:
                            weak_seed_contours.append(idx)
                            if self._window:
                                cv2.polylines(debug_img,[c],isClosed=False,color=(255,0,0),thickness=2)
                                e = (e[0][0]+debug_img.shape[1]-coarse_pupil_width*4,e[0][1]),e[1],e[2]
                                cv2.ellipse(debug_img,e,color=(255,0,0))

        sc = np.array(split_contours)


        if strong_seed_contours:
            seed_idx = strong_seed_contours
        elif weak_seed_contours:
            seed_idx = weak_seed_contours

        if not (strong_seed_contours or weak_seed_contours):
            if self._window:
                self.gl_display_in_window(debug_img)
            self.confidence.value = 0
            self.confidence_hist.append(0)
            return {'timestamp':frame.timestamp,'norm_pupil':None}

        # if self._window:
        #     cv2.polylines(debug_img,[split_contours[i] for i in seed_idx],isClosed=False,color=(255,255,100),thickness=3)

        def ellipse_eval(contours):
            c = np.concatenate(contours)
            e = cv2.fitEllipse(c)
            d = dist_pts_ellipse(e,c)
            fit_variance = np.sum(d**2)/float(d.shape[0])
            return fit_variance <= self.inital_ellipse_fit_threshhold


        solutions = pruning_quick_combine(split_contours,ellipse_eval,seed_idx,max_evals=1000,max_depth=5)
        solutions = filter_subsets(solutions)
        ratings = []


        for s in solutions:
            e = cv2.fitEllipse(np.concatenate(sc[s]))
            if self._window:
                cv2.ellipse(debug_img,e,(0,150,100))
            support_pixels,ellipse_circumference = ellipse_true_support(e,raw_edges)
            support_ratio =  support_pixels.shape[0]/ellipse_circumference
            # TODO: refine the selection of the final candidate
            if support_ratio >=self.final_perimeter_ratio_range[0] and ellipse_filter(e):
                ratings.append(support_pixels.shape[0])
                if support_ratio >=self.strong_perimeter_ratio_range[0]:
                    self.strong_prior = u_r.add_vector(p_r.add_vector(e[0])),e[1],e[2]
                    if self._window:
                        cv2.ellipse(debug_img,e,(0,255,255),thickness = 2)
            else:
                #not a valid solution, bad rating
                ratings.append(-1)


        # selected ellipse
        if max(ratings) == -1:
            #no good final ellipse found
            if self._window:
                self.gl_display_in_window(debug_img)
            self.confidence.value = 0
            self.confidence_hist.append(0)
            return {'timestamp':frame.timestamp,'norm_pupil':None}

        best = solutions[ratings.index(max(ratings))]
        e = cv2.fitEllipse(np.concatenate(sc[best]))

        #final calculation of goodness of fit
        support_pixels,ellipse_circumference = ellipse_true_support(e,raw_edges)
        support_ratio =  support_pixels.shape[0]/ellipse_circumference
        goodness = min(1.,support_ratio)

        #final fitting and return of result
        new_e,final_edges = final_fitting(sc[best],edges)
        size_dif = abs(1 - max(e[1])/max(new_e[1]))
        if ellipse_filter(new_e) and size_dif < .3:
            if self._window:
                cv2.ellipse(debug_img,new_e,(0,255,0))
            e = new_e


        pupil_ellipse = {}
        pupil_ellipse['confidence'] = goodness
        pupil_ellipse['ellipse'] = e
        pupil_ellipse['pos_in_roi'] = e[0]
        pupil_ellipse['major'] = max(e[1])
        pupil_ellipse['apparent_pupil_size'] = max(e[1])
        pupil_ellipse['minor'] = min(e[1])
        pupil_ellipse['axes'] = e[1]
        pupil_ellipse['angle'] = e[2]
        e_img_center =u_r.add_vector(p_r.add_vector(e[0]))
        norm_center = normalize(e_img_center,(frame.img.shape[1], frame.img.shape[0]),flip_y=True)
        pupil_ellipse['norm_pupil'] = norm_center
        pupil_ellipse['center'] = e_img_center
        pupil_ellipse['timestamp'] = frame.timestamp

        self.target_size.value = max(e[1])

        self.confidence.value = goodness
        self.confidence_hist.append(goodness)
        self.confidence_hist[:-200]=[]
        if self._window:
            #draw a little animation of confidence
            cv2.putText(debug_img, 'good',(410,debug_img.shape[0]-100), cv2.FONT_HERSHEY_SIMPLEX,0.3,(255,100,100))
            cv2.putText(debug_img, 'threshold',(410,debug_img.shape[0]-int(self.final_perimeter_ratio_range[0]*100)), cv2.FONT_HERSHEY_SIMPLEX,0.3,(255,100,100))
            cv2.putText(debug_img, 'no detection',(410,debug_img.shape[0]-10), cv2.FONT_HERSHEY_SIMPLEX,0.3,(255,100,100))
            lines = np.array([[[2*x,debug_img.shape[0]-int(100*y)],[2*x,debug_img.shape[0]]] for x,y in enumerate(self.confidence_hist)])
            cv2.polylines(debug_img,lines,isClosed=False,color=(255,100,100))
            self.gl_display_in_window(debug_img)
        return pupil_ellipse