def normalisation(src):

    res = []

    # Make a grayscale copy of the image for processing
    gris = cv.CreateImage((src.width, src.height), cv.IPL_DEPTH_8U, 1)
    normal = cv.CreateImage((NORM_W, NORM_H), cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(src, gris, cv.CV_BGR2GRAY)

    # Detect faces (objects) in the grayscale copy
    faces = cv.HaarDetectObjects(gris, face_path, cv.CreateMemStorage())
    print "Nombre faces detectes : " + str(len(faces))
    for (x, y, w, h), n in faces:
        tmp = cv.CreateImage((w, h), cv.IPL_DEPTH_8U, 1)
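        # Crop the w x h face patch from the grayscale image; GetRectSubPix takes the patch centre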
        cv.GetRectSubPix(gris, tmp, (float(x + w / 2), float(y + h / 2)))

        cv.Resize(tmp, normal)
        cv.EqualizeHist(normal, normal)

        # Detect eyes, nose, and mouth on the normalized face:
        d = detection(normal)

        # Require at least 2 "normal" eyes or at least one eye with glasses, plus at least one mouth and at least one nose
        if ((len(d['eyes']) >= 2 or len(d['eyes2']) >= 1)
                and len(d['mouth']) >= 1 and len(d['nose']) >= 1):
            print "Visage detecte dans la photo"
            # Clone the buffer: `normal` is reused for every detected face
            res.append((cv.CloneImage(normal), (x, y, w, h)))
    return res
Example #2
    def listen(self):

        if isinstance(self.background, np.ndarray):
            # numpy shape is (rows, cols) = (height, width); CreateImage expects (width, height)
            theHeight, theWidth = self.background.shape[:2]
            bgimg = cv.CreateImage((theWidth, theHeight), 8, 3)
            img = cv.CreateImage((theWidth, theHeight), 8, 3)
        else:

            bgimg = cv.CreateImage(
                (self.background.width, self.background.height), 8, 3)
            img = cv.CreateImage(
                (self.background.width, self.background.height), 8, 3)
            theWidth = self.background.width
            theHeight = self.background.height

        cv.Copy(self.background, bgimg)
        smallimg = cv.CreateImage(
            (theWidth / self.zoom, theHeight / self.zoom), 8, 3)
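        # Crop a (width/zoom x height/zoom) window centred at the pan offset, then resize it back up to zoom in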
        cv.GetRectSubPix(bgimg, smallimg,
                         (theWidth / (2 * self.zoom) + self.offset[0],
                          theHeight / (2 * self.zoom) + self.offset[1]))
        cv.Resize(smallimg, img)
        if (self.cp != False):
            cv.Circle(img, self.zoomPt(self.cp.x, self.cp.y), 3,
                      cv.RGB(0, 255, 0), -1)

        cv.Line(img, (self.ch_x - 25, self.ch_y), (self.ch_x + 25, self.ch_y),
                cv.RGB(255, 255, 0))
        cv.Line(img, (self.ch_x, self.ch_y - 25), (self.ch_x, self.ch_y + 25),
                cv.RGB(255, 255, 0))
        cv.ShowImage(self.name, img)
        cv.WaitKey(25)
Example #3
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    window = cv.CreateImage((cv.Round(img.width), cv.Round(img.height)), 8, 3)
    if (cascade):
        t = cv.GetTickCount()
        faces = local_haar_detect(small_img, cascade, cv.CreateMemStorage(0),
                                  haar_scale, min_neighbors, haar_flags,
                                  min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        channels = None
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (cv.Round(
                    (x + w * .2) * image_scale), cv.Round(y * image_scale))
                pt2 = (cv.Round(
                    (x + w * .8) * image_scale), cv.Round(
                        (y + h) * image_scale))

                window = cv.CreateImage((cv.Round(w * .6) * image_scale,
                                         cv.Round(h) * image_scale), 8, 3)
                channels = [
                    cv.CreateImage((cv.Round(w * .6) * image_scale,
                                    cv.Round(h) * image_scale), 8, 1),
                    cv.CreateImage((cv.Round(w * .6) * image_scale,
                                    cv.Round(h) * image_scale), 8, 1),
                    cv.CreateImage((cv.Round(w * .6) * image_scale,
                                    cv.Round(h) * image_scale), 8, 1)
                ]
                # Crop the face window out of the full-resolution frame,
                # centred midway between pt1 and pt2, then smooth it before
                # averaging the colour channels
                cv.GetRectSubPix(img, window, (cv.Round(
                    (pt1[0] + pt2[0]) / 2.0), cv.Round(
                        (pt1[1] + pt2[1]) / 2.0)))
                cv.Smooth(window, window, cv.CV_GAUSSIAN)
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                cv.Split(window, channels[0], channels[1], channels[2], None)
                result.append([
                    cv.Avg(channels[0])[0],
                    cv.Avg(channels[1])[0],
                    cv.Avg(channels[2])[0]
                ])

    cv.ShowImage("result", img)
Example #4
def ClipImage(cvmat, p1, p2):
    u'''Crop the image to a rectangular region'''
    assert p1.x > 0 and p1.y > 0 and p2.x > 0 and p2.y > 0
    width = abs(p2.x - p1.x)
    height = abs(p2.y - p1.y)
    if width > 0 and height > 0:
        x = p1.x + (p2.x - p1.x) / 2
        y = p1.y + (p2.y - p1.y) / 2
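        # GetRectSubPix crops around a centre point, so (x, y) is the midpoint of p1 and p2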
        clipped = cv.CreateMat(height, width, cvmat.type)
        cv.GetRectSubPix(cvmat, clipped, (x, y))
        return clipped
    return None
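
A minimal usage sketch for ClipImage, assuming the legacy cv module is importable; the Point namedtuple, the file names and the coordinates below are placeholders rather than part of the original snippet:

from collections import namedtuple

import cv

Point = namedtuple('Point', ['x', 'y'])

mat = cv.LoadImageM("input.jpg")                        # placeholder input image (CvMat, BGR)
patch = ClipImage(mat, Point(10, 20), Point(110, 70))   # 100x50 region centred at (60, 45)
if patch is not None:
    cv.SaveImage("patch.jpg", patch)                    # placeholder output path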
Example #5
def cut_image(image):
    grayscale = cv.CreateImage(cv.GetSize(image), 8, 1)
    cv.CvtColor(image, grayscale, cv.CV_BGR2GRAY)
    #cv.EqualizeHist(grayscale, grayscale)

    faces = DetectFaces(grayscale)
    ans = []
    for face in faces:
        x, y, dx, dy = face[0]
        # CreateMat takes (rows, cols), i.e. (height, width)
        cropped = cv.CreateMat(dy, dx, cv.CV_8UC1)
        cv.GetRectSubPix(grayscale, cropped, (x + dx / 2, y + dy / 2))
        resized = cv.CreateImage((92, 112), 8, 1)
        cv.Resize(cropped, resized)
        ans.append(resized)
    return ans
Example #6
def extract_plate(image, cluster):
	"""De-skew and extract a detected plate from an image."""
	cluster = list(cluster)
	cluster.sort(cmp=lambda x,y: cmp(x.cx,y.cx))
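	# Skew angle: opposite side = difference in y between the outermost blobs, hypotenuse = their centre distance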
	o = cluster[-1].cy - cluster[0].cy
	h = cluster[0].dist_to(cluster[-1])
	angle = math.asin(o/h)
	matrix = cv.CreateMat(2, 3, cv.CV_32FC1)
	cx = (cluster[0].cx + cluster[-1].cx)/2.0
	cy = (cluster[0].cy + cluster[-1].cy)/2.0
	cv.GetRotationMatrix2D((cx,cy), angle*180.0/math.pi, 1, matrix)
	warp = cv.CreateImage(cv.GetSize(image), 
		cv.IPL_DEPTH_8U, 3)
	cv.WarpAffine(image, warp, matrix)
	# CreateImage needs integer dimensions
	ret = cv.CreateImage((int(h+cluster[0].dia*3.0), int(cluster[0].dia*1.5)), cv.IPL_DEPTH_8U, 3)
	cv.GetRectSubPix(warp, ret, (cx,cy))
	print cv.GetSize(ret)
	return ret
Example #7
def normalisation(img) :

	print ""
	if(os.path.exists(norm_path+dossier+"small_0."+img)):
		print "L'image "+str(img)+" est deja normalisee"
	else:
		print image_path+img
		src = cv.LoadImage(image_path+img)

		# Make a grayscale copy of the image for processing
		gris = cv.CreateImage( (src.width, src.height) , cv.IPL_DEPTH_8U, 1)
		normal = cv.CreateImage((NORM_W,NORM_H), cv.IPL_DEPTH_8U, 1)
		cv.CvtColor(src, gris, cv.CV_BGR2GRAY)		

		# Detect faces (objects) in the grayscale copy
		faces = cv.HaarDetectObjects(gris, face_path, cv.CreateMemStorage())

		cp = 0
		for (x,y,w,h),n in faces: 
			tmp = cv.CreateImage( (w,h) , cv.IPL_DEPTH_8U, 1)
			cv.GetRectSubPix(gris, tmp, (float(x + w/2), float(y + h/2)))

			cv.EqualizeHist(tmp, tmp)
			cv.Resize(tmp, normal)

			# Detect eyes, nose, and mouth on the face crop:
			d = detection(tmp)
			d['mouth2'] = best_mouth(d['mouth'])

			if( (len(d['eyes'])>=2 or len(d['eyes2'])>=1) and len(d['mouth'])>=1 and len(d['nose'])>=1 ) : 

				print "Visage detecte dans la photo : "+img
				# ----- Affichage visage ----- #
				affichage_visage((x,y,w,h), src)
				# ----- Affichage de toute les bouches ----- #
				#affichage(src, d['eyes'], d['eyes2'], d['nose'], d['mouth'], x, y)
				# ----- Affichage de la bouche la plus basse (en general la bonne) ----- #
				#affichage(src, d['eyes'], d['eyes2'], d['nose'], d['mouth2'], x, y)

				save(norm_path, dossier+"small_"+str(cp)+"."+img, normal)
				save(result, dossier+"face_"+img, src)
				cp = cp +1
Example #8
def NormalizeImage(cvmat, cilp_rect, perspective_points):
    u'''Normalize the image to make it easier to read'''
    # Extract the LCD region
    lcd = cv.CreateMat(cilp_rect.height, cilp_rect.width, cv.CV_8UC3)
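    # GetRectSubPix crops around a centre point, hence (cilp_rect.cx, cilp_rect.cy)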
    cv.GetRectSubPix(cvmat, lcd, (cilp_rect.cx, cilp_rect.cy))

    # Convert to grayscale
    grayed = cv.CreateMat(lcd.height, lcd.width, cv.CV_8UC1)
    cv.CvtColor(lcd, grayed, cv.CV_BGR2GRAY)

    # Adaptive thresholding
    filterd = cv.CreateMat(grayed.height, grayed.width, cv.CV_8UC1)
    cv.AdaptiveThreshold(
        grayed,
        filterd,
        255,
        adaptive_method=cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C,
        thresholdType=cv.CV_THRESH_BINARY,
        blockSize=15,
    )

    # Correct the perspective distortion
    transformed = cv.CreateMat(grayed.height, grayed.width, filterd.type)
    matrix = cv.CreateMat(3, 3, cv.CV_32F)
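    # Map the four measured corners (tl, tr, bl, br) onto the corners of the output rectangle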
    cv.GetPerspectiveTransform(
        ((perspective_points.tl.x, perspective_points.tl.y),
         (perspective_points.tr.x, perspective_points.tr.y),
         (perspective_points.bl.x, perspective_points.bl.y),
         (perspective_points.br.x, perspective_points.br.y)),
        ((0, 0), (filterd.width, 0), (0, filterd.height),
         (filterd.width, filterd.height)), matrix)
    cv.WarpPerspective(
        filterd,
        transformed,
        matrix,
        flags=cv.CV_WARP_FILL_OUTLIERS,
        fillval=255,
    )

    return transformed
Example #9
def find_faces_and_normalize(src) :
	# Make a grayscale copy of the image for processing
	gris = cv.CreateImage( (src.width, src.height) , cv.IPL_DEPTH_8U, 1)
	normal = cv.CreateImage((NORM_W,NORM_H), cv.IPL_DEPTH_8U, 1)
	cv.CvtColor(src, gris, cv.CV_BGR2GRAY)		

	# Detect faces (objects) in the grayscale copy
	faces = cv.HaarDetectObjects(gris, face_path, cv.CreateMemStorage())
	res = []
	for (x,y,w,h),n in faces: 
		tmp = cv.CreateImage( (w,h) , cv.IPL_DEPTH_8U, 1)
		cv.GetRectSubPix(gris, tmp, (float(x + w/2), float(y + h/2)))

		cv.EqualizeHist(tmp, tmp)
		cv.Resize(tmp, normal)

		# Detect eyes, nose, and mouth on the face crop:
		d = detection(tmp)

		if( (len(d['eyes'])>=2 or len(d['eyes2'])>=1) and len(d['mouth'])>=1 and len(d['nose'])>=1 ): 
			# Clone the buffer: `normal` is reused for every detected face
			res.append((cv.CloneImage(normal),(x,y,w,h)))
	return res
    def listen(self):
        bgimg = cv.CreateImage((self.background.width, self.background.height),
                               8, 3)
        img = cv.CreateImage((self.background.width, self.background.height),
                             8, 3)
        cv.Copy(self.background, bgimg)
        smallimg = cv.CreateImage((self.background.width / self.zoom,
                                   self.background.height / self.zoom), 8, 3)
        cv.GetRectSubPix(
            bgimg, smallimg,
            (self.background.width / (2 * self.zoom) + self.offset[0],
             self.background.height / (2 * self.zoom) + self.offset[1]))
        cv.Resize(smallimg, img)
        if (self.cp != False):
            cv.Circle(img, self.zoomPt(self.cp.x, self.cp.y), 3,
                      cv.RGB(0, 255, 0), -1)

        cv.Line(img, (self.ch_x - 25, self.ch_y), (self.ch_x + 25, self.ch_y),
                cv.RGB(255, 255, 0))
        cv.Line(img, (self.ch_x, self.ch_y - 25), (self.ch_x, self.ch_y + 25),
                cv.RGB(255, 255, 0))
        cv.ShowImage(self.name, img)
        cv.WaitKey(25)
    print friend[u'name'], friend[u'id']
    # retrieve image file and store in tree
    urllib.urlretrieve(
        "https://graph.facebook.com/" + friend[u'id'] +
        "/picture?width=200&height=200",
        "images/photo/" + friend[u'id'] + ".jpg")
    id_fb = friend[u'id'].decode('utf8')
    name = MySQLdb.escape_string(friend[u'name'].decode('utf8'))
    valeurs = "(" + id_fb + ",'" + name + "')"
    req = "INSERT INTO %s %s VALUES %s" % (table, champs, valeurs)
    # Execute the INSERT before committing
    bd.executerReq(req)
    bd.commit()

    image = cv.LoadImage("images/photo/" + friend[u'id'] + ".jpg")
    storage = cv.CreateMemStorage()
    detected = cv.HaarDetectObjects(image, haar, storage, 1.1, 2,
                                    cv.CV_HAAR_DO_CANNY_PRUNING, (10, 10))
    if detected:
        c = 0
        for face in detected:
            center = (face[0][0] + face[0][2] / 2, face[0][1] + face[0][3] / 2)
            rows = face[0][3]
            cols = face[0][2]
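            # Crop the detected face: dst is rows x cols = height x width, centred on the face rectangle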
            dst = cv.CreateMat(rows, cols, cv.CV_8UC3)
            cv.GetRectSubPix(image, dst, center)
            cv.SaveImage(
                'images/face/' + friend[u'id'] + "_" + str(c) + ".jpg", dst)
            c += 1

bd.close()
Example #12
    def listen(self):
        bgimg = cv.CreateImage((self.background.width, self.background.height),
                               8, 3)
        img = cv.CreateImage((self.background.width, self.background.height),
                             8, 3)
        cv.Copy(self.background, bgimg)
        smallimg = cv.CreateImage((self.background.width / self.zoom,
                                   self.background.height / self.zoom), 8, 3)
        cv.GetRectSubPix(
            bgimg, smallimg,
            (self.background.width / (2 * self.zoom) + self.offset[0],
             self.background.height / (2 * self.zoom) + self.offset[1]))
        cv.Resize(smallimg, img)

        cv.Smooth(img, img, cv.CV_GAUSSIAN)
        if (self.cp != False):
            cv.Circle(img, self.zoomPt(int(self.cp.x), int(self.cp.y)), 3,
                      cv.RGB(0, 255, 0), -1)
        mask = thresholding.threshold(img,
                                      thresholding.CUSTOM,
                                      False,
                                      crop_rect=None,
                                      cam_info=None,
                                      listener=None,
                                      hue_interval=(self.hue_low, self.hue_up))

        cv.Not(mask, mask)
        new_img = cv.CloneImage(img)
        cv.SetZero(new_img)
        cv.Copy(img, new_img, mask)
        new_img = thresholding.sat_threshold(new_img, 50)
        cv.Line(img, (self.ch_x - 25, self.ch_y), (self.ch_x + 25, self.ch_y),
                cv.RGB(255, 255, 0))
        cv.Line(img, (self.ch_x, self.ch_y - 25), (self.ch_x, self.ch_y + 25),
                cv.RGB(255, 255, 0))

        image_gray = cv.CreateImage(cv.GetSize(new_img), 8, 1)
        cv.CvtColor(new_img, image_gray, cv.CV_RGB2GRAY)
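        # Clean the mask with a morphological opening, then trace the contours of the remaining blobs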
        cv.MorphologyEx(image_gray, image_gray, None, None, cv.CV_MOP_OPEN, 1)
        storage = cv.CreateMemStorage(0)
        seq = cv.FindContours(image_gray, storage)
        points = []
        contour = seq
        centers = []
        ccs = []
        while contour:
            bound_rect = cv.BoundingRect(list(contour))
            area = cv.ContourArea(contour)
            cc = contour
            contour = contour.h_next()

            if area < 50 or area > 2500:
                continue
            ccs.append(cc)
            win, center, radius = cv.MinEnclosingCircle(cc)
            cv.DrawContours(new_img, cc, (0, 255, 0), (0, 255, 0), 0, 1)
            pt1 = (bound_rect[0], bound_rect[1])
            pt2 = (bound_rect[0] + bound_rect[2],
                   bound_rect[1] + bound_rect[3])
            points.append(pt1)
            points.append(pt2)
            cv.Circle(new_img, center, radius, (0, 0, 255))
            centers.append(center)
            #cv.Rectangle(new_img, pt1, pt2, cv.CV_RGB(255,0,0), 1)

            font = cv.InitFont(cv.CV_FONT_HERSHEY_PLAIN, 1, 1)
            cv.PutText(new_img, "%.2f" % area, pt1, font, (255, 255, 255))

        for cont1, cont2 in itertools.combinations(ccs, 2):
            if is_next_to(cont1, cont2):
                win, c1, r1 = cv.MinEnclosingCircle(cont1)
                win, c2, r2 = cv.MinEnclosingCircle(cont2)
                cv.Line(new_img, c1, c2, (255, 255, 0))
        #DRAW
        cv.ShowImage(self.name, new_img)
        #Do some funny business
        imgcs = {}
        satt = thresholding.sat_threshold(img, 50)
        for color in HueRanges.__dict__:
            if color == color.upper():
                img_c = thresholding.filter_color(satt, color)
                cv.ShowImage(color, img_c)
        cv.WaitKey(25)
    def getRegion(self, image, x, y, width, height):
        final = cv.CreateImage((int(width), int(height)), image.depth,
                               image.nChannels)
        cv.GetRectSubPix(image, final, (x + width / 2, y + height / 2))
        return final
Example #14
def detect_and_draw(img, cascade):

    global time_point
    global frame_no
    global input_data
    global fs
    global max_bps

    global last_f

    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    window = cv.CreateImage((cv.Round(img.width), cv.Round(img.height)), 8, 3)
    if (cascade):
        faces = local_haar_detect(small_img, cascade, cv.CreateMemStorage(0),
                                  haar_scale, min_neighbors, haar_flags,
                                  min_size)

        channels = None
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (cv.Round(
                    (x + w * .2) * image_scale), cv.Round(y * image_scale))
                pt2 = (cv.Round(
                    (x + w * .8) * image_scale), cv.Round(
                        (y + h) * image_scale))

                window = cv.CreateImage((cv.Round(w * .6) * image_scale,
                                         cv.Round(h) * image_scale), 8, 3)
                #cv.Smooth(window, window, cv.CV_GAUSSIAN, 3, 3)
                channels = [
                    cv.CreateImage((cv.Round(w * .6) * image_scale,
                                    cv.Round(h) * image_scale), 8, 1),
                    cv.CreateImage((cv.Round(w * .6) * image_scale,
                                    cv.Round(h) * image_scale), 8, 1),
                    cv.CreateImage((cv.Round(w * .6) * image_scale,
                                    cv.Round(h) * image_scale), 8, 1)
                ]

                cv.GetRectSubPix(img, window, (cv.Round(
                    (pt1[0] + pt2[0]) / 2.0), cv.Round(
                        (pt1[1] + pt2[1]) / 2.0)))

                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                cv.Split(window, channels[0], channels[1], channels[2], None)
                input_data.append([
                    cv.Avg(channels[0])[0],
                    cv.Avg(channels[1])[0],
                    cv.Avg(channels[2])[0]
                ])

                #measure the sampling frequency
                now_point = cv.GetTickCount()

                if float(fs) / 2 < max_bps and fs != 0:
                    max_bps = float(fs) / 2

                if len(input_data) > frame_no:
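                    # Estimate the sampling rate from the elapsed ticks, slide the window, and recompute the heart rate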
                    fs = cv.GetTickFrequency() * 1000000. / (now_point -
                                                             time_point)
                    input_data.pop(0)

                    #print my_functions.calc_heart_rate(input_data)
                    final_data = my_functions.calc_heart_rate(input_data)
                    tmp_last_f = my_functions.plot_diagrams(
                        final_data, fs, last_f)
                    last_f = tmp_last_f
                    print last_f

                time_point = now_point
        else:
            print "Can not detect face"

    cv.ShowImage("result", img)