Example #1
 def execute(self, image):
     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     cv2.equalizeHist(gray, gray)
     faces = self.face_cascade.detectMultiScale(gray,
                                                1.1,
                                                2,
                                                cv2.CASCADE_SCALE_IMAGE,
                                                (30, 30))
     for face in faces:
         faceimg = self.get_face(image, face)
         mask = np.zeros(
             (image.shape[0],
              image.shape[1],
              1),
             dtype=np.uint8)
         rect = (face[0], face[1], face[2], face[3])  # grabCut expects (x, y, w, h)
         bgd_model = np.zeros((1, 65), np.float64)  # grabCut requires 1x65 float64 models
         fgd_model = np.zeros((1, 65), np.float64)
         cv2.grabCut(
             image,
             mask,
             rect,
             bgd_model,
             fgd_model,
             10,
             mode=cv2.GC_INIT_WITH_RECT)
         b, g, r = cv2.split(image)
         b[mask == cv2.GC_BGD] = 255
         g[mask == cv2.GC_BGD] = 255
         r[mask == cv2.GC_BGD] = 255
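The excerpt stops after whitening the background in the split channels; a minimal continuation (an assumption, since the original is truncated here) would merge them back into the working image:

         # assumption: recombine the whitened channels; the probable-background
         # label (cv2.GC_PR_BGD) could be cleared the same way if desired
         image = cv2.merge((b, g, r))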
Example #2
def edge_detect(img):
    """ Edge detector """
    cv2.equalizeHist(img, img)
    img = cv2.Canny(img, img.mean()*0.66, img.mean()*1.33, apertureSize=3)
    # img[img != 0] = 255
    img = norm_img(img)
    return img
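norm_img is not defined in this excerpt; a plausible stand-in (a sketch, not the project's actual helper) stretches the edge map to the full 8-bit range:

def norm_img(img):
    # hypothetical helper: scale pixel values to span 0-255
    return cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX)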
Example #3
    def execute(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        cv2.equalizeHist(gray, gray)
        faces = self.face_cascade.detectMultiScale(gray, 1.1, 2, cv2.CASCADE_SCALE_IMAGE, (30, 30))
        facedata = []
        smallestx = smallesty = float('inf')
        for i in xrange(0, len(faces)):
            face = faces[i]

            faceimg, coord = self.get_image_data(image, face)
            y, x, _ = faceimg.shape
            if x < smallestx:
                smallestx = x
            if y < smallesty:
                smallesty = y
            facedata.append((faceimg, coord))

        for i in xrange(0, len(facedata)):
            _, lastcoord = facedata[i - 1]
            lastminy, lastmaxy, lastminx, lastmaxx = lastcoord

            face, coord = facedata[i]
            miny, maxy, minx, maxx = coord

            face = cv2.resize(face, (lastmaxx - lastminx, lastmaxy - lastminy))  # dsize is (width, height)
            welp = image[lastminy:lastmaxy, lastminx:lastmaxx]

            image[lastminy:lastmaxy, lastminx:lastmaxx] = cv2.addWeighted(
                welp, 0.5, face, 0.5, 0.0
            )  # cv2.merge((gray,gray,gray))

        return image
Example #4
def find_face_from_img(img):
   #convert
   gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
   gray = cv2.equalizeHist(gray)
   rects = cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=3, minSize=(40, 40), flags=cv2.CASCADE_SCALE_IMAGE)  # detect on the equalized grayscale
   print('found %d faces' % len(rects))
   if len(rects) > 0:
      image_scale=1.0
      for x, y, w, h in rects:
   # the input to cv.HaarDetectObjects was resized, so scale the
   # bounding box of each face and convert it to two CvPoints
         pt1 = (int(x * image_scale), int(y * image_scale))
         pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
         face = img[y: y + h, x: x + w]
         face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
         face = cv2.equalizeHist(face)
          eyes = eyecascade.detectMultiScale(face, scaleFactor=1.2, minNeighbors=3, minSize=(3,3), flags=cv2.CASCADE_SCALE_IMAGE)
         if len(eyes)==2: #if two eyes aren't detected, throw away the face
            eye=[]
            for xx,yy,ww,hh in eyes:
               eye.append(((xx + xx + ww) // 2, (yy + yy + hh) // 2))
               pt1 = (int(xx), int(yy))
               pt2 = (int(xx + ww), int(yy + hh))
               # cv2.rectangle(face,pt1,pt2,(255,0,0))
               cv2.circle(face, ((xx + xx + ww) // 2, (yy + yy + hh) // 2), 4, 255, -1)
            eye = [ min(eye), max(eye) ] #sort the eyes
            angle = math.degrees(math.atan2(eye[1][1] - eye[0][1], eye[1][0] - eye[0][0]))  # getRotationMatrix2D expects degrees
            rot = cv2.getRotationMatrix2D(eye[0], -angle, 1.0)
            face = cv2.warpAffine(face, rot, (150,150))
            face = cv2.resize(face, (150,150), interpolation=cv2.INTER_CUBIC)
            cv2.imwrite("faces/" + str(uuid.uuid4()) + ".jpg",face)
Example #5
def preprocess(image):
    mv = cv2.split(image)
    mv[0] = cv2.equalizeHist(mv[0])
    mv[1] = cv2.equalizeHist(mv[1])
    mv[2] = cv2.equalizeHist(mv[2])
    color = cv2.merge(mv)
    return color
Example #6
 def allHistEqual(self):
     """Applies a partial histogram equalization to the face."""
     width = self.fpp_result.shape[1]
     # Equalization is applied separately to the left and right halves; the middle is blended with the whole-image result
     left = self.fpp_result[0:self.fpp_result.shape[0], 0:width // 2]
     right = self.fpp_result[0:self.fpp_result.shape[0], width // 2:width]
     entire = cv2.equalizeHist(self.fpp_result)
     left = cv2.equalizeHist(left)
     right = cv2.equalizeHist(right)
     for x in range(width):
         for y in range(self.fpp_result.shape[0]):
             p = 0
             if x < (width // 4):
                 p = left[y, x]
             elif x < (width // 2):
                 l = left[y, x]
                 e = entire[y, x]
                 f = (x - width / 4.0) / (width / 4.0)
                 p = int((1.0 - f) * l + f * e + 0.5)
             elif x < (width * 3 // 4):
                 r = right[y, x - width // 2]
                 e = entire[y, x]
                 f = (x - width / 2.0) / (width / 4.0)
                 p = int((1.0 - f) * e + f * r + 0.5)
             else:
                 p = right[y, x - width // 2]
             self.fpp_result[y, x] = p
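The per-pixel loop above is slow in Python; an equivalent vectorized blend (a sketch assuming the width is divisible by 4, with the same quarter-width blend zones) could use NumPy broadcasting:

def all_hist_equal_vec(img):
    # Equalize halves and the whole image, then linearly blend the seams.
    h, w = img.shape
    entire = cv2.equalizeHist(img).astype(np.float32)
    left = cv2.equalizeHist(img[:, :w // 2]).astype(np.float32)
    right = cv2.equalizeHist(img[:, w // 2:]).astype(np.float32)
    ramp = np.linspace(0.0, 1.0, w // 4, endpoint=False)  # blend weights
    out = entire.copy()
    out[:, :w // 4] = left[:, :w // 4]
    out[:, w // 4:w // 2] = (1 - ramp) * left[:, w // 4:] + ramp * entire[:, w // 4:w // 2]
    out[:, w // 2:3 * w // 4] = (1 - ramp) * entire[:, w // 2:3 * w // 4] + ramp * right[:, :w // 4]
    out[:, 3 * w // 4:] = right[:, w // 4:]
    return np.clip(out + 0.5, 0, 255).astype(np.uint8)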
Example #7
def detect():
    q = QRobot()
    cap=cv2.VideoCapture(1)
    success, frame = cap.read()
    color = (0, 255, 0)
    classfier=cv2.CascadeClassifier("/home/echo/Desktop/qrobot/qrobot_py/haarcascade_frontalface_alt.xml")
    while success:
        success, frame = cap.read()
        size=frame.shape[:2]
        image=np.zeros(size,dtype=np.float16)
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.equalizeHist(image, image)
        
        divisor=8
        h, w = size
    minSize=(w // divisor, h // divisor)  # detectMultiScale needs integer sizes
        faceRects = classfier.detectMultiScale(image, 1.2, 2, cv2.CASCADE_SCALE_IMAGE,minSize)
        if len(faceRects)>0:
            for faceRect in faceRects: 
                    x, y, w, h = faceRect
                    cv2.rectangle(frame, (x, y), (x+w, y+h), color, 2)
                    q.left_wing_fast_up(1)
                    q.right_wing_fast_up(1)
                    q.heart_light_color(randint(0,255), randint(0,255), randint(0,255))
                    q.eye_emotion(randint(0,50), 1)
                    q.send_data()
        cv2.imshow("test", frame)
        key=cv2.waitKey(10)
        c = chr(key & 255)
        if c in ['q', 'Q', chr(27)]:
            break

    del q  
    cap.release()
    cv2.destroyWindow("test")
Example #8
    def update(self, image):
        self._faces = []

        if utils.is_gray(image):
            image = cv2.equalizeHist(image)
        else:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            cv2.equalizeHist(image, image)

        min_size = utils.width_height_devided_by(image, 8)
        face_rects = self._face_classifier.detectMultiScale(image,
                                                            self.scale_factor,
                                                            self.min_neighbors,
                                                            self.flags, min_size)

        if face_rects is not None:
            for face_rect in face_rects:
                face = Face()
                face.face_rect = face_rect
                x, y, w, h = face_rect

                search_rect = (x + w // 7, y, w * 2 // 7, h // 2)
                face.left_eye_rect = self._detect_one_object(self._eye_classifier, image, search_rect, 64)

                search_rect = (x + w * 4 // 7, y, w * 2 // 7, h // 2)
                face.right_eye_rect = self._detect_one_object(self._eye_classifier, image, search_rect, 64)

                search_rect = (x + w // 4, y + h // 4, w // 2, h // 2)
                face.nose_rect = self._detect_one_object(self._nose_classifier, image, search_rect, 32)

                search_rect = (x + w // 6, y + h * 2 // 3, w * 2 // 3, h // 3)
                face.mouth_rect = self._detect_one_object(self._mouth_classifier, image, search_rect, 16)

                self._faces.append(face)
Example #9
def histEq(img):
    b,g,r = cv2.split(img)
    b_eq = cv2.equalizeHist(b)
    g_eq = cv2.equalizeHist(g)
    r_eq = cv2.equalizeHist(r)
    img_output = cv2.merge([b_eq, g_eq, r_eq])
    return img_output
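Equalizing B, G, and R independently can shift hues; when color fidelity matters, a common alternative (a sketch of the luma-only approach Example #13 uses) equalizes just the Y channel:

def histEqLuma(img):
    ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
    ycrcb[:, :, 0] = cv2.equalizeHist(ycrcb[:, :, 0])
    return cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR)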
Example #10
def crop_face(cascades, img):
    rects = []

    height, width, depth = img.shape
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)

    for cascade in cascades:
        rect = detect(gray, cascade)
        if len(rect) > 0:
            rects.append(rect)
    if len(rects) > 0:
        rect_final = np.vstack(tuple(rects))
        c_x = width / 2
        c_y = height / 2
        (x1, y1, x2, y2) = find_center_rect(rect_final, (c_x, c_y))
        # draw_rects(img, rects, (255, 0,0))
        face_im = img[y1:y2, x1:x2]
        # face_im = cv2.resize(img[y1:y2,x1:x2], (32,32))
        face_im = cv2.cvtColor(face_im, cv2.COLOR_BGR2GRAY)
        face_im = cv2.equalizeHist(face_im)

        # cv2.imshow('facedetect', img)

        ##        if 0xFF & cv2.waitKey(5) == 27:
        ##            break
        # cv2.imwrite("D:\\NancyStudyData\\face\\fcb01f7a_9\\" + num + "_" + str(frame_cnt) + ".png", face_im)
        return (True, face_im)
    else:
        return (False, gray)
Example #11
def test_load(r,c):
    """loads images from folders and creates a test db for a given resolution rowsxcolumns"""
    #label matrix 
    labeltest=[]
    nmb=0
    for i in range(0,10):
        for fich in os.listdir("test/"+str(i)):
            labeltest.append(i)
            nmb=nmb+1
    #data matrix: 10 rows (digits): rows(kxtup(k)) each block is a matrix of the images of the corresponding size and digit
    test_set=np.zeros((1,int(r)*int(c)))

    for i in range(0,10):
        for fich in os.listdir("test/"+str(i)):
            img=cv2.imread("test/"+str(i)+"/"+fich,0)
            img=cv2.equalizeHist(img)
            #if img.shape != (11,7):
            #    img=cv2.resize(img,(7,11),interpolation=cv2.INTER_CUBIC)
            samp=cv2.resize(img,(int(c),int(r)),interpolation=cv2.INTER_CUBIC)
            samp=cv2.equalizeHist(samp)
            samp=samp.flatten()/255.0
            test_set=np.vstack((test_set,samp))
                
    test_set=np.delete(test_set, (0), axis=0)
    labeltest=np.asarray(labeltest).reshape(nmb,1)
    return test_set,labeltest
Example #12
    def __init__(self, frame_first, gamma=1.0, motion_compensation=False):
        # Define settings
        """
        The constructor for a new frameFusion instance

        @param frame_first: initial picture
        @param gamma: contrast parameter
        @param motion_compensation: (boolean flag) compensate motion over time
        """
        self.n_fused_frames = 0
        self.gamma = gamma
        self.n_max_corners = 400
        self.corners_q_level = 4
        self.motion_comp = motion_compensation
        self.motion_compensation_method = 'orb'
        self.reset = False
        self.reset_ratio = 0.3

        # Allocate buffers
        self.frame_acc = np.float32(frame_first)
        self.frame_acc_disp = np.float32(frame_first)
        self.frame_eq = np.float32(frame_first)
        self.frame_prev = frame_first

        # Do the first accumulation
        self.frame_acc = np.float32(cv2.equalizeHist(frame_first))  # equalizeHist works on 8-bit input; store as float afterwards
        cv2.normalize(self.frame_acc, self.frame_acc_disp, 0., 1., cv2.NORM_MINMAX)  # just for the display stuff
Example #13
def hist(img):
    ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
    channels = cv2.split(ycrcb)
    cv2.equalizeHist(channels[0], channels[0])  # equalize the luma channel in place (src, dst)
    cv2.merge(channels, ycrcb)  # merge the channels back
    cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR, img)
    return img
Example #14
def img_to_matrix(img, tr, STANDARD_SIZE, verbose=False):
    #img = Image.open(filename) 
   #img = ImageOps.autocontrast(img, cutoff = 2)
    #img = ImageEnhance.Brightness(img)
   # img = img.enhance(0.8)
    
    if tr==2:
        img = img.resize((100,100), Image.ANTIALIAS)
        img = img.crop((5,5,95,95))
        img = img.resize(STANDARD_SIZE, Image.ANTIALIAS)
    if tr==1:
        img = img.resize((100,100), Image.ANTIALIAS)
        img = img.crop((15,15,85,85))
        img = img.resize(STANDARD_SIZE, Image.ANTIALIAS)
    if tr==0:
        img = img.resize(STANDARD_SIZE, Image.ANTIALIAS)
    img2 = img.convert('RGB')   
    H,S,V = HSV(img2)    
    r, g, b = img2.split()

    r = cv2.equalizeHist(np.array(r))
    g = cv2.equalizeHist(np.array(g))
    b = cv2.equalizeHist(np.array(b))
    
    img = np.hstack ((H,S,V,np.array(r), np.array(g), np.array(b)))

    return img
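HSV is an undefined helper in this excerpt; a plausible stand-in (an assumption, not the project's actual function) splits a PIL image into H, S, V planes as arrays:

import numpy as np

def HSV(img):
    # hypothetical helper: return H, S, V planes of a PIL RGB image as 2D arrays
    hsv = np.array(img.convert('HSV'))
    return hsv[:, :, 0], hsv[:, :, 1], hsv[:, :, 2]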
Example #15
    def detect_faces(image, face_cascade, return_image=False):
        # This function takes a gray scale cv image and finds
        # the patterns defined in the haarcascade function
        # modified from: http://www.lucaamore.com/?p=638

        # variables
        min_size = (20, 20)
        haar_scale = 1.1
        min_neighbors = 3
        haar_flags = 0

        # Equalize the histogram
        cv2.equalizeHist(image, image)

        # Detect the faces
        faces = face_cascade.detectMultiScale(
            image, scaleFactor=haar_scale, minNeighbors=min_neighbors, minSize=min_size, flags=haar_flags
        )

        # If faces are found
        if isinstance(faces, numpy.ndarray) and return_image:
            for (x, y, w, h) in faces:
                # Convert bounding box to two CvPoints
                pt1 = (int(x), int(y))
                pt2 = (int(x + w), int(y + h))
                cv2.rectangle(image, pt1, pt2, (255, 0, 0), 5, 8, 0)

        if return_image:
            return image
        else:
            return faces
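A minimal call site for detect_faces (a sketch assuming it is accessible as a plain function; the cascade path uses cv2.data.haarcascades, which is available in modern opencv-python builds):

face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
gray = cv2.cvtColor(cv2.imread("input.jpg"), cv2.COLOR_BGR2GRAY)
faces = detect_faces(gray, face_cascade)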
Example #16
    def execute(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        cv2.equalizeHist(gray, gray)
        faces = self.face_cascade.detectMultiScale(gray,
                                                   1.1,
                                                   2,
                                                   cv2.CASCADE_SCALE_IMAGE,
                                                   (30, 30))
        for face in faces:
            faceimg = self.draw_rectangle(image, face, (0, 0, 255))

            """eyes = self.eye_cascade.detectMultiScale(faceimg,
                                                   1.1,
                                                   2,
                                                   0|cv.CV_HAAR_SCALE_IMAGE,
                                                   (30, 30)
                                                )
            for eye in eyes:
                self.draw_rectangle(image,
                        (face[0] + eye[0], face[1] + eye[1], eye[2], eye[3]),
                        (255, 0, 0))"""

            self.nb_face = 1
        return image
Example #17
 def allHistEqual(self):
     width = self.warped.shape[1]
     # Equalization is applied separately to the left and right halves; the middle is blended with the whole-image result
     left = self.warped[0:self.warped.shape[0], 0:width // 2]
     right = self.warped[0:self.warped.shape[0], width // 2:width]
     entire = cv2.equalizeHist(self.warped)
     left = cv2.equalizeHist(left)
     right = cv2.equalizeHist(right)
     for x in range(width):
         for y in range(self.warped.shape[0]):
             v = 0
             if x < (width // 4):
                 v = left[y, x]
             elif x < (width // 2):
                 l = left[y, x]
                 e = entire[y, x]
                 f = (x - width / 4.0) / (width / 4.0)
                 v = int((1.0 - f) * l + f * e + 0.5)
             elif x < (width * 3 // 4):
                 r = right[y, x - width // 2]
                 e = entire[y, x]
                 f = (x - width / 2.0) / (width / 4.0)
                 v = int((1.0 - f) * e + f * r + 0.5)
             else:
                 v = right[y, x - width // 2]
             self.warped[y, x] = v
Example #18
def find_bright(cimg):
  chans= cv2.split(cimg)

  gray = cv2.cvtColor(cimg, cv2.COLOR_BGR2GRAY)

  blue_maxed = cv2.equalizeHist(chans[0])
  red_maxed = cv2.equalizeHist(chans[2])

  #cv2.imshow('blue', blue_maxed)
  #cv2.imshow('red', red_maxed)

  blue_blur = cv2.GaussianBlur(blue_maxed, (9,9), 0)
  #cv2.imshow('blue blur', blue_blur)

  red_blur = cv2.GaussianBlur(red_maxed, (9,9), 0)
  cv2.imshow('red blur', red_blur)

  #gray = orig
  (minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(red_blur)
  cv2.circle(cimg, maxLoc, 15, (255,0,0), 2)

  cv2.imshow("brightest", cimg)
  c = cv2.waitKey(1)

  return cimg
Example #19
 def circles(self,cv_image):
     cv_image=cv2.resize(cv_image,dsize=(self.screen['width'],self.screen['height']))
     #if self.blur:
     #    cv_image=cv2.GaussianBlur(cv_image,ksize=[5,5],sigmaX=0)
     
     channels=cv2.split(cv_image)
     channels[0] = cv2.equalizeHist(channels[0])
     channels[1] = cv2.equalizeHist(channels[1])
     #channels[2] = cv2.equalizeHist(channels[2])
     img = cv2.merge(channels, cv_image)
     img=cv2.bilateralFilter(img, -1, 5, 0.1)
     kern = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
     img=cv2.morphologyEx(img, cv2.MORPH_CLOSE, kern)
     hsvImg=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
     luvImg=cv2.cvtColor(img,cv2.COLOR_BGR2LUV)
     gauss = cv2.GaussianBlur(luvImg, ksize=(5,5), sigmaX=10)
     sum = cv2.addWeighted(luvImg, 1.5, gauss, -0.6, 0)
     enhancedImg = cv2.medianBlur(sum, 3)
     ch=cv2.split(enhancedImg)
     mask = cv2.inRange(ch[2],self.highThresh[2],self.lowThresh[2])
     mask1=cv2.inRange(ch[1],self.highThresh[0],self.lowThresh[0])
     mask2=cv2.inRange(ch[2],self.highThresh[1],self.lowThresh[1])
     
    # cv2.imshow(mask)
     #cv2.imshow(mask1)
     #cv2.imshow(mask2)
     mask_out=cv2.cvtColor(mask,cv2.COLOR_GRAY2BGR)
     try:
         self.image_filter_pub.publish(self.bridge.cv2_to_imgmsg(mask_out, encoding="bgr8"))
     except CvBridgeError as e:
         rospy.logerr(e)
Example #20
def HistEqual(imgBGR):
    # histogram equalization in BGR
    b,g,r = cv2.split(imgBGR)
    b = cv2.equalizeHist(b)
    g = cv2.equalizeHist(g)
    r = cv2.equalizeHist(r)
    imgBGRHist = cv2.merge((b,g,r))

    # histogram equalization in HSV
    imgHSV = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2HSV)
    h,s,v = cv2.split(imgHSV)
##    h = cv2.equalizeHist(h)
##    s = cv2.equalizeHist(s)
    v = cv2.equalizeHist(v)
    imgHSV = cv2.cvtColor(cv2.merge((h,s,v)), cv2.COLOR_HSV2BGR)

    # histogram equalization in LAB
    imgLAB = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2LAB)
    l,a,b = cv2.split(imgLAB)
    l = cv2.equalizeHist(l)
##    a = cv2.equalizeHist(a)
##    b = cv2.equalizeHist(b)
    imgLAB = cv2.cvtColor(cv2.merge((l,a,b)), cv2.COLOR_LAB2BGR)

    # normalization
##    imgBGRL1 = imgBGR.copy().astype(np.float)
##    imgBGRL2 = imgBGR.copy().astype(np.float)
##    imgBGRINF = imgBGR.copy().astype(np.float)
##    cv2.normalize(imgBGRL1, imgBGRL1, 255, 0, cv2.NORM_L1)
##    cv2.normalize(imgBGRL2, imgBGRL2, 255, 0, cv2.NORM_L2)
##    cv2.normalize(imgBGRINF, imgBGRINF, 255, 0, cv2.NORM_INF)

    return imgBGRHist, imgHSV, imgLAB
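A usage sketch (the file name is an assumption):

img = cv2.imread('input.jpg')
img_bgr_eq, img_hsv_eq, img_lab_eq = HistEqual(img)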
Example #21
 def find(image1, image2):
     rect = (0, 0, 0, 0)
     firstFrame = image1.decode('base64', 'strict')
     firstFrame = cv2.imdecode(np.fromstring(firstFrame, dtype=np.uint8), -1)
     img = image2.decode('base64', 'strict')
     img = cv2.imdecode(np.fromstring(img, dtype=np.uint8), -1)
     if firstFrame is not None and img is not None:
         firstGray = cv2.cvtColor(firstFrame, cv2.COLOR_BGR2GRAY)
         firstGray = cv2.equalizeHist(firstGray)
         gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
         gray = cv2.equalizeHist(gray)
         frameDelta = cv2.absdiff(firstGray, gray)
         thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
         thresh = cv2.dilate(thresh, None, iterations=2)
         if platform.system() == 'Windows':
             _, cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
         else:
             cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
         contourMax = None
         areaMax = None
         for c in cnts:
             contour = cv2.contourArea(c)
             if contour < 500:
                 continue
             if contourMax is None or contour > contourMax:
                 contourMax = contour
                 areaMax = c
         if areaMax is not None:
             (x, y, w, h) = cv2.boundingRect(areaMax)
             rect = (x, y, w, h)
     return rect
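The platform.system() check above is really standing in for an OpenCV version check (3.x returns three values from findContours; 2.4 and 4.x return two); a version-agnostic helper (a sketch mirroring imutils.grab_contours) avoids the proxy:

def grab_contours(result):
    # cv2.findContours returns (contours, hierarchy) on OpenCV 2.4/4.x
    # and (image, contours, hierarchy) on OpenCV 3.x
    return result[0] if len(result) == 2 else result[1]

cnts = grab_contours(cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))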
Example #22
def engine_rotating(stat,framecnt,queue):
	classfier=cv2.CascadeClassifier("D:\\Anaconda\\haarcascade_frontalface_alt.xml")
	color = (255,0,0)
	while True:
		signal=queue.get()
		if(signal=="rotate_start"):
			break

	print("eng_start")

	while True:
		if(stat.value==1):
			break
		cnt=framecnt.value-1
		frame=cv2.imread("./temp_frame/"+str(cnt)+".jpg")
		size=frame.shape[:2]
		image=np.zeros(size,dtype=np.float16)
		image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
		cv2.equalizeHist(image, image)
		divisor=32
		h, w = size
		minSize=(w // divisor, h // divisor)
		faceRects = classfier.detectMultiScale(image, 1.2, 2, cv2.CASCADE_SCALE_IMAGE,minSize)
		if len(faceRects)>0:
			for faceRect in faceRects:
				x, y, w, h = faceRect
				print("detected face: ",x,y,w,h)
				#cv2.rectangle(frame, (x, y), (x+w, y+h), color)
				#TODO: Please Code Here

	print("rotating finish")
Example #23
def whiteBalance(img):
    b, g, r = cv2.split(img)
    b = cv2.equalizeHist(b)
    g = cv2.equalizeHist(g)

    img = cv2.merge((b, g, r))
    return img
Example #24
def preprocess_image(path):
    """
    Loads an image, converts to grayscale, flips the image if necessary based
    on which eye it is and if there is a notch present, and equalizes the
    image's histogram.
    :param str path: Path to an image.
    :rtype: numpy.ndarray
    """
    # Loading the image also converts it to grayscale.
    img = load_image(path)
    img_thresh = threshold(img)

    # Two-part notch-detection. Notch could be in upper-right quadrant, or it
    # could be in the bottom-right quadrant. Try the upper-right corner first -
    # if it's not there, try the bottom-right. If still no notch is detected,
    # assume there is no notch present and do no inversion.
    if detect_notch(img, img_thresh):
        cv.flip(img, -1, img)
        print "Notch detected in image {}.".format(path.split('/')[-1])
    else:
        vert_flip = cv.flip(img, 0)
        vert_flip_thresh = cv.flip(img_thresh, 0)

        if detect_notch(vert_flip, vert_flip_thresh):
            cv.flip(img, -1, img)
            print "Notch detected in image {}.".format(path.split('/')[-1])

    # Examine the file name and flip the eye horizontally if it's a left eye.
    if "left" in path:
        cv.flip(img, 1, img)

    # Finally, equalize the image.
    cv.equalizeHist(img, img)

    return img
Example #25
 def FindFace(self):
     success,frame=self.cap.read()
     classifier=cv2.CascadeClassifier("%shaarcascade_frontalface_default.xml" % self.RES_PATH)
     if success:
         size=frame.shape[:2]
         # self.Image_Name = '%s%s.jpg' % (self.PATH,time.strftime('%Y%m%d_%H%M%S'))
         # cv2.imwrite(self.Image_Name,frame)
         image=np.zeros(size,dtype=np.float16)
         image=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
         cv2.equalizeHist(image,image)
         divisor=8
         h,w=size
         minSize=(w // divisor, h // divisor)
         faceRects=classifier.detectMultiScale(image,1.2,2,cv2.CASCADE_SCALE_IMAGE,minSize)
         self.log.info("found %d facial feature(s)" % len(faceRects))
         if len(faceRects) > 0:
             self.Image_Name = '%s%s.jpg' % (self.PATH,time.strftime('%Y%m%d_%H%M%S'))
             cv2.imwrite(self.Image_Name,frame)
             return True
         else:
             return False
     else:
         traceback.print_stack()
         self.log.error("camera read error")
         return False
Example #26
def calcHogDes(img,mpoints,auto_orientation=False,angle=0,scale=1.0):
	points=copy.deepcopy(mpoints)
	# img=cv2.imread(imgName,cv2.IMREAD_COLOR)
		
	(height,width,channel)=img.shape
	gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

	cv2.equalizeHist(gray,gray)

	cnt=len(points)
	kp=[]
	des=[]
	for i in range(cnt // 2):
		cx=int(0.5*width+points[2*i]*scale)
		cy=int(0.5*height-points[2*i+1]*scale)
		local_img=gray[cy-9:cy+9,cx-9:cx+9]
		try:
			local_hog_des=hog(local_img,orientations=9,pixels_per_cell=(6,6),cells_per_block=(3,3))
		except:
			print(local_img.shape, cx, cy, cx-12, cx+12, cy-12, cy+12)
			raise
		des.extend(local_hog_des.tolist())
	des=np.array(des)
	sz=des.size
	des.shape=(cnt // 2, sz * 2 // cnt)
	return des,kp
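hog here presumably comes from scikit-image (an assumption based on the keyword arguments); the snippet would additionally need:

from skimage.feature import hog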
Example #27
def f(filename):
    import cv2
    import matplotlib.pyplot as plt
    im = cv2.imread("pic/"+filename, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    f, axarr = plt.subplots(nrows=2, ncols=2)
    f.tight_layout()

    axarr[0,0].hist(im.flatten(), 256, [0,256])
    axarr[0,0].set_xlabel("Graylevels")
    axarr[0,0].set_ylabel("Frequency")
    axarr[0,0].set_xlim(0, 256)
    axarr[0,0].set_title("Histogram of " + filename + " before equalization")

    axarr[1,0].hist(im.flatten(), 256, [0,256], cumulative=True)
    axarr[1,0].set_xlabel("Graylevels")
    axarr[1,0].set_ylabel("Cumulative Frequency")
    axarr[1,0].set_xlim(0, 256)
    axarr[1,0].set_title("Cumulative Histogram of " + filename + " before equalization")

    axarr[0,1].hist(cv2.equalizeHist(im).flatten(), 256, [0,256])
    axarr[0,1].set_xlabel("Graylevels")
    axarr[0,1].set_ylabel("Frequency")
    axarr[0,1].set_xlim(0, 256)
    axarr[0,1].set_title("Histogram of " + filename + " after equalization")

    axarr[1,1].hist(cv2.equalizeHist(im).flatten(), 256, [0,256], cumulative=True)
    axarr[1,1].set_xlabel("Graylevels")
    axarr[1,1].set_ylabel("Cumulative Frequency")
    axarr[1,1].set_xlim(0, 256)
    axarr[1,1].set_title("Cumulative Histogram of " + filename + " after equalization")

    plt.show()
Example #28
def postUplodProcessing(parDir):
    fnPreviewIMG=os.path.join(parDir, 'preview.png')
    fnInpCT_Orig=os.path.join(parDir, 'inputct.nii.gz')
    fnInpCT_uint8=os.path.join(parDir, fileNameInputCT_uint8)
    fnInpXR_Orig=glob.glob('%s/inputxrorig.*' % parDir)[0]
    fnInpXR_uint8=os.path.join(parDir, fileNameInputXR_uint8)
    isDicom=False
    try:
        inpDicom=dcm.read_file(fnInpXR_Orig).pixel_array.astype(float)
        vmin=inpDicom.min()
        vmax=inpDicom.max()
        imgu8=(255.*(inpDicom-vmin)/(vmax-vmin)).astype(np.uint8)
        imgu8=cv2.equalizeHist(imgu8)
        cv2.imwrite(fnInpXR_uint8, imgu8)
        isDicom=True
    except dcm.errors.InvalidDicomError:
        pass
    if not isDicom:
        imgu8=cv2.imread(fnInpXR_Orig, 0) #cv2.CV_LOAD_IMAGE_GRAYSCALE)
        imgu8=cv2.equalizeHist(imgu8)
        cv2.imwrite(fnInpXR_uint8, imgu8)
    imgCTu8=getPreviewFromCTNifti(fnInpCT_Orig)
    cv2.imwrite(fnInpCT_uint8, imgCTu8)
    makePreviewForCTXR(fnInpCT_uint8,fnInpXR_uint8,fnPreviewIMG)
    ret=os.path.isfile(fnPreviewIMG)  and \
        os.path.isfile(fnInpXR_uint8) and \
        os.path.isfile(fnInpCT_uint8)
    return ret
Example #29
    def detectface(self,frame):
        gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
        cv2.equalizeHist(gray, gray)
        faceCascade = cv2.CascadeClassifier("haarcascades/haarcascade_frontalface_alt2.xml")
        faces = faceCascade.detectMultiScale(
            gray,
            minNeighbors=5,
            scaleFactor=1.1,
            minSize=(15, 15),
            flags=cv2.CASCADE_SCALE_IMAGE )
        distanceCenter = 999
        self.currentFace[0] = 0
        for (x, y, w, h) in faces:
            cv2.rectangle(frame , (x, y), (x + w, y + h), (0, 255, 0), 2)
            center = [x + (w // 2), y + (h // 2)]
            cv2.circle(frame, (center[0], center[1]), 3, (0, 0, 255), -1)
            # Only move to the face closest to the center
            currentdistanceC = abs(center[0] - self.imgCx)
            if currentdistanceC < distanceCenter:
                distanceCenter = currentdistanceC
                self.currentFace = center
        if self.currentFace[0] != 0:
            # if the face is too far from the center we don't want to move
            if abs(self.currentFace[0] - self.imgCx) < self.imgThreshX * 2.5:
                self.calculateMovement(self.currentFace)





            # cv2.imshow("Image window", self.draw(frame) )
        return  self.draw(frame)
Example #30
def read_csv(training_path):
    """
    Load the training file, and read each image into the program as a matrix.

    Arguments:
        training_path: The path to the training file.

    """

    trainingFILE = open(training_path, "r")
    indexes = []
    images = []
    for line in trainingFILE:
        image_path = line.strip().split(";")[0]
        subjectid = line.strip().split(";")[1]

        image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)

        if image is not None:
            image = cv2.resize(image, (150,150))

            cv2.equalizeHist( image, image)
            indexes.append(int(subjectid))
            images.append(image)

    return indexes, images
Example #31
    def image_callback(self,data):
        print('I am here!')

        sizeX = 640
        sizeY = 480

        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        #cv_image = cv2.imread('Img.jpg')
        except CvBridgeError as e:
            print(e)

        # Set the dimensions of the image
        self.dims = cv_image.shape
        #print(cv_image.shape)

        cv_image = cv_image[sizeY // 5: sizeY // 5 * 4, 0:sizeX // 5 * 4]
        #cv_image = cv2.resize(cv_image, (sizeX, sizeY), interpolation=cv2.INTER_CUBIC)

        cv2.imshow("Image window", cv_image)
        cv2.waitKey(0)

        # Tranform image to gayscale
        cv_image_hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
        gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)

        # Do histogram equlization
        img = cv2.equalizeHist(gray)
        print('I am here!2')
        # Binarize the image
        #ret, thresh = cv2.threshold(img, 50, 255, 0)
        thresholds = []
        # Binarize the image

        for tr in range(20, 180, 15):
            ret, thresh = cv2.threshold(img, tr, 255, 0)
            thresholds.append(thresh)
            #cv2.imshow("Image window", thresh)
            #cv2.waitKey(0)

        # Extract contours
        allContours = [[], [], [], [], [], [], [], [], [], [], [], [], [], []]
        for i, thresh in enumerate(thresholds):
            _,contours, hierarchy = cv2.findContours(thresh, 2, 2)
            for contoure in contours:
                allContours[i].append(contoure)
        print('I am here!3')
        # Example how to draw the contours
        # cv2.drawContours(img, contours, -1, (255, 0, 0), 3)

        # Fit elipses to all extracted contours
        elpses = [[], [], [], [], [], [], [], [], [], [], [], [], [], []]
        for i, contoure in enumerate(allContours):
            for cnt in contoure:
                #     print cnt
                #     print cnt.shape
                if cnt.shape[0] >= 20:
                    ellipse = cv2.fitEllipse(cnt)
                    elpses[i].append(ellipse)


        candidates = []

        # Find two elipses with same centers
        for elps in elpses:
            # print("---")
            for n in range(len(elps)):
                # i = 0
                for m in range(n + 1, len(elps)):

                    e1 = elps[n]
                    e2 = elps[m]
                    dist = np.sqrt(((e1[0][0] - e2[0][0]) ** 2 + (e1[0][1] - e2[0][1]) ** 2))
                    pos = (((e1[0][0] + e2[0][0]) / 2), ((e1[0][1] + e2[0][1]) / 2))

                    try:
                        razmerje = e1[1][0] / e2[1][0]
                        razmerje2 = e1[1][1] / e2[1][1]
                    except:
                        break
                    #print(pos[1])
                    if dist < 5 and ((razmerje < 1.5 and razmerje > 1.1) or (razmerje < 0.9 and razmerje > 0.6)) and (
                        (razmerje2 < 1.5 and razmerje2 > 1.1) or (razmerje2 < 0.9 and razmerje2 > 0.6)) and pos[1] < sizeY/3*2:# and pos[
                        #1] > 0 and pos[1] < (sizeY / 2 / 4) * 3:
                        # i += 1
                        # if i==2:
                        #print (e1)
                        candidates.append((e1, e2, pos))
        print('I am here!4')
        realCandidates = []
        used = []
        for n in range(len(candidates)):
            for m in range(n + 1, len(candidates)):

                e1 = candidates[n]
                e2 = candidates[m]

                if abs(e1[2][0] - e2[2][0]) < 5 and abs(e1[2][1] - e2[2][1]) < 5:
                    if n not in used and m not in used:
                        realCandidates.append(e1)

                    used.append(n)
                    used.append(m)
                    # candidates[m] = False
                    # print(n, " ", m)
                    break

        print('I am here!4.5')
        try:
            depth_img = rospy.wait_for_message('/camera/depth_registered/image_raw', Image)
        except Exception as e:
            print(e)
        print('I am here!5')
        # Extract the depth from the depth image
        for c in realCandidates:
            print("CIRCLE FOUND!")
            #print(c)
            e1 = c[0]
            e2 = c[1]

            if (e1[1][1] * e1[1][0] < e2[1][1] * e2[1][0]):  # make e1 the larger ellipse
                temp = e1
                e1 = e2
                e2 = temp

            # size = (e1[1][0]+e1[1][1])/2
            sizex = (e1[1][0]) / 2
            sizey = (e1[1][1]) / 2
            sizex2 = (e2[1][0]) / 2
            sizey2 = (e2[1][1]) / 2
            #center = (e1[0][1], e1[0][0])
            center = (e1[0][0], e1[0][1])
            center2 = (e2[0][0], e2[0][1])

            dist = np.sqrt(((e1[0][0] - e2[0][0]) ** 2 + (e1[0][1] - e2[0][1]) ** 2))
            print("sizex: " + str(sizex))
            print("center: " + str(center))

            # ellipse 1
            # x1 = center[0] - sizex / 2
            x1 = center[0] - sizex
            print("x1: " + str(x1))
            x2 = center[0] + sizex
            print("x2: " + str(x2))
            x_min = x1 if x1 < x2 else x2
            x_max = x2 if x2 > x1 else x1
            print("xmin: " + str(x_min))
            print("xmax: " + str(x_max))
            y1 = center[1] - sizey
            y2 = center[1] + sizey
            print("y1: " + str(y1))
            print("y2: " + str(y2))
            y_min = y1 if y1 < y2 else y2
            y_max = y2 if y2 > y1 else y1
            print("ymin: " + str(y_min))
            print("ymax: " + str(y_max))

            # ellipse 2
            x1_2 = center2[0] - sizex2
            x2_2 = center2[0] + sizex2
            x_min2 = x1_2 if x1_2 < x2_2 else x2_2
            x_max2 = x2_2 if x2_2 > x1_2 else x1_2
            print("xmin2: " + str(x_min2))
            print("xmax2: " + str(x_max2))
            y1_2 = center2[1] - sizey2
            y2_2 = center2[1] + sizey2
            y_min2 = y1_2 if y1_2 < y2_2 else y2_2
            y_max2 = y2_2 if y2_2 > y1_2 else y1_2
            print("ymin2: " + str(y_min2))
            print("ymax2: " + str(y_max2))

            # clamp to image bounds (x indexes columns -> shape[1], y indexes rows -> shape[0])
            x_max = x_max if x_max < cv_image.shape[1] else cv_image.shape[1]
            y_max = y_max if y_max < cv_image.shape[0] else cv_image.shape[0]
            x_max2 = x_max2 if x_max2 < cv_image.shape[1] else cv_image.shape[1]
            y_max2 = y_max2 if y_max2 < cv_image.shape[0] else cv_image.shape[0]

            x_min = x_min if x_min > 0 else 0
            y_min = y_min if y_min > 0 else 0
            x_min2 = x_min2 if x_min2 > 0 else 0
            y_min2 = y_min2 if y_min2 > 0 else 0

            # doesn't work because the first and second ellipses sometimes get swapped

            size_diffx = abs(sizex - sizex2)
            size_diffy = abs(sizey - sizey2)
            thick_x_right = (x_max2 + x_max) / 2
            thick_x_left = (x_min2 + x_min) / 2
            thick_y_bot = (y_max2 + y_max) / 2
            thick_y_top = (y_min2 + y_min) / 2

            left = [thick_x_left, center[1]]
            right = [thick_x_right, center[1]]
            top = [center[0], thick_y_top]
            bot = [center[0], thick_y_bot]
            bgr_point_left = cv_image[int(left[1]), int(left[0])]
            hsv_point_left = cv_image_hsv[int(left[1]), int(left[0])]

            bgr_point_right = cv_image[int(right[1]), int(right[0])]
            hsv_point_right = cv_image_hsv[int(right[1]), int(right[0])]

            bgr_point_top = cv_image[int(top[1]), int(top[0])]
            hsv_point_top = cv_image_hsv[int(top[1]), int(top[0])]

            bgr_point_bot = cv_image[int(bot[1]), int(bot[0])]
            hsv_point_bot = cv_image_hsv[int(bot[1]), int(bot[0])]

            # OpenCV HSV to standard HSV conversion
            hsv_point_left[0] = hsv_point_left[0] * 2
            hsv_point_left[1] = (hsv_point_left[1] / 255) * 100
            hsv_point_left[2] = (hsv_point_left[2] / 255) * 100

            hsv_point_right[0] = hsv_point_right[0] * 2
            hsv_point_right[1] = (hsv_point_right[1] / 255) * 100
            hsv_point_right[2] = (hsv_point_right[2] / 255) * 100

            hsv_point_top[0] = hsv_point_top[0] * 2
            hsv_point_top[1] = (hsv_point_top[1] / 255) * 100
            hsv_point_top[2] = (hsv_point_top[2] / 255) * 100

            hsv_point_bot[0] = hsv_point_bot[0] * 2
            hsv_point_bot[1] = (hsv_point_bot[1] / 255) * 100
            hsv_point_bot[2] = (hsv_point_bot[2] / 255) * 100

            pink = (255, 20, 147)
            print(left[0])
            print(left[1])
            cv2.circle(cv_image, (int(left[0]), int(left[1])), 3, pink)
            cv2.circle(cv_image, (int(right[0]), int(right[1])), 3, pink)
            cv2.circle(cv_image, (int(top[0]), int(top[1])), 3, pink)
            cv2.circle(cv_image, (int(bot[0]), int(bot[1])), 3, pink)
            #points_coords = []
            #points_coords.append([bgr_point_left,hsv_point_left,[left[0],left[1]],filename,target_color])
            #points_coords.append([bgr_point_right,hsv_point_right,[right[0],right[1]],filename,target_color])
            #points_coords.append([bgr_point_top,hsv_point_top,[top[0],top[1]],filename,target_color])
            #points_coords.append([bgr_point_bot,hsv_point_bot,[bot[0],bot[1]],filename,target_color])
            points_coords_real = []
            points_coords_real.append([bgr_point_left[2],bgr_point_left[1],bgr_point_left[0],hsv_point_left[0],hsv_point_left[1],hsv_point_left[2],target_color])
            points_coords_real.append([bgr_point_right[2],bgr_point_right[1],bgr_point_right[0],hsv_point_right[0],hsv_point_right[1],hsv_point_right[2],target_color])
            points_coords_real.append([bgr_point_top[2],bgr_point_top[1],bgr_point_top[0],hsv_point_top[0],hsv_point_top[1],hsv_point_top[2],target_color])
            points_coords_real.append([bgr_point_bot[2],bgr_point_bot[1],bgr_point_bot[0],hsv_point_bot[0],hsv_point_bot[1],hsv_point_bot[2],target_color])
            #with open("results/results.csv", "a") as csvfile:
            #    pointswriter = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
            #    for point in points_coords:
            #        pointswriter.writerow(point)

            #with open("results/results_real.csv", "a") as csvfile:
            #    pointswriter = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
            #    #pointswriter.writerow(["R","G","B","H","S","V","barva"])
            #    for point in points_coords_real:
            #        pointswriter.writerow(point)
            cv2.ellipse(cv_image, e1, (0, 255, 0), 2)  # outer ellipse
            cv2.ellipse(cv_image, e2, (0, 255, 0), 2)  # inner ellipse
            for point in points_coords_real:
                color_name = predictColor(point)
                cv2.putText(cv_image, color_name, (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), lineType=cv2.LINE_AA)
            # depth_image = self.bridge.imgmsg_to_cv2(depth_img, "16UC1")
            #print(x_min)
            #print(x_max)
            #print(y_min)
            #print(y_max)

            #depth_image = depth_image[0: sizeY/5*4, 0:sizeX/5*4]
            only_ring_img = cv_image[int(y_min):int(y_max), int(x_min):int(x_max)]
            qrDetector(only_ring_img)

            cv2.imshow("Image window", only_ring_img)
            cv2.waitKey(4000)
            cv2.destroyAllWindows()
            #cv2.startWindowThread()
            cv2.imshow("Image window", cv_image)
            cv2.waitKey(4000)
            cv2.destroyAllWindows()
            #print(depth_image)
            #print(float(np.mean(depth_image[0:1, 0:1]))/1000.0)

            #print(float(np.mean(depth_image[x_min:x_max,y_min:y_max]))/1000.0)

            #self.get_pose(e1, float(np.mean(depth_image[x_min:x_max,y_min:y_max]))/1000.0)

            print("boom")
Example #32
root.withdraw()
root.filename =  filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("all files",".*"),("jpg files",".jpg")))
imagen = cv2.imread(root.filename)
""" imagen[177:200,:,0] = 0
imagen[177:200,:,1] = 0
imagen[177:200,:,2] = 0
 """

hsv = cv2.cvtColor(imagen, cv2.COLOR_BGR2HSV)
root.destroy()

H = hsv[:,:,0]
S = hsv[:,:,1]
V = hsv[:,:,2]
print (H)
VIe = cv2.equalizeHist(V)
hsv[:,:,2]=VIe
new=cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

hmin = 0
hmax = 30.6  #0.2
sat = 1.25 ##0.15   38.25  0.0025

skin = np.zeros([len(H), len(H[0])])
for i in range(0,len(S)):
     for j in range (0,len(H[0])) : 
         if ((S[i][j] > sat) and (H[i][j]>hmin) and (H[i][j]<hmax)):
             skin[i][j] = 1
         else:
             skin[i][j] = 0
Example #33
# cv2.imshow('inRange', mask)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
"""

# histogram equalization
# """
lower = (0, 100, 80)  # define lower boundary
upper = (10, 255, 255)  # define upper boundary
ball = cv2.imread('ball63.jpg')  # read ball
x, y, r = 533, 242, 11
ball = cv2.resize(ball, (1080, 810))  # resize
# cv2.imshow('ball', ball)
hsv = cv2.cvtColor(ball, cv2.COLOR_BGR2HSV)  # convert to hsv
h, s, v = cv2.split(hsv)  # split hsv
v_hist = cv2.equalizeHist(v)  # perform equalize on v
hsv_merge = cv2.merge([h, s, v_hist])  # merge h, s and equalize v
# bgr = cv2.cvtColor(hsv_merge, cv2.COLOR_HSV2BGR)
# cv2.imshow('bgr', bgr)
mask = cv2.inRange(hsv_merge, lower, upper)  # inrange
mask = cv2.erode(mask, None, iterations=2)  # erosion
mask = cv2.dilate(mask, None, iterations=2)  # dilation
# cv2.imshow('mask', mask)
# """

# gamma correction
"""
lower = (0, 100, 80)                                # define lower boundaries
upper = (10, 255, 255)                              # define upper boundaries
ball = cv2.imread('ball63.jpg')                      # read file
x, y, r = 533, 242, 11
"""
Example #34
import cv2
import numpy as np

img = cv2.imread('../../Assets/Images/senn01.jpeg', 1)

cv2.imshow('src', img)

(b, g, r) = cv2.split(img)

bH = cv2.equalizeHist(b)
gH = cv2.equalizeHist(g)
rH = cv2.equalizeHist(r)

result = cv2.merge((bH, gH, rH))

cv2.imshow('dst', result)
cv2.waitKey(0)
Example #35
    def update_frame(self):
        global running, image, dft
        if running:
            img = image_f.copy()
            
            img_height, img_width, img_colors = img.shape
            scale_w = float(self.window_width) / float(img_width)
            scale_h = float(self.window_height) / float(img_height)
            scale = min([scale_w, scale_h])

            if scale == 0:
                scale = 1
            
            img = cv2.resize(img, None, fx=scale, fy=scale, interpolation = cv2.INTER_CUBIC)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)        
            height, width, bpc = img.shape
            bpl = bpc * width
            img_o = img.copy()     

            if self.inversion.isChecked():
                img = cv2.bitwise_not(img)

            if self.brightness_value.value() > 0:
                hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
                h, s, v = cv2.split(hsv)
                v += self.brightness_value.value()
                #v = np.where((255 - v) < 255,255,v+self.brightness_value.value())
                final_hsv = cv2.merge((h, s, v))
                img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2RGB)

            if self.zoom_value.value() != 100:
                resize_val = int(float(self.zoom_value.value())*0.1)
                print(resize_val)
                img = cv2.resize(img,None,fx=resize_val, fy=resize_val)

            if self.translate_x.value() != 100:
                M = np.float32([[1,0,self.translate_x.value()-100],[0,1,0]])
                rows,cols,d = img.shape
                img = cv2.warpAffine(img,M,(cols,rows))

            if self.translate_y.value() != 100:
                M = np.float32([[1,0,0],[0,1,self.translate_y.value()-100]])
                rows,cols,d = img.shape
                img = cv2.warpAffine(img,M,(cols,rows))

            if self.hist_eq.isChecked():
                img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)  # img is RGB at this point
                img = cv2.equalizeHist(img)
                img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

            if self.gray.isChecked():
                img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
                img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

            if self.gaussian_blur.value()>3:
                img = cv2.GaussianBlur(img,(self.gaussian_blur.value(),self.gaussian_blur.value()),0)

            if self.median_blur.value()>3:
                img = cv2.medianBlur(img,self.median_blur.value())

            if self.rotate.value() > 0:
                cols = img.shape[1]
                rows = img.shape[0]
                M = cv2.getRotationMatrix2D((cols/2,rows/2),360-self.rotate.value(),1)
                img = cv2.warpAffine(img,M,(cols,rows))

            kernel = np.ones((5,5),np.uint8)
            if self.erosion_value.value() > 0:
                img = cv2.erode(img,kernel,iterations = self.erosion_value.value())
            if self.dilation_value.value() > 0:
                img = cv2.dilate(img,kernel,iterations = self.dilation_value.value())
    
            if self.canny.isChecked():
                img =  cv2.Canny(img,self.canny_min.value(),self.canny_max.value())
                img = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
            elif self.laplacian.isChecked():
                img = cv2.Laplacian(img,cv2.CV_8U)
            elif self.sobel.isChecked():
                if self.sobel_x.isChecked():
                    img = cv2.Sobel(img,cv2.CV_8U,1,0,ksize=5)
                if self.sobel_y.isChecked():
                    img = cv2.Sobel(img,cv2.CV_8U,0,1,ksize=5)
            
            if self.corner.isChecked():
                gray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
                gray = np.float32(gray)
                corners = cv2.goodFeaturesToTrack(gray,
                                                  self.corner_maxc.value(),
                                                  0.0001*float(self.corner_quality.value()),
                                                  self.corner_mind.value())
                corners=np.int0(corners)
                for corner in corners:
                    x,y = corner.ravel()
                    cv2.circle(img,(x,y),3,255,-1)   
                    
            if self.blob.isChecked():
                hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
                lower_blue = np.array([110,50,50])
                upper_blue = np.array([130,255,255])
                mask = cv2.inRange(hsv, lower_blue, upper_blue)
                img = cv2.bitwise_and(img,img,mask= mask)

            image = QtGui.QImage(img.data, width, height, bpl, QtGui.QImage.Format_RGB888)
            image_o = QtGui.QImage(img_o.data, width, height, bpl, QtGui.QImage.Format_RGB888)
            
            self.ImgWidget.setImage(image)
            self.ImgWidget_O.setImage(image_o)
            running = False
Example #36
def slidingWindowsEval(image):
    windows_size = 16
    stride = 1
    height = image.shape[0]
    # print image.shape[1]
    p = []
    ch_p = []

    gain = []
    pin = []
    for i in range(0, image.shape[1] - windows_size + 1, stride):
        data = image[0:height, i:i + windows_size]
        data = cv2.resize(data, (23, 23))
        # cv2.imshow("image",data)
        data = cv2.equalizeHist(data)
        data = data.astype(np.float) / 255
        data = np.expand_dims(data, 2)  # add a trailing channel axis
        res = model.predict(np.array([data]))
        pin.append(res[0])

        p.append(res[0][0] + res[0][2])
        ch_p.append(res[0][2])

        gain.append(res.argmax())

    p = np.insert(p, 0, 0)
    p = np.insert(p, len(p), 0)
    p = f.gaussian_filter1d(np.array(p, dtype=np.float), 3)
    # print p
    sum = image.sum(axis=0)

    lmin = l.argrelmax(np.array(p), order=3)[0]
    interval = []
    for i in xrange(len(lmin) - 1):
        interval.append(lmin[i + 1] - lmin[i])

    if (len(interval) > 3):
        mid = get_median(interval)
    else:
        return []

    ch_p = np.array(ch_p)
    pin = np.array(pin)
    res = searchOptimalCuttingPoint(image, pin, 0, mid, 3)

    cutting_pts = res[1]
    last = cutting_pts[-1] + mid
    if last < image.shape[1]:
        cutting_pts.append(last)
    else:
        cutting_pts.append(image.shape[1] - 1)

    name = ""
    confidence = 0.00
    seg_block = []
    for x in xrange(1, len(cutting_pts)):
        if x != len(cutting_pts) - 1 and x != 1:
            section = image[0:36, cutting_pts[x - 1] - 2:cutting_pts[x] + 2]
        elif x == 1:

            c_head = cutting_pts[x - 1] - 2
            if c_head < 0:
                c_head = 0
            c_tail = cutting_pts[x] + 2

            section = image[0:36, c_head:c_tail]
        elif x == len(cutting_pts) - 1:
            end = cutting_pts[x]
            diff = image.shape[1] - end

            c_head = cutting_pts[x - 1]
            c_tail = cutting_pts[x]

            if diff < 7:
                section = image[0:36, c_head - 5:c_tail + 5]

            else:
                diff -= 1

                section = image[0:36, c_head - diff:c_tail + diff]

        elif x == 2:
            section = image[0:36,
                            cutting_pts[x - 1] - 3:cutting_pts[x - 1] + mid]
        else:
            section = image[0:36, cutting_pts[x - 1]:cutting_pts[x]]
        seg_block.append(section)
    refined = refineCrop(seg_block, mid - 1)
    for i, one in enumerate(refined):

        res_pre = cRP.SimplePredict(one, i)
        confidence += res_pre[0]

        name += res_pre[1]

    return seg_block, name, confidence
Example #37
 def process(self, rect, frame):
     x, y, w, h = rect
     subimg = np.array(frame[y:y + h, x:x + w])
     subimg = self.beta * subimg + self.alpha * cv2.equalizeHist(subimg)
     frame[y:y + h, x:x + w] = subimg
     return frame
Example #38
    source = VideoStream(src=0).start()
elif args['videosource'] == 'r':
    source = VideoStream(src="http://"+ip+":8081").start()

# save codes
codes = {}

while (True):
    # read front camera
    im = source.read()

    # convert to grayscale
    im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
            
    # increase contrast
    contrast_image = cv2.equalizeHist(im)

    # loop over all rotations
    for angle in range(0, 360, angle_increase):
        
        # rotate contrast_image by angle
        rows,cols = contrast_image.shape
        M = cv2.getRotationMatrix2D((cols/2,rows/2),angle,1)
        dst = cv2.warpAffine(contrast_image, M, (cols,rows))
        #cv2.imshow('test', dst)

        # decode QR code
        decoded = decode(dst)
        decodedString = dataArrayToString(str(decoded), "data=")
            
        # if theres something in the output of decoded()   
Example #39
cv2.imshow('image I with contours drawn from edge image Ie', I)
#23l
Ihsv = cv2.cvtColor(Igoc, cv2.COLOR_BGR2HSV)  # convert to HSV
# split out the S channel and display it
Is = Ihsv[:,:,1]
cv2.imshow("S channel", Is)
#23m
h=Is.shape[0]
w=Is.shape[1]
Istb = cv2.blur(Is, (3,3))
cv2.imshow('mean-smoothed image', Istb)
#23n
nguong_otsu, Isb = cv2.threshold(Istb, 0, 255, cv2.THRESH_OTSU)  # binarize with Otsu's threshold
cv2.imshow('Otsu-thresholded binary image', Isb)

#23o blue color
_, contours, _ = cv2.findContours(Isb, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(I, contours, -1, (0,0,255), 3)
cv2.imshow("image I with contours", I)
#23p
hist=cv2.calcHist([Ihsv],channels=[2],mask=None,histSize=[256],ranges=[0,256])
plt.plot(hist)
plt.show()
#23q
Ihsv[:,:,2] = cv2.equalizeHist(Ihsv[:,:,2])
cv2.imshow('V-channel histogram-equalized image', Ihsv[:,:,2])
Imoi = cv2.cvtColor(Ihsv, cv2.COLOR_HSV2BGR)
cv2.imshow('RGB image with the V channel equalized in HSV space', Imoi)
###############
cv2.waitKey()
Example #40
from facial import facecrop

import cv2
import numpy as np
a = lambda *k: np.array(k)

jeff = load_image('jeff.jpg')
jeff = facecrop(jeff)

jeff = resize(jeff, 512)

height, width = jeff.shape[0:2]

bw = jeff[:, :, 1:2]

bw = cv2.equalizeHist(bw)

# lowpass
from cv2tools import vis, filt
fbw = np.divide(bw, 255, dtype='float32')
# lp = vis.lanczos_filter(fbw, 2,2, a=2)
lp = vis.lanczos_filter(fbw**2.2, 2, 2, a=2)

c = np.full_like(jeff, 255)


def italic_iteration(refimg, callback, angle=25, step=4.0, vstep=4.0):
    # signature for callback: (x, y, linebegin=False)
    t = theta = angle / 180 * np.pi
    xf = a([np.cos(t), -np.sin(t)], [np.sin(t), np.cos(t)])
    ixf = np.linalg.inv(xf)
Example #41
import cv2
import matplotlib.pyplot as plt

img = cv2.imread('timg.jpg', 0)  # read directly as a grayscale image
res = cv2.equalizeHist(img)

clahe = cv2.createCLAHE(clipLimit=2, tileGridSize=(10, 10))
cl1 = clahe.apply(img)

plt.subplot(131), plt.imshow(img, 'gray')
plt.subplot(132), plt.imshow(res, 'gray')
plt.subplot(133), plt.imshow(cl1, 'gray')

plt.show()
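For color input, a common pattern (a sketch, not part of the original example) applies CLAHE to the lightness channel in LAB space so hues are preserved:

def clahe_color(img_bgr, clip=2.0, grid=(8, 8)):
    lab = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    l = cv2.createCLAHE(clipLimit=clip, tileGridSize=grid).apply(l)
    return cv2.cvtColor(cv2.merge((l, a, b)), cv2.COLOR_LAB2BGR)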
Example #42
def histogram_equalize(img):
    b, g, r = cv2.split(img)
    red = cv2.equalizeHist(r)
    green = cv2.equalizeHist(g)
    blue = cv2.equalizeHist(b)
    return True, cv2.merge((blue, green, red))
Example #43
def equalize_image(img):
    img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
    img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])
    img = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
    return img
Example #44
def equalize(img):
    img = cv2.equalizeHist(img)
    return img
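cv2.equalizeHist only accepts single-channel 8-bit input; a defensive wrapper (an assumption, not in the original) converts color frames first:

def equalize_safe(img):
    if img.ndim == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return cv2.equalizeHist(img)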
Example #45
def histeq_fn(chapBGR):
    # Histogram equalization of the L (lightness) channel of a BGR image; from _tpl/other
    chipLAB = cv2.cvtColor(chapBGR, cv2.COLOR_BGR2LAB)
    chipLAB[:, :, 0] = cv2.equalizeHist(chipLAB[:, :, 0])
    chapBGR = cv2.cvtColor(chipLAB, cv2.COLOR_LAB2BGR)
    return chapBGR
Example #46
def histogramEsitleme(gurultuazalt):
    histogram_e = cv2.equalizeHist(gurultuazalt)
    #cv2.namedWindow("Histogram esitleme islemi", cv2.WINDOW_NORMAL)
    #cv2.imshow("Histogram esitleme islemi", histogram_e)
    return histogram_e
Example #47
#print ("no of good bf matches", len(goodmatches),"no of good knn matches", len(goodknnmatches), "no of good flann matches", len(goodflann)) 

#good13=[]
#for m,n in matches13:
#	if m.distance < 0.75*n.distance:
#		good13.append([m])
#print (len(good13))
#del good13[:]

while 1:
    startt=time.time()
    # grab the raw NumPy array representing the image, then initialize the timestamp
    # and occupied/unoccupied text
    frame = vs.read()
    gray_image=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    eqhi=cv2.equalizeHist(gray_image)
    clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(21,21))
    cl1 = clahe.apply(eqhi)
    eqhia=cv2.equalizeHist(cl1)    
    blur = cv2.GaussianBlur(eqhia,(5,5),sigmaX=5)
    blur2 = cv2.GaussianBlur(eqhia,(15,15),sigmaX=25)
    blurinv = cv2.bitwise_not(blur)
    subtracted= cv2.add(blur2,blurinv)
    #subtracted=cv2.absdiff(blur3,blur)
    #subtracted2=cv2.absdiff(blur2,blur4)
    eqhsub= cv2.equalizeHist(subtracted)
    cl2=clahe.apply(eqhsub)
    blur3=cv2.medianBlur(cl2,3)
    eqhsub2=cv2.equalizeHist(blur3)
    ret,th1 = cv2.threshold(eqhsub2,100,255,cv2.THRESH_BINARY_INV)  
    kernel = np.ones((3,3), np.uint8)
Example #48
camera = PiCamera()
camera.resolution = (FRAME_W, FRAME_H)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(FRAME_W, FRAME_H))
time.sleep(0.1)

for image in camera.capture_continuous(rawCapture,
                                       format="bgr",
                                       use_video_port=True):

    frame = image.array
    # frame = cv2.flip(frame, -1)  # if the image needs to be flipped vertically

    # Convert to greyscale for detection
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)

    # Face detection
    faces = faceCascade.detectMultiScale(gray, 1.1, 3, 0, (10, 10))

    # Draw a box around each detected face
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    frame = cv2.resize(frame, (540, 300))

    # Display
    cv2.imshow('Video', frame)
    key = cv2.waitKey(1) & 0xFF

    rawCapture.truncate(0)
Example #49
    def detect(self):

        self._media.openMedia()
        self._frameRects = []
        self._frameAngles = []

        n = 0
        while (True):
            ret, frame = self._media.getNextFrame()
            if ret == False:
                break

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            grayN = cv2.equalizeHist(gray)
            fgmask = self.__FGBG.apply(grayN)

            ret, thresh = cv2.threshold(fgmask, 127, 255, 0)
            fgmask = cv2.morphologyEx(thresh, cv2.MORPH_ERODE,
                                      self._kernelErode)
            fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_DILATE,
                                      self._kernelDilate)
            #fgmask = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,self._KERNEL_ERODE)
            _, contours, _ = cv2.findContours(fgmask, cv2.RETR_TREE,
                                              cv2.CHAIN_APPROX_SIMPLE)
            rectangles = []
            angles = []

            if self._debug:
                drawFrame = frame.copy()

            for i in xrange(len(contours)):
                contourArea = cv2.contourArea(contours[i])
                if (contourArea > self.__CONTOUR_AREA_TRESHOLD[0] *
                        self._areaFactor) and (
                            contourArea < self.__CONTOUR_AREA_TRESHOLD[1] *
                            self._areaFactor):
                    rect = cv2.minAreaRect(contours[i])
                    rectangles.append(rect)

                    if self._debug:
                        box = cv2.boxPoints(rect)
                        for ii in range(4):
                            cv2.line(drawFrame, tuple(box[ii]),
                                     tuple(box[(ii + 1) % 4]), (0, 255, 255))

                    #image,"Hello World!!!", (x,y),
                    xy = np.int0(np.array(rect[0]))
                    #xy = np.int0((np.array(rect[0]) + np.array(rect[1]))/2.0)
                    #print
                    x = xy[0]
                    y = xy[1]

                    #width  = np.abs(rect[0][0] - rect[1][0])
                    #height = np.abs(rect[0][1] - rect[1][1])

                    width = rect[1][0]
                    height = rect[1][1]

                    angle = rect[2]

                    if height > width:
                        angle = angle + 90

                    if angle > 90:
                        angle -= 180
                    elif angle < -90:
                        angle += 180

                    angle = -angle
                    angles.append(angle)
                    if self._debug:
                        #color = (255,0,0) if flag else (255,255,255)
                        color = (255, 255, 255)
                        cv2.putText(drawFrame, "%0.1f" % (angle), (x, y),
                                    cv2.FONT_HERSHEY_DUPLEX, 0.5, color)

            self._frameRects.append(rectangles)
            self._frameAngles.append(angles)
            if self._debug:
                cv2.imwrite("out/%05d.jpg" % (n), drawFrame)

            n += 1
Example #50
0
#from matplotlib import pyplot as plt

print(cv2.__version__)
vidcap = cv2.VideoCapture(r'E:\Projects\Videos\movie1.mp4')  # raw string so backslashes are not escapes
success, image = vidcap.read()

#Greyscale conversion of image
img1 = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

#Remove defects by forcing dark pixels to white
b = img1 < 100  #numpy mask operation - a Python loop would be too slow
img1[b] = 255

#histogram equalize the image
img2 = cv2.equalizeHist(img1)

img = img2
#cv2.imwrite("E:/Projects/Results/hist.jpg",equ)

#Implement Gaussian blurring of the image
gblur1 = cv2.GaussianBlur(img, (5, 5), 0)
gblur2 = cv2.GaussianBlur(img, (11, 11), 0)
gblur3 = cv2.GaussianBlur(img, (15, 15), 0)
gblur4 = cv2.GaussianBlur(img, (19, 19), 0)
gblur5 = cv2.GaussianBlur(img, (23, 23), 0)
gblur6 = cv2.GaussianBlur(img, (27, 27), 0)
gblur7 = cv2.GaussianBlur(img, (43, 53), 0)
gblur8 = cv2.GaussianBlur(img, (59, 59), 0)
gblur9 = cv2.GaussianBlur(img, (73, 73), 0)
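The blur ladder above can be generated in a loop; Gaussian kernel dimensions must be odd, and note that gblur7 uses an asymmetric (43, 53) kernel where the others are square, possibly a typo. A near-equivalent sketch that keeps all kernels square:

kernel_sizes = [5, 11, 15, 19, 23, 27, 43, 59, 73]
gblurs = [cv2.GaussianBlur(img, (k, k), 0) for k in kernel_sizes]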
Example #51
0
def engine_rotating(stat, framecnt, queue):
    pi = math.pi  # use the math module's constant rather than a hand-typed literal
    ags = [0 for x in range(0, 20)]  #angle_sin
    agc = [0 for x in range(0, 20)]  #angle_cos
    agh = [0 for x in range(0, 20)]
    angle = 22.5
    for i in range(1, 9):
        agh[i] = 2 * pi * angle / 360
        ags[i] = math.sin(agh[i])
        agc[i] = math.cos(agh[i])
        print(i, angle, agh[i], agc[i])
        angle += 45
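    # agc[1..8] now hold the cosines of the eight sector boundaries
    # (22.5, 67.5, ..., 337.5 degrees); below, the face displacement
    # vector's cosine is compared against them to pick one of eight
    # serial movement commands.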

    ser = serial.Serial('COM8', 9600)
    ser.write("sssssssss")
    classfier = cv2.CascadeClassifier(
        "D:\\HFUTProject\\cam\\cam_Controller_Py\\haarcascade_frontalface_alt.xml"
    )
    color = (255, 0, 0)
    while True:
        signal = queue.get()
        if (signal == "rotate_start"):
            break

    print("eng_start")

    while True:
        time.sleep(0.01)  ##??
        if (stat.value == 1):
            break
        cnt = framecnt.value - 1
        frame = cv2.imread("./temp_frame/" + str(cnt) + ".jpg")
        size = frame.shape[:2]
        # cvtColor allocates its output, so no pre-allocation is needed
        image = cv2.cvtColor(frame, cv2.cv.CV_BGR2GRAY)
        cv2.equalizeHist(image, image)
        divisor = 32
        h, w = size
        minSize = (w / divisor, h / divisor)
        faceRects = classfier.detectMultiScale(image, 1.2, 2,
                                               cv2.CASCADE_SCALE_IMAGE,
                                               minSize)
        #print(cnt,len(faceRects))
        if len(faceRects) > 0:
            for faceRect in faceRects:
                x, y, w, h = faceRect
                #print("detected face: ",x,y,w,h)
                #cv2.rectangle(frame, (x, y), (x+w, y+h), color,3)
                cx = x + (w / 2) - 320
                cy = -(y + (h / 2) - 240)
                cz = math.sqrt(cx * cx + cy * cy)
                angle_cos = cy / cz
                angle_sin = cx / cz
                #640x480    center:320,240

                if ((abs(cx) > 100) or (abs(cy) > 75)):
                    #print(x,x+w,y,y+h,"   ",cx,cy,angle_sin,angle_cos)
                    if (angle_sin > 0):  #AQWED
                        if (angle_cos > agc[1]):  #D
                            ser.write("sww")
                        elif (angle_cos > agc[2]):  #E
                            ser.write("see")
                        elif (angle_cos > agc[3]):  #W
                            ser.write("sddd")
                        elif (angle_cos > agc[4]):  #Q
                            ser.write("scc")
                        else:  #A
                            ser.write("sxx")
                    elif (angle_sin <= 0):
                        if (angle_cos > agc[8]):  #D
                            ser.write("sww")
                        elif (angle_cos > agc[7]):  #C
                            ser.write("sqq")
                        elif (angle_cos > agc[6]):  #X
                            ser.write("saaa")
                        elif (angle_cos > agc[5]):  #Z
                            ser.write("szz")
                        else:  #A
                            ser.write("sxx")
                elif ((abs(cx) > 40) or (abs(cy) > 30)):
                    # smaller deviation from center: issue finer movement commands
                    #print(x,x+w,y,y+h,"   ",cx,cy,angle_sin,angle_cos)
                    if (angle_sin > 0):  #AQWED
                        if (angle_cos > agc[1]):  #D
                            ser.write("sw")
                        elif (angle_cos > agc[2]):  #E
                            ser.write("se")
                        elif (angle_cos > agc[3]):  #W
                            ser.write("sdd")
                        elif (angle_cos > agc[4]):  #Q
                            ser.write("sc")
                        else:  #A
                            ser.write("sx")
                    elif (angle_sin <= 0):
                        if (angle_cos > agc[8]):  #D
                            ser.write("sw")
                        elif (angle_cos > agc[7]):  #C
                            ser.write("sq")
                        elif (angle_cos > agc[6]):  #X
                            ser.write("saa")
                        elif (angle_cos > agc[5]):  #Z
                            ser.write("sz")
                        else:  #A
                            ser.write("sx")

    print("rotating finish")
    cv2.destroyWindow("test")
Example #52
0
def main():

    if not os.path.exists('data/data_images/Persons/%s' % (name)):
        os.makedirs("data/data_images/Persons/%s" % (name))

    args, video_src = getopt.getopt(sys.argv[1:], '',
                                    ['cascade=', 'nested-cascade='])
    try:
        video_src = video_src[0]
    except:
        video_src = 0

    args = dict(args)
    cascade_fn = args.get('--cascade',
                          "data/haarcascades/haarcascade_frontalface_alt.xml")
    nested_fn = args.get('--nested-cascade',
                         "data/haarcascades/haarcascade_eye.xml")

    cascade = cv2.CascadeClassifier(cascade_fn)
    nested = cv2.CascadeClassifier(nested_fn)

    cam = cv2.VideoCapture(0)

    n = 0
    samples = np.empty((0, 2500))  # each row will hold a flattened 50x50 patch

    cv2.namedWindow('image')

    while n < int(n_images):
        ret, img = cam.read()

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)

        rects = detect(gray, cascade)
        vis = img.copy()
        draw_rects(vis, rects, (0, 255, 0))

        key = cv2.waitKey(2)
        for x1, y1, x2, y2 in rects:
            crop_img = img[y1:y2, x1:x2]
            #--------------------------------------------------------
            gray = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
            gray = cv2.equalizeHist(gray)
            #Basic threshold example
            #Set threshold and maxValue
            thresh = 127
            maxValue = 255
            th, dst = cv2.threshold(gray, thresh, maxValue, cv2.THRESH_BINARY)
            #--------------------------------------------------------

            resized_image = cv2.resize(dst, (50, 50))
            cv2.imshow("cropped", resized_image)

            if key == 32:
                cv2.imwrite(
                    "data/data_images/Persons/%s/cara%d.jpg" % (name, n), img)
                print("cara%d.jpg saved!" % n)

                resized_image = np.asarray(resized_image, dtype='uint8')
                sample = resized_image.reshape((1, 2500))
                sample = sample / 255.0  # float division so values land in [0, 1]
                samples = np.append(samples, sample, 0)
                n = n + 1

        cv2.imshow('image', vis)

        if key == 27:
            break

    print("Data was saved!!. Run \"training.py\"")
    np.savetxt('data/data_images/Persons/%s/generalsamples.data' % name,
               samples)
    cv2.destroyAllWindows()
Example #53
0
def main(gui):
    cap = cv2.VideoCapture(0)  #640,480
    w = 640
    h = 480

    count_open = 0
    count_closed = 0
    count_side = 0

    while (cap.isOpened()):
        ret, frame = cap.read()
        if ret == True:
            #detect face
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # capture frames are BGR, not RGB
            faces = cv2.CascadeClassifier('haarcascade_eye.xml')
            detected = faces.detectMultiScale(frame, 1.3, 5)

            front_faces = cv2.CascadeClassifier(
                'haarcascade_frontalface_default.xml')
            detected2 = front_faces.detectMultiScale(frame, 1.3, 5)

            pupilFrame = frame
            pupilO = frame
            windowClose = np.ones((5, 5), np.uint8)
            windowOpen = np.ones((2, 2), np.uint8)
            windowErode = np.ones((2, 2), np.uint8)

            count_closed += 1

            if len(detected2) == 0:
                count_side += 1

            if count_side > 2:
                print('Look Ahead!')
                play_sound = look_ahead.play()
                play_sound.wait_done()
                count_closed = 0
                count_side = 0

            if count_closed > 3:
                print('Wake Up!')
                #gmaps call
                #url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=-33.8670522,151.1957362&radius=500&type=restaurant&keyword=cruise&key=AIzaSyA1aBGwZwO0rooO9cgJVtYqzl37VhbhTVA"
                #with urllib.request.urlopen("http://www.python.org") as url:
                #	response = url.read()
                #str_response = response.readall().decode('utf-8')
                #data = json.loads(str_response)
                #print(response)
                addr = 'Closest Restaurant will be displayed here if you are sleepy'
                gui.ui.addrBox.setText(addr)
                play_obj = wave_obj.play()
                play_obj.wait_done()
            count_open = 0

            #draw square
            for (x, y, w, h) in detected:
                cv2.rectangle(frame, (x, y), ((x + w), (y + h)), (0, 0, 255),
                              1)
                cv2.line(frame, (x, y), ((x + w, y + h)), (0, 0, 255), 1)
                cv2.line(frame, (x + w, y), ((x, y + h)), (0, 0, 255), 1)
                pupilFrame = cv2.equalizeHist(frame[int(y + h * 0.25):(y + h),
                                                    x:(x + w)])  # cast to int: float slice indices are invalid
                pupilO = pupilFrame
                count_open += 1
                count_closed = 0
                count_side = 0

                ret, pupilFrame = cv2.threshold(
                    pupilFrame, 55, 255,
                    cv2.THRESH_BINARY)  #50 ..nothin 70 is better
                pupilFrame = cv2.morphologyEx(pupilFrame, cv2.MORPH_CLOSE,
                                              windowClose)
                pupilFrame = cv2.morphologyEx(pupilFrame, cv2.MORPH_ERODE,
                                              windowErode)
                pupilFrame = cv2.morphologyEx(pupilFrame, cv2.MORPH_OPEN,
                                              windowOpen)

                #so above we do image processing to get the pupil..
                #now we find the biggest blob and get the centroid

                threshold = cv2.inRange(pupilFrame, 250, 255)  #get the blobs
                image, contours, hierarchy = cv2.findContours(
                    threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

                #if there are 3 or more blobs, delete the biggest and delete the left most for the right eye
                #if there are 2 blob, take the second largest
                #if there are 1 or less blobs, do nothing

                if len(contours) >= 2:
                    #find biggest blob
                    maxArea = 0
                    MAindex = 0  #to get the unwanted frame
                    distanceX = []  #delete the left most (for right eye)
                    currentIndex = 0
                    for cnt in contours:
                        area = cv2.contourArea(cnt)
                        center = cv2.moments(cnt)
                        if center['m00'] <= 0:
                            center['m00'] = 1
                        cx, cy = int(center['m10'] / center['m00']), int(
                            center['m01'] / center['m00'])
                        distanceX.append(cx)
                        if area > maxArea:
                            maxArea = area
                            MAindex = currentIndex
                        currentIndex = currentIndex + 1

                    del contours[MAindex]  #remove the picture frame contour
                    del distanceX[MAindex]

                eye = 'right'

                if len(contours
                       ) >= 2:  #delete the left most blob for right eye
                    if eye == 'right':
                        edgeOfEye = distanceX.index(min(distanceX))
                    else:
                        edgeOfEye = distanceX.index(max(distanceX))
                    del contours[edgeOfEye]
                    del distanceX[edgeOfEye]

                largeBlob = []  # stays empty if no blob qualifies below
                if len(contours) >= 1:  #get largest blob
                    maxArea = 0
                    for cnt in contours:
                        area = cv2.contourArea(cnt)
                        if area > maxArea:
                            maxArea = area
                            largeBlob = cnt

                if len(largeBlob) > 0:
                    center = cv2.moments(largeBlob)
                    if center['m00'] <= 0:
                        center['m00'] = 1
                    cx, cy = int(center['m10'] / center['m00']), int(
                        center['m01'] / center['m00'])
                    cv2.circle(pupilO, (cx, cy), 5, 255, -1)

                k = cv2.waitKey(33)
                if k == 27:
                    break

            #show picture
            cv2.imshow('frame', pupilO)
            #cv2.imshow('frame2',pupilFrame)
            k = cv2.waitKey(33)
            if k == 27:
                break
        #else:
        #break

    # Release everything if job is finished
    cap.release()
    cv2.destroyAllWindows()
Example #54
0
    def save_training_TP_FP_using_voc(self,
                                      rects=False,
                                      neural=True,
                                      viola=False,
                                      img_names=None):
        '''use the voc scores to decide if a patch should be saved as a TP or FP or not
        '''
        general_path = utils.get_path(neural=neural,
                                      viola=viola,
                                      data_fold=utils.TRAINING,
                                      in_or_out=utils.IN,
                                      out_folder_name=self.folder_name)
        # note: this hard-coded path overrides the utils.get_path result above
        general_path = '../slide_training_data_neural/{}'.format(
            self.folder_name)
        utils.mkdir(out_folder_path=general_path)

        path_true = general_path + 'truepos/'
        utils.mkdir(path_true)

        path_false = general_path + 'falsepos/'
        utils.mkdir(path_false)
        img_names = img_names if img_names is not None else self.img_names

        num_patches = 0  #we can only save around 30000 images per folder!!!
        for i, img_name in enumerate(img_names):
            print 'Saving patches for {} {}/{}'.format(img_name, i + 1,
                                                       len(img_names))

            good_detections = defaultdict(list)
            bad_detections = defaultdict(list)
            try:
                if viola:  #viola training will need grayscale patches
                    img = cv2.imread(self.in_path + img_name,
                                     flags=cv2.IMREAD_COLOR)
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                    img = cv2.equalizeHist(img)
                else:  #neural network will need RGB
                    img = cv2.imread(self.in_path + img_name,
                                     flags=cv2.IMREAD_COLOR)
            except:
                print 'Cannot open image'
                sys.exit(-1)

            for roof_type in utils.ROOF_TYPES:
                detection_scores = self.detections.best_score_per_detection[
                    img_name][roof_type]
                for detection, score in detection_scores:
                    if score > 0.5:
                        #true positive
                        good_detections[roof_type].append(detection)
                    if score < self.negThres:
                        #false positive
                        bad_detections[roof_type].append(detection)

            for roof_type in utils.ROOF_TYPES:
                extraction_type = 'good'
                num_patches = self.save_training_FP_and_TP_helper(
                    num_patches,
                    img_name,
                    good_detections[roof_type],
                    path_true,
                    general_path,
                    img,
                    roof_type,
                    extraction_type, (0, 255, 0),
                    rects=rects)
                extraction_type = 'background'
                num_patches = self.save_training_FP_and_TP_helper(
                    num_patches,
                    img_name,
                    bad_detections[roof_type],
                    path_false,
                    general_path,
                    img,
                    roof_type,
                    extraction_type, (0, 0, 255),
                    rects=rects)
Example #55
0
                                use_video_port=True):
    try:
        image_buffer.truncate()
        image_buffer.seek(0)
        logging.debug("Camera capture took {}".format(time.time() - s))
    except Exception, e:
        logging.exception("Error capturing image")
        time.sleep(CAMERA_ERROR_DELAY_SECS)
        continue
    s = time.time()
    display_image = Image.open(image_buffer)
    cv2_image = numpy.array(display_image)
    cv2_image = cv2.cvtColor(cv2_image, cv2.COLOR_RGB2GRAY)
    logging.debug("Image conversion took {}".format(time.time() - s))
    s = time.time()
    cv2_image = cv2.equalizeHist(cv2_image)
    #(thresh, cv2_image) = cv2.threshold(cv2_image, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)  # too high contrast
    cv2_image = cv2.resize(cv2_image,
                           (OLED_display.width, OLED_display.height))
    display_image = Image.fromarray(cv2_image).convert('1')
    image_buffer.seek(0)
    logging.debug("Image processing took {}".format(time.time() - s))
    s = time.time()
    image_queue.put(display_image)
    logging.debug("Image queuing took {}".format(time.time() - s))
    frame_frequency = time.time() - last_start
    last_start = time.time()
    frame_rate = 1 / frame_frequency
    fps += frame_rate
    frame_count += 1
    if last_start - last_report_at >= 1.0:
Example #56
0
def spatter(x, severity=1):
    c = [(0.65, 0.3, 4, 0.69, 0.6, 0), (0.65, 0.3, 3, 0.68, 0.6, 0),
         (0.65, 0.3, 2, 0.68, 0.5, 0), (0.65, 0.3, 1, 0.65, 1.5, 1),
         (0.67, 0.4, 1, 0.65, 1.5, 1)][severity - 1]
    x_PIL = x
    x = np.array(x, dtype=np.float32) / 255.

    liquid_layer = np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1])

    liquid_layer = gaussian(liquid_layer, sigma=c[2])
    liquid_layer[liquid_layer < c[3]] = 0
    if c[5] == 0:
        liquid_layer = (liquid_layer * 255).astype(np.uint8)
        dist = 255 - cv2.Canny(liquid_layer, 50, 150)
        dist = cv2.distanceTransform(dist, cv2.DIST_L2, 5)
        _, dist = cv2.threshold(dist, 20, 20, cv2.THRESH_TRUNC)
        dist = cv2.blur(dist, (3, 3)).astype(np.uint8)
        dist = cv2.equalizeHist(dist)
        ker = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])
        dist = cv2.filter2D(dist, cv2.CV_8U, ker)
        dist = cv2.blur(dist, (3, 3)).astype(np.float32)

        m = cv2.cvtColor(liquid_layer * dist, cv2.COLOR_GRAY2BGRA)
        m /= np.max(m, axis=(0, 1))
        m *= c[4]
        # water is pale turquoise
        color = np.concatenate(
            (175 / 255. * np.ones_like(m[..., :1]), 238 / 255. *
             np.ones_like(m[..., :1]), 238 / 255. * np.ones_like(m[..., :1])),
            axis=2)

        color = cv2.cvtColor(color, cv2.COLOR_BGR2BGRA)

        if len(x.shape) < 3 or x.shape[2] < 3:
            add_spatter_color = cv2.cvtColor(np.clip(m * color, 0, 1),
                                             cv2.COLOR_BGRA2BGR)
            add_spatter_gray = rgb2gray(add_spatter_color)

            return np.clip(x + add_spatter_gray, 0, 1) * 255

        else:

            x = cv2.cvtColor(x, cv2.COLOR_BGR2BGRA)

            return cv2.cvtColor(np.clip(x + m * color, 0, 1),
                                cv2.COLOR_BGRA2BGR) * 255
    else:
        m = np.where(liquid_layer > c[3], 1, 0)
        m = gaussian(m.astype(np.float32), sigma=c[4])
        m[m < 0.8] = 0

        x_rgb = np.array(x_PIL.convert('RGB'))

        # mud brown
        color = np.concatenate((63 / 255. * np.ones_like(x_rgb[..., :1]),
                                42 / 255. * np.ones_like(x_rgb[..., :1]),
                                20 / 255. * np.ones_like(x_rgb[..., :1])),
                               axis=2)
        color *= m[..., np.newaxis]
        if len(x.shape) < 3 or x.shape[2] < 3:
            x *= (1 - m)
            return np.clip(x + rgb2gray(color), 0, 1) * 255

        else:
            x *= (1 - m[..., np.newaxis])
            return np.clip(x + color, 0, 1) * 255
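A usage sketch for spatter (assumptions: the input is a PIL image, since the function calls x_PIL.convert('RGB'); the file names are illustrative; severity ranges from 1 to 5):

import numpy as np
from PIL import Image

img = Image.open("input.jpg")
out = spatter(img, severity=3)  # float array scaled to [0, 255]
Image.fromarray(out.astype(np.uint8)).save("spattered.jpg")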
Example #57
0
# best to keep it a factor of 100 as image dimensions
# will first be resized to a factor of 100.
blocksize = 10

# get list of images in the folder
images = glob.glob("images/*")

# for each LBP descriptor (note: this loops over lbps, not images, and the
# imread below always loads the first image; use images[i] to walk the folder)
for i in range(len(lbps)):

    # read the current image
    img = cv2.imread(images[0])

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # imread returns BGR, not RGB
    equ = cv2.equalizeHist(gray)  # do histogram equalization

    hist = lbps[i].describe(equ)

    print(hist)

    plt.subplot(1, 1, 1)
    plt.plot(hist)
    plt.axis('off')

    # compute distance array via LBP (checkerboard format)
    # for j in range(5):
    # checkered = subdivide_checkeredLBP(img, blocksize, lbps[3])
    # equ = cv2.equalizeHist(np.uint8(checkered)) # do histogram equalization
    # sub = cv2.bitwise_not(equ)
    # ret,thresh1 = cv2.threshold(sub,200,255,cv2.THRESH_TOZERO)
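The lbps objects are defined outside this snippet; a hypothetical stand-in with the same describe() interface, built on scikit-image's local_binary_pattern (a sketch, not the original class):

import numpy as np
from skimage.feature import local_binary_pattern

class LocalBinaryPatterns:
    def __init__(self, numPoints, radius):
        self.numPoints = numPoints
        self.radius = radius

    def describe(self, image, eps=1e-7):
        # compute the uniform LBP map, then a normalized histogram over it
        lbp = local_binary_pattern(image, self.numPoints, self.radius,
                                   method="uniform")
        hist, _ = np.histogram(lbp.ravel(),
                               bins=np.arange(0, self.numPoints + 3),
                               range=(0, self.numPoints + 2))
        return hist.astype("float") / (hist.sum() + eps)

lbps = [LocalBinaryPatterns(24, 8)]  # example parameters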
Example #58
0
    def run(self):
        self.running = True

        cap = cv2.VideoCapture(0)
        # cap = cv2.VideoCapture('output.avi')

        kernel_ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        # note: the cross-shaped kernel below overrides the ellipse above
        kernel_ellipse = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        font = cv2.FONT_HERSHEY_SIMPLEX
        prev_edges = -1
        prev_flooded_edges = -1

        while (self.running and cap.isOpened()):
            if (self.destroyAllWindows == True):
                cv2.destroyAllWindows()
                self.destroyAllWindows = False
            t = time.time()
            # Capture frames from the camera
            ret, originalFrame = cap.read()
            if ret == False:
                print("Erro na leitura da câmera")
                self.running = False
                break
            frame = originalFrame[:, 0:300]

            ##########################################
            originalGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # capture frames are BGR, not RGB
            gray = cv2.GaussianBlur(originalGray, (5, 5), 0)
            gray = cv2.dilate(gray, kernel_ellipse, iterations=1)
            gray = cv2.equalizeHist(gray)
            gray = cv2.medianBlur(gray, 3)
            ##########################################

            ##########################################
            # Original edges = only the edge map obtained from the current frame
            # Simple edge = processed edge map of the current frame
            # Previous edges = edges computed from the previous frames

            edges, prev_edges = self.findEdges(gray, prev_edges,
                                               kernel_ellipse)
            flooded_edges, prev_flooded_edges = self.floodFillEdges(
                edges, prev_flooded_edges)

            im2, contours, hierarchy = cv2.findContours(
                flooded_edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            ##########################################

            ##########################################
            '''try:
                eroded_edges = cv2.erode(flooded_edges, kernel_ellipse, iterations=2)
                lines = cv2.HoughLines(image=eroded_edges, rho=20,
                                       theta=np.pi / 6, threshold=80)
                for x in range(0,len(lines)):
                    for rho, theta in lines[x]:
                        a = np.cos(theta)
                        b = np.sin(theta)
                        x0 = a * rho
                        y0 = b * rho
                        x1 = int(x0 + 1000 * (-b))
                        y1 = int(y0 + 1000 * (a))
                        x2 = int(x0 - 1000 * (-b))
                        y2 = int(y0 - 1000 * (a))

                        cv2.line(originalFrame, (x1, y1), (x2, y2), (0, 0, 255), 2)
                
                lines_dict = self.dictLines(lines)
            except:
                print("Standard Hough Line problem")
                ##########################################
            '''
            '''
                ##########################################
            try:
                lines_p = cv2.HoughLinesP(image=edges, rho=1, theta=np.pi / 100,
                                          threshold=self.thresh, lines=np.array([]),
                                          minLineLength=self.minLineSize, maxLineGap=20)
                filtered_lines = self.filterLines(lines_p)
            except:
                print("Probabilistic Line problem")
                ###########################################
            '''

            try:
                biggestContour = self.findContour(contours)
            except:
                print("Error na análise do contour")

            try:
                cv2.drawContours(originalFrame, [biggestContour], -1,
                                 (255, 0, 0), 2)
                hull = cv2.convexHull(biggestContour, returnPoints=False)
                defects = cv2.convexityDefects(biggestContour, hull)
                defects_list = self.filterHullDefects(defects)
                numberOfDefects = len(defects_list)
            except:
                print("No contour found")

            fps_c = 1 / (time.time() - t)
            self.fps = 0.75 * self.fps + 0.25 * fps_c
            cv2.putText(originalFrame, str(self.fps), (590, 30), font, 1,
                        (255, 255, 255), 2, cv2.LINE_AA)

            try:
                if (self.showConvexHull == True):
                    for i in range(defects.shape[0]):
                        s, e, f, d = defects[i, 0]
                        if (d > 0):
                            start = tuple(biggestContour[s][0])
                            end = tuple(biggestContour[e][0])
                            cv2.line(originalFrame, start, end, [0, 255, 0], 2)

                if (self.showHullDefects == True):
                    for s, e, f, d in defects_list:
                        start = tuple(biggestContour[s][0])
                        end = tuple(biggestContour[e][0])
                        far = tuple(biggestContour[f][0])
                        cv2.line(originalFrame, start, end, [0, 255, 0], 2)
                        midx = int((start[0] + end[0]) / 2)
                        midy = int((start[1] + end[1]) / 2)
                        midpoint = (midx, midy)
                        cv2.line(originalFrame, midpoint, far, [0, 255, 125],
                                 2)
                        cv2.circle(originalFrame, far, 5, [0, 0, 255], -1)
                '''if (self.showHoughLines == True):
                    for line in filtered_lines:
                        cv2.line(gray, line[1], line[0], (0, 0, 255), 3, cv2.LINE_AA)
                '''
            except:
                print("Error showing one or more frames")

            if (self.visibleFloodfill == True):
                cv2.imshow("Floodfill", flooded_eges)
            if (self.visibleFrame == True):
                cv2.imshow("Frame", originalFrame)
            if (self.visibleEdges == True):
                cv2.imshow("Edges", edges)
            if (self.visibleGray == True):
                cv2.imshow("Gray", gray)

            k = cv2.waitKey(1) & 0xFF
            if k == 27:  # esc
                self.running = False
                break

            gesture = self.checkGesture(k)
            if gesture != -1:
                try:
                    data = self.cnt_hull_attributes(biggestContour)
                    data.append(numberOfDefects)
                    line_to_be_defined = 0
                    data.append(line_to_be_defined)
                    data.append(gesture)
                    self.dataframe.write(data)
                except:
                    print("Error writing to database")

        cap.release()
        cv2.destroyAllWindows()
Example #59
0
    split = line.split(",")
    emotion = split[6]
    if (int(emotion) <= 8):
        try:
            file = indir + split[0]
            extension = os.path.splitext(file)[1]
            gen = uuid.uuid4()
            emotion_path = outdir + dict[emotion] + "/" + str(gen) + extension
            start_x = int(split[1])
            start_y = int(split[2])
            end_x = start_x + int(split[3])
            end_y = start_y + int(split[4])
            print("File: " + file + "\nEmo aff: " + emotion + "\nEmo fer: " +
                  dict[emotion] + "\nOut path: " + emotion_path +
                  "\nStart x: " + str(start_x) + "\tEnd x: " + str(end_x) +
                  "\nStart y: " + str(start_y) + "\tEnd y: " + str(end_y) +
                  "\nPercent: " + str(percent) + " (" + str(c) + " over " +
                  str(num_lines) + ")")

            img_src = cv2.imread(file)
            img_src = cv2.cvtColor(img_src, cv2.COLOR_BGR2GRAY)
            img_src = img_src[start_y:end_y, start_x:end_x]
            img_src = cv2.resize(img_src, (48, 48),
                                 interpolation=cv2.INTER_CUBIC)
            img_src = cv2.equalizeHist(img_src)
            cv2.imwrite(emotion_path, img_src)
        except Exception as e:
            print("Error: " + str(e))

        print("____")
Example #60
0
#reading the image
#frame = cv2.imread('ASLsigns/C/172189195.jpg')
frame = cv2.imread('ASL.jpg')

frame = cv2.GaussianBlur(frame, (5, 5), 2)
cv2.imwrite("lowPass55_2.png", frame)

#resizing the image
frame = cv2.resize(frame, (100, 100))
cv2.imwrite("resize.png", frame)

#adaptive histogram equalization for contrast enhancement
frame_yuv = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV)
cv2.imwrite("yuvspace.png", frame_yuv)
frame_yuv[:, :, 0] = cv2.equalizeHist(frame_yuv[:, :, 0])
cv2.imwrite("yuvhisteq.png", frame_yuv)
frame = cv2.cvtColor(frame_yuv, cv2.COLOR_YUV2BGR)
cv2.imwrite("yuv2bgr.png", frame)
#prewitt filtering (not implemented in this snippet)

# apply a series of erosions and dilations to the mask
# using an elliptical kernel
converted = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
cv2.imwrite("bgr2hsv.png", converted)
skinMask = cv2.inRange(converted, lower, upper)  # lower/upper HSV skin bounds defined outside this snippet
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
skinMask = cv2.erode(skinMask, kernel, iterations=2)
skinMask = cv2.dilate(skinMask, kernel, iterations=2)

# blur the mask to help remove noise, then apply the