Example #1
 def _close(self):
     if self.verbose:
         print('Closing window')
         print('\n--------------------------------------')
         self._print_stats()
         print('--------------------------------------\n')
     cv2.destroyWindow(self.name)
Example #2
def colour_picker(colourSTR="(unspecified)", colourGrabWindowSize=5, colourHueWindowSize=40, colourSaturationWindowSize=40, colourValueWindowSize=40):
    global cam
    global hsv

    print "Right click the ",colourSTR," blob. Hit escape when done."
    cv2.namedWindow("image") 
    cv2.setMouseCallback("image", mouseCallBack, param=None)
    try:
        while True:
            #Get image from webcam and convert to HSV
            ret, img = cam.read()
            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            
            
            cv2.imshow("image", img)        
            
            cMinY = max(0, rightButtonY - colourGrabWindowSize)
            cMaxY = min(len(hsv) - 1, rightButtonY + colourGrabWindowSize)
            cMinX = max(0, rightButtonX - colourGrabWindowSize)
            cMaxX = min(len(hsv[0]) - 1, rightButtonX + colourGrabWindowSize)          
            
            cHue = int(np.mean(hsv[cMinY:cMaxY, cMinX:cMaxX, 0]))
            cSaturation = int(np.mean(hsv[cMinY:cMaxY, cMinX:cMaxX, 1]))
            cValue = int(np.mean(hsv[cMinY:cMaxY, cMinX:cMaxX, 2]))
            
            #Sleep infinite loop for ~10ms
            #Exit if user presses <Esc>
            if cv2.waitKey(10) == 27:
                break
    
    finally:
        cv2.destroyWindow("image")
        return np.array([cHue, cSaturation, cValue])
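Example #2 relies on a mouseCallBack handler and rightButtonX/rightButtonY globals that it never defines. A minimal sketch consistent with that usage (only the names come from the snippet; the body is an assumption):

import cv2

# assumed initial position so the sampling window is defined before the first click
rightButtonX, rightButtonY = 0, 0

def mouseCallBack(event, x, y, flags, param):
    # assumed handler: remember the last right-click position
    global rightButtonX, rightButtonY
    if event == cv2.EVENT_RBUTTONDOWN:
        rightButtonX, rightButtonY = x, y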
Example #3
    def resetAll(self):
        if self.ebb:
            self.ebb.closeSerial()
        if self._wormFinder:
            self._wormFinder = None
            self.runTracking = False
            cv2.destroyWindow('gaussian')
            
        if self._cap.isWritingVideo:
            self.record()
            self.ui.buttonRec.setStyleSheet('QPushButton {}')
#            self._cap.stopWritingVideo()

        if self._cap._capture.isOpened():
            self._cap._capture.release()

        self.showImage = False
        self.runTracking = False
        if self.motorsOn:
            self.motorized()
            self.motorsOn = False
            self.ui.buttonMotorized.setStyleSheet('QPushButton {}')
        self.ui.videoFrame.setText("Select source if video not displayed here on boot up")
        self.ui.fps.setText("")
        logger.info("Reset button pressed") 

        self.setAble('source', True)
        self.setAble('reset', False) 
Example #4
def show_depth_img():
    if show_depth_img_flag == 1:
        cv2.namedWindow("Depth Img", cv2.WINDOW_NORMAL)
        cv2.imshow("Depth Img", depth_img)
        # cv2.imshow("Depth Img", depth_img_averaging.average())
    else:
        cv2.destroyWindow("Depth Img")
Example #5
    def start(self):
        # in case the cv library is not available must return
        # immediately in order to avoid any problems (required)
        if not cv2: return

        # retrieves the reference to the first video device
        # present in the current system, this is going to be
        # used for the capture of the image and delta calculus
        self.camera = cv2.VideoCapture(0)

        # creates both windows that are going to be used in the
        # display of the current results,
        cv2.namedWindow(self.win_image, cv2.WINDOW_AUTOSIZE)
        cv2.namedWindow(self.win_delta, cv2.WINDOW_AUTOSIZE)

        # sets the initial previous image as an invalid image as
        # there's no initial image when the loop starts
        self.previous = None

        # iterates continuously for the running of the main loop
        # of the current program (this is the normal behavior)
        while True:
            result = self.tick()
            if not result: break
            key = cv2.waitKey(10)
            if key == 27: break

        # destroys the currently displayed windows on the screen
        # so that they can no longer be used in the current screen
        cv2.destroyWindow(self.win_image)
        cv2.destroyWindow(self.win_delta)
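The tick method called by this loop is not part of the snippet; a minimal sketch of a frame-grab/delta step under the same attribute names (an assumption, not the original implementation):

    def tick(self):
        # grab a frame; returning False ends the main loop above
        result, image = self.camera.read()
        if not result: return False
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # show the delta against the previous frame once one exists
        if self.previous is not None:
            delta = cv2.absdiff(gray, self.previous)
            cv2.imshow(self.win_delta, delta)
        self.previous = gray
        cv2.imshow(self.win_image, image)
        return True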
Example #6
    def run(self):
        rate = rospy.Rate(10)
        done = False
        cv2.namedWindow("kinect_view")
        cv2.setMouseCallback("kinect_view", self.mouse_call)

        while (not rospy.is_shutdown() and not done):

            if self.image is None:
                continue

            image = np.copy(self.image)
            state = self.states[self.state].replace('_', ' ')
            cv2.putText(image, 'Click the {}'.format(self.target_object), (10, self.image.shape[1] - 100), self.font, 1, (255, 100, 80), 2)
            self.draw_corners(image)

            if self.is_done:
                cv2.polylines(image, np.int32([self.corners]), True, (0, 255, 0), 6)
                done = True
                print('DONE')

            cv2.imshow("kinect_view", image)

            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                break
            rate.sleep()

            if done:
                cv2.destroyWindow("kinect_view")
Example #7
def position_interpolator(background):
    global positions
    if not isfile(POSITIONS_DUMP_FILENAME):
        def callback(event, x, y, flags, parameters):
            if event == cv2.EVENT_RBUTTONDOWN:
                positions.append(Coordinate(x, y))
    
        cv2.namedWindow("Interpolator")
        cv2.setMouseCallback("Interpolator", callback)

        while True: 
            cv2.imshow("Interpolator", background.array)
            if cv2.waitKey() & 0xFF == 27:
                break
        cv2.destroyWindow("Interpolator")
        with open(POSITIONS_DUMP_FILENAME, "w") as positions_dump_file:
            pickle.dump(positions, positions_dump_file) 
    else:
        with open(POSITIONS_DUMP_FILENAME, "r") as positions_dump_file:
            positions = pickle.load(positions_dump_file)
        
    
    t = map(lambda i: i * STEP, range(len(positions)))
    x = map(lambda p: p.x, positions)
    y = map(lambda p: p.y, positions)



    f_x = interpolate.interp1d(t, x, kind = "quadratic")
    f_y = interpolate.interp1d(t, y, kind = "quadratic")
    
    return PositionInterpolator(f_x, f_y)
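Coordinate and PositionInterpolator come from elsewhere in the module; minimal stand-ins consistent with how this function uses them (the position method is an assumed convenience, not a documented API):

from collections import namedtuple

# only .x and .y are needed by the code above
Coordinate = namedtuple('Coordinate', ['x', 'y'])

class PositionInterpolator(object):
    def __init__(self, f_x, f_y):
        self.f_x, self.f_y = f_x, f_y

    def position(self, t):
        # evaluate both interpolators at time t
        return float(self.f_x(t)), float(self.f_y(t))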
Example #8
 def run(self):
   while True:
     f, orig_img = self.capture.read()
     orig_img = cv2.flip(orig_img, 1)
     img = cv2.GaussianBlur(orig_img, (5,5), 0)
     img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # convert the blurred frame
     img = cv2.resize(img, (len(orig_img[0]) // self.scale_down, len(orig_img) // self.scale_down))
     red_lower = np.array([0, 150, 0],np.uint8)
     red_upper = np.array([5, 255, 255],np.uint8)
     red_binary = cv2.inRange(img, red_lower, red_upper)
     dilation = np.ones((15, 15), "uint8")
     red_binary = cv2.dilate(red_binary, dilation)
     contours, hierarchy = cv2.findContours(red_binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
     max_area = 0
     largest_contour = None
     for idx, contour in enumerate(contours):
       area = cv2.contourArea(contour)
       if area > max_area:
         max_area = area
         largest_contour = contour
      if largest_contour is not None:
       moment = cv2.moments(largest_contour)
       if moment["m00"] > 1000 / self.scale_down:
         rect = cv2.minAreaRect(largest_contour)
         rect = ((rect[0][0] * self.scale_down, rect[0][1] * self.scale_down), (rect[1][0] * self.scale_down, rect[1][1] * self.scale_down), rect[2])
         box = cv2.boxPoints(rect)  # BoxPoints lived in cv2.cv before OpenCV 3
         box = np.int0(box)
         cv2.drawContours(orig_img,[box], 0, (0, 0, 255), 2)
         cv2.imshow("ColourTrackerWindow", orig_img)
         if cv2.waitKey(20) == 27:
           cv2.destroyWindow("ColourTrackerWindow")
           self.capture.release()
           break
Example #9
def display_loop(framequeue, quick_catchup, quick_catchup_pixels):
  # Open a window in which to display the images
  display_window_name = "slowjector"
  cv2.namedWindow(display_window_name, cv2.WINDOW_NORMAL)
  last_delta_count = 0
  listq = deque()
  while True:
    sleep(0.001) # Small amount of sleeping for thread-switching
    data = framequeue.get()
    # Source thread will put None if it receives Ctrl-C; if this happens, exit
    # the loop and shut off the display.
    if data is None:
      break

    # Frames are pushed onto a queue (FIFO)
    listq.append(data)
    data = listq.popleft()

    # Otherwise, it puts a tuple (delta_count, image)
    delta_count, image = data

    # Draw the image
    cv2.imshow(display_window_name, image)

    # Optionally, catch up to the live feed after seeing some motion stop by
    # popping all images off of the queue.
    if (quick_catchup and
        delta_count <= quick_catchup_pixels and
        last_delta_count > quick_catchup_pixels):
      listq.clear()
    last_delta_count = delta_count

  # Clean up by closing the window used to display images.
  cv2.destroyWindow(display_window_name)
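display_loop expects a producer thread that pushes (delta_count, image) tuples and a final None sentinel onto framequeue; a minimal producer sketch under those assumptions:

import cv2

def capture_loop(framequeue):
    # hypothetical producer: counts changed pixels between consecutive frames
    capture = cv2.VideoCapture(0)
    previous = None
    try:
        while True:
            ret, frame = capture.read()
            if not ret:
                break
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            delta_count = 0
            if previous is not None:
                delta = cv2.absdiff(gray, previous)
                mask = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]
                delta_count = cv2.countNonZero(mask)
            previous = gray
            framequeue.put((delta_count, frame))
    finally:
        capture.release()
        framequeue.put(None)  # the sentinel display_loop waits for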
Example #10
def detect():
    q = QRobot()
    cap=cv2.VideoCapture(1)
    success, frame = cap.read()
    color = (0, 255, 0)
    classfier=cv2.CascadeClassifier("/home/echo/Desktop/qrobot/qrobot_py/haarcascade_frontalface_alt.xml")
    while success:
        success, frame = cap.read()
        size=frame.shape[:2]
        image=np.zeros(size,dtype=np.float16)
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.equalizeHist(image, image)
        
        divisor=8
        h, w = size
    minSize=(w//divisor, h//divisor)
        faceRects = classfier.detectMultiScale(image, 1.2, 2, cv2.CASCADE_SCALE_IMAGE,minSize)
        if len(faceRects)>0:
            for faceRect in faceRects: 
                    x, y, w, h = faceRect
                    cv2.rectangle(frame, (x, y), (x+w, y+h), color, 2)
                    q.left_wing_fast_up(1)
                    q.right_wing_fast_up(1)
                    q.heart_light_color(randint(0,255), randint(0,255), randint(0,255))
                    q.eye_emotion(randint(0,50), 1)
                    q.send_data()
        cv2.imshow("test", frame)
        key=cv2.waitKey(10)
        c = chr(key & 255)
        if c in ['q', 'Q', chr(27)]:
            break

    del q  
    cap.release()
    cv2.destroyWindow("test")
Example #11
def click_and_crop(event, x, y, flags, param):
    # grab references to the global variables
    global refPt, cropping, cropImage

    # if the left mouse button was clicked, record the starting
    # (x, y) coordinates and indicate that cropping is being
    # performed
    if event == cv2.EVENT_LBUTTONDOWN:
        refPt = [(x, y)]
        cropping = True
    # check to see if the left mouse button was released
    elif event == cv2.EVENT_LBUTTONUP:
        # record the ending (x, y) coordinates and indicate that
        # the cropping operation is finished
        refPt.append((x, y))
        cropping = False
        # draw a rectangle around the region of interest
        cv2.rectangle(cropImage, refPt[0], refPt[1], (0, 255, 0), 2)
        cv2.imshow("crop1", cropImage)
        msg = Target()
        msg.bb.x = refPt[0][0]
        msg.bb.y = refPt[0][1]
        msg.bb.width = x - refPt[0][0]
        msg.bb.height = y - refPt[0][1]
        msg.bb.confidence = 1
        msg.img = bridge.cv2_to_imgmsg(cropImage, 'bgr8')
        pub1 = rospy.Publisher("tld_gui_bb", Target, queue_size = 10)
        cv2.waitKey(1000)
        cv2.destroyWindow('crop1')
        pub1.publish(msg)
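One caveat with this callback: rospy.Publisher is created on every mouse release, and a fresh publisher needs a moment to connect to subscribers, so the first message can be dropped. A common alternative (a suggestion, not the original code) is to create the publisher once at module scope and reuse it:

import rospy

# hypothetical module-level publisher so it outlives the callback
pub1 = rospy.Publisher("tld_gui_bb", Target, queue_size=10)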
Example #12
def callback_depth(msg):
    # treating the image containing the depth data
    global depthImgIndex, lastDepthImgs, depth_img_Avg, got_depth
    # getting the image
    try:
        img = CvBridge().imgmsg_to_cv2(msg, "passthrough")
    except CvBridgeError as e:
        print (e)
        return
    cleanimage = clean(img, 255)
    if show_depth:
        # shows the image after processing
        cv2.imshow("Depth", img)
        cv2.waitKey(1)
    else:
        cv2.destroyWindow("Depth")
    # storing the image
    lastDepthImgs[depthImgIndex] = np.copy(cleanimage)
    depthImgIndex += 1
    if depthImgIndex >= NB_DEPTH_IMGS:
        depthImgIndex = 0
    # creates an image which is the average of the last ones
    depth_img_Avg = np.copy(lastDepthImgs[0])
    for i in range(0, NB_DEPTH_IMGS):
        depth_img_Avg += lastDepthImgs[i]
    depth_img_Avg /= NB_DEPTH_IMGS
    got_depth = True  # ensures there is a depth image available
    if got_color and got_depth:
        filter_by_depth()
Example #13
 def get_cams(self, image):
     cv2.namedWindow("Screw")
     cv2.startWindowThread()
     cv2.setMouseCallback("Screw", self._get_screw)
     self.screw_location = Point(0,0)
     while True:
         img = copy.copy(image)
         cv2.circle(img, self.screw_location.to_image(), 3, (0,0,255), 3)
         cv2.imshow('Screw', img)
         k = cv2.waitKey(33)
         if k==10:
             # enter pressed
             break
         elif k==-1:
             pass
         else:
             print(k)
             print('Press Enter to continue..')
     cv2.destroyWindow('Screw')
     arc = self.screw_location - self.board_center
     cam_angle = arc.angle()
     self.cam_angle = cam_angle
     if len(self.base_cams) != 0:
         self.cams = self.rotate(self.base_cams, cam_angle)
         # self.cams = [c for c in self.cams if c.y <= 0]
         self.locate_cams(image)
Example #14
    def run(self):
        cv2.namedWindow('ANALOG', flags=cv2.WINDOW_KEEPRATIO)
        cv2.setMouseCallback('ANALOG', self.on_mouse)
        while True:
            image = self.img.copy()
            text = 'Press "e" to exit and save, "r" to reset current data'
            cv2.putText(img=image, org=(100, 100), text=text,
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,
                        color=(255, 255, 255), thickness=2, lineType=cv2.LINE_AA)
            h = 150
            for t in self.training_data:
                txt = "deg {0}, value: {1}".format(t['degree'], t['value'])

                cv2.putText(img=image, org=(100, h), text=txt,
                            fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,
                            color=(255, 255, 255), thickness=2, lineType=cv2.LINE_AA)
                h += 30
            cv2.imshow('ANALOG', image)
            key = cv2.waitKey(33) & 0xFF
            if key == ord('e'):
                break
            elif key == ord('r'):
                self.training_data = []

        cv2.destroyWindow('ANALOG')
        return self.training_data
Example #15
  def prepare_face_data(self, face_label):

    height = 480
    width = 640
    rate = rospy.Rate(10)
    facecount = 0
    face_images = []
    face_labels = []
    cv2.namedWindow("Training Set Preparation", 1)

    while (not rospy.is_shutdown()) and (facecount < TRAIN_SIZE+TEST_SIZE):
      if(self.is_image_present):
        im = self.image
      else:
        im = np.ones((height,width,3), np.uint8)
      (rows,cols,channels) = im.shape
      im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
      faces = face_cascade.detectMultiScale(im_gray, 1.3, 5)
      if (len(faces) == 1):
        for (x,y,w,h) in faces:
          if ((w>50) and (h>50)):
            cv2.rectangle(im,(x,y),(x+w,y+h),(255,0,0),2)
            face_gray = np.array(im_gray[y:y+h, x:x+w], 'uint8')
            face_sized = cv2.resize(face_gray, (200, 200))
            face_images.append(face_sized)
            face_labels.append(face_label)
            facecount += 1
      cv2.imshow("Training Set Preparation", im)
      cv2.waitKey(3)
      rate.sleep()

    cv2.destroyWindow("Training Set Preparation")
    return face_images, face_labels
Example #16
def demoHaarLike(namevideo, width, height):
    cv2.namedWindow("preview")
    vc = cv2.VideoCapture(0)
    outvideo=cv2.VideoWriter(namevideo,-1,60,(width,height))
    idx=0
    if vc.isOpened(): # try to get the first frame
        rval, frame = vc.read()
    else:
        rval = False
    while rval:
#         frame=cv2.imread('0.bmp')
        cv2.imshow("preview", frame)
        rval, frame = vc.read()
        frame=HandDetectionImproved(frame)
#       outvideo.write(frame)

        key = cv2.waitKey(1)
        
        if key==ord('s'):
            cv2.imwrite(str(idx)+'.bmp',frame)
            idx=idx+1
        if key == 27: # exit on ESC
            break
    cv2.destroyWindow("preview")
    vc.release()
    outvideo.release()
Example #17
    def detect_motion_new(self, winName, interval=1):
        #Implemented after talking with Dr Smart about image averaging and background extraction

        min_contour_area = 25
        max_contour_area = 1250
        retval = False
        threshold = 65
        try:
            _image_static = None
            _image_static = self.save_image(persist=False)
            _image_static = cv2.cvtColor(numpy.array(_image_static), cv2.COLOR_RGB2GRAY)
            _image_static = cv2.GaussianBlur(_image_static, (21, 21), 0)

            accumulator = numpy.float32(_image_static)
            while True:
                sleep(interval)
                _image_static = self.save_image(persist=False)
                _image_static = cv2.cvtColor(numpy.array(_image_static), cv2.COLOR_RGB2GRAY)
                _image_static = cv2.GaussianBlur(_image_static, (21, 21), 0)

                cv2.accumulateWeighted(numpy.float32(_image_static), accumulator, 0.1)

                _image_static = cv2.convertScaleAbs(accumulator)

                _image_dynamic = self.save_image(persist=False)
                _image_dynamic1 = cv2.cvtColor(numpy.array(_image_dynamic), cv2.COLOR_RGB2GRAY)
                _image_dynamic1 = cv2.GaussianBlur(_image_dynamic1, (21, 21), 0)

                # ideas from http://docs.opencv.org/master/d4/d73/tutorial_py_contours_begin.html#gsc.tab=0
                _delta = cv2.absdiff(_image_dynamic1, _image_static)

                _threshold = cv2.threshold(_delta, 17, 255, cv2.THRESH_BINARY)[1]
                # dilate the thresholded image to fill in holes, then find contour on thresholded image
                _threshold = cv2.dilate(_threshold, None, iterations=5)

                (img, contours, _) = cv2.findContours(_threshold.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

                dyn = cv2.cvtColor(numpy.array(_image_dynamic), cv2.COLOR_RGB2GRAY)
                # loop over the contours
                for contour in contours:
                    # if the contour is too small, ignore it
                    _area = cv2.contourArea(contour)
                    if _area < min_contour_area: # or _area > max_contour_area:
                        continue  # skip to the next

                    # compute the bounding box for the contour, draw it on the frame,

                    (x, y, w, h) = cv2.boundingRect(contour)
                    #cv2.rectangle(dyn, (x, y), (x + w, y + h), (0, 12, 255), 2)
                    cv2.ellipse(dyn, (x+5, y+25), (10, 20), 90, 0, 360, (255, 0, 0), 2)

                cv2.imshow(winName, numpy.hstack([dyn, _threshold]))

                key = cv2.waitKey(10)
                if key == 27:
                    cv2.destroyWindow(winName)
                    break

        except Exception as ex:
            print(ex)
Example #18
 def onmouse(self, event, x, y, flags, param):
     """
     Mouse callback when mouse event detected in the window.
     Note: This function is only used for ROI setting.
     """
     if event == cv2.EVENT_LBUTTONDOWN:
         self.dragStart = x, y
         self.sel = 0,0,0,0
     elif self.dragStart:
         #print flags
         if flags & cv2.EVENT_FLAG_LBUTTON:
             minpos = min(self.dragStart[0], x), min(self.dragStart[1], y)
             maxpos = max(self.dragStart[0], x), max(self.dragStart[1], y)
             self.sel = minpos[0], minpos[1], maxpos[0], maxpos[1]
             img = cv2.cvtColor(self.img, cv2.COLOR_GRAY2BGR)
             cv2.rectangle(img, (self.sel[0], self.sel[1]), (self.sel[2], self.sel[3]), (0,255,255), 1)
             cv2.imshow(self.test_winname, img)
         else:
             patch = self.img[self.sel[1]:self.sel[3], self.sel[0]:self.sel[2]]
             self.hist_lines(patch, showhist=True)
             cv2.destroyWindow("patch")
             cv2.imshow("patch", patch)
             self.get_dft(img2dft=patch, showdft=True)
             print "Press a to accept the ROI"
             self.roiNeedUpadte = True
             self.dragStart = None
Example #19
    def remove_sin_noise(self):
        """
        Warning: this function is unoptimized and may run slowly.

        Show the original image with trackbars that control the
        direction, amplitude and phase of the compensation sine
        wave. A result window shows the result.
        """
        h, w = self.img.shape[:2]
        small_img = cv2.resize(self.img, (w // 2, h // 2))
        cv2.imshow(self.test_winname, small_img)
        cv2.createTrackbar("A*100", self.test_winname,
                           0, 100, self.update_sine_win)
        cv2.createTrackbar("B", self.test_winname,
                           1, 100, self.update_sine_win)
        cv2.createTrackbar("amp", self.test_winname,
                           0, 255, self.update_sine_win)
        cv2.createTrackbar("pha", self.test_winname,
                           0, 360, self.update_sine_win)
        self.update_sine_win()
        while True:
            ch = cv2.waitKey()
            if ch == 27:  # Esc
                break
        cv2.destroyWindow(self.test_winname)
Example #20
    def capture_new_frame(self):
        assert self.camera.isOpened()
        # get feed from camera frame by frame
        status,self.frame = self.camera.read()

        # mirror the image (horizontal flip)
        if status:
            self.frame = cv2.flip(self.frame, flipCode=1)
        # else nothing to process: the frame was not grabbed


        if not self.hand_histogram_captured:
            # Draw hand detection filter
            result = self.draw_hand_filter(self.frame)
            cv2.imshow('Cover all squares with your hand', result)
        else:
            self.detect_finger_tip(self.frame)
        # Scan Human Skin to calc histogram if SPACE is pressed
        pressed_key = cv2.waitKey(1)
        if pressed_key == 32:
            self.scan_skin()
            cv2.destroyWindow('Cover all squares with your hand')
            self.hand_histogram_captured = True
        # If ESC is pressed destroy all Opencv windows (Close Debugging Windows)
        elif pressed_key == 27:
            cv2.destroyAllWindows()
            self.DEBUGGING = False
Example #21
def vertCrop(edge,a):
    # find the top and bottom of joint
    h = edge.shape[0]; w = edge.shape[1]
    top,bot = 0,0
    mid = int(w/2)
    print "mid=",mid,"h=", h, 'w=', w
    for x in xrange(h/12,h-(h/12)):
        if top != 0 and bot != 0: break
        if top == 0:
            if edge[x,mid] == 255.0: top = x
        if bot == 0:
            if edge[h-x-1,mid] == 255.0: bot = h-x-1

    print('top=', top, 'bot=', bot)
    # edge[top,mid] = edge[bot,mid] = 111

    # crop image to just the joint gap vertically
    cratio = h//12
    ctop = top - cratio
    cbot = bot + cratio
    print('ctop=', ctop, 'cbot=', cbot)
    cedge = edge[ctop:cbot]

    cv.imshow("New Edge", cedge)
    a = a[ctop:cbot]
    cv.imshow("Cropped Real", a)
    cv.waitKey(0)
    cv.destroyWindow("Cropped Real")
    cv.destroyWindow("New Edge")
    return a,ctop,cbot
Example #22
def findDigits(imagefile, digitheight, fontfile="C:\\Windows\\Fonts\\Arial.ttf"):
    im = cv2.imread(imagefile)
    out = np.zeros(im.shape,np.uint8)
    gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
    thresh = cv2.adaptiveThreshold(gray,255,1,1,11,2)

    contours,hierarchy = cv2.findContours(thresh.copy(),cv2.RETR_EXTERNAL ,cv2.CHAIN_APPROX_SIMPLE)

    model = createDigitsModel(fontfile, digitheight)
    for cnt in contours:
        x,y,w,h = cv2.boundingRect(cnt)
        if h>w and h>(digitheight*4)//5 and h<(digitheight*6)//5: #+/-20%
            cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),1)
            roi = thresh[y:y+h,x:x+w] # crop
            roi = cv2.resize(roi,(digitheight,digitheight//2))
            roi = roi.reshape((1,digitheight*(digitheight//2)))
            roi = np.float32(roi)
            retval, results, neigh_resp, dists = model.find_nearest(roi, k=1)
            string = str(int((results[0][0])))
            #cv2.drawContours(out,[cnt],-1,(0,255,255),1)
            cv2.putText(out,string,(x,y+h),0,1,(0,255,0))

    cv2.imshow('in',im)
    cv2.imshow('out',out)
    cv2.waitKey(0)
    cv2.destroyWindow( 'in' )
    cv2.destroyWindow( 'out' )
Example #23
def get_coords(img, winname=None):
    out = img.copy()
    coords = []

    if winname is None:
        winname = 'choose coords'

    def onmouse(event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            coords.append((x, y))
            cv2.circle(out, (x, y), radius=10, color=(0, 0, 255),
                       thickness=-1)
            cv2.imshow(winname, out)

    cv2.imshow(winname, out)
    cv2.setMouseCallback(winname, onmouse)

    while True:
        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            out = img.copy()
            coords = []
        if k == ord(' '):
            cv2.destroyWindow(winname)
            return coords
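A possible call site for get_coords (the image path is a placeholder):

img = cv2.imread('input.png')  # placeholder path
points = get_coords(img)       # click to add points; Esc clears, space accepts
print(points)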
Example #24
def get_polyline(image,window_name):
    cv2.namedWindow(window_name)
    class GetPoly:
        xys = []        
        done = False
        def callback(self,event, x, y, flags, param):
            if self.done == True:
                pass
            elif event == cv2.EVENT_LBUTTONDOWN:
                self.xys.append((x,y))
            elif event == cv2.EVENT_MBUTTONDOWN:
                self.done = True
    gp = GetPoly()
    cv2.setMouseCallback(window_name,gp.callback)
    print "press middle mouse button or 'c' key to complete the polygon"
    while not gp.done:
        im_copy = image.copy()
        for (x,y) in gp.xys:
            cv2.circle(im_copy,(x,y),2,(0,255,0))
        if len(gp.xys) > 1 and not gp.done:
            cv2.polylines(im_copy,[np.array(gp.xys).astype('int32')],False,(0,255,0),1)
        cv2.imshow(window_name,im_copy)
        key = cv2.waitKey(50)
        if key == ord('c'): gp.done = True
    cv2.destroyWindow(window_name)
    return gp.xys
Example #25
def show_hls(img, winname=None):
    if winname is None:
        winname = 'Choose HLS bounds'

    h, w, _ = img.shape
    f = 720.0 / h
    img_720p = cv2.resize(img, dsize=None, fx=f, fy=f)

    def onmouse(event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            bgr = img[y, x]
            hls = cv2.cvtColor(np.asarray([[bgr]], dtype=img.dtype),
                               cv2.COLOR_BGR2HLS)[0, 0]
            out = img_720p.copy()
            cv2.putText(out, text='BGR={}, HLS={}'.format(bgr, hls),
                        org=(0, 300), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.5, color=(255, 255, 255))

            cv2.imshow(winname, out)

    cv2.imshow(winname, img_720p)
    cv2.setMouseCallback(winname, onmouse)

    while True:
        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            cv2.destroyWindow(winname)
            return
Example #26
def interactive_imshow(img, lclick_cb=None, rclick_cb=None, **kwargs):
    """
    Args:
        img (np.ndarray): an image (expect BGR) to show.
        lclick_cb, rclick_cb: a callback ``func(img, x, y)`` for left/right click event.
        kwargs: can be {key_cb_a: callback_img, key_cb_b: callback_img}, to
            specify a callback ``func(img)`` for keypress.

    Some existing keypress event handler:

    * q: destroy the current window
    * x: execute ``sys.exit()``
    * s: save image to "out.png"
    """
    name = 'tensorpack_viz_window'
    cv2.imshow(name, img)

    def mouse_cb(event, x, y, *args):
        if event == cv2.EVENT_LBUTTONUP and lclick_cb is not None:
            lclick_cb(img, x, y)
        elif event == cv2.EVENT_RBUTTONUP and rclick_cb is not None:
            rclick_cb(img, x, y)
    cv2.setMouseCallback(name, mouse_cb)
    key = chr(cv2.waitKey(-1) & 0xff)
    cb_name = 'key_cb_' + key
    if cb_name in kwargs:
        kwargs[cb_name](img)
    elif key == 'q':
        cv2.destroyWindow(name)
    elif key == 'x':
        sys.exit()
    elif key == 's':
        cv2.imwrite('out.png', img)
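A usage sketch for interactive_imshow with an assumed handler bound to the 'd' key through the kwargs convention described in the docstring:

def dump_shape(img):
    # hypothetical key_cb_d handler: report the image dimensions
    print(img.shape)

img = cv2.imread('photo.jpg')  # placeholder path
interactive_imshow(img, key_cb_d=dump_shape)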
Example #27
 def test_find_holes(self):
     ft = FishTracker()
     BAG_NAME = '../../fish_detective/bags/2016-04-06-16-57-50.bag' #1437
     # BAG_NAME = '../../fish_detective/training/bags/three_fish.bag'
     bag = rosbag.Bag(BAG_NAME)
     imgs = self.get_ros_img(bag)
     zero = next(imgs)
     # import IPython; IPython.embed()
     ft.find_holes(zero)
     fishes = ft.track_holes(zero)
     cv2.namedWindow("Calibration")
     cv2.startWindowThread()
     cv2.imshow('Calibration', fishes)
     k = cv2.waitKey(0)
     one = next(imgs)
     ft.find_holes(one)
     fishes = ft.track_holes(one)
     cv2.imshow('Calibration', fishes)
     k = cv2.waitKey(0)
     for i in range(60):
         im = next(imgs)
         ft.find_holes(im)
         fishes = ft.track_holes(im)
         cv2.imshow('Calibration', fishes)
         k = cv2.waitKey(1)
     cv2.destroyWindow('Calibration')
Example #28
def get_hls_range(img, winname=None):
    if winname is None:
        winname = 'Choose HLS bounds'

    h, w, _ = img.shape
    f = 720.0 / h
    img_720p = cv2.resize(img, dsize=None, fx=f, fy=f)

    def on_trackbar_changed(x):
        lowerb = tuple(
            cv2.getTrackbarPos(ch + '_min', winname) for ch in 'HLS')
        upperb = tuple(
            cv2.getTrackbarPos(ch + '_max', winname) for ch in 'HLS')
        out = img_util.apply_mask_hls(img_720p, lowerb, upperb)
        cv2.imshow(winname, out)

    cv2.imshow(winname, img_720p)
    for ch in 'HLS':
        cv2.createTrackbar(ch + '_min', winname, 0, 255, on_trackbar_changed)
        cv2.createTrackbar(ch + '_max', winname, 255, 255, on_trackbar_changed)

    while True:
        cv2.waitKey()
        lowerb = tuple(
            cv2.getTrackbarPos(ch + '_min', winname) for ch in 'HLS')
        upperb = tuple(
            cv2.getTrackbarPos(ch + '_max', winname) for ch in 'HLS')
        cv2.destroyWindow(winname)
        return lowerb, upperb
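Because waitKey() blocks until a keypress and the function then returns immediately, the loop body effectively runs once. A usage sketch (the path is a placeholder):

lowerb, upperb = get_hls_range(cv2.imread('road.jpg'))  # placeholder path
print('HLS range:', lowerb, upperb)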
Example #29
def ExpansionVertical(img, punto, direccion):
	punto = Punto(punto.x, punto.y)
	puntos = []
	puntos += ExpansionVerticalSimple(img, punto, punto.y - 1, punto.y -23, direccion)[::-1] # reversed
	puntos += [punto]	
	puntos += ExpansionVerticalSimple(img, punto, punto.y + 1, punto.y +23, direccion)
	
	linea = Linea(punto)
	linea.puntos = puntos

	if debug:
		imagen = img.copy()
		imagen = cv2.cvtColor(imagen, cv2.COLOR_GRAY2BGR)
		for punto in puntos:
			cv2.circle(imagen,(punto.x,punto.y),1,(0,0,255),-1)
		cv2.imshow("debug",imagen)
		esperar()
		cv2.destroyWindow("debug")

	if len(puntos) < 35:
		# Assume a discontinuity
		return False, linea
	else:
		linea.recta, linea.error = minimosCuadrados(puntos)
		if linea.error > 0.5:
			return False, linea

	return True, linea
Example #30
def create_process_window(img):
    window_name = "res"    
    cv2.namedWindow(window_name)
    createBars(window_name,img)
    process(img)
    cv2.waitKey(-1)
    cv2.destroyWindow(window_name)
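createBars and process are defined elsewhere; a minimal pair consistent with this call site (assumed behavior: one trackbar driving a binary threshold):

def process(img, thresh=128):
    # hypothetical: show a thresholded view in the result window
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, out = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)
    cv2.imshow("res", out)

def createBars(window_name, img):
    # hypothetical: re-run process whenever the slider moves
    cv2.createTrackbar("thresh", window_name, 128, 255,
                       lambda v: process(img, v))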
Example #31
    def run(self):
        """Will run the tracking program on the video from vid_src."""
        running = True
        cv2.namedWindow("Drone Camera")
        while running:
            image = self.drone.image.copy()
            red, green, blue = cv2.split(image)
            image = cv2.merge((blue, green, red))
            self.currFrame = image
            x = cv2.waitKey(33)
            if x != -1:
                print("User override")
                key = chr(x & 255)
                if key == 't':
                    timestamp = datetime.now().strftime('%Y-%m-%d-%H%M%S')
                    filename = "cap-" + timestamp + ".jpg"
                    path = ".." + os.path.join(os.sep, "res", "captures",
                                               filename)
                    print(path)
                    cv2.imwrite(path, image)
                    print("Image Saved")
                elif key in {'i', 'I'}:
                    self.drone.move_up()
                    time.sleep(0.3)
                    self.drone.hover()
                elif key in {'k', 'K'}:
                    self.drone.move_down()
                    time.sleep(0.3)
                    self.drone.hover()
                elif key in {'j', 'J'}:
                    self.drone.turn_left()
                    time.sleep(0.3)
                    self.drone.hover()
                elif key in {'l', 'L'}:
                    self.drone.turn_right()
                    time.sleep(0.3)
                    self.drone.hover()
                elif key in {'w', 'W'}:
                    self.drone.move_forward()
                    time.sleep(0.3)
                    self.drone.hover()
                elif key in {'s', 'S'}:
                    self.drone.move_backward()
                    time.sleep(0.3)
                    self.drone.hover()
                elif key in {'a', 'A'}:
                    self.drone.move_left()
                    time.sleep(0.3)
                    self.drone.hover()
                elif key in {'d', 'D'}:
                    self.drone.move_right()
                    time.sleep(0.3)
                    self.drone.hover()
                elif key == 'q' or key == ' ':
                    self.parent.quit()

            frame = self.update()
            cv2.imshow("Drone Camera", frame)

            with self.lock:
                running = self.running
        print("Quitting MCS")
        cv2.destroyWindow("Drone Camera")
        cv2.waitKey(10)
Example #32
def ImageProcessing(n_boards, board_w, board_h, board_dim):
    #Initializing variables
    board_n = board_w * board_h
    opts = []
    ipts = []
    npts = np.zeros((n_boards, 1), np.int32)
    intrinsic_matrix = np.zeros((3, 3), np.float32)
    distCoeffs = np.zeros((5, 1), np.float32)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)

    # prepare object points based on the actual dimensions of the calibration board
    # like (0,0,0), (25,0,0), (50,0,0) ....,(200,125,0)
    objp = np.zeros((board_h * board_w, 3), np.float32)
    objp[:, :2] = np.mgrid[0:(board_w * board_dim):board_dim,
                           0:(board_h * board_dim):board_dim].T.reshape(-1, 2)

    #Loop through the images.  Find checkerboard corners and save the data to ipts.
    for i in range(1, n_boards + 1):

        #Loading images
        print('Loading... Calibration_Image' + str(i) + '.png')
        image = cv2.imread('Calibration_Image' + str(i) + '.png')
        if image is not None:
            #Converting to grayscale
            grey_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

            #Find chessboard corners
            found, corners = cv2.findChessboardCorners(
                grey_image, (board_w, board_h),
                cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_NORMALIZE_IMAGE)
            print(found)

            if found == True:

                #Add the "true" checkerboard corners
                opts.append(objp)

                #Improve the accuracy of the checkerboard corners found in the image and save them to the ipts variable.
                cv2.cornerSubPix(grey_image, corners, (20, 20), (-1, -1),
                                 criteria)
                ipts.append(corners)

                #Draw chessboard corners
                cv2.drawChessboardCorners(image, (board_w, board_h), corners,
                                          found)

                #Show the image with the chessboard corners overlaid.
                cv2.imshow("Corners", image)

                char = cv2.waitKey(10)

    cv2.destroyWindow("Corners")

    print('')
    print('Finished processing images.')

    #Calibrate the camera
    print('Running Calibrations...')
    print(' ')
    ret, intrinsic_matrix, distCoeff, rvecs, tvecs = cv2.calibrateCamera(
        opts, ipts, grey_image.shape[::-1], None, None)

    #Save matrices
    print('Intrinsic Matrix: ')
    print(str(intrinsic_matrix))
    print(' ')
    print('Distortion Coefficients: ')
    print(str(distCoeff))
    print(' ')

    #Save data
    print('Saving data file...')
    np.savez('calibration_data',
             distCoeff=distCoeff,
             intrinsic_matrix=intrinsic_matrix)
    print('Calibration complete')

    #Calculate the total reprojection error.  The closer to zero the better.
    tot_error = 0
    for i in range(len(opts)):
        imgpoints2, _ = cv2.projectPoints(opts[i], rvecs[i], tvecs[i],
                                          intrinsic_matrix, distCoeff)
        error = cv2.norm(ipts[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
        tot_error += error

    print("total reprojection error: ", tot_error / len(opts))

    #Undistort Images

    #Scale the images and create a rectification map.
    newMat, ROI = cv2.getOptimalNewCameraMatrix(intrinsic_matrix,
                                                distCoeff,
                                                image_size,
                                                alpha=crop,
                                                centerPrincipalPoint=1)
    mapx, mapy = cv2.initUndistortRectifyMap(intrinsic_matrix,
                                             distCoeff,
                                             None,
                                             newMat,
                                             image_size,
                                             m1type=cv2.CV_32FC1)

    for i in range(1, n_boards + 1):

        #Loading images
        print('Loading... Calibration_Image' + str(i) + '.png')
        image = cv2.imread('Calibration_Image' + str(i) + '.png')
        if image is not None:

            # undistort
            dst = cv2.remap(image, mapx, mapy, cv2.INTER_LINEAR)

            cv2.imshow('Undistorted Image', dst)

            char = cv2.waitKey(0)

    cv2.destroyAllWindows()
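image_size and crop are globals that the undistortion step reads but this snippet never defines; values consistent with the calls above (an assumption):

# assumed module-level globals for the undistortion step
image_size = (640, 480)  # (width, height) of the calibration images
crop = 1                 # alpha for getOptimalNewCameraMatrix; 1 keeps all source pixels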
Example #33
started = False

while rval:
    if started == False:
        start = time.time()
        framesPast = 0
        started = True
    rval, frame = vc.read()
    framesPast = framesPast + 1
    if frame[240, 300][0] < 150 and frame[240, 300][1] < 150 and frame[
            240, 300][2] < 150 and switchingW == False:
        print(time.time() - start)
        ser.write(b'1')
        print(framesPast)
        switchingW = True
        started = False

    if frame[240, 300][0] > 150 and frame[240, 300][1] > 150 and frame[
            240, 300][2] > 150 and switchingW == True:
        print(time.time() - start)
        ser.write(b'0')
        print(framesPast)
        switchingW = False
        started = False
    # ser.write('2') debug command
    # print ser.readline() debug command
# cleanup
cv2.destroyWindow("preview")
vc.release()
ser.close()
Example #34
def detectCharsInPlates(listOfPossiblePlates):
    intPlateCounter = 0
    imgContours = None
    contours = []

    if len(listOfPossiblePlates) == 0:  # if list of possible plates is empty
        return listOfPossiblePlates  # return
    # end if

    # at this point we can be sure the list of possible plates has at least one plate

    for possiblePlate in listOfPossiblePlates:  # for each possible plate, this is a big for loop that takes up most of the function

        possiblePlate.imgGrayscale, possiblePlate.imgThresh = Preprocess.preprocess(
            possiblePlate.imgPlate
        )  # preprocess to get grayscale and threshold images

        if Main.showSteps == True:  # show steps ###################################################
            cv2.imshow("5a", possiblePlate.imgPlate)
            cv2.imshow("5b", possiblePlate.imgGrayscale)
            cv2.imshow("5c", possiblePlate.imgThresh)
        # end if # show steps #####################################################################

        # increase size of plate image for easier viewing and char detection
        possiblePlate.imgThresh = cv2.resize(possiblePlate.imgThresh, (0, 0),
                                             fx=1.6,
                                             fy=1.6)

        # threshold again to eliminate any gray areas
        thresholdValue, possiblePlate.imgThresh = cv2.threshold(
            possiblePlate.imgThresh, 0.0, 255.0,
            cv2.THRESH_BINARY | cv2.THRESH_OTSU)

        if Main.showSteps == True:  # show steps ###################################################
            cv2.imshow("5d", possiblePlate.imgThresh)
        # end if # show steps #####################################################################

        # find all possible chars in the plate,
        # this function first finds all contours, then only includes contours that could be chars (without comparison to other chars yet)
        listOfPossibleCharsInPlate = findPossibleCharsInPlate(
            possiblePlate.imgGrayscale, possiblePlate.imgThresh)

        if Main.showSteps == True:  # show steps ###################################################
            height, width, numChannels = possiblePlate.imgPlate.shape
            imgContours = np.zeros((height, width, 3), np.uint8)
            del contours[:]  # clear the contours list

            for possibleChar in listOfPossibleCharsInPlate:
                contours.append(possibleChar.contour)
            # end for

            cv2.drawContours(imgContours, contours, -1, Main.SCALAR_WHITE)

            cv2.imshow("6", imgContours)
        # end if # show steps #####################################################################

        # given a list of all possible chars, find groups of matching chars within the plate
        listOfListsOfMatchingCharsInPlate = findListOfListsOfMatchingChars(
            listOfPossibleCharsInPlate)

        if Main.showSteps == True:  # show steps ###################################################
            imgContours = np.zeros((height, width, 3), np.uint8)
            del contours[:]

            for listOfMatchingChars in listOfListsOfMatchingCharsInPlate:
                intRandomBlue = random.randint(0, 255)
                intRandomGreen = random.randint(0, 255)
                intRandomRed = random.randint(0, 255)

                for matchingChar in listOfMatchingChars:
                    contours.append(matchingChar.contour)
                # end for
                cv2.drawContours(imgContours, contours, -1,
                                 (intRandomBlue, intRandomGreen, intRandomRed))
            # end for
            cv2.imshow("7", imgContours)
        # end if # show steps #####################################################################

        if (len(listOfListsOfMatchingCharsInPlate) == 0
            ):  # if no groups of matching chars were found in the plate

            if Main.showSteps == True:  # show steps ###############################################
                print "chars found in plate number " + str(
                    intPlateCounter
                ) + " = (none), click on any image and press a key to continue . . ."
                intPlateCounter = intPlateCounter + 1
                cv2.destroyWindow("8")
                cv2.destroyWindow("9")
                cv2.destroyWindow("10")
                cv2.waitKey(0)
            # end if # show steps #################################################################

            possiblePlate.strChars = ""
            continue  # go back to top of for loop
        # end if

        for i in range(0, len(listOfListsOfMatchingCharsInPlate)
                       ):  # within each list of matching chars
            listOfListsOfMatchingCharsInPlate[i].sort(
                key=lambda matchingChar: matchingChar.intCenterX
            )  # sort chars from left to right
            listOfListsOfMatchingCharsInPlate[i] = removeInnerOverlappingChars(
                listOfListsOfMatchingCharsInPlate[i]
            )  # and remove inner overlapping chars
        # end for

        if Main.showSteps == True:  # show steps ###################################################
            imgContours = np.zeros((height, width, 3), np.uint8)

            for listOfMatchingChars in listOfListsOfMatchingCharsInPlate:
                intRandomBlue = random.randint(0, 255)
                intRandomGreen = random.randint(0, 255)
                intRandomRed = random.randint(0, 255)

                del contours[:]

                for matchingChar in listOfMatchingChars:
                    contours.append(matchingChar.contour)
                # end for

                cv2.drawContours(imgContours, contours, -1,
                                 (intRandomBlue, intRandomGreen, intRandomRed))
            # end for
            cv2.imshow("8", imgContours)
        # end if # show steps #####################################################################

        # within each possible plate, suppose the longest list of potential matching chars is the actual list of chars
        intLenOfLongestListOfChars = 0
        intIndexOfLongestListOfChars = 0

        # loop through all the vectors of matching chars, get the index of the one with the most chars
        for i in range(0, len(listOfListsOfMatchingCharsInPlate)):
            if len(listOfListsOfMatchingCharsInPlate[i]
                   ) > intLenOfLongestListOfChars:
                intLenOfLongestListOfChars = len(
                    listOfListsOfMatchingCharsInPlate[i])
                intIndexOfLongestListOfChars = i
            # end if
        # end for

        # suppose that the longest list of matching chars within the plate is the actual list of chars
        longestListOfMatchingCharsInPlate = listOfListsOfMatchingCharsInPlate[
            intIndexOfLongestListOfChars]

        if Main.showSteps == True:  # show steps ###################################################
            imgContours = np.zeros((height, width, 3), np.uint8)
            del contours[:]

            for matchingChar in longestListOfMatchingCharsInPlate:
                contours.append(matchingChar.contour)
            # end for

            cv2.drawContours(imgContours, contours, -1, Main.SCALAR_WHITE)

            cv2.imshow("9", imgContours)
        # end if # show steps #####################################################################

        possiblePlate.strChars = recognizeCharsInPlate(
            possiblePlate.imgThresh, longestListOfMatchingCharsInPlate)

        if Main.showSteps == True:  # show steps ###################################################
            print "chars found in plate number " + str(
                intPlateCounter
            ) + " = " + possiblePlate.strChars + ", click on any image and press a key to continue . . ."
            intPlateCounter = intPlateCounter + 1
            cv2.waitKey(0)
        # end if # show steps #####################################################################

    # end of big for loop that takes up most of the function

    if Main.showSteps == True:
        print "\nchar detection complete, click on any image and press a key to continue . . .\n"
        cv2.waitKey(0)
    # end if

    return listOfPossiblePlates
Example #35
def mouseDoubleClick(eX, eY, dragObj):
    if dragObj.active:

        if pointInRect(eX, eY, dragObj.outRect.x, dragObj.outRect.y, dragObj.outRect.w, dragObj.outRect.h):
            dragObj.returnflag = True
            cv2.destroyWindow(dragObj.wname)
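pointInRect is not shown; a helper matching its use above (assumed semantics: the point lies inside an x/y/width/height rectangle):

def pointInRect(pX, pY, rX, rY, rW, rH):
    # assumed helper: True when (pX, pY) falls inside the rectangle
    return rX <= pX <= rX + rW and rY <= pY <= rY + rH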
Example #36
import cv2
import numpy as np
import copy

eyes_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

img = cv2.imread("selfie.jpg")
original = copy.deepcopy(img)

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

eyes = eyes_cascade.detectMultiScale(gray, 1.1, 3)
for (x, y, w, h) in eyes:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

winName1 = "Original image"
winName2 = "Filtered image"
cv2.startWindowThread()

cv2.namedWindow(winName1)
cv2.namedWindow(winName2)

cv2.imshow(winName1, original)
cv2.imshow(winName2, img)

# pressing any key will break GUI loop and close window
cv2.waitKey(0)

cv2.destroyWindow(winName1)
cv2.destroyWindow(winName2)
Example #37
def _show_image(win, img, destroy=True):
    cv2.imshow(win, img)
    cv2.waitKey(0)
    if destroy:
        cv2.destroyWindow(win)
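A usage sketch (the path is a placeholder):

img = cv2.imread('debug.png')              # placeholder path
_show_image('step 1', img)                 # shows, waits for a key, closes
_show_image('step 2', img, destroy=False)  # leaves the window open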
Example #38
import numpy as np
import cv2

img = np.zeros((512, 512, 3), np.uint8)
cv2.line(img, (0, 0), (511, 511), (255, 0, 0), 5)
cv2.rectangle(img, (384, 0), (510, 128), (0, 255, 0), 3)
cv2.circle(img, (447, 63), 63, (0, 0, 255), -1)
cv2.ellipse(img, (256, 256), (100, 50), 0, 0, 188, 255, -1)
pts = np.array([[10, 5], [20, 30], [70, 20], [50, 10]], np.int32)
pts = pts.reshape((-1, 1, 2))
font = cv2.FONT_HERSHEY_SCRIPT_SIMPLEX
cv2.putText(img, 'OpenCV', (10, 500), font, 4, (255, 255, 255, 0), 2)
winname = 'example'
cv2.namedWindow(winname)
cv2.imshow(winname, img)
cv2.waitKey(0)
cv2.destroyWindow(winname)
Example #39
img = frame
while 1:
    cv2.imshow('image', img)

    # get current positions of trackbars
    h = cv2.getTrackbarPos('Lower_H', 'image')
    s = cv2.getTrackbarPos('Lower_S', 'image')
    v = cv2.getTrackbarPos('Lower_V', 'image')

    u_h = cv2.getTrackbarPos('Upper_H', 'image')
    u_s = cv2.getTrackbarPos('Upper_S', 'image')
    u_v = cv2.getTrackbarPos('Upper_V', 'image')

    it = cv2.getTrackbarPos('iter', 'image')

    lower_limit = np.array([h, s, v])
    upper_limit = np.array([u_h, u_s, u_v])

    aux = cv2.inRange(hsv, lower_limit, upper_limit)
    aux = cv2.erode(aux, None, iterations=it)
    aux = cv2.dilate(aux, None, iterations=it)
    img2 = cv2.bitwise_and(frame, frame, mask=aux)

    # original mais segmentada
    img = cv2.addWeighted(frame, 0.1, img2, 0.9, 0)

    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        cv2.destroyWindow('image')
        break
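This fragment polls seven trackbars that must already exist on the 'image' window, and it assumes frame and hsv come from an earlier capture and BGR-to-HSV conversion. A setup sketch (the callback is a no-op because values are read inside the loop):

cv2.namedWindow('image')
for name, maximum in [('Lower_H', 179), ('Lower_S', 255), ('Lower_V', 255),
                      ('Upper_H', 179), ('Upper_S', 255), ('Upper_V', 255),
                      ('iter', 10)]:
    cv2.createTrackbar(name, 'image', 0, maximum, lambda v: None)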
Example #40
def get_marginpos(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        ret = [x, y]
        margin_pos.append(ret)
        print(margin_pos)


cv2.namedWindow("image")
cv2.namedWindow("face")
cv2.setMouseCallback("image", get_marginpos)
# Load the meme background image and binarize it
emoji_back = cv2.imread("pics/back.jpg")
ret, emoji_back = cv2.threshold(emoji_back, 100, 255, cv2.THRESH_BINARY)
face = cv2.imread("pics_processed/20210205/wyh_face_binary.jpg")

# Collect the fill coordinates

cv2.imshow("image", face)
key = cv2.waitKey(0)
if key == 27:
    cv2.destroyWindow("image")

margin_area = np.array([margin_pos])
image_margin = cv2.fillPoly(face, [margin_area], (255, 255, 255))

cv2.imshow("face", image_margin)
key = cv2.waitKey(0)
if key == 27:
    cv2.destroyWindow("face")
Example #41
            # Each location contains positions in order: top, right, bottom, left
            top_left = (face_location[3], face_location[0])
            bottom_right = (face_location[1], face_location[2])

            # Get color by name using our fancy function
            color = name_to_color(match)

            # Paint frame
            cv2.rectangle(image, top_left, bottom_right, color,
                          FRAME_THICKNESS)

            # Now we need a smaller, filled frame below for the name
            # This time we use the bottom coordinate in both corners - starting at the bottom and extending 22 pixels down
            top_left = (face_location[3], face_location[2])
            bottom_right = (face_location[1], face_location[2] + 22)

            # Paint frame
            cv2.rectangle(image, top_left, bottom_right, color, cv2.FILLED)

            # Write a name
            cv2.putText(image, match,
                        (face_location[3] + 10, face_location[2] + 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (200, 200, 200),
                        FONT_THICKNESS)

    # Show image
    cv2.imshow(filename, image)
    cv2.waitKey(0)
    cv2.destroyWindow(filename)
Example #42
dir = os.getcwd()
fname = ut.m_name(dir)
cimg = ut.m_color(fname, 8000, 16000, pmin=0.01, pmax=0.97)
cv2.imshow('color', cimg)
print('now image "color" is xmin 8000 xmax 16000')
loop_flag = True
while loop_flag:
    xmin, xmax = input(
        'Please write xmin and xmax or, if you want to save, write xmin "0"  '
    ).split()
    x = [xmin, xmax]
    if xmin == '0':
        while True:
            YorN = input('save the image y/n?  ')
            if YorN == 'y':
                cv2.imwrite('../' + fname + '.tif', cimg)
                loop_flag = False
                break
            elif YorN == 'n':
                loop_flag = False
                break
            else:
                print('Please enter y/n')

    else:
        xmin = int(x[0])
        xmax = int(x[1])
        cimg2 = ut.m_color(fname, xmin, xmax, pmin=0.01, pmax=0.97)
        cv2.destroyWindow('color2')
        cv2.imshow('color2', cimg2)
Example #43
                
            cv2.circle(frame,(xlmh,ylmh),radius,(0,255,127),-1)

            #right middle
            xrmh=int(x+0.65*w)
            yrmh=int(y+0.07*h)
            radius=int(0.15*w)                        
                
            cv2.circle(frame,(xrmh,yrmh),radius,(240,32,160),-1)

            #middle 
            xmh=int(x+0.5*w)
            ymh=int(y+0.05*h)
            radius=int(0.15*w)                        
                
            cv2.circle(frame,(xmh,ymh),radius,(0,0,255),-1)

            

        cv2.namedWindow('Faces found')
        cv2.imshow("Faces found", frame)

        if cv2.waitKey(1)> 10:

            videoLoop = False
            cv2.destroyWindow('Faces found')
            break

cam.release()
cv2.destroyAllWindows()
Example #44
    import math, operator
    from functools import reduce
    from PIL import Image

    #Open image from the directories and returns it's histogram's
    for j in range(1, 104):
        h1 = Image.open(r"C:\Users\dell\Desktop\DATASET\shot" + str(j) +
                        ".jpg").histogram()
        h2 = Image.open(r"C:\Users\dell\Desktop\SHOTS\shot.jpg").histogram()

        #Finding rms value of the two images opened before
        rms = math.sqrt(
            reduce(operator.add, map(lambda a, b:
                                     (a - b)**2, h1, h2)) / len(h1))
        #print(int(rms))

        #If the RMS value of the images are under our limit
        if (rms < 450):
            print("Accident")
            cv2.imwrite(r"C:\Users\dell\Desktop\email\accident.jpg", frame)
            SendMail(r"C:\Users\dell\Desktop\email\accident.jpg")

    #Updates the frames
    t_minus = t
    t = t_plus
    t_plus = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)

    #Destroys the window after key press
    key = cv2.waitKey(10)
    if key == ord("q"):
        cv2.destroyWindow(winName)
        break
Example #45
 def __del__(self):
     cv2.destroyWindow(self._window_name)
Example #46
        ret, frame = capture.read()
        if ret == True:
            cv2.imshow('My Camera', frame)
        key = cv2.waitKey(10)
        if key == 27:
            flag = 1
            break
        if key == ord('x'):

            filename = "face.jpg"
            cv2.imwrite(filename, frame)
            print('Authenticating...')
            break

    del (capture)
    cv2.destroyWindow("camera")
    if flag == 0:
        url = 'https://api-cn.faceplusplus.com/facepp/v3/search'
        payload = {
            'api_key': 'XXX',
            'api_secret': 'XXX',
            'faceset_token': '7f0cc482207099671fc446129b8453ce'
        }
        files = {'image_file': open('face.jpg', 'rb')}
        r = requests.post(url, files=files, data=payload)
        data = json.loads(r.text)
        print(data)
        print(r.text)
        # print(data["results"][0]["face_token"]=='dc83bd034ef87241e8fcb99bf088ed5f')
        # if data["results"][0]["face_token"] == "ba24b26bc85ce3048a4e0b6ebee4acb2" and data["results"][0]["confidence"]>=data["thresholds"]["1e-5"]:
        if data["results"][0][
Example #47
    def trackDrone(self):

        # Start video thread
        self.read_video_thread.start()

        # Reset the counter so the time from when the sync light is detected is the same for both videos
        self.current_frame = 0

        frame = self.frame_queue.get(block=True)

        # Here I resize the frame to 30% of its size, so the entire frame can be displayed on the screen
        # The 4K footage is too large for my laptop screen, so this scales it down
        resizedFrame = self.rescale_frame(frame, 30)
        # Ask the user to draw a box around the drone
        bbox = cv2.selectROI("Select Drone", resizedFrame, False)
        # Map the bounding box from the 30% preview back to full resolution (1 / 0.30 ≈ 3.33)
        bbox = self.resize_bbox(bbox, 3.33)

        cv2.destroyWindow("Select Drone")

        # Initialize tracker with first frame and bounding box
        ok = self.tracker.init(frame, bbox)

        # This while loop will run until the footage is finished, processing all of the drone footage
        while True:
            # Check if queue is empty but the read thread for the video is not done
            if self.frame_queue.empty() and not self.done_reading:
                # If so, sleep for a second for the producer to catch up and try again
                time.sleep(1)
                print("slept for 1 second")
                continue

            # Check if queue is empty and the read thread for video 1 is done
            if self.frame_queue.empty() and self.done_reading:
                # If so, destroy the window and break out of loop
                cv2.destroyWindow("Tracking")
                print("done tracking this drone at " + str(tm) + " seconds")
                break

            # Get frame from the frame queue
            frame = self.frame_queue.get(block=True)

            # Start timer
            timer = cv2.getTickCount()

            # Update tracker
            ok, bbox = self.tracker.update(frame)

            # Calculate Frames per second (FPS)
            fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

            tm = self.current_frame / 30

            # Draw bounding box
            if ok:
                # Tracking success
                p1 = (int(bbox[0]), int(bbox[1]))
                p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
                if self.current_frame % 15 == 0:
                    x_coord = bbox[0] + bbox[2] / 2
                    y_coord = bbox[1] + bbox[3] / 2
                    print("drone coordinate at time " + str(tm) + ": [" +
                          str(x_coord) + ", " + str(y_coord) + "] (" +
                          str((x_coord / 3860) * 15) + ", " +
                          str((y_coord / 2160) * 10) + ")")
                    self.data_points.append((x_coord, y_coord, tm))
                cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)

            else:
                # Tracking failure
                cv2.putText(frame, "Tracking failure detected", (100, 80),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

                skip = 5

                # This will loop until the drone is reselected, and creates a new tracker that will
                # continue to track the drone
                while True:
                    # Rescale the frame so the user can see the entire frame to reselect the drone
                    resizedFrame = self.rescale_frame(frame, 30)
                    # Ask the user to draw a box around the drone
                    bbox = cv2.selectROI("Reselect Drone", resizedFrame, False)
                    # Map the bounding box from the 30% preview back to full resolution
                    bbox = self.resize_bbox(bbox, 3.33)
                    # Close the ROI selection window
                    cv2.destroyWindow("Reselect Drone")
                    # This will skip 1 second of video if the drone is out of frame,
                    # which is detected if the user presses escape on the selectROI function
                    if bbox[0] == 0.0 and bbox[1] == 0.0 and bbox[
                            2] == 0.0 and bbox[3] == 0.0:

                        print("skipping " + str(skip) + " frames")
                        for i in range(1, skip):
                            if self.current_frame % 15 == 0:
                                tm = self.current_frame / 30
                                self.data_points.append((None, None, tm))
                            # If the video has totally been processed, exit the function
                            # Kind of a hacky way to do it, but I need to break out of 3 loops here
                            # so returning is easier than functionalizing the small parts of this giant function
                            if self.frame_queue.empty() and self.done_reading:
                                self.read_video_thread.join()
                                return self.data_points
                            frame = self.frame_queue.get(block=True)
                            self.current_frame += 1

                        if skip == 5:
                            skip = 15
                        elif skip == 15:
                            skip = 30
                        elif skip == 30:
                            skip = 60
                        elif skip == 60:
                            skip = 90
                        # else:
                        #     skip += 30

                    # If the user selected a bounding box, create a new tracker
                    # and continue to track the drone
                    else:
                        # Creates a new KCF tracker (this is due to the update function not working
                        # the same in python as it does in C++: in C++, we could simply update the
                        # tracker with the new bounding box instead of creating a new one, but this
                        # is a known limitation in python's implementation of OpenCV and this is my workaround)
                        self.tracker = cv2.TrackerKCF_create()
                        # Initialize the tracker with the newly selected bounding box
                        ok = self.tracker.init(frame, bbox)
                        break

            # Display tracker type on frame
            cv2.putText(frame, self.tracker_type + " Tracker", (100, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

            # Display FPS on frame
            cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

            resizedFrame = self.rescale_frame(frame, 20)

            # Display result
            cv2.imshow("Tracking", resizedFrame)

            self.current_frame += 1

            # Exit if ESC pressed
            k = cv2.waitKey(1) & 0xff
            if k == 27:
                break

            # Allows the user to reset the bounding box in the case that the tracker
            # begins to track a different object or isn't tracking the drone accurately
            # enough
            elif "r" == chr(k):
                # Rescale the frame so the user can see the entire frame to reselect the drone
                resizedFrame = self.rescale_frame(frame, 40)
                # Ask the user to draw a box around the drone
                bbox = cv2.selectROI("Reselect Drone", resizedFrame, False)
                # Reposition the bounding box from 40% resolution to 100% resolution
                bbox = self.resize_bbox(bbox, 2.5)
                # Close the ROI selection window
                cv2.destroyWindow("Reselect Drone")

                # Creates a new KCF tracker (this is due to the update function not working
                # the same in python as it does in C++: in C++, we could simply update the
                # tracker with the new bounding box instead of creating a new one, but this
                # is a known limitation in python's implementation of OpenCV and this is my workaround)
                self.tracker = cv2.TrackerKCF_create()
                # Initialize the tracker with the newly selected bounding box
                ok = self.tracker.init(frame, bbox)

        self.read_video_thread.join()

        return self.data_points
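# trackDrone relies on two helpers that are not shown in this excerpt. Below is
# a minimal sketch, written as free functions, consistent with how they are
# called (a percent-based downscale for display, and a reciprocal factor to map
# the ROI back to full resolution); the bodies are assumptions, not the
# original code.
def rescale_frame(frame, percent):
    """Resize a frame to `percent`% of its original size (display only)."""
    width = int(frame.shape[1] * percent / 100)
    height = int(frame.shape[0] * percent / 100)
    return cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)

def resize_bbox(bbox, factor):
    """Scale an (x, y, w, h) box drawn on the downscaled preview back to
    full resolution, e.g. factor 3.33 for a 30% preview (1 / 0.30)."""
    return tuple(v * factor for v in bbox)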
Ejemplo n.º 48
0
import cv2
import dlib
import get_points  # local helper module used below


def run(source=0, dispLoc=False):
    # Create the VideoCapture object
    cam = cv2.VideoCapture(source)

    # If Camera Device is not opened, exit the program
    if not cam.isOpened():
        print("Video device or file couldn't be opened")
        exit()

    print("Press key `p` to pause the video to start tracking")
    while True:
        # Retrieve an image and Display it.
        retval, img = cam.read()
        if not retval:
            print("Cannot capture frame device")
            exit()
        if (cv2.waitKey(10) == ord('p')):
            break
        cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
        cv2.imshow("Image", img)
    cv2.destroyWindow("Image")

    # Co-ordinates of objects to be tracked
    # will be stored in a list named `points`
    points = get_points.run(img, multi=True)

    if not points:
        print("ERROR: No object to be tracked.")
        exit()

    cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
    cv2.imshow("Image", img)

    # Initial co-ordinates of the object to be tracked
    # Create the tracker object
    tracker = [dlib.correlation_tracker() for _ in range(len(points))]
    # Provide the tracker the initial position of the object
    [
        tracker[i].start_track(img, dlib.rectangle(*rect))
        for i, rect in enumerate(points)
    ]

    while True:
        # Read frame from device or file
        retval, img = cam.read()
        if not retval:
            print("Cannot capture frame device | CODE TERMINATION :( ")
            exit()
        # Update the tracker
        for i in range(len(tracker)):
            tracker[i].update(img)
            # Get the position of the object, draw a
            # bounding box around it and display it.
            rect = tracker[i].get_position()
            pt1 = (int(rect.left()), int(rect.top()))
            pt2 = (int(rect.right()), int(rect.bottom()))
            cv2.rectangle(img, pt1, pt2, (255, 255, 255), 3)
            print "Object {} tracked at [{}, {}] \r".format(i, pt1, pt2),
            if dispLoc:
                loc = (int(rect.left()), int(rect.top() - 20))
                txt = "Object tracked at [{}, {}]".format(pt1, pt2)
                cv2.putText(img, txt, loc, cv2.FONT_HERSHEY_SIMPLEX, .5,
                            (255, 255, 255), 1)
        cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
        cv2.imshow("Image", img)
        # Continue until the user presses ESC key
        if cv2.waitKey(1) == 27:
            break

    # Release the VideoCapture object
    cam.release()
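# get_points is an external helper that is not shown here. A rough equivalent
# using cv2.selectROIs (the name and default behavior below are assumptions):
# it lets the user draw one or more boxes and returns them as
# (left, top, right, bottom) tuples, the layout dlib.rectangle(*rect) expects.
def select_objects(img, window="Select objects"):
    boxes = cv2.selectROIs(window, img, False)
    cv2.destroyWindow(window)
    return [(int(x), int(y), int(x + w), int(y + h)) for (x, y, w, h) in boxes]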
Ejemplo n.º 49
0
import cv2
import numpy as np
import anki_vector
from anki_vector.util import degrees

num_frames = 0  # assumed initialization; the original fragment starts at the next line

with anki_vector.Robot() as robot:
    robot.camera.init_camera_feed()
    robot.behavior.set_head_angle(degrees(25.0))
    robot.behavior.set_lift_height(0.0)
    while robot.camera.image_streaming_enabled():
        # Perform some pre-processing steps to minimize noise and simplify the processing steps
        img = cv2.cvtColor(np.array(robot.camera.latest_image.raw_image),
                           cv2.COLOR_RGB2BGR)
        img = cv2.flip(img, 1)
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)
        #Select the ROI and average its background over the first 40 frames
        if num_frames < 40:
            if num_frames == 0:
                # note: cv2.selectROI returns (x, y, w, h), so x2/y2 here are really width/height
                x1, y1, x2, y2 = cv2.selectROI('ROI selector', img, False)
                cv2.destroyWindow('ROI selector')
                print('Calibrating Background...\nDo not move the camera')
            gray = gray[int(y1):int(y1 + y2), int(x1):int(x1 + x2)]
            get_background(gray)
            num_frames += 1
            continue
        gray = gray[int(y1):int(y1 + y2), int(x1):int(x1 + x2)]
        cv2.rectangle(img, (x1 + x2, y1), (x1, y1 + y2), (0, 255, 0), 2)
        #Perform the background subtraction so we can extract the hand
        mask, hand_contour = background_sub(gray)
        #Check to see if the area is a certain size, otherwise it will recognize random noise
        if hand_contour is not None and hand_contour.sum() > 11000:
            #Identify the finger contours using the circle technique described in this paper: http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.454.3689&rep=rep1&type=pdf
            finger_contours, y_bar, circumference = get_finger_contours(
                hand_contour)
            #Count the finger contours that cover at least 25% of the circle's circumference, excluding the wrist
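# get_background and background_sub are not shown in this excerpt. A common
# implementation, sketched here as an assumption, keeps a running average of
# the ROI with cv2.accumulateWeighted and thresholds the absolute difference:
bg = None  # running-average background model

def get_background(gray, alpha=0.5):
    # accumulate a running average of the (grayscale, blurred) ROI
    global bg
    if bg is None:
        bg = gray.copy().astype("float")
        return
    cv2.accumulateWeighted(gray, bg, alpha)

def background_sub(gray, threshold=25):
    # diff the current ROI against the background model; return the binary
    # mask and the largest contour (assumed to be the hand), if any
    diff = cv2.absdiff(bg.astype("uint8"), gray)
    mask = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1]
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return mask, None
    return mask, max(contours, key=cv2.contourArea)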
Ejemplo n.º 50
0
    def showfrm(self):
        # initializing file variable
        self.fi = open('D:/Folder/main/!!Nirma/Sem_4/PSC/project/log.txt', 'a')

        # Log entry
        x = datetime.datetime.now()
        ope = 'Started'
        stri = '\n' + str(x.date()) + ' ' + str(x.hour) + ':' + str(x.minute) + ':' + str(x.second) + ' ' + ope
        self.fi.write(stri)

        # Log Entry
        ope = 'WebCam On'
        stri = '\n' + str(x.date()) + ' ' + str(x.hour) + ':' + str(x.minute) + ':' + str(x.second) + ' ' + ope
        self.fi.write(stri)

        # Method call to make elements visible
        self.AllShow()

        # Hide the button which will not be in use now
        self.btstart.hide()

        # display the button which will be used now on
        self.bt5.show()

        # initializing the file to determine parts of face
        p = "C:/Users/dhruv/AppData/Local/Programs/Python/Python38/shape_predictor_68_face_landmarks.dat"

        # Returns the default face detector.
        detector = dlib.get_frontal_face_detector()

        # takes a face image as input and returns the set of point locations that define the face
        predictor = dlib.shape_predictor(p)

        # cv2  method to access the WebCam, will not work if the WebCam is occupied
        cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)

        # haar file that contains the features of face
        face_cascade = cv2.CascadeClassifier("C:/Users/dhruv/AppData/Local/Programs/Python/Python38/Cascade/haarcascade_frontalface_default.xml")
        while True:
            #read the image from WebCam
            check, freeframe = cap.read()

            # use copy() to create a copy of the WebCam frame; plain '=' would
            # only make the second variable point at the same frame object
            frame = copy.copy(freeframe)

            # converting to gray image
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            # returns an array of rectangles where faces were detected
            rects = detector(gray, 0)
            # again copying the frame
            image = copy.copy(frame)

            # face detection by comparing the image with the Haar features
            # if face detection is inaccurate, try readjusting scaleFactor between 1.0 and 2.0
            faces = face_cascade.detectMultiScale(frame, scaleFactor=1.2,minNeighbors=5)

            # draw rectangle around the face
            for x, y, w, h in faces:
                frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (self.b, self.g, self.r), 3)

            for (i, rect) in enumerate(rects):
                shape = predictor(gray, rect)

                # convert the facial landmarks returned by the predictor into a NumPy array via face_utils
                shape = face_utils.shape_to_np(shape)
                # print(shape)

                for (x, y) in shape:

                    # adjust startind & endind to select the landmark indices of the desired face part
                    if self.startind < self.dotcount <= self.endind:

                        # draw dot/point
                        cv2.circle(image, (x, y), 2, (self.b, self.g, self.r), -1)
                    else:
                        pass
                    #loop counter
                    self.dotcount += 1

                self.dotcount = 1

            # condition checking
            if self.showsq == 0 and self.showdot == 0:
                cv2.imshow("WebCam", freeframe)
                cv2.destroyWindow("Mapping")
                cv2.destroyWindow("Detection")
            else:
                cv2.destroyWindow("WebCam")
                if self.showdot == 1:
                    cv2.imshow("Mapping", image)  # face maping
                    self.captureframeDot = image

                    # enabling the required elements
                    self.pbMouth.setEnabled(True)
                    self.pbEyeBrows.setEnabled(True)
                    self.pbEyes.setEnabled(True)
                    self.pbNose.setEnabled(True)
                    self.pbJaw.setEnabled(True)
                    self.sqRed.setEnabled(True)
                    self.sqGreen.setEnabled(True)
                    self.sqBlue.setEnabled(True)

                else:
                    # disabling the non required elements
                    cv2.destroyWindow("Mapping")
                    self.pbMouth.setEnabled(False)
                    self.pbEyeBrows.setEnabled(False)
                    self.pbEyes.setEnabled(False)
                    self.pbNose.setEnabled(False)
                    self.pbJaw.setEnabled(False)
                    if self.showsq == 0:
                        self.sqRed.setEnabled(False)
                        self.sqGreen.setEnabled(False)
                        self.sqBlue.setEnabled(False)

                if self.showsq == 1:
                    cv2.imshow("Detection", frame)  # face detection
                    self.captureframeSq = frame

                    # enabling the required elements
                    self.sqRed.setEnabled(True)
                    self.sqGreen.setEnabled(True)
                    self.sqBlue.setEnabled(True)
                else:
                    cv2.destroyWindow("Detection")
                    if self.showdot == 0:
                        self.sqRed.setEnabled(False)
                        self.sqGreen.setEnabled(False)
                        self.sqBlue.setEnabled(False)

            if self.showsq == 0 and self.showdot == 0:
                # disabling the non required elements
                self.pbMouth.setEnabled(False)
                self.pbEyeBrows.setEnabled(False)
                self.pbEyes.setEnabled(False)
                self.pbNose.setEnabled(False)
                self.pbJaw.setEnabled(False)
                self.sqRed.setEnabled(False)
                self.sqGreen.setEnabled(False)
                self.sqBlue.setEnabled(False)

            # check whether each preview window still exists (the user may have closed it)
            if cv2.getWindowProperty("Mapping", 0) == -1.0:
                self.showdot = 0

            if cv2.getWindowProperty("Detection", 0) == -1.0:
                self.showsq = 0

            cv2.waitKey(1)
            # exit condition
            if self.counter == 0:
                # print('Q pressed')
                break
        # releasing the window
        cap.release()
        cv2.destroyAllWindows()
        self.counter = 1
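# For reference, startind/endind above select a slice of dlib's standard
# 68-point landmark model. The index ranges below are the model's fixed
# convention (0-based, end-exclusive), which the part-selection buttons
# presumably use:
FACE_PARTS = {
    "jaw":           (0, 17),
    "right_eyebrow": (17, 22),
    "left_eyebrow":  (22, 27),
    "nose":          (27, 36),
    "right_eye":     (36, 42),
    "left_eye":      (42, 48),
    "mouth":         (48, 68),
}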
Ejemplo n.º 51
0
import cv2

faceCascade = cv2.CascadeClassifier("./haarcascade_frontalface_default.xml")
img = cv2.imread('./lena.png')
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

faces = faceCascade.detectMultiScale(imgGray, 1.1, 4)

for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

cv2.imshow("Result", img)
cv2.waitKey(0)
cv2.destroyWindow("Result")
Ejemplo n.º 52
0
def show_cv(img, name='image'):
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(name, 600, 400)
    cv2.imshow(name, img)
    cv2.waitKey()
    cv2.destroyWindow(name)
Ejemplo n.º 53
0
        roi_color=frame[y:y+h,x:x+w]
        id_,conf=recognizer.predict(roi_gray)
        print(conf)
        #load both overlay images up front so the else branch never sees an undefined img2
        img=cv2.imread('C:/Users/dania/Desktop/python/Face Recognition/Smile.png',1)
        img2=cv2.imread('C:/Users/dania/Desktop/python/Face Recognition/Poker.png',1)
        if conf>=67:
            #print(id_)
            print(labels[id_])
            cv2.imshow('smile',img)
            font=cv2.FONT_HERSHEY_SIMPLEX
            name=labels[id_]
            color=(255,255,255)
            stroke=2
            cv2.putText(frame,name,(x,y),font,1,color,stroke,cv2.LINE_AA)
            cv2.destroyWindow('Poker')
        else:
            cv2.destroyWindow('smile')
            cv2.imshow('Poker',img2)
        img_item="my-image1.png"
       
        cv2.imwrite(img_item,roi_color)

        color=(255,0,0) ##BGR
        stroke=2
        width=x+w
        height=y+h
        #print(str(width)+" "+str(height))
        cv2.rectangle(frame,(x,y),(width,height),color,stroke)
    cv2.imshow('frame',frame)
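# recognizer.predict above assumes a previously trained model. A minimal
# training sketch using opencv-contrib's cv2.face module; the data layout
# (grayscale face crops with parallel integer labels) is an assumption:
import cv2
import numpy as np

def train_lbph(faces, ids, model_path="trainer.yml"):
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.train(faces, np.array(ids))  # faces: list of uint8 grayscale crops
    recognizer.save(model_path)             # later: recognizer.read(model_path)
    return recognizer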
Ejemplo n.º 54
0
for file in backgrounds_files:
	current_file = file
	img = cv2.imread('Backgrounds/'+ file)
	img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
	backgroud_images.append([img, file])
	# img = cv2.resize(img, None,fx=0.5, fy=0.5, interpolation = cv2.INTER_CUBIC)
	# img = np.zeros((512,512,3), np.uint8)
	if file not in overlay_loc or force_mark:
		cv2.namedWindow(file)
		cv2.setMouseCallback(file,get_point)

		# show the image once, wait for a key press, then close the window
		cv2.imshow(file, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
		cv2.waitKey(0)
		cv2.destroyWindow(file)

f = open("overlay_locations.p","wb")
pickle.dump(overlay_loc,f)
f.close()

gif = imageio.mimread("delaware.gif")
duration = Image.open("delaware.gif").info['duration']
# print(len(gif)/Image.open("delaware.gif").info['duration'])

nums = len(gif)

# new_img = img

Ejemplo n.º 55
0
def show(img):
    cv2.imshow('opencv_sift_function', img)
    cv2.waitKey(0)
    cv2.destroyWindow('opencv_sift_function')
Ejemplo n.º 56
0
while True:
    ret, image = video.read()

    locations = face_recognition.face_locations(image, model=MODEL) # detect all faces on one image
    encodings = face_recognition.face_encodings(image, locations)

    for face_encoding, face_location in zip(encodings, locations):
        results = face_recognition.compare_faces(known_faces,face_encoding, TOLERANCE)
        match = None
        if True in results:
            match = known_names[results.index(True)]
            print(f"Match found: {match}")

            top_left = (face_location[3], face_location[0])
            bottom_right = (face_location[1], face_location[2])
            color = [0,255,0]
            cv2.rectangle(image, top_left, bottom_right, color, FRAME_THICKNESS)
            
            top_left = (face_location[3], face_location[2])
            bottom_right = (face_location[1], face_location[2]+22)
            cv2.rectangle(image, top_left, bottom_right, color, cv2.FILLED)
            cv2.putText(image, match, (face_location[3]+10, face_location[2]+15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (200,200,200),FONT_THICKNESS)

    cv2.imshow("FaceRecognition", image)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        cv2.destroyWindow("FaceRecognition")
        break


Ejemplo n.º 57
0
    
    key = cv2.waitKey(20)
    if key == 27: # exit on ESC
        break
#        depthimagefile+=1;
#        scaler = 255
#        depthArray = depthArray*scaler
#        filename = 'C:/ADEV/WinPython-64bit-2.7.10.3/python-2.7.10.amd64/Scripts/ADEV/depthImages/' + str(depthimagefile) + '.tiff'
#        cv2.imwrite(filename, depthArray)

cv2.destroyWindow("Left")
cv2.destroyWindow("Right")
cv2.destroyWindow('DepthImage')

#    for(pixel in depthArray):
#        pixel = 
#    	A = ((xvalue/depth.getCols())*depthMaxHorzAngle)-90
#    	#if distance values aren't along the hypotenuse add next line
#    	#x = pixel*cos()
#    	#otherwise
#    	x=pixel
#    
#    	# for left webcam
#    	if(A>0):
#    		C=A+90
#    	else:
Ejemplo n.º 58
0
 def close(self):
     self.cam.release()
     cv2.destroyWindow(self.window_name)
Ejemplo n.º 59
0
import cv2
import numpy as np
import pyautogui

capture = cv2.VideoCapture(0)  # assumed source; the original fragment starts below

red_lower = np.array([136, 87, 111])
red_upper = np.array([180, 255, 255])
prev_y = 0

while True:
    rate, frame = capture.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, red_lower, red_upper)
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)

    for c in contours:
        area = cv2.contourArea(c)
        if area > 300:
            x, y, w, h = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            if y < prev_y:
                pyautogui.press('space')

            prev_y = y

    cv2.imshow('Mask', mask)
    cv2.imshow("Frame", frame)

    if cv2.waitKey(10) == ord('c'):
        break

capture.release()
cv2.destroyAllWindows()
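# Note: in OpenCV's HSV space red wraps around hue 0, so a single 136-180 band
# can miss low-hue reds. A common refinement, sketched here as an assumption,
# ORs a low band and a high band:
def red_mask(hsv):
    lower = cv2.inRange(hsv, np.array([0, 87, 111]), np.array([10, 255, 255]))
    upper = cv2.inRange(hsv, np.array([136, 87, 111]), np.array([180, 255, 255]))
    return cv2.bitwise_or(lower, upper)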
Ejemplo n.º 60
0
def cv2ShowWait(img, title='Image'):
    cv2.imshow(title, img)
    cv2.waitKey(0)
    cv2.destroyWindow(title)