Example #1
def start():
    '''Probe for connected cameras behind a splash screen, then launch the main GUI.'''
    #Load splash screen
    splScr = splash()
    found = []
    #find connected cameras        
    for num in range(10):
        cam = VideoCapture(num)
        # VideoCapture(num) already opened the device, so no explicit open() call is needed
        #show progress bar 'movement' while the main program finds cameras
        splScr.update()
        if not cam.read()[0]:
            del cam
        else:
            cam.release()
            found.append(num)
        while gtk.events_pending():
            gtk.main_iteration()
    #destroy splash screen when all cameras are found
    splScr.destroy()
    print 'connected cameras:', len(found)
    #run main program
    main_gui(found)
    gtk.main()
    return
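A minimal, self-contained sketch of the same probing idea without the GTK splash-screen plumbing (splash() and main_gui() above are project-specific helpers); it assumes only cv2, checks each index with isOpened() plus a test read, and releases every handle:

import cv2

def find_cameras(max_index=10):
    """Return the indices of camera devices that actually deliver frames."""
    found = []
    for num in range(max_index):
        cam = cv2.VideoCapture(num)
        if cam.isOpened() and cam.read()[0]:
            found.append(num)
        cam.release()
    return found

print('connected cameras:', len(find_cameras()))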
Example #2
class Camera(object):

    def get_settings(self):
        if not hasattr(self, '_video_capture'):
            raise Exception("Start video capture before getting settings")
        settings = []
        for prop in global_camera_properties:
            prop_value = self._video_capture.get(prop['value'])
            if prop_value >= 0:
                settings.append({'name': prop['name'], 'value': prop_value})
        return settings

    def set_setting(self, setting, value):
        if not hasattr(self, '_video_capture'):
            raise Exception("Start video capture before setting a setting")
        # a list comprehension instead of filter(), so len() also works on Python 3
        matches = [p for p in global_camera_properties if p['name'] == setting]
        if len(matches) == 1:
            setting_id = matches[0]['value']
        else:
            raise Exception("Setting {} not available".format(setting))
        self._video_capture.set(setting_id, value)

    def read(self):
        (retVal, image) = self._video_capture.read()
        return image

    def start(self):
        self._video_capture = VideoCapture(0)
        self.shape = self.read().shape

    def stop(self):
        self._video_capture.release()
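The global_camera_properties table that get_settings() and set_setting() iterate over is not shown in this example. A plausible shape for it, built from OpenCV's named property constants (the entries are illustrative, not the original project's list), followed by a usage sketch:

import cv2

global_camera_properties = [
    {'name': 'brightness', 'value': cv2.CAP_PROP_BRIGHTNESS},
    {'name': 'contrast', 'value': cv2.CAP_PROP_CONTRAST},
    {'name': 'saturation', 'value': cv2.CAP_PROP_SATURATION},
    {'name': 'exposure', 'value': cv2.CAP_PROP_EXPOSURE},
]

camera = Camera()
camera.start()                         # opens device 0 and records the frame shape
print(camera.get_settings())           # e.g. [{'name': 'brightness', 'value': 128.0}, ...]
camera.set_setting('brightness', 128)
camera.stop()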
Example #3
File: __init__.py Project: Azique/pupil
class Camera_Capture():
    """
    VideoCapture without uvc control using cv2.VideoCapture
    """
    def __init__(self,src_id,size=(640,480),fps=None,timebase=None):
        self.controls = None
        self.cvId = src_id
        self.name = "VideoCapture"
        # add cv2.VideoCapture capabilities
        self.capture = VideoCapture(src_id)
        self.set_size(size)

        if timebase is None:
            logger.debug("Capture will run with default system timebase")
            self.timebase = c_double(0)
        elif isinstance(timebase,c_double):
            logger.debug("Capture will run with app wide adjustable timebase")
            self.timebase = timebase
        else:
            logger.error("Invalid timebase variable type. Will use default system timebase")
            self.timebase = c_double(0)


    def get_frame(self):
        s, img = self.capture.read()
        timestamp = time()
        return Frame(timestamp,img)

    def set_size(self,size):
        width,height = size
        self.capture.set(3, width)
        self.capture.set(4, height)

    def get_size(self):
        return self.capture.get(3), self.capture.get(4)

    def set_fps(self,fps):
        self.capture.set(5,fps)

    def get_fps(self):
        return self.capture.get(5)

    def get_now(self):
        return time()

    def create_atb_bar(self,pos):
        size = 0,0
        return size

    def kill_atb_bar(self):
        pass

    def close(self):
        pass
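The bare integers passed to capture.set() and capture.get() in this class are the numeric values of OpenCV's CAP_PROP_* constants: 3 is CAP_PROP_FRAME_WIDTH, 4 is CAP_PROP_FRAME_HEIGHT, 5 is CAP_PROP_FPS. A sketch of the same size helpers written with the named constants, which reads less cryptically:

import cv2

def set_size(capture, size):
    width, height = size
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)    # property id 3
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)  # property id 4

def get_size(capture):
    return (capture.get(cv2.CAP_PROP_FRAME_WIDTH),
            capture.get(cv2.CAP_PROP_FRAME_HEIGHT))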
Example #4
File: __init__.py Project: blupixelz/pupil
class FileCapture():
    """
    simple file capture that can auto_rewind
    """
    def __init__(self,src):
        self.auto_rewind = True
        self.controls = None #No UVC controls available with file capture
        # we initialize the actual capture based on cv2.VideoCapture
        self.cap = VideoCapture(src)
        timestamps_loc = os.path.join(src.rsplit(os.path.sep,1)[0],'eye_timestamps.npy')
        logger.info("trying to load timestamps with video at: %s"%timestamps_loc)
        try:
            self.timestamps = np.load(timestamps_loc).tolist()
            logger.info("loaded %s timestamps"%len(self.timestamps))
        except IOError:
            logger.info("did not find timestamps")
            self.timestamps = None
        self._get_frame_ = self.cap.read


    def get_size(self):
        return self.cap.get(3),self.cap.get(4)

    def set_fps(self):
        pass

    def get_fps(self):
        return None

    def read(self):
        s, img = self._get_frame_()
        if self.auto_rewind and not s:
            self.rewind()
            s, img = self._get_frame_()
        return s, img

    def get_frame(self):
        s, img = self.read()
        if self.timestamps:
            timestamp = self.timestamps.pop(0)
        else:
            timestamp = time()
        return Frame(timestamp,img)

    def rewind(self):
        self.cap.set(1,0) #seek to the beginning

    def create_atb_bar(self,pos):
        return 0,0

    def kill_atb_bar(self):
        pass

    def close(self):
        pass
Example #5
class CaptureSource:
    def __init__(self):
        self._video = None
        self._image = None

    def get_resolution(self):
        if self._video:
            return (int(self._video.get(CAP_PROP_FRAME_WIDTH)),
                    int(self._video.get(CAP_PROP_FRAME_HEIGHT)))

    def camera(self, num_cams):
        cam = Cameras()
        cam.check_cameras(num_cams)
        self._video = cam.show_and_select_camera()

    def video(self, filename):
        self._video = VideoCapture(filename)

    def image(self, filename):
        self._image = filename

    def get_frame(self):
        if self._video:
            retval, frame = self._video.read()
            return retval, frame
        return True, imread(self._image)
Example #6
 def __init__(self, camera, dic=None):
     self.camera = int(camera)
     self.cap = VideoCapture(self.camera)
     if dic:
         for propID, value in dic.iteritems():
             self.cap.set(propID, value)
     first_frame = self.frame()
Example #7
 def __init__(self, cam_num = -1, resolution = (640, 480)):
     '''
     create camera object
         cam_num            device number (integer)
         resolution         image resolution (tuple width x height)
     '''
     self.device = cam_num
     self.resolution = resolution
     self.BGRimage = []
     self.HSVimage = []
     self.RGBimage = []
     self.FPS = [0, 0]
     self.__avr = 0
     #assign the device; VideoCapture opens it on construction, so no explicit open() call is needed
     self.__capture = VideoCapture(cam_num)
     self.__capture.set(CV_CAP_PROP_FRAME_WIDTH,resolution[0])
     self.__capture.set(CV_CAP_PROP_FRAME_HEIGHT,resolution[1])
     self.__flag = False
     t0 = time()
     self.__flag, self.BGRimage = self.__capture.read()
     self.FPS[0] = 1/(time()-t0)
     self.FPS[1] = self.FPS[0]
     self.__avr = self.FPS[0]
     print "camera", self.device, "ready @", self.FPS[0], "fps"
     return
Example #8
def capture():
    # open Camera
    cam = VideoCapture(0)
    if not cam.isOpened():
        LOGGER.debug('FAILED to open camera!!!')
        return None

    # read a burst of frames so the sensor can adjust; only the last result is kept
    for i in range(100):
        status, img = cam.read()
    if not status:
        LOGGER.debug('FAILED to capture image!!!')
        return None

    cam.release()
    return img
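A short usage sketch for the function above; the output file name is illustrative, and imwrite comes from cv2:

from cv2 import imwrite

img = capture()
if img is not None:
    imwrite('snapshot.jpg', img)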
Example #9
    def __init__(self, mouse, n=1, data_dir='.', diff_thresh=80, resample=1, translation_max=50, smoothing_kernel=19, consecutive_skip_threshold=0.08, selection_from=[], point_mode='auto'):
        self.mouse = mouse
        self.n = n
        self.data_dir = data_dir
        
        # Parameters (you may vary)
        self.diff_thresh = diff_thresh
        self.resample = resample
        self.translation_max = translation_max
        self.kernel = smoothing_kernel

        # Parameters (you should not vary)
        self.cth1 = 0
        self.cth2 = 0
        plat = sys.platform
        if 'darwin' in plat:
            self.fourcc = CV_FOURCC('m','p','4','v') 
        elif plat[:3] == 'win':
            self.fourcc = 1
        else:
            self.fourcc = -1
        
        self.fh = FileHandler(self.data_dir, self.mouse, self.n)

        self.framei = 0
        self.load_time()
        self.consecutive_skip_threshold = (self.fs/self.resample) * consecutive_skip_threshold
        self.load_background()
        self.height, self.width = self.background.shape
        self.mov = VideoCapture(self.fh.get_path(self.fh.TRIAL, self.fh.MOV))
        self.mov.read()
        self.time = self.time[1:]  # the read above consumed one frame, so drop its timestamp
        #self.get_frame(self.mov,n=40) #MUST ADJUST TIME IF USING THIS
        self.load_pts(mode=point_mode)
        self.make_rooms()
Example #10
 def __init__(self, src,size=(640,480)):
     self.controls = None
     self.cvId = src
     # add cv2.VideoCapture capabilities
     self.cap = VideoCapture(src)
     self.set_size(size)
     self.read = self.cap.read
Example #11
    def __init__(self, cam,size=(640,480)):
        super(CameraCapture, self).__init__(cam)

        # add cv2.VideoCapture capabilities
        self.cap = VideoCapture(self.cvId)
        self.set_size(size)
        self.read = self.cap.read
Example #12
File: main.py Project: blagarde/robot
 def __init__(self, title="Video Stream"):
     ''' Uses OpenCV 2.3.1 method of accessing camera '''
     self.title = title
     self.cap = VideoCapture(0)
     self.prev = self.get_frame()
     self.frame = self.get_frame()
     namedWindow(title, 1) 
Example #13
class Camera(object):
    """
    The class responsible for communicating with the actual camera and
    getting images from it.
    
    Attributes:
        cam: An instance of an openCV VideoCapture. 
    """
    
    def __init__(self, device_num):
        """
        Uses a device num in case the system has multiple cameras attached.
        """
        
        self.cam = VideoCapture(device_num) 
        
    def get_image(self):
        """
        Grab a frame from the camera. The cameraCommunicator is the caller,
        and is responsible for lighting and location. The filename of the
        image is returned. 
        
        Raises:
            FatalCameraException: An image was not taken successfully.
        """
        
        #create the systematic filename
        timestamp = datetime.datetime.now()
        filename = utils.get_image_dir() + str(timestamp.date()) + \
                    str(timestamp.hour) + str(timestamp.minute) + \
                    str(timestamp.second) + '.jpg'
        
        #A series of reads to allow the camera to adjust to lighting
        self.cam.read()
        self.cam.read()
        self.cam.read()
        self.cam.read()
        
        #only the last is saved
        success, image = self.cam.read()
        
        if not success:
            raise FatalCameraException()
        else:
            imwrite(filename, image)
            
        return timestamp, filename
Example #14
    def __init__(self, mouse, mode,  data_directory='.', diff_thresh=100, resample=8, translation_max=100, smoothing_kernel=19, consecutive_skip_threshold=2, selection_from=[]):
        self.mouse = mouse
        self.data_dir = data_directory
        self.mode = mode
        self.selection_from = selection_from
        
        # Parameters (you may vary)
        self.diff_thresh = diff_thresh
        self.resample = resample
        self.translation_max = translation_max
        self.kernel = smoothing_kernel
        self.consecutive_skip_threshold = (37./self.resample) * consecutive_skip_threshold

        # Parameters (you should not vary)
        self.duration = 1
        self.cth1 = 0
        self.cth2 = 0
        plat = sys.platform
        if 'darwin' in plat:
            self.fourcc = CV_FOURCC('m','p','4','v') 
        elif plat[:3] == 'win':
            self.fourcc = 1
        else:
            self.fourcc = -1

        fh = FileHandler(self.data_dir, self.mouse)
        self.background_name = fh[mode][BACKGROUND][NAME]
        self.background_dir = fh[mode][BACKGROUND][DIR]
        self.trial_name = fh[mode][TRIAL][NAME]
        self.trial_dir = fh[mode][TRIAL][DIR]

        self.background, self.background_image = self.load_background()
        self.height, self.width = np.shape(self.background)
        
        timefile = os.path.join(self.trial_dir, self.trial_name+'-timestamps.json')
        with open(timefile, 'r') as f:
            self.time = json.loads(f.read())
        vidfile = os.path.join(self.trial_dir, self.trial_name+'-cam.avi')
        if not os.path.exists(vidfile):
            vidfile = os.path.join(self.trial_dir, self.trial_name+'-cam0.avi')
        if not os.path.exists(vidfile):
            raise Exception('Movie %s not found.'%vidfile)
        self.mov = VideoCapture(vidfile)

        self.results = {}
        self.results['centers'] = []
        self.results['centers_all'] = []
        self.results['left'] = 0
        self.results['right'] = 0
        self.results['middle'] = 0
        self.results['left_assumed'] = 0
        self.results['right_assumed'] = 0
        self.results['middle_assumed'] = 0
        self.results['skipped'] = 0
        self.results['heat'] = np.zeros(np.shape(self.background))
        self.results['n_frames'] = 0
        self.results['params'] = [self.diff_thresh, self.kernel, self.translation_max, self.resample]
        self.results['params_key'] = ['diff_thresh','kernel','translation_max','resample']

        self.path_l, self.path_r, self.path_c, self.rooms_mask, self.paths_ignore, self.last_center = self.get_pt_selections()
Example #15
 def __init__(self,src_id,size=(640,480),fps=None):
     self.controls = None
     self.cvId = src_id
     self.name = "VideoCapture"
     # add cv2.VideoCapture capabilities
     self.capture = VideoCapture(src_id)
     self.set_size(size)
Example #16
File: camgrab.py Project: corerd/PyDomo
def grabImageFromUSB(cameraNumber=0):
    '''Grabs a snapshot from the specified USB camera.

    Returns bool, video frame decoded as a JPEG bytearray.
    '''
    from cv2 import VideoCapture, imencode

    # initialize the camera
    cam = VideoCapture(cameraNumber)
    retVal, rawData = cam.read()
    if not retVal:
        # frame capture failed
        return False, None
    retVal, jpgData = imencode('.jpg', rawData)
    if not retVal:
        # image encode errors
        return False, None
    return retVal, bytearray(jpgData)
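A sketch of how the returned JPEG bytearray might be consumed, for example written straight to disk without re-encoding (the output path is illustrative):

ok, jpg = grabImageFromUSB(0)
if ok:
    with open('usb_snapshot.jpg', 'wb') as f:
        f.write(jpg)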
Example #17
class FileCapture():
    """
    simple file capture that can auto_rewind
    """
    def __init__(self,src):
        self.auto_rewind = True
        self.controls = None #No UVC controls available with file capture
        # we initialize the actual capture based on cv2.VideoCapture
        self.cap = VideoCapture(src)
        self._get_frame_ = self.cap.read


    def get_size(self):
        return self.cap.get(3),self.cap.get(4)

    def set_fps(self):
        pass

    def get_fps(self):
        return None

    def read(self):
        s, img = self._get_frame_()
        if self.auto_rewind and not s:
            self.rewind()
            s, img = self._get_frame_()
        return s, img

    def get_frame(self):
        s, img = self.read()
        timestamp = time()
        return Frame(timestamp,img)

    def rewind(self):
        self.cap.set(1,0) #seek to the beginning

    def create_atb_bar(self,pos):
        return 0,0

    def kill_atb_bar(self):
        pass

    def close(self):
        pass
Example #18
File: main.py Project: blagarde/robot
class Window(object):
    def __init__(self, title="Video Stream"):
        ''' Uses OpenCV 2.3.1 method of accessing camera '''
        self.title = title
        self.cap = VideoCapture(0)
        self.prev = self.get_frame()
        self.frame = self.get_frame()
        namedWindow(title, 1) 

    def get_frame(self):
        success, frame = self.cap.read()
        return self.to_grayscale(frame) if success else False

    def to_grayscale(self, frame):
        return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    def optical_flow(self): # FIXME
        flow = CreateImage(GetSize(frame), 32, 2)
        CalcOpticalFlowFarneback(self.prev, self.frame, flow, # takes 0.05s
            pyr_scale=0.5, levels=3, winsize=15,
            iterations=3, poly_n=5, poly_sigma=1.2, flags=0)
        return flow

    def disparity(self):
        if self.left is None or self.right is None:
            print "Capture left and right images using 'l' and 'r' keys before running disparity"
            return None

        hl, wl = self.left.shape
        hr, wr = self.right.shape
        disp_left  = cv2.cv.CreateMat(hl, wl, cv2.cv.CV_16S)
        disp_right = cv2.cv.CreateMat(hr, wr, cv2.cv.CV_16S)
        state = cv2.cv.CreateStereoGCState(16,2)
        # running the graph-cut algorithm
        from cv2.cv import fromarray
        cv2.cv.FindStereoCorrespondenceGC(fromarray(self.left), fromarray(self.right), disp_left, disp_right, state)
        cv2.cv.Save( "left.png", disp_left) # save the map
        cv2.cv.Save( "right.pgm", disp_right) # save the map

    def mainloop(self):
        while True:
            self.prev = self.frame
            self.frame = self.get_frame()
            sift(self.frame) # takes ~0.14s!
            imshow(self.title, self.frame)
            k = waitKey(10)
            if k == -1:
                pass
            elif chr(k) == 'l':
                self.left = self.frame
            elif chr(k) == 'r':
                self.right = self.frame
            elif chr(k) == 'd':
                self.disparity()
            elif k == 27:
                break
Example #19
    def check_cameras_linux(self, num=MAX_CAMERAS):
        """Comprueba las cámaras disponibles.

        :Param num: máximo número de cámaras a comprobar
        :Keyword num: 99 por defecto, ya que en Linux es lo permitido
        :Param num: int
        :Return: lista de cámaras disponibles
        :Rtype: list of Capture
        """
        n = 0
        while len(self.cameras) < num and n <= MAX_CAMERAS:
            camera = VideoCapture(n)
            retval, frame = camera.read()
            if retval:
                self.cameras.append(camera)
            n += 1
        if num != MAX_CAMERAS and len(self.cameras) != num:
            print("Found %d of %d cameras. " % (len(self.cameras), num))
            exit()
        return len(self.cameras)
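On Linux the probe loop above can be narrowed cheaply, since V4L2 capture devices appear as /dev/video* nodes; a sketch of such a pre-filter, under that naming assumption:

import glob

def candidate_camera_indices():
    """Indices of existing /dev/video* nodes; cheaper than opening every index."""
    return sorted(int(path.rsplit('video', 1)[1])
                  for path in glob.glob('/dev/video[0-9]*'))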
Example #20
class Camera_Capture():
    """
    VideoCapture without uvc control using cv2.VideoCapture
    """
    def __init__(self,src_id,size=(640,480),fps=None):
        self.controls = None
        self.cvId = src_id
        self.name = "VideoCapture"
        # add cv2.VideoCapture capabilities
        self.capture = VideoCapture(src_id)
        self.set_size(size)

    def get_frame(self):
        s, img = self.capture.read()
        timestamp = time()
        return Frame(timestamp,img)

    def set_size(self,size):
        width,height = size
        self.capture.set(3, width)
        self.capture.set(4, height)

    def get_size(self):
        return self.capture.get(3), self.capture.get(4)

    def set_fps(self,fps):
        self.capture.set(5,fps)

    def get_fps(self):
        return self.capture.get(5)

    def create_atb_bar(self,pos):
        size = 0,0
        return size

    def kill_atb_bar(self):
        pass

    def close(self):
        pass
Example #21
def video_loop(aframes_queue,person_queue):
    vc = VideoCapture(0)
    rval, frame = vc.read()
    people = {}
    colors = ((0,0,255),(255,255,0))
    while True:
        rval, frame = vc.read()
        if frame is None:
            c = waitKey(10)
            continue
        aframe = NP.asarray(frame[:,:])
        im = Image.fromarray(frame)
        draw = ImageDraw.Draw(im)
        
        while not person_queue.empty():
            name,rect,name_size = person_queue.get()
            people[name] = {'rect' : rect, 'name_size' : name_size, 
                            'time_found' : time.time()}

        name_counter = 0        
        for name in list(people.keys()):  # copy the keys: entries may be popped below
            if name_counter < 2:
                draw_name(draw, people[name], name, name_counter, colors[name_counter])
            name_counter += 1
            
            if time.time()>people[name]['time_found']+2:
                # stop displaying after 2 seconds
                people.pop(name)
                
        frame2 = NP.array(im)
        imshow('frame',frame2)


        if aframes_queue.empty():
            aframes_queue.put(aframe)
        c = waitKey(1)
        if c == 27: # exit on ESC
            break
    
    vc.release()
    destroyAllWindows()
Example #22
class noUVCCapture():
    """
    VideoCapture without uvc control
    """
    def __init__(self, src,size=(640,480)):
        self.controls = None
        self.cvId = src
        # add cv2.VideoCapture capabilities
        self.cap = VideoCapture(src)
        self.set_size(size)
        self.read = self.cap.read

    def set_size(self,size):
        width,height = size
        self.cap.set(3, width)
        self.cap.set(4, height)

    def get_size(self):
        return self.cap.get(3),self.cap.get(4)

    def read_RGB(self):
        s,img = self.read()
        if s:
            cvtColor(img,COLOR_RGB2BGR,img)
        return s,img

    def read_HSV(self):
        s,img = self.read()
        if s:
            cvtColor(img,COLOR_RGB2HSV,img)
        return s,img
Example #23
class CameraCapture(uvc.Camera):
    """
    CameraCapture Class for Image grabbing and control
    inherits from an OS specitic Camera that defines all uvc control methods
    """
    def __init__(self, cam,size=(640,480)):
        super(CameraCapture, self).__init__(cam)

        # add cv2.VideoCapture capabilities
        self.cap = VideoCapture(self.cvId)
        self.set_size(size)
        self.read = self.cap.read

    def set_size(self,size):
        width,height = size
        self.cap.set(3, width)
        self.cap.set(4, height)

    def get_size(self):
        return self.cap.get(3),self.cap.get(4)

    def read_RGB(self):
        s,img = self.read()
        if s:
            cvtColor(img,COLOR_RGB2BGR,img)
        return s,img

    def read_HSV(self):
        s,img = self.read()
        if s:
            cvtColor(img,COLOR_RGB2HSV,img)
        return s,img
Example #24
    def __init__(self, cam, size=(640, 480), fps=30):
        self.src_id = cam.src_id
        self.uId = cam.uId
        self.name = cam.name
        self.controls = Controls(self.uId)

        try:
            self.controls["UVCC_REQ_FOCUS_AUTO"].set_val(0)
        except KeyError:
            pass

        self.capture = VideoCapture(self.src_id)
        self.set_size(size)
Example #25
File: __init__.py Project: blupixelz/pupil
 def __init__(self,src):
     self.auto_rewind = True
     self.controls = None #No UVC controls available with file capture
     # we initialize the actual capture based on cv2.VideoCapture
     self.cap = VideoCapture(src)
     timestamps_loc = os.path.join(src.rsplit(os.path.sep,1)[0],'eye_timestamps.npy')
     logger.info("trying to load timestamps with video at: %s"%timestamps_loc)
     try:
         self.timestamps = np.load(timestamps_loc).tolist()
         logger.info("loaded %s timestamps"%len(self.timestamps))
     except IOError:
         logger.info("did not find timestamps")
         self.timestamps = None
     self._get_frame_ = self.cap.read
Example #26
class FileCapture():
    """
    simple file capture that can auto_rewind
    """
    def __init__(self,src):
        self.auto_rewind = True
        self.controls = None #No UVC controls available with file capture
        # we initialize the actual capture based on cv2.VideoCapture
        self.cap = VideoCapture(src)
        self._get_frame_ = self.cap.read


    def get_size(self):
        return self.cap.get(3),self.cap.get(4)

    def read(self):
        s, img = self._get_frame_()
        if self.auto_rewind and not s:
            self.rewind()
            s, img = self._get_frame_()
        return s, img

    def read_RGB(self):
        s,img = self.read()
        if s:
            cvtColor(img,COLOR_RGB2BGR,img)
        return s,img

    def read_HSV(self):
        s,img = self.read()
        if s:
            cvtColor(img,COLOR_RGB2HSV,img)
        return s,img

    def rewind(self):
        self.cap.set(1,0) #seek to the beginning
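The rewind above seeks via bare property id 1; a sketch of the same seek using the named constant, generalized to an arbitrary target frame:

import cv2

def seek_to_frame(capture, frame_index=0):
    # property id 1 == cv2.CAP_PROP_POS_FRAMES, the 0-based index of the next frame to decode
    capture.set(cv2.CAP_PROP_POS_FRAMES, frame_index)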
Example #27
File: __init__.py Project: Azique/pupil
    def __init__(self,src_id,size=(640,480),fps=None,timebase=None):
        self.controls = None
        self.cvId = src_id
        self.name = "VideoCapture"
        # add cv2.VideoCapture capabilities
        self.capture = VideoCapture(src_id)
        self.set_size(size)

        if timebase is None:
            logger.debug("Capture will run with default system timebase")
            self.timebase = c_double(0)
        elif isinstance(timebase,c_double):
            logger.debug("Capture will run with app wide adjustable timebase")
            self.timebase = timebase
        else:
            logger.error("Invalid timebase variable type. Will use default system timebase")
            self.timebase = c_double(0)
Example #28
    def re_init(self, cam, size=(640, 480), fps=30):
        self.src_id = cam.src_id
        self.uId = cam.uId
        self.name = cam.name
        self.controls = Controls(self.uId)

        try:
            self.controls["UVCC_REQ_FOCUS_AUTO"].set_val(0)
        except KeyError:
            pass

        self.capture = VideoCapture(self.src_id)
        self.set_size(size)

        # recreate the bar with new values
        bar_pos = self.bar._get_position()
        self.bar.destroy()
        self.create_atb_bar(bar_pos)
Example #29
File: Worker.py Project: macrobotics/BOSS
  def __init__(self):
    
    try:
      self.camera = VideoCapture(CAMERA_INDEX)
      self.camera.set(3,CAMERA_WIDTH)
      self.camera.set(4,CAMERA_HEIGHT)
      message = 'Success.'
    except Exception:
      message = 'Failure.'
    print('[Setting up Camera]...' + message)

    try:
      self.arduino = serial.Serial(DEVICE, BAUD)
      message = 'Success.'
    except Exception:
      message = 'Failure.'
    print('[Setting up Controller]...' + message)

    self.reset_worker()
Example #30
File: __init__.py Project: fabianvf/pupil
    def __init__(self, cam,size=(640,480),fps=30):
        self.src_id = cam.src_id
        self.uId = cam.uId
        self.name = cam.name
        self.controls = Controls(self.uId)

        try:
            self.controls['UVCC_REQ_FOCUS_AUTO'].set_val(0)
        except KeyError:
            pass

        if '6000' in self.name and False: #on mac we dont have enough controls to use this right.
            logger.info("adjusting exposure for HD-6000 camera")
            try:
                self.controls['UVCC_REQ_EXPOSURE_AUTOMODE'].set_val(1)
                self.controls['UVCC_REQ_EXPOSURE_ABS'].set_val(156)
            except KeyError:
                pass

        self.capture = VideoCapture(self.src_id)
        self.set_size(size)
Example #31
class LKTrack:
    _lk_params = dict(winSize=(11, 11),
                      maxLevel=2,
                      criteria=(TERM_CRITERIA_EPS | TERM_CRITERIA_COUNT, 10,
                                .003))

    _feature_params = dict(maxCorners=500, qualityLevel=.01, minDistance=10)

    def __init__(self):
        self._root = Tk()
        self._flag_run = False
        self._frames = Label(self._root, text='frames: ')
        self._in_p = Label(self._root, text='in: 0')
        self._file_name = Label(self._root, text='file name: ')
        self._button_file = Button(self._root,
                                   text='open file',
                                   command=self._open_run_muvie)
        self._button_cum = Button(self._root,
                                  text='run on cam',
                                  command=self._open_run_cum)

        self._button_file.grid(row=0, column=0)
        self._button_cum.grid(row=0, column=1)
        self._file_name.grid(row=0, column=2)
        self._frames.grid(row=0, column=3)
        self._in_p.grid(row=0, column=4)

        self._build_online_rectangle = []
        self._cordinate_squade = self._load_squade()

    def _get_lmain(self):
        self._lmain = Label(self._root)
        self._lmain.bind('<Motion>', self._make_squade)
        self._lmain.grid(row=1, column=0, columnspan=5)

    def _open_run_muvie(self):
        open_file = filedialog.askopenfilename()
        if open_file:
            self._cam = VideoCapture(open_file)
            self._prepare_for_word()
            self._file_name['text'] = 'file name: ' + open_file.split('/')[-1]

    def _prepare_for_word(self):
        if self._flag_run:
            self._lmain.destroy()
        self._flag_run = True
        self._get_lmain()
        self._current_frame = self._people_in = 0
        self._track = []
        self._detect_points()

    def _open_run_cum(self):
        self._cam = VideoCapture(0)
        self._prepare_for_word()
        self._file_name['text'] = 'run on cam'

    def _detect_points(self):
        _, self._image = self._cam.read()
        self._gray = cvtColor(self._image, COLOR_BGR2GRAY)
        features = goodFeaturesToTrack(
            self._gray[self._cordinate_squade[2]:self._cordinate_squade[3],
                       self._cordinate_squade[0]:self._cordinate_squade[1]],
            **self._feature_params)
        # goodFeaturesToTrack returns None when no corners are found
        if features is not None:
            features += np.array(
                [[self._cordinate_squade[0], self._cordinate_squade[2]]],
                dtype=np.float32)
            for x, y in features.reshape((-1, 2)):
                self._track.append([(x, y)])
        self._prev_gray = self._gray

    def _track_points(self):
        _, self._image = self._cam.read()
        self._gray = cvtColor(self._image, COLOR_BGR2GRAY)
        tmp = np.float32([tp[-1] for tp in self._track]).reshape(-1, 1, 2)
        features, status, _ = calcOpticalFlowPyrLK(self._prev_gray, self._gray,
                                                   tmp, None,
                                                   **self._lk_params)
        features = [p for (st, p) in zip(status, features) if st]
        features = np.array(features).reshape((-1, 2))
        ndx = [i for (i, st) in enumerate(status) if not st]
        ndx.reverse()
        for i in ndx:
            self._track.pop(i)
        for i, f in enumerate(features):
            self._track[i].append(tuple(f))
        self._track = [i[-100:] for i in self._track]
        self._prev_gray = self._gray

    def _point_is_move(self, point_track):
        if (abs(point_track[0][0] - point_track[-1][0]) >
                30) and (point_track[-1][0] > self._cordinate_squade[1]):
            return True
        return False

    def _centroid(self):
        points = [p for p in self._track if self._point_is_move(p)]
        if len(points) > 10:
            mean_x = int(sum([p[-1][0] for p in points]) // len(points))
            mean_y = int(sum([p[-1][1] for p in points]) // len(points))
            circle(self._image, (mean_x, mean_y), 10, (200, 0, 0), -1)
            if mean_x > self._cordinate_squade[1] + 90:
                for i in points:
                    self._track.remove(i)
                self._people_in += 1
                self._in_p['text'] = f'in {self._people_in}'
                self._save_img()

    def _draw_points(self):
        for index, (x, y) in enumerate((i[-1] for i in self._track)):
            color = (255, 0,
                     0) if self._point_is_move(self._track[index]) else (0,
                                                                         255,
                                                                         0)
            circle(self._image, (int(x), int(y)), 3, color, -1)

    def _draw_rectangle(self):
        rectangle(self._image,
                  (self._cordinate_squade[0], self._cordinate_squade[-1]),
                  (self._cordinate_squade[1], self._cordinate_squade[2]),
                  (0, 0, 255))

    def _update_widget(self):
        self._current_frame += 1
        self._frames['text'] = f"frames: {self._current_frame}"

    def _update_window(self):
        img = Image.fromarray(self._image)
        imgtk = ImageTk.PhotoImage(image=img)
        self._lmain.imgtk = imgtk
        self._lmain.configure(image=imgtk)

    def _save_img(self):
        imsave(str(self._current_frame) + '.jpg', self._image)

    def _save_squade(self):
        with open('cordinate_squade.txt', 'w') as file:
            file.write(str(self._cordinate_squade))

    def _load_squade(self):
        with open('cordinate_squade.txt') as file:
            # note: eval() trusts the file's contents; ast.literal_eval would be safer
            return eval(file.read())

    def _make_squade(self, event):
        if event.state == 264:
            self._build_online_rectangle.append((event.x, event.y))
        elif self._build_online_rectangle:
            self._cordinate_squade.clear()
            self._cordinate_squade.append(
                min((self._build_online_rectangle[0][0],
                     self._build_online_rectangle[-1][0])))
            self._cordinate_squade.append(
                max((self._build_online_rectangle[0][0],
                     self._build_online_rectangle[-1][0])))
            self._cordinate_squade.append(
                min((self._build_online_rectangle[0][1],
                     self._build_online_rectangle[-1][1])))
            self._cordinate_squade.append(
                max((self._build_online_rectangle[0][1],
                     self._build_online_rectangle[-1][1])))
            self._build_online_rectangle.clear()
            self._save_squade()

    def _del_static_points(self):
        # rebuild the list; calling remove() while iterating skips elements
        self._track = [
            point for point in self._track
            if not (point[-1][0] > self._cordinate_squade[1]
                    and not self._point_is_move(point))
        ]

    def run(self):
        if self._flag_run:
            if len(self._track) < 200:
                if not (self._current_frame % 200) and (
                        not [i
                             for i in self._track if self._point_is_move(i)]):
                    self._track.clear()
                self._detect_points()
            self._track_points()
            self._update_widget()
            self._draw_points()
            self._del_static_points()
            self._centroid()
            self._draw_rectangle()
            self._update_window()
        self._root.after(1, self.run)
        return self._root
Example #32
def get_fps(video: VideoCapture) -> float:
    return video.get(cv2.CAP_PROP_FPS)
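CAP_PROP_FPS is dependable for video files, but many live cameras report 0.0 for it; a hedged variant of the helper above with a caller-supplied fallback:

import cv2

def get_fps_or_default(video: cv2.VideoCapture, default: float = 30.0) -> float:
    fps = video.get(cv2.CAP_PROP_FPS)
    return fps if fps > 0 else default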
Example #33
 def _open_run_cum(self):
     self._cam = VideoCapture(0)
     self._prepare_for_word()
     self._file_name['text'] = 'run on cam'
Example #34
class Video_reader:
    def __init__(self, filename):
        self.curr_frame = 0  # BE careful !!! IT IS NOT ALWAYS UPDATED !!!!
        self.frames_count = 0
        print("Initializing video reader.")
        self.filename = filename
        print("Opening video file.")
        self.video = None
        try:
            self.video = VideoCapture(filename)
        except Exception:
            print("Videofile failed to load.")
        if self.is_open():
            self.frames_count = self.video.get(7)
            print("Video " + filename + " is opened.")

    def read_frame(self, frame=-1):
        if frame is None:
            return None
        if frame <= -1:
            ret, frame = self.video.read()
            self.curr_frame += 1
            if ret:
                return frame
        try:
            self.video.set(1, int(frame))
        except:
            return None
        self.curr_frame = frame
        ret, frame = self.video.read()
        if ret:
            return frame

    def read_frame_at_time(self, time):
        self.video.set(0, time)
        ret, frame = self.video.read()
        if ret:
            return frame

    def set_position_in_ms(self, time):
        if self.video is not None:
            self.video.set(0, time)

    def set_position_frame(self, frame):
        if self.video is not None:
            if frame < 0:
                self.video.set(1, 0)
            elif frame > self.frames_count:
                self.video.set(1, self.frames_count - 1)
            else:
                self.video.set(1, frame)
                # self.curr_frame = frame

    def get_position_in_ms(self):
        if self.video is not None:
            return self.video.get(0)

    def get_position_frames(self):
        # self.curr_frame = self.video.get(1)
        if self.video is not None:
            return self.video.get(1)

    def get_position_ratio(self):
        if self.video is not None:
            return self.video.get(2)

    def get_frame_width(self):
        if self.video is not None:
            return self.video.get(3)

    def get_frame_height(self):
        if self.video is not None:
            return self.video.get(4)

    def get_fps(self):
        if self.video is not None:
            return self.video.get(5)

    def get_fourcc(self):
        if self.video is not None:
            return self.video.get(6)

    def get_frames_count(self):
        return self.frames_count

    def get_filename(self):
        return self.filename

    def is_open(self):
        if self.video is not None:
            return self.video.isOpened()
        else:
            return False
Example #35
File: vascii.py Project: vsezol/vascii
    return ascii_img


printIntro()

pixel_size = int(input('Enter the size of pixel: '))

is_update_display = input('Enable console clear [y/n] ? ') == 'y'

print('Please wait...')
cap = VideoCapture(0)

while True:
    _, img = cap.read()
    h, _, _ = img.shape
    gray_img = cvtColor(img, COLOR_RGB2GRAY)
    gray_img = flip(gray_img,1)
    output = makeAscii(gray_img, h, pixel_size)
    output = [''.join(output[i]) for i in range(h // pixel_size)]
    output = '\n'.join(output)
    if (is_update_display):
        clear()
    else:
        print('\n' * 150)
    print(output)
Example #36
def get_current_frame_id(video_cap: cv2.VideoCapture) -> int:
    return int(video_cap.get(cv2.CAP_PROP_POS_FRAMES))
Example #37
def get_frame_count(video_cap: cv2.VideoCapture) -> int:
    # NOT always accurate, see:
    # https://stackoverflow.com/questions/31472155/python-opencv-cv2-cv-cv-cap-prop-frame-count-get-wrong-numbers
    return int(video_cap.get(cv2.CAP_PROP_FRAME_COUNT))
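When the container's metadata cannot be trusted (the situation the linked Stack Overflow question describes), the only exact count comes from decoding every frame; a sketch of that slower fallback:

import cv2

def count_frames_exact(video_cap: cv2.VideoCapture) -> int:
    video_cap.set(cv2.CAP_PROP_POS_FRAMES, 0)  # rewind to the first frame
    count = 0
    while video_cap.read()[0]:
        count += 1
    return count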
Example #38
def get_video_capture_specs(cap: cv.VideoCapture) -> do.VideoSpecs:
    if not isinstance(cap, cv.VideoCapture):
        raise TypeError("cap must be a cv.VideoCapture")
    return do.VideoSpecs(width=cap.get(cv.CAP_PROP_FRAME_WIDTH),
                         height=cap.get(cv.CAP_PROP_FRAME_HEIGHT),
                         fps=cap.get(cv.CAP_PROP_FPS))
Example #39
    else:
        filename = filename \
        + '_Location:' + row + col + '_Heading:' + heading + '_Ship:' + ship
        print('SHIP: ' + ship)
        print('HEADING: ' + heading)
        print('LOCATION: ' + row + col)

    # Assign the appropriate directory to the output file
    if np.random.rand() < fraction_train:
        filename = dataset_dirs[0] + filename
    else:
        filename = dataset_dirs[1] + filename

    # Initialize the camera capture object with the cv2.VideoCapture class.
    # All it needs is the index to a camera port.
    camera = VideoCapture(1)
    key = raw_input('When ready, press Enter to capture image.')

    # Ramp the camera - these frames will be discarded and are only used
    # to allow v4l2 to adjust light levels, if necessary
    print('Ramping camera...')
    for i in xrange(ramp_frames):
        return_val, image = camera.read()

    # Take the actual image we want to keep
    print('Capturing actual image...')
    return_val, image = camera.read()

    # Save the image in JPG format
    filename += '.jpg'
    print('Writing image: ' + filename)
Example #40
THRESHOLD = 1e-4

audio = pyaudio.PyAudio()

# start Recording
stream = audio.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)

while True:
    cur_chunk = np.abs(np.frombuffer(stream.read(CHUNK)))  # frombuffer replaces the deprecated np.fromstring
    convolved = np.convolve(KERNEL, cur_chunk)
    if np.max(convolved) > THRESHOLD:
        vc = VideoCapture(0)

        if vc.isOpened():
            rval, frame = vc.read()
            if rval:
                imwrite('/tmp/.out-clapchat.png', frame)
                vc.release()
                break

# stop Recording
stream.stop_stream()
stream.close()
audio.terminate()

toaddr = input("Enter recipient email addresses separated by spaces: ").split()
fromaddr = "*****@*****.**"
Example #41
    def __init__(self, configuration, names, type='video', convert_to_grayscale=False,
                 progress_signal=None):
        """
        Initialize the Frame object, and read all images. Images can be stored in a video file or
        as single images in a directory.

        :param configuration: Configuration object with parameters
        :param names: In case "video": name of the video file. In case "image": list of names for
                      all images.
        :param type: Either "video" or "image".
        :param convert_to_grayscale: If "True", convert frames to grayscale if they are RGB.
        :param progress_signal: Either None (no progress signalling), or a signal with the signature
                                (str, int) with the current activity (str) and the progress in
                                percent (int).
        """

        self.configuration = configuration
        self.progress_signal = progress_signal

        if type == 'image':
            # Use scipy.misc to read in image files. If "convert_to_grayscale" is True, convert
            # pixel values to 32bit floats.
            self.number = len(names)
            self.signal_step_size = max(int(self.number / 10), 1)
            if convert_to_grayscale:
                self.frames_original = [misc.imread(path, mode='F') for path in names]
            else:
                self.frames_original = []
                for frame_index, path in enumerate(names):
                    # After every "signal_step_size"th frame, send a progress signal to the main GUI.
                    if self.progress_signal is not None and frame_index%self.signal_step_size == 0:
                        self.progress_signal.emit("Read all frames",
                                             int((frame_index / self.number) * 100.))
                    # Read the next frame.
                    frame = imread(path, -1)
                    self.frames_original.append(frame)

                if self.progress_signal is not None:
                    self.progress_signal.emit("Read all frames", 100)
            self.shape = self.frames_original[0].shape
            dt0 = self.frames_original[0].dtype

            # Test if all images have the same shape, color type and depth.
            # If not, raise an exception.
            for image in self.frames_original:
                if image.shape != self.shape:
                    raise ShapeError("Images have different size")
                elif len(self.shape) != len(image.shape):
                    raise ShapeError("Mixing grayscale and color images not supported")
                if image.dtype != dt0:
                    raise TypeError("Images have different type")

        elif type == 'video':
            # In case "video", use OpenCV to capture frames from video file. Revert the implicit
            # conversion from RGB to BGR in OpenCV input.
            cap = VideoCapture(names)
            self.number = int(cap.get(CAP_PROP_FRAME_COUNT))
            self.frames_original = []
            self.signal_step_size = max(int(self.number / 10), 1)
            for frame_index in range(self.number):
                # After every "signal_step_size"th frame, send a progress signal to the main GUI.
                if self.progress_signal is not None and frame_index%self.signal_step_size == 0:
                    self.progress_signal.emit("Read all frames", int((frame_index/self.number)*100.))
                # Read the next frame.
                ret, frame = cap.read()
                if ret:
                    if convert_to_grayscale:
                        self.frames_original.append(cvtColor(frame, COLOR_BGR2GRAY))
                    else:
                        self.frames_original.append(cvtColor(frame, COLOR_BGR2RGB))
                else:
                    raise IOError("Error in reading video frame")
            cap.release()
            if self.progress_signal is not None:
                self.progress_signal.emit("Read all frames", 100)
            self.shape = self.frames_original[0].shape
            dt0 = self.frames_original[0].dtype
        else:
            raise TypeError("Image type " + type + " not supported")

        # Monochrome images are stored as 2D arrays, color images as 3D.
        if len(self.shape) == 2:
            self.color = False
        elif len(self.shape) == 3:
            self.color = True
        else:
            raise ShapeError("Image shape not supported")

        # Set the depth value of all images to either 16 or 8 bits.
        if dt0 == 'uint16':
            self.depth = 16
        elif dt0 == 'uint8':
            self.depth = 8
        else:
            raise TypeError("Frame type " + str(dt0) + " not supported")

        # Initialize lists of monochrome frames (with and without Gaussian blur) and their
        # Laplacians.
        self.frames_monochrome = None
        self.frames_monochrome_blurred = None
        self.frames_monochrome_blurred_laplacian = None
        self.used_alignment_points = None
Example #42
def capture_images_continually(capture: cv2.VideoCapture, model, classes, img_size, device):
    global outputFrame, LINE_COORD_COLOR, DETECT_TRAFFIC_LIGHT, TRAFFIC_LIGHT_RECT

    violations = {}
    tracked_paths = {}
    track = sort.Sort()
    i = 0
    while True:
        i += 1
        ret, frame = capture.read()

        t0 = 0
        if PRINT_FRAME_DURATION:
            t0 = time.time()

        if not ret:
            print('no video')
            capture.set(cv2.CAP_PROP_POS_FRAMES, 0)
            continue

        with torch.no_grad():
            boxes = detect.detect(model, frame, img_size, device=device)

        boxes_cars = boxes[np.isin(boxes[:, 5], [2, 3, 5, 7])]  # take car, motorbike, bus, truck

        if DETECT_TRAFFIC_LIGHT:
            DETECT_TRAFFIC_LIGHT = False
            traffic_lights = boxes[np.isin(boxes[:, 5], [9])]
            traffic_lights_coords = traffic_lights[:, :4]
            lights = traffic_color(frame, traffic_lights_coords)

            for color, light in lights:
                if color != NO_COLOR:
                    TRAFFIC_LIGHT_RECT = (light[0], light[1]), (light[2], light[3])
        if DETECT_TRAFFIC_LIGHT_COLOR:
            nptraffic_coords = np.reshape(np.array([*TRAFFIC_LIGHT_RECT[0], *TRAFFIC_LIGHT_RECT[1]]), (1, 4))
            lights = traffic_color(frame, nptraffic_coords)
            detected_tr_lights = Counter(
                list(filter(lambda x: x != NO_COLOR, map(lambda x: x[0], lights)))).most_common()
            if len(detected_tr_lights) > 0:
                LINE_COORD_COLOR = detected_tr_lights[0][0]

            # draw traffic light
            cv2.rectangle(frame, TRAFFIC_LIGHT_RECT[0], TRAFFIC_LIGHT_RECT[1], (255, 215, 0), 2)
            cv2.putText(frame, LINE_COORD_COLOR, (int(TRAFFIC_LIGHT_RECT[0][0]), int(TRAFFIC_LIGHT_RECT[0][1] - 10)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.9,
                        (36, 255, 12), 2)

        boxes_no_class = boxes_cars[:, :-1]

        if DRAW_DETECTION_BOXES:
            for x0, y0, x1, y1, confidence in boxes_no_class:
                x0, y0, x1, y1 = map(int, [x0, y0, x1, y1])
                cv2.rectangle(frame, (x0, y0), (x1, y1), (0, 255, 255), 2)

        tracked_objects = track.update(boxes_no_class)

        # process tracking boxes
        for x0, y0, x1, y1, obj_id in tracked_objects:
            x0, y0, x1, y1 = map(int, [x0, y0, x1, y1])
            if obj_id not in tracked_paths:
                tracked_paths[obj_id] = []
            path = tracked_paths[obj_id]
            path.append((int((x0 + x1) / 2.), int((y0 + y1) / 2.), time.time()))

            if line_intersection(LINE_COORD, path) and LINE_COORD_COLOR == RED_OR_YELLOW:
                if obj_id not in violations:
                    print("violation!")
                    # TODO: store something useful here, otherwise it's just a hashset
                    violations[obj_id] = None

            if obj_id in violations:
                rect_rgb = (0, 0, 215)
                line_rgb = (0, 0, 215)
            else:
                rect_rgb = (0, 215, 0)
                line_rgb = (0, 255, 0)

            # draw tracking box
            if DRAW_TRACKING_BOXES:
                cv2.rectangle(frame, (x0, y0), (x1, y1), rect_rgb, 2)

            for j in range(len(path) - 1):  # use j: the outer frame counter i is still needed
                cv2.line(frame, tuple(path[j][:2]), tuple(path[j + 1][:2]), line_rgb, 2)

        cv2.line(frame, LINE_COORD[0], LINE_COORD[1], (255, 0, 0), 2)

        if PRINT_FRAME_DURATION:
            print(f"frame time: {time.time() - t0:.2}")

        outputFrame = frame

        # clean old paths (older than 30 seconds)
        if i % 1000 == 0:
            tracked_paths = {k: v for k, v in tracked_paths.items() if len(v) != 0}
            for key, val in tracked_paths.items():
                val[:] = [[*a, time_created] for *a, time_created in val if time.time() - time_created < 30]
Example #43
def get_frame_size(video: VideoCapture) -> Tuple[float, float]:
    # note: returns (height, width); some of the other helpers return (width, height)
    return (
        video.get(cv2.CAP_PROP_FRAME_HEIGHT),
        video.get(cv2.CAP_PROP_FRAME_WIDTH)
    )
Example #44
def get_frame_count(video: VideoCapture) -> float:
    return video.get(cv2.CAP_PROP_FRAME_COUNT)
Example #45
 def init(self):
     self.cap = VideoCapture(
         "test_videos/ntb/head_office/Cash_Counter_1-1.dav")
     self.total_input_frames = 0
Example #46
import cv2
import numpy as np
from cv2 import VideoCapture
def callback(x):pass
cap = VideoCapture(0)
cv2.namedWindow('image')
ilowH = 0
ihighH = 179
ilowS = 0
ihighS = 255
ilowV = 0
ihighV = 255
cv2.createTrackbar('lowH','image',ilowH,179,callback)
cv2.createTrackbar('highH','image',ihighH,179,callback)
cv2.createTrackbar('lowS','image',ilowS,255,callback)
cv2.createTrackbar('highS','image',ihighS,255,callback)
cv2.createTrackbar('lowV','image',ilowV,255,callback)
cv2.createTrackbar('highV','image',ihighV,255,callback)
detector = cv2.SimpleBlobDetector_create()
while True:
    ret, frame = cap.read()
    if not ret:
        break  # stop if the camera stops delivering frames

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    ilowH = cv2.getTrackbarPos('lowH', 'image')
    ihighH = cv2.getTrackbarPos('highH', 'image')
    ilowS = cv2.getTrackbarPos('lowS', 'image')
    ihighS = cv2.getTrackbarPos('highS', 'image')
    ilowV = cv2.getTrackbarPos('lowV', 'image')
    ihighV = cv2.getTrackbarPos('highV', 'image')
    hsv_min = np.array([ilowH, ilowS, ilowV])
    hsv_max = np.array([ihighH, ihighS, ihighV])
Example #47
def video_jump(video_cap: cv2.VideoCapture, frame_id: int):
    video_cap.set(cv2.CAP_PROP_POS_FRAMES, frame_id - 1)
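Usage sketch: CAP_PROP_POS_FRAMES is the 0-based index of the frame the next read() will decode, so setting it to frame_id - 1 makes this helper treat frame_id as 1-based (the video path below is illustrative):

import cv2

cap = cv2.VideoCapture('some_video.mp4')
video_jump(cap, 100)     # position so the next read() decodes the 100th frame (1-based)
ret, frame = cap.read()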
Example #48
                futures.append(
                    executor.submit(ocr_function, sudoku[i, j], i, j))
        for f in futures:
            result, i, j = f.result()
            sudoku_pf[i, j] = int(result)

        print(sudoku_pf)

    stop = time()
    exec_time = stop - start

    return sudoku_pf, True, exec_time


input = 0 if len(argv) == 1 else argv[1]
cap = VideoCapture(input)  # Use video/webcam as input source
#image = imread("sudoku3.jpg")       # Use image as input source

ret_val = True
sudoku = None
solved = False

# If True, ocr.space API is used, if False, Tesseract OCR is used
use_api = False
correct, incorrect = [], []
intersections, h_lines, v_lines = [], [], []

# while-loop keeps reading images from the source until ret_val is false,
# which means no image has been retrieved from the source
while ret_val:
    if sudoku is None:
Example #49
def get_current_frame_time(video_cap: cv2.VideoCapture) -> float:
    return video_cap.get(cv2.CAP_PROP_POS_MSEC) / 1000
Example #50
import cv2 as cv
from cv2 import VideoCapture
import numpy as np

# checking installation status & version of module
print(f'Version number is {cv.__version__} ')

# open the input source: a video file here; passing the int 0 instead would select the first camera
cap = VideoCapture('test/shesgone.mp4')
program_masih_berjalan = True  # Indonesian: "the program is still running"
while program_masih_berjalan:
  if not cap.isOpened():
    print('Error opening camera')
    break
  else:
    # frame by frame capture
    ret, frame = cap.read()
    # if frame is read correctly ret is True
    if not ret:
      print("Can't receive frame (stream end?). Exiting ...")
      break
    # Our operations on the frame come here
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    # Display the resulting frame
    cv.imshow('frame', gray)
    if cv.waitKey(1) == ord('q'):
      cap.release()
      cv.destroyAllWindows()
      break

Example #51
def get_frame_size(video_cap: cv2.VideoCapture) -> typing.Tuple[int, int]:
    """ return size of frame: (width, height) """
    h = video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    w = video_cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    return int(w), int(h)
Example #52
# Load the labels from COCO.
with open(labels_file, 'r') as lines:
    coco_labels = lines.readlines()[0].split(',')

# Import the DarkNet model, and load the pre-trained weights.
net = DarkNet(cfg_file, weights_file)

# Get the colors that will be used to create the bounding boxes.
np.random.seed(42)
colors = np.random.randint(0, 255, size=(len(coco_labels), 3))

# Part 1: Video processing.
if img_or_vid == 'vid':

    # Use OpenCV to load the video and obtain the number of frames.
    video = VC(input_file)
    number_of_frames = int(video.get(n_frames))
    fps = video.get(n_fps)

    # Create a new video.
    _, first_frame = video.read()
    video_shape = first_frame.shape
    new_video = CW(output_file,
                   cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                   mt.floor(fps), (video_shape[1], video_shape[0]),
                   isColor=True)

    # Process the video.
    classes = []
    count = 0
    n = 1  # Change this value if you want to process non-sequential frame.
Example #53
def calibrate_affine(
        src_cam: cv.VideoCapture,
        dst_cam: cv.VideoCapture,
        n_images=10,
        chessboard_size=(6, 9),
        size=(640, 480),
):
    src_points = []
    dst_points = []
    n_captured_images = 0
    while n_captured_images < n_images:
        # read images from both cameras
        is_ok, src_image = src_cam.read()
        if not is_ok:
            continue

        is_ok, dst_image = dst_cam.read()
        if not is_ok:
            continue

        # resize
        src_resized = utils.resize(src_image, size)
        dst_resized = utils.resize(dst_image, size)

        # get BGR and Gray
        src_bgr, src_gray = utils.to_bgr_gray(src_resized)
        dst_bgr, dst_gray = utils.to_bgr_gray(dst_resized)

        # get corners
        src_corners_ok, src_corners = utils.get_chessboard_corners(
            src_gray, chessboard_size)
        dst_corners_ok, dst_corners = utils.get_chessboard_corners(
            dst_gray, chessboard_size)

        # draw
        cv.drawChessboardCorners(src_bgr, chessboard_size, src_corners,
                                 src_corners_ok)
        cv.drawChessboardCorners(dst_bgr, chessboard_size, dst_corners,
                                 dst_corners_ok)
        cv.putText(
            src_bgr,
            f"{n_captured_images}/{n_images}",
            (0, src_bgr.shape[0]),
            cv.FONT_HERSHEY_SIMPLEX,
            2,
            (255, 0, 0),
        )

        cv.putText(
            dst_bgr,
            f"{n_captured_images}/{n_images}",
            (0, dst_bgr.shape[0]),
            cv.FONT_HERSHEY_SIMPLEX,
            2,
            (255, 0, 0),
        )

        cv.imshow('src', src_bgr)
        cv.imshow('dst', dst_bgr)

        # save if ok
        both_ok = src_corners_ok and dst_corners_ok
        wait_time = 10
        if both_ok:
            wait_time = -1

        key = cv.waitKey(wait_time)
        if key == 32 and both_ok:
            n_captured_images += 1
            src_points.append(src_corners)
            dst_points.append(dst_corners)

    cv.destroyAllWindows()
    # calc affine transform
    np_src_points = np.reshape(src_points, (-1, 2))
    np_dst_points = np.reshape(dst_points, (-1, 2))
    trans_mat, inliers = cv.estimateAffinePartial2D(np_src_points,
                                                    np_dst_points,
                                                    method=cv.RANSAC)

    return trans_mat
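A minimal usage sketch for calibrate_affine; the camera indices, working size, and output path are assumptions for illustration. The returned 2x3 matrix from cv.estimateAffinePartial2D can be applied with cv.warpAffine to map src-camera pixels into the dst camera's frame:

import cv2 as cv

src_cam = cv.VideoCapture(0)  # assumed camera indices
dst_cam = cv.VideoCapture(1)
trans_mat = calibrate_affine(src_cam, dst_cam, n_images=10)

ok, frame = src_cam.read()
if ok:
    # Resize to the calibration working size first, since the corner
    # points were collected on frames resized to (640, 480).
    frame = cv.resize(frame, (640, 480))
    aligned = cv.warpAffine(frame, trans_mat, (640, 480))
    cv.imwrite('aligned.png', aligned)  # assumed output path

src_cam.release()
dst_cam.release()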
Example #54
0
 def get_video_capture(self, video_path: Path) -> VideoCapture:
     return VideoCapture(str(video_path))
Example #55
0
# Time how long it takes to predict the smoke-free image
before = datetime.now()
# Predict the smoke-free image
predictionNeutral = model.predict(imageNeutral, batch_size=1)
after = datetime.now()
print('Prediction time neutral image: ' +
      str((after - before).total_seconds()))
# Create the masks for each image
maskSmoke = threshold((predictionSmoke[0][..., 0] * 255.).astype('uint8'), 127,
                      255, 0)[1].astype('uint8')
maskNeutral = threshold((predictionNeutral[0][..., 0] * 255.).astype('uint8'),
                        127, 255, 0)[1].astype('uint8')
# Save the resized images and the predicted masks
imwrite('/root/Smoke/Test/smokeMask.png', maskSmoke)
imwrite('/root/Smoke/Test/smokeAug.png', cvtColor(imageSmokeAug, 4))  # 4 == COLOR_BGR2RGB
imwrite('/root/Smoke/Test/neutralMask.png', maskNeutral)
imwrite('/root/Smoke/Test/neutralAug.png', cvtColor(imageNeutralAug, 4))  # 4 == COLOR_BGR2RGB
# Print whether the prediction failed or succeeded
if sum(sum(maskSmoke)) <= 0 or sum(sum(maskNeutral)) > 0:
    print('Prediction FAILED!')
else:
    print('Prediction SUCCESSFUL!')
# Take one capture from each camera (use a single capture object;
# the original created and released a second, separate instance)
for i in range(1):
    cam = VideoCapture(i)
    imwrite('/root/Smoke/TestCam/' + 'imageCamera' + str(i) + '.png',
            cam.read()[1])
    cam.release()
# Report the total time the test has taken
afterAll = datetime.now()
print('Time: ' + str((afterAll - beforeAll).total_seconds()))
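The pass/fail check above applies the builtin sum twice to a 2-D array; an equivalent, more explicit NumPy version (a sketch, assuming the masks are the uint8 arrays produced by threshold above):

import numpy as np

def prediction_ok(mask_smoke, mask_neutral):
    # Success: the smoke mask has at least one positive pixel and
    # the neutral (smoke-free) mask has none.
    return (np.count_nonzero(mask_smoke) > 0
            and np.count_nonzero(mask_neutral) == 0)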
Example #57
0
    def approximate(self, source_path, algorithm, num_points):
        # Check to ensure that a file exists at the specified path
        if (not video_exists(source_path)):
            logger.error("Failed to find a video file at the specified path.")
            return

        # Initialize the views directory
        VIEWS_DIRECTORY = "views"
        initialize_views_directory(VIEWS_DIRECTORY)

        # Setting up video reader
        video = VideoCapture(source_path)

        # Find the dimensions of the video to define the video cube
        num_frames, video_width, video_height, fps = get_video_parameters(
            video)
        vc = VideoCube(num_frames, video_height, video_width)

        # Setting up video writer
        fourcc = VideoWriter_fourcc(*'mp4v')
        video_out = VideoWriter(VIEWS_DIRECTORY + "/output.mp4", fourcc, fps,
                                (video_height, video_width))
        video_out_lines = VideoWriter(
            VIEWS_DIRECTORY + "/output_view_lines.mp4", fourcc, fps,
            (video_height, video_width))
        video_out_corners = VideoWriter(
            VIEWS_DIRECTORY + "/output_view_corners.mp4", fourcc, fps,
            (video_height, video_width))
        video_out_keypoints = VideoWriter(
            VIEWS_DIRECTORY + "/output_view_keypoints.mp4", fourcc, fps,
            (video_height, video_width))

        # Initialize the video cube according to the point initialization algorithm
        # Exit if points failed to be placed
        if not self._initialize_point_placer(algorithm, vc, num_points, video):
            logger.error("Point placement failed to initialize.")
            return

        # Tetrahedralize the video cube
        vc.tetrahedralize()

        # Loop through the frames of the video
        frame_number = 0
        while video.isOpened():
            # Read a frame of the video
            frames_remain, frame = video.read()

            # Stop reading if we reach the end of the video
            if not frames_remain:
                break

            # Slice the video cube at this frame and create a low poly frame
            frame_lp, frame_lp_lines, frame_lp_corners, lp_frame_keypoints = vc.slice_cube(
                frame, frame_number)
            frame_number += 1

            # Write the low poly frame
            video_out.write(frame_lp)
            video_out_lines.write(frame_lp_lines)
            video_out_corners.write(frame_lp_corners)
            video_out_keypoints.write(lp_frame_keypoints)

        # Release video reader and writer
        video_out.release()
        video_out_lines.release()
        video_out_corners.release()
        video_out_keypoints.release()
        video.release()
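One caveat about the writers above: cv2.VideoWriter takes frameSize as (width, height), so passing (video_height, video_width) only yields playable output if the frames handed to write() are transposed to match. For reference, a conventional setup with placeholder values (not the project's code):

import cv2

# frameSize is (width, height) and must match the frames written:
# each frame passed to write() should have shape (height, width, 3).
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
writer = cv2.VideoWriter('out.mp4', fourcc, 30.0, (640, 480))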
Example #58
0
 def _open_run_muvie(self):
     open_file = filedialog.askopenfilename()
     if open_file:
         self._cam = VideoCapture(open_file)
         self._prepare_for_word()
         self._file_name['text'] = 'file name: ' + open_file.split('/')[-1]
Example #59
0
class ProcessPipelineWithURL:
    writeCurses = False

    def __init__(self, cameraStreamURL, Pipeline):
        self.url = cameraStreamURL
        self.pipeline = Pipeline()
        self.logger = logging.getLogger('pipeline-processor')
        self.stream = None
        NetworkTables.setTeam(TEAM_NUMBER)
        NetworkTables.initialize(server=NT_SERVER)
        self.table = NetworkTables.getTable(NT_TABLE_NAME)

    def readStreamFrame(self):
        init = False
        if not self.stream:
            init = True
            self.stream = VideoCapture(self.url)
        connected, frame = self.stream.read()
        if not connected:
            self.logger.warning('Camera stream could not be read')
            time.sleep(1)
            self.stream.release()
            self.stream = None
            return None
        else:
            if init:
                self.logger.debug('Stream status: %s', connected)
            return frame

    def sortTupleListByIdx(self, tupleList, idx):
        return sorted(tupleList, key=lambda x: x[idx])

    def initCurses(self):
        self.writeCurses = True
        self.scr = curses.initscr()

    def processContour(self, contour):
        self.logger.debug("Contour: \n %s", contour)
        x_values = []
        y_values = []
        self.logger.debug("Getting x-values and y-values of contour")
        for arr in contour:
            x_values.append(arr[0][0])
            y_values.append(arr[0][1])
        self.logger.debug("Calculating max and min")
        x_max = numpy.max(x_values)
        y_max = numpy.max(y_values)
        x_min = numpy.min(x_values)
        y_min = numpy.min(y_values)

        self.logger.debug("Calculating width and height")
        width = x_max - x_min
        height = y_max - y_min

        self.logger.debug("Calculating center")
        center = ((x_max + x_min) / 2, (y_max + y_min) / 2)
        self.logger.debug("Contour height: %s, width: %s, center: %s", height,
                          width, center)
        return width, height, center

    def sendPipelineOutput(self):
        self.logger.debug("start of sendPipelineOutput class")
        idx = 0
        self.logger.debug("get contour list from pipeline")
        contour_list = getattr(self.pipeline, "filter_contours_output")

        if len(contour_list) == 0:
            return

        if self.writeCurses:
            self.scr.clear()

        self.logger.debug("iterating through contour list")
        for contour in contour_list:
            n = "contour_%s" % idx
            self.logger.debug("processing the contour")
            width, height, center = self.processContour(contour)
            self.logger.debug("send width, height, center to networktables")
            self.table.putNumber(n + "_width", width)
            self.table.putNumber(n + "_height", height)
            self.table.putNumberArray(n + "_centerpoint", center)
            self.logger.debug('Name: %s height: %s width: %s center: %s', n,
                              height, width, center)
            idx += 1
            if self.writeCurses:
                for point in contour:
                    self.cursesTerminalWrite(point)
                self.cursesTerminalWrite(center, char="X")

            self.logger.debug('Centerpoint: (%s,%s)', center[1], center[0])

    def cursesTerminalWrite(self, point, char="#"):
        percent = tuple(map(operator.truediv, point, VIS_SIZE))
        screenPercent = (percent[1], percent[0])
        x, y = tuple(map(operator.mul, screenPercent, self.scr.getmaxyx()))
        try:
            self.scr.addstr(int(x), int(y), str(char))
            self.scr.refresh()
        except BaseException as er:
            print(er)

    def run(self):
        self.logger.info('Attempting to process camera stream')
        while True:
            self.logger.debug("read frame")
            frame = self.readStreamFrame()
            if frame is not None:
                self.logger.debug("Sending frame to GRIP pipeline")
                self.pipeline.process(frame)
                self.logger.debug("Start sending to rio")
                self.sendPipelineOutput()
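For reference, processContour above recomputes an axis-aligned bounding box by hand; cv2.boundingRect yields the same information (a sketch, assuming contours in OpenCV's usual (N, 1, 2) shape; note that boundingRect's width and height are inclusive, so they come out one pixel larger than the max-minus-min values computed above):

import cv2

def process_contour_cv(contour):
    # Axis-aligned bounding box straight from OpenCV.
    x, y, w, h = cv2.boundingRect(contour)
    center = (x + w / 2, y + h / 2)
    return w, h, center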
Example #60
0
class GamePage(BasePage):
    def __init__(self, window):
        super(GamePage, self).__init__(window)
        # ------------------------- Game-over flag -------------------------#
        self.hasGameOver = False
        # ------------------------- Left/right scores -------------------------#
        self.leftScore = 0
        self.rightScore = 0
        # ------------------------- Game mode -------------------------#
        # Number of faces to detect
        if BasePage.mode == MODE.SINGLE:
            self.faceCount = 1
        elif BasePage.mode == MODE.DOUBLE:
            self.faceCount = 2
        # ------------------------- Pause -------------------------#
        # Whether the game is paused
        self.isPause = False
        # Pause text surface
        self.pauseSurface = self.middleFont.render('Pause', True, WHITE)
        # Get the text's rect
        self.pauseRect = self.pauseSurface.get_rect()
        # Center the rect on the window
        self.pauseRect.center = (WINDOW_WIDTH / 2, WINDOW_HEIGHT / 2)
        # ------------------------- Detected face boxes -------------------------#
        self.faceRects = []
        # ------------------------- Paddle data -------------------------#
        self.racketSurfaces = []
        # ------------------------- Paddle -------------------------#
        # Ping-pong paddle image
        self.racketSurface = image.load('image/pai.png')
        # Paddle width and height
        self.racketW = self.racketSurface.get_width()
        self.racketH = self.racketSurface.get_height()

        # ------------------------- Ball position -------------------------#
        self.pos_x = 300  # Ball rect x coordinate
        self.pos_y = 250  # Ball rect y coordinate
        # Ball velocity components
        self.vel_x = 24  # x-axis velocity
        self.vel_y = 12  # y-axis velocity
        # ------------------------- Ball -------------------------#
        # Ping-pong ball image
        ballSurface = image.load('image/ball.png')
        # Ball width and height
        w = ballSurface.get_width()
        h = ballSurface.get_height()
        self.ballSurface = transform.scale(ballSurface, (w // 20, h // 20))
        # Ball rect
        self.ballRect = self.ballSurface.get_rect()
        # Ball width and height after scaling
        self.ballW = self.ballRect.width
        self.ballH = self.ballRect.height

        # Place the ball at its starting position
        self.ballRect.topleft = (self.pos_x, self.pos_y)

        # TensorFlow face detector
        self.tfDetector = TensoflowFaceDector(PATH_TO_CKPT)
        # ---------------------- Read the config to get the camera id ---------------------- #
        f = open('config/config.txt')
        lines = f.readlines()
        for line in lines:
            if line.startswith('camera_index'):
                CAMERA_INDEX = int(line[line.index(':') + 1:])
            elif line.startswith('game_over_score'):
                self.game_over_score = int(line[line.index(':') + 1:])
        print('--------------- camera id:', CAMERA_INDEX)
        # Camera (props 3/4 are CAP_PROP_FRAME_WIDTH/HEIGHT)
        self.camera = VideoCapture(CAMERA_INDEX)
        self.camera.set(3, WINDOW_WIDTH)
        self.camera.set(4, WINDOW_HEIGHT)

    def getCamFrame(self):
        '''
        Grab one frame of data from the camera
        :return: a pygame surface, or None on failure
        '''
        # Clear the face box data
        self.faceRects.clear()
        if not self.camera.isOpened():
            print('Camera is not open')
            return None
        # Grab an image
        retval, frame = self.camera.read()

        if retval:
            print('Frame grab succeeded ----------------------------------------')
            frame = cvtColor(frame, COLOR_BGR2RGB)
            [h, w] = frame.shape[:2]
            # Detect faces with TensorFlow
            (boxes, scores, classes,
             num_detections) = self.tfDetector.run(frame)

            boxes = squeeze(boxes)
            scores = squeeze(scores)
            for i in range(0, self.faceCount):
                if scores[i] > 0.3:
                    ymin, xmax, ymax, xmin = boxes[i, 0], boxes[i, 1], boxes[
                        i, 2], boxes[i, 3]
                    left, right, top, bottom = (xmin * w, xmax * w, ymin * h,
                                                ymax * h)
                    # Mirror the left/right coordinates
                    left = WINDOW_WIDTH - left
                    right = WINDOW_WIDTH - right
                    self.faceRects.append((left, right, top, bottom))
                    print('--------left', left, ' right', right)
                    # cv2.rectangle(frame, (int(left), int(top)), (int(right), int(bottom)), (0, 255, 255), 2)

            frame = rot90(frame)
            frame = surfarray.make_surface(frame)
            return frame
        else:
            print('Frame grab failed ---------------------------------------------')
        return None

    def handleSingleModeCollision(self):
        '''Single-player mode collision handling'''
        for _, rackRect in self.racketSurfaces:
            left = rackRect[0]
            top = rackRect[1]
            width = rackRect[2]
            height = rackRect[3]

            # Right-edge rect of the paddle
            rightRect = Rect(left + width - 1, top, 1, height * 2 // 3)
            # Ball coming from the right: did it hit the right edge?
            if rightRect.colliderect(self.ballRect) and self.vel_x < 0:
                self.vel_x = abs(self.vel_x)
                self.rightScore += 1
                return

            # Left-edge rect of the paddle
            leftRect = Rect(left, top, 1, height * 2 // 3)
            # Ball coming from the left: did it hit the left edge?
            if leftRect.colliderect(self.ballRect) and self.vel_x > 0:
                self.vel_x = -abs(self.vel_x)
                self.leftScore += 1
                return

    def handleDoubleModeCollision(self):
        '''Two-player mode collision handling'''
        for _, rackRect in self.racketSurfaces:
            left = rackRect[0]
            top = rackRect[1]
            width = rackRect[2]
            height = rackRect[3]
            # Paddle center x
            centerX = left + width / 2
            # Decide which side of the window the paddle is on
            if centerX < WINDOW_WIDTH / 2:
                # Left side: use the paddle's right edge
                rightRect = Rect(left + width - 1, top, 1, height * 2 // 3)
                # Ball coming from the right: did it hit the right edge?
                if rightRect.colliderect(self.ballRect) and self.vel_x < 0:
                    self.vel_x = abs(self.vel_x)
                    return
            else:
                # Right side: use the paddle's left edge
                leftRect = Rect(left, top, 1, height * 2 // 3)
                # Ball coming from the left: did it hit the left edge?
                if leftRect.colliderect(self.ballRect) and self.vel_x > 0:
                    self.vel_x = -abs(self.vel_x)
                    return

    def handleCollision(self):
        '''Handle paddle/ball collisions'''
        if self.mode == MODE.SINGLE:
            self.handleSingleModeCollision()
        elif self.mode == MODE.DOUBLE:
            self.handleDoubleModeCollision()

    def display(self):
        # If paused, just show the pause text
        if self.isPause:
            self.window.blit(self.pauseSurface, self.pauseRect)
            return
        # Grab one frame from the camera
        frame = self.getCamFrame()
        # Nothing to do without a frame
        if frame is None:
            print('No frame captured')
            return
        # Show the camera frame
        self.window.blit(frame, (0, 0))
        # Show the face-tracking paddles
        self.showRacket()
        # Handle paddle/ball collisions
        self.handleCollision()
        # Show the ball
        self.showBall()
        # Show the score
        self.showScore()
        if not self.hasGameOver and self.mode == MODE.DOUBLE:
            # Check for game over
            self.gameOver()

    def gameOver(self):
        '''Check whether the game is over'''
        if self.leftScore >= self.game_over_score or self.rightScore >= self.game_over_score:
            # Set the game-over flag
            self.hasGameOver = True
            # Render one final frame
            self.display()
            # Refresh the window
            display.flip()
            # Switch to the game-over page
            BasePage.pageType = PAGETYPE.GAMEOVER

    def showSingleModeScore(self):
        '''Single-player mode score'''
        # Score text
        self.leftScoreSurface = self.middleFont.render(
            'Left hits: {}'.format(self.leftScore), True, WHITE)
        self.rightScoreSurface = self.middleFont.render(
            'Right hits: {}'.format(self.rightScore), True, WHITE)
        # Draw the scores
        self.window.blit(self.leftScoreSurface, (100, 50))
        self.window.blit(self.rightScoreSurface, (WINDOW_WIDTH - 200, 50))

    def showDoubleModeScore(self):
        '''Two-player mode score'''
        # Score text
        self.leftScoreSurface = self.middleFont.render(
            '{}'.format(self.leftScore), True, WHITE)
        self.rightScoreSurface = self.middleFont.render(
            '{}'.format(self.rightScore), True, WHITE)
        # Draw the scores
        self.window.blit(self.leftScoreSurface, (100, 50))
        self.window.blit(self.rightScoreSurface, (WINDOW_WIDTH - 100, 50))

    def showScore(self):
        '''Show the score for the current mode'''
        if self.mode == MODE.SINGLE:
            self.showSingleModeScore()
        if self.mode == MODE.DOUBLE:
            self.showDoubleModeScore()

    def constructRacketDatas(self):
        '''Build paddle data from the detected face boxes'''
        # Clear old data
        self.racketSurfaces.clear()
        for face in self.faceRects:
            left = face[0]
            right = face[1]
            top = face[2]
            bottom = face[3]

            w = right - left
            h = bottom - top
            # Center point of the face box
            centerP = left + w // 2, top + h // 2
            # If the left side has won, don't show paddles on the right,
            # and vice versa
            if (self.leftScore >= 5 and centerP[0] > WINDOW_WIDTH / 2) or (
                    self.rightScore >= 5 and centerP[0] < WINDOW_WIDTH / 2):
                continue
            # Scale factor: fit the paddle to the face box
            if w > h:
                rf = self.racketH / h
            else:
                rf = self.racketW / w
            print('Scale factor', rf)
            racketSurface = transform.scale(
                self.racketSurface,
                (int(self.racketW / rf), int(self.racketH / rf)))
            # Paddle rect
            racketRect = racketSurface.get_rect()
            # Center the paddle on the face
            racketRect.center = centerP
            self.racketSurfaces.append((racketSurface, racketRect))

    def showRacket(self):
        '''Show the paddles'''
        # Build the paddle data
        self.constructRacketDatas()
        # Blit each paddle onto the window
        for ele in self.racketSurfaces:
            self.window.blit(ele[0], ele[1])

    def showBall(self):
        '''Show the ball'''
        # Move the ball
        self.pos_x += self.vel_x
        self.pos_y += self.vel_y

        # Keep the ball inside the window, bouncing off the edges
        if self.pos_x > WINDOW_WIDTH - self.ballW:
            self.vel_x = -self.vel_x
            if self.mode == MODE.DOUBLE:
                # Left side scores
                self.leftScore += 1
        elif self.pos_x < 0:
            self.vel_x = -self.vel_x
            if self.mode == MODE.DOUBLE:
                # Right side scores
                self.rightScore += 1

        if self.pos_y > WINDOW_HEIGHT - self.ballH or self.pos_y < 0:
            self.vel_y = -self.vel_y
        # Update the ball's position
        self.ballRect.topleft = (self.pos_x, self.pos_y)
        # Draw the ball
        self.window.blit(self.ballSurface, self.ballRect)

    def keyDown(self, key):
        '''
        Key-down event handler
        :param key: the key that was pressed
        '''
        if key == K_SPACE:
            # Space toggles pause
            self.isPause = not self.isPause
        elif key == K_TAB:
            # Tab returns to the splash page
            BasePage.pageType = PAGETYPE.SPLASH
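The config parsing in __init__ above implies a plain-text config/config.txt with colon-separated key/value lines; a plausible example (the values shown are assumptions):

camera_index:0
game_over_score:5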