Example #1
    def trackingPoint(self, img, idx):
        current_point = self.points[idx]
        current_velocity = self.velocities[idx]

        # Get a local tracking image
        sub_background = self.background[
            current_point[0] - self.box_delta_y_up:current_point[0] +
            self.box_delta_y_down, current_point[1] -
            self.box_delta_x_left:current_point[1] + self.box_delta_x_right]
        sub_img_orig = img[current_point[0] -
                           self.box_delta_y_up:current_point[0] +
                           self.box_delta_y_down, current_point[1] -
                           self.box_delta_x_left:current_point[1] +
                           self.box_delta_x_right]

        background_ext = cv2.BackgroundSubtractorMOG2()
        background_ext.apply(sub_background)
        sub_img = background_ext.apply(sub_img_orig)

        # Remove noise and shadows
        _, img_thresholded = cv2.threshold(sub_img, 200, 255,
                                           cv2.THRESH_BINARY)

        contours, _ = cv2.findContours(img_thresholded, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
        filtered_contours = []
        for contour in contours:
            if cv2.contourArea(contour) >= self.area_threshold:
                filtered_contours.append(contour)

        down_right_bounds = np.array(
            map(partial(np.amax, axis=0), filtered_contours), np.int)
        up_left_bounds = np.array(
            map(partial(np.amin, axis=0), filtered_contours), np.int)
        centers = np.array(
            map(partial(np.mean, axis=0), zip(down_right_bounds,
                                              up_left_bounds)), np.int)

        tracking_points = []
        for f, c in zip(down_right_bounds, centers):
            tracking_points.append([f[0][1], c[0][0]])

        if len(tracking_points) == 0:
            # tracking_points = np.array(
            #     [(c[0][1], c[0][0]) for c in center], np.int)
            tracking_points.append([
                self.box_delta_y_up + self.velocities[idx][0],
                self.box_delta_x_left + self.velocities[idx][1]
            ])
        else:
            tracking_points = np.array(tracking_points, np.int)

        if len(tracking_points) >= 1:
            point = self.__minPoint(tracking_points)
            point[0] = current_point[0] + point[0] - self.box_delta_y_up
            point[1] = current_point[1] + point[1] - self.box_delta_x_left
            self.velocities[idx][0] = point[0] - current_point[0]
            self.velocities[idx][1] = point[1] - current_point[1]

        return point
Example #2
def mog2(vid):
    # requires opencv 2.4
    fgbg = cv2.BackgroundSubtractorMOG2()
    oldtime = vid.get(cv2.cv.CV_CAP_PROP_POS_MSEC)
    while (vid.isOpened() and
           (vid.get(cv2.cv.CV_CAP_PROP_POS_MSEC) - oldtime <= 30000)):
        ret, frame = vid.read()
        if not ret:
            break
        fgmask = fgbg.apply(frame)
        cv2.imshow('frame', fgmask)
        cv2.waitKey(1)
    vid.release()
    cv2.destroyAllWindows()


# def knn(vid):
#     fgbg = cv2.createBackgroundSubtractorKNN()
#     while 1:
#         ret,frame = vid.read()
#         fgmask = fgbg.apply(frame)
#         bgmask = fgbg.getBackgroundImage()
#         cv2.imshow('frame',fgmask)
#         cv2.imshow('bg',bgmask)
#         cv2.waitKey(1)
#     cv2.destroyAllWindows()
#     vid.release()
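Most snippets on this page use the OpenCV 2.4 spelling `cv2.BackgroundSubtractorMOG2(...)`; OpenCV 3 and later replaced it with the factory function `cv2.createBackgroundSubtractorMOG2(...)` (Example #18 below dispatches on the version string). A minimal version-agnostic helper, assuming only that `cv2` is importable (the helper name is ours):

import cv2

def make_mog2(history=500, var_threshold=16, shadows=False):
    # OpenCV 2.4 exposes the class directly; 3.x+ provides a factory function
    if hasattr(cv2, 'createBackgroundSubtractorMOG2'):
        return cv2.createBackgroundSubtractorMOG2(
            history=history, varThreshold=var_threshold, detectShadows=shadows)
    return cv2.BackgroundSubtractorMOG2(history, var_threshold, shadows)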
Example #3
def aa(request):
    cv2.ocl.setUseOpenCL(False)
    cap = cv2.VideoCapture("D:/Document/WeChat Files/hsfbhao539/Files/office/foreground.avi")

    # Method 1: MOG
    fgbg1 = cv2.BackgroundSubtractorMOG()

    # Method 2: MOG2
    fgbg2 = cv2.BackgroundSubtractorMOG2()

    # Method 3: GMG (requires OpenCV 3.0)
    #kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
    #fgbg3 = cv2.BackgroundSubtractorGMG()

    while True:
        ret, frame = cap.read()
        if ret:
            fg_mask1 = fgbg1.apply(frame)
            fg_mask2 = fgbg2.apply(frame)

            #fgmask3 = fgbg3.apply(frame)
            #fgmask3 = cv2.morphologyEx(fgmask3, cv2.MORPH_OPEN, kernel)

            result = np.hstack((fg_mask1, fg_mask2))
            cv2.imshow('frame', result)

            k = cv2.waitKey(30) & 0xff
            if k == 27:
                break
        else:
            break

    cap.release()
    cv2.destroyAllWindows()
Example #4
def init():
    global bg_sub, arrow_cnt, package_path, make_move_service
    rospy.init_node('recognition')
    rospy.wait_for_service(make_move_service_name)
    make_move_service = rospy.ServiceProxy(make_move_service_name, MakeMove)
    bg_sub = cv2.BackgroundSubtractorMOG2()
    ros_pack = rospkg.RosPack()
    package_path = ros_pack.get_path('nao_dance')
    arrow = cv2.pyrDown(cv2.imread(package_path + '/res/arrow.png', 0))
    ret, thresh = cv2.threshold(arrow, 50, 255, 0)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    arrow_cnt = contours[0]
    # canvas = np.zeros(arrow.shape, np.uint8)
    # cv2.drawContours(canvas, contours, 0, 255, 2)
    # cv2.imshow('arrow', canvas)
    cv2.namedWindow('edge')
    cv2.createTrackbar('canny min', 'edge', 100, 1000, nothing)
    cv2.createTrackbar('canny max', 'edge', 200, 1000, nothing)
    cv2.namedWindow('result')
    cv2.createTrackbar('diff% max', 'result', 20, 100, nothing)
    cv2.createTrackbar('area min', 'result', 300, 1000, nothing)
    cv2.createTrackbar('saturation split', 'result', 105, 255, nothing)
    cv2.namedWindow('contours')
    cv2.createTrackbar('approx', 'contours', 34, 100, nothing)
Example #5
    def run(self, frame, options):

        self.options = options

        results = {'frame': frame}

        # Apply normalization
        if self.options['normalize']:
            # Normalize only the saturation channel
            results['frame'] = self.normalize(frame)
            # print 'Normalizing frame'

        # Apply background subtraction
        if self.options['background_sub']:
            frame = cv2.blur(frame, (2, 2))
            # print 'running sub'
            if self.background_sub is None:
                self.background_sub = cv2.BackgroundSubtractorMOG2(
                    0, 30, False)
            bg_mask = self.background_sub.apply(frame)
            results['background_sub'] = bg_mask

        return results
Example #6
def MOG2_test(input_path, save_path):
    cap = cv2.VideoCapture(input_path)

    # 1. Get the video frame rate, frame size and codec:
    fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
    size = (int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)))
    codec = cap.get(cv2.cv.CV_CAP_PROP_FOURCC)

    print(fps, size, codec)

    # Specify the output video format: I420 for .avi, MJPG for .mp4
    videoWriter = cv2.VideoWriter(save_path, cv2.cv.CV_FOURCC('I', '4', '2', '0'), fps, size)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    fgbg = cv2.BackgroundSubtractorMOG2()

    while cap.isOpened():
        ret, frame = cap.read()
        if ret == True:

            fgmask = fgbg.apply(frame)
            fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
            print(type(fgmask))
            # VideoWriter expects 3-channel BGR frames by default
            videoWriter.write(cv2.cvtColor(fgmask, cv2.COLOR_GRAY2BGR))
            cv2.imshow('frame', fgmask)
            k = cv2.waitKey(30) & 0xff
            if k == 27:
                break
        else:
            break
    print('finish.')
    cap.release()
    cv2.destroyAllWindows()
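The conversion before `videoWriter.write` above is needed because `cv2.VideoWriter` defaults to 3-channel BGR input and can silently write nothing when handed a single-channel mask. Alternatively, the writer can be opened in grayscale mode (a sketch, reusing the variables above):

videoWriter = cv2.VideoWriter(save_path, cv2.cv.CV_FOURCC('I', '4', '2', '0'),
                              fps, size, isColor=False)
videoWriter.write(fgmask)  # single-channel frames are accepted now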
Example #7
    def __init__(self, image_shape=(640, 480)):
        # the color frame shape adds a 3-channel dimension
        shape_rgb = image_shape+(3,)

        self.current_frame = np.zeros(shape=shape_rgb, dtype=np.uint8)
        self.proposal_foreground = np.zeros(shape=shape_rgb, dtype=np.uint8)

        self.foreground_mask_long_term = np.zeros(shape=image_shape)
        self.foreground_mask_short_term = np.zeros(shape=image_shape)
        self.background_aggregator = np.zeros(shape=image_shape, dtype=np.int8)
        self.proposal_mask = np.zeros(shape=image_shape, dtype=np.uint8)  # mask from aggregator

        # define Zivkovic background subtraction function LONG
        self.f_bg_long = cv2.BackgroundSubtractorMOG2(BG_ZIV_HIST, BG_ZIV_LONG_THRESH, False)
        # define zivkovic background subtraction function SHORT
        self.f_bg_short = cv2.BackgroundSubtractorMOG2(BG_ZIV_HIST, BG_ZIV_SHORT_THRESH, False)
Example #8
    def __init__(detector, **kwargs):
        # setup default config
        detector.config = {}
        default_params = detector.default_params()
        for pinfos in default_params.values():
            detector.config.update({pi.name: pi.default for pi in pinfos})
        # modify based on user args
        dict_update_subset(detector.config, kwargs)

        # Setup GMM background subtraction algorithm
        logger.debug('Using GMM from cv2.__version__ = {}'.format(
            cv2.__version__))
        if cv2.__version__.startswith('2'):
            # not sure about these params
            detector.background_model = cv2.BackgroundSubtractorMOG2(
                history=detector.config['n_training_frames'],
                varThreshold=detector.config['gmm_thresh'],
                bShadowDetection=False)
        else:
            detector.background_model = cv2.createBackgroundSubtractorMOG2(
                history=detector.config['n_training_frames'],
                varThreshold=detector.config['gmm_thresh'],
                detectShadows=False)

        # Setup detection filter algorithm
        filter_config = dict_subset(
            detector.config, [pi.name for pi in default_params['filter']])
        detector.filter = DetectionShapeFilter(**filter_config)

        detector.n_iters = 0
        # masks from previous iter are kept in memory for visualization
        detector._masks = {}
Example #9
def main():
    bg_subtractor = cv2.BackgroundSubtractorMOG2()

    car_counter = None # Will be created after first frame is captured

    # Set up image source

    #cap = cv2.VideoCapture("flow.mp4")
    cap = cv2.VideoCapture(URL)
    while True:
        ret, frame = cap.read()
        if not ret:
            print 'failed'
            break
        else:
            frame = cv2.resize(frame, (0, 0), fx = K, fy = K)
            if car_counter is None:
                # We do this here, so that we can initialize with actual frame size
                #car_counter = VehicleCounter(frame.shape[:2], frame.shape[1] / 2)
                car_counter = VehicleCounter(frame.shape[:2], DIVIDER1, DIVIDER2, DIVIDER3, DIVIDER4, DIVIDER5, DIVIDER6)
                #print frame.shape
            # Archive raw frames from video to disk for later inspection/testing

            processed = process_frame(frame, bg_subtractor, car_counter, K)

            # #cv2.imshow('Source Image', frame)
            # cv2.imshow('Processed Image', processed)

            # c = cv2.waitKey(10)
            # if c == 27:
            #     break

    cap.release()
    cv2.destroyAllWindows()
Example #10
    def __init__(self, width, height):

        self.width = width
        self.height = height

        # Background subtractor for desktop machines
        #self.fgbg = cv2.createBackgroundSubtractorMOG2(10, 8, False)
        # Background subtractor for the Pi
        self.fgbg = cv2.BackgroundSubtractorMOG2(10, 64, False)

        self.maxX = 0
        self.minX = 0

        self.picChangeTime = None
        self.first = True
        self.currCont = None
        self.biggestCont = None
        self.prevBigId = None
        self.kernel = np.ones((3, 3), 'uint8')

        self.blackImg = None
        self.normImg = None

        #Contour settings
        self.maxCont = 0
        self.minCont = 0
Example #11
    def __init__(self, vid_file, skel_file, track_file):

        self.vid = vid_file
        self.track_mask = cv2.imread(track_file, cv2.IMREAD_GRAYSCALE)

        self.skel = cv2.imread(skel_file, cv2.IMREAD_GRAYSCALE)
        self.width = self.skel.shape[1]
        self.height = self.skel.shape[0]
        self.skel[0, :] = 0
        self.skel[:, 0] = 0
        self.skel[self.height - 1, :] = 0
        self.skel[:, self.width - 1] = 0

        # Specify pixel coords for the start line, the finish line,
        # and the gap (a pixel separating start line and finish line)

        # RaceV05 - track6
        self.start = [490, 103]
        self.finish = [492, 103]
        self.sfgap = [491, 103]
        self.newskel = np.zeros([1020, 1463]).astype(np.uint8)

        self.coord_mapping()
        # cv2.imshow('s',self.pixMap)

        self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

        self.fgbg = cv2.BackgroundSubtractorMOG2()
Example #12
    def create_gist_parameters(self, selected_path, video_name):

        org_images = images.read_video_as_list_by_path(selected_path,
                                                       video_name)
        background_model = cv2.BackgroundSubtractorMOG2(len(org_images),
                                                        varThreshold=266,
                                                        bShadowDetection=True)
        for frame in org_images:
            # training pass; note that apply()'s second positional arg is an
            # output fgmask, so the learning rate must be passed by keyword
            # and lie in (0, 1] (1.0 / len(org_images) is an assumed choice)
            background_model.apply(frame, learningRate=1.0 / len(org_images))

        th = 150
        fgmask_set = []
        dp_mask = []
        for frame in org_images:
            forward = background_model.apply(
                frame
            )  # create foreground mask which is gray-scale(0~255) image.
            tmp = cv2.cvtColor(forward, cv2.COLOR_GRAY2BGR)  # convert to color
            dp_mask.append(tmp)
            #convert gray-scale foreground mask to binary image.
            a = stats.threshold(forward, threshmin=th, threshmax=255, newval=0)
            a = stats.threshold(a, threshmin=0, threshmax=th, newval=1)
            fgmask_set.append(a)

        return fgmask_set, org_images, dp_mask
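`stats.threshold` refers to `scipy.stats.threshold`, which was removed in SciPy 0.17. The two chained calls simply binarize the mask, which plain NumPy can do (a sketch matching the original up to pixels exactly equal to `th`):

import numpy as np

def binarize_foreground(forward, th=150):
    # 1 where the MOG2 mask is confidently foreground, 0 elsewhere
    return (forward > th).astype(np.uint8)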
Example #13
def main(args):
    rospy.init_node('graspNet', anonymous=True)
    MOG = cv2.BackgroundSubtractorMOG2()
    cameraTopic = '/camera/rgb/image_color'
    depthTopic = '/depth'
    stream(cameraTopic, depthTopic)
    print 'complete!'
    cv2.destroyAllWindows()
    return 0
Example #14
def MOG2_test(input_path, save_path):
    cap = cv2.VideoCapture(input_path)

    # 1. Get the video frame rate, frame size and codec:
    fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
    size = (int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)))
    codec = cap.get(cv2.cv.CV_CAP_PROP_FOURCC)

    print(fps, size, codec)

    # Specify the output video format: I420 for .avi, MJPG for .mp4
    videoWriter = cv2.VideoWriter(save_path, cv2.cv.CV_FOURCC('I', '4', '2', '0'), fps, size)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    fgbg = cv2.BackgroundSubtractorMOG2()

    while cap.isOpened():
        ret, frame = cap.read()
        if ret == True:

            fgmask = fgbg.apply(frame)
            # cv2.imshow('frame222', fgmask)
            # VideoWriter expects 3-channel BGR frames by default
            videoWriter.write(cv2.cvtColor(fgmask, cv2.COLOR_GRAY2BGR))
            fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
            # print(type(fgmask))
            # fgmask = cv2.cvtColor(fgmask, cv2.COLOR_BGR2GRAY)

            # ret, fgmask = cv2.threshold(fgmask, 114, 255, cv2.THRESH_BINARY)  # 2. binarize to 0-255

            # Closing: reconnects objects that were wrongly split into many small pieces
            closed = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
            # show the closed result
            # cv2.imshow("Close", closed)

            # Opening: removes specks produced by image noise
            opened = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
            # show the opened result
            # cv2.imshow("Open", opened)

            # videoWriter.write(fgmask)
            cv2.imshow('frame', fgmask)
            k = cv2.waitKey(30) & 0xff
            if k == 27:
                break
        else:
            break
    print('finish.')
    cap.release()
    cv2.destroyAllWindows()
Example #15
    def __init__(self, VideoSize, history=2 * 60 * 4, scaleDown=2):
        self.scale_size = (VideoSize[0] / scaleDown, VideoSize[1] / scaleDown)
        self.orgSize = VideoSize
        #self.mask = np.zeros((scale_size[0]/2, scale_size[1]/2, 1), dtype='uint8')
        self.bgs = cv2.BackgroundSubtractorMOG2(history=history, varThreshold=9)  # 60 sec * 4 fps
        self.fgmask = None
        self.learning = True
        self.kernel3x3 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        self.stoplearning_timeout = Tick()
        self.isDrawMask = False
Example #16
    def __init__(self):
        self.settings = settings.Settings()
        self.method = self.settings.bsMethod

        if self.method == 0:
            self.fgbg = cv2.BackgroundSubtractorMOG2(self.settings.MOG2history, self.settings.MOG2thresh, self.settings.MOG2shadow)
            self.foregroundMask = None

        if self.method == 1:
            self.backgroundFrame = None
            self.frameCount = 1
Example #17
    def __init__(self, vid_file):

        vidName = vid_file
        self.vid = av.open(vidName)
        self.num_frames = 0

        self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        # cv2.createBackgroundSubtractorMOG2() works in cv3.0;
        # for 2.4.x use the following:
        self.fgbg = cv2.BackgroundSubtractorMOG2()
        self.detector = cv2.SimpleBlobDetector()
Example #18
  def __init__(self, history, threshold, detectShadows):
    self.history = history
    self.threshold = threshold
    self.detectShadows = detectShadows

    version = int(re.findall(r'\d+', cv2.__version__)[0])
    if version == 3:
      self.operator = cv2.createBackgroundSubtractorMOG2(self.history,
        self.threshold, self.detectShadows)
    elif version == 2:
      self.operator = cv2.BackgroundSubtractorMOG2(self.history,
        self.threshold, self.detectShadows)
    else:
      raise Exception("Unsupported OpenCV version {0}".format(version))
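Calling code is then independent of the installed OpenCV version (a usage sketch; `BackgroundSubtractor` stands in for whatever class encloses the `__init__` above):

subtractor = BackgroundSubtractor(history=200, threshold=16, detectShadows=False)
fgmask = subtractor.operator.apply(frame)  # frame: any BGR image; same call on 2.x and 3.x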
Example #19
def show_webcam(mirror=False):
    cam = cv2.VideoCapture(0)
    fgbg = cv2.BackgroundSubtractorMOG2()
    fgmask = None
    while True:
        ret_val, img = cam.read()
        if mirror:
            img = cv2.flip(img, 1)
        fgmask = fgbg.apply(img, fgmask, 0.03)
        cv2.imshow('my webcam', fgmask)
        print img.shape, img.dtype
        if cv2.waitKey(1) == 27:
            break  # esc to quit
    cv2.destroyAllWindows()
Example #20
def main():
    cap = cv2.VideoCapture('../../background_subtraction/src/vtest.avi')
    fgbg = cv2.BackgroundSubtractorMOG2()

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        fgmask = fgbg.apply(frame)

        cv2.imshow('frame', fgmask)
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
    cap.release()
    cv2.destroyAllWindows()
Example #21
def testBackgroundSubtractorMOG(dataset_path, ID, method, save_masks, Color):

    # Check directories
    output_dir = dataset_path + ID + '/' + method
    print 'Running background subtraction using ' + method + ' ...'
    print 'Output masks will be saved at "' + output_dir + '" directory'
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Use MOG or MOG2 according to input method
    if method == 'MOG':
        fgbg = cv2.BackgroundSubtractorMOG()
    elif method == 'MOG2':
        fgbg = cv2.BackgroundSubtractorMOG2()

    # Get input frames size
    frames_path = dataset_path + ID + '/input'
    frame_list = sorted(os.listdir(frames_path))
    nrows, ncols, nchannels = cv2.imread(
        os.path.join(frames_path, frame_list[0])).shape
    mask_vec = np.zeros([1, nrows * ncols])

    # Read dataset
    train_frames, test_frames, train_gts, test_gts = readDataset(
        dataset_path, ID, Color)

    # Run MOG
    for idx in range(len(test_frames)):
        if Color:
            im = np.uint8(
                np.reshape(test_frames[idx], (nrows, ncols, nchannels)))
        else:
            im = np.uint8(np.reshape(test_frames[idx], (nrows, ncols)))
        fgmask = fgbg.apply(im, learningRate=0.01)
        fgmask = fgmask.ravel()
        fgmask[fgmask == 127] = 0
        fgmask[fgmask > 127] = 1
        mask_vec = np.vstack((mask_vec, fgmask))
        # Save current mask
        if save_masks:
            cv2.imwrite(output_dir + '/img_' + ('%03d' % idx) + '.png',
                        np.reshape(255 * fgmask, (nrows, ncols)))

    # Get predictions and compute performance measures
    predictions = mask_vec[1:, :]
    precision, recall, fscore = evaluateBackgroundEstimation(
        predictions, test_gts)
    print 'F-score:', fscore
    print 'Precision: ', precision
    print 'Recall: ', recall
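The `fgmask == 127` line works because MOG2 with shadow detection enabled marks shadow pixels with the value 127 and foreground with 255 by default, so shadows are folded back into the background before scoring. The same cleanup in one step (a minimal sketch):

binary = (fgmask > 127).astype(np.uint8)  # 255 -> 1; shadows (127) and background -> 0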
Example #22
    def __init__(self):
        self.frame = None
        self.video = cv.VideoCapture(0)
        self.bgfg = cv.BackgroundSubtractorMOG2()
        self.image = None
        self.crop_image = None
        self.gray = None
        self.blurred = None
        self.background = None
        self.thresh = None
        self.contour = None
        self.hull = None
        self.defects = None
        self.mask = None
        self.drawing = None
Example #23
    def __init__(self, w_IP, w_Port, w_UN, w_Pass):

        self.feed_url = "http://" + w_IP + ":" + w_Port + "/vjpeg.v?user=" + w_UN + "&pwd=" + w_Pass
        print self.feed_url

        self.startMotionTime = None
        self.totalMotionTime = 0
        self.totalMotionAcceptable = 25
        self.alertLevel = 0
        self.alertLevelAcceptable = 3

        self.w_Capture = cv2.VideoCapture(self.feed_url)
        self.fgbg = cv2.BackgroundSubtractorMOG2()

        self.motionTracking2()
Example #24
    def __init__(self):
        self.video = None
        self.fgMask = None
        self.sampleFrame = None
        #self.subtractor = cv2.BackgroundSubtractorMOG(history=150, nmixtures=20, backgroundRatio=0.7, noiseSigma=25)
        self.subtractor = cv2.BackgroundSubtractorMOG2(150, 200, False)
        self.lanes = []
        self.lanesImage = []
        self.laneContours = []
        self.lanePoints = []
        self.typeCar = {"small": 0, "medium": 0, "large": 0}
        self.totalLane = 0
        self.video_name = ''
        self.num_frame = 0
        self.timer = None
        self.total_frame = 0
Example #25
    def __init__(self, movieName):

        self.vc = cv2.VideoCapture(movieName)
        if not self.vc.isOpened():
            print "Error opening video file"
            sys.exit(0)

        # skipping the first few frames (to where the car is on the road)
        for ii in range(500):
            self.vc.grab()

        self.history = 10
        self.varThreshold = 16

        self.fgbg = cv2.BackgroundSubtractorMOG2(
            history=self.history, varThreshold=self.varThreshold)
        self.fgbg.setBool('detectShadows', False)
Example #26
    def MOG2(self, cv_image):
        global count
        global bgs_mog
        count = count + 1
        print(count)
        frame = cv_image
        fgmask = bgs_mog.apply(frame)
        kernel = np.ones((10, 10), np.uint8)
        draw = fgmask
        contours, hierarchy = cv2.findContours(fgmask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        for cnt in contours:
            print(cv2.contourArea(cnt))
            if 1500 < cv2.contourArea(cnt) < 30000:
                x, y, w, h = cv2.boundingRect(cnt)
                # cv2.rectangle draws in place (it returns None on OpenCV 2.4)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        if not (count % 4):
            bgs_mog = cv2.BackgroundSubtractorMOG2(50, 20, True)
        return frame
Example #27
    def set_fg_mask_method(self, method):
        '''
        Sets the foreground image mask to the result of a user-specified
        foreground segmentation method. 
        
        The following methods are supported:
        
            - "simple": no-frills image difference and otsu thresholding.
            - "mog": MOG background subtraction algorithm.
            - "mog2": MOG2 background subtraction algorithm.
        
        Args:
            method: The algorithm to use to create the foreground mask. Should
                    be either "simple", "mog", or "mog2".
        Returns:
            True if background and foreground images exist, a valid method
            was specified, and foreground segmentation was applied 
            successfully; false otherwise.
        '''

        if (self.bg_img is None) or (self.fg_img is None):
            return False
        if method.lower() == "simple":
            self.fg_mask = cv2.absdiff(self.bg_img, self.fg_img)
            self.fg_mask = cv2.cvtColor(self.fg_mask, cv2.COLOR_BGR2GRAY)
            __, self.fg_mask = cv2.threshold(
                self.fg_mask, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        elif method.lower() == "mog":
            bg_subtractor = cv2.BackgroundSubtractorMOG()
            bg_subtractor.apply(self.bg_img)
            self.fg_mask = bg_subtractor.apply(self.fg_img)
        elif method.lower() == "mog2":
            bg_subtractor = cv2.BackgroundSubtractorMOG2()
            bg_subtractor.apply(self.bg_img)
            self.fg_mask = bg_subtractor.apply(self.fg_img)
            __, self.fg_mask = cv2.threshold(self.fg_mask, 128, 255,
                                             cv2.THRESH_BINARY)
        else:
            return False

        # kernel = np.ones((7, 7), np.uint8)
        # self.fg_mask = cv2.morphologyEx(self.fg_mask, cv2.MORPH_OPEN, kernel)
        return True
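A usage sketch, assuming the enclosing object exposes `bg_img` and `fg_img` as the method requires (the class name `Segmenter` is hypothetical):

seg = Segmenter()
seg.bg_img = cv2.imread('background.png')
seg.fg_img = cv2.imread('scene.png')
if seg.set_fg_mask_method('mog2'):
    cv2.imshow('foreground mask', seg.fg_mask)
    cv2.waitKey(0)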
Example #28
def substract_bkg(img, bkgs, fname=None):
    """Subtract the image background

    @param img: input foreground image as an np array
    @param bkgs: a list of background images as np arrays

    Keyword arguments:
    fname -- specify to write the subtracted image to disk

    """

    backsub = cv2.BackgroundSubtractorMOG2()
    fgmask = None
    for bkg in bkgs:
        fgmask = backsub.apply(bkg)
    fgmask = backsub.apply(img)
    if fname is not None and type(fname) is str:
        save_img(fgmask, fname=fname)
    return cv2.bitwise_and(img, img, mask=fgmask)
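A usage sketch, assuming one clean background shot of the same scene (file names are placeholders):

img = cv2.imread('frame.png')
bkgs = [cv2.imread('empty_scene.png')]
foreground = substract_bkg(img, bkgs, fname='fgmask.png')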
Example #29
def main():
    cap = cv2.VideoCapture(0)

    #The class implements the Gaussian mixture model background subtraction described in
    #http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf.
    fgbg = cv2.BackgroundSubtractorMOG2()

    while (1):
        ret, frame = cap.read()

        fgmask = fgbg.apply(frame)

        cv2.imshow('frame', fgmask)
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break

    cap.release()
    cv2.destroyAllWindows()
Example #30
def concavityCalculation(res, drawing):
    # (rewritten as Python; the original snippet mixed Java and Python syntax)
    nodes = concavityIndexList()  # list of (x, y) contour points
    bgModel = cv2.BackgroundSubtractorMOG2(0, bgSubThreshold)
    triggerSwitch = False
    indexes = [False] * len(nodes)
    indexes[0] = True
    indexes[1] = True
    for i in range(1, len(nodes) - 1):
        xdif = nodes[i][0] - nodes[i - 1][0]
        ydif = nodes[i][1] - nodes[i - 1][1]
        refSlope = float(ydif) / xdif
        thisX = nodes[i + 1][0] - nodes[i][0]
        thisY = nodes[i + 1][1] - nodes[i][1]  # original reused the x index here
        thisSlope = float(thisY) / thisX
        indexes[i + 1] = thisSlope <= refSlope