def background_subtraction(background_image, foreground_image, device, debug=None):
    """Creates a binary image from a background subtraction of the foreground using cv2.BackgroundSubtractorMOG().
    The binary image returned is a mask that should contain mostly foreground pixels.
    The background image should be the same background as the foreground image except not containing the object
    of interest.

    Images must be of the same size and type.
    If not, the larger image will be downsampled to the size of the smaller image.
    If they are of different types, an error will be raised.

    Inputs:
    background_image       = img object, RGB or binary/grayscale/single-channel
    foreground_image       = img object, RGB or binary/grayscale/single-channel
    device                 = device number. Used to count steps in the pipeline
    debug                  = None, "print", or "plot". "print" = save to file, "plot" = display on screen.

    Returns:
    device                 = device number
    fgmask                 = background subtracted foreground image (mask)

    :param background_image: numpy array
    :param foreground_image: numpy array
    :param device: int
    :param debug: str
    :return device: int
    :return fgmask: numpy array
    """

    device += 1
    # Copy the images so the originals are not altered
    bg_img = np.copy(background_image)
    fg_img = np.copy(foreground_image)
    # Checking if images need to be resized or error raised
    if bg_img.shape != fg_img.shape:
        # If the images do not have the same number of channels (e.g., one RGB, one grayscale), raise an error.
        if len(bg_img.shape) != len(fg_img.shape):
            fatal_error("Images must both be single-channel/grayscale/binary or RGB")
        # Forcibly resize the larger image to match the smaller image
        print("WARNING: Images are not the same size.\nResizing")
        if bg_img.shape > fg_img.shape:
            width, height = fg_img.shape[1], fg_img.shape[0]
            bg_img = cv2.resize(bg_img, (width, height), interpolation=cv2.INTER_AREA)
        else:
            width, height = bg_img.shape[1], bg_img.shape[0]
            fg_img = cv2.resize(fg_img, (width, height), interpolation=cv2.INTER_AREA)

    # Instantiate the background subtractor; with a single-image history, the default parameters do not need to be changed.
    if cv2.__version__[0] == '2':
        bgsub = cv2.BackgroundSubtractorMOG()
    else:
        bgsub = cv2.createBackgroundSubtractorMOG2()
    # Apply the background image to the background subtractor first so it forms the background model.
    # Anything applied afterwards is segmented against that model.
    fgmask = bgsub.apply(bg_img)
    # Applying the foreground image to the background subtractor (therefore removing the background)
    fgmask = bgsub.apply(fg_img)

    # Debug options
    if debug == "print":
        print_image(fgmask, "{0}_background_subtraction.png".format(device))
    elif debug == "plot":
        plot_image(fgmask, cmap="gray")
    
    return device, fgmask
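
# A minimal usage sketch for background_subtraction() (an illustration added
# here, not part of the original snippet). The image file names are
# hypothetical, and the cv2/np imports plus the fatal_error/print_image/
# plot_image helpers that the function relies on are assumed to be available.
if __name__ == '__main__':
    bg = cv2.imread('empty_pot.png')       # background image without the object
    fg = cv2.imread('pot_with_plant.png')  # same scene containing the object
    device, fgmask = background_subtraction(bg, fg, device=0, debug=None)
    cv2.imwrite('fgmask.png', fgmask)      # save the resulting foreground mask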
# Mixture-of-Gaussians background modeling
import numpy as np
import cv2

cap = cv2.VideoCapture('viptraffic.avi')
fourcc = cv2.cv.FOURCC(*'XVID')
out = cv2.VideoWriter('output1.avi', fourcc, 20.0, (160, 120), 0)  # video writer settings
fgbg = cv2.BackgroundSubtractorMOG(200, 5, 0.7)  # create a background subtractor object
cv2.namedWindow('img', cv2.WINDOW_NORMAL)
cv2.namedWindow('fgmask', cv2.WINDOW_NORMAL)  # create two display windows
i = 0
while (1):
    ret, frame = cap.read()  # read a video frame
    i = i + 1
    print(i)  # frame counter
    if ret:  # continue if the frame was read successfully
        fgmask = fgbg.apply(frame, learningRate=0.02)  # compute the foreground mask
        out.write(fgmask)  # write the mask to the output video
        cv2.imshow('img', frame)
        cv2.imshow('fgmask', fgmask)  # show the results
        k = cv2.waitKey(1000)
        if k == 27:
            break
    else:
        break
cap.release()
cv2.destroyAllWindows()
Example #3
    br = CvBridge()  # bridge for converting ROS image messages to OpenCV images

    #--------------------------------------------------------------------------------------#
    def detect_and_draw(imgmsg):
        global x, y

        img = br.imgmsg_to_cv2(imgmsg, desired_encoding="passthrough")
        img = img[:, :, 0:3]
        fgmask = fgbg.apply(img)

        cv2.imshow('rgb', img)
        #cv2.imshow('frame',fgmask)
        img1_bg = cv2.bitwise_and(img, img, mask=fgmask)
        img_obj = img.copy()
        for i in range(len(x)):
            print(i)
            img_obj[y[i], x[i], :] = [255, 0, 0]
        cv2.imshow('object', img_obj)
        cv2.imshow('masked', img1_bg)
        k = cv2.waitKey(1) & 0xff

#--------------------------------------------------------------------------------------#

    fgbg = cv2.BackgroundSubtractorMOG()
    rospy.init_node('object_detection')
    LH_image_topic = rospy.resolve_name("/cameras/left_hand_camera/image")

    rospy.Subscriber(LH_image_topic, sensor_msgs.msg.Image, detect_and_draw)

    rospy.spin()
Example #4
def track_ball_3(video):
    """
    As track_ball_1, but for ball_3.mov.

    Requires going through the video once to find the size of the moving
    object, then a second time to track its location.
    """
    result = []
    num_frames = 0
    fgbg = cv2.BackgroundSubtractorMOG()
    x, y, w, h = 0, 0, 0, 0
    avg_w, avg_h = 0, 0

    ret, frame = video.read()

    while ret is True:
        num_frames = num_frames + 1
        sub = fgbg.apply(frame)

        kernel = numpy.ones((5, 5), numpy.uint8)
        dilation = cv2.dilate(sub, kernel, iterations=1)
        ret, thresh = cv2.threshold(dilation, 127, 255, 0)

        # contours is a list of all the contours in the image
        contours, _ = cv2.findContours(thresh, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)

        if (contours):
            x, y, w, h = cv2.boundingRect(contours[0])
            avg_w = avg_w + w
            avg_h = avg_h + h

        ret, frame = video.read()

    avg_w = avg_w / num_frames
    avg_h = avg_h / num_frames

    # Reset the video
    video.set(cv2.cv.CV_CAP_PROP_POS_AVI_RATIO, 0)

    # take first frame of the video
    ret, frame = video.read()

    # setup initial location of window to track an object of the
    # size determined previously
    track_window = (avg_w, avg_w, avg_h, avg_h)
    x, y, w, h = track_window

    # set up the ROI for tracking
    roi = frame[y:y + h, x:x + w]
    hsv_roi = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, numpy.array((0., 60., 32.)),
                       numpy.array((180., 255., 255.)))
    roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

    # setup the termination criteria,
    # either 10 iteration or move by atleast 1 pt
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

    while ret is True:

        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

        # apply meanshift to get the new location
        ret, track_window = cv2.meanShift(dst, track_window, term_crit)

        # draw it on image
        x, y, w, h = track_window
        result.append((x, y, x + w, y + h))

        ret, frame = video.read()

    return result
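
# A minimal usage sketch for track_ball_3() (an illustration added here, not
# part of the original snippet). It assumes an OpenCV 2.4 build (the function
# uses cv2.cv constants) and the ball_3.mov file mentioned in the docstring.
if __name__ == '__main__':
    video = cv2.VideoCapture('ball_3.mov')
    for box in track_ball_3(video):
        print(box)  # (x1, y1, x2, y2) of the tracked window per frame
    video.release()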
def process_frames(source,
                   display=False,
                   clear_iters=1,
                   blur_kernel=(5, 5),
                   max_frames=-1):
    """Reads a video from source (a file or a camera) and performs preprocessing and transformation into
    a vector representation for each frame. The preprocessing consists of blurring, eroding, dilating, extracting
    the contour, then transforming it into a vector representation of its chain code form. See function get_chain_vector.

    source : Video source. File path or 0 for camera.
    display : Show contour and print progress
    clear_iters : How many iterations of erosion and dilation to perform
    blur_kernel : Kernel for Gaussian blur
    max_frames : Number of frames to read; -1 for all. Must be set when reading from a camera to prevent capturing indefinitely.
    returns : Vector representations of the motion in each frame of the video
    """
    kernel = np.ones((3, 3), np.uint8)

    cap = cv2.VideoCapture(source)
    fgbg = cv2.BackgroundSubtractorMOG(60 * 30, 256, 0.9, 0.01)
    frames = []

    ret, frame = cap.read()

    if not ret:
        return []

    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(frame, blur_kernel, 0)
    erosion = cv2.erode(blur, kernel, iterations=clear_iters)
    dilatation = cv2.dilate(erosion, kernel, iterations=clear_iters)

    avg = np.float32(dilatation)

    frame_cnt = 0

    while (1):
        ret, frame = cap.read()

        if not ret: break

        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(frame, (5, 5), 0)
        erosion = cv2.erode(blur, kernel, iterations=clear_iters)
        dilatation = cv2.dilate(erosion, kernel, iterations=clear_iters)

        cv2.accumulateWeighted(dilatation, avg, 0.025)  # accumulate a running weighted average of the frames
        frames.append(dilatation)

        frame_cnt += 1
        if max_frames != -1 and frame_cnt >= max_frames:
            break

    cap.release()

    average = cv2.convertScaleAbs(avg)
    fgbg.apply(average)

    chains = []

    for frame in frames:
        img = fgbg.apply(frame)
        img1 = copy.deepcopy(img)
        contours, hierarchy = cv2.findContours(img1, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_NONE)

        chains.append(get_chain_vector(contours))

        if display == True and contours:
            cv2.drawContours(img1, contours, -1, (255, 0, 0), 1)
            cv2.imshow('Silueta', img1)
            cv2.waitKey(1)

    cv2.destroyAllWindows()

    return [frame for frame in chains if frame.any()]
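
# A minimal usage sketch for process_frames() (an illustration added here, not
# part of the original snippet). The video path is hypothetical, and
# get_chain_vector() must be defined elsewhere in the original module.
if __name__ == '__main__':
    vectors = process_frames('walking.avi', display=False, max_frames=300)
    print(len(vectors))  # number of frames that produced a non-empty chain vector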
def batch_identify_fish(dir, motion_detection_dir):
    """Input as video, return processed video with identified fish"""

    videofile = os.path.join(dir, 'forPTV.mp4')

    camera = cv2.VideoCapture(videofile)

    if not camera.isOpened():
        print("Converting MP4 file...")
        os.system('ffmpeg -i %s -c:v copy %s_Converted.mp4' %
                  (videofile, videofile[:-4]))
        # open the converted file; the extension matches the file written above
        camera = cv2.VideoCapture('%s_Converted.mp4' % videofile[:-4])

    fps = camera.get(cv2.cv.CV_CAP_PROP_FPS)
    history = np.int(fps / 2)

    # creating background subtraction object
    fgbg = cv2.BackgroundSubtractorMOG()  # It works for opencv 2.4

    # creating video writing object, outside of the loop
    size = (int(camera.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)),
            int(camera.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)) * 2)
    fourcc = cv2.cv.FOURCC('8', 'B', 'P', 'S')  #works, large

    # output = cv2.VideoWriter(os.path.join(data_dir, 'rawVideo_identifiedFish.avi'),  fourcc, fps, size, True)

    # size for individual video
    size_1 = (int(camera.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)),
              int(camera.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)))
    # out_fish_only = cv2.VideoWriter(os.path.join(data_dir,'fishOnly.avi'), fourcc, fps, size_1, True)

    # size for individual video, background subtraction results
    fgbg_results = cv2.VideoWriter(
        os.path.join(motion_detection_dir, 'fgbg_MOG_results.avi'), fourcc,
        fps, size_1, True)

    print "~~~ identifying fish from raw video..."

    #### starting the loop
    frame_nb = 0

    while True:

        print(frame_nb)

        grabbed, frame = camera.read()

        if not grabbed:
            break

        frame_nb += 1

        #### blurring before binarizing can help remove small noise
        # blurred  = cv2.bilateralFilter(clean_frame, 5, 15,15)
        # blurred_1  = cv2.GaussianBlur(blurred, (3, 3), 0)

        # return <type 'numpy.ndarray'>, 1-channel pics
        # fgmask   = fgbg.apply(frame,  learningRate=0.01)
        fgmask = fgbg.apply(frame)
        # print np.shape(fgmask)
        fgmask_3d = cv2.cvtColor(fgmask, cv2.COLOR_GRAY2RGB)
        # print np.shape(fgmask_3d)
        '''
		#### removing noise, optional
		kernel   = np.ones((3,3), np.uint8)
		erosion  = cv2.erode(fgmask, kernel)
		dilation = cv2.dilate(erosion, kernel)


		ret, thresh = cv2.threshold(dilation, 200, 255, 0)


		contours, hierarchy = cv2.findContours(thresh,1,2)

		c_x, c_y = find_fish(dilation, contours, remove_noise=True)
		centroids.append([c_x, c_y])
		# centroids_deque.appendleft((c_x, c_y))


		# print "the chanel of fgmask is: " + str(np.shape(fgmask)[-1])
		#### converting 1 channel binary images to 3 channel images, for stacking them together
		identified_fish   = cv2.cvtColor(dilation, cv2.COLOR_GRAY2RGB)

		# print np.shape(fgmask)
		cv2.imshow('frame', dilation)
		

		#### using cv2.inpaint to blend the fish into the background
		# blended = cv2.inpaint(frame, dilation, 3, cv2.INPAINT_TELEA)
		# print "the chanel of blended is: " + str(np.shape(blended)[-1])
	
		#### showing
		twoV  = np.vstack((frame, identified_fish))

		#### adding name on different frames, Bottom-left corner of the text string in the image
		font  = cv2.FONT_HERSHEY_SIMPLEX
		width = np.shape(frame)[0]
		hight = np.shape(frame)[1]
		cv2.putText(twoV, 'Original Video',               (int(width/2-80),int(hight/15)), font, 0.7,(0,0,255),2)
		cv2.putText(twoV, 'Identified Fish',              (int(width/2-80),int(hight/9)+int(hight/2)), font, 0.7,(0,0,255),2)
		'''
        # cv2.putText(threeV, 'Fish blended with Background', (int(width/2-80),int(hight/6)+int(hight)), font, 0.7,(0,0,255),2)
        # cv2.putText(blended, 'Fish blended with Background',(int(width/2-80),int(hight/15)), font, 0.7,(0,0,255),2)
        # cv2.putText(blended, '(Preprocessing for Streams)', (int(width/2-80),int(hight/15)+20), font, 0.7,(0,0,255),1)

        #### saving videos to the video recorder

        # 		output.write(twoV)
        # 		out_fish_only.write(identified_fish)
        fgbg_results.write(fgmask_3d)
# 		cv2.imshow('Feedin-Tracking', twoV)
# 		cv2.imshow('Identified Fish)', identified_fish)
# 		cv2.imshow('BgFg', fgmask_3d)

#### release the video capture and video output after work.
    camera.release()
    # 	output.release()
    # 	out_fish_only.release()
    fgbg_results.release()
    cv2.destroyAllWindows()

    print "Done!"
Example #7
def get_scenes(cap, tail=2):
    last_frame = None
    i = 0
    fgbg = cv2.BackgroundSubtractorMOG()
    exp_avrg = 0
    scenes = []
    avrgs = []
    diffs = []
    max_bspeed = 0
    count = 0
    max_brightness = 0
    avrg_brightness = 0
    while (cap.isOpened()):
        ret, frame = cap.read()
        if ret:
            if i == tail:
                fgbg = cv2.BackgroundSubtractorMOG()
                i = 0
            fgmask = fgbg.apply(frame)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            brightness = frame.mean()
            width, height = frame.shape
            if i > 0 and i < tail:
                w, h = fgmask.shape
                diff = np.count_nonzero(fgmask) / (1.0 * w * h)
                if diff > exp_avrg:
                    avrg_brightness /= 1.0 * count
                    scenes.append({
                        "end_frame":
                        cap.get(cv2.cv.CV_CAP_PROP_POS_FRAMES),
                        "max_bspeed":
                        max_bspeed,
                        "avrg_bspeed":
                        exp_avrg,
                        "max_brightness":
                        max_brightness,
                        "avrg_brightness":
                        avrg_brightness
                    })
                    exp_avrg = _alpha * diff + (1 - _alpha) * exp_avrg
                    max_bspeed = 0
                    max_brightness = 0

                max_bspeed = max(diff, max_bspeed)
                avrgs.append(exp_avrg)
                diffs.append(diff)
            count += 1
            max_brightness = max(brightness, max_brightness)
            avrg_brightness += brightness

            i += 1
            if cap.get(cv2.cv.CV_CAP_PROP_POS_FRAMES) == cap.get(
                    cv2.cv.CV_CAP_PROP_FRAME_COUNT):
                break
    # print scenes
    fr_start = 0
    ret_scenes = []
    for j in range(len(scenes)):
        scene = scenes[j]
        fr_end = scene["end_frame"]
        duration = (fr_end - fr_start) / cap.get(cv2.cv.CV_CAP_PROP_FPS)
        if duration > _min_scene_length and duration < _max_scene_length:
            scenes[j]["start"] = fr_start / cap.get(cv2.cv.CV_CAP_PROP_FPS)
            scenes[j]["end"] = fr_end / cap.get(cv2.cv.CV_CAP_PROP_FPS)
            scenes[j]["duration"] = duration
            scenes[j]["position"] = fr_start / \
                cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
            ret_scenes.append(scenes[j])
        fr_start = fr_end

    return ret_scenes
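
# A minimal usage sketch for get_scenes() (an illustration added here, not
# part of the original snippet). The module-level constants _alpha,
# _min_scene_length and _max_scene_length are referenced above but not shown;
# the values and the video path below are assumptions, not the author's.
if __name__ == '__main__':
    _alpha = 0.5              # weight of the newest frame-difference in the running average
    _min_scene_length = 2.0   # seconds
    _max_scene_length = 60.0  # seconds
    cap = cv2.VideoCapture('movie.avi')
    for scene in get_scenes(cap, tail=2):
        print(scene["start"], scene["end"], scene["duration"])
    cap.release()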
Example #8
class Ui_MainWindow(object):
    source = None
    player = None
    title = None
    score = None
    level = 20
    scoreLine = {}
    notes = None
    listindex = 0
    frame = None
    vwidth = 640
    vheight = 480
    totaltime = 0
    lrbnd = 70
    ubnd = 100
    fgbg = cv2.BackgroundSubtractorMOG()
    transition = 'mid'
    ctransition = None
    minsize = 10
    font = cv2.FONT_HERSHEY_SIMPLEX
    correct = 0

    leftval = 0
    midval = 0
    rightval = 0
    upval = 0

    lenl = vheight * lrbnd
    lenm = (vheight - ubnd) * (vwidth - (2 * lrbnd))
    lenr = vheight * lrbnd
    lenu = (vwidth - (2 * lrbnd)) * ubnd

    t_minus = cv2.cvtColor(np.zeros((vheight, vwidth, 3), np.uint8),
                           cv2.COLOR_RGB2GRAY)
    t = cv2.cvtColor(np.zeros((vheight, vwidth, 3), np.uint8),
                     cv2.COLOR_RGB2GRAY)
    t_plus = cv2.cvtColor(np.zeros((vheight, vwidth, 3), np.uint8),
                          cv2.COLOR_RGB2GRAY)

    def setupUi(self, MainWindow):
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(820, 635)
        MainWindow.setMinimumSize(820, 635)
        MainWindow.setMaximumSize(820, 635)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.lineEdit = QtGui.QTextEdit(self.centralwidget)
        self.lineEdit.setGeometry(QtCore.QRect(10, 475, 800, 105))
        self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
        self.lineEdit.setReadOnly(True)

        redir = RedirectText(self.lineEdit)
        sys.stdout = redir

        self.lineEdit.append(
            u'Microsoft Imagine Cup 2017 - ARshaker에 오신 것을 환영합니다.')
        self.lineEdit.append(u'음악 파일이나 기 생성된 악보 파일(*.jhw)을 열어주세요!')
        self.lineEdit.append(u'JHW=김주현(JH)+황혜원(HW) 제작')

        self.listWidget = QtGui.QListWidget(self.centralwidget)
        self.listWidget.setGeometry(QtCore.QRect(650, 5, 160, 465))
        self.listWidget.setObjectName(_fromUtf8("listWidget"))

        self.seekSlider = Phonon.SeekSlider(self.centralwidget)
        self.seekSlider.setGeometry(QtCore.QRect(220, 6, 241, 22))
        self.seekSlider.setObjectName(_fromUtf8("seekSlider"))

        paletteb = QtGui.QPalette()
        paletteb.setBrush(QtGui.QPalette.Light, QtCore.Qt.black)

        paletteg = QtGui.QPalette()
        paletteg.setBrush(QtGui.QPalette.Light, QtCore.Qt.green)

        palettegl = QtGui.QPalette()
        palettegl.setBrush(QtGui.QPalette.Light, QtCore.Qt.darkYellow)

        paletter = QtGui.QPalette()
        paletter.setBrush(QtGui.QPalette.Light, QtCore.Qt.red)

        palettem = QtGui.QPalette()
        palettem.setBrush(QtGui.QPalette.Light, QtCore.Qt.darkMagenta)

        palettebl = QtGui.QPalette()
        palettebl.setBrush(QtGui.QPalette.Light, QtCore.Qt.darkBlue)

        palettebb = QtGui.QPalette()
        palettebb.setBrush(QtGui.QPalette.Light, QtCore.Qt.blue)

        palettebc = QtGui.QPalette()
        palettebc.setBrush(QtGui.QPalette.Light, QtCore.Qt.cyan)

        self.lcdNumber = QtGui.QLCDNumber(self.centralwidget)
        self.lcdNumber.setPalette(paletteb)
        self.lcdNumber.setGeometry(QtCore.QRect(580, 5, 64, 23))
        self.lcdNumber.setObjectName(_fromUtf8("lcdNumber"))
        self.lcdNumber.display("00:00")

        self.lcdr = QtGui.QLCDNumber(self.centralwidget)
        self.lcdr.setPalette(palettegl)
        self.lcdr.setGeometry(QtCore.QRect(15, 420, 80, 23))
        self.lcdr.setObjectName(_fromUtf8("lcdr"))
        self.lcdr.display("00:00")
        self.rlab = QtGui.QLabel(self.centralwidget)
        self.rlab.setText(u'남은 시간')
        self.rlab.setGeometry(QtCore.QRect(23, 445, 100, 22))

        self.lcda = QtGui.QLCDNumber(self.centralwidget)
        self.lcda.setPalette(paletteb)
        self.lcda.setGeometry(QtCore.QRect(110, 420, 80, 23))
        self.lcda.setObjectName(_fromUtf8("lcdAccu"))
        self.lcda.display("00000")
        self.alab = QtGui.QLabel(self.centralwidget)
        self.alab.setText(u'현재 비트')
        self.alab.setGeometry(QtCore.QRect(118, 445, 100, 22))

        self.lcdo = QtGui.QLCDNumber(self.centralwidget)
        self.lcdo.setPalette(palettebb)
        self.lcdo.setGeometry(QtCore.QRect(205, 420, 80, 23))
        self.lcdo.setObjectName(_fromUtf8("lcdCorrect"))
        self.lcdo.display("00000")
        self.olab = QtGui.QLabel(self.centralwidget)
        self.olab.setText(u'맞은 비트')
        self.olab.setGeometry(QtCore.QRect(213, 445, 100, 22))

        self.lcdx = QtGui.QLCDNumber(self.centralwidget)
        self.lcdx.setPalette(palettem)
        self.lcdx.setGeometry(QtCore.QRect(300, 420, 80, 23))
        self.lcdx.setObjectName(_fromUtf8("lcdWrong"))
        self.lcdx.display("00000")
        self.xlab = QtGui.QLabel(self.centralwidget)
        self.xlab.setText(u'틀린 비트')
        self.xlab.setGeometry(QtCore.QRect(308, 445, 100, 22))

        self.lcdl = QtGui.QLCDNumber(self.centralwidget)
        self.lcdl.setPalette(paletteg)
        self.lcdl.setGeometry(QtCore.QRect(395, 420, 80, 23))
        self.lcdl.setObjectName(_fromUtf8("lcdLeft"))
        self.lcdl.display("00000")
        self.llab = QtGui.QLabel(self.centralwidget)
        self.llab.setText(u'남은 비트')
        self.llab.setGeometry(QtCore.QRect(403, 445, 100, 22))

        self.lcdpg = QtGui.QLCDNumber(self.centralwidget)
        self.lcdpg.setPalette(palettebc)
        self.lcdpg.setGeometry(QtCore.QRect(490, 420, 70, 23))
        self.lcdpg.setObjectName(_fromUtf8("lcdProgress"))
        self.lcdpg.display("00000")
        self.pglab = QtGui.QLabel(self.centralwidget)
        self.pglab.setText(u'진행률(%)')
        self.pglab.setGeometry(QtCore.QRect(491, 445, 100, 22))

        self.lcdg = QtGui.QLCDNumber(self.centralwidget)
        self.lcdg.setPalette(paletter)
        self.lcdg.setGeometry(QtCore.QRect(572, 420, 70, 23))
        self.lcdg.setObjectName(_fromUtf8("lcdPercent"))
        self.lcdg.display("00000")
        self.glab = QtGui.QLabel(self.centralwidget)
        self.glab.setText(u'정확도(%)')
        self.glab.setGeometry(QtCore.QRect(573, 445, 100, 22))

        self.lrlab = QtGui.QLabel(self.centralwidget)
        self.lrlab.setText(u'좌,우(L,R)크기')
        self.lrlab.setGeometry(QtCore.QRect(13, 380, 100, 22))
        self.lrsl = QtGui.QSlider(QtCore.Qt.Horizontal, self.centralwidget)
        self.lrsl.setGeometry(QtCore.QRect(117, 380, 120, 22))
        self.lrsl.setMinimum(30)
        self.lrsl.setMaximum(200)
        self.lrsl.setValue(30)
        self.lrsl.setObjectName(_fromUtf8("lrsl"))
        self.lrsl.valueChanged.connect(self.lrslchange)

        self.ulab = QtGui.QLabel(self.centralwidget)
        self.ulab.setText(u'위(U) 경계')
        self.ulab.setGeometry(QtCore.QRect(244, 380, 100, 22))
        self.usl = QtGui.QSlider(QtCore.Qt.Horizontal, self.centralwidget)
        self.usl.setGeometry(QtCore.QRect(317, 380, 120, 22))
        self.usl.setMinimum(100)
        self.usl.setMaximum(400)
        self.usl.setValue(100)
        self.usl.setObjectName(_fromUtf8("usl"))
        self.usl.valueChanged.connect(self.uslchange)

        self.dlab = QtGui.QLabel(self.centralwidget)
        self.dlab.setText(u'색상 경계값')
        self.dlab.setGeometry(QtCore.QRect(441, 380, 100, 22))
        self.dsl = QtGui.QSlider(QtCore.Qt.Horizontal, self.centralwidget)
        self.dsl.setGeometry(QtCore.QRect(524, 380, 120, 22))
        self.dsl.setMinimum(1)
        self.dsl.setMaximum(255)
        self.dsl.setValue(10)
        self.dsl.setObjectName(_fromUtf8("dsl"))
        self.dsl.valueChanged.connect(self.dslchange)

        # music playback section
        self.volumeSlider = Phonon.VolumeSlider(self.centralwidget)
        #self.volumeSlider.setSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
        self.volumeSlider.setGeometry(QtCore.QRect(460, 3, 113, 26))
        self.volumeSlider.setObjectName(_fromUtf8("volumeSlider"))

        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 820, 26))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        self.menu = QtGui.QMenu(self.menubar)
        self.menu.setObjectName(_fromUtf8("menu"))
        self.menu_2 = QtGui.QMenu(self.menubar)
        self.menu_2.setObjectName(_fromUtf8("menu_2"))
        self.menu_3 = QtGui.QMenu(self.menubar)
        self.menu_3.setObjectName(_fromUtf8("menu_3"))
        self.menu_4 = QtGui.QMenu(self.menubar)
        self.menu_4.setObjectName(_fromUtf8("menu_4"))
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        MainWindow.setStatusBar(self.statusbar)

        self.action = QtGui.QAction(MainWindow)
        self.action.setObjectName(_fromUtf8("action"))
        self.action.setShortcut("Ctrl+O")
        self.action.setStatusTip(
            u'음악 파일을 엽니다. 악보가 이전에 생성되지 않은 음악일 경우, 악보 생성에 시간이 소요됩니다.')
        self.action.triggered.connect(lambda: self.openMusic(True))

        self.action_2 = QtGui.QAction(MainWindow)
        self.action_2.setObjectName(_fromUtf8("action_2"))
        self.action_2.setShortcut("Ctrl+S")
        self.action_2.setStatusTip(
            u'악보 파일을 엽니다. 파일 확장자가 *.jhw로 끝나며 .jhw 앞에 붙은 숫자가 클 수록 난이도가 쉽습니다.')
        self.action_2.triggered.connect(self.openScore)

        self.action_y = QtGui.QAction(MainWindow)
        self.action_y.setObjectName(_fromUtf8("action_y"))
        self.action_y.setShortcut("Ctrl+Y")
        self.action_y.setStatusTip(
            u'YouTube 주소를 입력합니다. YouTube에서 음악을 받아 악보파일을 자동으로 생성합니다.')
        self.action_y.triggered.connect(lambda: self.openMusic(False))

        self.action_3 = QtGui.QAction(MainWindow, checkable=True)
        self.action_3.setObjectName(_fromUtf8("action_3"))

        self.action_4 = QtGui.QAction(MainWindow, checkable=True)
        self.action_4.setObjectName(_fromUtf8("action_4"))
        self.action_4.setChecked(True)

        self.action_5 = QtGui.QAction(MainWindow)
        self.action_5.setObjectName(_fromUtf8("action_5"))

        self.action_6 = QtGui.QAction(MainWindow)
        self.action_6.setObjectName(_fromUtf8("action_6"))
        self.action_6.setShortcut("Ctrl+H")
        self.action_6.setStatusTip(u'간략한 정보와 사용방법을 수록 하고 있습니다.')
        self.action_6.triggered.connect(self.help)

        ag = QtGui.QActionGroup(MainWindow, exclusive=True)
        self.action_7 = ag.addAction(QtGui.QAction(MainWindow, checkable=True))
        self.action_7.setChecked(True)
        self.action_8 = ag.addAction(QtGui.QAction(MainWindow, checkable=True))
        self.action_9 = ag.addAction(QtGui.QAction(MainWindow, checkable=True))

        #self.action_7 = QtGui.QAction(MainWindow)
        self.action_7.setObjectName(_fromUtf8("action_7"))
        self.action_7.setStatusTip('Easy: *.20.jhw')
        self.action_7.triggered.connect(self.easy)

        #self.action_8 = QtGui.QAction(MainWindow)
        self.action_8.setObjectName(_fromUtf8("action_8"))
        self.action_8.setStatusTip('Medium: *.15.jhw')
        self.action_8.triggered.connect(self.medi)

        #self.action_9 = QtGui.QAction(MainWindow)
        self.action_9.setObjectName(_fromUtf8("action_9"))
        self.action_9.setStatusTip('Hard: *.10.jhw')
        self.action_9.triggered.connect(self.hard)

        ag2 = QtGui.QActionGroup(MainWindow, exclusive=True)
        self.action_10 = ag2.addAction(
            QtGui.QAction(MainWindow, checkable=True))
        self.action_10.setChecked(True)
        self.action_11 = ag2.addAction(
            QtGui.QAction(MainWindow, checkable=True))

        self.action_10.setObjectName(_fromUtf8("action_10"))
        self.action_10.setStatusTip(u'인식 방법을 잔상 인식 모드로 설정합니다.')
        self.action_10.triggered.connect(self.shadow)

        self.action_11.setObjectName(_fromUtf8("action_11"))
        self.action_11.setStatusTip(u'인식 방법을 배경 제거 인식 모드로 설정합니다.')
        self.action_11.triggered.connect(self.bgrm)

        self.menu.addAction(self.action)
        self.menu.addAction(self.action_2)
        self.menu.addAction(self.action_y)
        self.menu.addAction(self.action_6)
        self.menu_2.addAction(self.action_3)
        self.menu_2.addAction(self.action_4)

        self.menu_3.addAction(self.action_7)
        self.menu_3.addAction(self.action_8)
        self.menu_3.addAction(self.action_9)
        self.menu_4.addAction(self.action_10)
        self.menu_4.addAction(self.action_11)

        self.menubar.addAction(self.menu.menuAction())
        self.menubar.addAction(self.menu_2.menuAction())
        self.menubar.addAction(self.menu_3.menuAction())
        self.menubar.addAction(self.menu_4.menuAction())

        self.camera = QtGui.QWidget(self.centralwidget)
        self.camera.setGeometry(QtCore.QRect(0, 20, 655, 365))
        self.camera.setMinimumSize(QtCore.QSize(655, 365))
        self.camera.setMaximumSize(QtCore.QSize(655, 365))
        self.camera.setObjectName(_fromUtf8("camera"))

        self.startButton = QtGui.QPushButton(self.centralwidget)
        self.startButton.setGeometry(QtCore.QRect(10, 5, 91, 23))
        self.startButton.setObjectName(_fromUtf8("startButton"))
        self.pauseButton = QtGui.QPushButton(self.centralwidget)
        self.pauseButton.setGeometry(QtCore.QRect(101, 5, 111, 23))
        self.pauseButton.setObjectName(_fromUtf8("pauseButton"))

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # UI setup done; now the video part

        self.capture = None
        self.fps = 29.97
        self.timer = None
        self.startButton.clicked.connect(self.start)
        self.startButton.connect(
            QtGui.QShortcut(QtGui.QKeySequence(QtCore.Qt.Key_F1),
                            self.centralwidget), QtCore.SIGNAL('activated()'),
            self.start)
        self.monitor = QtGui.QVBoxLayout(self.camera)
        self.startCapture()

        # QProcess object for external app
        self.process = QtCore.QProcess()
        # QProcess emits `readyRead` when there is data to be read
        self.process.readyRead.connect(self.dataReady)
        # Just to prevent accidentally running multiple times
        # Disable the button when process starts, and enable it when it finishes
        self.process.started.connect(lambda: self.menu.setEnabled(False))
        self.process.started.connect(
            lambda: self.startButton.setEnabled(False))
        self.process.finished.connect(lambda: self.menu.setEnabled(True))
        self.process.finished.connect(
            lambda: self.startButton.setEnabled(True))
        self.process.finished.connect(self.scoremade)

    def retranslateUi(self, MainWindow):
        MainWindow.setWindowTitle(_translate("MainWindow", "ARshaker", None))
        self.menu.setTitle(_translate("MainWindow", "&파일", None))
        self.menu_2.setTitle(_translate("MainWindow", "&화면 반전", None))
        self.menu_3.setTitle(_translate("MainWindow", "&난이도", None))
        self.menu_4.setTitle(_translate("MainWindow", "&인식 모드", None))
        self.action.setText(_translate("MainWindow", "&음악 열기", None))
        self.action_2.setText(_translate("MainWindow", "&악보 열기", None))
        self.action_y.setText(_translate("MainWindow", "&Youtube에서 열기", None))
        self.action_3.setText(_translate("MainWindow", "&상하 반전", None))
        self.action_4.setText(_translate("MainWindow", "&좌우 반전", None))
        self.action_5.setText(_translate("MainWindow", "&배경삭제", None))
        self.action_6.setText(_translate("MainWindow", "&도움말", None))
        self.action_7.setText(_translate("MainWindow", "&쉬움", None))
        self.action_8.setText(_translate("MainWindow", "&보통", None))
        self.action_9.setText(_translate("MainWindow", "&어려움", None))
        self.action_10.setText(_translate("MainWindow", "&잔상 인식", None))
        self.action_11.setText(_translate("MainWindow", "&배경 제거", None))
        self.startButton.setText(_translate("MainWindow", "시작(F1)", None))
        self.pauseButton.setText(_translate("MainWindow", "일시정지(F2)", None))

    # get the upper boundary value
    def uslchange(self):
        self.ubnd = self.usl.value()
        self.lenl = self.vheight * self.lrbnd
        self.lenm = (self.vheight - self.ubnd) * (self.vwidth -
                                                  (2 * self.lrbnd))
        self.lenr = self.vheight * self.lrbnd
        self.lenu = (self.vwidth - (2 * self.lrbnd)) * self.ubnd

    # get the left/right boundary value
    def lrslchange(self):
        self.lrbnd = self.lrsl.value()
        self.lenl = self.vheight * self.lrbnd
        self.lenm = (self.vheight - self.ubnd) * (self.vwidth -
                                                  (2 * self.lrbnd))
        self.lenr = self.vheight * self.lrbnd
        self.lenu = (self.vwidth - (2 * self.lrbnd)) * self.ubnd

    # get the minimum detection-size threshold
    def dslchange(self):
        self.minsize = self.dsl.value()

    # read output from the score-generation subprocess and append it to the text box
    def dataReady(self):
        cursor = self.lineEdit.textCursor()
        cursor.movePosition(cursor.End)
        cursor.insertText(str(self.process.readAll()))
        self.lineEdit.ensureCursorVisible()

    # called when a score has just been generated for the first time
    def scoremade(self):
        self.lineEdit.clear()
        self.lineEdit.append(self.score)
        if self.level == 20:
            self.lineEdit.append(u'난이도: 쉬움')
        elif self.level == 15:
            self.lineEdit.append(u'난이도: 보통')
        elif self.level == 10:
            self.lineEdit.append(u'난이도: 어려움')
        self.sequencer()
        self.lineEdit.append(u'위의 악보가 생성되었습니다. 시작(F1)을 눌러 리듬게임을 시작하세요!')
        self.stop()

    # update the user-adjustable detection-value label and settings for each recognition mode
    def bgrm(self):
        self.dlab.setText(u'인식 최소값')
        self.lineEdit.append(u'인식 방법이 배경 제거 인식 모드로 설정 되었습니다.')
        self.dsl.setMinimum(1)
        self.dsl.setMaximum(1000)
        self.dsl.setValue(1)
        self.minsize = 1

    def shadow(self):
        self.dlab.setText(u'색상 경계값')
        self.lineEdit.append(u'인식 방법이 잔상 인식 모드로 설정 되었습니다.')
        self.dsl.setMinimum(1)
        self.dsl.setMaximum(255)
        self.dsl.setValue(10)
        self.minsize = 10

    # three difficulty levels
    def easy(self):
        self.level = 20
        self.lineEdit.append(u'난이도가 쉬움으로 변경되었습니다.')

    def medi(self):
        self.level = 15
        self.lineEdit.append(u'난이도가 보통으로 변경되었습니다.')

    def hard(self):
        self.level = 10
        self.lineEdit.append(u'난이도가 어려움으로 변경되었습니다.')

    # function for opening a music file
    def openMusic(self, type):
        name = None
        if type:
            name = QtGui.QFileDialog.getOpenFileNames(
                None, u'음악 선택 - mp3, wav, wma 확장자를 가진 파일 하나를 열어주세요',
                QtGui.QDesktopServices.storageLocation(
                    QtGui.QDesktopServices.MusicLocation),
                "Music (*.mp3 *.wav *.wma)")
        else:
            url, ok = QtGui.QInputDialog.getText(
                None, u'음악 선택 - YouTube 주소를 입력하세요',
                u'예시:https://www.youtube.com/watch?v=DVuRGCuFoa0')
            if not str(url).startswith(
                    'https://www.youtube.com/watch?v=') or not ok:
                self.lineEdit.append(u'올바르지 않은 YouTube 주소 입니다. 다시 시도하세요.')
                return
            else:
                self.lineEdit.append(u'음원 추출 중입니다. 조금만 기다려주세요.')
                with youtube_dl.YoutubeDL(ydl_opts) as ydl:
                    ydl.download([str(url)])

                video = pafy.new(url)
                for a in reversed(video.audiostreams):
                    if not str(a.extension).__contains__("webm"):
                        songtitle = a.title + "." + a.extension
                        self.lineEdit.append(songtitle)
                        self.lineEdit.append(u'음원 추출 중입니다. 조금만 기다려주세요.')
                        if not os.path.isfile(songtitle):
                            downloader = QtCore.QThread()
                            # conversion subprocess not finished
                            #sys.stdout = open(os.devnull, "w")
                            #a.download(quiet=True)
                            #sys.stdout= saved_stdout

                        #name=ytfolder+a.title+ ".mp3"

        if not name:
            return
        else:
            print(name)
            self.source = name[0]
            self.lineEdit.clear()
            self.lineEdit.append(self.source)
            self.lineEdit.append(
                u'음악 파일이 성공적으로 열렸습니다. 악보를 생성합니다. 완료될 때 까지 기다려 주세요.')
            self.lineEdit.append(" ")
            #self.player = Phonon.createPlayer(Phonon.MusicCategory,Phonon.MediaSource(self.source))
            self.player = Phonon.MediaObject()
            self.player.setCurrentSource(Phonon.MediaSource(self.source))
            self.player.setTickInterval(100)
            self.player.tick.connect(self.tick)
            self.seekSlider.setMediaObject(self.player)
            self.audioOutput = Phonon.AudioOutput(Phonon.MusicCategory)
            Phonon.createPath(self.player, self.audioOutput)
            self.volumeSlider.setAudioOutput(self.audioOutput)
            self.title = self.source.split('\\')[self.source.count('\\')]
            self.score = self.source + "." + str(self.level) + ".jhw"
            cmd = 'mnb ' + str(
                self.level) + ' "' + self.source + '" "' + self.score + '"'
            if os.path.exists(self.score):
                self.sequencer()
                self.lineEdit.append(
                    u'이미 생성된 악보가 있습니다. 시작(F1)을 눌러 리듬게임을 시작하세요!')
                self.stop()
            else:
                self.process.start(cmd)

    # sequencer
    def sequencer(self):
        self.correct = 0
        f = open(self.score, 'r')
        while True:
            line = f.readline()
            if not line: break
            self.scoreLine[(int)(
                line.split(" ")[0])] = line.split(" ")[1].split("\n")[0]
        f.close()
        self.notes = sorted(self.scoreLine.items(), key=operator.itemgetter(0))
        self.lineEdit.append(u'비트 개수: ' + str(len(self.notes)) + u'개')
        self.listWidget.clear()
        self.listindex = 0
        for note in self.notes:
            bits = str(datetime.timedelta(milliseconds=note[0])).split(":")
            if bits[2].endswith("000"):
                bits[2] = bits[2].rstrip("000")
            if len(bits[2]) < 6:
                i = len(bits[2])
                while i < 6:
                    bits[2] = bits[2] + '0'
                    i += 1
            self.listWidget.addItem(
                str(bits[1]) + ":" + str(bits[2]).replace(".", ":") + " - " +
                note[1])
            if self.notes.index(note) == len(self.notes) - 1:
                self.totaltime = note[0]
                self.lineEdit.append(u'예상 운동 시간: ' + str(bits[1]) + u'분 ' +
                                     str(bits[2]).replace(".", u'초 '))

    # function for opening a score file
    def openScore(self):
        name = QtGui.QFileDialog.getOpenFileNames(
            None,
            u'악보 선택 - .jhw 앞에 붙은 숫자가 클 수록 난이도가 쉽습니다. (20:쉬움 15:보통 10:어려움)',
            QtGui.QDesktopServices.storageLocation(
                QtGui.QDesktopServices.MusicLocation), "Score (*.jhw)")
        if not name:
            return
        else:
            self.score = name[0]
            if ".mp3" in self.score:
                self.source = self.score.split('.mp3')[0] + '.mp3'
            elif ".wma" in self.score:
                self.source = self.score.split('.wma')[0] + '.wma'
            elif ".wav" in self.score:
                self.source = self.score.split('.wav')[0] + '.wav'
            self.title = self.source.split('\\')[self.source.count('\\')]
            self.lineEdit.clear()
            self.lineEdit.append(self.score)
            self.lineEdit.append(u'악보 파일이 성공적으로 열렸습니다. 해당 악보에 맞는 음악을 로딩합니다.')
            if os.path.exists(self.source):
                self.player = Phonon.MediaObject()
                self.player.setCurrentSource(Phonon.MediaSource(self.source))
                self.player.setTickInterval(100)
                self.player.tick.connect(self.tick)
                self.seekSlider.setMediaObject(self.player)
                self.audioOutput = Phonon.AudioOutput(Phonon.MusicCategory)
                Phonon.createPath(self.player, self.audioOutput)
                self.volumeSlider.setAudioOutput(self.audioOutput)
                self.sequencer()
                self.stop()
                self.lineEdit.append(
                    u'음악파일이 성공적으로 로드되었습니다. 시작(F1)을 눌러 리듬게임을 시작하세요!')
            else:
                self.lineEdit.append(u'악보 파일에 해당하는 음악파일이 없습니다. 음악파일을 먼저 열어주세요')

    # help function
    def help(self):
        QtGui.QMessageBox.information(
            None, u'팀 JHW -ARSHAKER',
            u'제10회 공개 SW 개발자대회 응용SW 부문 출품작\n\n제작자: 김주현 , 황혜원\n\n원만한 사용을 위해 게임 시작 전 좌,우,위 위치 슬라이더를 조절하여 초기에 초록색┌┐이 화면에 나오도록 해주시고,\n\n가급적 카메라가 있는 방향으로 몸을 뻗으세요.\n\n초록색┌┐상태로 전환이 잘 안되는 경우 인식 모드를 번갈아 바꾸어 보세요.',
            u'ARSHAKER 화이팅')

    # timer function
    def tick(self, time):
        displayTime = QtCore.QTime(0, (time / 60000) % 60, (time / 1000) % 60)
        self.lcdNumber.display(displayTime.toString('mm:ss'))
        timediff = self.totaltime - time
        if timediff < 0:
            timediff = 0
        displayTime2 = QtCore.QTime(0, (timediff / 60000) % 60,
                                    (timediff / 1000) % 60)
        self.lcdr.display(displayTime2.toString('mm:ss'))
        for note in self.notes:
            if note[0] <= self.player.currentTime(
            ) and note[0] > self.listindex:
                self.listindex = note[0]
                self.listWidget.setCurrentRow(self.notes.index(note))
                self.lcda.display(self.notes.index(note) + 1)
                self.lcdl.display(len(self.notes) - self.notes.index(note) - 1)
                self.lcdo.display(self.correct)
                self.lcdx.display(self.notes.index(note) + 1 - self.correct)
                percent = round((float(self.notes.index(note)) /
                                 float(len(self.notes)) * 100))
                perfect = round((float(self.correct) /
                                 float(self.notes.index(note) + 1) * 100))
                self.lcdg.display(int(perfect))
                self.lcdpg.display(int(percent))
                #self.listWidget.setCurrentIndex(self.listWidget.model().index(self.notes.index(note)))
                if timediff > 0:
                    self.ctransition = note[1]
                else:
                    self.ctransition = None
                if self.notes.index(note) + 19 < len(self.notes):
                    self.listWidget.scrollTo(self.listWidget.model().index(
                        self.notes.index(note) + 19))

    # video-related functions
    def diffImg(self, t0, t1, t2):
        d1 = cv2.absdiff(t2, t1)
        d2 = cv2.absdiff(t1, t0)
        return cv2.bitwise_and(d1, d2)

    def nextFrameSlot(self):
        ret, frame = self.cap.read()

        # vertical/horizontal flip; horizontal flip is selected by default
        if self.action_3.isChecked():
            frame = cv2.flip(frame, 0)
        if self.action_4.isChecked():
            frame = cv2.flip(frame, 1)

        # background-removal mode
        if self.action_11.isChecked():
            fgmask = self.fgbg.apply(frame)
            #fgmask = cv2.equalizeHist(fgmask)
            thrs1 = 2000
            thrs2 = 4000
            fgmask = cv2.Canny(fgmask, thrs1, thrs2, apertureSize=5)
            left = fgmask[0:self.vheight, 0:self.lrbnd]
            mid = fgmask[self.ubnd:self.vheight,
                         self.lrbnd:self.vwidth - self.lrbnd]
            right = fgmask[0:self.vheight,
                           self.vwidth - self.lrbnd:self.vwidth]
            up = fgmask[0:self.ubnd, self.lrbnd:self.vwidth - self.lrbnd]

        # afterimage (frame-difference) detection mode
        else:
            self.t_minus = self.t
            self.t = self.t_plus
            self.t_plus = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)

            blur = self.diffImg(self.t_minus, self.t, self.t_plus)
            masks = cv2.GaussianBlur(blur, (5, 5), 0)
            ret, mask = cv2.threshold(masks, self.minsize, 255,
                                      cv2.THRESH_TOZERO)

            left = mask[0:self.vheight, 0:self.lrbnd]
            mid = mask[self.ubnd:self.vheight,
                       self.lrbnd:self.vwidth - self.lrbnd]
            right = mask[0:self.vheight, self.vwidth - self.lrbnd:self.vwidth]
            up = mask[0:self.ubnd, self.lrbnd:self.vwidth - self.lrbnd]

        leftval = cv2.countNonZero(left)
        midval = cv2.countNonZero(mid)
        rightval = cv2.countNonZero(right)
        upval = cv2.countNonZero(up)

        if (leftval == 0 and rightval == 0
                and upval == 0) or (leftval < self.minsize
                                    and rightval < self.minsize
                                    and upval < self.minsize):
            # if (leftval is 0 and rightval is 0 and upval is 0):
            cv2.rectangle(frame, (self.lrbnd, self.ubnd),
                          (self.vwidth - self.lrbnd, self.vheight),
                          (0, 255, 0), 10)
            self.transition = 'mid'
        elif leftval == 0 and rightval == 0 and upval > 0 or (
                leftval < upval and rightval < upval):
            cv2.rectangle(frame, (self.lrbnd, 0),
                          (self.vwidth - self.lrbnd, self.ubnd),
                          (255, 255, 255), 10)
            if self.transition != 'U':
                self.transition = 'U'
        elif leftval > 0 and rightval == 0 and upval == 0 or (
                leftval > upval and leftval > rightval):
            cv2.rectangle(frame, (0, 0), (self.lrbnd, self.vheight),
                          (255, 0, 0), 10)
            if self.transition != 'L':
                self.transition = 'L'
        elif rightval > 0 and leftval == 0 and upval == 0 or (
                rightval > upval and rightval > leftval):
            cv2.rectangle(frame, (self.vwidth - self.lrbnd, 0),
                          (self.vwidth, self.vheight), (0, 0, 255), 10)
            if self.transition != 'R':
                self.transition = 'R'

        cv2.putText(frame,
                    str(leftval) + "/" + str(self.lenl), (3, 180), self.font,
                    0.5, (0, 255, 0), 2)
        cv2.putText(frame,
                    str(midval) + "/" + str(self.lenm), (270, 400), self.font,
                    0.5, (0, 255, 0), 2)
        cv2.putText(frame,
                    str(rightval) + "/" + str(self.lenr), (565, 180),
                    self.font, 0.5, (0, 255, 0), 2)
        cv2.putText(frame,
                    str(upval) + "/" + str(self.lenu), (280, 90), self.font,
                    0.5, (0, 255, 0), 2)

        if self.ctransition == self.transition:
            self.ctransition = None
            self.correct += 1
        if self.ctransition is not None:
            if self.ctransition == 'L':
                cv2.putText(frame, self.ctransition, (250, 360), self.font, 7,
                            (255, 0, 0), 7)
            elif self.ctransition == 'R':
                cv2.putText(frame, self.ctransition, (250, 360), self.font, 7,
                            (0, 0, 255), 7)
            elif self.ctransition == 'U':
                cv2.putText(frame, self.ctransition, (250, 360), self.font, 7,
                            (255, 255, 255), 7)

        #self.keyFix()
        if ret:
            # My webcam yields frames in BGR format
            self.frame = cv2.cvtColor(frame, cv2.cv.CV_BGR2RGB)
        else:
            self.frame = np.zeros((480, 640, 3), np.uint8)
            cv2.putText(self.frame,
                        "Please Rerun Program After Connecting Camera",
                        (5, 240), self.font, 0.8, (0, 255, 0), 2)
            self.lineEdit.setText(
                u'컴퓨터에 카메라가 설치되지 않았습니다. 카메라를 연결 후 프로그램을 재 실행 시켜주세요')

        img = QtGui.QImage(self.frame, self.frame.shape[1],
                           self.frame.shape[0], QtGui.QImage.Format_RGB888)
        pix = QtGui.QPixmap.fromImage(img)
        self.video_frame.setPixmap(pix)

    def startCapture(self):
        if not self.capture:
            self.cap = cv2.VideoCapture(0)
            self.cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, self.vwidth)
            self.cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, self.vheight)
            self.video_frame = QtGui.QLabel()
            self.pauseButton.clicked.connect(self.stop)
            self.pauseButton.connect(
                QtGui.QShortcut(QtGui.QKeySequence(QtCore.Qt.Key_F2),
                                self.centralwidget),
                QtCore.SIGNAL('activated()'), self.stop)
            # self.capture.setFPS(1)
            # self.setParent(self)
            # self.setWindowFlags(QtCore.Qt.Tool)
            self.monitor.addWidget(self.video_frame)
            self.start()

    '''def endCapture(self):
        self.capture.deleteLater()
        self.capture = None'''

    def setFPS(self, fps):
        self.fps = fps

    def start(self):
        if self.timer is None:
            self.timer = QtCore.QTimer()
            self.timer.timeout.connect(self.nextFrameSlot)
            self.timer.start(1000. / self.fps)
            if self.player is not None and self.player.state(
            ) != Phonon.PlayingState:
                self.player.play()

    def stop(self):
        if self.timer is not None:
            self.timer.stop()
            self.timer = None
            if self.player is not None and self.player.state(
            ) == Phonon.PlayingState:
                self.player.pause()

    def deleteLater(self):
        self.cap.release()
        super(QtGui.QWidget, self).deleteLater()
Example #9
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
#cap = cv2.VideoCapture('video/people-walking.mp4')

fgmogbg = cv2.BackgroundSubtractorMOG()  # Creates a background object.
fgmog2bg = cv2.BackgroundSubtractorMOG2()  # It detects and marks shadows as well.

while True:
    ret, frame = cap.read()

    # To get the foreground mask.
    fgmogmask = fgmogbg.apply(frame)
    fgmog2mask = fgmog2bg.apply(frame)

    cv2.imshow('original', frame)
    cv2.imshow('fgmogmask', fgmogmask)
    cv2.imshow('fgmog2mask', fgmog2mask)

    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()

# Note: For OpenCV version 3, the classic MOG subtractor lives in the contrib
# (bgsegm) module: use cv2.bgsegm.createBackgroundSubtractorMOG() and
# cv2.createBackgroundSubtractorMOG2() in place of the two constructors above.
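
# A minimal OpenCV 3 version of the same loop (an addition for illustration,
# not part of the original example). It assumes the opencv-contrib package is
# installed so that the cv2.bgsegm module is available.
import cv2

cap = cv2.VideoCapture(0)
mog = cv2.bgsegm.createBackgroundSubtractorMOG()               # classic MOG (contrib module)
mog2 = cv2.createBackgroundSubtractorMOG2(detectShadows=True)  # MOG2; marks shadows in gray

while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('original', frame)
    cv2.imshow('fgmogmask', mog.apply(frame))
    cv2.imshow('fgmog2mask', mog2.apply(frame))
    if cv2.waitKey(30) & 0xff == 27:
        break

cap.release()
cv2.destroyAllWindows()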
Example #10
RESALTAR_BORDES = 4
RESALTAR_LINEAS_RECTAS = 5
RESALTAR_AZUL = 6
RESALTAR_ROJO = 7
RESALTAR_VERDE = 8
RESALTAR_BLANCO = 9
DETECTAR_MOVIMIENTO = 10
RESALTAR_COLORES_FUEGO = 11

PARAMETRO_ROJO = 0
PARAMETRO_AZUL = 1
PARAMETRO_VERDE = 3
PARAMETRO_BLANCO = 4

mog = cv2.BackgroundSubtractorMOG(history=10,
                                  nmixtures=6,
                                  backgroundRatio=0.9,
                                  noiseSigma=0.1)


# function that intensifies the colors within a range (min and max)
# minimum range of the three HSV channels: hMin, sMin, vMin
# maximum range of the three HSV channels: hMax, sMax, vMax
def aumentarIntensidadPorRangoDeColor(frame, hMin, hMax, sMin, sMax, vMin,
                                      vMax):

    # convert the frame to the HSV color space
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # create arrays with the minimum and maximum bounds
    lower = np.array([hMin, sMin, vMin])
    upper = np.array([hMax, sMax, vMax])
Example #11
def inicializarMOG():
    global mog
    mog = cv2.BackgroundSubtractorMOG(history=500,
                                      nmixtures=6,
                                      backgroundRatio=0.9,
                                      noiseSigma=0.1)
import cv2
import numpy as np

def get_frame(cap, scaling_factor):
    ret, frame = cap.read()
    frame = cv2.resize(frame, None, fx=scaling_factor, 
            fy=scaling_factor, interpolation=cv2.INTER_AREA)
    return frame

if __name__=='__main__':
    cap = cv2.VideoCapture(0)
    bgSubtractor = cv2.BackgroundSubtractorMOG()
    history = 100

    while True:
        frame = get_frame(cap, 0.5)
        mask = bgSubtractor.apply(frame, learningRate=1.0/history)
        mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
        cv2.imshow('Input frame', frame)
        cv2.imshow('Moving Objects', mask & frame)
        c = cv2.waitKey(10)
        if c == 27:
            break

    cap.release()
    cv2.destroyAllWindows()
Example #13
    def startCamera(self):
        '''start Camera
        '''
        cap = cv2.VideoCapture(0)

        dialogbox = Form()
        dialogbox.show()
        dialogbox.liveCameraFlag = True

        while (cap.isOpened()):
            # Capture frame-by-frame
            ret, frame = cap.read()

            # Our operations on the frame come here
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            if dialogbox.filterFlag == "2D Convolution - Average":
                kernel = np.ones((5, 5), np.float32) / 25
                gray = cv2.filter2D(gray, -1, kernel)

            if dialogbox.filterFlag == "2D Convolution - Smooth":
                gray = cv2.blur(gray, (5, 5))

            if dialogbox.filterFlag == "2D Convolution - Gaussian":
                gray = cv2.GaussianBlur(gray, (5, 5), 0)

            if dialogbox.filterFlag == "2D Convolution - Median":
                gray = cv2.medianBlur(gray, 5)

            if dialogbox.filterFlag == "2D Convolution - Bilateral":
                gray = cv2.bilateralFilter(gray, 9, 75, 75)

            if dialogbox.filterFlag == "Canny Filter":
                gray = cv2.Canny(gray, 100, 20)

            if dialogbox.filterFlag == "Invert":
                gray = (255 - gray)

            if dialogbox.filterFlag == "Adaptive Threshold":
                gray_blur = cv2.GaussianBlur(gray, (15, 15), 0)
                gray = cv2.adaptiveThreshold(gray_blur, 255,
                                             cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                             cv2.THRESH_BINARY_INV, 11, 1)

            if dialogbox.filterFlag == "Laplacian Edge":
                # remove noise
                img = cv2.GaussianBlur(gray, (3, 3), 0)
                # convolute with proper kernels
                gray = cv2.Laplacian(img, cv2.CV_64F)

            if dialogbox.filterFlag == "Background Subtract":
                fgbg = cv2.BackgroundSubtractorMOG()
                history = 10
                while dialogbox.filterFlag == "Background Subtract":
                    retVal, frame = cap.read()
                    fgmask = fgbg.apply(frame, learningRate=1.0 / history)
                    cv2.imshow('Live Camera', fgmask)
                    globimg = gray

                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        cap.release()
                        cv2.destroyAllWindows()
                        globimg = 'OFF'

                        dialogbox.close()
                        break

            if dialogbox.getImageFlag == True:
                if dialogbox.blackandwhiteFlag == True:
                    self.capturedImage = (self.convertImage(gray))
                else:
                    self.capturedImage = (self.convertImage(frame))

            if dialogbox.recordingFlag == True:
                if dialogbox.blackandwhiteFlag == True:
                    self.recordingList.append(self.convertImage(gray))
                else:
                    self.recordingList.append(self.convertImage(frame))

            if dialogbox.blackandwhiteFlag == True:
                # Display the resulting frame in grey
                cv2.imshow('Live Camera', gray)
                globimg = gray

            else:
                # Display the resulting frame in colour
                cv2.imshow('Live Camera', frame)
                globimg = frame

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            if dialogbox.liveCameraFlag == False:
                break

        # When everything is done, release the capture
        cap.release()
        cv2.destroyAllWindows()
        globimg = 'OFF'
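# A minimal refactoring sketch for the filter chain above (an assumption, not part of
# the original class): mapping each dialogbox.filterFlag string to a function avoids
# re-testing every flag on every frame. Only the single-image filters are shown.
import cv2
import numpy as np

GRAY_FILTERS = {
    "2D Convolution - Average": lambda g: cv2.filter2D(g, -1, np.ones((5, 5), np.float32) / 25),
    "2D Convolution - Smooth": lambda g: cv2.blur(g, (5, 5)),
    "2D Convolution - Gaussian": lambda g: cv2.GaussianBlur(g, (5, 5), 0),
    "2D Convolution - Median": lambda g: cv2.medianBlur(g, 5),
    "2D Convolution - Bilateral": lambda g: cv2.bilateralFilter(g, 9, 75, 75),
    "Invert": lambda g: 255 - g,
}

def apply_filter(flag, gray):
    # Apply the selected filter, or return the frame unchanged if the flag is unknown.
    return GRAY_FILTERS.get(flag, lambda g: g)(gray)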
Example #14
    def mainfun(self):

        simgnum = 1
        imgNum = 1
        template = cv2.imread('outkicktemp71.png',0)
        tempsiz = cv2.imread('tempsize3.png',0)
        wrframe = False
        
        size = np.size(template)
        skelt = np.zeros(template.shape,np.uint8)
         
        ret,img = cv2.threshold(template,127,255,0)
        element = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
        done = False
         
        while( not done):
            eroded = cv2.erode(template,element)
            temp = cv2.dilate(eroded,element)
            temp = cv2.subtract(template,temp)
            skelt = cv2.bitwise_or(skelt,temp)
            template = eroded.copy()
         
            zeros = size - cv2.countNonZero(template)
            if zeros==size:
                done = True
        
        #cv2.imshow('Backgr',skelt)
        #cv2.waitKey(0)
        
        w, h = skelt.shape[::-1]
        
        print w
        print h
        
        threshold = 0.08
        
        fgbg = cv2.BackgroundSubtractorMOG()
        
        cap = cv2.VideoCapture("outkick73.mp4")
        
        while(1):
            
            
            ret, img_rgb = cap.read()
            fgmask = fgbg.apply(img_rgb)
            #cv2.imshow('FGmsk',fgmask)
            
            size = np.size(fgmask)
            skel = np.zeros(fgmask.shape,np.uint8)
         
            ret,fgmask = cv2.threshold(fgmask,127,255,0)
            element = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
            done = False
         
            while( not done):
                eroded = cv2.erode(fgmask,element)
                temp = cv2.dilate(eroded,element)
                temp = cv2.subtract(fgmask,temp)
                skel = cv2.bitwise_or(skel,temp)
                fgmask = eroded.copy()
         
                zeros = size - cv2.countNonZero(fgmask)
                if zeros==size:
                    done = True
            
            #cv2.imshow('Backgr',skel)
            
            imgNum+= 1
            
            if(self.status == False):
                
                res = cv2.matchTemplate(skel,skelt,cv2.TM_CCOEFF_NORMED)
                loc = np.where( res >= threshold)
                for pt in zip(*loc[::-1]):
                    cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 1)
                    
                    #winsound.Beep(2500, 40)
                    #os.system("sound.wav")
                    wrframe = True
                    self.trackwin = self.objtr.run_main(pt[0], pt[1], w, h, img_rgb)
                    self.status = True
            elif(self.status == True):
                
                self.trackwin = self.objtr.run_main2(self.trackwin, img_rgb)
                winsound.Beep(2500, 10)                
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    self.status = False
                    
                   
        #         cv2.circle(img_rgb,(pt[0], pt[1]),3,100+(255),-1)
        
            
            if(wrframe == True):
                cv2.imwrite('savepic//('+str(simgnum)+').jpg',img_rgb)
                simgnum=simgnum+1
                wrframe = False
         
              #  cv2.imwrite('res.png',img_rgb)
               # cv2.imshow('Detected Val',img_rgb)
                
            k = cv2.waitKey(1)
            if k == 27:  # Esc quits
                break
            elif k == 48:  # '0' key pauses briefly
                time.sleep(10.0)
                    
            cv2.imshow('Detected Val',img_rgb)
        
        cv2.destroyAllWindows()
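# A sketch of the erode/dilate/subtract/bitwise_or loop used twice in mainfun above,
# factored into a helper. This is morphological skeletonization; the OpenCV 2.4-era
# calls match the ones in the example, the helper itself is an assumption.
import cv2
import numpy as np

def morphological_skeleton(binary_img):
    # Return the morphological skeleton of an 8-bit binary image.
    skel = np.zeros(binary_img.shape, np.uint8)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    img = binary_img.copy()
    while True:
        eroded = cv2.erode(img, element)
        opened = cv2.dilate(eroded, element)   # opening of img
        temp = cv2.subtract(img, opened)       # pixels removed by the opening
        skel = cv2.bitwise_or(skel, temp)
        img = eroded
        if cv2.countNonZero(img) == 0:         # stop once the image is fully eroded
            return skel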
Example #15
    def __init__(self):
        self.motionDetected = False
        self.motionDetectedHold = False

        self.robot_view_motion = rospy.get_param('~robot_view_motion', False)
        self.Vp_MD_Subtr_UseFrmDiff = rospy.get_param(
            '~Vp_MD_Subtr_UseFrmDiff', False)
        self.Vp_MD_Subtr_UseMog = rospy.get_param('~Vp_MD_UseSubtractorMog',
                                                  True)
        self.Vp_ME_UseLkTractor = rospy.get_param('~Vp_ME_UseLkTractor', False)

        self.md_visualMsg = visualMsg()
        self.md_pub = rospy.Publisher(
            "tpc_visual", visualMsg, queue_size=5
        )  #name.data_class,subscriber_listener=None, tcp_nodelay=False, latch=False, headers=None, queue_size=None
        self.pub_count = 0

        self.lk_tracker = LKTracker()

        self.cam = PiCamera()
        self.cam.resolution = (640, 480)
        self.cam.framerate = 24
        self.rawCapture = PiRGBArray(self.cam)
        time.sleep(0.1)
        #self.cap = cv2.VideoCapture(0)
        #self.r = rospy.Rate(24)

        self.detect_box = (10, 10, 620, 460)  #None   (x,y,w,h)
        self.track_box = None  #rotated Rect (center, size, angle) | regular cvRect (x,y,w,h)
        self.keypoints = []
        self.frame_idx = 0
        self.detect_interval = 8
        self.mask = None

        self.feature_size = rospy.get_param("~feature_size", 2)
        self.prev_grey = None
        self.grey = None
        self.avg = None

        self.bgsMOG = cv2.BackgroundSubtractorMOG(
            1, 6, 0.9,
            .3)  #([history, nmixtures, backgroundRatio[, noiseSigma]])
        """
           history - length of the history
           nmixtures - number of Gaussian mixtures
        """
        # Subtractor parameters
        self.delta_thresh = rospy.get_param('~subtr_delta_thresh', 20)
        self.min_area = rospy.get_param('~subtr_min_area',
                                        3000)  # ignore too small contour

        #good feature params
        self.gf_maxCorners = rospy.get_param('~gf_maxCorners',
                                             200)  # 500 | 200
        self.gf_qualityLevel = rospy.get_param("~gf_qualityLevel",
                                               0.02)  # 0.3 | 0.02
        self.gf_minDistance = rospy.get_param("~gf_minDistance", 7)  # 10  |  7
        self.gf_blockSize = rospy.get_param("~gf_blockSize", 10)  # 7   | 10
        self.gf_useHarrisDetector = rospy.get_param("~gf_useHarrisDetector",
                                                    True)
        self.gf_k = rospy.get_param("~gf_k", 0.04)
        self.gf_params = dict(maxCorners=self.gf_maxCorners,
                              qualityLevel=self.gf_qualityLevel,
                              minDistance=self.gf_minDistance,
                              blockSize=self.gf_blockSize,
                              useHarrisDetector=self.gf_useHarrisDetector,
                              k=self.gf_k)

        if DEBUG_L1: print('Info:motionDetect initialized.')
import numpy as np

import imutils
import cv2

cap = cv2.VideoCapture('Q1.avi')

fgbg = cv2.BackgroundSubtractorMOG(history=1,
                                   nmixtures=1,
                                   backgroundRatio=0.7,
                                   noiseSigma=0)

numframes = cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
count = 1

while count <= 100:

    name = 'Frame%s.jpg' % count
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)

    # print numframes
    # cv2.imwrite(name,fgmask)
    cv2.imshow('frame', fgmask)
    cv2.waitKey(100)
    count += 1
cap.release()
import cv2
import numpy as np
backsub = cv2.BackgroundSubtractorMOG()
cap = cv2.VideoCapture(0)

while True:
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # filter bounds
    lower_red = np.array([10, 90, 10])
    upper_red = np.array([254, 200, 100])
    # masking the image
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    # change detection
    fgmask = backsub.apply(mask, None, 0.001)
    contours, hierarchy = cv2.findContours(fgmask.copy(), cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_NONE)
    # video size; should be determined automatically
    Y = 480
    X = 640
    # splitting the frame
    cv2.line(frame, (0, Y / 2), (X, Y / 2), (255, 0, 255), 1)
    cv2.line(frame, (X / 2, 0), (X / 2, Y), (255, 0, 255), 1)
    try:
        hierarchy = hierarchy[0]
    except:
        hierarchy = []
    for contour, hier in zip(contours, hierarchy):
        (x, y, w, h) = cv2.boundingRect(contour)
        if w > 20 and h > 20:
Example #18
 def __init__(self):
     self._windowManager = WindowManager('Track', self.onKeypress)
     self._captureManager = CaptureManager(cv2.VideoCapture(0),
                                           self._windowManager, True)
     self._fgbg = cv2.BackgroundSubtractorMOG()
Example #19
def srs():
    ip = cv2.VideoCapture(IPVIDEONAME)
    bgsubtractor = cv2.BackgroundSubtractorMOG()
    count = 'N/A'
    heavyweigthcount = 0
    mediumweigthcount = 0
    kernel = np.ones((5, 5), np.uint8)
    PAUSE = False
    Framecount = 0
    fc1 = 0
    fc2 = 0
    y, e = 0, 0  #Setting Defaults to avoid Errors
    speed = "N/A"
    flagtostartlogic = False
    detectedVehicle = "N/A"
    if IPVIDEONAME == 0:
        num_frames = 120

        # print "Capturing {0} frames".format(num_frames)

        # Start time
        start = time.time()

        # Grab a few frames
        for i in xrange(0, num_frames):
            ret, frame = ip.read()

        # End time
        end = time.time()

        # Time elapsed
        seconds = end - start
        # print "Time taken : {0} seconds".format(seconds)

        # Calculate frames per second

        fps = num_frames / seconds

        print("fps", fps)
    else:
        fps = ip.get(cv2.cv.CV_CAP_PROP_FPS)
        print "Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(
            fps)

    while ip.isOpened():
        ret, img = ip.read()
        if ret:
            copy = img.copy()

            # copy=cv2.resize(copy,(400,400))
            # img = cv2.resize(img, (400, 400))
            Framecount = Framecount + 1
            timestamp = datetime.datetime.now()
            ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
            cv2.putText(copy, ts, (10, copy.shape[0] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 0, 255), 1)
            # cv2.imshow("CCTV",copy)

            # Setting Imaginary Lines and points to check
            ############################################################################################################
            if LINE_ORIENTATION == "Vertical Reference Line":
                #HardCoded Points
                ##################################################################################################################################
                # pt1 = img.shape[1] / 2
                # pt2 = img.shape[1] / 2 + 100
                # cv2.line(copy, (pt1, 0), (img.shape[1] / 2, img.shape[0]), (255, 127, 0), 2)
                # cv2.line(copy, (pt2, 0), (img.shape[1] / 2+100, img.shape[0]), (255, 127, 0), 2)
                ################################################################################################################################

                #Using DISTBETWEENPOINTS variable
                ########################################################################################################################################
                pt1 = img.shape[1] / 2
                pt2 = img.shape[1] / 2 + DISTBETWEENPOINTS
                cv2.line(copy, (pt1, 0), (pt1, img.shape[0]), (255, 127, 0), 2)
                cv2.line(copy, (pt2, 0), (pt2, img.shape[0]), (255, 127, 0), 2)
                ######################################################################################################################################

            else:
                # HardCoded Points
                ##################################################################################################################################
                # pt2 = img.shape[0] / 2 - 50  # Swapped pt1 and pt2 positons not tested
                # pt1 = img.shape[0] / 2
                # cv2.line(copy, (0, pt1), (img.shape[1], pt1), (255, 127, 0), 2)
                # cv2.line(copy, (0,pt2), (img.shape[1], pt2), (255, 127, 0), 2)
                ##################################################################################################################################

                # Using DISTBETWEENPOINTS variable
                ########################################################################################################################################
                pt2 = img.shape[
                    0] / 2 - DISTBETWEENPOINTS  # Swapped pt1 and pt2 positons not tested
                pt1 = img.shape[0] / 2
                cv2.line(copy, (0, pt1), (img.shape[1], pt1), (255, 127, 0), 2)
                cv2.line(copy, (0, pt2), (img.shape[1], pt2), (255, 127, 0), 2)
                ######################################################################################################################################

            # Preprocessing the Video
            ############################################################################################################
            imggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            imggray = cv2.GaussianBlur(imggray, (5, 5), 0)
            backsubimg = bgsubtractor.apply(imggray, None, 0.01)
            # cv2.imshow("BGSubImg",backsubimg)
            dilate = backsubimg

            for i in range(9):
                dilate = cv2.erode(dilate, None, iterations=1)
                dilate = cv2.dilate(dilate, None, iterations=2)

            dilate = cv2.dilate(dilate, None, iterations=1)
            # cv2.imshow("Erode",dilate)
            ############################################################################################################

            # Finding Countours and Drawing them
            ############################################################################################################
            contours, hierarchy = cv2.findContours(dilate, cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_SIMPLE)
            # cv2.imshow('After Contouring', dilate)
            # cv2.drawContours(img, contours, -1, (0, 255, 0), 3)
            for c in contours:
                (x, y, w, h) = cv2.boundingRect(c)
                # cv2.line(test, (img.shape[0]/2,0), (img.shape[0]/2,img.shape[1]), (255, 127, 0), 2)
                cv2.rectangle(copy, (x, y), (x + w, y + h), (255, 0, 0), 2)
                M = cv2.moments(c)
                if M["m00"] != 0:  #Fixed Divide By Zero Error
                    cx = int(M["m10"] / M["m00"])
                    cy = int(M["m01"] / M["m00"])
                else:
                    cx, cy = 0, 0
                #print(cx,xofline)

                if flagtostartlogic:
                    if LINE_ORIENTATION == "Vertical Reference Line":
                        if cx >= pt1 and cx <= pt1 + 10:
                            fc1 = Framecount
                            # e=time.time()
                            # print("E",e)
                            count = count + 1
                            print "Total Count", count
                            totalcounter.setText(str(count))
                            print "Countour Area", cv2.contourArea(c)
                            if cv2.contourArea(c) >= MWR1 and cv2.contourArea(
                                    c) <= MWR2:
                                mediumweigthcount = mediumweigthcount + 1
                                print "Medium Weighted Vehicle ", mediumweigthcount
                                mediumcounter.setText(str(mediumweigthcount))
                                detectedVehicle = "Medium Weighted Vehicle"

                            else:
                                heavyweigthcount = heavyweigthcount + 1
                                print "Heavy Weighted Vehicle ", heavyweigthcount
                                heavycounter.setText(str(heavyweigthcount))
                                detectedVehicle = "Heavy Weighted Vehicle"

                            if (RRM == detectedVehicle):
                                text = alertholder.text()
                                alertholder.setText(
                                    text +
                                    "\n Restricted Vehicle type found on Road")
                                detectedVehicle = "N/A"
                            else:
                                detectedVehicle = "N/A"

                        if cx >= pt2 and cx < pt2 + 10:
                            fc2 = Framecount
                            numframe = fc2 - fc1
                            # print("NumberofFrames",numframe)
                            try:
                                t = numframe / fps
                            except ZeroDivisionError:
                                continue
                            # print("t",t)
                            # print("coincide")
                            # s=time.time()
                            # print("S", s)
                            # y=s-e
                            # print("Y",y)
                            try:
                                speed = round((0.02 * 3600 / t) % 100, 2)
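                                # km/h: the gap between the two reference lines is
                                # assumed to represent 0.02 km, and t is the crossing
                                # time in seconds, so speed = 0.02 / (t / 3600); the
                                # % 100 wraps implausibly large readings.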
                                # speed = 2*1000/(numframe*fps)
                                # speed = abs(speed)
                                # # speed = '{:.2f}'.format(speed).replace('.', '')

                            except:
                                continue
                            print speed, "km/h"
                            if (speed > SPEED_LIMIT):
                                name = "Snaps\\" + "ID" + str(
                                    count) + " " + str(
                                        speed) + " " + "kmph" + ".jpg"
                                # crop_img = img[int(y):int(y+w+100),int(x):int(x+h+100)]
                                crop_img = img[int(y):int(y + w),
                                               int(x):int(x + h)]
                                cv2.imwrite(name, crop_img)
                                text = alertholder.text()
                                alertholder.setText(text +
                                                    "\n Speed limit crossed")

                            # # imagefornumberplate
                            # if cx==img.shape[1]/2:
                            #     snap = "NumberPlate\\"+"xyz"+".png"
                            #     crop_img = copy[int(y):int(y + w ), int(x):int(x + h)]
                            #     cv2.imwrite(name, crop_img)

                            fc2 = 0
                            fc1 = 0
                            numframe = 0

                    else:
                        if cy >= pt1 and cy <= pt1 + 19:
                            fc1 = Framecount
                            # e = time.time()
                            count = count + 1
                            print "Total Count", count
                            totalcounter.setText(str(count))
                            print "Countour Area", cv2.contourArea(c)

                            if cv2.contourArea(c) >= MWR1 and cv2.contourArea(
                                    c) <= MWR2:
                                mediumweigthcount = mediumweigthcount + 1
                                print "Medium Weighted Vehicle ", mediumweigthcount
                                mediumcounter.setText(str(mediumweigthcount))
                                detectedVehicle = "Medium Weighted Vehicle"

                            else:
                                heavyweigthcount = heavyweigthcount + 1
                                print "Heavy Weighted Vehicle ", heavyweigthcount
                                heavycounter.setText(str(heavyweigthcount))
                                detectedVehicle = "Heavy Weighted Vehicle"

                            if (RRM == detectedVehicle):
                                text = alertholder.text()
                                alertholder.setText(
                                    text +
                                    "\n Restricted Vehicle type found on Road")
                                detectedVehicle = "N/A"
                            else:
                                detectedVehicle = "N/A"

                        if cy >= pt2 and cy < pt2 + 20:  # y-coordinate for horizontal reference lines
                            # print
                            fc2 = Framecount
                            numframe = fc2 - fc1
                            # print("NumberofFrames", numframe)
                            t = numframe / fps
                            # s=time.time()
                            # y=s-e
                            try:
                                speed = round((0.05 * 3600 / t) % 100, 2)
                                # speed = 0.05/ (numframe * fps)
                            except ZeroDivisionError:
                                continue

                            print speed, "km/h"

                            if (speed > SPEED_LIMIT):
                                name = "Snaps\\" + "ID" + str(
                                    count) + " " + str(
                                        speed) + " " + "kmph" + ".jpg"
                                # crop_img = img[int(y):int(y+w+100),int(x):int(x+h+100)]
                                crop_img = img[int(y):int(y + w),
                                               int(x):int(x + h)]
                                cv2.imwrite(name, crop_img)
                                text = alertholder.text()
                                alertholder.setText(text +
                                                    "\n Speed limit crossed")
                            fc2 = 0
                            fc1 = 0

            ############################################################################################################

            # Drawing Centroids
            ###########################################################################################################
                cv2.circle(copy, (cx, cy), 3, (255, 0, 255), -1)
                cv2.putText(
                    copy, "Count: " + str(count) + "  Speed: " + str(speed) +
                    " kmph", (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.55,
                    (0, 255, 0), 2)
                # copy=cv2.resize(copy,(420,276))
                # cv2.namedWindow("Detected Vehicles", cv2.cv.CV_WINDOW_AUTOSIZE)
                cv2.imshow("Detected Vehicles", copy)
            ############################################################################################################

            # Mechanism for sustaining pause,play,nextFrame which is set in the outer while loop
            ############################################################################################################
            while (PAUSE):
                k = cv2.waitKey()
                # print(k)
                if k == 32:  # PAUSE Video Using Spacebar
                    PAUSE = False

                if k == 13:  # Next Frame Using Enter
                    break

                if k == 27:  # Exit When in Paused State using ESC
                    PAUSE = False
                    break
            ############################################################################################################

            # Setting FPS and pause state
            ############################################################################################################
            k = cv2.waitKey(30)
            if k == 27:
                break

            if k == 32:
                PAUSE = True
            if k == ord('s'):
                flagtostartlogic = True
                count = 0
                speed = 0
            ############################################################################################################

        # If no next frame is available i.e ret is false
        ################################################################################################################
        else:
            print("Video Ended")
            email()
            break
        ###############################################################################################################

    ip.release()
    cv2.destroyAllWindows()
Example #20
else:
    camera = cv2.VideoCapture(args["video"])

fourcc = cv2.cv.CV_FOURCC('m', 'p', '4', 'v')
video = cv2.VideoWriter('MOG_scaling_nov.avi', fourcc, 10, (640, 480), True)

_object_type = "Car"
_temp_path = args["tempfolder"]
_model_path = args["modelpath"]

# Load the classifier
clf = joblib.load(_model_path)
firstFrame = None

fgbg = cv2.BackgroundSubtractorMOG(history=2,
                                   nmixtures=5,
                                   backgroundRatio=0.6,
                                   noiseSigma=0.7)

count = 0
car_count = 0
frame_count = 0

# loop over the frames of the video
while True:
    # grab the current frame and initialize the occupied/unoccupied text
    (grabbed, frame) = camera.read()
    text = "Unoccupied"

    # if the frame could not be grabbed, then we have reached the end of the video
    if not grabbed:
        break
Example #21
def get_center_pixel(trial_folder, num_images):
    left_image_folder = os.path.join(trial_folder, 'left')
    right_image_folder = os.path.join(trial_folder, 'right')
    xyz_folder = os.path.join(trial_folder, 'xyz')
    center_pixels = []
    xyzs = []
    left_fgbg = cv2.BackgroundSubtractorMOG()
    right_fgbg = cv2.BackgroundSubtractorMOG()
    left_images_list = os.listdir(left_image_folder)
    left_images_list = [
        image for image in left_images_list if image.endswith('.jpg')
    ]
    assert len(
        left_images_list) > num_images, 'Cannot fetch %d images in %s' % (
            num_images, left_image_folder)
    idx = 0
    left_bkg_image = '%s-%d.jpg' % ('left', len(left_images_list) - 1)
    right_bkg_image = '%s-%d.jpg' % ('right', len(left_images_list) - 1)
    left_image = cv2.imread(os.path.join(left_image_folder, left_bkg_image))
    right_image = cv2.imread(os.path.join(right_image_folder, right_bkg_image))
    left_fgmask = left_fgbg.apply(left_image)
    right_fgmask = right_fgbg.apply(right_image)
    while True:
        left_image_name = '%s-%d.jpg' % ('left', idx)
        right_image_name = '%s-%d.jpg' % ('right', idx)
        xyz_filename = '%s-%d.json' % ('xyz', idx)
        left_image = cv2.imread(
            os.path.join(left_image_folder, left_image_name))
        right_image = cv2.imread(
            os.path.join(right_image_folder, right_image_name))
        xyz_file = os.path.join(xyz_folder, xyz_filename)
        left_fgmask = left_fgbg.apply(left_image)
        right_fgmask = right_fgbg.apply(right_image)
        left_contours, left_hierarchy = cv2.findContours(
            left_fgmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        right_contours, right_hierarchy = cv2.findContours(
            right_fgmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        left_xy_radius = []
        right_xy_radius = []
        for contour in left_contours:
            if contour.shape[0] < 3:
                continue
            (x, y), radius = cv2.minEnclosingCircle(contour)
            left_xy_radius.append((int(x), int(y), radius))
        for contour in right_contours:
            if contour.shape[0] < 3:
                continue
            (x, y), radius = cv2.minEnclosingCircle(contour)
            right_xy_radius.append((int(x), int(y), radius))
        if not left_xy_radius:
            left_x = 0
            left_y = 0
            left_radius = 0
        else:
            left_xy_radius = sorted(left_xy_radius, key=lambda tup: tup[0])
            left_x, left_y, left_radius = left_xy_radius[-1]
        if not right_xy_radius:
            right_x = 0
            right_y = 0
            right_radius = 0
        else:
            right_xy_radius = sorted(right_xy_radius, key=lambda tup: tup[0])
            right_x, right_y, right_radius = right_xy_radius[-1]

        center_pixels.append([left_x, left_y, right_x, right_y])
        tmp_xyz = []
        with open(xyz_file, 'r') as f:
            data = json.load(f)
            tmp_xyz.append(data['xyz'][0]['x'])
            tmp_xyz.append(data['xyz'][0]['y'])
            tmp_xyz.append(data['xyz'][0]['z'])
        xyzs.append(tmp_xyz)
        idx += 1
        if idx == num_images:
            break
        assert len(left_images_list) > idx, 'Cannot fetch %d images in %s' % (
            num_images, left_image_folder)

    return center_pixels, xyzs
Example #22
    width, height = im.size
    im = im.convert("L")
    data = im.getdata()
    data = np.matrix(data, dtype='float') / 255.0
    new_data = np.reshape(data, (width, height))
    return new_data
    
def ImageToMatrix2(im):
    mx = scipy.misc.fromimage(im)
    return mx 

root_path = "./"

cap = cv2.VideoCapture("data/test.mp4")

fgbg = cv2.BackgroundSubtractorMOG(500, 10, 0.2)

#fgbg = cv2.createBackgroundSubtractorMOG2()
#fgbg.setHistory(500)
#fgbg.setNMixtures(10)
#fgbg.setBackgroundRatio(0.2)

fnprex = "getting_mp4_"

predict_filelist = root_path + "predict_filelist.txt"
pfl = open(predict_filelist,'w')

j = 1
while (1):
Example #23
 def __init__(self):
     self.PI = 3.14159
     self.bg_subtractor = cv2.BackgroundSubtractorMOG(history=200,
                                                      nmixtures=5,
                                                      backgroundRatio=0.7,
                                                      noiseSigma=0)
# coding: utf-8

# ## Background Subtraction

# ### 1. Gaussian Mixture-based Background/Foreground Segmentation Algorithm

# In[ ]:

# OpenCV 2.4.13 only
import numpy as np
import cv2

cap = cv2.VideoCapture('walking.avi')

# Initialize background subtractor
foreground_background = cv2.BackgroundSubtractorMOG()

while True:

    ret, frame = cap.read()

    # Apply background subtractor to get our foreground mask
    foreground_mask = foreground_background.apply(frame)

    cv2.imshow('Output', foreground_mask)
    if cv2.waitKey(1) == 13:
        break

cap.release()
cv2.destroyAllWindows()
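# A rough equivalent for OpenCV 3.x/4.x, where BackgroundSubtractorMOG was moved to the
# opencv-contrib bgsegm module and the main module offers createBackgroundSubtractorMOG2
# instead. A minimal sketch; the parameter values below are assumptions, not taken from
# the example above.
import cv2

cap = cv2.VideoCapture('walking.avi')
foreground_background = cv2.createBackgroundSubtractorMOG2(history=500, detectShadows=False)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    # Apply the subtractor to get the foreground mask for this frame
    foreground_mask = foreground_background.apply(frame)
    cv2.imshow('Output', foreground_mask)
    if cv2.waitKey(1) == 13:  # Enter key stops playback
        break

cap.release()
cv2.destroyAllWindows()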
Example #25
def runCamera(cameraName):
    try:
        motionDetectedTimestamp = ""

        # Connect to database
        conn = sqlite3.connect('/home/pc/opencv_database.db')
        c = conn.cursor()

        # Get camera values from database
        ip = retrieveFromDatabase("ip", cameraName)
        port = retrieveFromDatabase("port", cameraName)
        password = retrieveFromDatabase("password", cameraName)

        # Create camera url
        mpegURL = "http://" + ip + ":" + port + "/videostream.asf?user=admin&pwd=" + password + "&resolution=32&rate=0&.mpg"

        # Specify the video to be captured
        cap = cv2.VideoCapture(mpegURL)

        # Get the starting time and starting video number
        startTime = time.time()
        videoNumber = 1

        # Codec and VideoWriter object for saving the video
        fileSaveDirectory = retrieveDirectoryFromDB()
        fourcc = cv2.cv.CV_FOURCC(*'XVID')
        out = getOutputFile(fileSaveDirectory, videoNumber, fourcc)

        fgbg = cv2.BackgroundSubtractorMOG()

        motionDetectedFrameCount = 0
        motionDetected = False
        mute = True
        # While the camera is recording
        while (True):
            # Check for any keys that were pressed
            k = cv2.waitKey(30) & 0xff
            if k == ord('q') or k == 27:
                break
            elif k == ord('k'):
                # Generate a new background
                fgbg = cv2.BackgroundSubtractorMOG()
            elif k == ord('m'):
                mute = not mute
            elif k == ord('w'):
                # Move camera up
                moveCamera(password, ip, port, 0)
            elif k == ord('a'):
                # Move camera left
                moveCamera(password, ip, port, 4)
            elif k == ord('s'):
                # Move camera down
                moveCamera(password, ip, port, 2)
            elif k == ord('d'):
                # Move camera right
                moveCamera(password, ip, port, 6)
            # Stop any camera movement
            moveCamera(password, ip, port, 1)

            # Read the current frame from the camera
            ret, frame = cap.read()

            # If there has been motion detected for more than a specified number of frames, generate a new mask
            if motionDetectedFrameCount > 1:
                fgbg = cv2.BackgroundSubtractorMOG()
                motionDetectedFrameCount = 0

            # Apply the mask to the frame
            fgmask = fgbg.apply(frame)
            kernel = np.ones((5, 5), np.uint8)
            fgmask = cv2.erode(fgmask, kernel, iterations=1)
            fgmask = cv2.dilate(fgmask, kernel, iterations=4)

            # Find the outlines (contours) of the foreground regions in the mask, if any
            contours, hierarchy = cv2.findContours(fgmask, cv2.RETR_LIST,
                                                   cv2.CHAIN_APPROX_NONE)

            # Accessing contours[0] below would fail on an empty list, so only proceed if contours were found:
            if len(contours) != 0:
                motionDetectedFrameCount += 1
                motionDetected = True
                cnt = contours[0]
                x, y, w, h = cv2.boundingRect(cnt)
                minX = x
                minY = y
                maxX = x + w
                maxY = y + h
                for contour in contours:
                    x, y, w, h = cv2.boundingRect(contour)
                    if x < minX:
                        minX = x
                    elif y < minY:
                        minY = y
                    elif (x + w) > maxX:
                        maxX = (x + w)
                    elif (y + h) > maxY:
                        maxY = (y + h)
                # Draw a target around the motion detected
                centerX = (minX + maxX) / 2
                centerY = (minY + maxY) / 2
                cv2.rectangle(frame, (centerX, centerY), (centerX, centerY),
                              (255, 000, 255), 2)
                cv2.rectangle(frame, (minX, minY), (maxX, maxY),
                              (255, 000, 255), 2)
                # Play a sound to alert the user of motion detected
                if not mute:
                    os.system("aplay beep.wav")

                # Record movement time of occurrence in log
                #if (motionDetectedTimestamp != time.asctime(time.localtime())):
                #	motionDetectedTimestamp = time.asctime(time.localtime())
                #	logTimestamp()

            # Put text over video frame
            # Put a timestamp on the video frame
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(frame, str(time.asctime(time.localtime())), (0, 25),
                        font, 1, (0, 0, 0), 7)
            cv2.putText(frame, str(time.asctime(time.localtime())), (0, 25),
                        font, 1, (255, 255, 255), 2)
            # Add MUTE text if the program is muted
            if mute:
                cv2.putText(frame, "MUTE", (555, 475), font, 1, (0, 0, 255), 4)

            # Show the frame, and write it to the .avi file
            cv2.imshow('Video', frame)
            out.write(frame)

            # Find how long the routine has been running for
            endTime = time.time()
            elapsedTime = endTime - startTime

            # Save the video after a specified number of seconds
            if elapsedTime >= 4:
                out.release()
                # If there was motion detected during the recording, move on to the next video number.  Otherwise write over this video
                # If there are more than a specified number of videos, the count is set back to 1 so they can all be written over
                if (videoNumber == 2000) and (motionDetected == True):
                    motionDetected = False
                    videoNumber = 1
                elif motionDetected == True:
                    motionDetected = False
                    videoNumber += 1

                out = getOutputFile(fileSaveDirectory, videoNumber, fourcc)
                startTime = time.time()
        cap.release()
        out.release()
        cv2.destroyWindow('Video')
    except:
        pass
Example #26
class datasheet:
    date_time = time.ctime()
    Bus_route = "XYZ"
    Location = "XYZ"
    no_of_person_going_in = 0
    no_of_person_coming_out = 0
    no_of_person_in = 0

def Distance(a, b):
    c = (a[0] - b[0])**2 + (a[1] - b[1])**2
    return c**0.5
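# Usage example: Distance((0, 0), (3, 4)) returns 5.0 (straight-line pixel distance).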

frame_count=0
if(len(sys.argv)>1): cap = cv2.VideoCapture(sys.argv[1])
else: cap = cv2.VideoCapture(0)
fgbg = cv2.BackgroundSubtractorMOG(200,5,0.6,0)
distance =0
#prev_distance=0
# pts = []
incount=0
outcount=0
#prev_center = None

circles=[]
data = datasheet() #data = []
#perv_location = '' 
ret, firstFrame = cap.read()
grayFirst = cv2.cvtColor(firstFrame, cv2.COLOR_BGR2GRAY)
while(1):
    # if(frame_count==302):continue
    ret, frame = cap.read()
Example #27
    def conquer(self, cap, start, end):
        # Background Subtraction parameters
        i = 0
        fgbg = cv2.BackgroundSubtractorMOG()

        # Results
        scenes = []

        # Scene feature initialization
        exp_avrg = 0
        max_bspeed = 0
        max_brightness = 0
        avrg_brightness = 0

        # Iterating over the frames
        last_frame = []
        first_frame = []
        current_frame_start = start
        currentframe = start
        while currentframe < end:
            # Locking the capture to get the frame
            self.lock.acquire()
            cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, currentframe - 1)
            ret, frame = cap.read()
            self.lock.release()

            if ret:
                # Save the first frame
                if len(first_frame) == 0:
                    first_frame = frame

                # Apply the background subtraction
                if i == 2:
                    fgbg = cv2.BackgroundSubtractorMOG()
                    i = 0
                fgmask = fgbg.apply(frame)
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                width, height = frame.shape

                # Get the brightness
                brightness = frame.mean()
                if i > 0 and i < 2:
                    w, h = fgmask.shape
                    diff = np.count_nonzero(fgmask) / (1.0 * w * h)

                    # Add new scene if big difference encountered
                    if diff > exp_avrg or (currentframe + self._skip_rate +
                                           1) >= end:
                        if current_frame_start != currentframe:
                            avrg_brightness /= 1.0 * (currentframe -
                                                      current_frame_start) / (
                                                          self._skip_rate + 1)
                        scenes.append({
                            "end_frame": currentframe,
                            "max_bspeed": max_bspeed,
                            "avrg_bspeed": exp_avrg,
                            "max_brightness": max_brightness,
                            "avrg_brightness": avrg_brightness
                        })

                        # Reinitialize the scene features
                        exp_avrg = self._alpha * diff + (
                            1 - self._alpha) * exp_avrg
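                        # exp_avrg is an exponential moving average of the per-frame
                        # foreground ratio: alpha weights the newest diff against the
                        # running average (interpretation inferred from the update above).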
                        max_bspeed = 0
                        max_brightness = 0
                        avrg_brightness = 0
                        last_frame = frame

                    # Compute the frame features
                    max_bspeed = max(diff, max_bspeed)

                # Compute the frame features
                max_brightness = max(brightness, max_brightness)
                avrg_brightness += brightness
                i += 1

            # Increment the frame index & skipping some
            currentframe += self._skip_rate + 1

        # Save the scenes
        fr_start = 0
        ret_scenes = []
        for j in range(len(scenes)):
            scene = scenes[j]
            fr_end = scene["end_frame"]
            duration = (fr_end - fr_start)
            if duration > self._skip_rate:
                scenes[j]["start_frame"] = fr_start
                scenes[j]["start"] = fr_start / self.fps
                scenes[j]["end"] = fr_end / self.fps
                scenes[j]["duration"] = duration / self.fps
                scenes[j]["position"] = fr_start / self.length
                ret_scenes.append(scenes[j])
            fr_start = fr_end

        # Return the result in the queue
        return (ret_scenes, first_frame, last_frame)
Example #28
 def __init__(self, learning_rate=-1):
     self.mog = cv2.BackgroundSubtractorMOG()
     self.mask = None
     self.learning_rate = learning_rate
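 # Hypothetical continuation (not in the original example): an apply step that
 # forwards the stored learning rate to the MOG subtractor and caches the mask.
 def apply(self, frame):
     self.mask = self.mog.apply(frame, learningRate=self.learning_rate)
     return self.mask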
Example #29
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64, help="max buffer size")
args = vars(ap.parse_args())

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    camera = cv2.VideoCapture(0)

# otherwise, grab a reference to the video file
else:
    camera = cv2.VideoCapture(args["video"])

fgbg = cv2.BackgroundSubtractorMOG(50, 1, 0.9, .1)
fgbg2 = cv2.BackgroundSubtractorMOG(50, 1, 0.9, .1)

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#face_cascade = cv2.CascadeClassifier('body.xml')

#(grabbed, frame) = camera.read()
#frame = imutils.resize(frame, width=600)
#frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

#oldframe=frame

# keep looping
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()
import cv2
import numpy as np
import math
cap = cv2.VideoCapture(0)
fgbg = cv2.BackgroundSubtractorMOG(history=3, nmixtures=4, backgroundRatio=0.5, noiseSigma=0.01)
fbg = 0


while(cap.isOpened()):
    ret, img = cap.read()
    fbg = img
    history = 3
    fbg = fgbg.apply(img,learningRate=1.0/history)
    cv2.imshow('test',fbg)


    #Region inside which the hand is to be placed 
    cv2.rectangle(img,(0,0),(500,500),(0,255,0),0)
    grey = fbg[200:500, 200:500]
    crop_img =img[200:500, 200:500]
    fbg_crop = fbg[200:500, 200:500] 
    
    grey = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
    #grey = cv2.cvtColor(crop_img, cv2.COLOR_GRAY2BGR)
    value = (35,35)
    
    thresh1 =crop_img
    blurred = cv2.GaussianBlur(grey, value, 0)
    
    _, thresh1 = cv2.threshold(blurred, 200, 255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)