def dictionary_learning_MHOF_online(training_samples_num=400):
    from MHOF_Extraction import MHOF_Extraction
    from MHOF_histogram_block import MHOF_histogram_block
    from sklearn.decomposition import MiniBatchDictionaryLearning
    import numpy as np
    import cv2
    import video
    cam = video.create_capture('Crowd-Activity-All.avi')
    height_block_num = 4
    width_block_num = 5
    bin_num = 16
    ret, prev = cam.read()
    ret, img = cam.read()
    flow_H = MHOF_Extraction(prev, img)
    flow_hist_H = MHOF_histogram_block(flow_H, height_block_num, width_block_num, bin_num)
    flow_hist_H = np.reshape(flow_hist_H, [1, flow_hist_H.size])
    dico = MiniBatchDictionaryLearning(n_components=1, alpha=1, n_iter=500)
    dic = dico.fit(flow_hist_H).components_
    for i in range(training_samples_num):
        prev = img  # advance the frame pair before reading the next frame
        ret, img = cam.read()
        flow_H = MHOF_Extraction(prev, img)
        flow_hist_H = MHOF_histogram_block(flow_H, height_block_num, width_block_num, bin_num)
        flow_hist_H = np.reshape(flow_hist_H, [1, flow_hist_H.size])
        # grow the dictionary by one atom per sample, warm-starting from dic
        dico = MiniBatchDictionaryLearning(n_components=i + 1, alpha=1, n_iter=500, dict_init=dic)
        dic = dico.fit(flow_hist_H).components_
    return dic
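
A note on the loop above: scikit-learn's MiniBatchDictionaryLearning also exposes partial_fit, which avoids re-instantiating the estimator for every sample. A minimal sketch under that assumption, with random vectors standing in for the MHOF histograms (4 * 5 * 16 = 320 bins, matching the block and bin counts above):

# Sketch: true online updates with a fixed dictionary size via partial_fit.
import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning

dico = MiniBatchDictionaryLearning(n_components=8, alpha=1)
for _ in range(400):
    sample = np.random.rand(1, 4 * 5 * 16)  # stand-in for one flattened MHOF histogram
    dico.partial_fit(sample)
dic = dico.components_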

        
    def __init__(self, video_src):
        self.track_len = 25
        self.detect_interval = 1
        self.tracks = []

        self.capture = cv2.VideoCapture(video_src)

        self.frame_width = int(self.capture.get(cv.CV_CAP_PROP_FRAME_WIDTH) / 2)
        self.frame_height = int(self.capture.get(cv.CV_CAP_PROP_FRAME_HEIGHT) / 2)
        self.frame_size = (self.frame_width, self.frame_height)
        self.grid_width = int(self.frame_width / 8 / 2)
        self.grid_height = int(self.frame_height / 8 / 2)
        self.grid_size = (self.grid_width, self.grid_height)
        self.total_frame_count = int(self.capture.get(cv.CV_CAP_PROP_FRAME_COUNT))
        print self.frame_size
        print self.grid_size
        print self.total_frame_count

        self.data_path = str(video_src) + ".oflw"
        print self.data_path
        self.fp = np.memmap(self.data_path, dtype="float32", mode="w+", shape=(self.total_frame_count, (512 + 128)))

        print "FP shape: ", self.fp.shape
        self.cam = video.create_capture(video_src)
        self.frame_idx = 0
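
The memmap above reserves one 640-float row per frame of the clip. A minimal standalone sketch of how such a file is filled and flushed (the file name and sizes here are illustrative, not from the original):

import numpy as np

total_frames, feat_dim = 100, 512 + 128
fp = np.memmap("clip.oflw", dtype="float32", mode="w+", shape=(total_frames, feat_dim))
for frame_idx in range(total_frames):
    descriptor = np.zeros(feat_dim, np.float32)  # stand-in for per-frame features
    fp[frame_idx] = descriptor
fp.flush()  # persist the mapped pages to disk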
Example #3
 def __init__(self, video_src):
     self.track_len = 10
     self.detect_interval = 3
     self.tracks = {}
     self.cam = video.create_capture(video_src)
     self.frame_idx = 0
     self.prev_gray = {}
Example #4
def main():
    import sys

    try:
        fn = sys.argv[1]
    except:
        fn = 0
    cap = video.create_capture(fn)

    leveln = 6
    cv.namedWindow('level control')
    for i in range(leveln):
        cv.createTrackbar('%d'%i, 'level control', 5, 50, nothing)

    while True:
        ret, frame = cap.read()

        pyr = build_lappyr(frame, leveln)
        for i in range(leveln):
            v = int(cv.getTrackbarPos('%d'%i, 'level control') / 5)
            pyr[i] *= v
        res = merge_lappyr(pyr)

        cv.imshow('laplacian pyramid filter', res)

        if cv.waitKey(1) == 27:
            break

    print('Done')
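
build_lappyr, merge_lappyr and nothing are not defined in this snippet; a sketch consistent with OpenCV's lappyr.py sample (assuming cv is cv2, as above):

import numpy as np
import cv2 as cv

def nothing(*arg):
    # no-op trackbar callback
    pass

def build_lappyr(img, leveln=6, dtype=np.int16):
    # Laplacian pyramid: keep per-level detail plus the final low-pass image.
    img = dtype(img)
    levels = []
    for _ in range(leveln - 1):
        next_img = cv.pyrDown(img)
        up = cv.pyrUp(next_img, dstsize=(img.shape[1], img.shape[0]))
        levels.append(img - up)
        img = next_img
    levels.append(img)
    return levels

def merge_lappyr(levels):
    # Reconstruct coarsest-first, adding the detail levels back in.
    img = levels[-1]
    for lev_img in levels[-2::-1]:
        img = cv.pyrUp(img, dstsize=(lev_img.shape[1], lev_img.shape[0]))
        img += lev_img
    return np.uint8(np.clip(img, 0, 255))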
Example #5
def main():
	try:
		fn = sys.argv[1]
	except:
		fn = 0
	cv2.namedWindow('edge')

	cap = video.create_capture(fn)
	cv2.setMouseCallback('edge', onmouse)
	global ser
	ser = serial.Serial('COM4',9600)
	count = 0
	while True:
		#print seed_pt[0]
		flag, img = cap.read()
		vis=img.copy()
		gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
		if count%2==0:
			main_check(vis,gray,seed_pt)
				
		cv2.imshow('edge', vis)
		count+=1
		ch = cv2.waitKey(5) & 0xFF
		if ch == 27:
			break
Example #6
def main():
    try:
        video_src = sys.argv[1]
    except:
        video_src = 0

    cam = video.create_capture(video_src)
    mser = cv.MSER_create()

    while True:
        ret, img = cam.read()
        if ret == 0:
            break
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        vis = img.copy()

        regions, _ = mser.detectRegions(gray)
        hulls = [cv.convexHull(p.reshape(-1, 1, 2)) for p in regions]
        cv.polylines(vis, hulls, 1, (0, 255, 0))

        cv.imshow('img', vis)
        if cv.waitKey(5) == 27:
            break

    print('Done')
Example #7
 def __init__(self, video_src, paused = False):
     self.cap = video.create_capture(video_src)
     _, self.frame = self.cap.read()
     cv2.imshow('frame', self.frame)
     self.rect_sel = RectSelector('frame', self.onrect)
     self.trackers = []
     self.paused = paused
 def __init__(self, video_src, skipFrame):
     self.track_len = 10
     self.detect_interval = 5
     self.tracks = []
     self.cam = video.create_capture(video_src)
     self.frame_idx = 0
     self.skipFrame = skipFrame
Example #9
def main():
    try:
        fn = sys.argv[1]
    except:
        fn = 0

    def nothing(*arg):
        pass

    cv.namedWindow('edge')
    cv.createTrackbar('thrs1', 'edge', 2000, 5000, nothing)
    cv.createTrackbar('thrs2', 'edge', 4000, 5000, nothing)

    cap = video.create_capture(fn)
    while True:
        flag, img = cap.read()
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        thrs1 = cv.getTrackbarPos('thrs1', 'edge')
        thrs2 = cv.getTrackbarPos('thrs2', 'edge')
        edge = cv.Canny(gray, thrs1, thrs2, apertureSize=5)
        vis = img.copy()
        vis = np.uint8(vis/2.)
        vis[edge != 0] = (0, 255, 0)
        cv.imshow('edge', vis)
        ch = cv.waitKey(5)
        if ch == 27:
            break

    print('Done')
Example #10
    def __init__(self, src, threshold = 25, doRecord=True, showWindows=True):
        self.doRecord = doRecord
        self.show = showWindows
        self.frame = None
        
        self.cap = video.create_capture(src)
        self.cap.set(3,1280)
        self.cap.set(4,2316)
        self.ret, self.frame = self.cap.read() #Take a frame to init recorder
        self.frame_rate = self.cap.get(5)
        print self.frame_rate
        self.gray_frame = np.zeros((self.cap.get(3), self.cap.get(4), 1), np.uint8)
        self.average_frame = np.zeros((self.cap.get(3), self.cap.get(4), 3), np.float32)
        self.absdiff_frame = None
        self.previous_frame = None
        
        self.surface = self.cap.get(3) * self.cap.get(4)
        self.currentsurface = 0
        self.currentcontours = None
        self.threshold = threshold
        self.isRecording = False

        self.tracks = []
        self.tracks_dist = []
        self.track_len = 3
        self.frame_idx = 0
        self.detect_interval = 5
        
        # self.font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, 8) #Creates a font

        self.trigger_time = 0
        if showWindows:
            cv2.namedWindow("Image", cv2.WINDOW_AUTOSIZE)
    def detectFace(self):
        print "stuff"
        import sys, getopt
        print help_message

        args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
        try: video_src = video_src[0]
        except: video_src = 0
        args = dict(args)
        # cascade_fn = args.get('--cascade', "haarcascade_frontalface_alt.xml")
        cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
        
        cam = create_capture(video_src, fallback='synth:bg=lena.jpg:noise=0.05')
        found = False
        
        while True:
            ret, img = cam.read()
            rects = self.detect(img, cascade)
            vis = img.copy()
            if len(rects) > 0 and not found:
                print "here"
                if goodOrBad(img,cascade):
                    found = True

            cv2.imshow('facedetect', vis)

            if 0xFF & cv2.waitKey(5) == 27:
                break
        cv2.destroyAllWindows()
    def __init__(self, video_src, circles, csvFlag, rotation = 0):
        """
        Initializes main function for execution
        :param video_src: path to video to be analyzed
        :param circles: path to text file containing joints
        :param csvFlag: flag if video_src is actually a csv that should be read in. ONLY USE THIS WITH DEBUG MODE!
        :param rotation: rotation applied to the video (default 0)
        """
        self.roguePoints = []
        self.rotation = rotation
        self.csvFlag = csvFlag
        if not csvFlag:
            self.cam = video.create_capture(video_src)
        else:
            self.cam = csv.reader(open(video_src, 'r'), delimiter=',', quotechar='|')
        self.frame_idx = -1  # the frame index updates BEFORE anything is done

        #Save frame to start at and the initial circles.
        self.allJoints = []
        f = open(circles, 'r')
        self.initalFrame = int(f.readline())
        for line in f:
            read = map(float, line.split())  # x y r (separated by spaces)
            circle = j.Circle(read[0], read[1], read[2])
            self.allJoints.append(j.Joint(circle, prm.TRACK_LEN))  # circle in form [(x,y),r]
        f.close()
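
For reference, the joints file parsed above begins with the starting frame number, followed by one "x y r" triple per line; a minimal file it would accept (values are illustrative):

120
310.5 220.0 14.0
298.0 305.2 12.5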
Example #13
    def __init__(self, src):
        self.cap = video.create_capture(src)
        self.frame = None
        self.rect_obj = None

        cv2.namedWindow('plane')
        self.rect_sel = common.RectSelector('plane', self.on_rect)
Example #14
def compare(face_to_check,learn=False):
	import sys, getopt
	detected_time = 0
	detected_time_max = 10
	
	video_src = 0
	cascade_fn = os.path.join('data','haarcascades','haarcascade_frontalface_alt2.xml')

	cascade = cv2.CascadeClassifier(cascade_fn)

	cam = create_capture(video_src, fallback='synth:bg=../cpp/lena.jpg:noise=0.05')
	
	while True:
		ret, img1 = cam.read()
		gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
		gray = cv2.equalizeHist(gray)
		
		t = clock()
		rects = detect(gray, cascade)
		
		if len(rects):
			if detected_time<detected_time_max:
				detected_time+=1
			else:
				_found_size = (rects[0][0],rects[0][1],rects[0][2]-rects[0][0],
					rects[0][3]-rects[0][1])
				
				_found_face = cv.GetImage(cv.fromarray(img1))
				
				cv.SetImageROI(_found_face,_found_size)
				
				current_face = cv.CreateImage(cv.GetSize(_found_face),
					_found_face.depth,
					_found_face.nChannels)
				
				if learn:
					cv.Copy(_found_face, current_face, None)
					cv.SaveImage(os.path.join('data','images',face_to_check),current_face)
				
				cv.ResetImageROI(cv.GetImage(cv.fromarray(img1)))
				
				img2 = cv.LoadImage(os.path.join('data','images',face_to_check))
				
				dest_face = cv.CreateImage(cv.GetSize(img2),
					img2.depth,
					img2.nChannels)
				
				cv.Resize(_found_face, dest_face)
				
				if cv.Norm(dest_face,img2)<=30000:
					return True
				else:
					return False
				
				sys.exit()
		else:
			detected_time = 0
		
		dt = clock() - t
def main():
    cap = video.create_capture()

    classifier_fn = 'digits_svm.dat'
    if not os.path.exists(classifier_fn):
        print '"%s" not found, run digits.py first' % classifier_fn
        return 
    
    model = SVM()
    model.load('digits_svm.dat')

    while True:
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        bin = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 31, 10)
        bin = cv2.medianBlur(bin, 3)
        contours, heirs = cv2.findContours( bin.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        rects = map(cv2.boundingRect, contours)
        valid_flags = [ 16 <= h <= 64  and w <= 1.2*h  for x, y, w, h in rects]

        for i, cnt in enumerate(contours):
            if not valid_flags[i]:
                continue
            _, _, _, outer_i = heirs[0, i]
            if outer_i >=0 and valid_flags[outer_i]:
                continue
            x, y, w, h = rects[i]
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0))
            sub = bin[y:,x:][:h,:w]
            #sub = ~cv2.equalizeHist(sub)
            #_, sub_bin = cv2.threshold(sub, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)

            s = 1.5*float(h)/SZ
            m = cv2.moments(sub)
            m00 = m['m00']
            if m00/255 < 0.1*w*h or m00/255 > 0.9*w*h:
                continue

            c1 = np.float32([m['m10'], m['m01']]) / m00
            c0 = np.float32([SZ/2, SZ/2])
            t = c1 - s*c0
            A = np.zeros((2, 3), np.float32)
            A[:,:2] = np.eye(2)*s
            A[:,2] = t
            sub1 = cv2.warpAffine(sub, A, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
            sub1 = deskew(sub1)
            if x+w+SZ < frame.shape[1] and y+SZ < frame.shape[0]:
                frame[y:,x+w:][:SZ, :SZ] = sub1[...,np.newaxis]
                
            sample = preprocess_hog([sub1])
            digit = model.predict(sample)[0]
            cv2.putText(frame, '%d'%digit, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (200, 0, 0), thickness = 1)


        cv2.imshow('frame', frame)
        cv2.imshow('bin', bin)
        if cv2.waitKey(1) == 27:
            break
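
The A built above is a centroid-centering affine: with scale s and digit centroid c1 in the ROI, A = [s*I | c1 - s*c0] maps the SZxSZ model frame onto the ROI, and WARP_INVERSE_MAP pulls the digit back out centred. A standalone sketch with illustrative numbers:

import numpy as np

SZ = 20                            # model patch side, as in OpenCV's digits sample
s = 1.5 * 32 / SZ                  # scale for a bounding box of height h = 32
c1 = np.float32([15.0, 17.0])      # digit centroid inside the ROI (example value)
c0 = np.float32([SZ / 2, SZ / 2])  # centre of the model frame
A = np.zeros((2, 3), np.float32)
A[:, :2] = np.eye(2) * s           # pure scaling part
A[:, 2] = c1 - s * c0              # translation aligning the two centroids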
Example #16
    def __init__(self, src):
        self.cap = video.create_capture(src)
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv2.namedWindow("plane")
        self.rect_sel = common.RectSelector("plane", self.on_rect)
Example #17
    def __init__(self, src):
        self.cap = video.create_capture(src, presets['book'])
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv2.namedWindow('plane')
        self.rect_sel = common.RectSelector('plane', self.on_rect)
 def __init__(self, video_src):
     self.track_len = 10
     self.detect_interval = 5
     self.tracks = []
     self.cam = video.create_capture(video_src)
     self.frame_idx = 0
     self.screenx=700
     self.screeny=550
Example #19
    def __init__(self, src):
        self.cap = video.create_capture(src)
        self.frame = None
        self.paused = False
        self.tracker = PlaneTracker()

        cv2.namedWindow('plane')
        cv2.createTrackbar('focal', 'plane', 25, 50, common.nothing)
        self.rect_sel = common.RectSelector('plane', self.on_rect)
Example #20
 def __init__(self, video_src):
     self.cam = video.create_capture(video_src)
     ret, self.frame = self.cam.read()
     cv2.namedWindow('camshift')
     cv2.setMouseCallback('camshift', self.onmouse)
     self.selection = None
     self.drag_start = None
     self.tracking_state = 0
     self.show_backproj = False
Example #21
def main():
	try:
		video_src = sys.argv[1]
	except:
		print help_message
		return

	cap = video.create_capture(video_src)
	capture = cv.CaptureFromFile(video_src)
	fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
	num_frames = float(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))

	keyDown = False
	states = []

	index = 0
	while True:

		ret, frame = cap.read()
		if ret:
			if len(states) == 0:
				# show first frame and let user decide if it is good or bad
				draw_str(frame, (20, 40), 'BAD')
				cv2.imshow('', frame)
				ch = cv2.waitKey(2500)
			else:
				ch = cv2.waitKey(int(1000/fps))

			if ch != -1: # key pressed
				keyDown = not keyDown  # toggle state on any key press

			if keyDown:
				state = 'GOOD'
				states.append(1)
			else:
				state = 'BAD'
				states.append(0)

			# draw_str(frame, (20, 40), state)
			draw_str(frame, (20, 20), '%s, %2d:%02d\t %2.2f%%' % (state, int((index / fps) / 60), int((index / fps) % 60), 100.0 * index / num_frames))
			cv2.imshow('', frame)

			index += 1
		else:
			# no more frames...
			break
	d = dict(states=states)
	content = json.dumps(d)

	# do not write a file if json parser fails
	if content:
		# write to disc
		f = open('%s.txt' % video_src,'w')	
		f.write(content)
		f.close()
	else:
		print 'error in json parser'
Example #22
	def __init__(self, src):
		self.cap = video.create_capture(src)
		self.frame = None
		self.paused = False
		self.tracker = PlaneTracker()

		cv2.namedWindow("plane")
		cv2.setMouseCallback("plane", self.on_mouse)
		self.drag_start = None
		self.track_window = None
Example #23
    def __init__(self, video_src):
        self.cam = video.create_capture(video_src, presets['cube'])
        _ret, self.frame = self.cam.read()
        cv.namedWindow('camshift')
        cv.setMouseCallback('camshift', self.onmouse)

        self.selection = None
        self.drag_start = None
        self.show_backproj = False
        self.track_window = None
    def __init__(self, video_src):
        self.track_len = 10
        self.cam = video.create_capture(video_src)        
        self.screenx=700
        self.screeny=550
        self.threshold=50
        self.switch_time=3
        self.codec = cv2.VideoWriter_fourcc('W', 'M', 'V', '2')

        cv2.namedWindow("Image")
Example #25
 def __init__(self, video_src):
     self.track_len = 10
     self.detect_interval = 5
     self.tracks = []
     self.cam = video.create_capture(video_src)
     self.frame_idx = 0
     self.last_event = clock()
     self.filter = 1
     self.min_interval = 5
     self.slow_frame=0
     self.sin_transform=None
Example #26
	def __init__(self, fp):
		"""
		Initialize with filepath to .avi file you want to alter.
		"""
		self._fps = 23.92
		self._capture_size  = (1920, 1080)
		self._fourcc = cv.CV_FOURCC('m','p','4','v')
		self._directory = os.path.split(fp)[0]
		self._file_name = os.path.splitext(os.path.split(fp)[1])[0]
		self._file_ext = os.path.splitext(os.path.split(fp)[1])[1]
		self._capture = video.create_capture(fp)
    def __init__(self, video_src):
        self.track_len = 10
        self.cam = video.create_capture(video_src)        
        self.screenx=700
        self.screeny=550
        self.threshold=50
        self.switch_time=10
        self.codec = cv2.VideoWriter_fourcc('W', 'M', 'V', '2')
        self.total_writer=cv2.VideoWriter("total"+".wmv", self.codec, 10, (640,480), 1)

        cv2.namedWindow("Image")
    def __init__(self, src):
        self.cap = video.create_capture(src)
        self.ref_frame = None

        self.detector = cv2.ORB(nfeatures=1000)
        self.matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)

        cv2.namedWindow("plane")
        self.rect_sel = common.RectSelector("plane", self.on_rect)

        self.frame = None
def video_feed(queue):
   '''
   Reads from a video capture and puts a frame in the queue.
   '''
   video = create_capture(0)
   success, frame = video.read()

   while success != 0:
      frame = crop_frame(frame)   
      queue.put(frame)
      success, frame = video.read()
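
A possible consumer for video_feed, assuming the producer runs in its own process and frames are plain numpy arrays (queue handling only; the processing step is left as a comment):

from multiprocessing import Process, Queue

def consume(queue):
    while True:
        frame = queue.get()  # blocks until the producer pushes a frame
        # ... process the cropped frame here ...

if __name__ == '__main__':
    q = Queue()
    Process(target=video_feed, args=(q,)).start()
    consume(q)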
Example #30
	def __init__(self, video_src):
		self.track_len = 10
		self.detect_interval = 5
		self.tracks = []
		self.cam = video.create_capture(video_src)
		self.frame_idx = 0
		self.video_src = video_src
		capture = cv.CaptureFromFile(video_src)
		self.num_frames = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))
		self.fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
		self.frame_metadata = []
Example #31
    except:
        video_src = 0
    args = dict(args)  # make args as dictionary
    cascade_fn = args.get('--cascade',
                          "data/haarcascades/haarcascade_frontalface_alt.xml"
                          )  # get trained face detected data
    nested_fn = args.get('--nested-cascade',
                         "data/haarcascades/haarcascade_eye.xml")

    cascade = cv.CascadeClassifier(
        cv.samples.findFile(cascade_fn))  # Loads the classifier from a file.
    nested = cv.CascadeClassifier(cv.samples.findFile(nested_fn))

    cam = create_capture(
        video_src,  # create_capture is a convenience function for capture creation,
        # falling back to procedural video in case of error.
        fallback='synth:bg={}:noise=0.05'.format(
            cv.samples.findFile('samples/data/lena.jpg')))

    while True:
        t = clock()  # start to count the time
        time = 0.0  # initialize the time counting
        ret, img = cam.read()  # capture a frame and store it
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)  # convert from BGR to grayscale
        gray = cv.equalizeHist(gray)  # equalize the grayscale histogram
        vis = img.copy()  # make a copy of the image
        if detected.all() == 0 or total_time > 3000:
            # if we haven't detected a face, or couldn't find one in the current
            # sub-rectangle for more than 3000 ms, start detecting on the full image
Example #32
 def __init__(self, video_src, usePiCamera=False):
     self.track_len = 10
     self.detect_interval = 3
     self.tracks = []
     self.cam = video.create_capture(video_src, usePiCamera=usePiCamera)
     self.frame_idx = 0
def main():
    try:
        src = sys.argv[1]
    except:
        src = 0
    cap = video.create_capture(src)

    classifier_fn = 'digits_svm.dat'
    if not os.path.exists(classifier_fn):
        print '"%s" not found, run digits.py first' % classifier_fn
        return
    model = SVM()
    model.load(classifier_fn)

    while True:
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        bin = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY_INV, 31, 10)
        bin = cv2.medianBlur(bin, 3)
        contours, heirs = cv2.findContours(bin.copy(), cv2.RETR_CCOMP,
                                           cv2.CHAIN_APPROX_SIMPLE)
        try:
            heirs = heirs[0]
        except:
            heirs = []

        for cnt, heir in zip(contours, heirs):
            _, _, _, outer_i = heir
            if outer_i >= 0:
                continue
            x, y, w, h = cv2.boundingRect(cnt)
            if not (16 <= h <= 64 and w <= 1.2 * h):
                continue
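            # pad narrow boxes out to roughly square so the digit stays centred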
            pad = max(h - w, 0)
            x, w = x - pad / 2, w + pad
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0))

            bin_roi = bin[y:, x:][:h, :w]
            gray_roi = gray[y:, x:][:h, :w]

            m = bin_roi != 0
            if not 0.1 < m.mean() < 0.4:
                continue
            '''
            v_in, v_out = gray_roi[m], gray_roi[~m]
            if v_out.std() > 10.0:
                continue
            s = "%f, %f" % (abs(v_in.mean() - v_out.mean()), v_out.std())
            cv2.putText(frame, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (200, 0, 0), thickness = 1)
            '''

            s = 1.5 * float(h) / SZ
            m = cv2.moments(bin_roi)
            c1 = np.float32([m['m10'], m['m01']]) / m['m00']
            c0 = np.float32([SZ / 2, SZ / 2])
            t = c1 - s * c0
            A = np.zeros((2, 3), np.float32)
            A[:, :2] = np.eye(2) * s
            A[:, 2] = t
            bin_norm = cv2.warpAffine(bin_roi,
                                      A, (SZ, SZ),
                                      flags=cv2.WARP_INVERSE_MAP
                                      | cv2.INTER_LINEAR)
            bin_norm = deskew(bin_norm)
            if x + w + SZ < frame.shape[1] and y + SZ < frame.shape[0]:
                frame[y:, x + w:][:SZ, :SZ] = bin_norm[..., np.newaxis]

            sample = preprocess_hog([bin_norm])
            digit = model.predict(sample)[0]
            cv2.putText(frame,
                        '%d' % digit, (x, y),
                        cv2.FONT_HERSHEY_PLAIN,
                        1.0, (200, 0, 0),
                        thickness=1)

        cv2.imshow('frame', frame)
        cv2.imshow('bin', bin)
        ch = cv2.waitKey(1)
        if ch == 27:
            break
Example #34
    print(__doc__)

    args, video_src = getopt.getopt(sys.argv[1:], '',
                                    ['cascade=', 'nested-cascade='])
    try:
        video_src = video_src[0]
    except:
        video_src = 0
    args = dict(args)

    cascade_fn = args.get('--cascade',
                          "data/haarcascades/haarcascade_frontalface_alt.xml")
    cascade = cv.CascadeClassifier(cv.samples.findFile(cascade_fn))

    cam = create_capture(video_src,
                         fallback='synth:bg={}:noise=0.05'.format(
                             cv.samples.findFile('samples/data/lena.jpg')))

    # to_detect = []
    detected = None
    show_backproj = False

    while True:
        ret, img = cam.read()
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        gray = cv.equalizeHist(gray)
        hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)

        # (0, 60, 30) is a dark colour
        # (180. 255, 255) is cyan
        # masking is thresholding the HSV image to get only colours in
Example #35
def main():
    import sys, getopt
    checknum = 0
    while True:
        try:

          # face recognizing code

          print('face camera ')
          args, video_src = getopt.getopt(sys.argv[1:2], '', ['cascade=', 'nested-cascade='])
          try:
              video_src = video_src[0]
          except:
              video_src = 0
          args = dict(args)
          cascade_fn = args.get('--cascade', "data/haarcascades/haarcascade_frontalface_alt.xml")
          nested_fn  = args.get('--nested-cascade', "data/haarcascades/haarcascade_eye.xml")
          cascade = cv.CascadeClassifier(cv.samples.findFile(cascade_fn))
          nested = cv.CascadeClassifier(cv.samples.findFile(nested_fn))
          cam = create_capture(video_src, fallback='synth:bg={}:noise=0.05'.format(cv.samples.findFile('samples/data/lena.jpg')))

          while True:
              ret, img = cam.read()
              gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
              gray = cv.equalizeHist(gray)
              rects = detect(gray, cascade)
              vis = img.copy()
              if len(rects):
                  if not nested.empty():
                      print('into nested')
                      for x1, y1, x2, y2 in rects:
                          roi = gray[y1:y2, x1:x2]
                          vis_roi = vis[y1:y2, x1:x2]
                          print('findrects')
                          subrects = detect(roi.copy(), nested)


                      if len(subrects) > 0:  # eyes detected inside the last face rect
                          faceok = 'faceok.wav'
                          fa = sa.WaveObject.from_wave_file(faceok)
                          face = fa.play()
                          face.wait_done()
                          print('detect!!')
                          break

          cam.release() # face recognition camera off
         


          # helmet detecting code
          print("helmet camera")
          # load wav file 
          filename = 'helmet.wav'
          wave_obj = sa.WaveObject.from_wave_file(filename)
          helmetok = 'helmetok.wav'
          wave = sa.WaveObject.from_wave_file(helmetok)

          # load model & label for classifying
          labels = "labels.txt"
          model = "model_edgetpu.tflite"
          interpreter = Interpreter(model, experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
          interpreter.allocate_tensors()
          _, height, width, _ = interpreter.get_input_details()[0]['shape']


          # helmet detect camera on
          with picamera.PiCamera(resolution=(640, 480), framerate=30) as camera:
              camera.start_preview()
              try:
                  stream = io.BytesIO()
                  for _ in camera.capture_continuous(stream, format='jpeg', use_video_port=True):
                      stream.seek(0)
                      image = Image.open(stream).convert('RGB').resize((width, height),Image.ANTIALIAS)
                      results = classify_image(interpreter, image)
                      stream.seek(0)
                      stream.truncate()

                      # decide whether the helmet is worn
                      if results==0:
                          play_obj = wave_obj.play()
                          play_obj.wait_done()
                          checknum += 1

                          # if the user fails the helmet check three times, fall back to face recognition
                          if checknum==3:
                              checknum = 0
                              break

                      else:
                          helm = wave.play()
                          helm.wait_done()
                          print('GoodBoy')
                          break

              finally:
                  camera.stop_preview()
        

        # If you want to interrupt the code running, then press Ctrl + C
        except KeyboardInterrupt:
            break
Example #36
def main():
    import sys
    
    if path.exists('data/coordinates_1.yml'):
        os.remove('data/coordinates_1.yml')
        
    points = None
    waterLevelSlots = None
    
    try:
        fn = sys.argv[1]
    except:
        fn = 0
    cap = video.create_capture(fn)

    def fetchShortIntervalVideos(ctrl, motion_detector, lock):
        with lock:
             videoFilePath, hasIncomingVideoCaptureChanged = ctrl.getVideoFilePath()
             return videoFilePath, hasIncomingVideoCaptureChanged, motion_detector
    
    threadn = cv.getNumberOfCPUs()
    pending = deque()
    lock = Lock()
    pool = ThreadPool(processes = threadn, initializer = init_child, initargs=(lock,))
    
    threaded_mode = True
    ctrl = None
    motionDetector = None
    
    screenWidth = int(round(GetSystemMetrics(0) / 2))
    screenHeight = int(round(GetSystemMetrics(1) / 2))
    
    
    coordinates_data = None
    times = None 
    statuses = None
    
    pointsCaptured = False
    while True:
        with lock:
            while len(pending) > 1 and pending[0].ready() and pending[1].ready():
                payload = pending.popleft().get()
                if len(payload) == 3:
                    videoFilePath, hasIncomingVideoCaptureChanged, motion_detector = payload
                    if videoFilePath is None and hasIncomingVideoCaptureChanged is None:
                        break
                    else:
                        capture, coordinates_data, times, statuses = motion_detector.detect_motion_activity(videoFilePath, hasIncomingVideoCaptureChanged)
                        while capture.isOpened():
                            result, frame = capture.read()
                            if not result:
                                capture.release()
                                continue
                            else:
                                res, evaluated_areas  = motion_detector.process_algo_per_frame(frame, capture, coordinates_data, times, statuses)
                                
                                #draw_str(res, (5, 20), WaterLevelSlots.LEVEL_REACHED_MSG
                                        # + str(evaluated_waterLevelSlots.get_current_water_level()))
                                
                                #cv.namedWindow('flood-detection', cv.WINDOW_NORMAL)
                                #cv.setWindowProperty('flood-detection', 0, 1)
                                #cv.imshow('flood-detection', res)
                                
                                cv.namedWindow('OOW 2020 SMART CITY USE CASE - "SMART PEDESTRIAN CROSSING"', cv.WINDOW_NORMAL)
                                #cv.setWindowProperty('smart-parking', 0, 1)
                                #print(screenWidth)
                                #print(screenHeight)
                                resize = ResizeWithAspectRatio(res, width=screenWidth, height=screenHeight) 
                                cv.imshow('OOW 2020 SMART CITY USE CASE - "SMART PEDESTRIAN CROSSING"', resize)
                         
        if len(pending) < threadn:
            
            if not pointsCaptured:
                _ret, frame = cap.read()
                points = getPoints(frame, points)
                area = Area()
                ctrl = Controller(points, None, None)
                motionDetector = MotionDetector(points, 1, area)
                pointsCaptured = True
           
            if threaded_mode:
                task_put_videos = pool.apply_async(captureShortIntervalVideos, (cap, lock))
                task_get_videos = pool.apply_async(fetchShortIntervalVideos,(ctrl, motionDetector, lock))
                
            
            pending.append(task_put_videos)
            pending.append(task_get_videos)
        
        ch = cv.waitKey(1)
        if ch == ord(' '):
            threaded_mode = not threaded_mode
        if ch == 27:
            break

    print('Done')
    cap.release()
Example #37
    table_number=6,  # 12
    key_size=12,  # 20
    multi_probe_level=1)  #2
matcher = cv2.FlannBasedMatcher(flann_params,
                                {})  # bug : need to pass empty dict (#1329)

green, red = (0, 255, 0), (0, 0, 255)

if __name__ == '__main__':
    print __doc__

    try:
        src = sys.argv[1]
    except:
        src = 0
    cap = video.create_capture(src)

    ref_kp = None

    while True:
        ret, img = cap.read()
        vis = img.copy()
        kp = detector.detect(img)
        kp, desc = extractor.compute(img, kp)

        for p in kp:
            x, y = np.int32(p.pt)
            r = int(0.5 * p.size)
            cv2.circle(vis, (x, y), r, (0, 255, 0))
        draw_str(vis, (20, 20), 'feature_n: %d' % len(kp))
Example #38
    return hog_viz
#https://raw.githubusercontent.com/shiaokai/plex/master/python/peopledetect_cam.py
if __name__ == '__main__':
    import sys
    from glob import glob
    import itertools as it

    # -- temporary dimension mismatch
    if len(sys.argv)==2:
        img = cv2.imread(sys.argv[1])
        hog_viz=look_at_hog(img)
        imshow(hog_viz,cmap='gray');show()
    else:
        hog = cv2.HOGDescriptor()
        #hog.setSVMDetector( cv2.HOGDescriptor_getDefaultPeopleDetector() )
        cam = create_capture(1, fallback='synth:bg=../cpp/lena.jpg:noise=0.05')
        while True:
            ret, img = cam.read()
            vis = img.copy()
            t = clock()
            if 1:
                vis=look_at_hog(vis)
            else:
                found, w = hog.detectMultiScale(img, winStride=(8,8), padding=(32,32), scale=1.05)
                found_filtered = []
                for ri, r in enumerate(found):
                    for qi, q in enumerate(found):
                        if ri != qi and inside(r, q):
                            break
                    else:
                        # no other detection encloses r, so keep it
                        found_filtered.append(r)
Example #39
    cx, cy = x+w/2, y+h/2
    angle = angle*np.pi/180
    cv2.circle(vis, (cx, cy), r, color, 3)
    cv2.line(vis, (cx, cy), (int(cx+np.cos(angle)*r), int(cy+np.sin(angle)*r)), color, 3)

if __name__ == '__main__':
    import sys
    try: video_src = sys.argv[1]
    except: video_src = 0

    cv2.namedWindow('motempl')
    visuals = ['input', 'frame_diff', 'motion_hist', 'grad_orient']
    cv2.createTrackbar('visual', 'motempl', 2, len(visuals)-1, nothing)
    cv2.createTrackbar('threshold', 'motempl', DEFAULT_THRESHOLD, 255, nothing)

    cam = video.create_capture(video_src, fallback='synth:class=chess:bg=../cpp/lena.jpg:noise=0.01')
    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[:,:,1] = 255
    while True:
        ret, frame = cam.read()
        frame_diff = cv2.absdiff(frame, prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        thrs = cv2.getTrackbarPos('threshold', 'motempl')
        ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
        timestamp = clock()
        cv2.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
        mg_mask, mg_orient = cv2.calcMotionGradient( motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
Example #40
    print(__doc__)
    count = 0
    for i in range(1,1192):
        print(str(i))
        args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
        try:
            video_src = video_src[0]
        except:
            video_src = 0
        args = dict(args)
        cascade_fn = args.get('--cascade', "../../data/haarcascades/haarcascade_frontalface_alt.xml")
        nested_fn  = args.get('--nested-cascade', "../../data/haarcascades/haarcascade_eye.xml")

        cascade = cv2.CascadeClassifier(cascade_fn)
        nested = cv2.CascadeClassifier(nested_fn)
        cam = create_capture(video_src, fallback='synth:bg=../data/friend/friendImage/image'+str(i)+'.jpg:noise=0.05')

        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)

        rects = detect(gray, cascade)
        vis = img.copy()
        draw_rects(vis, rects, (0, 255, 0))

        if not nested.empty():
            if len(rects) == 0:
                print('none')
            else:
                count = count + 1
                print(str(count))
Example #41
end_header
'''


def write_ply(fn, verts, colors):
    verts = verts.reshape(-1, 3)
    colors = colors.reshape(-1, 3)
    verts = np.hstack([verts, colors])
    with open(fn, 'w') as f:
        f.write(ply_header % dict(vert_num=len(verts)))
        np.savetxt(f, verts, '%f %f %f %d %d %d')
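
The ply_header template is cut off at the top of this snippet; a reconstruction consistent with write_ply's '%f %f %f %d %d %d' rows (this is the header used by OpenCV's stereo_match.py sample):

ply_header = '''ply
format ascii 1.0
element vertex %(vert_num)d
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
end_header
'''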


if __name__ == '__main__':
    print 'loading images...'
    camL = video.create_capture(1)
    camR = video.create_capture(0)

    _, imgL = camL.read()
    _, imgR = camR.read()

    # disparity range is tuned for 'aloe' image pair
    window_size = 3
    min_disp = 16
    num_disp = 112 - min_disp
    stereo = cv2.StereoSGBM(minDisparity=min_disp,
                            numDisparities=num_disp,
                            SADWindowSize=window_size,
                            uniquenessRatio=10,
                            speckleWindowSize=100,
                            speckleRange=32,
Example #42
def main():
    import sys
    try:
        fn = sys.argv[1]
    except IndexError:
        fn = 0
    try:
        start_at = int(sys.argv[2])
    except IndexError:
        start_at = 0
    try:
        compute_scene = len(sys.argv[3]) <= 0
    except IndexError:
        compute_scene = True

    if compute_scene:
        scenelist = getSceneList(fn, start_at)
    else:
        scenelist = [(start_at, float("inf"))]
    if scenelist is None or len(scenelist) == 0:
        scenelist = [(start_at, float("inf"))]
    cam = video.create_capture(fn)
    cam.set(cv.CAP_PROP_POS_FRAMES, start_at)
    nb_frames = int(cam.get(cv.CAP_PROP_FRAME_COUNT))
    rate = cam.get(cv.CAP_PROP_FPS)
    ret, prev = cam.read()
    realh, realw = prev.shape[:2]
    prev = reduce(prev)
    #h, w = realh//3, realw//3
    h, w = prev.shape[:2]

    #prevgray = cv.cvtColor(prev, cv.COLOR_BGR2GRAY)[w:2*w,h:2*h]
    prevgray = cv.cvtColor(prev, cv.COLOR_BGR2GRAY)

    pbar = tqdm(total=nb_frames - start_at)

    frame = start_at

    scene_results = []
    for scene_start, scene_end in scenelist:
        if frame > scene_start:
            continue
        for i in range(frame, scene_start):
            ret, prev = cam.read()
            pbar.update(1)

        if scene_start - frame > 0:
            prev = reduce(prev)
            prevgray = cv.cvtColor(prev, cv.COLOR_BGR2GRAY)
        frame = scene_start

        data = []
        while frame < scene_end:
            ret, img = cam.read()
            if not ret:
                break
            img = reduce(img)
            #gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)[w:2*w,h:2*h]
            gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

            flow = cv.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3,
                                               15, 3, 5, 1.2, 0)
            prevgray = gray

            # data.append(np.absolute(flow).mean(axis=(0,1)))  # if there are several motions, they can cancel each other out
            flow0 = flow.mean(axis=(0, 1))

            if len(data) > 0 and np.absolute(
                    maxi).sum() < 12:  #skip the next frame
                data.append(np.stack((data[-1], flow0)).mean(axis=0))
            data.append(flow0)
            maxi = flow.max(axis=(0, 1))

            pbar.update(1)
            frame += 1

            if frame < scene_end and np.absolute(
                    maxi).sum() < 12:  #skip the next frame
                pbar.update(1)
                frame += 1
                if frame == scene_end:
                    data.append(flow0)
                ret, img = cam.read()
            #cv.imshow('flow', draw_flow(gray, flow))
            #ch = cv.waitKey(5)

        # code.interact(local=locals())
        freq = np.fft.rfftfreq(len(data), 1 / rate)
        maxima = None
        freqs = None
        t = None

        for i in range(2):
            signal = np.array(data)[:, i]
            #data = np.linalg.norm(data, axis=1)
            #data = np.sum(data, axis=1)
            signal = running_mean(signal, 3)

            signal = np.array(signal)
            f, t, Zxx = sig.stft(signal,
                                 rate,
                                 boundary='even',
                                 nperseg=min(3 * rate, signal.shape[0]),
                                 noverlap=min(3 * rate - 1,
                                              signal.shape[0] - 1))
            maxi = np.abs(Zxx).max(axis=0)
            if maxima is None:
                maxima = maxi
            else:
                maxima = np.stack((maxima, maxi))

            freq = np.take(f, np.abs(Zxx).argmax(axis=0))
            if freqs is None:
                freqs = freq
            else:
                freqs = np.stack((freqs, freq))
            #plt.show()

            #maxima[i] = np.absolute(sp[first:]).max()
            #indices[i] = np.unravel_index(np.absolute(
            #    sp[first:]).argmax(), sp.shape)[0] + first
        freq = maxima.argmax(axis=0).choose(freqs)
        freq = running_mean(freq, min(int(rate), freq.shape[0]))
        #plt.plot(t, freq)
        #plt.show()
        scene_results.append((t + scene_start / rate, freq))

    pbar.close()
    #for t, freq in scene_results:
    #plt.plot(np.array([i/rate for i in range(nb_frames)]),
    fig, ax = plt.subplots()
    plt.plot(np.concatenate([t for t, freq in scene_results]),
             np.concatenate([freq for t, freq in scene_results]))

    formatter = ticker.FuncFormatter(
        lambda s, x: time.strftime('%M:%S', time.gmtime(s)))
    ax.xaxis.set_major_formatter(formatter)
    plt.show()
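
running_mean (and reduce) are used above but not defined in this snippet; a plausible boxcar implementation consistent with how running_mean is called (output has len(x) - N + 1 samples):

import numpy as np

def running_mean(x, N):
    # boxcar average via cumulative sums; NumPy-only and O(len(x))
    cumsum = np.cumsum(np.insert(np.asarray(x, dtype=float), 0, 0))
    return (cumsum[N:] - cumsum[:-N]) / float(N)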
    cv2.imshow('hsv_map', hsv_map)

    cv2.namedWindow('hist', 0)
    hist_scale = 10

    def set_scale(val):
        global hist_scale
        hist_scale = val

    cv2.createTrackbar('scale', 'hist', hist_scale, 32, set_scale)

    try:
        fn = sys.argv[1]
    except:
        fn = 0
    cam = video.create_capture(
        fn, fallback='synth:bg=../data/baboon.jpg:class=chess:noise=0.05')

    while True:
        flag, frame = cam.read()
        cv2.imshow('camera', frame)

        small = cv2.pyrDown(frame)

        hsv = cv2.cvtColor(small, cv2.COLOR_BGR2HSV)
        dark = hsv[..., 2] < 32
        hsv[dark] = 0
        h = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])

        h = np.clip(h * 0.005 * hist_scale, 0, 1)
        vis = hsv_map * h[:, :, np.newaxis] / 255.0
        cv2.imshow('hist', vis)
        if cv2.waitKey(1) == 27:  # Esc to quit; imshow needs the event pump
            break
    flow[:, :, 1] += np.arange(h)[:, np.newaxis]
    #res = cv.remap(img, flow, None, cv.INTER_LINEAR)
    res = cv.remap(img, flow, None, cv.INTER_NEAREST)
    return res


counter = 1

if __name__ == '__main__':
    import sys
    print(__doc__)
    try:
        fn = sys.argv[1]
    except IndexError:
        fn = 0
    cam2 = video.create_capture('walkExtract_720p.mp4')
    cam = video.create_capture(fn)
    ret, prev = cam.read()
    ret1, prev1 = cam2.read()
    prevgray = cv.cvtColor(prev, cv.COLOR_BGR2GRAY)
    show_hsv = False
    show_glitch = True
    cur_glitch = prev1.copy()

    while True:
        ret, img = cam.read()
        _, prev1 = cam2.read()
        gray2 = cv.cvtColor(prev1, cv.COLOR_BGR2GRAY)
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        flow = cv.calcOpticalFlowFarneback(prevgray, gray2, None, 0.5, 3, 15,
                                           3, 5, 1.2, 0)
Example #45
    import sys, getopt
    print(__doc__)

    args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
    try:
        video_src = video_src[0]
    except:
        video_src = 0
    args = dict(args)
    cascade_fn = args.get('--cascade', "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml")
    nested_fn  = args.get('--nested-cascade', "/usr/local/share/OpenCV/haarcascades/haarcascade_eye.xml")

    cascade = cv2.CascadeClassifier(cascade_fn)
    nested = cv2.CascadeClassifier(nested_fn)

    cam = create_capture(video_src, fallback='synth:bg=../data/lena.jpg:noise=0.05')

    while True:
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)

        t = clock()
        rects = detect(gray, cascade)
        vis = img.copy()
        draw_rects(vis, rects, (0, 255, 0))
        if not nested.empty():
            for x1, y1, x2, y2 in rects:
                roi = gray[y1:y2, x1:x2]
                vis_roi = vis[y1:y2, x1:x2]
                subrects = detect(roi.copy(), nested)
Example #46
colour = (b, g, r)

balldiameter = 10
bx = 100
by = balldiameter / 2
ballcolour = (255, 100, 100)
bx_direction = 2
by_direction = 3

#The face detector can have minimum and maximum sizes.
mn = 30  # minimum face width
mx = 500  # maximum face width

#these two lines set up the face detector and the video input
cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
cam = video.create_capture(0)

fimg = cv2.imread("smily.png")

while True:
    ret, img = cam.read()

    height, width = img.shape[:2]
    #print(height,width)
    # to make a blank background instead of the input image, uncomment:
    #   output_image=np.zeros((height,width,3),np.uint8)
    #   output_image[:]=(0,0,255)
    output_image = img
    bx = bx + bx_direction
    by = by + by_direction
    if (bx > width or bx < 0):
Example #47
    args, video_src = getopt.getopt(sys.argv[1:], '',
                                    ['cascade=', 'nested-cascade='])
    try:
        video_src = video_src[0]
    except:
        video_src = 'synth:bg=../cpp/lena.jpg:noise=0.05'
    args = dict(args)
    cascade_fn = args.get(
        '--cascade', "../../data/haarcascades/haarcascade_frontalface_alt.xml")
    nested_fn = args.get('--nested-cascade',
                         "../../data/haarcascades/haarcascade_eye.xml")

    cascade = cv2.CascadeClassifier(cascade_fn)
    nested = cv2.CascadeClassifier(nested_fn)

    cam = create_capture(video_src)

    while True:
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)

        t = clock()
        rects = detect(gray, cascade)
        vis = img.copy()
        draw_rects(vis, rects, (0, 255, 0))
        for x1, y1, x2, y2 in rects:
            roi = gray[y1:y2, x1:x2]
            vis_roi = vis[y1:y2, x1:x2]
            subrects = detect(roi.copy(), nested)
            draw_rects(vis_roi, subrects, (255, 0, 0))
Example #48
import cv2.cv as cv
from video import create_capture
from common import clock, draw_str

#GUI
from pgu import gui as pgui, text

#Velleman board
from pyk8055 import *

#Arduino - only one of this and Velleman will be used. Probably...
import pyduino

#There should be a USB camera, but if not use the webcam.
try:
    camera = create_capture(1)
    ret, im = camera.read()
    try:
        camera2 = create_capture(2)
        ret, im = camera2.read()
    except:
        camera2 = None
except:
    camera = create_capture(0)


def playSound(fileName):
    if not pygame.mixer.music.get_busy():
        pygame.mixer.music.load(fileName)
        pygame.mixer.music.play(1)
# main program
if __name__ == '__main__':
    import sys

    # print in the program shell window the text at the beginning of the file
    print(__doc__)

    # if there is no argument in the program invocation default to camera 0
    try:
        fn = sys.argv[1]
    except:
        fn = 0

    # grab initial frame, create window
    frame_counter = 0
    cv.waitKey(1) & 0xFF
    cap = video.create_capture(fn)
    ret, frame = cap.read()
    frame_counter += 1
    height, width, channels = frame.shape
    prevFrame = frame.copy()
    cv.namedWindow("video")

    # Create video of Frame sequence -- define the codec and create VideoWriter object
    fourcc = cv.VideoWriter_fourcc(*'XVID')
    cols = np.int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
    rows = np.int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
    vid_out = cv.VideoWriter('vid_out.avi', fourcc, 20.0, (cols, rows))

    # Set up multiprocessing
    threadn = cv.getNumberOfCPUs()
    pool = ThreadPool(processes=threadn)
Example #50
import sys

import numpy as np
import cv2
import video

try:
    infn = sys.argv[1]
except:
    infn = 0

cv2.namedWindow('foreground')

cap = video.create_capture(infn)

fgbg = cv2.BackgroundSubtractorMOG()
fn = 0
while (1):
    ret, frame = cap.read()

    fgmask = fgbg.apply(frame)
    cv2.imshow('foreground', fgmask)
    cv2.imshow('input', frame)
    k = cv2.waitKey(5)
    print "In frame {}".format(fn)
    fn += 1
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
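
Note that BackgroundSubtractorMOG is the OpenCV 2.4 API; under OpenCV 3+/4 the equivalent loop would use the MOG2 subtractor from the main module (a minimal sketch, not the original code):

import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()
while True:
    ret, frame = cap.read()
    if not ret:
        break
    fgmask = fgbg.apply(frame)
    cv2.imshow('foreground', fgmask)
    if cv2.waitKey(5) == 27:  # Esc
        break
cap.release()
cv2.destroyAllWindows()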
Example #51
    ESC   - exit

'''

import numpy as np
import cv2
import video

if __name__ == '__main__':
    import sys
    try:
        video_src = sys.argv[1]
    except:
        video_src = 0

    cam = video.create_capture(video_src)
    mser = cv2.MSER_create()
    while True:
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        vis = img.copy()

        regions, _ = mser.detectRegions(gray)
        hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
        cv2.polylines(vis, hulls, 1, (0, 255, 0))

        cv2.imshow('img', vis)
        if 0xFF & cv2.waitKey(5) == 27:
            break
    cv2.destroyAllWindows()
    

if __name__ == '__main__':
    import sys, getopt
    print help_message

    args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
    try: video_src = video_src[0]
    except: video_src = 0
    args = dict(args)
    #cascade_fn = args.get('--cascade', "../../data/haarcascades/haarcascade_frontalface_default.xml")
    cascade_fn = args.get('--cascade', "haarcascade_frontalface_default.xml")

    cascade = cv2.CascadeClassifier(cascade_fn)

    cam = create_capture(0)
    x_est = np.zeros((n, 1))
    p_est = np.identity(n)
    w = 0
    h = 0
    allRoiHist = [] 
    first = False
    while not first:  # loop until exactly one face has been detected
        ret, img = cam.read()
        vis = img.copy()
        gray = cv2.cvtColor(vis, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        rects = detect(gray, cascade)
        if len(rects) == 1:
            first = True
        cv2.imshow('facedetect', vis)
 def __init__(self, video_src):
     self.track_len = 10
     self.detect_interval = 5
     self.tracks = []
     self.cam = video.create_capture(video_src)
     self.frame_idx = 0
Example #54
    def run(self):

        standup_frame_cnt = 0
        sitdown_frame_cnt = 0
        #containsEnoughMotion = False
        #detected = False

        use_spatial_propagation = False
        use_temporal_propagation = True
        #inst = cv2.optflow.createOptFlow_DIS(cv2.optflow.DISOPTICAL_FLOW_PRESET_MEDIUM)
        #inst = cv2.optflow.createOptFlow_DIS(cv2.optflow.DISOPTICAL_FLOW_PRESET_FAST)
        inst = cv2.optflow.createOptFlow_DIS(
            cv2.optflow.DISOPTICAL_FLOW_PRESET_ULTRAFAST)
        inst.setUseSpatialPropagation(use_spatial_propagation)
        flow = None
        self.prev_gray = None

        delayTime = 1
        jump = 20
        badframeCnt = 0

        while True:
            ret, frame = self.cam.read()
            if not ret:
                badframeCnt = badframeCnt + 1
                if badframeCnt > 3:
                    break
                else:
                    continue

            #########
            # Step 0: preprocessing
            #########
            frame = cv2.resize(frame, (frame.shape[1] // 2, frame.shape[0] // 2))
            vis = frame.copy()
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            #cv2.imshow("before blur", frame_gray)
            frame_gray_blur = cv2.medianBlur(frame_gray, 7)
            #cv2.imshow("after blur", frame_gray)

            #########
            # Step 1: BackgroundSubtractor
            #########
            fgmask = self.fgbg.apply(frame_gray_blur, 0.7)
            #fgmask = cv2.medianBlur(fgmask, 7)
            cv2.imshow("fgmask", fgmask)

            #########
            # Step 2: morphology
            #########
            kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
            closed = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
            closed = cv2.morphologyEx(closed, cv2.MORPH_OPEN, kernel)

            #########
            # Step 3: contour and hull
            #########
            _, contours0, hierarchy = cv2.findContours(closed.copy(),
                                                       cv2.RETR_TREE,
                                                       cv2.CHAIN_APPROX_SIMPLE)
            contours = [cv2.approxPolyDP(cnt, 3, True) for cnt in contours0]

            # filter contours
            minLength = 2
            minArea = 2
            new_contours = []
            for index, c in enumerate(contours):
                if len(c) > minLength and cv2.contourArea(c) > minArea:
                    # and hierarchy[index] is not None:
                    new_contours.append(c)
            contours = new_contours

            # get hulls
            hulls = []
            for contour in contours:
                hull = cv2.convexHull(contour)
                hulls.append(hull)

            # merge nest hulls
            hullMask = np.zeros((closed.shape[0], closed.shape[1], 1),
                                np.uint8)
            cv2.drawContours(hullMask, hulls, -1, 255, 1)
            _, contours1, hierarchy = cv2.findContours(hullMask.copy(),
                                                       cv2.RETR_EXTERNAL,
                                                       cv2.CHAIN_APPROX_SIMPLE)
            hulls = []
            for contour in contours1:
                hull = cv2.convexHull(contour)
                hulls.append(hull)
            cv2.drawContours(hullMask, hulls, -1, 255, -1)
            #cv2.imshow("editHull", hullMask)

            # get centers of contours
            centers = []
            for hull in hulls:
                M = cv2.moments(hull)
                cx = int(M['m10'] / M['m00'])
                cy = int(M['m01'] / M['m00'])
                centers.append((cx, cy))

            # label hulls
            hullLables = [-1 for i in range(len(hulls))]
            if len(self.prevHulls) == 0:
                hullLables = [i for i in range(len(hulls))]
            else:
                for index, hull in enumerate(hulls):
                    cx = centers[index][0]
                    cy = centers[index][1]
                    if self.prevHullMask[cy][cx] != 0:
                        # find corresponding hull
                        minDist = 10000
                        prevIndex = 0

                        for i, c in enumerate(self.prevCenters):
                            dist = abs(int(c[0]) -
                                       int(cx)) + abs(int(c[1]) - int(cy))
                            if dist < minDist:
                                minDist = dist
                                prevIndex = i
                        hullLables[index] = self.prevHullLabels[prevIndex]
                    else:
                        label = 0
                        while True:
                            if label in self.prevHullLabels or label in hullLables:
                                label = label + 1
                            else:
                                hullLables[index] = label
                                break

            ########
            # compute optical flow for each hull
            ########
            flows = []
            rois = []
            if self.prev_gray is not None:
                whole_flow = inst.calc(self.prev_gray, frame_gray, None)
                # TODO: could the dense optical flow be computed on the ROIs only?
                vis = draw_flow(vis, whole_flow)

                for hull in hulls:
                    x, y, w, h = cv2.boundingRect(hull)  # don't shadow the hull with its height
                    rois.append((x, y, w, h))
                    flows.append(whole_flow[y:y + h, x:x + w])

            # Optional temporal propagation: warp the previous flow to get an
            # initial approximation for the current flow.
            #if flow is not None and use_temporal_propagation:
            #    flow = inst.calc(self.prev_gray, frame_gray, warp_flow(flow, flow))
            #elif self.prev_gray is not None:
            #    flow = inst.calc(self.prev_gray, frame_gray, None)

            ########
            # classify using optical flow
            ########
            flags = []
            for index, flow in enumerate(flows):
                fx = np.mean(flow[:, :, 0])
                fy = np.mean(flow[:, :, 1])
                # flag hulls whose mean vertical motion is below the threshold
                flags.append(fy < self.fy_threshold)
                #if fy < -1.0:
                #    draw_str(vis, (rois[index][0], rois[index][1]), "%.2f" % fx, (255, 0, 0))
                #    draw_str(vis, (rois[index][0], rois[index][1] - 10), "%.2f" % fy, (0, 255, 0))

            ########
            # Draw result
            ########
            for i, hull in enumerate(hulls):
                if self.prev_gray is not None:
                    cv2.drawContours(vis, [hull], -1, color[flags[i]], 2)

            #for i, hull in enumerate(hulls):
            #    cv2.drawContours(vis, [hull], -1, color[hullLabels[i] % len(color)], 2)

            #for index, flow in enumerate(flows):
            #    vis = draw_flow_roi(vis, flow * 5, rois[index])
            '''
            prevHullMask = np.zeros((closed.shape[0], closed.shape[1], 3), np.uint8)
            if len(self.prevHulls) != 0:
                cv2.drawContours(prevHullMask, self.prevHulls, -1, (255, 255, 255), -1)
                for i, c in enumerate(centers):
                    cv2.drawContours(prevHullMask, [hulls[i]], 0, color[hullLabels[i]], 2)
                    cv2.circle(prevHullMask, c, 1, color[hullLabels[i]], 2)
                    draw_str(prevHullMask, c, "%d" % hullLabels[i], color[hullLabels[i]])
                for i, c in enumerate(self.prevCenters):
                    draw_str(prevHullMask, (c[0]+10, c[1]), "%d" % self.prevHullLabels[i])
            cv2.imshow("hulls", prevHullMask)
            '''

            #cv2.imshow('mask', fgmask)
            #cv2.imshow("close", closed)
            #cv2.imshow("contour", hullMask)

            #cv2.drawContours( vis, hulls, -1, (128,0,255), 2)
            #cv2.drawContours( vis, contours, -1, (255,255,255), 1)
            #cv2.drawContours( vis, new_contours, -1, (255,0,255), 1)

            cv2.imshow('lk_track', vis)

            self.frame_idx += 1
            self.prev_gray = frame_gray

            self.prevHulls = hulls
            self.prevHullMask = hullMask
            self.prevHullLabels = hullLabels
            self.prevCenters = centers

            self.ch = 0xFF & cv2.waitKey(delayTime)
            ch = self.ch
            # Esc
            if ch == 27:
                break
            # faster
            if ch == ord('g'):
                delayTime = 1
            # fast
            if ch == ord('f'):
                delayTime = 20
            if ch == ord('1'):
                delayTime = 100
            if ch == ord('2'):
                delayTime = 300
            if ch == ord('3'):
                delayTime = 600
            # slow
            if ch == ord('s'):
                delayTime = 1000
            # replay
            if ch == ord('r'):
                self.cam = video.create_capture(self.video_src)
            # pause/resume
            if ch == ord('d'):
                ch = 0xFF & cv2.waitKey(delayTime)
                while ch != ord('d'):
                    ch = 0xFF & cv2.waitKey(delayTime)
            # frame skipping (Linux arrow-key codes)
            if ch == 82:  # Up: increase jump step
                jump = jump + 20
                print("jump speed: ", jump)
            if ch == 84:  # Down: decrease jump step
                jump = (jump - 20 > 0) and (jump - 20) or jump
                print("jump speed: ", jump)
            if ch == 83:  # Right: skip `jump` frames forward
                for i in range(jump):
                    self.cam.read()
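
The labeling step above re-identifies hulls across frames by nearest previous
center. A minimal standalone sketch of that matching idea (the function name
and signature are hypothetical, and the previous-mask gate used above is
omitted here):

def match_labels(centers, prev_centers, prev_labels):
    # assign each new center the label of its nearest previous center
    # (L1 distance); fall back to the smallest label not yet in use
    labels = []
    for (cx, cy) in centers:
        if prev_centers:
            dists = [abs(px - cx) + abs(py - cy) for (px, py) in prev_centers]
            labels.append(prev_labels[dists.index(min(dists))])
        else:
            label = 0
            while label in prev_labels or label in labels:
                label += 1
            labels.append(label)
    return labels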
Exemple #55
0
    def __init__(self, video_src):
        self.cam = video.create_capture(video_src)
        self.p0 = None
        self.use_ransac = True
Exemple #56
0
    search_response = urllib.urlopen(url)
    search_results = search_response.read()
    results = json.loads(search_results)
    data = results['responseData']
    if debug[1]: print "data is {}".format(data)
    try:
        hits = int(data['cursor']['estimatedResultCount'])
    except (KeyError, TypeError) as e:
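        # no usable result data; fall back to a rough length-based score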
        hits = sum(map(len, query))
    if debug[1]: print "Query is {}, hits are {}".format(query, hits)
    return hits


#TODO: For performance, make multithreaded
if __name__ == '__main__':
    cam = video.create_capture(0)  #cv2.VideoCapture(0)
    ret, img = cam.read()
    takephoto = False
    imgdir = "./img/"  # output directory; the frame is written to imgdir + "out.jpg"
    textFile = "outtext/out"
    textFileExt = "outtext/out.txt"
    imgfile = imgdir + "out.jpg"

    start = False  # becomes True once we start taking pictures
    startBuffer = 0
    startBufferMax = 10  # step size

    cand = []  #candidate texts
    maxHits = 0

    startTime = None
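
The fragment ends before the selection logic, but presumably each OCR
candidate collected in cand is scored with the hits() helper above and the
highest scorer wins. A hedged sketch of that step (assuming hits takes the
query text; all names below are hypothetical):

    best = None
    for text in cand:
        score = hits(text)
        if score > maxHits:
            maxHits = score
            best = text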
Exemple #57
0
def getVideoFeatures(video_path):
    '''
        return: optical-flow mean, optical-flow std, color warmth, color weight,
        color activity, color softness, dark proportion, light proportion,
        saturation, color energy, color variance
    '''
    optical_flow_list = []

    cam = video.create_capture(video_path)
    ret, prev = cam.read()
    # downsample the image twice
    prev = cv.pyrDown(prev)
    prev = cv.pyrDown(prev)
    prevgray = cv.cvtColor(prev, cv.COLOR_BGR2GRAY)

    video_warmc_total = 0
    video_heavyl_total = 0
    video_activep_total = 0
    video_hards_total = 0
    video_darkProportion_total = 0
    video_lightProportion_total = 0
    video_saturation_total = 0
    video_color_energy_total = 0
    video_color_std_total = 0

    count = 0
    n = 0

    SAMPLE_BAND = 10

    while True:
        ret, img = cam.read()
        if not ret:
            break

        count += 1
        # sample a pair of consecutive frames every SAMPLE_BAND frames:
        # frame k*SAMPLE_BAND seeds prevgray, frame k*SAMPLE_BAND + 1 yields flow
        print(count)
        if count % SAMPLE_BAND != 0 and count % SAMPLE_BAND != 1:
            continue

        # downsample the image
        img = cv.pyrDown(img)
        img = cv.pyrDown(img)

        # motion vectors for this frame (dense Farneback optical flow)
        #   https://docs.opencv.org/master/d7/d8b/tutorial_py_lucas_kanade.html
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        if count % SAMPLE_BAND == 1:
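            # Farneback parameters: pyr_scale=0.5, levels=3, winsize=150,
            # iterations=3, poly_n=5, poly_sigma=1.2, flags=0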
            flow = cv.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3,
                                               150, 3, 5, 1.2, 0)
            optical_flow_list.append(flow)

            # color features for this frame
            (video_warmc, video_heavyl, video_activep, video_hards,
             video_darkProportion, video_lightProportion, video_saturation,
             video_color_energy,
             video_color_std) = getFrameFeatures.getColorFeatures(img)

            video_warmc_total += video_warmc
            video_heavyl_total += video_heavyl
            video_activep_total += video_activep
            video_hards_total += video_hards
            video_darkProportion_total += video_darkProportion
            video_lightProportion_total += video_lightProportion
            video_saturation_total += video_saturation
            video_color_energy_total += video_color_energy
            video_color_std_total += video_color_std

            n += 1
        prevgray = gray

    cam.release()

    optical_flow_array = np.array(optical_flow_list)
    print("optical_flow_array.shape: ", optical_flow_array.shape)
    optical_flow_mean = np.mean(optical_flow_array, axis=0)
    print("optical_flow_mean.shape: ", optical_flow_mean.shape)
    optical_flow_std = np.std(optical_flow_array, axis=0)
    print("optical_flow_std.shape: ", optical_flow_std.shape)

    video_optical_flow_mean = optical_flow_mean.mean(axis=0).mean(axis=0)
    video_optical_flow_std = optical_flow_std.mean(axis=0).mean(axis=0)

    if n == 0:
        raise ValueError("no frames sampled from %s" % video_path)

    video_warmc_total /= n
    video_heavyl_total /= n
    video_activep_total /= n
    video_hards_total /= n
    video_darkProportion_total /= n
    video_lightProportion_total /= n
    video_saturation_total /= n
    video_color_energy_total /= n
    video_color_std_total /= n

    x, y = video_optical_flow_mean
    a, b = video_optical_flow_std

    # squared magnitude of the mean flow, mean absolute flow std, then the
    # averaged color features
    return (x * x + y * y, (abs(a) + abs(b)) / 2, video_warmc_total,
            video_heavyl_total, video_activep_total, video_hards_total,
            video_darkProportion_total, video_lightProportion_total,
            video_saturation_total, video_color_energy_total,
            video_color_std_total)
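
getFrameFeatures is an external module that is not shown here. As a rough,
hypothetical sketch of the kind of per-frame color statistics such a helper
might compute (the HSV-based definitions and thresholds are assumptions, not
the original implementation):

import cv2 as cv
import numpy as np

def color_features_sketch(img, dark_v=64, light_v=192):
    # mean saturation, dark/light pixel proportions, simple energy and std
    hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
    s = hsv[:, :, 1].astype(np.float32)
    v = hsv[:, :, 2]
    saturation = float(np.mean(s)) / 255.0
    dark_proportion = float(np.mean(v < dark_v))
    light_proportion = float(np.mean(v > light_v))
    color_energy = float(np.mean(img.astype(np.float32) ** 2))
    color_std = float(np.std(img))
    return saturation, dark_proportion, light_proportion, color_energy, color_std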
Exemple #58
0
def image_cap():
    d = []

    print(__doc__)

    try:
        fn = sys.argv[1]
    except:
        fn = 0
    cap = video.create_capture(fn)
    cap.set(cv.CAP_PROP_FPS, 12)

    def process_frame(frame, t0):
        # some intensive computation...
        #frame = cv.medianBlur(frame, 19)
        #frame = cv.medianBlur(frame, 19)
        return frame, t0

    threadn = cv.getNumberOfCPUs()
    pool = ThreadPool(processes = threadn)
    pending = deque()

    threaded_mode = True

    latency = StatValue()
    frame_interval = StatValue()
    last_frame_time = clock()
    while True:

        while len(pending) > 0 and pending[0].ready():

            res, t0 = pending.popleft().get()
            latency.update(clock() - t0)
            draw_str(res, (20, 20), "threaded      :  " + str(threaded_mode))
            draw_str(res, (20, 40), "latency        :  %.1f ms" % (latency.value*1000))
            draw_str(res, (20, 60), "frame interval :  %.1f ms" % (frame_interval.value*1000))
            res=cv.resize(res,(176,100))
            cv2_im = cv.cvtColor(res, cv.COLOR_BGR2RGB)
            pil_im = Image.fromarray(cv2_im)

            d.append(np.array(pil_im))
            if len(d)==32:
                t1 = data_input(d[0:16])
                t2 = data_input(d[16:32])
                in_x = np.array([t1, t2])
                in_x = np.reshape(in_x, (2, 16, 128, 128, 3))
                start = time.perf_counter()  # time.clock() was removed in Python 3.8
                #p = Pool(1)
                #p.map(evaluate, in_x)

                evaluate(in_x)
                elapsed = time.perf_counter() - start
                print("Time spent in evaluate() is: ", elapsed)
                d = []


            cv.imshow('threaded video', res)
        if len(pending) < threadn:
            ret, frame = cap.read()
            if not ret:
                break
            t = clock()
            frame_interval.update(t - last_frame_time)
            last_frame_time = t
            if threaded_mode:
                task = pool.apply_async(process_frame, (frame.copy(), t))
            else:
                task = DummyTask(process_frame(frame, t))
            pending.append(task)
        ch = cv.waitKey(1)
        if ch == ord(' '):
            threaded_mode = not threaded_mode
        if ch == 27:
            break

    cv.destroyAllWindows()
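
This example leans on helper utilities from OpenCV's Python samples
(common.py / video.py): clock, StatValue, DummyTask, and draw_str. If those
modules are not at hand, minimal stand-ins look roughly like this (a sketch
patterned on the samples, not a verbatim copy):

import cv2 as cv

def clock():
    # wall-clock seconds based on OpenCV's tick counter
    return cv.getTickCount() / cv.getTickFrequency()

class StatValue:
    # exponentially smoothed scalar, used for latency / frame-interval stats
    def __init__(self, smooth_coef=0.5):
        self.value = None
        self.smooth_coef = smooth_coef
    def update(self, v):
        if self.value is None:
            self.value = v
        else:
            c = self.smooth_coef
            self.value = c * self.value + (1.0 - c) * v

class DummyTask:
    # mimics multiprocessing's AsyncResult for the non-threaded path
    def __init__(self, data):
        self.data = data
    def ready(self):
        return True
    def get(self):
        return self.data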
Exemple #59
0
    def __init__(self, video_src):
        self.cam = video.create_capture(video_src)
Exemple #60
0
    args, video_src = getopt.getopt(sys.argv[1:], '',
                                    ['cascade=', 'nested-cascade='])

    #-----------------------------------
    #Selecting video source(file/camera)
    #-----------------------------------

    try:
        video_src = video_src[0]
    except:
        #fn = raw_input("Enter Camera/Video (1/0): ")
        fn = 0  # hard-coded to the webcam; re-enable raw_input above to choose
        if fn == 1:
            print "Video File "
            cam = create_capture(
                'vania.mp4', fallback='synth:bg=../data/lena.jpg:noise=0.05')

        elif fn == 0:
            print "Web Camera"
            cam = create_capture(
                fn, fallback='synth:bg=../data/lena.jpg:noise=0.05')

    #------------------------
    #Defining various cascade
    #------------------------

    args = dict(args)
    cascade_fn = args.get(
        '--cascade',
        "../../data/haarcascades/haarcascade_frontalface_alt_tree.xml")
    nested_fn = args.get('--nested-cascade', "cascade/open.xml")
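
The cascade paths above feed OpenCV's classic facedetect loop. A hedged
sketch of the detection step that typically follows (the detect() helper is
patterned on OpenCV's facedetect.py sample and is not part of this fragment;
cascade_fn and cam come from the setup above):

    def detect(gray, cascade):
        # returns face rectangles as (x1, y1, x2, y2)
        rects = cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=4,
                                         minSize=(30, 30),
                                         flags=cv2.CASCADE_SCALE_IMAGE)
        if len(rects) == 0:
            return []
        rects[:, 2:] += rects[:, :2]
        return rects

    cascade = cv2.CascadeClassifier(cascade_fn)
    while True:
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        for x1, y1, x2, y2 in detect(gray, cascade):
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.imshow('facedetect', img)
        if cv2.waitKey(5) == 27:
            break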