def MOG(cap):
    """Display the MOG (mixture-of-Gaussians) foreground mask of a stream.

    Reads frames from ``cap`` until the stream ends or ESC is pressed,
    showing each foreground mask in a window, then releases the capture
    and closes all windows.

    :param cap: an opened ``cv2.VideoCapture``.
    """
    # The MOG subtractor lives in the opencv-contrib `bgsegm` module in
    # OpenCV >= 3; plain cv2.createBackgroundSubtractorMOG() does not exist.
    fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
    while True:
        ret, frame = cap.read()
        if not ret:
            # End of stream / camera failure: fgbg.apply(None) would raise.
            break
        fgmask = fgbg.apply(frame)
        cv2.imshow('frame', fgmask)
        k = cv2.waitKey(30) & 0xff
        if k == 27:  # ESC quits
            break
    cap.release()
    cv2.destroyAllWindows()
def test_412(self):
    """41.2 BackgroundSubtractorMOG.

    Foreground/background segmentation based on a Gaussian-mixture model;
    shows the live foreground mask from the default camera until ESC.
    """
    cap = cv2.VideoCapture(0)
    # MOG lives in the opencv-contrib `bgsegm` module in OpenCV >= 3;
    # plain cv2.createBackgroundSubtractorMOG() raises AttributeError.
    fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
    while True:
        ret, frame = cap.read()
        if not ret:
            # Camera read failed: apply(None) would raise.
            break
        fgmask = fgbg.apply(frame)
        cv2.imshow('frame', fgmask)
        k = cv2.waitKey(30) & 0xff
        if k == 27:  # ESC
            break
    cap.release()
    cv2.destroyAllWindows()
    print("")
def bg_separation(frames, t):
    """Run background subtraction over a stack of frames.

    The subtractor is selected by ``FLAGS.bgs`` ("MOG", "MOG2" or "GMG");
    any other value prints an error and exits.

    :param frames: array of shape (n, h, w, channels).
    :param t: unused; kept for interface compatibility with callers.
    :returns: uint8 array of shape (n, h, w) with one foreground mask per frame.
    """
    if FLAGS.bgs == "MOG":
        # MOG and GMG live in the contrib `bgsegm` module in OpenCV >= 3.
        separator = cv2.bgsegm.createBackgroundSubtractorMOG()
    elif FLAGS.bgs == "MOG2":
        separator = cv2.createBackgroundSubtractorMOG2()
    elif FLAGS.bgs == "GMG":
        separator = cv2.bgsegm.createBackgroundSubtractorGMG()
        # NOTE: the original also built a 3x3 ellipse kernel here but never
        # used it; dropped as dead code.
    else:
        # Was a Python-2 print statement (SyntaxError under Python 3, and
        # inconsistent with the rest of this file).
        print(FLAGS.bgs, "method not supported")
        sys.exit(0)
    n, h, w, _ = frames.shape
    output_frames = np.zeros((n, h, w), dtype=np.uint8)
    for i in range(n):
        output_frames[i] = separator.apply(frames[i])
    return output_frames
def get_cv2_object(name):
    """Instantiate the cv2 object corresponding to *name*.

    Strips an optional ``cv2.`` / ``cv.`` prefix, then either uses a
    special-case factory for abstract bases, or falls back to calling
    ``cv2.<name>()`` and finally ``cv2.create<name>()``.

    :returns: ``(object, stripped_name)``.
    """
    for prefix in ("cv2.", "cv."):
        if name.startswith(prefix):
            name = name[len(prefix):]
    # Abstract bases that need an explicit factory call.
    special = {
        "Algorithm": lambda: cv2.Algorithm__create("Feature2D.ORB"),
        "FeatureDetector": lambda: cv2.FeatureDetector_create("ORB"),
        "DescriptorExtractor": lambda: cv2.DescriptorExtractor_create("ORB"),
        "BackgroundSubtractor": lambda: cv2.createBackgroundSubtractorMOG(),
        "StatModel": lambda: cv2.KNearest(),
    }
    factory = special.get(name)
    if factory is not None:
        return factory(), name
    # Generic path: the class itself, else its create-factory.
    try:
        obj = getattr(cv2, name)()
    except AttributeError:
        obj = getattr(cv2, "create" + name)()
    return obj, name
def run():
    """Capture stills from the Pi camera and call trigger() on motion.

    Motion is detected when the summed MOG foreground mask exceeds a
    threshold; a per-frame cooldown counter prevents rapid re-triggering.
    """
    trig_sleep = 100  # cooldown (frames) before another trigger may fire
    thres = 50        # foreground-mask activation threshold
    cam = PiCamera()
    cam.start_preview()
    sleep(5)  # let auto-exposure / white balance settle
    stream = picamera.array.PiRGBArray(cam)
    # MOG lives in the contrib `bgsegm` module in OpenCV >= 3.
    fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
    for x in range(10000):
        # Get a picamera still image.
        cam.capture(stream, format='bgr')
        frame = stream.array
        # PiRGBArray must be rewound between captures, otherwise the buffer
        # grows and the next capture() raises (per picamera docs).
        stream.truncate(0)
        fgmask = fgbg.apply(frame)
        print(frame)
        if np.sum(fgmask) > thres and trig_sleep < 0:
            trig_sleep = 100  # restart the cooldown
            trigger(datetime.datetime.now(), frame)
        trig_sleep -= 1
def background_subtractor(video_link, method="MOG"):
    """Show the foreground mask of a video using a chosen subtractor.

    :param video_link: path/URL/device index accepted by cv2.VideoCapture.
    :param method: "MOG", "MOG2" or "GMG" (GMG masks get a morphological open).
    :raises ValueError: for an unknown method (previously this fell through
        and crashed later with NameError on ``fgbg``).
    """
    cap = cv2.VideoCapture(video_link)
    kernel = None
    if method == "MOG":
        # MOG and GMG live in the contrib `bgsegm` module in OpenCV >= 3.
        fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
    elif method == "MOG2":
        fgbg = cv2.createBackgroundSubtractorMOG2()
    elif method == "GMG":
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        fgbg = cv2.bgsegm.createBackgroundSubtractorGMG()
    else:
        raise ValueError("unsupported background-subtraction method: %s" % method)
    while True:
        ret, frame = cap.read()
        if not ret:
            # End of stream: apply(None) would raise.
            break
        fgmask = fgbg.apply(frame)
        if method == "GMG":
            # GMG output is noisy; open it with a small ellipse kernel.
            fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        cv2.imshow('frame', fgmask)
        print(fgmask)
        k = cv2.waitKey(30) & 0xff
        if k == 27:  # ESC
            break
    cap.release()
    cv2.destroyAllWindows()
def GMM_Mog2():
    """Detect moving objects with the MOG2 background subtractor.

    Draws a red bounding box around moving blobs whose contour area is in
    [1600, 16000] and shows the annotated camera feed until 'q' is pressed.
    """
    import cv2
    import numpy as np
    cam = cv2.VideoCapture(0)
    # The function is named after MOG2 but originally created the
    # (nonexistent in plain cv2 >= 3) MOG subtractor; use MOG2 as intended.
    fgbg = cv2.createBackgroundSubtractorMOG2()
    # cv2.erode/dilate need a kernel array; the original passed the tuple
    # (21, 21), which is not a valid structuring element.
    kernel = np.ones((21, 21), np.uint8)
    while cam.isOpened():
        ret, frame = cam.read()
        if not ret:
            # Originally the loop spun forever on a failed read.
            break
        fgmask = fgbg.apply(frame)
        # Filter noise: erode then dilate (the original computed the eroded
        # image but never used it and dilated the raw mask instead).
        eroded = cv2.erode(fgmask, kernel, iterations=1)
        dilated = cv2.dilate(eroded, kernel, iterations=1)
        # findContours returns 3 values in OpenCV 3.x, 2 in 4.x.
        found = cv2.findContours(dilated.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)
        cnts = found[1] if len(found) == 3 else found[0]
        for c in cnts:
            c_area = cv2.contourArea(c)
            if c_area < 1600 or c_area > 16000:
                # Skip blobs that are too small or too large — these are
                # most likely false detections.
                continue
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        cv2.imshow("origin", frame)
        if cv2.waitKey(1) == ord('q'):
            break
    cv2.destroyAllWindows()
import numpy as np
import cv2

# NOTE(review): the source is a still image; VideoCapture will deliver it at
# most once and then report failure — confirm whether a video was intended.
cap = cv2.VideoCapture('image.jpg')
# MOG moved to the contrib `bgsegm` module in OpenCV >= 3;
# plain cv2.createBackgroundSubtractorMOG() raises AttributeError.
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while True:
    ret, frame = cap.read()
    if not ret:
        # Was missing: fgbg.apply(None) raises once the source is exhausted.
        break
    fgmask = fgbg.apply(frame)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # ESC
        break
cap.release()
cv2.destroyAllWindows()
def getbg():
    """Create and return the global MOG background subtractor.

    Reads the module-level tuning globals ``parbgratio`` (background ratio)
    and ``parnoise`` (noise sigma); stores the subtractor in the global
    ``fgbg`` and also returns it.
    """
    global fgbg
    # Signature: createBackgroundSubtractorMOG(history, nmixtures,
    # backgroundRatio, noiseSigma).  MOG lives in cv2.bgsegm in OpenCV >= 3.
    fgbg = cv2.bgsegm.createBackgroundSubtractorMOG(500, 6, parbgratio, parnoise)
    return fgbg
'''
Program name : BackGround Subtraction using MOG in python
Author name : Prateek Mishra
'''
# Import modules
import numpy as np
import cv2

# Creating video capture object
cap = cv2.VideoCapture('InputVideo.avi')
# Creating background subtractor object.
# MOG lives in the opencv-contrib `bgsegm` module in OpenCV >= 3;
# plain cv2.createBackgroundSubtractorMOG() raises AttributeError.
subtactor = cv2.bgsegm.createBackgroundSubtractorMOG()

while True:
    # Reading frames from video
    ret, frame = cap.read()
    if not ret:
        # End of video: apply(None) would raise.
        break
    # Applying MOG background subtractor
    fgmask = subtactor.apply(frame)
    # Displaying output frame
    cv2.imshow('frame', fgmask)
    if cv2.waitKey(30) & 0xff == 27:  # ESC quits
        break

cap.release()
cv2.destroyAllWindows()
import numpy as np
import cv2
import os

#cap = cv2.VideoCapture(os.path.abspath('../video/soccer_ball4_orig.mov'))
cap = cv2.VideoCapture(os.path.abspath('../video/tennis_ball2.mov'))
# The old note "Doesn't work in OpenCV 3.0-beta" referred to
# cv2.createBackgroundSubtractorMOG, which moved to the contrib `bgsegm`
# module.  Params: history=200, nmixtures=5, backgroundRatio=0.7,
# noiseSigma=0.1.
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG(200, 5, 0.7, 0.1)
while True:
    ret, frame = cap.read()
    if not ret:
        # End of clip: apply(None) would raise.
        break
    fgmask = fgbg.apply(frame)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # ESC
        break
cap.release()
cv2.destroyAllWindows()
def detectPeople(self):
    """Count people crossing virtual entry/exit lines in a video.

    Reads frames from a hard-coded video file, segments moving blobs with
    background subtraction, tracks each blob as a ``Pessoa`` object and
    counts up/down crossings of two horizontal lines.  When frame reading
    fails (end of stream), it prints the counters, clusters the collected
    crossing centroids with k-means, plots them, and trains/saves a Normal
    Bayes classifier on the collected Hu-moment samples.
    """
    _center = [314.67404, 231.52438]  # reference point used only for a distance printout
    #_center = [112.0679, 132.63786]
    # NOTE(review): `list` shadows the builtin; it collects crossing centroids.
    list = []
    list_P = []  # training samples (Hu moments, one per contour)
    list_N = []  # training labels: 0 = plain contour, 1 = confirmed crossing
    # Despite the variable name, this is a Normal Bayes classifier
    # (the SVM configuration calls are commented out).
    svm = cv2.ml.NormalBayesClassifier_create()
    #svm.setKernel(cv2.ml.SVM_LINEAR)
    #svm.setType(cv2.ml.SVM_C_SVC)
    #svm.setC(2.67)
    #svm.setGamma(5.383)
    # Entry / exit counters
    cnt_up = 0
    cnt_down = 0
    # Video source (alternative sources kept for reference)
    #cap = cv2.VideoCapture(0)  # uncomment to use the camera
    cap = cv2.VideoCapture("..\\..\\bus.avi")  # capture from a video file
    # Uncomment to print the video properties
    """for i in range(19): print (i, cap.get(i))"""
    # Frame width and height (VideoCapture property ids 3 and 4)
    w = cap.get(3)
    h = cap.get(4)
    x_meio = int(w/2)
    y_meio = int(h/2)
    frameArea = h*w
    print("Area do Frame:", frameArea)
    areaTH = frameArea/50  # contour-area threshold used to accept a blob as a person
    print ('Area Threshold', areaTH)
    # Entry/exit counting lines (fractions of the height; tune per camera)
    line_up = int(4.7*(h/10))
    line_down = int(5.3*(h/10))
    print ("Line UP:", line_up)
    print ("Line DOW:", line_down)
    # Tracking limits near the top/bottom borders
    up_limit = int(0.1*(h/10))
    down_limit = int(9.9*(h/10))
    # Auxiliary guide lines drawn around the counting band
    l1UP = int(4.8*(h/10))
    l1DOWN = int(5.2*(h/10))
    l2UP = int(4.9*(h/10))
    l2DOWN = int(5.1*(h/10))
    print ("Limite superior:", up_limit)
    print ("Limite inferior:", down_limit)
    # Line drawing properties
    print ("Red line y:",str(line_down))
    print ("Blue line y:", str(line_up))
    line_down_color = (0,0,255)
    line_up_color = (255,0,0)
    # Pre-built polylines for every horizontal guide line
    pt1 = [0, line_down];
    pt2 = [w, line_down];
    pts_L1 = np.array([pt1,pt2], np.int32)
    pts_L1 = pts_L1.reshape((-1,1,2))
    pt3 = [0, line_up];
    pt4 = [w, line_up];
    pts_L2 = np.array([pt3,pt4], np.int32)
    pts_L2 = pts_L2.reshape((-1,1,2))
    pt5 = [0, up_limit];
    pt6 = [w, up_limit];
    pts_L3 = np.array([pt5,pt6], np.int32)
    pts_L3 = pts_L3.reshape((-1,1,2))
    pt7 = [0, down_limit];
    pt8 = [w, down_limit];
    pts_L4 = np.array([pt7,pt8], np.int32)
    pts_L4 = pts_L4.reshape((-1,1,2))
    pt9 = [0, l1UP];
    pt10 = [w, l1UP];
    pts_L5 = np.array([pt9,pt10], np.int32)
    pts_L5 = pts_L5.reshape((-1,1,2))
    pt11 = [0, l1DOWN];
    pt12 = [w, l1DOWN];
    pts_L6 = np.array([pt11,pt12], np.int32)
    pts_L6 = pts_L6.reshape((-1,1,2))
    pt13 = [0, l2UP];
    pt14 = [w, l2UP];
    pts_L7 = np.array([pt13,pt14], np.int32)
    pts_L7 = pts_L7.reshape((-1,1,2))
    pt15 = [0, l2DOWN];
    pt16 = [w, l2DOWN];
    pts_L8 = np.array([pt15,pt16], np.int32)
    pts_L8 = pts_L8.reshape((-1,1,2))
    # Background subtractor.
    # NOTE(review): the second assignment overwrites the MOG2 instance, and
    # cv2.createBackgroundSubtractorMOG() does not exist in plain OpenCV >= 3
    # (it lives in cv2.bgsegm, see the commented alternative) — this line
    # likely raises AttributeError on a modern build; confirm the intended
    # subtractor.
    #fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows = False)
    #fgbg = cv2.createBackgroundSubtractorMOG2(500,detectShadows = True)
    fgbg = cv2.createBackgroundSubtractorMOG2()
    fgbg = cv2.createBackgroundSubtractorMOG()
    #fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
    #fgbg = cv2.BackgroundSubtractorMOG()
    #kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    #fgbg = cv2.bgsegm.createBackgroundSubtractorGMG()
    # Structuring elements for the morphological filters
    kernelOp = np.ones((3,3),np.uint8)
    kernelOp2 = np.ones((5,5),np.uint8)
    kernelOp3 = np.ones((8, 8), np.uint8)
    kernelCl = np.ones((11,11),np.uint8)
    kernelCl2 = np.ones((8, 8), np.uint8)
    # Tracking-loop state
    font = cv2.FONT_HERSHEY_SIMPLEX
    pessoas = []     # active Pessoa trackers
    max_p_age = 5    # frames a tracker may live without an update
    pid = 1          # next person id
    while(cap.isOpened()):
        # Read one image from the video source
        ret, frame = cap.read()
        for pessoa in pessoas:
            pessoa.age_one()  # age every person one frame
        #########################
        #  PRE-PROCESSING       #
        #########################
        # Apply background subtraction (fgmask2 is computed but never used)
        fgmask = fgbg.apply(frame)
        fgmask2 = fgbg.apply(frame)
        # Binarise to drop the grey shadow level
        try:
            fgmask = cv2.GaussianBlur(fgmask, (3, 3), 0)
            ret,imBin= cv2.threshold(fgmask,128,255,cv2.THRESH_BINARY)
            # Opening (erode -> dilate) removes noise
            mask = cv2.morphologyEx(imBin, cv2.MORPH_OPEN, kernelOp)
            # Closing (dilate -> erode) joins white regions
            mask = cv2.morphologyEx(mask , cv2.MORPH_CLOSE, kernelCl)
        except:
            # Reached when cap.read() failed (frame is None): dump counters,
            # cluster the crossing points and train/save the classifier.
            # NOTE(review): the original `break` is commented out, so after
            # this handler the loop body continues using the previous `mask`.
            print('EOF')
            print ('Entrou:',cnt_up)
            print ('Saiu:',cnt_down)
            Z = np.vstack(list)
            # convert to np.float32
            Z = np.float32(Z)
            # define criteria and apply kmeans()
            criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
            # NOTE(review): K=1, so every label is 0 and cluster B below is
            # always empty — confirm whether K=2 was intended.
            ret, label, center = cv2.kmeans(Z, 1, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
            # Now separate the data, note the flatten()
            A = Z[label.ravel() == 0]
            B = Z[label.ravel() == 1]
            # Plot the clustered crossing points
            plt.scatter(A[:, 0], A[:, 1])
            plt.scatter(B[:, 0], B[:, 1], c='r')
            plt.scatter(center[:, 0], center[:, 1], s=80, c='y', marker='s')
            plt.xlabel('Height'), plt.ylabel('Weight')
            plt.show()
            # Train the classifier on the accumulated Hu-moment samples
            a = np.float32(list_P)
            responses = np.array(list_N)
            print(len(a))
            print(len(responses))
            trained = svm.train(a, cv2.ml.ROW_SAMPLE, responses)
            if (trained):
                print("trained", trained)
                print("IsTrained", svm.isTrained())
                svm.save('svm_data1.dat')
            else:
                print("nao saolvou")
        #################
        #   CONTOURS    #
        #################
        # RETR_EXTERNAL returns only extreme outer flags. All child contours are left behind.
        _, contours0, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for cnt in contours0:
            area = cv2.contourArea(cnt)
            peri = cv2.arcLength(cnt, True)
            M = cv2.moments(cnt)
            # Every contour is first recorded as a negative (label 0) sample;
            # it is replaced by a positive one below when a crossing is confirmed.
            list_P.append(np.float32(cv2.HuMoments(M)))
            list_N.append(0)
            shape = cv2.HuMoments(M).flatten()
            if area > areaTH:
                #####################
                #     TRACKING      #
                #####################
                # Centroid and bounding box of the blob.
                cx = int(M['m10']/M['m00'])
                cy = int(M['m01']/M['m00'])
                # NOTE(review): this shadows the frame-level w/h from above.
                x, y, w, h = cv2.boundingRect(cnt)
                dist = math.hypot(_center[0] - cx, _center[1] - cy)
                new = True
                if cy in range(up_limit, down_limit):
                    for pessoa in pessoas:
                        if abs(cx - pessoa.getX()) <= w and abs(cy - pessoa.getY()) <= h:
                            # The blob is close to an already-tracked person
                            new = False
                            pessoa.updateCoords(cx,cy)  # update coordinates and reset age
                            if pessoa.deslocaCima(line_down,line_up) == True:
                                # Crossed upwards -> count an entry
                                print("Diferenca de tempo: ", (pessoa.getOffset() - time.time()))
                                cnt_up += 1;
                                print ("ID: ",pessoa.getId(),'Entrou as',time.strftime("%c"))
                                print("Area objeto: " + str(area))
                                print("Distancia do centroide da pessoa: ", dist)
                                print(M)
                                print("Perimetro: ", peri)
                                print("Shape da pessoa: ", shape[0] < 0.30)
                                print("Shape da pessoa: ", shape[0] )
                                list.append((cx,cy))
                                # Replace the provisional label-0 sample with a positive one
                                list_P.pop()
                                list_N.pop()
                                list_P.append(np.float32(cv2.HuMoments(M)))
                                list_N.append(1)
                            elif pessoa.deslocaBaixo(line_down,line_up) == True :
                                # Crossed downwards -> count an exit
                                print("Diferenca de tempo: ", (pessoa.getOffset() - time.time()))
                                cnt_down += 1;
                                print ("ID: ",pessoa.getId(),'Saiu as',time.strftime("%c"))
                                print("Area objeto: " + str(area))
                                print("Distancia do centroide da pessoa: ", dist)
                                print(M)
                                print("Perimetro: ", peri)
                                print("Shape da pessoa: ", shape[0] < 0.30)
                                print("Shape da pessoa: ", shape[0])
                                list.append((cx, cy))
                                # Replace the provisional label-0 sample with a positive one
                                list_P.pop()
                                list_N.pop()
                                list_P.append(np.float32(cv2.HuMoments(M)))
                                list_N.append(1)
                            break
                        if pessoa.getState() == '1':
                            # Person already counted: retire the tracker once
                            # it leaves the counting band.
                            if pessoa.getDir() == 'Saiu' and pessoa.getY() > down_limit:
                                pessoa.setDone()
                            elif pessoa.getDir() == 'Entrou' and pessoa.getY() < up_limit:
                                pessoa.setDone()
                        if pessoa.timedOut():
                            # Remove stale trackers from the list
                            index = pessoas.index(pessoa)
                            pessoas.pop(index)
                            del pessoa  # free the loop variable
                    if new == True:
                        # Start tracking a new person
                        p = Pessoa.Pessoa(pid, cx, cy, max_p_age, time.time())
                        pessoas.append(p)
                        pid += 1
                #################
                #   DRAWING     #
                #################
                cv2.circle(frame,(cx,cy), 5, (0,0,255), -1)
                img = cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)
        #END for cnt in contours0
        #########################
        #  DRAW TRAJECTORIES    #
        #########################
        for pessoa in pessoas:
            if len(pessoa.getTracks()) >= 2:
                pts = np.array(pessoa.getTracks(), np.int32)
                pts = pts.reshape((-1,1,2))
                frame = cv2.polylines(frame,[pts],False,pessoa.getRGB())
            cv2.putText(frame, str(pessoa.getId()),(pessoa.getX(),pessoa.getY()),font,0.3,pessoa.getRGB(),1,cv2.LINE_AA)
        #################
        #    IMAGE      #
        #################
        str_up = 'Entraram '+ str(cnt_up)
        str_down = 'Sairam '+ str(cnt_down)
        tituloup = "Entrada "
        titulodown = "Saida "
        #dataehora = strftime("%c")
        dataehora = strftime("%A, %d %b %Y %H:%M:%S", gmtime())
        # Overlay all guide lines and the header, then show the windows
        frame = cv2.polylines(frame,[pts_L1],False,line_down_color,thickness=1)
        frame = cv2.polylines(frame,[pts_L2],False,line_up_color,thickness=1)
        frame = cv2.polylines(frame,[pts_L3],False,(255,255,0),thickness=1)
        frame = cv2.polylines(frame,[pts_L4],False,(255,255,0),thickness=1)
        frame = cv2.polylines(frame,[pts_L5],False,(line_up_color),thickness=1)
        frame = cv2.polylines(frame,[pts_L6],False,(line_down_color),thickness=1)
        frame = cv2.polylines(frame,[pts_L7],False,(line_up_color),thickness=1)
        frame = cv2.polylines(frame,[pts_L8],False,(line_down_color),thickness=1)
        self.escreveCabecalho(frame, str_up, str_down, titulodown,tituloup,dataehora,font, x_meio)
        cv2.imshow('Frame',frame)
        cv2.imshow('Debug',mask)
        cv2.imshow('Binarizacao', imBin)
        # Press ESC to quit
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
    #END while(cap.isOpened())
    #################
    #   CLEANUP     #
    #################
    cap.release()
    cv2.destroyAllWindows()
def main():
    """Track a court template across a video via iterative homography refinement.

    For each frame: threshold the court colour in HSV, extract Canny edges,
    then iteratively estimate a homography update ``dm`` between the warped
    template and the nearest edge pixels (up to ``Theta`` iterations).  The
    accumulated homography ``M`` is used to un-warp each frame into template
    space and blend it into a slowly-averaged mosaic plus a visualisation
    overlay.  Press 'q' in a window to stop.
    """
    fourcc = cv2.VideoWriter_fourcc(*'XVID')  # writer objects are currently disabled
    #outGrid = cv2.VideoWriter('sep/0_0.grid.avi', fourcc, 20.0, (1280, 720))
    #outFull = cv2.VideoWriter('sep/0_0.full.avi', fourcc, 20.0, (1280, 720))
    cap = cv2.VideoCapture('sep/0_0.avi')
    #cap = cv2.VideoCapture('sep/25_68351.avi')
    fn = 0                    # frame counter
    ret, iframe = cap.read()  # first frame fixes the working resolution
    H, W, _ = iframe.shape
    tpl = template()          # court template image (project helper)
    M = initM()               # initial template->frame homography (project helper)
    visTpl = cv2.warpPerspective(tpl, np.eye(3), (1280, 720))
    cRot = cv2.warpPerspective(tpl, M, (1280, 720))  # template warped by current M
    fullCourt = []
    fullImg = np.zeros_like(iframe)  # running top-down mosaic
    m = np.eye(3)                    # per-frame incremental homography
    # NOTE(review): time.clock() was removed in Python 3.8 — this function
    # needs an older interpreter (time.perf_counter() is the replacement).
    tic = time.clock()
    MS = [M]      # per-frame homography history
    Theta = 25    # max refinement iterations per frame
    prev = iframe
    # NOTE(review): cv2.createBackgroundSubtractorMOG() does not exist in
    # plain OpenCV >= 3 (it is in cv2.bgsegm); the subtractor is only used
    # by commented-out code below anyway.
    fgbg = cv2.createBackgroundSubtractorMOG()
    while(cap.isOpened()):
        ret, frame = cap.read()
        if not ret:
            break
        fn += 1
        # Keep only court-coloured pixels (HSV range), denoise, take edges
        frameHSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        threshColor = cv2.inRange(
            frameHSV, np.array([0, 47, 151]), np.array([16, 255, 255]))
        threshColor = cv2.morphologyEx(
            threshColor, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
        edges = cv2.Canny(threshColor, 200, 255, apertureSize=3)
        # Mask out fixed distracting regions (hard-coded rectangle and circle)
        edges[565:650, 240:950] = 0
        cv2.circle(edges, (1042, 620), 29, 0, -1)
        dstIdx = cv2.findNonZero(edges).reshape(-1, 2)
        if len(dstIdx) < 5000:
            # Too few edge pixels to fit reliably: reuse the last homography
            MS.append(MS[-1])
            continue
        nbrs = NearestNeighbors(
            n_neighbors=1, radius=1.0, algorithm='auto').fit(dstIdx)
        cnt = Theta
        converge = 25
        # Refinement loop: warp template, match edge neighbours, fit dm
        while cnt:
            img = frame.copy()
            cnt -= 1
            cv2.warpPerspective(tpl, np.dot(m, M), (W, H), dst=cRot)
            oKeys, nKeys = neighbors(cRot, dstIdx, nbrs, d=10)
            dm, mask = cv2.findHomography(oKeys, nKeys, method=cv2.LMEDS)
            if dm is None:
                # Fit failed: fall back to the identity update
                dm = np.eye(3)
            else:
                pass
            converge = np.linalg.norm(dm - np.eye(3))  # distance from identity
            # Over-relax the update (factor 1.2), accumulate, keep m normalised
            dm = 1.2 * (dm - np.eye(3)) + np.eye(3)
            m = np.dot(dm, m)
            m = m / m[2, 2]
            # Disabled interactive stepper (flip `while False` to enable)
            while False:
                cv2.imshow('frame', img)
                key = cv2.waitKey(5) & 0xFF
                if key == ord('q'):
                    return
                if key == ord('a'):
                    break
                if key == ord('c'):
                    cnt = False
                    break
        # Fold the converged per-frame update into the global homography
        M = np.dot(m, M)
        M = M / M[2, 2]
        MS.append(M)
        # Rough camera parameters decomposed from the incremental homography.
        # NOTE(review): np.sqrt of a negative product yields nan here —
        # presumably acceptable for the diagnostic printout only; verify.
        alpha = np.sqrt(m[0, 2] * m[2, 0])
        gamma = np.sqrt(-m[1, 2] * m[2, 1])
        f = - m[1, 2] / gamma
        r = m[0, 2] / (alpha * f)
        # Damp m towards the identity to seed the next frame (skip first frames)
        if fn > 2:
            m = .6 * (m - np.eye(3)) + np.eye(3)
        else:
            m = np.eye(3)
        # Overlay the warped template into the green channel
        img[..., 1] = cv2.bitwise_or(cRot, img[..., 1])
        # Un-warp the frame into template space and build its validity mask
        inv = cv2.warpPerspective(frame, M, (W, H),
                                  flags=cv2.WARP_INVERSE_MAP,
                                  borderMode=cv2.BORDER_CONSTANT,
                                  borderValue=(0, 0, 0))
        fmask = cv2.warpPerspective(np.zeros_like(cRot), M, (W, H),
                                    flags=cv2.WARP_INVERSE_MAP,
                                    borderMode=cv2.BORDER_CONSTANT,
                                    borderValue=255)
        # Blend the un-warped frame into the running mosaic (1% per frame)
        fullT1 = cv2.bitwise_and(fullImg, fullImg, mask=fmask)
        fullImg = cv2.addWeighted(fullImg, 0.99, inv, 0.01, 0.45)
        fmaskI = cv2.bitwise_not(fmask)
        fullImg = cv2.bitwise_or(fullImg, fullT1)
        # Compose the visualisation: live pixels where valid, mosaic elsewhere
        visImg = cv2.bitwise_and(inv, inv, mask=fmaskI)
        bg = cv2.bitwise_and(fullImg, fullImg, mask=fmask)
        visImg = cv2.add(visImg, bg)
        visImg[..., 1] = cv2.bitwise_or(visTpl, visImg[..., 1])
        toc = time.clock()
        # Progress / timing readout
        sys.stdout.write("\rI[%s] #%s %.4f %.4f sec/frame\n" %
                         (Theta - cnt, fn, converge, (toc - tic) / fn))
        sys.stdout.write("\r%.4f %.4f %.4f %.4f" % (alpha, gamma, f, r))
        sys.stdout.flush()
        cv2.putText(img, "[%d]#f %d %.2f %.2f sec/frame" %
                    (Theta - cnt, fn, converge, (toc - tic) / fn),
                    (10, 30), FONT, 1, (255, 255, 255), 1, cv2.LINE_AA)
        cv2.imshow('frame', img)
        cv2.putText(visImg, "[%d]#f %d %.2f %.2f sec/frame" %
                    (Theta - cnt, fn, converge, (toc - tic) / fn),
                    (10, 600), FONT, 1, (255, 255, 255), 1, cv2.LINE_AA)
        cv2.imshow('visImg', visImg)
        cv2.imshow('inv', inv)
        #fgmask = fgbg.apply(visImg)
        #cv2.imshow('fgmask',cv2.bitwise_and(inv, inv, mask=fgmask))
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            return
        # outGrid.write(img)
        # outFull.write(visImg)
    MS = np.array(MS)
#!/usr/local/bin/python3
import cv2
import numpy as np
import math
import time
import boto3
import os
import PIL
import glob
import subprocess
from IPython import embed
import sys
from pprint import pprint
import botocore
from shutil import copyfile

img = cv2.imread('/Desktop/CAPSTONE_R/chess-irs/pictures/processed_states/2019-03-25-01:34:12.041180:raw_state.jpg')
# MOG lives in cv2.bgsegm in OpenCV >= 3; plain cv2 has no
# createBackgroundSubtractorMOG.  NOTE(review): the original arguments
# (128, cv2.THRESH_BINARY, 1) map onto (history, nmixtures,
# backgroundRatio) — nmixtures = THRESH_BINARY = 0 looks unintended; verify.
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG(128, cv2.THRESH_BINARY, 1)
masked_image = fgbg.apply(img)
# Zero out grey level 127 (the shadow marker used by some subtractors).
masked_image[masked_image == 127] = 0
# Fix: cv2.imShow does not exist; imshow needs a window name, and a
# waitKey call is required for the window to actually render.
cv2.imshow('masked', masked_image)
cv2.waitKey(0)
# Exploratory background-subtraction transcript, apparently pasted from an
# interactive session.  NOTE(review): this is not runnable as a script —
# `vid`, `avg1`, `avg100` and `io` come from the session state, `ivg1` /
# `ivg100` are typos (NameError), cv2.createBackgroundSubtractorMOG needs
# cv2.bgsegm in OpenCV >= 3, and the version attribute is `cv2.__version__`.
cv2.accumulateWeighted(vid.get_data(10), avg1, 1)
cv2.accumulateWeighted(vid.get_data(102), avg1, 1)
io.imshow(avg1-avg100)
# NOTE(review): reversed() on an ndarray reverses rows, not intensities —
# probably not what was intended for display.
io.imshow(reversed(avg1-avg100))
io.imshow((avg1+avg100))
# Try the KNN subtractor
fgbg= cv2.createBackgroundSubtractorKNN()
fgmask = fgbg.apply(avg1)
io.imshow(fgmask)
# Try MOG2
fgbg= cv2.createBackgroundSubtractorMOG2()
fgmask = fgbg.apply(avg1)
io.imshow(fgmask)
# MOG2 with a 1000-frame history; inspect the background model
fgbg= cv2.createBackgroundSubtractorMOG2(1000)
fgbg.getBackgroundImage()
io.imshow(fgbg.getBackgroundImage())
io.imshow(fgmask)
# NOTE(review): first call raises AttributeError on plain OpenCV >= 3.
fgbg= cv2.createBackgroundSubtractorMOG()
fgbg= cv2.createBackgroundSubtractorMOG2()
# NOTE(review): ivg1/ivg100 are undefined (typo for avg1/avg100).
fgbg.apply(ivg1, ivg100)
# Second positional arg of apply() is the output fgmask buffer.
fgbg.apply(avg1, avg100)
a = fgbg.apply(avg1, avg100)
io.imshow(a)
a = fgbg.apply(avg1)
io.imshow(a)
a = fgbg.apply(avg1)
a = fgbg.apply(avg100)
fgbg.clear()  # Algorithm.clear() resets the model state
fgbg.apply(avg100)
io.imshow(fgbg.apply(avg100))
# NOTE(review): the version string is cv2.__version__; cv2.version()
# raises AttributeError.
cv2
cv2.version
cv2.version()
import cv2
import numpy
import imutils

cap = cv2.VideoCapture(0)
# MOG lives in the opencv-contrib `bgsegm` module in OpenCV >= 3;
# plain cv2.createBackgroundSubtractorMOG() raises AttributeError.
fconvolve = cv2.bgsegm.createBackgroundSubtractorMOG()
while cap.isOpened():
    t, frame = cap.read()
    if not t:
        # Camera read failed / stream ended: apply(None) would raise.
        break
    result = fconvolve.apply(frame)
    # result = cv2.dilate(result, None, iterations = 2)
    countours = cv2.findContours(result, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    # grab_contours handles the OpenCV 3 vs 4 return-tuple difference.
    countours = imutils.grab_contours(countours)
    for c in countours:
        if cv2.contourArea(c) < 10000:
            # Ignore small blobs.
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)
    cv2.imshow('hi', frame)
    if cv2.waitKey(30) == ord('q'):
        break
# Was missing: release the camera and close the window on exit.
cap.release()
cv2.destroyAllWindows()
import cv2
import numpy as np

cap = cv2.VideoCapture('hello/Resources/vtest.avi')
# MOG lives in the opencv-contrib `bgsegm` module in OpenCV >= 3; plain
# cv2.createBackgroundSubtractorMOG() raises AttributeError.
# (The old note about detectShadows applied to MOG2/KNN, not MOG.)
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
# More subtraction methods (MOG2, KNN, GMG) are available.
while cap.isOpened():
    ret, frame = cap.read()
    if frame is None:
        # End of video.
        break
    fgmask = fgbg.apply(frame)
    cv2.imshow('frame', frame)
    cv2.imshow('FG MASK', fgmask)
    if cv2.waitKey(40) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()