def hough_circle():
    print('start')
    # Raw string so the backslashes in the Windows path are not treated as escapes.
    img1 = cv2.imread(r'C:\Sources\PycharmProjects\Capstone1\data\m00021718_r1.jpg')
    img2 = img1.copy()
    img2 = cv2.GaussianBlur(img2, (3, 3), 0)
    imgray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    circles = cv2.HoughCircles(imgray, cv2.HOUGH_GRADIENT, 1, 10,
                               param1=70, param2=50, minRadius=0, maxRadius=0)
    if circles is not None:
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            cv2.circle(img1, (i[0], i[1]), i[2], (255, 255, 0), 1)
        cv2.imshow('HoughCircles', img1)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    else:
        print('No circles were found.')
def get_frame_from_primary_display(x=25, y=25, width=825, height=615):
    """Grab frames from the primary monitor using ImageGrab.grab.

    The arguments are passed straight through as bbox=(x, y, width, height),
    i.e. the capture region runs from (x, y) to (width, height). Frames are
    processed and displayed in an OpenCV window until 'q' is pressed.

    Args:
        x (int): Left edge of the capture region
        y (int): Top edge of the capture region
        width (int): Right edge of the capture region
        height (int): Bottom edge of the capture region
    """
    if DEBUG:
        last_time = time.time()
    while True:
        # PressKey(DirectionKey.W.value)
        # bbox=(x, y, width, height); 40 would be the height of the title bar
        screen = np.array(ImageGrab.grab(bbox=(x, y, width, height)))
        new_screen = process_img(screen)
        if DEBUG:
            print('Loop took {} seconds'.format(time.time() - last_time))
            last_time = time.time()
        cv2.imshow('OSORI-GTA5', np.array(new_screen))
        # ord() converts a character to its ASCII code
        if cv2.waitKey(25) & 0xFF == ord('q'):  # waitKey(25) displays each frame for 25 ms
            cv2.destroyAllWindows()
            break
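# process_img() is defined elsewhere in the project; a minimal sketch of what it
# might do, assuming the usual grayscale-plus-edges preprocessing (hypothetical):
def process_img(screen):
    # Collapse to one channel, then keep only strong edges.
    gray = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
    return cv2.Canny(gray, 200, 300)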
def main():
    # Run the whole pipeline.
    binarization()
    cv2.imshow('differential', img)
    elapsed_time = time.time() - start
    print(elapsed_time)
    cv2.waitKey()
    cv2.destroyAllWindows()
def fusionsensor(WaterLevel1, WaterLevel2, WaterLevel3, WaterLevel4, Sitdec1, Sitdec2, Motionsensor):
    # Fusion sensor: water-level and sitting-toilet detection
    if WaterLevel1 >= 900:
        speech = Speech("Water level in tank 1 is now level 1", lang1)
        speech.play(sox_effects)  # play the speech to completion
        speech = Speech("ระดับนำ้ในถังที่ 1 ตอนนี้ ระดับที่ 1", lang2)  # Thai speech synthesis
        speech.play(sox_effects)
        speech = Speech("水箱1中的水位現在為1級", lang3)  # Chinese speech synthesis
        speech.play(sox_effects)
    if WaterLevel2 >= 900:
        speech = Speech("Water level in tank 1 is now full", lang1)
        speech.play(sox_effects)
        speech = Speech("ระดับนำ้ในถังที่ 1 ตอนนี้เต็มแล้ว", lang2)  # Thai speech synthesis
        speech.play(sox_effects)
        speech = Speech("水箱1級現已滿。", lang3)  # Chinese speech synthesis
        speech.play(sox_effects)
    if WaterLevel3 >= 900:
        speech = Speech("Water level in tank 2 is now level 1", lang1)
        speech.play(sox_effects)
        speech = Speech("ระดับนำ้ในถังที่ 2 ตอนนี้ ระดับที่ 1", lang2)  # Thai speech synthesis
        speech.play(sox_effects)
        speech = Speech("水箱2中的水位現在為1級", lang3)  # Chinese speech synthesis
        speech.play(sox_effects)
    if WaterLevel4 >= 900:
        speech = Speech("Water level in tank 2 is now full", lang1)
        speech.play(sox_effects)
        speech = Speech("ระดับนำ้ในถังที่ 2 ตอนนี้เต็มแล้ว", lang2)  # Thai speech synthesis
        speech.play(sox_effects)
        speech = Speech("水箱2級現已滿。", lang3)  # Chinese speech synthesis
        speech.play(sox_effects)
    if Sitdec1 < 1023:
        speech = Speech("Sitting on the closet", lang1)
        speech.play(sox_effects)
        speech = Speech("ตอนนี้มีคนนั่งบนส้วม", lang2)
        speech.play(sox_effects)
        speech = Speech("現在,有人坐在馬桶上。", lang3)
        speech.play(sox_effects)
    if Sitdec2 < 1023:
        speech = Speech("Sitting on the closet", lang1)
        speech.play(sox_effects)
        speech = Speech("ตอนนี้มีคนนั่งบนส้วม", lang2)
        speech.play(sox_effects)
        speech = Speech("現在,有人坐在馬桶上。", lang3)
        speech.play(sox_effects)
    if Motionsensor == 'True':  # the sensor reports the string 'True' after detecting motion
        ret, frame = cap.read()  # read one frame from the camera input
        cv2.imshow('Smarttoilet Cam guard', frame)
        print(str(Today) + str(timedata))  # show today's time data
        cv2.imwrite(str(Today) + "/" + str(timedata) + ".png", frame)  # save the frame with the timestamp as its name
        speech = Speech("Hello Welcome to smart toilet we keep your picture data as user identification", lang1)
        speech.play(sox_effects)
        speech = Speech("สวัสดีค่ะ ยินดีต้อนรับสู่ห้องนำ้อัจฉริยะค่ะเราขอเก็บข้อมูลรูปภาพของคุณเพื่อใช้ในการระบุตัวผู้ใช้งาน", lang2)  # Thai speech synthesis
        speech.play(sox_effects)
        speech = Speech("您好,歡迎來到智能浴室。我們希望收集您的圖像數據以便識別 ", lang3)  # Chinese speech synthesis
        speech.play(sox_effects)
        # let the preview window refresh; 'q' stops the capture early
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cap.release()
            return
        cap.release()
    elif Motionsensor == 'False':
        cv2.destroyAllWindows()  # close all windows once done
def get_face_with_video():
    cam = cv2.VideoCapture(0)  # open the computer's camera; the default device index is usually 0
    width = int(cam.get(cv2.CAP_PROP_FRAME_WIDTH) + 0.5)
    height = int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT) + 0.5)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # define the codec
    out = cv2.VideoWriter('output.mp4', fourcc, 20.0, (width, height))  # create the VideoWriter object
    while cam.isOpened():
        ret, frame = cam.read()  # capture frame by frame
        if ret:
            # write out the current frame
            out.write(frame)
            detectFace(frame)
            detectFace_1(frame)
            cv2.imshow('MyCamera', frame)
            if (cv2.waitKey(1) & 0xFF) == ord('q'):
                break
        else:
            break
    out.release()
    cam.release()
    cv2.destroyAllWindows()
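# detectFace() and detectFace_1() are defined elsewhere; a hypothetical sketch of
# such a helper, assuming a stock Haar cascade shipped with opencv-python:
def detectFace(frame):
    # Draw a rectangle around each detected face, modifying the frame in place.
    cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    for (x, y, w, h) in cascade.detectMultiScale(gray, 1.3, 5):
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)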
def generate():
    face_cascade = cv2.CascadeClassifier('cascades/haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('cascades/haarcascade_eye.xml')
    camera = cv2.VideoCapture(0)
    count = 0
    while True:
        ret, frame = camera.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            img = cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
            f = cv2.resize(gray[y:y + h, x:x + w], (200, 200))
            cv2.imwrite('data/lia/%s.pgm' % str(count), f)
            count += 1
        cv2.imshow('camera', frame)
        # waitKey expects an int; 1000 // 12 gives roughly 12 fps
        if cv2.waitKey(1000 // 12) & 0xff == ord('q'):
            break
    camera.release()
    cv2.destroyAllWindows()
def display():
    print("Press ESC to Delete")
    print("Press S to Save")
    imgFile = cv2.imread('test_image.png')
    cv2.imshow('image', imgFile)
    key = cv2.waitKey(0)
    if key == 27:  # ESC
        cv2.destroyWindow('image')
    if key == 115:  # 's'
        cv2.destroyAllWindows()
def shoot(self):
    if self.ret:
        print("click!")  # shutter sound
        cv2.imwrite("galary/" + str(self.cnt) + ".png", self.frame)
        cv2.imshow(str(self.cnt - 1) + '.png', self.frame)
        k = cv2.waitKey(0)
        if k == 27:  # ESC key
            cv2.destroyAllWindows()
def main():
    img1 = np.zeros((512, 512, 3), np.uint8)
    cv2.line(img1, (0, 99), (99, 0), (255, 0, 0), 2)
    cv2.rectangle(img1, (45, 50), (100, 345), (0, 0, 255), 20)
    cv2.imshow('Lena', img1)
    cv2.waitKey(0)
    # destroyAllWindows takes no arguments; destroyWindow closes a single named window
    cv2.destroyWindow('Lena')
def check_vid(self, rtsp):
    # `self` is needed here: the 'q' handler clears self.calib_switch,
    # so this is presumably a method of a calibration class.
    cap = cv2.VideoCapture(rtsp)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        cv2.imshow('frame', frame)
        k = cv2.waitKey(3) & 0xFF
        if k == ord('q'):
            self.calib_switch = False
            cv2.destroyAllWindows()
            break
    cap.release()
def main():
    start = time.time()
    bilateral()
    end = time.time() - start
    print(end)
    print(img)
    print(img2)
    cv2.imshow("bilateral", img)
    cv2.imshow("gray", img2)
    cv2.waitKey()
    cv2.destroyAllWindows()
def __build__(self):
    count = 0
    data_set = []
    label_set = []
    train_data = []
    assert filename, "filename is missing"
    while True:
        count += 1
        warm_up = 30
        if count % 10 == 0:
            print(count)
        tick = cv2.getTickCount()  # renamed so the time module is not shadowed
        printscreen = np.array(ImageGrab.grab(bbox=(0, 400, 1000, 1200)))
        printscreen = cv2.cvtColor(printscreen, cv2.COLOR_BGR2RGB)
        printscreen = cv2.resize(printscreen, (self.width, self.height))
        # print(printscreen.shape)
        cv2.imshow("window", printscreen)
        # Save images
        Save_Image.create_save_image(count, printscreen, warm_up)
        # Save .npy or .pickle, shape (-1, 224, 224, 3)
        printscreen = np.array(printscreen).reshape(self.width, self.height, 3)
        if warm_up < count <= part_1 + warm_up:  # 30..530
            tower = 1
            data_set.append(printscreen)
            label_set.append(tower)
        elif part_1 + (2 * warm_up) < count <= part_2 + (2 * warm_up):  # 560..1060
            minion = 2
            data_set.append(printscreen)
            label_set.append(minion)
        elif part_2 + (3 * warm_up) < count <= part_3 + (3 * warm_up):  # 1090..1590
            ezreal = 3
            data_set.append(printscreen)
            label_set.append(ezreal)
        elif count >= part_3 + (3 * warm_up):  # 1590
            train_data.append([data_set, label_set])
            train_data = np.array(train_data)
            np.save("data/" + filename, train_data)
            print("LOL_Data.npy saved", "shape is {}".format(train_data.shape))
            break
        if cv2.waitKey(25) & 0xFF == ord("q"):
            cv2.destroyAllWindows()
            break
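# part_1, part_2, part_3 and filename are module-level globals not shown in this
# snippet; the values below are inferred from the inline range comments
# (30..530, 560..1060, 1090..1590) and the save message, and are only an assumption:
part_1 = 500   # frames labelled "tower"
part_2 = 1000  # frames labelled "minion"
part_3 = 1500  # frames labelled "ezreal"
filename = "LOL_Data"  # np.save appends ".npy"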
def get_pic_x_y():
    # Run this function; the on_mouse callback defined above is invoked by it.
    cv2.namedWindow("image")
    cv2.setMouseCallback("image", on_mouse)
    cv2.imshow("image", img1)
    while True:
        try:
            cv2.waitKey(100)
        except Exception:
            cv2.destroyWindow("image")
            break
    cv2.waitKey(100)
    cv2.destroyAllWindows()
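# on_mouse() ("the one above") is not shown here; a minimal sketch of what such a
# callback might look like (hypothetical — just reports the clicked pixel):
def on_mouse(event, x, y, flags, param):
    # Print the coordinates of every left-button click in the "image" window.
    if event == cv2.EVENT_LBUTTONDOWN:
        print('clicked at ({}, {})'.format(x, y))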
def main():
    #--- Image input ---#
    print("Please input picture")
    input_picture = input('>>> ')  # raw_input() in the original Python 2 code
    input_img = cv2.imread(input_picture, 0)
    lbp_img = copy.deepcopy(input_img)
    img_hor = len(input_img)
    img_ver = len(input_img[0])
    histgram = [0 for col in range(256)]
    points = 8
    radius = 1

    #--- LBP processing ---#
    for i in range(0, img_hor):
        for j in range(0, img_ver):
            # reset array
            mask = [0 for col in range(points)]
            lbp_img[i][j] = 2**8 - 1
            # when the (left / right / top / bottom) neighbours exist, compute the LBP
            if i > radius and i + radius < img_hor and j > radius and j + radius < img_ver:
                #--- LBP computation ---#
                for part in range(0, points):
                    # offset from [i][j] to its neighbourhood (angle converted to radians)
                    x_len = int(radius * math.cos(math.radians(360 / points * part)) + 0.5)
                    y_len = int(radius * math.sin(math.radians(360 / points * part)) + 0.5)
                    # get mask pattern; cast to int to avoid uint8 wrap-around
                    # (sign() would also work for the positive/negative test)
                    if int(input_img[i + x_len][j + y_len]) - int(input_img[i][j]) > 0:
                        mask[part] = 1
                # rotate the mask to find the minimum value (rotation-invariant LBP)
                print(mask)
                for k in range(0, points):
                    lbp_val = 0
                    for x in range(0 + k, points):
                        lbp_val += mask[x] * (2**(x - k))
                    for y in range(0, k):
                        lbp_val += mask[y] * (2**(y + (points - k)))
                    lbp_img[i][j] = min(lbp_img[i][j], lbp_val)
                print(lbp_img[i][j])

    #--- Build the histogram ---#
    for i in range(0, img_hor):
        for j in range(0, img_ver):
            histgram[lbp_img[i][j]] += 1

    #--- Display the result ---#
    cv2.imshow("Local Binary Pattern", lbp_img)
    # when the input key is 's', save the result image, otherwise finish
    if cv2.waitKey(0) == ord('s'):
        cv2.imwrite("LBP.png", lbp_img)
    cv2.destroyAllWindows()
def thresh_binary():
    image = cv2.imread("/home/zk/opencvtest/opencvlearn/image/dog.jpg")
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    t, thresh1 = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)
    t, thresh2 = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY_INV)
    t, thresh3 = cv2.threshold(gray, 128, 255, cv2.THRESH_TRUNC)
    t, thresh4 = cv2.threshold(gray, 128, 255, cv2.THRESH_TOZERO)
    t, thresh5 = cv2.threshold(gray, 128, 255, cv2.THRESH_TOZERO_INV)
    cv2.imshow('Thresh_binary', thresh1)
    cv2.imshow('Thresh_binary_inv', thresh2)
    cv2.imshow('Thresh_trunc', thresh3)
    cv2.imshow('Thresh_tozero', thresh4)
    #cv2.imshow('Thresh_tozero_inv', thresh5)
    cv2.imshow('img', image)
    if cv2.waitKey(0) == 27:  # ESC
        cv2.destroyAllWindows()
def screenRecorder():
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # note: frames must match the (1366, 768) size declared here for the writer to accept them
    out = cv2.VideoWriter("output.avi", fourcc, 5.0, (1366, 768))
    while True:
        img = ImageGrab.grab()
        img_np = np.array(img)
        # ImageGrab returns RGB; convert to BGR for OpenCV display and writing
        frame = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
        cv2.imshow("Screen Recorder", frame)
        out.write(frame)
        if cv2.waitKey(1) == 27:  # ESC
            break
    out.release()
    cv2.destroyAllWindows()
def savevideo(name):
    cap = cv.VideoCapture(0)
    fourcc = cv.VideoWriter_fourcc(*'XVID')
    out = cv.VideoWriter(name, fourcc, 20.0, (640, 480))
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            out.write(frame)
            cv.imshow('frame', frame)
            if cv.waitKey(20) & 0xFF == ord('q'):
                break
        else:
            break
    cap.release()
    out.release()
    cv.destroyAllWindows()
def tracking():
    img1 = cv2.imread('./data/test/ruptured/mark/m03913745_1.jpg')
    hsv = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV)
    lower_white = np.array([0, 0, 99])
    upper_white = np.array([0, 0, 100])
    mask_white = cv2.inRange(hsv, lower_white, upper_white)
    res = cv2.bitwise_and(img1, img1, mask=mask_white)
    cv2.imshow('original', img1)
    cv2.imshow('WHITE', res)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def binary():
    img = cv2.imread('images.jpg', 0)  # load the image in grayscale (second parameter)
    a, thresh = cv2.threshold(img, 125, 255, cv2.THRESH_BINARY)
    b, thresh_inv = cv2.threshold(img, 125, 255, cv2.THRESH_BINARY_INV)
    c, thresh_trunch = cv2.threshold(img, 125, 255, cv2.THRESH_TRUNC)
    d, thresh_tozero = cv2.threshold(img, 125, 255, cv2.THRESH_TOZERO)
    e, thresh_tozero_inv = cv2.threshold(img, 125, 255, cv2.THRESH_TOZERO_INV)
    cv2.imshow('threshold', thresh)
    cv2.imshow('thresh_inv', thresh_inv)
    cv2.imshow('thresh_trunch', thresh_trunch)
    cv2.imshow('thresh_tozero', thresh_tozero)
    cv2.imshow('thresh_tozero_inv', thresh_tozero_inv)
    cv2.imshow('original', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def main():
    # Run the whole pipeline.
    if __name__ == "__main__":
        start = time.time()
        smoothing_color()
        end = time.time() - start
        print(end)
        gray = False
        if gray:
            gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            cv2.imshow('average', gray_img)
        else:
            cv2.imshow('average', img)
        cv2.waitKey()
        cv2.destroyAllWindows()
def facerun(request):
    cascPath = "/Users/sahilsagar/Desktop/ML/Project/Snapchat/face.xml"
    face = cv2.CascadeClassifier(cascPath)
    cam = cv2.VideoCapture(0)
    while True:
        check, frame = cam.read()
        faces = face.detectMultiScale(frame, 1.3, 5)
        for x, y, w, h in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 220), 2)
        cv2.imshow("face", frame)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
    cam.release()
    cv2.destroyAllWindows()
    return redirect('/')
def main():
    # Run the whole pipeline.
    start = time.time()
    smoothing()
    end = time.time()
    gray = False
    print(end - start)
    if gray:
        gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        cv2.imshow('gauss', gray_img)
    else:
        cv2.imshow('gauss', img)
    cv2.waitKey()
    cv2.destroyAllWindows()
def video():
    #cap = cv2.VideoCapture(0)  # webcam
    cap = cv2.VideoCapture('tes.mp4')  # play from a file
    while True:
        ret, frame = cap.read()
        if not ret:  # stop when the video ends
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert RGB to grayscale
        #cv2.imwrite('tangkapRGB.png', frame)  # save the image
        #cv2.imwrite('tangkapGRAY.png', gray)
        #cv2.imshow("Capture result", gray)
        cv2.imshow("Capture result", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
def main():
    #WindowName = "live"
    #cv2.namedWindow(WindowName)
    cap = cv2.VideoCapture(1)
    if cap.isOpened():
        ret, frame = cap.read()
    else:
        ret = False
    while ret:
        ret, frame = cap.read()
        #output = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        #frame = imutils.resize(frame, width=600)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, greenLower, greenUpper)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        # [-2] keeps this working across OpenCV versions that return 2 or 3 values
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        center = None
        if len(cnts) > 0:
            c = max(cnts, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            if x < 300:
                print("move left " + str(-1 * (x - 300)) + " spaces")
            else:
                print("move right " + str(-1 * (300 - x)) + " spaces")
            cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
        #cv2.imshow(WindowName, output)
        cv2.imshow("color original", frame)
        cv2.imshow("Mask", mask)
        if cv2.waitKey(1) == 27:  # ESC
            break
    cv2.destroyAllWindows()
    cap.release()
def imgBlending(imgfile1, imgfile2):
    img1 = cv2.imread(imgfile1)
    img2 = cv2.imread(imgfile2)
    cv2.namedWindow('imgPane')
    cv2.createTrackbar('MIXING', 'imgPane', 0, 100, onMouse)
    mix = cv2.getTrackbarPos('MIXING', 'imgPane')
    while True:
        # blend the two images according to the trackbar position
        img = cv2.addWeighted(img1, float(100 - mix) / 100,
                              img2, float(mix) / 100, 0)
        cv2.imshow('imgPane', img)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:  # ESC
            break
        mix = cv2.getTrackbarPos('MIXING', 'imgPane')
    cv2.destroyAllWindows()
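# The onMouse trackbar callback passed to createTrackbar() above is not shown.
# Since the loop polls getTrackbarPos() itself, a no-op handler suffices
# (hypothetical sketch):
def onMouse(pos):
    # The new trackbar position is read inside imgBlending's loop instead.
    pass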
def detect_camera(cfgfile, weightfile, imgfile):
    m = Darknet(cfgfile)
    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))
    num_classes = 80
    if num_classes == 20:
        namesfile = 'data/voc.names'
    elif num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/names'
    use_cuda = 0
    if use_cuda:
        m.cuda()
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        cv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        pil_img = Image.fromarray(cv_img)
        sized = pil_img.resize((m.width, m.height))
        # run detection twice and report the timing of the second (warmed-up) pass
        for i in range(2):
            start = time.time()
            boxes = do_detect(m, sized, 0.5, 0.4, use_cuda)
            finish = time.time()
            if i == 1:
                print('Predicted in %f seconds.' % (finish - start))
        class_names = load_class_names(namesfile)
        pil_img = plot_boxes(pil_img, boxes, None, class_names)
        img = cv2.cvtColor(numpy.asarray(pil_img), cv2.COLOR_RGB2BGR)
        cv2.imshow("OpenCV", img)
        if cv2.waitKey(1) & 0xff == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
def video_save_webcam():
    cap = cv2.VideoCapture(0)
    # cv2.VideoWriter(filename, fourcc, fps, (width, height))
    fourcc = cv2.VideoWriter_fourcc(*'DIVX')  # cv2.cv.CV_FOURCC in the old OpenCV 2 API
    out = cv2.VideoWriter('output.avi', fourcc, 5, (640, 480))
    input('Press enter to start; press q to save and quit')  # raw_input() under Python 2
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            frame = cv2.flip(frame, 1)  # flip() takes a flip code (1 = mirror horizontally), not degrees
            out.write(frame)
            cv2.imshow("Capture result", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cap.release()
    out.release()
    cv2.destroyAllWindows()
def main():
    path = "/home/g_r00t/PRIYANK/ex_images/"
    imgpath1 = path + "4.2.01.tiff"
    img = cv2.imread(imgpath1)
    rows, columns, channels = img.shape
    angle = 0
    while True:
        if angle == 360:
            angle = 0
        R1 = cv2.getRotationMatrix2D((columns / 2, rows / 2), angle, 1)
        print(R1)
        output = cv2.warpAffine(img, R1, (columns, rows))
        cv2.imshow('Rotation Image', output)
        angle = angle + 1
        time.sleep(0.01)
        if cv2.waitKey(1) == 27:  # ESC
            break
    cv2.destroyAllWindows()
def main():
    # Run the whole pipeline.
    final()
    cv2.imshow('differential', img)
    cv2.waitKey()
    cv2.destroyAllWindows()
if not sorted_ctrs:
    img_last_class = ['nothing']
for j, ctr in enumerate(sorted_ctrs):
    x, y, w, h = cv2.boundingRect(ctr)
    if w > 35 and h > 40 and j < 2:
        # img = cv2.cvtColor(img_or, cv2.COLOR_BGR2GRAY)
        img = img_or[y:y + h, x:x + w, :]
        img = cv2.resize(img, (100, 250))
        img = img.reshape(1, 100, 250, 3)
        classes = model.predict(img)
        class_index = np.where(classes > 0.5)[1][0]
        image_class = img_class(class_index)
        if image_class != img_last_class:
            class_count[class_index] = class_count[class_index] + 1
        # if sum(sum(sum(img_or[y:y+h,x:x+w,:]-cv2.resize(img_last, (w, h)))))>100:
        cv2.rectangle(img_or, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(img_or, image_class, (x, y + 20), font, 0.8, (0, 255, 0), 2, cv2.LINE_AA)
        img_last_class = image_class
# images2 = np.hstack((img_or, cv2.cvtColor(img_diff, cv2.COLOR_GRAY2BGR)))
images2 = np.hstack((img_or, print_text(class_count.astype(int), np.copy(blank_img))))
cv2.imshow(winName, images2)
i = i + 1
key = cv2.waitKey(1) & 0xFF
if key == 27:  # comment this 'if' out to hide the window
    cv2.destroyWindow(winName)
    break
cv2.waitKey(0)
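# img_class() maps the model's class index to a label string elsewhere in the
# script; a hypothetical sketch, with placeholder label names:
def img_class(index):
    labels = ('class_0', 'class_1', 'class_2')  # the real label names are not shown
    return labels[index]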
import cv2

img = cv2.imread("sample2.jpg")
grayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imwrite("GrayImage.jpg", grayImg)
cv2.imshow("OriginalImage", img)
cv2.imshow("GrayScale Image", grayImg)
cv2.waitKey(0)
cv2.destroyAllWindows()
import cv2
import numpy as np


def sum_rgb(src):
    b, g, r = cv2.split(src)
    # average the three channels, then clamp bright pixels with THRESH_TRUNC
    s = cv2.addWeighted(r, 1. / 3., g, 1. / 3., 0.0)
    s = cv2.addWeighted(s, 2. / 3., b, 1. / 3., 0.0)
    ret, dst = cv2.threshold(s, 100, 100, cv2.THRESH_TRUNC)
    return dst


if __name__ == '__main__':
    namedwindow = "example"
    ll = './image/OpticalFlow0.jpg'
    cv2.namedWindow(namedwindow, 1)
    src = cv2.imread(ll)
    dst = sum_rgb(src)
    cv2.imshow(namedwindow, dst)
    while True:
        c = cv2.waitKey(10)
        if c == 27:  # ESC
            break
    cv2.destroyAllWindows()