def draw_window(self, POS):
    title = self.IMG_PATH
    cv2.destroyAllWindows()
    img = cv2.imread(self.IMG_PATH)
    if not self.rectangle_drawn:
        if self.PREV_POS:
            cv2.rectangle(img, self.PREV_POS[::-1], POS[::-1], (0, 0, 255),
                          thickness=1, lineType=8, shift=0)
            title += ' | ' + ','.join(list(map(str, self.PREV_POS[::-1]))) + \
                     ' - ' + ','.join(list(map(str, POS[::-1])))
            self.COORDS = [self.PREV_POS[::-1][:], POS[::-1][:]]
            self.PREV_POS = None
            self.rectangle_drawn = True
        elif POS and POS[0] - 2 >= 0 and POS[0] + 2 < img.shape[0] \
                and POS[1] - 2 >= 0 and POS[1] + 2 < img.shape[1]:
            img[POS[0] - 2:POS[0] + 2, POS[1] - 2:POS[1] + 2] = [0, 0, 255]
            self.PREV_POS = POS[:]
    cv2.imshow(title, img)
    cv2.moveWindow(title, 100, 100)
    cv.SetMouseCallback(title, self.on_mouse, 0)
def main():
    # Read the .mp4 video using OpenCV Python API cv2.VideoCapture
    vid_cap = cv2.VideoCapture(vid_filepath)
    # Print the frame width, frame height, frames per second
    # and frame count of the input video using cap.get
    fwidth = vid_cap.get(cv.CV_CAP_PROP_FRAME_WIDTH)
    fheight = vid_cap.get(cv.CV_CAP_PROP_FRAME_HEIGHT)
    fps = vid_cap.get(cv.CV_CAP_PROP_FPS)
    fcount = vid_cap.get(cv.CV_CAP_PROP_FRAME_COUNT)
    print "Frame width: " + str(fwidth) + "\nFrame height: " + str(fheight) + \
          "\nFrames per second: " + str(fps) + "\nFrame count: " + str(fcount)
    _, img = vid_cap.read()
    filename_topview = '..//img//top-view.jpg'
    top_image = cv2.imread(filename_topview)
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cv2.namedWindow('frame1')
    cv.SetMouseCallback('frame1', on_mouse, None)
    cv2.imshow('frame1', top_image)
    cv2.waitKey(0)
    vid_cap.release()
def main():
    # Read the .mp4 video using OpenCV Python API cv2.VideoCapture
    cap = cv2.VideoCapture("football_left.mp4")
    # Print the frame width, frame height, frames per second
    # and frame count of the input video using cap.get
    fwidth = cap.get(cv.CV_CAP_PROP_FRAME_WIDTH)
    fheight = cap.get(cv.CV_CAP_PROP_FRAME_HEIGHT)
    fps = cap.get(cv.CV_CAP_PROP_FPS)
    fcount = cap.get(cv.CV_CAP_PROP_FRAME_COUNT)
    print "Frame width: " + str(fwidth) + "\nFrame height: " + str(fheight) + \
          "\nFrames per second: " + str(fps) + "\nFrame count: " + str(fcount)
    _, img = cap.read()
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cv2.namedWindow('frame1')
    cv.SetMouseCallback('frame1', on_mouse, None)
    cv2.imshow('frame1', img)
    cv2.waitKey(0)
    cap.release()
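Note: the two main() variants above target OpenCV 2.4, where the CV_CAP_PROP_* constants come from import cv2.cv as cv. As a hedged aside (assuming an OpenCV 3.x/4.x install, which is not what these snippets were written against), the same property reads would use the cv2.CAP_PROP_* constants:

import cv2

cap = cv2.VideoCapture("football_left.mp4")   # same sample file as above
fwidth = cap.get(cv2.CAP_PROP_FRAME_WIDTH)    # frame width in pixels
fheight = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # frame height in pixels
fps = cap.get(cv2.CAP_PROP_FPS)               # frames per second
fcount = cap.get(cv2.CAP_PROP_FRAME_COUNT)    # total number of frames
cap.release()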
def getBounds(img):
    # Grayscale and thresholding
    #imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #img = cv2.GaussianBlur(img, (0, 0), 0.5)
    #ret, thresh = cv2.threshold(img, 120, 255, 1)
    thresh = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY_INV, 11, 3)
    kernel = np.ones((5, 5), np.uint8)
    thresh = cv2.dilate(thresh, kernel, iterations=2)
    thresh = cv2.erode(thresh, kernel, iterations=2)
    thresh_copy = np.copy(thresh)
    thresh_copy = cv2.cvtColor(thresh_copy, cv2.COLOR_GRAY2BGR)
    # Calculate contours
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    #print contours
    contours = filter(lambda c: len(c) > 50, contours)
    blank_thresh = np.copy(thresh_copy)
    cv2.namedWindow('image')
    cv.SetMouseCallback('image', on_mouse, 0)
    cv2.imshow('image', thresh_copy)
    cv2.waitKey(0) & 0xFF
    cv2.destroyAllWindows()
    return (contours, thresh_copy)
def crop_w_mouse_live(img):
    cv2.namedWindow('Video')
    mmc.startContinuousSequenceAcquisition(1)
    while True:
        img = mmc.getLastImage()
        if mmc.getRemainingImageCount() > 0:
            img = mmc.getLastImage()
            cv2.namedWindow('real image')
            cv.SetMouseCallback('real image', onmouse, 0)
            cv2.imshow('real image', img)
        else:
            print('No frame')
        if cv2.waitKey(32) >= 0:
            break
    cv2.destroyAllWindows()
    mmc.stopSequenceAcquisition()
    x = boxes[-2][0]
    y = boxes[-2][1]
    w = boxes[-1][0] - x
    h = boxes[-1][1] - y
    print "selectROI x " + str(x) + " y " + str(y) + " w " + str(w) + " h " + str(h)
    mmc.setROI(x, y, w, h)
def __init__(self, orig):
    self.name = orig
    self.image = cv.LoadImage(orig)
    self.region = None
    self.drag_start = None
    cv.NamedWindow("Image")
    cv.SetMouseCallback("Image", self.on_mouse)
def _show_image(self):
    self.subLock.acquire(True)
    local_image = deepcopy(self._np_image)
    self.subLock.release()
    # draw circles
    for idx, points in enumerate(self._roi_points):
        cv2.circle(local_image, (points[0], points[1]), 5, (255, 0, 0), 2)
    # draw green lines
    cv2.polylines(local_image, np.int32([np.array(self._roi_points)]),
                  1, (0, 255, 0), 2)
    cv2.polylines(local_image, np.int32([np.array(self._other_roi_points)]),
                  1, (0, 255, 0), 2)
    cv.ShowImage("Learn Play game RGB", cv.fromarray(local_image))
    cv.SetMouseCallback("Learn Play game RGB", self._on_mouse_click, 0)
    cv.CreateTrackbar("Gain", "Learn Play game RGB", self._gain_slider,
                      100, self._on_gain_slider)
    cv.CreateTrackbar("Red Threshold", "Learn Play game RGB",
                      self._inrange_colour_thresh, 500, self._on_red_slider)
    cv.CreateTrackbar("High red", "Learn Play game RGB",
                      self._high_colour_slider, 40, self._on_high_colour_slider)
    cv.CreateTrackbar("Low red", "Learn Play game RGB",
                      self._low_colour_slider, 40, self._on_low_colour_slider)
    cv.WaitKey(3)
def setGateParams():
    image_size = feedread().shape[:2]
    feed_center_x = int(image_size[1] / 2)
    feed_center_y = int(image_size[0] / 2)
    boxes = []
    count = 0
    while 1:
        count += 1
        img = feedread()
        img = cv2.resize(img, None, fx=1, fy=1, interpolation=cv2.INTER_AREA)
        cv2.namedWindow('real image')
        cv.SetMouseCallback('real image', on_mouse, 0)
        cv2.imshow('real image', img)
        k = cv2.waitKey(1) & 0xFF
        print(k)
        if k == 27:
            cv2.destroyAllWindows()
            break
        elif count >= 2:
            if cv2.waitKey(0) & 0xFF == 27:
                cv2.destroyAllWindows()
                break
            count = 0
def create_homography():
    global field_counter
    filename_topview = '..//img//top-view.jpg'
    filename_sideview = '..//img//side-view.jpg'
    hgcoord_filepath = '..//txt//hgmatrix.txt'
    top_image = cv2.imread(filename_topview)
    side_image = cv2.imread(filename_sideview)
    print "Select the four corners from the Background"
    print "The corners should be selected: Left-Down, Left-Top, Right-Top, Right-Down"
    cv2.namedWindow('Side-View')
    cv.SetMouseCallback('Side-View', field_click, None)
    cv2.imshow('Side-View', side_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    side_view_corners = np.copy(field_corners)
    top_view_corners = np.array([[44, 393], [44, 30], [598, 30], [598, 393]],
                                dtype="float32")
    H = cv2.findHomography(side_view_corners, top_view_corners)[0]
    np.savetxt(hgcoord_filepath, H)
    return H
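A minimal sketch of how the homography returned by create_homography() might be applied afterwards with cv2.perspectiveTransform; the sample pixel coordinate is made up for illustration and is not part of the original code:

import numpy as np
import cv2

H = np.loadtxt('..//txt//hgmatrix.txt')                   # matrix saved by create_homography()
side_pt = np.array([[[250.0, 300.0]]], dtype=np.float32)  # an arbitrary side-view pixel (illustrative)
top_pt = cv2.perspectiveTransform(side_pt, H)             # corresponding top-view coordinate
print top_pt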
def repeat():
    # declare as globals since we are assigning to them now
    global capture
    global camera_index
    global done
    frame = cv.QueryFrame(capture)
    cv.Smooth(frame, frame, cv.CV_GAUSSIAN, 3, 3)
    imgHsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.CvtColor(frame, imgHsv, cv.CV_BGR2HSV)
    #imgHsv2 = GetThresholdedImage(imgHsv)
    #print(numpy.asarray(cv.GetMat(imgHsv)))
    imgRGBA = cv.CreateImage(cv.GetSize(frame), 8, 4)
    cv.CvtColor(frame, imgRGBA, cv.CV_BGR2RGBA)
    cv.Smooth(imgRGBA, imgRGBA, cv.CV_GAUSSIAN, 3, 3)
    (filteredImg, offsetX, offsetY) = parallelSumRed(imgRGBA, 640, 480)  # 3D array
    d = numpy.sqrt(offsetX * offsetX + offsetY * offsetY)
    if d != 0:
        print("Distance = " + str(c1 / d + c2) + "cm")
        print("OffsetX = " + str(offsetX) + "; OffsetY = " + str(offsetY))
        print("")
    imgRGB = cv.CreateImage(cv.GetSize(frame), 8, 3)
    #cv.CvtColor(Image.fromarray(filteredImg), imgRGB, cv.CV_RGBA2RGB)
    imgRGBA = cv.fromarray(numpy.reshape(filteredImg, (480, 640, 4)))
    if offsetX != 0 or offsetY != 0:
        cv.Rectangle(imgRGBA, (320 + offsetX - 6, 240 + offsetY - 6),
                     (320 + offsetX + 6, 240 + offsetY + 6),
                     (255, 0, 255, 255), 1, 8)
        cv.Line(imgRGBA, (0, 240 + offsetY), (639, 240 + offsetY),
                (255, 0, 255, 255), 1, 8)
        cv.Line(imgRGBA, (320 + offsetX, 0), (320 + offsetX, 479),
                (255, 0, 255, 255), 1, 8)
    cv.ShowImage(HSVWindow, imgRGBA)
    cv.ShowImage(original, frame)
    cv.SetMouseCallback(original, onMouseMove, [
        cv.CV_EVENT_MOUSEMOVE,
        numpy.asarray(cv.GetMat(imgHsv)),
        numpy.asarray(cv.GetMat(frame))
    ])
    #cv.SetMouseCallback(HSVWindow, onMouseMove, [cv.CV_EVENT_MOUSEMOVE, numpy.asarray(cv.GetMat(imgHsv)), numpy.asarray(cv.GetMat(frame))])
    #cv.ShowImage(filtered, imgHsv2)
    c = cv.WaitKey(10)
    if str(c) == "27":  # if ESC is pressed
        print("Thank You!")
        done = True
    if str(c) == "99":  # 'c' for calibration
        calibration(int(input("How many data points: ")))
def __init__(self, input_filename, output_filename):
    self.prev_pt = None
    self.outname = output_filename
    self.orig = cv.LoadImage(input_filename)
    self.image = cv.CloneImage(self.orig)
    self.chans = self.im_to_lsb()
    cv.ShowImage("image", self.image)
    cv.ShowImage("LSB", self.chans[0])
    cv.SetMouseCallback("image", self.on_mouse)
def start(self):
    _, self.img = self.cam.read()
    cv2.imshow("img", self.img)
    cv.SetMouseCallback("img", self.__mouseHandler, None)
    if not self.bb:
        _, self.img = self.cam.read()
        cv2.imshow("img", self.img)
        cv2.waitKey(30)
    cv2.waitKey(0)
def handleMouse(self):
    self.readMouseEvents()
    ret = False
    if self.select.state == pss.IDLE:
        return False
    # redraw image if needed
    if self.select.refreshDraw:
        self.select.refreshDraw = False
        self.drawPlate()
    if self.select.state == pss.READY:
        # disable mouse while handling save_menu
        cv.SetMouseCallback("Camera", self.on_dummy, param=0)
        # enter in save_menu
        ret = self.saveMenu()
        self.select.state = pss.IDLE
        cv.SetMouseCallback("Camera", self.on_mouse, param=0)
        self.drawPlate(False)
    return ret
def __init__(self): self.capture = cv.CaptureFromCAM(0) cv.NamedWindow("CamShiftDemo", 1) cv.NamedWindow("Backprojection", 1) cv.NamedWindow("Histogram", 1) cv.SetMouseCallback( "CamShiftDemo", self.on_mouse) #Instantiate call back for mouse event self.drag_start = None # Set to (x,y) when mouse starts drag self.track_window = None # Set to rect when the mouse drag finishes
def __init__(self):
    self.capture = cv.CaptureFromCAM(1)
    cv.NamedWindow("CamShiftDemo", 1)
    cv.NamedWindow("Histogram", 1)
    cv.SetMouseCallback("CamShiftDemo", self.on_mouse)
    self.drag_start = None    # Set to (x,y) when mouse starts drag
    self.track_window = None  # Set to rect when the mouse drag finishes
    print(
        "Keys:\n"
        "    ESC - quit the program\n"
        "    b - switch to/from backprojection view\n"
        "To initialize tracking, drag across the object with the mouse\n")
def interactiveMask((img, dep)):
    # Event handler
    pointQueue = []

    def handleMouseClick(event, x, y, flags, parameters):
        if event == cv.CV_EVENT_LBUTTONDOWN:
            pointQueue.append((x, y))
            #print pointQueue

    # Ask user to click points on depth image
    #imshow("Click mask", dep * 15)
    imshow("Click mask", img)
    cv2.namedWindow("Click mask", 1)
    cv.SetMouseCallback("Click mask", handleMouseClick, None)
    key = 0
    print "[interactiveMask] Press SPACE when finished (ESC to quit):"
    while key != 32:
        key = waitKey(10) % 256
        if key == 27:
            print "Quit program"
            exit(1)
    cv2.destroyWindow("Click mask")
    print "[interactiveMask] final point queue:", pointQueue
    print ""
    # Show masked for verification
    mask = np.zeros(dep.shape, dtype="uint8")
    cv2.fillPoly(mask, [np.array(pointQueue, dtype="int32")], (255, 255, 255))
    #imshow("mask", mask)
    img2 = img.copy()
    imshow("masked img", img2)
    img2[mask == 0] = 0
    imshow("masked img", img2)
    dep2 = dep.copy()
    dep2[mask == 0] = 0
    imshow("masked dep", dep2 * 15)
    waitKey(0)
    # Write to file
    imwrite(os.path.join(DEFAULT_PARAMS_PATH, "mask.png"), mask)
    # Clean finish
    destroyWindow("Click mask")
    destroyWindow("masked img")
    destroyWindow("masked dep")
    return ~mask
def buildMask(folder):
    fnames = os.listdir(folder)
    num = 100
    imgPath = '%s/image_%i_rgb.png' % (folder, num)
    print imgPath
    depPath = '%s/image_%i_dep.png' % (folder, num)
    print depPath
    img = imread(imgPath)
    imgDepth = imread(depPath, -1)
    img2 = img.copy()
    img2[imgDepth <= 0, :] = 0
    #imshow('Click mask', img2)
    imshow('Click mask', imgDepth * 15)
    pointQueue = []

    def handleMouseClick(event, x, y, flags, parameters):
        if event == cv.CV_EVENT_LBUTTONDOWN:
            pointQueue.append((x, y))
            print pointQueue

    cv2.namedWindow("Click mask", 1)
    cv.SetMouseCallback("Click mask", handleMouseClick, None)
    key = 0
    while (key % 256) != 32:
        key = waitKey(10)
    print 'final point queue:', pointQueue
    mask = np.ones((img.shape[0], img.shape[1]), dtype='uint8') * 255
    cv2.fillPoly(mask, [np.array(pointQueue, dtype="int32")], (0, 0, 0))
    imshow('mask', mask)
    img2[mask > 0] = 0
    imshow('masked img', img2)
    dep2 = imgDepth.copy()
    dep2[mask > 0] = 0
    imshow('masked dep', dep2 * 10)
    waitKey(0)
    outpath = os.path.join(folder, 'mask.png')
    imwrite(outpath, mask)
    print 'wrote', outpath
def main(): # Read the .mp4 video using OpenCV Python API cv2.VideoCapture cap = cv2.VideoCapture("football_right.mp4") _, f = cap.read() #f = cv2.imread("background.jpg", cv2.CV_LOAD_IMAGE_COLOR) f_hsv = cv2.cvtColor(f, cv2.COLOR_BGR2HSV) cv2.namedWindow('frame1') cv.SetMouseCallback('frame1', on_mouse, f_hsv) cv2.imshow('frame1', f) cv2.waitKey(0) cap.release()
def add_region(self):
    count = 0
    count += 1
    url = self.frame()
    self.img = cv2.imread(url)
    cv2.namedWindow('real image')
    cv.SetMouseCallback('real image', self.on_mouse, 0)
    cv2.startWindowThread()
    while (1):
        cv2.imshow('real image', self.img)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            cv2.destroyAllWindows()
            break
    print("naprej")  # Slovenian: "next"
def run(self):
    cv2.namedWindow('image')
    self.initTemplate()
    cv.SetMouseCallback('image', self.on_mouse, 0)  # launch window
    while True:
        ret, im = self.cam.read()
        warped = crop.doWarp(im, self.rect)
        disp = self.processFrame(warped)
        cv2.imshow('image', disp[::-1, ::-1])
        if cv2.waitKey(1) == ord('q'):
            cv2.destroyAllWindows()
            return
def select_rectangle(cam):
    info = RectangleInfo()
    window_name = 'Select Rectangle'
    cv2.namedWindow(window_name)
    while not info.selection_finished:
        retval, img = cam.read()
        cv.SetMouseCallback(window_name, create_mouse_event_handler(img, info), 0)
        img_copy = img.copy()
        cv2.rectangle(img_copy, info.p1, info.p2, (0, 0, 255))
        cv2.imshow(window_name, img_copy)
        if cv2.waitKey(33) == 27:
            cv2.destroyAllWindows()
            break
    return ((info.p1, info.p2), img)
def __init__(self):
    filename = "2013y04m02d22h07m25s.jpg"
    self.capture = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_COLOR)
    #self.capture = cv.CaptureFromFile("/home/bat/zenith-wind-power-read-only/KiteControl-Qt/videos/kiteFlying.avi")
    cv.NamedWindow("SelectROI", 1)
    cv.NamedWindow("Histogram", 1)
    cv.SetMouseCallback("SelectROI", self.on_mouse)
    self.drag_start = None    # Set to (x,y) when mouse starts drag
    self.track_window = None  # Set to rect when the mouse drag finishes
    print(
        "Keys:\n"
        "    ESC - quit the program\n"
        "    b - switch to/from backprojection view\n"
        "To initialize tracking, drag across the object with the mouse\n")
def resize(img, width=None, height=None, interactive=False):
    result = img
    img_height, img_width = img.shape[:2]
    if interactive:
        global mx, my
        mx, my = img_width, img_height
        cv2.namedWindow('seam', cv.CV_WINDOW_AUTOSIZE)
        cv.SetMouseCallback('seam', window_callback, img)
        cv2.imshow('seam', result)
        cv2.waitKey(0)
        print 'Resizing to {} (width) x {} (height).'.format(mx, my)
        if height is None:
            height = my
        if width is None:
            width = mx
    dy = img_height - height if img_height - height > 0 else 0
    dx = img_width - width if img_width - width > 0 else 0
    for i in xrange(dy):
        energies = cumulative_energies_horizontal(energy(result))
        seam = horizontal_seam(energies)
        draw_seam(result, seam, interactive=interactive)
        result = remove_horizontal_seam(result, seam)
    for i in xrange(dx):
        energies = cumulative_energies_vertical(energy(result))
        seam = vertical_seam(energies)
        draw_seam(result, seam, interactive=interactive)
        result = remove_vertical_seam(result, seam)
    # cv2.imwrite('resized.jpg', result)
    print 'Press any key to close the window.'
    cv2.imshow('seam', result)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def __init__(self):
    self.capture = cv.CaptureFromCAM(0)
    self.frame = cv.CreateImage((320, 200), 8, 3)
    self.backproject = cv.CreateImage((320, 200), 8, 3)
    self.header = None
    cv.NamedWindow("CamShiftDemo", 1)
    cv.NamedWindow("Histogram", 1)
    cv.SetMouseCallback("CamShiftDemo", self.on_mouse)
    self.drag_start = None    # Set to (x,y) when mouse starts drag
    self.track_window = None  # Set to rect when the mouse drag finishes
    self.pause = False
    print(
        "Keys:\n"
        "    ESC - quit the program\n"
        "    b - switch to/from backprojection view\n"
        "    p - pause processing\n"
        "To initialize tracking, drag across the object with the mouse\n")
def init(landmarks, img):
    """Allows the user to provide an initial fit for the given model in the
    given image by dragging the mean shape to the right position on a dental
    radiograph.

    Args:
        landmarks (Landmark): A model.
        img (nparray): An image to fit the model on.

    Returns:
        The centroid of the manual fit.
        The landmark points, adapted to the position chosen by the user and
        the scale of the image.
    """
    global tooth
    oimgh = img.shape[0]
    img, scale = rg.resize(img, 1200, 800)
    imgh = img.shape[0]
    canvasimg = np.array(img)

    # transform model points to image coord
    points = landmarks.as_matrix()
    min_x = abs(points[:, 0].min())
    min_y = abs(points[:, 1].min())
    points = [((point[0] + min_x) * scale, (point[1] + min_y) * scale)
              for point in points]
    tooth = points
    pimg = np.array([(int(p[0] * imgh), int(p[1] * imgh)) for p in points])
    cv2.polylines(img, [pimg], True, (0, 255, 0))

    # show gui
    cv2.imshow('choose', img)
    cv.SetMouseCallback('choose', __mouse, canvasimg)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    centroid = np.mean(tooth, axis=0)
    return centroid, Landmarks(
        np.array([[point[0] * oimgh, point[1] * oimgh] for point in tooth]))
def crop_w_mouse(img):
    print "crop"
    down = False
    while (1):
        cv2.namedWindow('real image')
        cv.SetMouseCallback('real image', onmouse, 0)
        cv2.imshow('real image', img)
        if cv2.waitKey(33) == 27:
            cv2.destroyAllWindows()
            break
    x = boxes[-2][0]
    y = boxes[-2][1]
    w = boxes[-1][0] - x
    h = boxes[-1][1] - y
    print "selectROI x " + str(x) + " y " + str(y) + " w " + str(w) + " h " + str(h)
    mmc.setROI(x, y, w, h)
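For comparison, a minimal sketch of the same click-to-collect-corners pattern using only the cv2 API (cv2.setMouseCallback and cv2.EVENT_LBUTTONDOWN); the callback body here is illustrative and is not the onmouse handler the snippets above rely on:

import cv2

boxes = []

def onmouse(event, x, y, flags, param):
    # record the clicked pixel as a prospective ROI corner
    if event == cv2.EVENT_LBUTTONDOWN:
        boxes.append((x, y))

cv2.namedWindow('real image')
cv2.setMouseCallback('real image', onmouse)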
def _show_image(self):
    self.subLock.acquire(True)
    local_image = deepcopy(self._np_image)
    self.subLock.release()
    for idx, points in enumerate(self._roi_points):
        cv2.circle(local_image, (points[0], points[1]), 5, (255, 0, 0), 2)
    cv2.polylines(local_image, np.int32([np.array(self._roi_points)]),
                  1, (0, 255, 0), 2)
    cv2.imshow("Connect Four RGB", local_image)
    cv.SetMouseCallback("Connect Four RGB", self._on_mouse_click, 0)
    cv.CreateTrackbar("Gain", "Connect Four RGB", self._gain_slider,
                      100, self._on_gain_slider)
    cv.CreateTrackbar("Red Threshold", "Connect Four RGB", self._red_thresh,
                      500, self._on_red_slider)
    cv.CreateTrackbar("Yellow Threshold", "Connect Four RGB",
                      self._yellow_thresh, 500, self._on_yellow_slider)
    cv.WaitKey(3)
def run(self):
    """Start loop which gets a new image, then processes it"""
    # Create display window.
    cv2.namedWindow("Blob Detector")
    cv2.moveWindow("Blob Detector", 100, 100)
    if self._trainable:
        print "Using mouse clicking."
        cv.SetMouseCallback("Blob Detector", self.onmouse)
    else:
        print "Not using mouse clicking."
    while True:
        self.ticker.tick(True)
        # Get the image
        cv_im = self.get_new_image()[0]
        if cv_im:
            # Pre-process the image
            HSV_image = self.preprocess_image(cv_im)
            # Create a copy image on which found contours are shown.
            self.draw_image = copy.deepcopy(HSV_image)
            if self._trainable and self.add_from_next_image:
                # Try to add the area specified by the most recent click.
                self.add_new_cube(HSV_image)
            self.detection_process(HSV_image)
            self.image_display(self.draw_image)
        else:
            # If no image is received, nothing can be done.
            print "No image received!"
            print "-------------------------------"
        self.update()
def manualCropProcess(imPath):
    #if __name__ == '__main__':
    count = 0
    while (1):
        count += 1
        global img
        img = cv2.imread(imPath, 1)
        #img = cv2.resize(img, None, fx=0.25, fy=0.25)
        #height, width, _ = img.shape
        cv2.namedWindow('real image')
        # cv2.resizeWindow('real image', int(width * .30), int(height * .30))
        cv.SetMouseCallback('real image', on_mouse, 0)
        imgRes = cv2.resize(img, None, fx=0.25, fy=0.25)
        cv2.imshow('real image', imgRes)
        if count < 50:
            if cv2.waitKey(33) == 27:
                cv2.destroyAllWindows()
                break
        elif count >= 50:
            if cv2.waitKey(0) == 27:
                cv2.destroyAllWindows()
                break
            count = 0
def crop_Images(images):
    global drawingBox
    folder = 'croped/'
    os.system('rm -r ' + folder)
    os.system('mkdir ' + folder)
    cv.NamedWindow('Crop')
    for im in images:
        print im
        try:
            image = cv.LoadImage(im)
            temp = cv.CloneImage(image)
            cv.SetMouseCallback('Crop', mouse_Callback, image)
            while True:
                cv.Copy(image, temp)
                if drawingBox:
                    draw_box(temp)
                cv.ShowImage('Crop', temp)
                key = cv.WaitKey(10)
                if key == 13:
                    break
        except:
            print 'Ocurrio un error'  # Spanish: "An error occurred"
    cv.DestroyWindow('Crop')