def initialize(): """Initializes interface, images, and other global variables we need.""" #Initialize all the windows and other things needed for the program interface #Set up the windows containing the image from the kinect camera, the altered # threshold image the threshold sliders, and the kinect range image. Interface.initialize(D) #Set the method to handle incoming mouse data in the Image window cv.SetMouseCallback('Image', HandleData.mouseImage, None) #Set the method to handle incoming mouse data in the Range window cv.SetMouseCallback('Range', HandleData.mouseRange, None)
def play_video(path, name, posmsec=0, fps=0):
    """Play the video at `path` in a window titled `name`.

    Starts at position `posmsec` milliseconds.  If `fps` is not positive the
    frame rate is read from the file; playback stops at end-of-file or on a
    right-click in the window.
    """
    capture = cv.CaptureFromFile(path)
    if fps <= 0:
        fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
    # BUG FIX: some containers report 0 (or NaN) FPS; `not fps > 0` catches
    # both and avoids a ZeroDivisionError below.
    if not fps > 0:
        fps = 25.0
    interval = int(1000.0 / fps)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_POS_MSEC, posmsec)
    # One-element list so the nested callback can mutate the flag
    # (Python 2 has no `nonlocal`).
    playing = [True]
    cv.NamedWindow(name)

    def on_mouse(event, x, y, flags, param):
        # Right-click stops playback.
        if event == cv.CV_EVENT_RBUTTONDOWN:
            playing[0] = False

    cv.SetMouseCallback(name, on_mouse)
    while playing[0]:
        frame = cv.QueryFrame(capture)
        if frame is None:
            playing[0] = False
        else:
            cv.ShowImage(name, frame)
            cv.WaitKey(interval)
    cv.DestroyWindow(name)
    del capture
def select_optimal_colors(self):
    """Open a live camera view so the user can click & drag over the object
    whose colors should be tracked.  Exits on ESC, 'q', or when an external
    end_record request arrives."""
    # Close anything that may still be open from a previous session.
    cv.DestroyAllWindows()
    capture = cv.CaptureFromCAM(self.camera_index)
    if not capture:
        QMessageBox.information(self, "Camera Error", "Camera not found")
        return
    window = "click & drag to select object"
    cv.NamedWindow(window, cv.CV_WINDOW_AUTOSIZE)
    cv.MoveWindow(window, 610, 0)
    cv.SetMouseCallback(window, self.mouseHandler, None)
    while True:
        # Skip frames while another part of the app is mid-update.
        if self.busy_updating:
            continue
        frame = cv.QueryFrame(capture)
        if not frame:
            break
        cv.ShowImage(window, frame)
        self.frame = frame
        key = cv.WaitKey(1)
        # ESC (27) or 'q' (113) quits the camera view.
        if key == 27 or key == 113 or self.end_record:
            cv.DestroyAllWindows()
            self.end_record = False
            break
def imagen_homografia(n):
    """Fetch a photo from the IP camera, compute (or load) the homography,
    and return the perspective-corrected image at small and full size.

    On the first call (n == 0, with teclado 1 or 3) the user picks 4 points
    by clicking; the points and homography matrix are saved to disk.
    Otherwise previously saved points are loaded.
    Returns (out_small, out_big).
    """
    global points, width, height, teclado
    urllib.urlretrieve("http://192.168.0.100:8080/photo.jpg", "foto.jpg")
    foto = cv.LoadImage('foto.jpg')
    im_in = cv.CloneImage(foto)  # CALIBRACION
    width, height = cv.GetSize(im_in)
    im = cv.CloneImage(foto)
    if n == 0 and (teclado == 1 or teclado == 3):
        # Interactive calibration: user clicks the 4 reference points.
        cv.ShowImage('Escoger 4 puntos', im)
        cv.SetMouseCallback('Escoger 4 puntos', mouse, im)
        cv.WaitKey(0)
        guardarpoints(points)
        # BUG FIX: the original used `h**o` as a variable name, which is not
        # a valid Python identifier (SyntaxError); renamed to `homog`.
        homog = homografia(im_in.width, im_in.height, im.width, im.height)
        cv.Save('homography.cvmat', homog)
    else:
        points = cargarpoints()
        homog = homografia(im_in.width, im_in.height, im.width, im.height)
    out_big = cv.CloneImage(im_in)
    cv.WarpPerspective(im_in, out_big, homog)
    out_small = cv.CloneImage(im)
    cv.Resize(out_big, out_small)
    return out_small, out_big
def initUI():
    """ initializes the UI and ROS publishers/subscribers """
    global D
    # initialize openCV window
    cv.NamedWindow("LibraComplex")
    # BUG FIX: the window created above is "LibraComplex"; the original moved
    # a nonexistent "Ranges" window, so the move silently did nothing.
    cv.MoveWindow("LibraComplex", WIN_WIDTH / 2, WIN_HEIGHT / 2)
    D.image = cv.CreateImage((WIN_WIDTH, WIN_HEIGHT), 8, 3)  # 8 is pixel depth and 3 is # of channels
    # initialize mouse click callback
    # see http://docs.opencv.org/modules/highgui/doc/user_interface.html#setmousecallback
    cv.SetMouseCallback("LibraComplex", onMouse, None)
    # initialize ROS publishers and subscribers
    rospy.init_node("node_nav", anonymous=True)
    rospy.Subscriber("hallwayType", HallwayType, command_callback)
    D.commandPub = rospy.Publisher("navCommand", NavCommand)
    # prepare map with TMap
    D.prev_type = None
    D.map = TMap()
    D.map.generateMap(LIBRA)
    # Route from node 0 to node 22; the command list ends with an explicit
    # STOP and the path endpoints are trimmed off.
    D.start_node = 0
    D.dst_node = 22
    D.curr_node = D.start_node
    D.commands, D.cmd_path = D.map.generateCommands(D.start_node, D.dst_node)
    D.commands.append(["STOP", "STOP"])
    D.cmd_path = D.cmd_path[1:-1]
def __init__(self):
    """Set up the camshift tracker node: ROS wiring, display windows, and
    initial tracking state."""
    rospy.init_node('cam_shift_node')
    self.ROI = rospy.Publisher("roi", RegionOfInterest, queue_size=1)
    # Main display window for the tracker output.
    self.cv_window_name = "Camshift Tracker"
    cv.NamedWindow(self.cv_window_name, 0)
    # Converts between ROS Image messages and OpenCV images.
    self.bridge = CvBridge()
    # Raw camera frames drive image_callback.
    self.image_sub = rospy.Subscriber("/camera/image_raw", Image, self.image_callback)
    # Smaller secondary window for the CamShift hue histogram.
    cv.NamedWindow("Histogram", 0)
    cv.MoveWindow("Histogram", 700, 10)
    cv.SetMouseCallback(self.cv_window_name, self.on_mouse)
    self.drag_start = None    # (x, y) once a mouse drag begins
    self.track_window = None  # selection rect once the drag finishes
    self.hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0, 180)], 1)
    self.backproject_mode = False
def getHsvRange(self):
    """Show the live camera feed with the HSV value under the mouse drawn on
    it; return the (H, S, V) tuple at the cursor when ESC is pressed."""
    self.x_co = 0
    self.y_co = 0
    cv.NamedWindow('camera feed', cv.CV_WINDOW_AUTOSIZE)
    capture = cv.CaptureFromCAM(1)
    font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 1, 0, 2, 8)
    # BUG FIX: register the mouse callback once, not on every frame.
    cv.SetMouseCallback("camera feed", self.on_mouse, 0)
    while True:
        src = cv.QueryFrame(capture)
        cv.Smooth(src, src, cv.CV_BLUR, 3)
        hsv = cv.CreateImage(cv.GetSize(src), src.depth, 3)
        cv.CvtColor(src, hsv, cv.CV_BGR2HSV)
        # Sample the HSV pixel under the last-known mouse position
        # (on_mouse updates self.x_co / self.y_co).
        s = cv.Get2D(hsv, self.y_co, self.x_co)
        cv.PutText(src, str(s[0]) + "," + str(s[1]) + "," + str(s[2]),
                   (self.x_co, self.y_co), font, (55, 25, 255))
        cv.ShowImage("camera feed", src)
        if cv.WaitKey(10) == 27:
            # The unreachable `break` after `return` was removed.
            return (s[0], s[1], s[2])
def main():
    """Step through the shot slit-scan PNGs one at a time, letting the user
    navigate with arrow/page keys (and click via mouse_callback), then chain
    into the next pipeline script."""
    global startframe
    tree = utils.open_project(sys.argv)
    if tree == None:
        return
    os.chdir("shot_slitscans")
    '''cv.NamedWindow("win", cv.CV_WINDOW_AUTOSIZE)
    cv.MoveWindow("win", 500, 200)
    cv.SetMouseCallback("win", mouse_callback)'''
    # Blank background image shown in each window before the slit-scan.
    bg_img = cv.CreateImage((576, 576), cv.IPL_DEPTH_8U, 1)
    #cv.Set(bg_img, (180))
    files = sorted(glob.glob("*.png"))
    print(files)
    i = 0
    while i < len(files):
        file = files[i]
        # The shot's start frame number is encoded in the 4th underscore
        # field of the file name.
        startframe = int(file.split("_")[3].split(".")[0])
        print(startframe)
        cap = cv.CreateFileCapture(file)
        img = cv.QueryFrame(cap)
        # Window title shows progress through the file list as a percentage.
        win_name = "%d" % (int(float(i + 1) * 100.0 / len(files))) + "% - " + file
        cv.NamedWindow(win_name, cv.CV_WINDOW_AUTOSIZE)
        cv.MoveWindow(win_name, 500, 200)
        cv.SetMouseCallback(win_name, mouse_callback)
        cv.ShowImage(win_name, bg_img)
        cv.ShowImage(win_name, img)
        key = cv.WaitKey(0)
        print(key)
        # Each key is matched against two codes: the plain code and the
        # GTK/Qt-extended variant.
        if key in [2555904, 1113939]:  # right arrow
            i += 1
        elif key in [2424832, 1113937]:  # left arrow
            i -= 1
            if i < 0:
                i = 0
        elif key in [27, 1048603]:  # ESC
            break
        elif key in [1113941]:  # page up
            i -= 100
            if i < 0:
                i = 0
        elif key in [1113942]:  # page down
            i += 100
        else:
            print("Unknown key code: {}".format(key))
        cv.DestroyWindow(win_name)
    # Return to the script's own directory and launch the next stage.
    src_dir = os.path.dirname(sys.argv[0])
    os.chdir(src_dir)
    os.system("python 02_2_save-shots.py \"" + sys.argv[1] + "\"")
def create_mtf(name, width, height, screen, distance_user):
    """Render a modulation-transfer-function test chart: spatial frequency
    grows exponentially along x while contrast decays exponentially along y.
    The chart is shown in the "mtf" window (with user_x handling mouse
    input) and saved to mtf.png."""
    out = cv.CreateImage((width, height), 8, 1)
    # Parameters of the exponential frequency sweep and contrast ramp.
    b = math.log(width / 2.0) / width
    a = 1.0 / (b * width)
    d = 0.01
    c = math.log(d) / height
    # Horizontal profile: sine whose frequency rises exponentially.
    x_vector = []
    for col in xrange(width):
        x_vector.append(0.5 + 0.5 * math.sin(2 * math.pi * a * math.e ** (b * col)))
    # Vertical profile: contrast rising from d up to 1.
    y_vector = []
    for row in xrange(height):
        y_vector.append(d * math.e ** (-c * row))
    # Outer product of the two profiles, scaled to 8-bit grey levels.
    for col in xrange(width):
        for row in xrange(height):
            out[row, col] = 255.0 * (x_vector[col]) * y_vector[row]
    cv.ShowImage("mtf", out)
    cv.MoveWindow("mtf", 0, 0)
    cv.SetMouseCallback("mtf", user_x, [width, height, a, b, screen, distance_user])
    cv.SaveImage("mtf.png", out)
    cv.WaitKey(0)
def MouseCalibrate(image, markers):
    """Interactively assign an image position to every marker: for each
    marker in turn, wait for the user to click its location in the
    calibration window, then store the clicked point in marker.pos."""
    windowName = "Choose Point"
    cv2.namedWindow(windowName)
    gotPoint = [False] * len(markers)
    # One-element lists so the nested callback can mutate them
    # (Python 2 has no `nonlocal`).
    ind = [0]
    pt = [0, 0]

    def mouseback(event, x, y, flags, param):
        # Fires when the left mouse button is *released* (the original
        # comment claimed "double-clicked", which did not match the event).
        if event == cv.CV_EVENT_LBUTTONUP:
            print x, y
            pt[0] = x
            pt[1] = y
            gotPoint[ind[0]] = True

    cv.SetMouseCallback(windowName, mouseback)
    for i in range(0, len(markers)):
        # redisplay image and title
        cv2.imshow(windowName, image)
        ind[0] = i
        # ask for pt: keep showing live frames until this marker is clicked.
        # NOTE(review): calibCap is a module-level capture — confirm it is
        # opened before this function runs.
        while not gotPoint[ind[0]]:
            ret, calibImage = calibCap.retrieve()
            cv2.imshow(windowName, calibImage)
            cv.WaitKey(1)
        # add point to matrix
        markers[i].pos = [pt[0], pt[1]]
    cv.DestroyWindow(windowName)
def image_callback(data):
    """Process one incoming camera frame: normalize per-channel brightness,
    derive several color-space channel planes, threshold for red, extract
    circular contours, and display the annotated image.

    Relies on module-level scratch images (rgb_*, hsv_*, lab_*, ...) that
    are allocated elsewhere.
    """
    global running
    if (running):
        image = bridge.imgmsg_to_cv(data, "bgr8")
        # Normalize each channel so its mean lands at 128 (rough
        # illumination invariance).
        cv.Split(image, rgb_r, rgb_g, rgb_b, None)
        red_mean = cv2.mean(np.asarray(rgb_r[:, :]))
        cv.Div(src2=cv.fromarray(np.ones((480, 640))),
               src1=rgb_r, dst=scaled_r, scale=128 / red_mean[0])
        green_mean = cv2.mean(np.asarray(rgb_g[:, :]))
        cv.Div(src2=cv.fromarray(np.ones((480, 640))),
               src1=rgb_g, dst=scaled_g, scale=128 / green_mean[0])
        blue_mean = cv2.mean(np.asarray(rgb_b[:, :]))
        cv.Div(src2=cv.fromarray(np.ones((480, 640))),
               src1=rgb_b, dst=scaled_b, scale=128 / blue_mean[0])
        cv.Merge(scaled_r, scaled_g, scaled_b, None, cv_image)
        cv.CvtColor(cv_image, hsv, cv.CV_BGR2HSV)  # --convert from BGR to HSV
        cv.CvtColor(cv_image, lab, cv.CV_BGR2Lab)
        cv.Split(hsv, hsv_h, hsv_s, hsv_v, None)
        cv.Split(cv_image, rgb_r, rgb_g, rgb_b, None)
        cv.Split(lab, lab_l, lab_a, lab_b, None)
        cv.Split(luv, luv_l, luv_u, luv_v, None)
        cv.Split(hls, hls_h, hls_l, hls_s, None)
        # BUG FIX: the original passed `xyz_x` twice, clobbering the X plane
        # with Z and never filling xyz_z.  Assumes a module-level xyz_z
        # exists alongside the other channel planes — TODO confirm.
        cv.Split(xyz, xyz_x, xyz_y, xyz_z, None)
        cv.Split(ycrcb, ycrcb_y, ycrcb_cr, ycrcb_cb, None)
        # Combine channels that discriminate red objects well.
        cv.Not(lab_a, a_not)
        cv.Sub(hsv_s, a_not, sa)
        cv.Sub(luv_u, hls_h, test)
        cv.Sub(hls_s, hls_h, sminh)
        threshold_red(sa)
        cv.ShowImage("red", red_dilated_image)
        red_contours, _ = cv2.findContours(
            image=np.asarray(red_dilated_image[:, :]),
            mode=cv.CV_RETR_EXTERNAL,
            method=cv.CV_CHAIN_APPROX_SIMPLE)
        print_lidar_projections(cv_image)
        circles = extract_circles(red_contours, [1, 0, 0])
        for x, y, radius in circles:
            cv.Circle(cv_image, (x, y), radius, [0, 0, 1], 3)
        cv.SetMouseCallback("camera feed", mouse_callback, hsv_image)
        cv.ShowImage("camera feed", cv_image)
        cv.WaitKey(3)
def __init__(self, node_name):
    """Set up the ROS node, display window, cv_bridge, publishers and
    subscribers, and all tracker state."""
    rospy.init_node(node_name)
    rospy.on_shutdown(self.cleanup)
    self.node_name = node_name
    # Topic names (remappable via ROS).
    self.input_rgb_image = "input_rgb_image"
    self.input_depth_image = "input_depth_image"
    self.output_image = "output_image"
    self.show_text = rospy.get_param("~show_text", True)
    self.show_features = rospy.get_param("~show_features", True)
    """ Initialize the Region of Interest and its publisher """
    #self.ROI = RegionOfInterest()
    #self.pubROI = rospy.Publisher("/roi", RegionOfInterest)
    """ Do the same for the point cluster publisher """
    #self.cluster3d = PointStamped()
    #self.pub_cluster3d = rospy.Publisher("/target_point", PointStamped)
    """ Initialize a number of global variables """
    self.image = None
    self.image_size = None
    self.depth_image = None
    self.grey = None
    self.selected_point = None
    self.selection = None
    self.drag_start = None
    self.keystroke = None
    self.key_command = None
    self.detect_box = None
    self.track_box = None
    self.display_box = None
    self.keep_marker_history = False
    self.night_mode = False
    self.auto_face_tracking = True
    self.cps = 0 # Cycles per second = number of processing loops per second.
    self.cps_values = list()
    self.cps_n_values = 20
    self.flip_image = False
    """ Create the display window """
    self.cv_window_name = self.node_name
    cv.NamedWindow(self.cv_window_name, cv.CV_NORMAL)
    cv.ResizeWindow(self.cv_window_name, 640, 480)
    """ Create the cv_bridge object """
    self.bridge = CvBridge()
    """ Set a call back on mouse clicks on the image window """
    cv.SetMouseCallback(self.node_name, self.on_mouse_click, None)
    """ A publisher to output the display image back to a ROS topic """
    self.output_image_pub = rospy.Publisher(self.output_image, Image)
    """ Subscribe to the raw camera image topic and set the image processing callback """
    self.image_sub = rospy.Subscriber(self.input_rgb_image, Image, self.image_callback, queue_size=1)
    self.depth_sub = rospy.Subscriber(self.input_depth_image, Image, self.depth_callback, queue_size=1)
    rospy.loginfo("Starting " + self.node_name)
def __init__( self ):
    """ Constructor. """
    rospy.init_node('UIWindow')
    """ Getting ROS parameters """
    """ Pseudo constants """
    """Members """
    """ Subscribers """
    """ Publishers """
    """Creating Move base client """
    print "creating the window"
    cv.NamedWindow("ui_kinect_image", cv.CV_WINDOW_AUTOSIZE)
    # NOTE(review): this registers self.onmouse (expected to be a method on
    # the class); the nested `onmouse` defined below is never registered and
    # is dead code as written — confirm which handler is intended.
    cv.SetMouseCallback("ui_kinect_image", self.onmouse)
    import cv, numpy
    """Create a simple matrix to display """
    a = numpy.ones((480, 640))
    mat = cv.fromarray(a)
    print mat.rows
    print mat.cols
    cv.ShowImage("ui_kinect_image", mat)

    def onmouse(event, x, y, flags, param):
        # NOTE(review): drag_start/sel are plain locals here; without
        # `nonlocal` (Python 2) state set during one event is not visible on
        # the next call, so this drag logic cannot work as written.
        if event == cv.EVENT_LBUTTONDOWN:
            drag_start = x, y
            sel = 0,0,0,0
        elif event == cv.EVENT_LBUTTONUP:
            if sel[2] > sel[0] and sel[3] > sel[1]:
                # NOTE(review): str + int concatenation raises TypeError;
                # these prints would crash if ever reached.
                print "sel[0]=" + sel[0]
                print "sel[1]=" + sel[1]
                print "sel[2]=" + sel[2]
                print "sel[3]=" + sel[3]
            drag_start = None
        elif drag_start:
            #print flags
            if flags & cv.EVENT_FLAG_LBUTTON:
                minpos = min(drag_start[0], x), min(drag_start[1], y)
                maxpos = max(drag_start[0], x), max(drag_start[1], y)
                sel = minpos[0], minpos[1], maxpos[0], maxpos[1]
                print "Dragging"
            else:
                print "selection is complete"
                drag_start = None
def __init__(self, controller):
    """Set up the RGB preview window, a timestamped AVI recorder, and the
    mouse hook that forwards setpoint clicks to the controller."""
    self.text = []
    # Record to a timestamped MJPG AVI at 25 fps.
    video_path = "/home/jing/Videos/" + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M") + ".avi"
    self.writer = cv.CreateVideoWriter(video_path,
                                       cv.CV_FOURCC('M', 'J', 'P', 'G'),
                                       25, (IMG_WIDTH, IMG_HEIGHT), True)
    self.lasttime = time.time()
    self.sp = (0, 0)  # current setpoint
    cv.NamedWindow('RGB', cv.CV_WINDOW_AUTOSIZE)
    cv.SetMouseCallback('RGB', self.mouse_ev)
    self.sp_callback = controller.set_sp_callback
    self.track = False
def update_windows(n=3):
    """Refresh the newest `n` capture windows, labelling each image with its
    1-based position in the global capture list."""
    total = len(captures)
    for idx in xrange(1, min(n, total) + 1):
        # Work on a copy so the stored capture is not drawn over.
        labelled = cv.CloneImage(captures[-idx])
        cv.PutText(labelled, "%s" % (total - idx + 1), (1, 24), font, (255, 255, 255))
        cv.ShowImage("card_%d" % idx, labelled)
        cv.SetMouseCallback("card_%d" % idx, card_window_clicked, total - idx)
def show(self, window="Image Montage", pos=None, delay=0):
    '''
    Display the montage image and register the mouse-handling callback so
    the user can scroll the montage by clicking the increment/decrement
    arrows.
    '''
    montage = self.asImage()
    cv.NamedWindow(window)
    cv.SetMouseCallback(window, self._onClick, window)
    montage.show(window=window, pos=pos, delay=delay)
def run_interface(data, sample_rate, spectrogram, view_width, speed=1):
    """Interactive spectrogram player: display the spectrogram while
    streaming the audio, with keyboard control of pause (space), direction
    ('r'), speed ('+'/'-'), and quit ('q')."""
    spec_view = sndtools.spectrogram.SpectrogramView(spectrogram, view_width)
    cv.NamedWindow("Spectrogram")
    cv.SetMouseCallback("Spectrogram", mouse_callback, (spec_view, sample_rate))
    p = pyaudio.PyAudio()
    # 16-bit mono output; playback speed is controlled via the sample rate.
    stream = p.open(format=p.get_format_from_width(2), channels=1,
                    rate=int(sample_rate * speed), output=True)
    print CONTROLS_HELP
    direction = 1  # +1 forward, -1 reverse
    current_sample = 0
    while True:
        if stream.is_active():
            # Stream the next 2048-sample chunk in the current direction,
            # clamped to the ends of the data.
            slice_end = current_sample + direction * 2048
            slice_end = max(min(slice_end, len(data)), 0)
            data_slice = data[current_sample:slice_end:direction]
            stream.write(data_slice.tostring())
            current_sample = slice_end
        img = spec_view.view(current_sample)
        cv.ShowImage("Spectrogram", img)
        key = chr(cv.WaitKey(5) & 255)
        if key == ' ':
            # Space toggles pause/resume.
            if stream.is_active():
                stream.stop_stream()
            else:
                stream.start_stream()
        elif key in ('r', 'R'):
            direction = -1 * direction
        elif key in ('q', 'Q'):
            break
        elif key in ('+', '=', '-', '_'):
            if key in ('+', '='):
                speed += 0.1
            else:
                speed -= 0.1
            speed = min(MAX_SPEED, max(0.1, speed))
            print "Speed:", speed
            # Changing the rate requires reopening the output stream.
            stream.close()
            stream = p.open(format=p.get_format_from_width(2), channels=1,
                            rate=int(sample_rate * speed), output=True)
    stream.stop_stream()
    stream.close()
    p.terminate()
def CalibrationWindowThread(im):
    """Display the calibration image with its mouse handler installed, then
    pump GUI events until the shared `key` event is set."""
    cv.ShowImage(window_name, im)
    cv.SetMouseCallback(window_name, on_mouse)
    global key
    # WaitKey keeps the HighGUI event loop alive while we wait.
    while not key.is_set():
        cv.WaitKey(1)
def __init__(self, *args):
    """Create the calibration display window with its mouse handler and a
    scale trackbar, on top of the base CalibrationNode setup."""
    CalibrationNode.__init__(self, *args)
    cv.NamedWindow("display")
    # Small font for on-screen annotations.
    self.font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.20, 1, thickness=2)
    #self.button = cv.LoadImage("%s/button.jpg" % roslib.packages.get_pkg_dir(PKG))
    cv.SetMouseCallback("display", self.on_mouse)
    cv.CreateTrackbar("scale", "display", 0, 100, self.on_scale)
def __init__(self, name, update_period=None):
    """Create a named window wired to this object's event handler; when a
    positive update period is given, run the update loop until update()
    reports the window is closed."""
    KeyCommandedObject.__init__(self)
    self.name = name
    self.update_period = update_period
    cv.NamedWindow(self.name)
    cv.SetMouseCallback(self.name, self.handleEvents, 0)
    if update_period > 0:
        # Keep updating until update() returns a falsy value.
        while self.update():
            pass
def show(self, window="Image Montage", pos=None, delay=0):
    """
    Display the montage image and register the mouse-handling callback so
    the user can scroll the montage via the increment/decrement arrows.
    @return: The key code of the key pressed, if any, that dismissed the
    window.
    """
    montage = self.asImage()
    cv.NamedWindow(window)
    cv.SetMouseCallback(window, self._clickHandler.onClick, window)
    return montage.show(window=window, pos=pos, delay=delay)
def init_globals():
    """ sets up the data we need in the global dictionary D """
    global D
    # Publishers: key presses and object locations.
    D.K_PUB = rospy.Publisher('key_press_data', String)
    #D.BALL_PUB = rospy.Publisher('ball_location_data', String)
    #D.GOAL_PUB = rospy.Publisher('goal_location_data', String)
    #D.PIKA_PUB = rospy.Publisher('pika_location_data', String)
    D.LOCATION_PUB = rospy.Publisher('location_data', String)
    # Threshold sliders start wide open: every channel spans 0..255.
    D.thresholds = {}
    for channel in ('red', 'green', 'blue', 'hue', 'sat', 'val'):
        D.thresholds['low_' + channel] = 0
        D.thresholds['high_' + channel] = 255
    # Windows for the raw camera image and the thresholded image.
    cv.NamedWindow('image')
    cv.MoveWindow('image', 0, 0)
    cv.NamedWindow('threshold')
    threshold_x = 640
    if D.half_size:
        threshold_x /= 2
    cv.MoveWindow('threshold', threshold_x, 0)
    # Mouse button presses on the camera image are handled by onMouse.
    cv.SetMouseCallback('image', onMouse, None)
    # Scratchwork images are created lazily, on the first frame.
    D.created_images = False
    # Most recent key press (255 means "none").
    D.last_key_pressed = 255
    # Bridge between ROS images and OpenCV, plus the launcher camera.
    D.bridge = cv_bridge.CvBridge()
    D.camera = cv.CaptureFromCAM(-1)
def __init__(self, files, target):
    """Open the preview window with lower/upper threshold trackbars and a
    mouse handler, then start processing the given files."""
    self.files = files
    self.target = target
    # Angular step between consecutive input frames, in degrees.
    self.step = 360.000 / len(files)
    cv.NamedWindow('preview', 1)
    cv.CreateTrackbar('t-low', 'preview', 35, 254, self.changeThresholdMin)
    cv.CreateTrackbar('t-upper', 'preview', 220, 254, self.changeThresholdMax)
    cv.SetMouseCallback('preview', self.onMouseEvent)
    self.run()
def process(self, cv_image, info, image2=None):
    """Show the image in a window and block until the user clicks; returns
    ([clicked_point], {}, unchanged_image)."""
    self.clicked = False
    self.name = "ClickNode"
    cv.NamedWindow(self.name)
    cv.ShowImage(self.name, cv_image)
    cv.SetMouseCallback(self.name, self.onMouse, 0)
    # Pump GUI events until onMouse records a click.
    while not self.clicked:
        cv.WaitKey(5)
    cv.WaitKey(10)
    cv.DestroyWindow(self.name)
    # A few extra WaitKey calls give HighGUI time to tear the window down.
    for _ in range(10):
        cv.WaitKey(1)
    return ([self.point], {}, cv_image)
def __init__(self):
    """Open the camera and the demo windows, hook up mouse selection, and
    print the key bindings."""
    self.capture = cv.CaptureFromCAM(0)
    cv.NamedWindow("CamShiftDemo", 1)
    cv.NamedWindow("Histogram", 1)
    cv.SetMouseCallback("CamShiftDemo", self.on_mouse)
    self.drag_start = None    # (x, y) once a mouse drag begins
    self.track_window = None  # selection rect once the drag finishes
    print("Keys:\n"
          " ESC - quit the program\n"
          " b - switch to/from backprojection view\n"
          "To initialize tracking, drag across the object with the mouse\n")
def __init__(self, files, target):
    """Open the preview window with threshold trackbars (initialised from
    the current thresholdMin/thresholdMax) and start processing the files."""
    self.files = files
    self.target = target
    # Angular step between consecutive input frames, in degrees.
    self.step = 360.000 / len(files)
    cv.NamedWindow('preview', 1)
    cv.CreateTrackbar('t-low', 'preview', self.thresholdMin, 256, self.changeThresholdMin)
    cv.CreateTrackbar('t-upper', 'preview', self.thresholdMax, 256, self.changeThresholdMax)
    #cv.CreateTrackbar('c-size', 'preview', self.contourSize, 100000, self.changeContoursize )
    cv.SetMouseCallback('preview', self.onMouseEvent)
    self.run()
def __init__(self, files, target):
    """Open the preview window with threshold/offset trackbars, precompute
    the per-file rotation angles, and start processing."""
    self.files = files
    self.target = target
    cv.NamedWindow('preview', 1)
    cv.CreateTrackbar('threshold low', 'preview', self.thresholdMin, 256, self.changeThresholdMin)
    cv.CreateTrackbar('threshold high', 'preview', self.thresholdMax, 256, self.changeThresholdMax)
    cv.CreateTrackbar('offset center', 'preview', 100, 200, self.changeCenterOffset)
    #cv.CreateTrackbar('c-size', 'preview', self.contourSize, 100000, self.changeContoursize )
    cv.SetMouseCallback('preview', self.onMouseEvent)
    # Angular step between consecutive frames and the absolute angle of
    # each frame, both in radians.
    self.step = math.radians(360.000 / len(files))
    self.angles = [radians(i * (360.00 / len(files))) for i in range(0, len(files))]
    self.run()
def __init__(self): self.capture = cv.CaptureFromCAM(0) cv.NamedWindow( "CamShiftDemo", cv.CV_WINDOW_NORMAL) cv.NamedWindow( "Histogram", 1 ) cv.SetMouseCallback( "CamShiftDemo", self.on_mouse) # find middle of screen frame = cv.QueryFrame(self.capture) self.midScreenX = (frame.width/2) self.midScreenY = (frame.height/2) self.midScreen = (self.midScreenX, self.midScreenY) print "This is the center of the screen: " + str(self.midScreen) self.drag_start = None # Set to (x,y) when mouse starts drag self.track_window = None # Set to rect when the mouse drag finishes
def __init__(self):
    """Open the input video, create the output window, and initialise the
    tracking state."""
    #self.capture = cv.CaptureFromCAM(0)
    #self.capture=cv.CaptureFromFile('/home/diego/Videos/colillas-2010.mpg');
    self.capture = cv.CaptureFromFile('/home/diego/Videos/primerpaneo2.wmv')
    cv.NamedWindow("Output", 1)
    cv.SetMouseCallback("Output", self.on_mouse)
    self.drag_start = None    # (x, y) once a mouse drag begins
    self.track_window = None  # selection rect once the drag finishes
    self.objects = {}         # Dictionary containing the position of objects
    print("Keys:\n"
          " ESC - quit the program\n"
          " b - switch to/from backprojection view\n"
          "To initialize tracking, drag across the object with the mouse\n")
def __init__(self):
    """Set up the image-data publisher, threshold state, display windows,
    and drag-and-drop bookkeeping."""
    # Broadcasts processed image information as a string message.
    self.publisher = rospy.Publisher('imageData', String)
    # Every channel starts wide open: 0..256.
    self.thresholds = {}
    for channel in ('red', 'green', 'blue', 'hue', 'sat', 'val'):
        self.thresholds['low_' + channel] = 0
        self.thresholds['high_' + channel] = 256
    self.load_thresholds()
    # Raw-image and thresholded-image windows, both with mouse handling.
    cv.NamedWindow('image')
    cv.MoveWindow('image', 0, 0)
    cv.SetMouseCallback('image', self.onMouse2, None)
    cv.NamedWindow('threshold')
    cv.MoveWindow('threshold', 0, 480)
    cv.SetMouseCallback('threshold', self.onMouse2, None)
    self.make_control_window()
    self.bridge = cv_bridge.CvBridge()
    self.image = None            # the image from the drone
    self.new_image = False       # did we just receive a new image?
    self.threshed_image = None   # thresholded image
    self.contours_found = False
    # If you want to use text on the image, uncomment the line below;
    # it sets the font to something readable.
    # self.font = cv.InitFont(cv.CV_FONT_HERSHEY_PLAIN, 1, 1, 0, 1)
    # Drag and drop data members.
    self.mouse_down = False
    self.down_coord = (-1, -1)
    self.up_coord = (-1, -1)
    self.sections = []
    self.mode = "add"