def run(self):
    """Main capture-and-track loop of the CamShift demo.

    Repeatedly grabs a frame, back-projects a hue histogram, runs CamShift on
    the tracked window, lets the user (re)select a region with the mouse to
    rebuild the histogram, and displays frame/backprojection/histogram windows
    until ESC (key 27) is pressed.

    NOTE(review): this body is the legacy OpenCV 1.x `cv` API with names
    mechanically renamed to `cv2.*` (createHist, QueryFrame, createImage,
    GetSize, getSubRect, cloneMat, convertScale, calcArrHist, ellipseBox,
    showImage, ...). None of these exist in the `cv2` module, so this function
    cannot run as written — it needs a real port to the cv2/NumPy API. The
    comments below describe the intended legacy-API behavior.
    """
    # 1-D hue histogram: 180 bins spanning the OpenCV hue range [0, 180).
    hist = cv2.createHist([180], cv2.CV_HIST_ARRAY, [(0,180)], 1 )
    backproject_mode = True  # NOTE(review): assigned but never read in this block
    while True:
        frame = cv2.QueryFrame( self.capture )
        # Convert to HSV and keep the hue
        hsv = cv2.createImage(cv2.GetSize(frame), 8, 3)
        cv2.cvtColor(frame, hsv, cv2.CV_BGR2HSV)
        self.hue = cv2.createImage(cv2.GetSize(frame), 8, 1)
        # Legacy Split(src, dst0..dst3): extract only channel 0 (hue).
        cv2.split(hsv, self.hue, None, None, None)
        # Compute back projection of the hue image against the histogram.
        backproject = cv2.createImage(cv2.GetSize(frame), 8, 1)
        cv2.calcArrBackProject( [self.hue], backproject, hist )
        # Run the cam-shift (if the a window is set and != 0)
        if self.track_window and is_rect_nonzero(self.track_window):
            # Termination criteria: stop after 10 iterations or epsilon 1.
            crit = ( cv2.CV_TERMCRIT_EPS | cv2.CV_TERMCRIT_ITER, 10, 1)
            (iters, (area, value, rect), track_box) = cv2.camShift(backproject, self.track_window, crit) #Call the camshift !!
            self.track_window = rect #Put the current rectangle as the tracked area
        # If mouse is pressed, highlight the current selected rectangle and recompute histogram
        if self.drag_start and is_rect_nonzero(self.selection):
            sub = cv2.getSubRect(frame, self.selection) #Get specified area
            #Make the effect of background shadow when selecting a window:
            # save the selection, darken the whole frame, restore the selection.
            save = cv2.cloneMat(sub)
            cv2.convertScale(frame, frame, 0.5)
            cv2.copy(save, sub)
            #Draw temporary rectangle
            x,y,w,h = self.selection
            cv2.rectangle(frame, (x,y), (x+w,y+h), (255,255,255))
            #Take the same area but in hue image to calculate histogram
            sel = cv2.getSubRect(self.hue, self.selection )
            cv2.calcArrHist( [sel], hist, 0)
            #Used to rescale the histogram with the max value (to draw it later on)
            (_, max_val, _, _) = cv2.getMinMaxHistValue( hist)
            if max_val != 0:
                cv2.convertScale(hist.bins, hist.bins, 255. / max_val)
        elif self.track_window and is_rect_nonzero(self.track_window): #If window set draw an elipseBox
            # `track_box` was bound by the CamShift branch above (same
            # track_window condition), so it is available here.
            cv2.ellipseBox( frame, track_box, cv2.CV_RGB(255,0,0), 3, cv2.CV_AA, 0 )
        cv2.showImage( "CamShiftDemo", frame )
        cv2.showImage( "Backprojection", backproject)
        cv2.showImage( "Histogram", self.hue_histogram_as_image(hist))
        # Poll keyboard; ESC (27) exits the loop.
        c = cv2.waitKey(7) % 0x100
        if c == 27:
            break
def OnFileButtonDownClick(self):
    """Handler for the file "down" button: step back one entry in the global
    `filelist`, reload that image, and refresh the Tk preview label.

    Side effects: decrements the global index `Nfile` (floored at 0),
    replaces the globals `originalImg` / `currentImg` with the newly loaded
    image and an independent working copy, and rebuilds the preview widget.
    """
    global p1
    global p1max
    global button1Val
    global Nfile
    global currentImg
    global originalImg
    print('file button down')
    # Move to the previous file, but never below index 0.
    if Nfile > 0:
        Nfile = Nfile - 1
    print('Nfile:' + str(Nfile))
    self.labelVariable.set(self.entryVariable.get() + " p1val=" + str(filelist[Nfile]))
    self.entry.focus_set()
    self.entry.selection_range(0, Tkinter.END)
    originalImg = cv2.imread(filelist[Nfile])
    # BUG FIX: `cv2.copy` does not exist (that was the legacy cv.Copy);
    # cv2.imread returns a NumPy array, so use its own .copy() to get an
    # independent working copy that edits won't write through to the original.
    currentImg = originalImg.copy()
    img = Image.open(filelist[Nfile])
    photo2 = ImageTk.PhotoImage(img)
    label1 = Tkinter.Label(self, image=photo2)
    # Keep a reference on the widget so Tk doesn't garbage-collect the photo.
    label1.image = photo2
    label1.grid(row=Imrow, column=0, columnspan=Ncols, sticky=Tkinter.NW)
    self.update()
def get_feed(self):
    """Background feed loop (Python 2 code: `print` statements, `xrange`).

    Repeatedly connects to a local frame server on port 12345, receives a
    pickled NumPy image, applies the currently selected visual effect, and
    publishes the RGB bytes plus dimensions through the `feed_*` globals.
    Exits when the global `a_time_to_die` flag is set.

    NOTE(review): `scanner` (zbar) is created but never used in this body —
    presumably consumed elsewhere or dead code; confirm.
    """
    global a_time_to_die, feed_data, feed_width, feed_height, feed_ready
    scanner = zbar.ImageScanner()
    scanner.parse_config('enable')
    shift = 1  # monotonically advancing phase used by the animated effects
    while 1:
        if a_time_to_die:
            return
        try:
            HOST = ''    # The remote host
            PORT = 12345 # The same port as used by the server
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((HOST, PORT))
            s.send('Yo!')
            # NOTE(review): redundant double assignment; one `data =` suffices.
            data = data = recvall(s)
            s.close()
            # SECURITY(review): pickle.loads on data received from a socket —
            # unpickling untrusted input can execute arbitrary code. Only safe
            # if the server is fully trusted; consider a safer wire format.
            data = pickle.loads(data)
            #print 'Received img'
            height, width, depth = data.shape
            if effect == smiley_face:
                try:
                    # Effect helpers appear to expect a vertically flipped
                    # image: flip, detect, flip back.
                    data = cv2.flip(detect_faces(cv2.flip(data, 0)),0)
                except:
                    print "face fail"
            elif effect == target_face:
                try:
                    data = cv2.flip(detect_faces_2(cv2.flip(data, 0)),0)
                except:
                    print "face 2 fail"
            elif effect == trippy_colours:
                try:
                    # Add an advancing offset to every pixel (uint8 wraps).
                    shift += 1
                    data += shift % 256
                except:
                    print "shift fail"
            elif effect == trippy_colours_2:
                try:
                    shift += 1
                    for x in xrange(shift % 3):
                        # NOTE(review): `cv2.copy` does not exist in cv2
                        # (legacy cv.Copy); and cv2.mixChannels takes
                        # list-of-arrays src/dst plus a flat fromTo list —
                        # this call likely raises and is swallowed below.
                        data2 = cv2.copy(data)
                        cv2.mixChannels(data2,data, [(0,1),(1,2),(2,1)])
                except:
                    print "mario fail"
            elif effect == vertical_hold:
                try:
                    # Rolling "vertical hold" effect: shift the image by an
                    # advancing offset and wrap by summing the two halves,
                    # first vertically then horizontally.
                    shift += 10
                    M = numpy.float32([[1,0,0],[0,1,shift%height]])
                    data1 = cv2.warpAffine(data,M,(width,height))
                    M = numpy.float32([[1,0,0],[0,1,(shift%height)-height]])
                    data2 = cv2.warpAffine(data,M,(width,height))
                    data = data1 + data2
                    M = numpy.float32([[1,0,shift%width],[0,1,0]])
                    data1 = cv2.warpAffine(data,M,(width,height))
                    M = numpy.float32([[1,0,(shift%width)-width],[0,1,0]])
                    data2 = cv2.warpAffine(data,M,(width,height))
                    data = data1 + data2
                except:
                    print "vhold fail"
            elif effect == colour_band:
                try:
                    shift += 5
                    # Band thickness; Python 2 `/` on ints is floor division.
                    b = height / 20
                    top = shift % (width - b)
                    # band of colour: grayscale everything, then restore the
                    # original colours inside the moving band.
                    # NOTE(review): cv2.CV_GRAY2BGR is a legacy constant name
                    # (cv2.COLOR_GRAY2BGR in the cv2 API) — likely raises and
                    # is swallowed by the bare except below. Row slices use
                    # 0:width but the first axis has `height` rows — works only
                    # because slicing clamps; presumably 0:height was meant.
                    data_g = cv2.cvtColor(cv2.cvtColor(data, cv2.COLOR_BGR2GRAY), cv2.CV_GRAY2BGR)
                    data_g[0:width, top:top+b] = data[0:width, top:top+b]
                    data = data_g
                    # inversion inside the band
                    data_i = 255 - data
                    data[0:width, top:top+b] = data_i[0:width, top:top+b]
                except:
                    # NOTE(review): copy-pasted message from vertical_hold.
                    print "vhold fail"
            # Publish the processed frame as RGB bytes via the globals.
            data = cv2.cvtColor(data,cv2.COLOR_BGR2RGB)
            feed_data = data.tostring()
            feed_width = width
            feed_height = height
            feed_ready = True
            sleep(.001)
        except:
            # NOTE(review): bare except; also `s` is unbound here if
            # socket.socket() itself raised, and close() on an already-closed
            # socket is redundant. Retries after a long back-off.
            s.close()
            print("fail")
            sleep(25)
# (3) an OEM value, in this case, 7 which implies that we are # treating the ROI as a single line of text config = ("-l eng --oem 1 --psm 7") text = pytesseract.image_to_string(roi, config=config) # add the bounding box coordinates and OCR'd text to the list # of results results.append(((startX, startY, endX, endY), text)) # sort the results bounding box coordinates from top to bottom results = sorted(results, key=lambda r: r[0][1]) # loop over the results for ((startX, startY, endX, endY), text) in results: # display the text OCR'd by Tesseract print("OCR TEXT") print("========") print("{}\n".format(text)) # strip out non-ASCII text so we can draw the text on the image # using OpenCV, then draw the text and a bounding box surrounding # the text region of the input image text = "".join([c if ord(c) < 128 else "" for c in text]).strip() output = cv2.copy() cv2.rectangle(output, (startX, startY), (endX, endY), (0, 0, 255), 2) cv2.putText(output, text, (startX, startY - 20), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 3) # show the output image cv2.imshow("Text Detection", output) cv2.waitKey(0)