def calibrate(numcam, num_bots, tcp_ips, tcp_ports, buffer_size, cam_pairs): CAM_IDS = [x % 10 for x in tcp_ports] cams_to_be_calib = get_cam_to_be_calibrated(cam_pairs) print 'cams_to_be_calib', cams_to_be_calib trackers = {} for y, x in cams_to_be_calib.items(): print x, y idx = CAM_IDS.index(y) z = CamTracker(y, num_bots, tcp_ips[idx], tcp_ports[idx], buffer_size) trackers[y] = z for i in trackers.keys(): trackers[i].thread.start() print cam_pairs tfs = [] for i, j in cam_pairs: print "======>", i, j tf = {} tf['from_to'] = (i, j) id_i = int(i) id_j = int(j) print id_j, id_i calib_obj = Calibrate(trackers[id_i], trackers[id_j], 'somefilename') calib_obj.calibrate() try: tf['tf'] = calib_obj.tf except: continue print calib_obj.R tfs.append(tf) close_trackers(trackers) return tfs
def __init__(self):
    """Build the main window: load the Qt UI, create the Calibrate dialog
    and wire every widget signal to its handler slot."""
    super().__init__()
    self.ui = Ui_MainWindow()
    self.ui.setupUi(self)
    # Export/surface are disabled until a reconstruction has finished.
    self.ui.exportButton.setDisabled(True)
    self.ui.surfaceButton.setDisabled(True)
    self.calibrator = Calibrate()
    # Show the CPU count and cap the worker spin box at that count.
    self.ui.CPULabel.setText(str(multiprocessing.cpu_count()))
    self.ui.spinBox.setMaximum(multiprocessing.cpu_count())
    # VTK interactor for the 3D view; share the widget with the recon object.
    self.iren = self.ui.vtkWidget.GetRenderWindow().GetInteractor()
    self.ui.graphwidget.recon.vtkWidget = self.ui.vtkWidget
    # --- signal/slot wiring ---
    self.ui.cvViewer.PosFound.connect(self.ui.graphwidget.drawScatter)
    self.ui.checkBox.stateChanged.connect(self.start_scanning)
    self.ui.surfaceButton.clicked.connect(self.ui.graphwidget.drawSurface)
    self.ui.calibrateButton.clicked.connect(self.calibrator.show)
    self.calibrator.ui.captureButton.clicked.connect(
        lambda: self.calibrator.run(self.ui.cvViewer.camera.img))
    self.ui.refreshButton.clicked.connect(self.serial_refresh)
    self.ui.comboBox.currentIndexChanged['int'].connect(
        self.serialdata_refresh)
    # When the finder completes: uncheck the scan box and enable surfacing.
    self.ui.cvViewer.finder.finished.connect(
        lambda: self.ui.checkBox.setCheckState(Qt.Unchecked))
    self.ui.cvViewer.finder.finished.connect(
        lambda: self.ui.surfaceButton.setDisabled(False))
    # Progress bars for motion and point finding.
    self.ui.cvViewer.motion.motionProgress['int'].connect(
        self.ui.progressBar.setValue)
    self.ui.cvViewer.finder.foundProgress['int'].connect(
        self.ui.scanprogressBar.setValue)
    # Reconstruction status text / completion hook.
    self.ui.graphwidget.recon.reconstruction_status.connect(
        self.ui.textEdit.append)
    self.ui.graphwidget.recon.reconstruction_done.connect(self.recon_done)
    self.ui.spinBox.valueChanged.connect(self.ui.cvViewer.finder.setnump)
    self.ui.exportButton.clicked.connect(self.ui.graphwidget.export)
def setUp(self):
    """Create one Calibrate fixture per (resolution, camera) combination."""
    self.low_resolution = DATA_FOLDER / Path('calib_png_small')
    self.high_resolution = DATA_FOLDER / Path('calib_jpg_u')
    resolutions = (('low', self.low_resolution),
                   ('high', self.high_resolution))
    for res_name, folder in resolutions:
        for cam in ('C0', 'C1'):
            setattr(self, f'{res_name}_calib_{cam}', Calibrate(folder, cam))
def load(self, host="localhost", port=6499):
    """Create every task page and add each one as a tab, all pages sharing
    the same change callback and host/port connection parameters."""
    print "Tuner.load " + str(port)
    # Calibrate page
    self.calibrate = Calibrate(changefunc=self.onChange, host=host,
                               port=port)
    self.tabs.addTab(self.calibrate.get_widget(), "Calibrate")
    # Preflight page
    self.preflight = Preflight(changefunc=self.onChange, host=host,
                               port=port)
    self.tabs.addTab(self.preflight.get_widget(), "Preflight")
    # Launch page
    self.launch = Launch(changefunc=self.onChange, host=host, port=port)
    self.tabs.addTab(self.launch.get_widget(), "Launch")
    # Circle hold page
    self.circle = Circle(changefunc=self.onChange, host=host, port=port)
    self.tabs.addTab(self.circle.get_widget(), "Circle")
    # Chirp page
    self.chirp = Chirp(changefunc=self.onChange, host=host, port=port)
    self.tabs.addTab(self.chirp.get_widget(), "Chirp")
    # Land page
    self.land = Land(changefunc=self.onChange, host=host, port=port)
    self.tabs.addTab(self.land.get_widget(), "Land")
def setUp(self):
    """Build Calibrate fixtures for both resolutions and cameras, plus a
    Decoder in testing mode."""
    self.low_resolution = DATA_FOLDER / Path('calib_png_small')
    self.high_resolution = DATA_FOLDER / Path('calib_jpg_u')
    for res_name, folder in (('low', self.low_resolution),
                             ('high', self.high_resolution)):
        for cam in ('C0', 'C1'):
            setattr(self, f'{res_name}_calib_{cam}', Calibrate(folder, cam))
    self.decoder = Decoder(testing=True)
def __init__(self):
    """Open the capture device and create the helper objects used by run()."""
    # Background not yet captured (int flag toggled by the 'b'/'r' keys).
    self.isBgCaptured = 0
    self.demo = None
    self.handDetect = HandDetect()
    self.calibrate = Calibrate()
    # Default webcam; property id 10 set to 200
    # (presumably CAP_PROP_BRIGHTNESS — TODO confirm).
    device = cv2.VideoCapture(0)
    device.set(10, 200)
    self.camera = device
def main():
    """Start the MATLAB engine, run eye-tracker calibration, then launch
    the Qt e-book reader and exit with Qt's return code."""
    print("Start MatLabEngine:\n ")
    engine = matlab.engine.start_matlab()
    print("Start Calibrate:\n ")
    Calibrate.calibrate(engine)
    print("Start e-book reader:\n ")
    application = QApplication(sys.argv)
    main_window = MainWindow(engine)
    main_window.show()
    rc = application.exec_()
    print('Qt return code:' + str(rc))
    sys.exit(rc)
def calibrate(objname, filtercol):
    """Run photometric calibration of *objname* in the given filter:
    reset stored frames, fetch reference frames, then download and
    correct the frames belonging to each reference."""
    from calibrate import Calibrate
    calv = Calibrate(obj=objname, filtercol=filtercol)
    calv.resetframes()
    calv.getrefframes()
    # Debug dump of the reference frames found.
    for r in calv.ref:
        print r
    for ref in calv.ref:
        print 'reference:', ref
        calv.getframes(ref)
        calv.corrframes(ref)
def __init__(self, draw=True, colour='blue', side='left', main_pitch=True,
             load_bg_model=False, corners_from_mem=False):
    """Set up camera calibration, the vision pipeline and frame capture
    for one team colour / pitch side."""
    _filepath = sys.argv[0]
    _relpath = os.path.split(_filepath)[0]
    # Fix: the original used "_relpath is not ''" — an identity test
    # against a string literal that only worked through CPython string
    # interning.  Compare by value instead.
    if _relpath != '':
        _relpath = _relpath + '/'
    # Static background image used to initialise the vision pipeline.
    self._bg = cv.LoadImage(_relpath + "b02.jpg")
    self._draw = draw
    self._colour = colour
    self._side = side
    self._n_avg_bg = 30
    self._corners_from_mem = corners_from_mem
    # FPS bookkeeping.
    self._last_update = 0
    self._fps_update_interval = 0.5
    self._num_frames = 0
    self._fps = 0
    # Chessboard calibration (9x6 inner corners), loaded from disk.
    self._calibrate = Calibrate((9,6))
    self.calibrate_cam(from_mem = True)
    self._config = yaml.load(file('config/020311-0830-main_pitch.yaml', 'r'))
    self._vision = Vision(self._bg, self._calibrate, self._config,
                          fix_distortions = True, main_pitch=main_pitch,
                          corners_from_mem=self._corners_from_mem)
    self._vision_com = VisionCom(self._vision, colour=self._colour,
                                 side=self._side)
    self._ba = None
    self._load_bg_model = load_bg_model
    self._frame_name = 'annotated'
    self._capture = Capture()
    self._base_time = 0
def __init__(self, avg_bg_frames = 30, online = True, main = True,
             advanced = False,
             config = "config/020311-0830-secondary_pitch.yaml"):
    """Build the vision pipeline, spawn the capture and GUI processes,
    then enter the main loop (this constructor blocks in self._loop())."""
    # Set up vision, visioncom
    calibrate = Calibrate((9,6))
    calibrate.calibrate_camera_from_mem("calibration_data/")
    self._config = yaml.load(file(config, 'r'))
    # Fallback image used when running offline.
    self._offline_img = cv.LoadImage("b02.jpg")
    self._vision = Vision(self._offline_img, calibrate, self._config,
                          fix_distortions = True, main_pitch = main)
    self._visioncom = VisionCom(self._vision)
    # Capture runs in its own process; we keep one end of the pipe.
    self._capture_pipe, capture_pipe = Pipe()
    self._capture = Process(target=Capture, args = (capture_pipe,))
    self._capture.start()
    # FPS bookkeeping.
    self._fps_then = 0
    self._fps_frames = 0
    self._fps = 0
    self._base_time = 0
    self._advanced = advanced
    self._online = online
    if self._online:
        # Average the first avg_bg_frames frames into a background model.
        if avg_bg_frames:
            frames = []
            for i in range(avg_bg_frames):
                frames.append(self.get_frame())
            self._vision.avg_bg_frames(frames)
    # Set up GUI and start process
    self._events_pipe , events_pipe = Pipe(False)
    # Shape pipes only exist in advanced mode; False marks them unused.
    set_shapes_pipe, get_shapes_pipe = False, False
    if advanced:
        self._set_shapes_pipe, set_shapes_pipe = Pipe(False)
        get_shapes_pipe , self._get_shapes_pipe = Pipe(False)
    feed_pipe , self._feed_pipe = Pipe(False)
    locations_pipe , self._locations_pipe = Pipe(False)
    self._gui_process = Process(target = gui.Gui,
                                args = (events_pipe, locations_pipe,
                                        feed_pipe, self._config,
                                        get_shapes_pipe, set_shapes_pipe))
    self._gui_process.start()
    # Tell the GUI which pitch we are on.
    self._locations_pipe.send(("pitch",{True:"main",False:"other"}[main]))
    self._window = "source"
    self._our_colour = "blue"
    self._show_window = True
    self._run = True
    # NOTE: blocks here — the event loop runs until shutdown.
    self._loop()
def on_compensate(self):
    """Toggle compensation sample collection (toolbar button handler)."""
    # The sensor must be streaming before samples can be collected.
    if not self.monitor_running:
        self.status.showMessage("Please, connect sensor")
        return
    if self.compensate:
        # Collecting -> stop: compute the correction and drop the collector.
        self.toolbar_buttons['collection'].setText('Collection')
        self.progress.setValue(0)
        self.maxdub = self.calibrate.compute()
        del self.calibrate
        self.compensate = False
        self.status.showMessage('Stop compensate', 1000)
    else:
        # Idle -> start: seed a new collector with the current heading.
        start_heading = float(self.data_view['heading'].text())
        self.toolbar_buttons['collection'].setText('Stop')
        self.calibrate = Calibrate(start_heading)
        self.compensate = True
        self.status.showMessage('Start compensate', 1000)
def __init__(self, directory: Path, imprefix: str, R,
             t=np.array([[0, 0, -2]]).T, chess_size=(6, 8),
             square_length=2.8):
    """Load a cached camera calibration from a pickle when available,
    otherwise run the full Calibrate initialisation and camera alignment.

    directory may be None (use the default data folder), a list of image
    paths (work in the current directory), or a path to an image folder.
    Only the folder case persists a freshly computed calibration.
    """
    self.directory = directory
    self.pickle = Path('calibration_' + imprefix + '.pickle')
    self.imprefix = imprefix
    # Fix: "directory == None" replaced with the idiomatic identity test.
    if directory is None:
        self.path = DATA_FOLDER / Path('calib_jpg_u')
        self._load_or_calibrate(directory, imprefix, R, t, chess_size,
                                square_length)
    elif isinstance(directory, list):
        self.path = Path.cwd()
        self._load_or_calibrate(directory, imprefix, R, t, chess_size,
                                square_length)
    else:
        self.path = Path(directory)
        # NOTE(review): in the original, write_pickle appeared only in this
        # branch, after a fresh calibration — persist only newly computed
        # results.  Confirm against the original file's indentation.
        if self._load_or_calibrate(directory, imprefix, R, t, chess_size,
                                   square_length):
            self.write_pickle()

def _load_or_calibrate(self, directory, imprefix, R, t, chess_size,
                       square_length):
    """Shared body of the three constructor branches (was triplicated).

    Resolve the pickle path under self.path; load it when present,
    otherwise run the base Calibrate init, derive focal length / principal
    point, set pose (R, t) and align the cameras.
    Returns True when a fresh calibration was computed, False when loaded.
    """
    self.pickle = self.path / self.pickle
    if self.pickle.exists():
        self.get_pickle()
        return False
    Calibrate.__init__(self, directory, imprefix, chess_size, square_length)
    # Single focal length as the mean of fx/fy; principal point as a column.
    self.f = (self.fx + self.fy) / 2
    self.c = np.array([[self.cx, self.cy]]).T
    self.R = makerotation(0, 0, 0) if R is None else R
    self.t = t
    self.align_cameras(chess_size, square_length)
    return True
class Window(QMainWindow):
    """Main application window: wires the camera viewer, reconstruction
    widget, calibration dialog and serial-port controls together."""

    def __init__(self):
        super().__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Export/surface stay disabled until a reconstruction finishes.
        self.ui.exportButton.setDisabled(True)
        self.ui.surfaceButton.setDisabled(True)
        self.calibrator = Calibrate()
        # Show CPU count and cap the worker spin box at that count.
        self.ui.CPULabel.setText(str(multiprocessing.cpu_count()))
        self.ui.spinBox.setMaximum(multiprocessing.cpu_count())
        # VTK interactor for the 3D view, shared with the recon object.
        self.iren = self.ui.vtkWidget.GetRenderWindow().GetInteractor()
        self.ui.graphwidget.recon.vtkWidget = self.ui.vtkWidget
        # --- signal/slot wiring ---
        self.ui.cvViewer.PosFound.connect(self.ui.graphwidget.drawScatter)
        self.ui.checkBox.stateChanged.connect(self.start_scanning)
        self.ui.surfaceButton.clicked.connect(self.ui.graphwidget.drawSurface)
        self.ui.calibrateButton.clicked.connect(self.calibrator.show)
        self.calibrator.ui.captureButton.clicked.connect(
            lambda: self.calibrator.run(self.ui.cvViewer.camera.img))
        self.ui.refreshButton.clicked.connect(self.serial_refresh)
        self.ui.comboBox.currentIndexChanged['int'].connect(
            self.serialdata_refresh)
        # Finder completion: uncheck the scan box, enable surfacing.
        self.ui.cvViewer.finder.finished.connect(
            lambda: self.ui.checkBox.setCheckState(Qt.Unchecked))
        self.ui.cvViewer.finder.finished.connect(
            lambda: self.ui.surfaceButton.setDisabled(False))
        # Progress bars for motion and point finding.
        self.ui.cvViewer.motion.motionProgress['int'].connect(
            self.ui.progressBar.setValue)
        self.ui.cvViewer.finder.foundProgress['int'].connect(
            self.ui.scanprogressBar.setValue)
        self.ui.graphwidget.recon.reconstruction_status.connect(
            self.ui.textEdit.append)
        self.ui.graphwidget.recon.reconstruction_done.connect(self.recon_done)
        self.ui.spinBox.valueChanged.connect(self.ui.cvViewer.finder.setnump)
        self.ui.exportButton.clicked.connect(self.ui.graphwidget.export)

    def recon_done(self):
        """Reconstruction finished: start the VTK interactor, allow export."""
        self.iren.Initialize()
        self.ui.exportButton.setDisabled(False)

    def start_scanning(self):
        """Checkbox handler: refuse to scan without a serial port or camera
        calibration; otherwise forward the scan state to the finder."""
        # '无串口' == "no serial port" (label text set elsewhere).
        if self.ui.serialLabel.text() == '无串口' and self.ui.checkBox.isChecked(
        ):
            # '提示' == "Notice", '未连接串口' == "serial port not connected".
            QMessageBox.information(self, '提示', '未连接串口', QMessageBox.Ok,
                                    QMessageBox.Ok)
            self.ui.checkBox.setCheckState(Qt.Unchecked)
            return
        # Camera matrix file must exist (produced by calibration).
        if not os.path.exists('cameradata/newcameramtx.npy'):
            # '请先标定相机' == "please calibrate the camera first".
            QMessageBox.information(self, '提示', '请先标定相机', QMessageBox.Ok,
                                    QMessageBox.Ok)
            self.ui.checkBox.setCheckState(Qt.Unchecked)
            return
        self.ui.cvViewer.finderState(self.ui.checkBox.isChecked(),
                                     self.ui.comboBox.currentText())

    def serial_refresh(self):
        """Repopulate the combo box with the available serial port names."""
        self.ui.comboBox.clear()
        self.ui.comboBox.addItems([
            str(x).split(' - ')[0]
            for x in list(serial.tools.list_ports.comports())
        ])
        print([str(x) for x in list(serial.tools.list_ports.comports())])

    def serialdata_refresh(self, i):
        """Show the description of the i-th serial port in the label."""
        data = [
            str(x).split(' - ')[1]
            for x in list(serial.tools.list_ports.comports())
        ][i]
        self.ui.serialLabel.setText(data)
class Tuner(QtGui.QWidget):
    """Top-level tuner widget: a tabbed container of task pages plus a
    Save/Quit button bar, tracking whether there are unsaved changes."""

    def __init__(self, host="localhost", port=6499):
        super(Tuner, self).__init__()
        self.default_title = "Aura Tasks"
        #self.chirp = None
        #self.circle = None
        #self.land = None
        self.initUI()
        self.load(host=host, port=port)
        # No unsaved changes yet.
        self.clean = True

    def initUI(self):
        """Lay out the tab widget and the Save/Quit button bar."""
        self.setWindowTitle(self.default_title)
        layout = QtGui.QVBoxLayout()
        self.setLayout(layout)
        # Main work area
        self.tabs = QtGui.QTabWidget()
        layout.addWidget(self.tabs)
        #self.overview = Overview(changefunc=self.onChange)
        #self.tabs.addTab( self.overview.get_widget(), "Overview" );
        # 'File' button bar
        file_group = QtGui.QFrame()
        layout.addWidget(file_group)
        file_layout = QtGui.QHBoxLayout()
        file_group.setLayout(file_layout)
        save = QtGui.QPushButton('Save')
        save.clicked.connect(self.save)
        file_layout.addWidget(save)
        quit = QtGui.QPushButton('Quit')
        quit.clicked.connect(self.quit)
        file_layout.addWidget(quit)
        file_layout.addStretch(1)
        self.resize(800, 700)
        self.show()

    def load(self, host="localhost", port=6499):
        """Create every task page and add each as a tab; all pages share
        the same change callback and host/port parameters."""
        print "Tuner.load " + str(port)
        # Calibrate page
        self.calibrate = Calibrate(changefunc=self.onChange, host=host,
                                   port=port)
        self.tabs.addTab(self.calibrate.get_widget(), "Calibrate")
        # Preflight page
        self.preflight = Preflight(changefunc=self.onChange, host=host,
                                   port=port)
        self.tabs.addTab(self.preflight.get_widget(), "Preflight")
        # Launch page
        self.launch = Launch(changefunc=self.onChange, host=host, port=port)
        self.tabs.addTab(self.launch.get_widget(), "Launch")
        # Circle hold page
        self.circle = Circle(changefunc=self.onChange, host=host, port=port)
        self.tabs.addTab(self.circle.get_widget(), "Circle")
        # Chirp page
        self.chirp = Chirp(changefunc=self.onChange, host=host, port=port)
        self.tabs.addTab(self.chirp.get_widget(), "Chirp")
        # Land page
        self.land = Land(changefunc=self.onChange, host=host, port=port)
        self.tabs.addTab(self.land.get_widget(), "Land")

    def save(self):
        # Placeholder — saving not implemented yet.
        print "called for save, but does nothing yet"

    def quit(self):
        """Quit the Qt application."""
        QtCore.QCoreApplication.instance().quit()

    def onChange(self):
        """Child pages call this when any value changes; marks state dirty."""
        #print "parent onChange() called!"
        #result = self.rebuildTabNames()
        #if result:
        #    self.rebuildWingLists()
        self.clean = False

    def isClean(self):
        # True when there are no unsaved changes.
        return self.clean

    def setClean(self):
        # Mark all changes as saved.
        self.clean = True
class MagneticApp(MainWindow):
    """Magnetic sensor viewer: streams data from a serial sensor on a
    timer, shows/logs it, and supports soft-iron compensation collection."""
    app_title = "Magnetic Viewer - {0}"
    # Timer period in milliseconds for timerEvent.
    TIMEOUT = 100

    def __init__(self, data=None, title=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setupUi()
        # Count of consecutive empty-queue reads (capped display at 10000).
        self.errors_data = 0
        # Start/Stop monitor sensor data
        self.monitor_running = False
        # Start/Stop compensate
        self.compensate = False
        # On/Off logging data
        self.logging_enable = False
        # Search available serial ports
        available_ports = sensor.scan_ports()
        if not available_ports:
            self.status.showMessage("No available ports")
            for btn in self.modeButtonGroup.buttons():
                btn.setDisabled(True)
        else:
            self.portbox.addItems(available_ports)
        # Connecting model to consumers
        self.model = SensorDataModel()
        self.centralWidget().table_view.setModel(self.model)
        # Connecting signal/slot
        self.toolbar_buttons['collection'].clicked.connect(self.on_compensate)
        self.toolbar_buttons['log_on'].clicked.connect(self.turn_logging)
        self.toolbar_buttons['select_path'].clicked.connect(
            self.on_select_path)
        # ...button group
        self.modeButtonGroup.buttonClicked[QAbstractButton].connect(
            self.on_run)
        self.viewButtonGroup.buttonClicked[QAbstractButton].connect(
            self.on_switch_mode)
        # ...comboboxs
        self.spin.valueChanged[int].connect(self.on_set_chart_xinterval)
        # ...models
        self.model.rowsInserted.connect(self.on_model_changed)

    def on_model_changed(self):
        """Update the received-row counter whenever the model grows."""
        self.counter.setText("Rx: {}".format(self.model.rowCount()))

    def timerEvent(self, QTimerEvent):
        """ Handler timer event, every 100ms"""
        # <1> Get data from sensor
        try:
            data = [
                round(item, 1)
                for item in sensor.SENSOR_QUEUE.get(timeout=0.5)
            ]
        except queue.Empty:
            self.status.showMessage("No sensor data")
            if self.errors_data <= 10000:
                self.errors_data += 1
                self.errors.setText("Err: {}".format(self.errors_data))
            else:
                self.errors.setText("Err: {}".format(">10000"))
            return
        pid, r, p, h, hy_raw, hx_raw, hz_raw, hy, hx, hz = data
        # <2> Append data to model
        self.model.append_data((r, p, h, hy_raw, hx_raw, hz_raw, hy, hx, hz))
        # <4> Show to data view
        self.data_view.update(r, p, h, hy_raw, hx_raw, hz_raw, hy, hx, hz)
        # <3> Apply correction algorithms
        if self.options['dub z'].checkState():
            hy_raw, hx_raw, hz_raw = to_horizont(hy_raw, hx_raw, hz_raw, r, p)
        if self.options['dub soft-iron'].checkState():
            # maxdub only exists after a compensation run; AttributeError
            # means "not calibrated yet".
            try:
                heading = self.maxdub.correct_heading(hx, hy)
                fmt_value = '{0:.1f}'
                self.data_view2['heading'].setText(fmt_value.format(heading))
            except AttributeError:
                self.options['dub soft-iron'].setCheckState(False)
                self.status.showMessage("Error! Please, calibrate", 1000)
        # <5> Update plot
        if self.options['update charts'].checkState():
            self.charts['inclinometer'].update_plot(r, p)
            #self.charts['heading'].update_plot(h)
            #self.charts['magnitometer'].update_plot(hy, hx, hz)
            #FIXME: При включения графика девиации увеличивается в разы количество пропущенных сигналов
            #self.charts['deviation'].update_plot(hy, hx)
        # <6> Logging data
        if self.logging_enable:
            time = QtCore.QDateTime.currentDateTime().toString(
                "yyyy-MM-dd hh:mm:ss.zzz")
            path = self.lineedit.text()
            str_data = ",".join((str(x)
                                 for x in (time, hex(pid), r, p, h, hy_raw,
                                           hx_raw, hz_raw, hy, hx, hz)))
            str_data += '\n'
            self.status.showMessage(time)
            with open(path, 'a') as f:
                f.write(str_data)
        # <7> Compensate mode
        if self.compensate:
            self.calibrate.update(data)
            self.progress.setValue(self.calibrate.status())
            time = QtCore.QDateTime.currentDateTime().toString(
                "yyyy-MM-dd hh:mm:ss.zzz")
            path = self.lineedit.text()
            #str_data = ",".join((str(x) for x in (time, hex(pid), r, p, h, hy_raw, hx_raw, hz_raw, hy, hx, hz)))
            str_data = ",".join((str(x) for x in (hy, hx)))
            str_data += '\n'
            with open(path, 'a') as f:
                f.write(str_data)

    def on_run(self, btn):
        """Dispatch the mode button group to start/stop handlers."""
        name = btn.objectName()
        if name == 'start':
            self.on_start()
        elif name == 'stop':
            self.on_stop()

    def on_start(self):
        """Open the selected serial port, start the sensor reader thread
        and the GUI poll timer."""
        if self.monitor_running:
            return
        # Disable widgets
        self.portbox.setDisabled(True)
        self.toolbar_buttons['select_path'].setDisabled(True)
        self.toolbar_buttons['rescan'].setDisabled(True)
        # ...disable action rescan
        self.menus['file'].children()[1].setDisabled(True)
        # Set selectable port to sensor
        port = self.portbox.currentText()
        self.serobj = serobj = serial.Serial(port, timeout=0.1)
        self.sensor = sensor.Sensor(serobj)
        # Run recieve data to single thread
        self.t = threading.Thread(target=self.sensor.run, daemon=False)
        self.t.start()
        # Run timer
        self.timer_recieve = self.startTimer(
            self.TIMEOUT, timerType=QtCore.Qt.PreciseTimer)
        self.status.showMessage("Running")
        self.monitor_running = True

    def on_stop(self):
        """Stop the timer and reader thread, close the port, re-enable UI."""
        if not self.monitor_running:
            return
        self.status.showMessage("Stop")
        # Enable widgets
        self.portbox.setEnabled(True)
        self.toolbar_buttons['rescan'].setEnabled(True)
        # ...enable action rescan
        self.menus['file'].children()[1].setEnabled(True)
        # Kill timer
        if self.timer_recieve:
            self.killTimer(self.timer_recieve)
            self.timer_recieve = None
        # terminate thread
        self.sensor.terminate()
        self.t.join(timeout=0.1)
        # Close port
        self.serobj.close()
        del self.sensor
        self.monitor_running = False

    def on_clear(self):
        """Reset the data model and clear the chart area."""
        self.model.reset()
        self.centralWidget().chart_view.clear_area()

    def on_switch_mode(self, btn):
        """Switch the stacked view; 'compensate' also spawns a separate
        viewer process."""
        btn_name = btn.objectName()
        if btn_name == 'chart':
            self.stack.setCurrentIndex(0)
        elif btn_name == 'table':
            self.stack.setCurrentIndex(1)
        elif btn_name == 'compensate':
            self.stack.setCurrentIndex(2)
            import subprocess
            subprocess.run(["python", "magnetic_viewer.py"])
        else:
            self.stack.setCurrentIndex(0)

    def on_compensate(self):
        """Toggle compensation collection (requires a running monitor)."""
        if not self.monitor_running:
            self.status.showMessage("Please, connect sensor")
            return
        if not self.compensate:
            self.toolbar_buttons['collection'].setText('Stop')
            # Seed the collector with the current heading reading.
            initial = float(self.data_view['heading'].text())
            self.calibrate = Calibrate(initial)
            self.compensate = True
            self.status.showMessage('Start compensate', 1000)
        else:
            self.toolbar_buttons['collection'].setText('Collection')
            self.progress.setValue(0)
            self.maxdub = self.calibrate.compute()
            del self.calibrate
            self.compensate = False
            self.status.showMessage('Stop compensate', 1000)

    def turn_logging(self):
        """ On/Off logging. If logging ON then bottombar visible """
        # NOTE(review): "~" is a bitwise NOT toggling 0/-1 as an int, not a
        # bool — works as a toggle but "not self.logging_enable" would be
        # the idiomatic form.
        self.logging_enable = ~self.logging_enable
        if self.logging_enable:
            self.record_bar.setVisible(True)
        else:
            self.record_bar.setVisible(False)

    def on_set_chart_xinterval(self, interval):
        """Apply a new x-axis maximum to every chart."""
        for chart in self.charts.values():
            chart.set_xmax(interval)

    def on_select_path(self):
        ''' Select path to save log '''
        if not os.path.exists(REPORT_PATH):
            try:
                os.mkdir(REPORT_PATH)
            except OSError:
                self.status.showMessage("Error creating path", 2000)
        fname, _ = QFileDialog.getSaveFileName(self, "Select Path",
                                               REPORT_PATH, "Log (*.log)")
        if fname:
            self.lineedit.setText(fname)

    def closeEvent(self, QCloseEvent):
        self._action_quit()

    def _action_quit(self):
        """Best-effort shutdown of the sensor thread, then quit Qt."""
        try:
            self.sensor.terminate()
            self.t.join(timeout=0.1)
        except AttributeError:
            # Sensor was never started.
            pass
        QtCore.QCoreApplication.exit(0)
class HandyAR:
    """Webcam hand-tracking AR demo: segments the hand from a captured
    background, finds the largest contour, then drives calibration and an
    OpenGL overlay from the detected finger points."""
    cap_region_x_begin = 0.5  # start point/total width
    cap_region_y_end = 0.8  # start point/total width
    threshold = 60  # BINARY threshold
    blurValue = 41  # GaussianBlur parameter
    bgSubThreshold = 50

    def __init__(self):
        self.isBgCaptured = 0  # bool, whether the background captured
        # Camera
        self.camera = cv2.VideoCapture(0)
        self.camera.set(10, 200)
        self.handDetect = HandDetect()
        self.calibrate = Calibrate()
        self.demo = None

    def run(self):
        """Main capture loop; keyboard: ESC quit, 'b' capture background,
        'r' reset background, 's' start calibration."""
        while self.camera.isOpened():
            ret, frame = self.camera.read()
            frame = cv2.bilateralFilter(frame, 5, 50, 100)  # smoothing filter
            frame = cv2.flip(frame, 1)  # flip the frame horizontally
            # Draw the region-of-interest rectangle (top-right of frame).
            cv2.rectangle(
                frame, (int(self.cap_region_x_begin * frame.shape[1]), 0),
                (frame.shape[1], int(self.cap_region_y_end * frame.shape[0])),
                (255, 0, 0), 2)
            # half of the original frame for opengl operation
            halfframe = frame[0:int(self.cap_region_y_end * frame.shape[0]),
                              int(self.cap_region_x_begin *
                                  frame.shape[1]):frame.shape[1]]
            cv2.imshow('original', frame)
            if self.isBgCaptured == 1:  # this part wont run until background captured
                img = self.handDetect.removeBG(frame)
                img = img[0:int(self.cap_region_y_end * frame.shape[0]),
                          int(self.cap_region_x_begin *
                              frame.shape[1]):frame.shape[1]]  # clip the ROI
                #cv2.imshow('mask', img)
                # convert image into binary image
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                blur = cv2.GaussianBlur(gray,
                                        (self.blurValue, self.blurValue), 0)
                #cv2.imshow('blur', blur)
                ret, thresh = cv2.threshold(blur, self.threshold, 255,
                                            cv2.THRESH_BINARY)
                #cv2.imshow('ori', thresh)
                # get coutours
                thresh1 = copy.deepcopy(thresh)
                contours, hierarchy = cv2.findContours(
                    thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                length = len(contours)
                maxArea = -1
                if length > 0:
                    for i in range(
                            length
                    ):  # find the biggest contour (according to area)
                        temp = contours[i]
                        area = cv2.contourArea(temp)
                        if area > maxArea:
                            maxArea = area
                            maxi = i
                    contour = contours[maxi]
                    hull = cv2.convexHull(contour)
                    drawing = np.zeros(img.shape, np.uint8)
                    cv2.drawContours(drawing, [contour], 0, (0, 255, 0), 2)
                    cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)
                    imagepoints = self.handDetect.detectFingers(
                        contour, drawing)
                    if imagepoints.__len__() > 0:
                        self.calibrate.calibrate(imagepoints, drawing, frame)
                        glP, glM = self.calibrate.getGLPM()
                        # if you want to see calibration points, comment those lines
                        # draw objects on hand with opengl
                        self.demo = DemoAR(halfframe, glP, glM)
                        self.demo.main()
                    cv2.imshow('output', drawing)
            # Keyboard OP
            k = cv2.waitKey(10)
            if k == 27:  # press ESC to exit
                exit(0)
            elif k == ord('b'):  # press 'b' to capture the background
                self.handDetect.bgModel = cv2.BackgroundSubtractorMOG2(
                    0, self.bgSubThreshold)
                self.isBgCaptured = 1
                print('Background Captured')
            elif k == ord('r'):  # press 'r' to reset the background
                self.handDetect.bgModel = None
                self.isBgCaptured = 0
                self.handDetect.setDoStart(False)
                print('Reset BackGround')
            elif k == ord('s'):
                self.handDetect.setDoStart(True)
                print('Start Calibration')
valid_names = [ 'prev_ball_centre','ball_centre','ball_radius','ball_roundness_metric', 'prev_yellow_robot_centre','yellow_robot_centre','prev_yellow_robot_black_dot','yellow_robot_black_dot','yellow_robot_t_area', 'prev_blue_robot_centre','blue_robot_centre','prev_blue_robot_black_dot','blue_robot_black_dot','blue_robot_t_area', 'plate_area', 'offset_top', 'offset_bottom', 'offset_left', 'offset_right' ] if name in valid_names: return getattr(self, "_"+name) else: return None if __name__ == '__main__': from calibrate import Calibrate calibrate = Calibrate((9,6)) calibrate.calibrate_camera_from_mem("calibration_data/") src_bg = cv.LoadImage("../../../fixtures/12.02.11/empty.jpg") bg = cv.CreateImage((src_bg.width/2, src_bg.height/2),8,3) cv.Resize(src_bg,bg) src_frame = cv.LoadImage("../../../fixtures/12.02.11/shot0015.jpg") frame = cv.CreateImage((src_frame.width/2, src_frame.height/2),8,3) cv.Resize(src_frame,frame) v = Vision(bg, calibrate, fix_distortions=False, main_pitch=False) v.update(frame) ball = v.get_ball() print "Ball information:" print "> center: ", ball[0] if ball else None print "> radius: ", ball[1] if ball else None
def calibrate(self, device_params, pool, miner_name, algorithm, region,
              quick, overwrite, force):
    """Benchmark/calibrate the selected Nvidia devices against every
    requested (pool, miner, algorithm) combination.

    device_params: comma list of device indices, device names, device
    classes, or 'all'.  miner_name/pool/algorithm may each be 'all'.
    Fatal configuration errors are reported via Log().add('fatal', ...).
    """
    devices = Nvidia().get_nvidia_devices(1)
    # Nicehash requires an explicit region.
    if pool == 'nicehash' and region not in [
            'eu', 'usa', 'hk', 'jp', 'in', 'br'
    ]:
        Log().add('fatal', 'a valid region is required for nicehash')
    # Resolve device selectors to actual device objects (one per class
    # when selecting by class name or 'all').
    devices_to_calibrate = []
    device_classes = []
    for device_param in device_params.split(','):
        if device_param.isdigit():
            if int(device_param) >= len(devices):
                Log().add('fatal',
                          'device %d not found' % (int(device_param)))
            else:
                devices_to_calibrate.append(devices[int(device_param)])
        else:
            found = False
            for device in devices:
                if device.name == device_param:
                    devices_to_calibrate.append(device)
                    found = True
                elif (device_param == 'all' or device.dclass == device_param
                      ) and device.dclass not in device_classes:
                    devices_to_calibrate.append(device)
                    device_classes.append(device.dclass)
                    found = True
            if not found:
                Log().add('fatal', 'device %s not found' % (device_param))
    log_dir = Config().get('logging.calibration_log_dir')
    if not log_dir:
        log_dir = "/var/log/minotaur"
    # Resolve miner selection ('all' -> every enabled miner); miner classes
    # are instantiated by title-cased name via eval.
    if miner_name == "all":
        miners = []
        for miner_name in Config().get('miners').keys():
            if Config().get('miners')[miner_name]['enable']:
                miners.append(eval("%s()" % (miner_name.title())))
    else:
        if not miner_name in Config().get('miners').keys():
            Log().add('fatal', 'miner %s is not configured' % (miner_name))
        miners = [eval("%s()" % (miner_name.title()))]
    if len(miners) == 0:
        Log().add('fatal', "no miners available")
    # Resolve pool selection.
    if pool == 'all':
        pools = []
        for pool_name in Config().get('pools').keys():
            if Config().get('pools.%s.enable' % (pool_name)):
                pools.append(pool_name)
    elif pool not in Config().get('pools').keys():
        Log().add('fatal', 'unknown pool: %s' % (pool))
    else:
        pools = [pool]
    # Build algorithms[pool][miner] -> list of algorithm names.
    algorithms = {}
    for pool_name in pools:
        algorithms[pool_name] = {}
        for miner in miners:
            if not pool_name in Pools().pools.keys():
                Log().add('fatal', 'pool %s is not enabled' % (pool_name))
            # NOTE(review): this clobbers the 'pool' parameter inside the
            # loop — intentional-looking but worth confirming.
            pool = Pools().pools[pool_name]
            if miner.name not in pool.supported_miners:
                continue
            if algorithm == "all":
                algorithms[pool_name][
                    miner.name] = miner.supported_algorithms()
            else:
                algorithms[pool_name][miner.name] = []
                for algo_param in algorithm.split(","):
                    if algo_param == 'all':
                        algorithms[pool_name][
                            miner.name] = miner.supported_algorithms()
                    else:
                        if algo_param[0] == '!':
                            # '!name' excludes an algorithm.
                            # NOTE(review): the containment check below uses
                            # algorithms[miner.name], but the top-level keys
                            # of 'algorithms' are pool names — exclusion
                            # likely never fires; verify against upstream.
                            exclude_algo = algo_param[1:]
                            if miner.name in algorithms.keys(
                            ) and exclude_algo in algorithms[miner.name]:
                                algorithms[pool_name][miner.name].remove(
                                    exclude_algo)
                        else:
                            if algo_param in miner.supported_algorithms():
                                algorithms[pool_name][miner.name].append(
                                    algo_param)
    print ""
    self.calibration_banner()
    print ""
    # Run calibration for every selected device/pool/miner/algorithm.
    n = 0
    for device in devices_to_calibrate:
        log_file = "%s/calibration_%d.log" % (log_dir, device.id)
        Log().set_log_file(log_file)
        for pool_name in algorithms.keys():
            for miner in miners:
                if miner.name in algorithms[pool_name].keys():
                    for algorithm in algorithms[pool_name][miner.name]:
                        n += 1
                        if algorithm in Config(
                        ).get('algorithms.single') or algorithm in Config(
                        ).get('algorithms.double'):
                            Calibration().load()
                            if not overwrite and Calibration().get(
                                    '%s.%s.%s' %
                                (device.dclass, miner.name, algorithm)):
                                device.log(
                                    'info',
                                    'not overwriting existing calibration data for miner %s algorithm %s (use --overwrite to override)'
                                    % (miner.name, algorithm))
                            else:
                                Calibrate().start(device, pool_name, miner,
                                                  algorithm, region, quick,
                                                  force)
                        else:
                            Log().add(
                                'warning',
                                'algorithm %s is not in the config file - skipping'
                                % (algorithm))
    # NOTE(review): logged unconditionally here and 'n' is otherwise unused
    # — upstream may have guarded this with "if n == 0:"; confirm.
    Log().add('info', 'nothing to do.')
bd=0, highlightthickness=0) canvas.pack() tk.update() # Create ball ball = Ball(canvas, 'red', eng) i = 0 count = 1000 while i < count: ball.draw_experiment(math.floor(i / 200)) tk.update_idletasks() tk.update() time.sleep(0.01) i = i + 1 ball.gazeSubscriber.stop() tk.destroy() mean_error = eng.ExperimentData() print('Mean Error: ' + str(mean_error) + 'px') if __name__ == '__main__': print("Start MatLab Engine:\n ") eng = matlab.engine.start_matlab() print("Start Calibrate:\n ") Calibrate.calibrate(eng) print("Start experiment:\n") Experiment.experiment(eng)
def calibrate(objname): from calibrate import Calibrate calv = Calibrate(obj=objname + ' %%', filtercol='V') calv.resetframes() calv.getrefframes() for r in calv.ref: print r for ref in calv.ref: print 'reference:', ref calv.getframes(ref) calv.corrframes(ref) calb = Calibrate(obj=objname + ' %%', filtercol='B') calb.resetframes() calb.getrefframes() for r in calb.ref: print r for ref in calb.ref: print 'reference:', ref calb.getframes(ref) calb.corrframes(ref)
bufferAvg < minValueFromCal ): # this gets the minimum 8-sensor average from the time that calibration was run minValueFromCal = bufferAvg # it sets the threshold that separates gestures from resting. else: if (calibrateFlag == 1): mode = CALIBRATING gesture = REST mode = controlLogic(mode, gesture, 0) if (mode == CALIBRATING): print("mode = CALIBRATING") fistCal = Calibrate() fistGrouping = fistCal.getMaxGrouping(fistCalData) piwiCal = Calibrate() piwiGrouping = piwiCal.getMaxGrouping(piwiCalData) piwoCal = Calibrate() piwoGrouping = piwoCal.getMaxGrouping(piwoCalData) minValueFromCalArray = [ minValueFromCal, minValueFromCal, minValueFromCal ] with open(CSVFILE, 'w') as csvfile: writer = csv.writer(csvfile) writer.writerow(fistGrouping)
def __init__(self, video_port=0, pitch=None, planner_callback=None):
    """Set up the vision pipeline: choose a pitch, open the camera, run
    colour calibration, register the display filters and start the GUI.

    video_port       -- capture-device index handed to Camera.
    pitch            -- pitch number; prompted for interactively when None.
    planner_callback -- callable invoked with planner output; defaults to
                        logging through info().
    """
    self.config = Config(self)
    # Keep prompting on stdin until the operator names a valid pitch room.
    while pitch is None:
        req_room = raw_input("Enter Pitch Room [{opts}]: ".format(opts="/".join(self.config.pitch_room.options)))
        # unifier normalises input: strip non-word chars, upper-case.
        # NOTE(review): the lambda parameter shadows the builtin `str`.
        pitch_no = self.config.pitch_room.getCode(req_room, unifier=lambda str: re.sub(r'\W+', '', str.upper()))
        if pitch_no is not None:
            pitch = pitch_no
    self.config.pitch_room.selected = pitch
    # Colour profile is tied to the physical room the pitch lives in.
    if pitch == 0:
        self.config.colours = colour_profiles['pitch_3d03']
    elif pitch == 1:
        self.config.colours = colour_profiles['pitch_3d04']
    self.cam = Camera(port=video_port, pitch=pitch, config=self.config)
    #print("Camera initialised")

    def print_function(x):
        # Default sink for planner output: forward to the info() logger.
        info(x)

    if planner_callback is None:
        self.planner_callback = (lambda x: print_function(x))
    else:
        self.planner_callback = planner_callback
    c = Calibrate(self.cam, self.config)
    self.world_latest = World()
    self.world_previous = None
    # c.run(True)
    print("Basic camera calibration complete")
    # Interactive colour calibration; presumably returns None when the
    # operator skips it -- both branches below assume that.
    colours = c.calibrateColor(self.cam)
    # Flat store of calibrated triples; 10 rows suggests 5 colours x
    # (max, min) -- TODO confirm layout against calibrateColor().
    all_colors = np.empty([10, 3], dtype = np.uint8)
    color_id = 0
    if colours is not None:
        for colour, data in colours.iteritems():
            if data is not None:
                for field in data:
                    self.config.colours[colour][field] = np.uint8(data[field])
                    all_colors[color_id] = np.uint8(data[field])
                    color_id += 1
        print("Colors recorded")
        # Persist for future runs (np.save appends the .npy suffix itself).
        np.save("color_calibrations", all_colors)
    else:
        print("Colors calibration skipped")
        # Fall back to values saved by a previous calibration run.
        all_colors = np.load("color_calibrations.npy")
        all_color_names = ['red', 'yellow', 'blue', 'green', 'pink']
        # Saved layout: even rows are 'max', odd rows are 'min'.
        for i in range(0,5):
            self.config.colours[all_color_names[i]]['max'] = all_colors[i*2]
            self.config.colours[all_color_names[i]]['min'] = all_colors[i*2+1]
            # print all_color_names[i], all_colors[i*2+1], all_colors[i*2]
    # Display filters selectable from the GUI.
    self.config.addFilter("overlay", filters.filter_overlay, default=1)
    self.config.addFilter("grayscale", filters.filter_grayscale)
    self.config.addFilter("normalised", filters.filter_normalize)
    self.config.addFilter("red", partial(filters.filter_colour, "red"))
    self.config.addFilter("yellow", partial(filters.filter_colour, "yellow"))
    self.config.addFilter("blue", partial(filters.filter_colour, "blue"))
    self.config.addFilter("green", partial(filters.filter_colour, "green"))
    self.config.addFilter("pink", partial(filters.filter_colour, "pink"))
    self.config.addFilter("manual colour", partial(filters.filter_colour, None))
    #print("Filters set up")
    #print("Initialising trackers")
    # NOTE(review): colour labels look swapped (blue tracker passes 'yellow',
    # yellow tracker passes 'blue') -- confirm against DotTracker usage.
    self.tracker_ball = DotTracker(0, 0, 'red', self.config, "ball")
    self.tracker_blue = DotTracker(0, 0, 'yellow', self.config, "robot")
    self.tracker_yellow = DotTracker(0, 0, 'blue', self.config, "robot")
    self.config.GUI()
    # Enters the display loop; this constructor does not return until the
    # display is closed.
    self.display()
class VisionWrapper:
    """Glue between camera capture, lens/colour calibration, the Vision
    tracker and network transmission of tracking results.

    Runs either from the live camera (online_feed) or from a recorded
    video file (offline_feed).  Built on the legacy OpenCV 1.x `cv` API.
    """

    def __init__(self, draw=True, colour='blue', side='left', main_pitch=True, load_bg_model=False, corners_from_mem=False):
        """Load the background image, calibrate the camera from saved data
        and construct the vision/communication pipeline.

        draw             -- show the annotated feed in a window.
        colour, side     -- our team colour and defended side, forwarded to
                            VisionCom.
        load_bg_model    -- reuse a stored background model instead of
                            averaging fresh frames.
        corners_from_mem -- reuse stored pitch-corner positions.
        """
        # Resolve data files relative to the script's own directory so the
        # current working directory does not matter.
        _filepath = sys.argv[0]
        _relpath = os.path.split(_filepath)[0]
        # NOTE(review): `is not ''` compares identity, not equality -- works
        # only by CPython interning accident; should be `!= ''`.
        if _relpath is not '':
            _relpath = _relpath + '/'
        # Static image of the empty pitch used as background reference.
        self._bg = cv.LoadImage(_relpath + "b02.jpg")
        self._draw = draw
        self._colour = colour
        self._side = side
        self._n_avg_bg = 30  # frames averaged to build the background model
        self._corners_from_mem = corners_from_mem
        self._last_update = 0
        self._fps_update_interval = 0.5  # seconds between FPS recomputations
        self._num_frames = 0
        self._fps = 0
        # (9, 6) is the chessboard corner grid used for lens calibration.
        self._calibrate = Calibrate((9,6))
        self.calibrate_cam(from_mem = True)
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; this reads a local config file.
        self._config = yaml.load(file('config/020311-0830-main_pitch.yaml', 'r'))
        self._vision = Vision(self._bg, self._calibrate, self._config, fix_distortions = True, main_pitch=main_pitch, corners_from_mem=self._corners_from_mem)
        self._vision_com = VisionCom(self._vision, colour=self._colour, side=self._side)
        self._ba = None
        self._load_bg_model = load_bg_model
        self._frame_name = 'annotated'  # which frame variant to display
        self._capture = Capture()
        self._base_time = 0  # ms epoch used by correct_timestamp()

    def online_feed(self):
        """Run the live loop: build the background model, then update the
        tracker, transmit results and handle display/keyboard until ESC/q.
        """
        frames = []
        if self._load_bg_model:
            # Model comes from disk; a single frame is enough to seed it.
            frames = [self._capture.pull()]
        else:
            frames = [self._capture.pull() for i in range(self._n_avg_bg)]
        self._vision.avg_bg_frames(frames, load_from_mem=self._load_bg_model)
        reset_base = False
        last_base_time = 0
        time_interval = 60  # seconds between timestamp re-baselines
        while 1:
            lstart = time.time()
            # Periodically resynchronise the frame-timestamp baseline.
            if lstart - last_base_time >= time_interval:
                self.calc_baseline()
                last_base_time = time.time()
            frame = self._capture.pull()
            timestamp = datetime.fromtimestamp(self.correct_timestamp() / 1000.0).strftime('%Y-%m-%dT%H:%M:%S.%f')
            self._vision.update(frame)
            self._vision_com.transmit(timestamp)
            self.update_fps()
            # Fall back to the annotated frame if the requested variant is
            # unavailable this iteration.
            try:
                if self._frame_name == 'annotated':
                    vframe = self._vision.get_frame(name='source', annotated=True)
                else:
                    vframe = self._vision.get_frame(name=self._frame_name, annotated=False)
            except:
                vframe = self._vision.get_frame(name='annotated', annotated=True)
            # Overlay the measured FPS in red at the top-left corner.
            font = cv.InitFont(cv.CV_FONT_HERSHEY_PLAIN, 0.75, 0.75, 0.0, 1, cv.CV_AA)
            msg = ">> FPS: %.2f" % (self._fps,)
            cv.PutText(vframe, msg, (10,15), font, cv.Scalar(0,0,255))
            if self._draw:
                cv.ShowImage("Feed", vframe)
            # Poll the keyboard; % 0x100 masks off modifier bits.
            c = cv.WaitKey(2) % 0x100
            if c != -1:
                if c == 27 or chr(c) == 'q' :
                    break
                if chr(c) == 'p' :
                    # Snapshot the current display frame to disk.
                    cv.SaveImage(timestamp + ".jpg", vframe)
                # Remaining keys switch which frame variant is displayed.
                if chr(c) == 'a' :
                    self._frame_name = 'annotated'
                if chr(c) == 's' :
                    self._frame_name = 'source'
                if chr(c) == 'r' :
                    self._frame_name = 'threshed_red'
                if chr(c) == 'y' :
                    self._frame_name = 'threshed_yellow'
                if chr(c) == 'b' :
                    self._frame_name = 'threshed_blue'
                if chr(c) == 'h' :
                    self._frame_name = 'threshed_black'
                if chr(c) == 'g' :
                    self._frame_name = 'threshed_green'
                if chr(c) == 'f' :
                    self._frame_name = 'foreground'
                if chr(c) == 'c' :
                    self._frame_name = 'foreground_connected'

    def offline_feed(self, filename):
        """Replay *filename* through the vision pipeline at the file's own
        frame rate, undistorting each frame first.  ESC aborts.
        """
        capture = cv.CaptureFromFile(filename)
        width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
        height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
        fps = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS))
        frames = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))
        # Stop two frames early -- presumably the tail frames decode
        # unreliably; confirm against the capture backend.
        for f in range(frames - 2):
            frame = cv.QueryFrame(capture)
            timestamp = datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
            frame = self._calibrate.undistort_single_image(frame)
            self._vision.update(frame)
            self._vision_com.transmit(timestamp)
            self.update_fps()
            if self._draw:
                cv.ShowImage("Feed", frame)
            # Pace playback to the source FPS; ESC (27) exits early.
            if cv.WaitKey(1000 / fps) == 27:
                break

    def calc_baseline(self):
        """Flush buffered frames, then record 'now' (ms) as the timestamp
        baseline used by correct_timestamp()."""
        for i in range(3):
            self._capture.pull()
        self._base_time = int(round(time.time() * 1000))

    def correct_timestamp(self):
        """Return the current time in ms snapped to the 40 ms frame grid
        anchored at the last baseline (40 ms ~ 25 fps)."""
        t = int(round(time.time() * 1000))
        return t - (t - self._base_time) % 40

    def update_fps(self):
        """Accumulate a frame count and refresh the FPS estimate once per
        update interval."""
        self._num_frames += 1
        current_update = time.time()
        if(current_update - self._last_update > self._fps_update_interval):
            self._fps = self._num_frames / (current_update - self._last_update)
            self._last_update = current_update
            self._num_frames = 0

    def calibrate_cam(self, from_mem=True, filenames=[]):
        """Calibrate camera intrinsics, either from data saved in
        calibration_data/ or from a list of chessboard image files.

        NOTE(review): mutable default for `filenames` -- harmless here since
        it is never mutated, but fragile.
        """
        if from_mem:
            self._calibrate.calibrate_camera_from_mem("calibration_data/")
        else:
            # `file` here shadows the py2 builtin of the same name.
            images = [cv.LoadImage(file) for file in filenames]
            self._calibrate.calibrate_camera(images, save_data=True)
def __init__(self, video_port=0, pitch=None, planner_callback=None):
    """Initialise the vision system: select a pitch interactively if none
    is given, open the camera, perform colour calibration (or reload saved
    values), install display filters and launch the GUI/display loop.

    video_port       -- capture-device index handed to Camera.
    pitch            -- pitch number; prompted for on stdin when None.
    planner_callback -- callable receiving planner output; defaults to
                        logging via info().
    """
    self.config = Config(self)
    # Prompt until a recognised pitch room is entered.
    while pitch is None:
        req_room = raw_input("Enter Pitch Room [{opts}]: ".format(
            opts="/".join(self.config.pitch_room.options)))
        # Normalise the reply (drop non-word chars, upper-case) before
        # looking it up.  NOTE(review): lambda arg shadows builtin `str`.
        pitch_no = self.config.pitch_room.getCode(
            req_room, unifier=lambda str: re.sub(r'\W+', '', str.upper()))
        if pitch_no is not None:
            pitch = pitch_no
    self.config.pitch_room.selected = pitch
    # Each physical room gets its own colour profile.
    if pitch == 0:
        self.config.colours = colour_profiles['pitch_3d03']
    elif pitch == 1:
        self.config.colours = colour_profiles['pitch_3d04']
    self.cam = Camera(port=video_port, pitch=pitch, config=self.config)
    #print("Camera initialised")

    def print_function(x):
        # Default planner sink: route output to the info() logger.
        info(x)

    if planner_callback is None:
        self.planner_callback = (lambda x: print_function(x))
    else:
        self.planner_callback = planner_callback
    c = Calibrate(self.cam, self.config)
    self.world_latest = World()
    self.world_previous = None
    # c.run(True)
    print("Basic camera calibration complete")
    # Interactive colour calibration; returns None when skipped by the
    # operator (handled by the else branch below).
    colours = c.calibrateColor(self.cam)
    # Buffer for the calibrated triples; 10 rows suggests 5 colours x
    # (max, min) -- TODO confirm against calibrateColor()'s output.
    all_colors = np.empty([10, 3], dtype=np.uint8)
    color_id = 0
    if colours is not None:
        for colour, data in colours.iteritems():
            if data is not None:
                for field in data:
                    self.config.colours[colour][field] = np.uint8(
                        data[field])
                    all_colors[color_id] = np.uint8(data[field])
                    color_id += 1
        print("Colors recorded")
        # Save for later runs; np.save adds the .npy suffix itself.
        np.save("color_calibrations", all_colors)
    else:
        print("Colors calibration skipped")
        # Reuse the values persisted by an earlier calibration.
        all_colors = np.load("color_calibrations.npy")
        all_color_names = ['red', 'yellow', 'blue', 'green', 'pink']
        # Saved layout: even index = 'max', odd index = 'min'.
        for i in range(0, 5):
            self.config.colours[all_color_names[i]]['max'] = all_colors[i * 2]
            self.config.colours[all_color_names[i]]['min'] = all_colors[i * 2 + 1]
            # print all_color_names[i], all_colors[i*2+1], all_colors[i*2]
    # Register the display filters the GUI can toggle between.
    self.config.addFilter("overlay", filters.filter_overlay, default=1)
    self.config.addFilter("grayscale", filters.filter_grayscale)
    self.config.addFilter("normalised", filters.filter_normalize)
    self.config.addFilter("red", partial(filters.filter_colour, "red"))
    self.config.addFilter("yellow", partial(filters.filter_colour, "yellow"))
    self.config.addFilter("blue", partial(filters.filter_colour, "blue"))
    self.config.addFilter("green", partial(filters.filter_colour, "green"))
    self.config.addFilter("pink", partial(filters.filter_colour, "pink"))
    self.config.addFilter("manual colour", partial(filters.filter_colour, None))
    #print("Filters set up")
    #print("Initialising trackers")
    # NOTE(review): the colour strings for the blue/yellow trackers appear
    # swapped -- verify against DotTracker before relying on the names.
    self.tracker_ball = DotTracker(0, 0, 'red', self.config, "ball")
    self.tracker_blue = DotTracker(0, 0, 'yellow', self.config, "robot")
    self.tracker_yellow = DotTracker(0, 0, 'blue', self.config, "robot")
    self.config.GUI()
    # Blocks in the display loop; the constructor does not return until
    # the display is closed.
    self.display()