def __init__(self, height, width, fps, device, background=None, alpha=None, fullframe_path=None, gain=100):
    """Connect to a Flea3 camera by serial number and start capturing.

    :param height: frame height, forwarded to the base class
    :param width: frame width, forwarded to the base class
    :param fps: capture frame rate in Hz; also used to derive the shutter time
    :param device: camera serial number (anything ``int()`` accepts)
    :param background: forwarded to the base class
    :param alpha: forwarded to the base class
    :param fullframe_path: optional path for storing full frames
    :param gain: camera gain to apply
    :raises RuntimeError: if no camera is attached or the Format7 settings
        are rejected by the camera
    """
    super().__init__(height, width, background, alpha)
    self.fps = fps
    self.device = device
    self.fullframe_path = fullframe_path
    self.gain = gain
    self.counter = 0

    # Enumerate every attached camera so the operator can see what exists.
    bus = PyCapture2.BusManager()
    numCams = bus.getNumOfCameras()
    print(10 * '*' + ' AVAILABLE CAMERAS ' + 10 * '*')
    for i in range(numCams):
        print('\n{})'.format(i + 1))
        self.cap = PyCapture2.Camera()
        self.uid = bus.getCameraFromIndex(i)
        self.cap.connect(self.uid)
        self.print_cam_info(self.cap)
        self.cap.disconnect()

    bus = PyCapture2.BusManager()
    if not numCams:
        raise RuntimeError('No Flea3 camera detected')

    # Select the requested camera by serial number.
    self.cap = PyCapture2.Camera()
    self.uid = bus.getCameraFromSerialNumber(int(device))
    self.cap.connect(self.uid)
    print(10 * '*' + ' SELECTED CAMERA ' + 10 * '*')
    self.print_cam_info(self.cap)

    # Format7 MODE_4, 2048x1080 8-bit mono region.
    fmt7imgSet = PyCapture2.Format7ImageSettings(
        PyCapture2.MODE.MODE_4, 0, 0, 2048, 1080,
        PyCapture2.PIXEL_FORMAT.MONO8)
    fmt7pktInf, isValid = self.cap.validateFormat7Settings(fmt7imgSet)
    if not isValid:
        raise RuntimeError("Format7 settings are not valid!")
    self.cap.setFormat7ConfigurationPacket(
        fmt7pktInf.recommendedBytesPerPacket, fmt7imgSet)

    self.cap.setProperty(type=PyCapture2.PROPERTY_TYPE.FRAME_RATE,
                         absValue=fps)
    # NOTE(review): absValue=False looks wrong for disabling auto exposure —
    # PyCapture2 normally uses onOff/autoManualMode for that; confirm intent.
    self.cap.setProperty(type=PyCapture2.PROPERTY_TYPE.AUTO_EXPOSURE,
                         absValue=False)
    # Shutter time in ms: one full frame period.
    self.cap.setProperty(type=PyCapture2.PROPERTY_TYPE.SHUTTER,
                         absValue=1 / fps * 1000)
    # Bug fix: the gain argument was accepted but a hard-coded 100 was
    # always applied; honor the caller's value (default stays 100).
    self.cap.setProperty(type=PyCapture2.PROPERTY_TYPE.GAIN, absValue=gain)
    self.cap.startCapture()
def capture(self):
    """Grab ``self.n_images`` full-frame images and rename the output files.

    Each grab writes a PyCapture2 default "fc2*" file into the working
    directory, which is then renamed to "<input_angle>.<i>.tif".

    :raises RuntimeError: if the Format7 settings are rejected by the camera.
    """
    bus = PyCapture2.BusManager()
    cam = PyCapture2.Camera()
    maxWidth = 2448
    maxHeight = 2048
    cam.connect(bus.getCameraFromIndex(0))
    #cam.setProperty(type = PyCapture2.PROPERTY_TYPE.AUTO_EXPOSURE, absValue = self.exp_time)
    cam.setProperty(type=PyCapture2.PROPERTY_TYPE.GAIN, absValue=self.gain)

    # Full-sensor Format7 mode 0, 8-bit mono.
    fmt7imgSet = PyCapture2.Format7ImageSettings(
        0, 0, 0, maxWidth, maxHeight, PyCapture2.PIXEL_FORMAT.MONO8)
    fmt7pktInf, isValid = cam.validateFormat7Settings(fmt7imgSet)
    if not isValid:
        # Bug fix: was print(...) + exit(), which killed the whole process;
        # raise instead so callers can handle the failure.
        raise RuntimeError("Format7 settings are not valid!")
    cam.setFormat7ConfigurationPacket(fmt7pktInf.recommendedBytesPerPacket,
                                      fmt7imgSet)

    cam.startCapture()
    for i in range(self.n_images):
        self.grabImages(cam)
        # Rename the file(s) the SDK just wrote after each grab so every
        # image keeps its own index (renaming only after the loop would
        # stamp every file with the last index and overwrite).
        for filename in os.listdir("."):
            print(filename)
            if filename.startswith("fc2"):
                os.rename(filename, "{}.{}.tif".format(self.input_angle, i))
    cam.stopCapture()
def __init__(self):
    """Create the camera handle and reset all state flags."""
    super(cameraBase, self).__init__()
    # Handle to the PyCapture2 camera; not connected yet.
    self.camera = PyCapture2.Camera()
    # No frame ready and no capture loop running at start.
    self.readyCapture = False
    self.running = False
    # Begin in the default video mode.
    self.mode = self.VIDEO_MODE
def initRealtimeCamera():
    """Connect to the first detected camera, start streaming, return it."""
    camera = PyCapture2.Camera()
    camera.connect(PyCapture2.BusManager().getCameraFromIndex(0))
    camera.startCapture()
    return camera
def __init__(self, camIndex=0):
    """Initialize and start the camera connection.

    Works for multiple cameras as long as a different ``camIndex`` is used
    per instance.

    :param camIndex: index of the camera on the bus to connect to.
    :raises RuntimeError: if ``camIndex`` is not a valid camera index.
    """
    bus = PyCapture2.BusManager()
    numCams = bus.getNumOfCameras()
    if camIndex >= numCams:
        raise RuntimeError('Camera index {} out of range; {} camera(s) '
                           'detected'.format(camIndex, numCams))
    self.camera = PyCapture2.Camera()
    # Bug fix: camIndex was accepted but index 0 was always used, so a
    # second camera could never actually be opened.
    uid = bus.getCameraFromIndex(camIndex)
    self.camera.connect(uid)
    self.camera.startCapture()
def initialize_camera(index):
    """
    Initializes a PT Grey camera by its enumerated index, and returns the
    camera instance.

    :param index: enumerated bus index of the camera to open.
    :return: A camera instance representing the camera and its controls.
    """
    bus = PyCapture2.BusManager()
    cam = PyCapture2.Camera()
    # Connect, apply the project-wide configuration, then begin streaming.
    cam.connect(bus.getCameraFromIndex(index))
    configure_camera(cam)
    cam.startCapture()
    return cam
def __init__(self, camera_id):
    """Connect to the camera at ``camera_id`` and print build/camera info.

    Any failure is treated as fatal: the error is printed and the process
    exits.
    """
    try:
        bus = PyCapture2.BusManager()
        self.bus = bus
        self.num_cams = bus.getNumOfCameras()
        self.camera = PyCapture2.Camera()
        self.uid = bus.getCameraFromIndex(camera_id)
        self.camera.connect(self.uid)
        # Report library and camera details for diagnostics.
        self.print_build_info()
        self.print_camera_info()
    except Exception as err:
        print(err)
        print('Could not load camera, will now exit.')
        exit()
def main():
    """Expose the first attached camera as a Robot Raconteur TCP service."""
    # Enumerate cameras; bail out if none are attached.
    bus = PyCapture2.BusManager()
    numCams = bus.getNumOfCameras()
    print "Number of cameras detected: ", numCams
    if not numCams:
        print "Insufficient number of cameras. Exiting..."
        exit()
    # Select camera on 0th index
    cam = PyCapture2.Camera()
    uid = bus.getCameraFromIndex(0)
    cam.connect(uid)
    # Publish the camera through a Robot Raconteur node on TCP port 5885.
    RR.RobotRaconteurNode.s.NodeName = "OverheadCam"
    overheadCam = overheadCam_imp(cam)
    t = RR.TcpTransport()
    t.StartServer(5885)
    RR.RobotRaconteurNode.s.RegisterTransport(t)
    # Daemon thread that keeps marker detection running in the background.
    updateMarkers = threading.Thread(target=detectMarkers, args=(overheadCam, ))
    updateMarkers.setDaemon(True)
    updateMarkers.start()
    # Load and register the service definition; errors are only logged
    # (NOTE(review): a failed read leaves service_def unset, so the second
    # try would then fail with "error2" — presumably accepted best-effort).
    try:
        with open('overheadCamServiceFast.robdef', 'r') as f:
            service_def = f.read()
    except:
        print("error1")
    try:
        RR.RobotRaconteurNode.s.RegisterServiceType(service_def)
    except:
        print("error2")
    try:
        RR.RobotRaconteurNode.s.RegisterService("ohcam",
                                                "edu.rpi.camService.ohcam",
                                                overheadCam)
        print("Connect at tcp://localhost:5885/OverheadCam/ohcam")
        # Block here until the operator presses enter (Python 2 raw_input).
        raw_input("press enter to quit...\r\n")
    except:
        print("error")
    # Orderly shutdown: RR node, camera, then any OpenCV windows.
    RR.RobotRaconteurNode.s.Shutdown()
    cam.stopCapture()
    cam.disconnect()
    cv2.destroyAllWindows()
def __init__(self):
    """Initialise the PyCapture2 Camera."""
    self.busManager = PyCapture2.BusManager()
    self.camera = PyCapture2.Camera()
    # Temporary frame buffer and frame callback, populated later.
    self.buffer = None
    self.callback = None
    # Camera mode slots, declared here and configured elsewhere.
    self.mode_current = None
    self.mode_default = None
    self.mode_focus = None
    self.mode_capture = None
def connectpyCapture(camera_):
    """Connect to camera ``camera_``, apply its Format7 ROI, start capture.

    Camera 0 gets a full-width 2048x900 region; any other index gets a
    1040x1000 region offset to (700, 400). Returns the capturing camera.
    """
    printBuildInfo()
    bus = py2.BusManager()
    numCams = bus.getNumOfCameras()
    print "Number of cameras detected: ", numCams
    if not numCams:
        print "Insufficient number of cameras. Exiting..."
        exit()
    c = py2.Camera()
    # NOTE(review): this sets an attribute on the Config *class*, not on this
    # camera's configuration object — confirm it has any effect with the SDK.
    py2.Config.highPerformanceRetrieveBuffer = True
    c.connect(bus.getCameraFromIndex(camera_))
    printCameraInfo(c)
    # fmt7info, supported = c.getFormat7Info(0)
    # print "Connecting to camera: " , bool
    # NOTE(review): return value discarded — presumably a leftover probe.
    c.getFormat7Configuration()
    # Per-camera Format7 region of interest, 8-bit mono.
    if camera_ == 0:
        fmt7imgSet = py2.Format7ImageSettings(0, 0, 0, 2048, 900,
                                              py2.PIXEL_FORMAT.MONO8)
    else:
        fmt7imgSet = py2.Format7ImageSettings(0, 700, 400, 1040, 1000,
                                              py2.PIXEL_FORMAT.MONO8)
    # NOTE(review): isValid is never checked before applying the settings.
    fmt7pktInf, isValid = c.validateFormat7Settings(fmt7imgSet)
    c.setFormat7ConfigurationPacket(fmt7pktInf.recommendedBytesPerPacket,
                                    fmt7imgSet)
    c.startCapture()
    return c
def initialize(self): # Initialize video capture # Ensure sufficient cameras are found bus = PyCapture2.BusManager() numCams = bus.getNumOfCameras() print "Number of cameras detected: ", numCams if not numCams: print "Insufficient number of cameras. Exiting..." self.exit() # Select camera on 0th index self.c = PyCapture2.Camera() uid = bus.getCameraFromIndex(0) self.c.connect(uid) printCameraInfo(self.c) print "Starting image capture..."
def listAvailableModes(cam_num=None, cam=None, bus=None):
    """
    List valid video modes and framerates for specified camera.

    Parameters
    ----------
    cam_num : int, optional
        Index of camera to connect to. Cannot be specified alongside <cam>,
        and must be specified if <cam> is not.
    cam : PyCapture2.Camera instance, optional
        Handle to camera connection. Cannot be specified alongside
        <cam_num>, and must be specified if <cam_num> is not.
    bus : PyCapture2.BusManager instance, optional
        Only relevant if <cam> is None / <cam_num> is not None. Bus manager
        to connect camera. If None (default), one will be created.

    Returns
    -------
    res : list
        List of (mode, framerate) tuples giving all valid options.
    """
    # Ensure exactly one of cam_num or cam is specified
    if cam_num is None and cam is None:
        raise ValueError('Must specify cam_num or cam')
    elif cam_num is not None and cam is not None:
        raise ValueError('Can only specify one of cam_num or cam')

    # If no cam given, create one from index. Also create bus if needed.
    if cam is None:
        cam = PyCapture2.Camera()
        if bus is None:
            bus = PyCapture2.BusManager()
        cam.connect(bus.getCameraFromIndex(cam_num))

    # Probe every (mode, framerate) combination; unsupported ones raise.
    res = []
    for modename, modeval in VIDEO_MODES.items():
        for ratename, rateval in FRAMERATES.items():
            try:
                if cam.getVideoModeAndFrameRateInfo(modeval, rateval):
                    res.append((modename, ratename))
            except PyCapture2.Fc2error:
                # Bug fix: was a bare `except: pass`, which also swallowed
                # KeyboardInterrupt and genuine programming errors. Only the
                # SDK's "unsupported combination" error should be ignored.
                pass
    return res
def getPortInfo():
    """Return "model&serial&index" strings for every attached camera.

    Returns None (after printing a message) when no cameras are detected.
    """
    bus = PyCapture2.BusManager()
    num_cams = bus.getNumOfCameras()
    print('Number of cameras detected: ', num_cams)
    if not num_cams:
        print('Insufficient number of cameras. Exiting...')
        return None
    # Briefly connect to each camera just long enough to read its identity.
    camera_infos = []
    for idx in range(num_cams):
        cam = PyCapture2.Camera()
        cam.connect(bus.getCameraFromIndex(idx))
        serial_num, model_name = Chameleon.get_camera_info(cam)
        camera_infos.append("{}&{}&{}".format(str(model_name), serial_num,
                                              idx))
        cam.disconnect()
    return camera_infos
def setup_camera():
    """Connect to the first attached camera, log its details, start capture.

    :return: the connected, capturing ``PyCapture2.Camera`` instance.
    :raises ValueError: when no camera is detected.
    """
    bus = PyCapture2.BusManager()
    num_cams = bus.getNumOfCameras()
    # Bug fix: was eager %-formatting ('... %d' % num_cams); use logging's
    # lazy argument style, consistent with every other call in this function.
    logging.info('Number of cameras detected: %d', num_cams)
    if not num_cams:
        logging.error('Insufficient number of cameras. Exiting...')
        raise ValueError('Insufficient number of cameras. Exiting...')
    cam = PyCapture2.Camera()
    cam.connect(bus.getCameraFromIndex(0))
    # Log the camera's identity for diagnostics.
    cam_info = cam.getCameraInfo()
    logging.info('*** CAMERA INFORMATION ***')
    logging.info('Serial number - %d', cam_info.serialNumber)
    logging.info('Camera model - %s', cam_info.modelName)
    logging.info('Camera vendor - %s', cam_info.vendorName)
    logging.info('Sensor - %s', cam_info.sensorInfo)
    logging.info('Resolution - %s', cam_info.sensorResolution)
    logging.info('Firmware version - %s', cam_info.firmwareVersion)
    logging.info('Firmware build time - %s', cam_info.firmwareBuildTime)
    cam.startCapture()
    return cam
def run(self):
    """Continuously grab frames from camera 0 and emit them as QImages."""
    bus = PyCapture2.BusManager()
    uid = bus.getCameraFromIndex(0)
    c = PyCapture2.Camera()
    c.connect(uid)
    while True:
        # NOTE(review): capture is started and stopped around every single
        # frame; hoisting startCapture() out of the loop would likely be much
        # faster — confirm the camera tolerates continuous capture first.
        c.startCapture()
        img = c.retrieveBuffer()
        c.stopCapture()
        # Raw Bayer mosaic -> BGR colour image, downscaled for display.
        cv_img1 = np.array(img.getData(), dtype="uint8").reshape(
            (img.getRows(), img.getCols()))
        cv_img = cv2.cvtColor(cv_img1, cv2.COLOR_BAYER_BG2BGR)
        cv_img = cv2.resize(cv_img, (380, 270))
        height, width, dim = cv_img.shape
        bytesPerLine = dim * width
        # NOTE(review): the buffer is BGR but QImage is told Format_RGB888,
        # so red/blue channels appear swapped — verify against the display.
        image = QtGui.QImage(cv_img.data, width, height, bytesPerLine,
                             QtGui.QImage.Format_RGB888)
        self.imageChanged.emit(image)
def initializeCameras():
    """Connect and configure every attached camera from the config file.

    Applies the configured video mode, frame rate, embedded-image options and
    bus speeds to each camera, then validates the whole set. Returns a tuple
    (validated, count) where count is 0 unless validation succeeded.
    """
    bus = PyCapture2.BusManager()
    nCameras = bus.getNumOfCameras()

    # Translate the configured settings into PyCapture2 codes up front.
    # enforce a frame rate of 30 or 60 fps (even if the camera supports others)
    frameRateCode = getFrameRateCode(config["DEFAULT"]["frameRate"])
    videoModeCode = getVideoModeCode(config["DEFAULT"]["videoMode"])
    asyncSpeedCode = getBusSpeedCode(config["DEFAULT"]["asyncBusSpeed"])
    isochSpeedCode = getBusSpeedCode(config["DEFAULT"]["isochBusSpeed"])
    if frameRateCode is None:
        raise ValueError("Unhandled frame rate ({}).".format(
            config["DEFAULT"]["frameRate"]))
    if videoModeCode is None:
        raise ValueError("Unhandled video mode ({}).".format(
            config["DEFAULT"]["videoMode"]))

    cameras = []
    for idx in range(nCameras):
        # Create and connect each camera in bus order.
        cam = PyCapture2.Camera()
        cam.connect(bus.getCameraFromIndex(idx))
        # Reject the whole batch if any camera lacks the requested mode.
        if not cam.getVideoModeAndFrameRateInfo(videoModeCode, frameRateCode):
            raise ValueError("Unsupported video mode/frame rate.")
        # Apply mode and remaining settings before keeping the handle.
        cam.setVideoModeAndFrameRate(videoModeCode, frameRateCode)
        cam.setEmbeddedImageInfo(
            timestamp=config["DEFAULT"].getboolean("embedTimestamp"))
        cam.setEmbeddedImageInfo(
            frameCounter=config["DEFAULT"].getboolean("embedFrameCounter"))
        cam.setConfiguration(asyncBusSpeed=asyncSpeedCode)
        cam.setConfiguration(isochBusSpeed=isochSpeedCode)
        cameras.append(cam)

    # all or nothing
    validated = validateCameras(cameras)
    return (validated, (nCameras if validated else 0))
def __init__(self, serial_number):
    """Initialize FlyCapture2 API camera.

    Serial number should be of int type.
    """
    global PyCapture2
    import PyCapture2

    # Refuse to run on library versions that predate Python 3.6 support.
    version = PyCapture2.getLibraryVersion()
    min_ver = (2, 12, 3, 31)  # first release with python 3.6 support
    if version < min_ver:
        raise RuntimeError(
            f"PyCapture2 version {version} must be >= {min_ver}")

    # Connect by serial number and tune buffer/timeout behaviour.
    bus = PyCapture2.BusManager()
    self.camera = PyCapture2.Camera()
    self.camera.connect(bus.getCameraFromSerialNumber(serial_number))
    config = self.camera.getConfiguration()
    config.grabTimeout = 1000  # in ms
    config.highPerformanceRetrieveBuffer = True
    self.camera.setConfiguration(config)

    # Which fields of a property to report back to callers.
    self.get_props = [
        'present', 'absControl', 'absValue', 'onOff', 'autoManualMode',
        'valueA', 'valueB'
    ]

    # Mirror the SDK's pixel-format constants into an IntEnum.
    fmts = {
        name: getattr(PyCapture2.PIXEL_FORMAT, name)
        for name in dir(PyCapture2.PIXEL_FORMAT) if not name.startswith('_')
    }
    self.pixel_formats = IntEnum('pixel_formats', fmts)

    self._abort_acquisition = False
def connect_camera(self):
    """Connect the camera at ``self.index`` and collect its info strings.

    Appends human-readable identity lines to ``self.camInfo``.
    """
    # Get bycapture information
    #libVer = PyCapture2.getLibraryVersion()
    #self.pyCaptureInfo.append("PyCapture2 library version " + str(libVer[0]) + "." + str(libVer[1]) + "." + str(libVer[3]) )
    # Check if there are sufficient cameras
    bus = PyCapture2.BusManager()
    self.numCams = bus.getNumOfCameras()
    if self.index < self.numCams:
        # Connect camera
        self.cam = PyCapture2.Camera()
        uid = bus.getCameraFromIndex(
            self.index)  # This could be used to select multiple cameras
        self.cam.connect(uid)
        #self.enable_embedded_timestamp(True)
        if self.cam is not None:
            # Record the camera's identity for display elsewhere.
            camInfo = self.cam.getCameraInfo()
            self.camInfo.append("Serial number - " +
                                str(camInfo.serialNumber))
            self.camInfo.append("Camera model - " + str(camInfo.modelName))
            self.camInfo.append("Camera vendor - " + str(camInfo.vendorName))
            self.camInfo.append("Sensor - " + str(camInfo.sensorInfo))
            self.camInfo.append("Resolution - " +
                                str(camInfo.sensorResolution))
            self.camInfo.append("Firmware version - " +
                                str(camInfo.firmwareVersion))
            self.camInfo.append("Firmware build time- " +
                                str(camInfo.firmwareBuildTime))
        else:
            self.camInfo.append("No Camera information to display")
    # NOTE(review): when self.index >= numCams nothing is appended and
    # self.cam is never assigned — later reads of self.cam may raise
    # AttributeError; confirm whether a message/flag is expected here.
def run(self):
    """Main detection loop: grab frames, run darknet, estimate board pose.

    Reads the class-name list referenced by the darknet ``.data`` file,
    connects the front/bottom cameras (or a video file), then repeatedly:
    detects objects, collects 2D/3D correspondences for the torpedo board,
    and solves PnP when at least four correspondences are found.
    """
    if not onLinux:
        return
    # Load the darknet network and its metadata.
    self.net = dn.load_net(self.cfgFile, self.weightFile, 0)
    self.meta = dn.load_meta(self.dataFile)
    # Parse the .data file to find the "names" file path.
    dataFile = open(self.dataFile, "r")
    line = dataFile.readline()
    namesFile = None
    while line:
        line = line.replace(" ", "")
        line = line.split("=")
        if line[0] == "names":
            print "Names file:", line
            namesFile = open(line[1].replace("\n", ""), "r")
            break
        line = dataFile.readline()
    dataFile.close()
    # Map each class name to its (zero-based) line index.
    if namesFile != None:
        line = namesFile.readline()
        index = 0
        while line:
            self.detectionDictionary[line.replace("\n", "")] = index
            index += 1
            line = namesFile.readline()
        namesFile.close()
    # Frame source: video file or the two PointGrey cameras.
    # NOTE(review): this branch tests the globals useVideo/videoFile while
    # the loop below tests self.useVideo — confirm they stay in sync.
    if useVideo == True:
        video = cv2.VideoCapture(videoFile)
    else:
        bus = pc2.BusManager()
        self.activeCamera = pc2.Camera()
        self.frontCamera = pc2.Camera()
        self.botCamera = pc2.Camera()
        self.frontCamera.connect(bus.getCameraFromIndex(1))
        self.frontCamera.startCapture()
        time.sleep(1)
        self.botCamera.connect(bus.getCameraFromIndex(0))
        self.botCamera.startCapture()
        self.activeCamera = self.frontCamera
    while self.running == True:
        if self.useVideo == False:
            # Camera path: retrieve a BGR frame and rotate it 180 degrees.
            self.image = self.activeCamera.retrieveBuffer()
            self.image = self.image.convert(pc2.PIXEL_FORMAT.BGR)
            img = np.array(self.image.getData(), dtype="uint8").reshape(
                (self.image.getRows(), self.image.getCols(), 3))
            img = cv2.flip(img, -1)
        else:
            t, img = self.activeCamera.read()
        #yoloImage = dn.IMAGE()
        detections = dn.detect_np(self.net, self.meta, img, thresh=.1,
                                  hier_thresh=.1)
        newDetections = []
        self.imgPoints = []
        self.srcPoints = []
        for detection in detections:
            # detection = (class_name, confidence, (cx, cy, w, h))
            fixedClassNumber = self.detectionDictionary[detection[0]]
            cv2.putText(img, detection[0],
                        (int(detection[2][0] - (.5 * detection[2][2])),
                         int(detection[2][1] - (.5 * detection[2][3]))),
                        cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 255), 2)
            newDetections.append([fixedClassNumber, detection[1],
                                  detection[2]])
            if detection[0] in self.torpedoDictionary or detection[0] == "Corner":
                if detection[0] != "Corner":
                    pass
                    # Known torpedo feature: its centre is a PnP
                    # correspondence with the matching model point.
                    self.imgPoints.append((detection[2][0], detection[2][1]))
                    self.srcPoints.append(self.torpedoSrcPoints[
                        self.torpedoDictionary[detection[0]]])
                elif detection[0] == "Corner":
                    # A generic corner: decide which feature's bounding-box
                    # corner it lies on (within pixelError pixels).
                    pixelError = 20
                    '''cv2.circle(img, (int(detection[2][0]), int(detection[2][1])), 10, (255,0,0), -1)
                    while True:
                        cv2.imshow("Vision", img)
                        if cv2.waitKey(1) == ord(" "):
                            break'''
                    for secondDet in detections:
                        '''cv2.rectangle(img, (int(secondDet[2][0] - (.5 * secondDet[2][2]) - pixelError ),int(secondDet[2][1] - (.5 * secondDet[2][3]) - pixelError)), (int(secondDet[2][0] - (.5 * secondDet[2][2]) + pixelError),int(secondDet[2][1] - (.5 * secondDet[2][3]) + pixelError)), (255, 0, 0), 3)
                        cv2.rectangle(img, (int(secondDet[2][0]-(.5 * secondDet[2][2])), int(secondDet[2][1]- (.5 * secondDet[2][3]))), (int(secondDet[2][0] + (.5*secondDet[2][2])), int(secondDet[2][1] + (.5*secondDet[2][3]))), (0,0,255))
                        while True:
                            cv2.imshow("Vision", img)
                            if cv2.waitKey(1) == ord(" "):
                                break'''
                        if secondDet[0] in self.torpedoDictionary:
                            index = None
                            # Test this corner against the four bbox corners
                            # of each known torpedo detection; the offsets
                            # (+9/+6/+8/+7) select the matching entry in
                            # torpedoSrcPoints — presumably the per-corner
                            # model points; TODO confirm the layout.
                            # Top-left corner?
                            if detection[2][0] >= secondDet[2][0] - (.5 * secondDet[2][2]) - pixelError and detection[2][0] <= secondDet[2][0] - (.5 * secondDet[2][2]) + pixelError:
                                if detection[2][1] <= secondDet[2][1] - (.5 * secondDet[2][3]) + pixelError and detection[2][1] > secondDet[2][1] - (.5 * secondDet[2][3]) - pixelError:
                                    cv2.circle(img, (int(detection[2][0]), int(detection[2][1])), 10, (255, 0, 0), -1)
                                    index = self.torpedoDictionary[secondDet[0]]
                                    index += 9
                            # Top-right corner?
                            elif detection[2][0] >= secondDet[2][0] + (.5 * secondDet[2][2]) - pixelError and detection[2][0] <= secondDet[2][0] + (.5 * secondDet[2][2]) + pixelError:
                                if detection[2][1] <= secondDet[2][1] - (.5 * secondDet[2][3]) + pixelError and detection[2][1] > secondDet[2][1] - (.5 * secondDet[2][3]) - pixelError:
                                    cv2.circle(img, (int(detection[2][0]), int(detection[2][1])), 10, (0, 255, 0), -1)
                                    index = self.torpedoDictionary[secondDet[0]]
                                    index += 6
                            # Bottom-left corner?
                            if detection[2][0] >= secondDet[2][0] - (.5 * secondDet[2][2]) - pixelError and detection[2][0] <= secondDet[2][0] - (.5 * secondDet[2][2]) + pixelError:
                                if detection[2][1] <= secondDet[2][1] + (.5 * secondDet[2][3]) + pixelError and detection[2][1] > secondDet[2][1] + (.5 * secondDet[2][3]) - pixelError:
                                    cv2.circle(img, (int(detection[2][0]), int(detection[2][1])), 10, (0, 0, 255), -1)
                                    index = self.torpedoDictionary[secondDet[0]]
                                    index += 8
                            # Bottom-right corner?
                            elif detection[2][0] >= secondDet[2][0] + (.5 * secondDet[2][2]) - pixelError and detection[2][0] <= secondDet[2][0] + (.5 * secondDet[2][2]) + pixelError:
                                if detection[2][1] <= secondDet[2][1] + (.5 * secondDet[2][3]) + pixelError and detection[2][1] > secondDet[2][1] + (.5 * secondDet[2][3]) - pixelError:
                                    cv2.circle(img, (int(detection[2][0]), int(detection[2][1])), 10, (255, 0, 255), -1)
                                    index = self.torpedoDictionary[secondDet[0]]
                                    index += 7
                            if index == None:
                                pass
                            else:
                                self.srcPoints.append(
                                    self.torpedoSrcPoints[index])
                                self.imgPoints.append((int(detection[2][0]),
                                                       int(detection[2][1])))
        # With at least 4 correspondences, estimate the board pose and draw
        # its projected axis on the frame.
        if len(self.imgPoints) >= 4:
            #print self.imgPoints, self.srcPoints
            rvec, tvec = cv2.solvePnP(
                np.array(self.srcPoints).astype("float32"),
                np.array(self.imgPoints).astype("float32"),
                np.array(self.camMat).astype("float32"),
                np.zeros((4, 1)))[-2:]
            (pose1, jacobian) = cv2.projectPoints(
                np.array([(0.0, 0.0, 0.1)]), rvec, tvec,
                np.array(self.camMat).astype("float32"), None)
            (pose, jacobian) = cv2.projectPoints(
                np.array([(0, 0, 12.0)]), rvec, tvec,
                np.array(self.camMat).astype("float32"), None)
            cv2.line(img, (int(pose1[0][0][0]), int(pose1[0][0][1])),
                     (int(pose[0][0][0]), int(pose[0][0][1])), (255, 0, 0), 2)
        # Draw every detection's bounding box.
        for detection in detections:
            loc = detection[2]
            cv2.rectangle(img,
                          (int(loc[0] - (.5 * loc[2])),
                           int(loc[1] - (.5 * loc[3]))),
                          (int(loc[0] + (.5 * loc[2])),
                           int(loc[1] + (.5 * loc[3]))), (0, 0, 255))
        self.getList.append(newDetections)
        cv2.imshow("Vision", img)
    self.activeCamera.stopCapture()
def run(self):
    """Detection loop: read frames (camera/video/still), run darknet, draw.

    Mirrors the other ``run`` variant but supports a still-image source and
    interactive quit via the 'q' key.
    """
    self.net = dn.load_net(self.cfgFile, self.weightFile, 0)
    self.meta = dn.load_meta(self.dataFile)

    # No-op trackbar callback placeholder.
    def nothing(t):
        pass

    cv2.namedWindow("Vision")
    # Parse the darknet .data file for the "names" file path.
    dataFile = open(self.dataFile, "r")
    line = dataFile.readline()
    namesFile = None
    while line:
        line = line.replace(" ", "")
        line = line.split("=")
        if line[0] == "names":
            print "Names file:", line
            namesFile = open(line[1].replace("\n", ""), "r")
            break
        line = dataFile.readline()
    dataFile.close()
    # Map each class name to its (zero-based) line index.
    if namesFile != None:
        line = namesFile.readline()
        index = 0
        while line:
            self.detectionDictionary[line.replace("\n", "")] = index
            index += 1
            line = namesFile.readline()
        namesFile.close()
    # Frame source: live camera, video file, or a single still image.
    if self.useVideo == False and self.usePicture == False:
        bus = pc2.BusManager()
        self.activeCamera = pc2.Camera()
        self.activeCamera.connect(bus.getCameraFromIndex(0))
        self.activeCamera.startCapture()
    elif self.useVideo == True:
        self.activeCamera = cv2.VideoCapture(self.videoPath)
    elif self.usePicture == True:
        self.sourceImg = cv2.imread(self.picturePath)
    while self.running == True:
        if self.useVideo == False and self.usePicture == False:
            # Camera path: retrieve a BGR frame and rotate it 180 degrees.
            self.image = self.activeCamera.retrieveBuffer()
            self.image = self.image.convert(pc2.PIXEL_FORMAT.BGR)
            img = np.array(self.image.getData(), dtype="uint8").reshape(
                (self.image.getRows(), self.image.getCols(), 3))
            img = cv2.flip(img, -1)
        elif self.useVideo == True:
            t, img = self.activeCamera.read()
        else:
            img = copy.copy(self.sourceImg)
        #yoloImage = dn.IMAGE()
        # NOTE(review): numpy shape is (rows, cols, ch); naming them w, h
        # swaps width/height, and the clamps below inherit the swap; confirm.
        w, h, c = img.shape
        detections = dn.detect_np(self.net, self.meta, img, thresh=.1,
                                  hier_thresh=.1)
        newDetections = []
        self.imgPoints = []
        self.srcPoints = []
        for detection in detections:
            # detection = (class_name, confidence, (cx, cy, w, h))
            fixedClassNumber = self.detectionDictionary[detection[0]]
            newDetections.append(
                [fixedClassNumber, detection[1], detection[2]])
            if detection[0] in self.torpedoDictionary or detection[0] == "Corner":
                if detection[0] != "Corner":
                    # Known feature: centre is a PnP correspondence.
                    self.imgPoints.append(
                        (detection[2][0], detection[2][1]))
                    self.srcPoints.append(self.torpedoSrcPoints[
                        self.torpedoDictionary[detection[0]]])
                    print "Detection", detection[0]
                    if detection[0] == "Top Right Hole":
                        pixelError = 20
                        x, y, width, height = detection[2]
                        # NOTE(review): x1/y1 equal x2/y2 (all four use "+"),
                        # so the sub-image below is empty; the first pair was
                        # probably meant to subtract — confirm intent.
                        x1, y1, x2, y2 = x + (.5 * width) + pixelError, y + (
                            .5 * height) + pixelError, x + (
                            .5 * width) + pixelError, y + (
                            .5 * height) + pixelError,
                        if x1 < 0:
                            x1 = 0
                        if y1 < 0:
                            y1 = 0
                        if x2 >= w:
                            x2 = w - 1
                        if y2 >= h:
                            y2 = h - 1
                        print "Sub img", x1, y1, x2, y2
                        subImg = img[int(y1):int(y2), int(x1):int(x2)]
                        # Blocks here until 'q' is pressed.
                        while True:
                            cv2.imshow("Sub", subImg)
                            if cv2.waitKey(1) == ord("q"):
                                break
                    elif detection[0] == "Corner" and False:
                        # Dead code: "and False" disables this branch.
                        print "Found a corner"
                        pixelError = 20
                        '''cv2.circle(img, (int(detection[2][0]), int(detection[2][1])), 10, (255,0,0), -1)
                        while True:
                            cv2.imshow("Vision", img)
                            if cv2.waitKey(1) == ord(" "):
                                break'''
                        for secondDet in detections:
                            '''cv2.rectangle(img, (int(secondDet[2][0] - (.5 * secondDet[2][2]) - pixelError ),int(secondDet[2][1] - (.5 * secondDet[2][3]) - pixelError)), (int(secondDet[2][0] - (.5 * secondDet[2][2]) + pixelError),int(secondDet[2][1] - (.5 * secondDet[2][3]) + pixelError)), (255, 0, 0), 3)
                            cv2.rectangle(img, (int(secondDet[2][0]-(.5 * secondDet[2][2])), int(secondDet[2][1]- (.5 * secondDet[2][3]))), (int(secondDet[2][0] + (.5*secondDet[2][2])), int(secondDet[2][1] + (.5*secondDet[2][3]))), (0,0,255))
                            while True:
                                cv2.imshow("Vision", img)
                                if cv2.waitKey(1) == ord(" "):
                                    break'''
                            if secondDet[0] in self.torpedoDictionary:
                                index = None
                                # Match this corner against the four bbox
                                # corners of each known torpedo detection;
                                # offsets (+6/+3/+5/+4, then +3) select the
                                # torpedoSrcPoints entry — TODO confirm.
                                if detection[2][0] >= secondDet[2][0] - (.5 * secondDet[2][2]) - pixelError and detection[2][0] <= secondDet[2][0] - (.5 * secondDet[2][2]) + pixelError:
                                    if detection[2][1] <= secondDet[2][1] - (.5 * secondDet[2][3]) + pixelError and detection[2][1] > secondDet[2][1] - (.5 * secondDet[2][3]) - pixelError:
                                        print "In top left corner of", secondDet[0]
                                        cv2.circle(img, (int(detection[2][0]), int(detection[2][1])), 10, (255, 0, 0), -1)
                                        index = self.torpedoDictionary[secondDet[0]]
                                        index += 6
                                elif detection[2][0] >= secondDet[2][0] + (.5 * secondDet[2][2]) - pixelError and detection[2][0] <= secondDet[2][0] + (.5 * secondDet[2][2]) + pixelError:
                                    if detection[2][1] <= secondDet[2][1] - (.5 * secondDet[2][3]) + pixelError and detection[2][1] > secondDet[2][1] - (.5 * secondDet[2][3]) - pixelError:
                                        print "In top right corner of", secondDet[0]
                                        cv2.circle(img, (int(detection[2][0]), int(detection[2][1])), 10, (0, 255, 0), -1)
                                        index = self.torpedoDictionary[secondDet[0]]
                                        index += 3
                                if detection[2][0] >= secondDet[2][0] - (.5 * secondDet[2][2]) - pixelError and detection[2][0] <= secondDet[2][0] - (.5 * secondDet[2][2]) + pixelError:
                                    if detection[2][1] <= secondDet[2][1] + (.5 * secondDet[2][3]) + pixelError and detection[2][1] > secondDet[2][1] + (.5 * secondDet[2][3]) - pixelError:
                                        print "In bottom left corner of", secondDet[0]
                                        cv2.circle(img, (int(detection[2][0]), int(detection[2][1])), 10, (0, 0, 255), -1)
                                        index = self.torpedoDictionary[secondDet[0]]
                                        index += 5
                                elif detection[2][0] >= secondDet[2][0] + (.5 * secondDet[2][2]) - pixelError and detection[2][0] <= secondDet[2][0] + (.5 * secondDet[2][2]) + pixelError:
                                    if detection[2][1] <= secondDet[2][1] + (.5 * secondDet[2][3]) + pixelError and detection[2][1] > secondDet[2][1] + (.5 * secondDet[2][3]) - pixelError:
                                        print "In bottom right corner of", secondDet[0]
                                        cv2.circle(img, (int(detection[2][0]), int(detection[2][1])), 10, (255, 0, 255), -1)
                                        index = self.torpedoDictionary[secondDet[0]]
                                        index += 4
                                if index == None:
                                    pass
                                else:
                                    index += 3
                                    self.srcPoints.append(
                                        self.torpedoSrcPoints[index])
                                    self.imgPoints.append(
                                        (int(detection[2][0]),
                                         int(detection[2][1])))
        # With at least 4 correspondences, estimate the board pose and draw
        # its projected axis on the frame.
        if len(self.imgPoints) >= 4:
            print "Solving..."
            #print self.imgPoints, self.srcPoints
            rvec, tvec = cv2.solvePnP(
                np.array(self.srcPoints).astype("float32"),
                np.array(self.imgPoints).astype("float32"),
                np.array(self.camMat).astype("float32"), np.zeros(
                    (4, 1)))[-2:]
            (pose1, jacobian) = cv2.projectPoints(
                np.array([(0.0, 0.0, 0.1)]), rvec, tvec,
                np.array(self.camMat).astype("float32"), None)
            (pose, jacobian) = cv2.projectPoints(
                np.array([(0, 0, 12.0)]), rvec, tvec,
                np.array(self.camMat).astype("float32"), None)
            cv2.line(img, (int(pose1[0][0][0]), int(pose1[0][0][1])),
                     (int(pose[0][0][0]), int(pose[0][0][1])), (255, 0, 0), 2)
            print "Rvec:", rvec, "\n"
            print "Tvec:", tvec
            # NOTE(review): "< 0" makes this preview loop dead code.
            s = time.time()
            while time.time() - s < 0:
                cv2.imshow("Vision", img)
                k = cv2.waitKey(1)
                if k == ord(" "):
                    break
        # Draw every detection's bounding box.
        for detection in detections:
            loc = detection[2]
            cv2.rectangle(
                img,
                (int(loc[0] - (.5 * loc[2])), int(loc[1] - (.5 * loc[3]))),
                (int(loc[0] + (.5 * loc[2])), int(loc[1] + (.5 * loc[3]))),
                (0, 0, 255))
        self.getList.append(newDetections)
        cv2.imshow("Vision", img)
        key = cv2.waitKey(1)
        if key == ord('q'):
            self.running = False
    self.activeCamera.stopCapture()
def captureImages(comPort, toBeProcessedDir, mode):
    """Capture a light-sequenced image set via software trigger.

    Wakes the camera, arms software triggering, then for each light index
    commands the Arduino over serial, fires a trigger, and grabs a frame.
    An ambient (lights-off) image is taken first. All frames are saved as
    BMPs into ``toBeProcessedDir``.

    :param comPort: serial port of the light controller.
    :param toBeProcessedDir: output directory for the BMP files.
    :param mode: "VISNIR" captures 8 images; "NIR" uses light indices 4-7;
        anything else captures 4 images starting at index 0.
    :return: 0 on success, -1 on any failure.
    """
    printBuildInfo()
    bus = PyCapture2.BusManager()
    numCams = bus.getNumOfCameras()
    if not numCams:
        print("Insufficient number of cameras. Exiting...")
        return -1

    # Initiate Serial connection to the light controller.
    # NOTE(review): early returns below leave ser open; consider try/finally.
    ser = serial.Serial(comPort, 9600, timeout=1)  # 1s timeout
    sleep(1)
    ser.reset_input_buffer()
    ser.reset_output_buffer()

    c = PyCapture2.Camera()
    c.connect(bus.getCameraFromIndex(0))

    # Power on the camera via its power register.
    cameraPower = 0x610
    powerVal = 0x80000000
    c.writeRegister(cameraPower, powerVal)

    # Poll until the camera reports the power bit, with bounded retries.
    retries = 10
    timeToSleep = 0.1  # seconds
    awake = False
    for i in range(retries):
        sleep(timeToSleep)
        try:
            regVal = c.readRegister(cameraPower)
        except PyCapture2.Fc2error:
            # Bug fix: the original compared regVal even when every read had
            # raised, which produced a NameError instead of a clean retry.
            continue
        if regVal == powerVal:
            awake = True
            break
    if not awake:
        print("Could not wake Camera. Exiting...")
        return -1
    printCameraInfo(c)

    # Arm software triggering (source 7 = software trigger).
    triggerMode = c.getTriggerMode()
    triggerMode.onOff = True
    triggerMode.mode = 0
    triggerMode.parameter = 0
    triggerMode.source = 7
    c.setTriggerMode(triggerMode)
    pollForTriggerReady(c)

    grabM = PyCapture2.GRAB_MODE.BUFFER_FRAMES
    c.setConfiguration(numBuffers=20, grabTimeout=5000, grabMode=grabM)
    c.startCapture()

    if not checkSoftwareTriggerPresence(c):
        print("SOFT_ASYNC_TRIGGER not implemented on this Camera! "
              "Stopping application")
        return -1

    # Ambient (lights-off) reference image.
    fireSoftwareTrigger(c)
    image = c.retrieveBuffer()
    pix = PyCapture2.PIXEL_FORMAT.RAW8
    imAmbient = image.convert(pix)

    # VISNIR captures 8 images; NIR shifts the light indices by 4.
    numImages = 4
    if mode == "VISNIR":
        numImages = 8
    offset = 4 if mode == "NIR" else 0

    imgs = []
    start = timer()
    for i in range(numImages):
        pollForTriggerReady(c)
        # Ask the controller to switch on light (i + offset); it echoes the
        # digit back as an acknowledgement.
        serC = str(i + offset).encode()
        ser.write(serC)
        t = ser.read(1)
        if t.decode() != str(i + offset):
            print("Error: Invalid response from Serial port")
            return -1
        fireSoftwareTrigger(c)
        image = c.retrieveBuffer()
        imgs.append(image.convert(pix))

    ser.write(b'9')  # Switch off all the lights

    # Persist every frame plus the ambient reference as BMP files.
    for i, image in enumerate(imgs):
        image.save(
            os.path.join(toBeProcessedDir,
                         ("im" + str(i) + ".bmp")).encode("utf-8"),
            PyCapture2.IMAGE_FILE_FORMAT.BMP)
    imAmbient.save(
        os.path.join(toBeProcessedDir, "imAmbient.bmp").encode("utf-8"),
        PyCapture2.IMAGE_FILE_FORMAT.BMP)
    print('Time taken: {}'.format(timer() - start))

    # Disarm triggering and release camera and serial resources.
    c.setTriggerMode(onOff=False)
    print("Finished grabbing images!")
    c.stopCapture()
    c.disconnect()
    ser.close()
    return 0
cam.setProperty(type = pc2.PROPERTY_TYPE.GAIN, absControl=True, autoManualMode=False, absValue = 12.0) cam.startCapture() image = cam.retrieveBuffer() cam.stopCapture() data = np.array(image.getData()) # np.array() call prevents crash shape = (image.getRows(), image.getCols()) data = data.reshape(shape) return data bus = pc2.BusManager() numCams = bus.getNumOfCameras() print("Number of cameras detected:", numCams) cam = pc2.Camera() cam.connect(bus.getCameraFromIndex(0)) printCameraInfo(cam) fmt7info, supported = cam.getFormat7Info(0) fmt7imgSet = pc2.Format7ImageSettings(0, 0, 0, fmt7info.maxWidth, fmt7info.maxHeight, pc2.PIXEL_FORMAT.MONO8) fmt7pktInf, isValid = cam.validateFormat7Settings(fmt7imgSet) cam.setFormat7ConfigurationPacket(fmt7pktInf.recommendedBytesPerPacket, fmt7imgSet) d = pc2.PROPERTY_TYPE.__dict__ props = [key for key in d.keys() if isinstance(d[key], int)] enableEmbeddedTimeStamp(cam, True) cam.setProperty(type = pc2.PROPERTY_TYPE.SHUTTER, absControl=True, autoManualMode=False, absValue = 0.5) cam.setProperty(type = pc2.PROPERTY_TYPE.GAIN, absControl=True, autoManualMode=False, absValue = 12.0) cam.startCapture()
def __init__(self, height, width, fps, device, subsample=0,
             fullframe_path=None, gain=100, cam_identifier=None,
             start_timestamp=None, roi=None):
    """Connect to a Flea3 camera by serial number and start capturing.

    :param height: frame height, forwarded to the base class
    :param width: frame width, forwarded to the base class
    :param fps: capture frame rate in Hz; also used to derive the shutter time
    :param device: camera serial number (anything ``int()`` accepts)
    :param subsample: forwarded to the base class
    :param fullframe_path: optional path for storing full frames
    :param gain: camera gain to apply
    :param cam_identifier: forwarded to the base class
    :param start_timestamp: forwarded to the base class
    :param roi: forwarded to the base class
    :raises RuntimeError: if no camera is attached or the Format7 settings
        are rejected by the camera
    """
    super().__init__(height, width, fps=fps, subsample=subsample,
                     fullframe_path=fullframe_path,
                     cam_identifier=cam_identifier,
                     start_timestamp=start_timestamp, roi=roi)
    self.device = device
    self.gain = gain

    # Enumerate every attached camera so the operator can see what exists.
    bus = PyCapture2.BusManager()
    numCams = bus.getNumOfCameras()
    print(10 * "*" + " AVAILABLE CAMERAS " + 10 * "*")
    for i in range(numCams):
        print("\n{})".format(i + 1))
        self.cap = PyCapture2.Camera()
        self.uid = bus.getCameraFromIndex(i)
        self.cap.connect(self.uid)
        self.print_cam_info(self.cap)
        self.cap.disconnect()

    bus = PyCapture2.BusManager()
    if not numCams:
        raise RuntimeError("No Flea3 camera detected")

    # Select the requested camera by serial number.
    self.cap = PyCapture2.Camera()
    self.uid = bus.getCameraFromSerialNumber(int(device))
    self.cap.connect(self.uid)
    print(10 * "*" + " SELECTED CAMERA " + 10 * "*")
    self.print_cam_info(self.cap)

    # Format7 MODE_4, 2048x1080 8-bit mono region.
    fmt7imgSet = PyCapture2.Format7ImageSettings(
        PyCapture2.MODE.MODE_4, 0, 0, 2048, 1080,
        PyCapture2.PIXEL_FORMAT.MONO8)
    fmt7pktInf, isValid = self.cap.validateFormat7Settings(fmt7imgSet)
    if not isValid:
        raise RuntimeError("Format7 settings are not valid!")
    self.cap.setFormat7ConfigurationPacket(
        fmt7pktInf.recommendedBytesPerPacket, fmt7imgSet)

    self.cap.setProperty(type=PyCapture2.PROPERTY_TYPE.FRAME_RATE,
                         absValue=fps)
    # NOTE(review): absValue=False looks wrong for disabling auto exposure —
    # PyCapture2 normally uses onOff/autoManualMode for that; confirm intent.
    self.cap.setProperty(type=PyCapture2.PROPERTY_TYPE.AUTO_EXPOSURE,
                         absValue=False)
    # Shutter time in ms: one full frame period.
    self.cap.setProperty(type=PyCapture2.PROPERTY_TYPE.SHUTTER,
                         absValue=1 / fps * 1000)
    # Bug fix: the gain argument was accepted but a hard-coded 100 was
    # always applied; honor the caller's value (default stays 100).
    self.cap.setProperty(type=PyCapture2.PROPERTY_TYPE.GAIN, absValue=gain)
    self.cap.startCapture()
def __init__(self, cam_num, bus=None, video_mode='VM_640x480RGB',
             framerate='FR_30', grab_mode='BUFFER_FRAMES'):
    """
    Class provides methods for controlling camera, capturing images, and
    writing video files.

    Parameters
    ----------
    cam_num : int
        Index of camera to use. See also the getAvailableCameras()
        function.
    bus : PyCapture2.BusManager, optional
        Bus manager object. If None (default), one will be created.
    video_mode : PyCapture2.VIDEO_MODE value or str, optional
        Determines the resolution and colour mode of video capture. Can
        be one of the PyCapture2.VIDEO_MODE codes, or a key for the
        VIDEO_MODES lookup dict. The default is 'VM_640x480RGB'. See
        also the listAvailableModes() function.
    framerate : PyCapture2.FRAMERATE value or str, optional
        Determines the frame rate of the video. Note that this is NOT
        the actual fps value, but the code PyCapture uses to determine
        it. Can be one of the PyCapture2.FRAMERATE codes, or a key for
        the FRAMERATES lookup dict. The default is 'FR_30'. See also
        the listAvailableModes() function.
    grab_mode : PyCapture2.GRAB_MODE value or str, optional
        Method for acquiring images from the buffer. Can be one of the
        PyCapture2.GRAB_MODE codes, or a key for the GRAB_MODES lookup
        dict. BUFFER_FRAMES mode (default) grabs the oldest frame from
        the buffer each time and does not clear the buffer in between;
        this ensures frames are not dropped, but the buffer may overflow
        if the program cannot keep up. DROP_FRAMES mode grabs the newest
        frame from the buffer each time and clears the buffer in
        between; this prevents the buffer overflowing but may lead to
        frames being missed. BUFFER_FRAMES should generally be preferred
        for recording, DROP_FRAMES may be preferable for live streaming.

    Examples
    --------
    Open connection to first available camera.

    >>> cam = Camera(0)

    Optionally open a video file to write frames to.

    >>> cam.openVideoWriter('test.avi')

    Start camera capture - this must be called before acquiring images.

    >>> cam.startCapture()

    Acquire frames in a loop. Optionally display them in an OpenCV
    window. If a video writer was opened, each call to .getImage() will
    also write the frame out to the video file.

    >>> for i in range(300):
    ...     ret, img = cam.getImage()
    ...     if ret:
    ...         cv2.imshow('Camera', img2array(img))
    ...         cv2.waitKey(1)

    Close the camera when done. This will stop camera capture, close the
    video writer (if applicable), and disconnect the camera.

    >>> cam.close()
    """
    # Allocate args to class
    self.cam_num = cam_num
    self.bus = bus
    self.video_mode = video_mode
    self.framerate = framerate
    self.grab_mode = grab_mode

    # Allocate further defaults where needed
    if self.bus is None:
        self.bus = PyCapture2.BusManager()
    if isinstance(self.video_mode, str):
        self.video_mode = VIDEO_MODES[self.video_mode]
    if isinstance(self.framerate, str):
        self.framerate = FRAMERATES[self.framerate]
    if isinstance(self.grab_mode, str):
        self.grab_mode = GRAB_MODES[self.grab_mode]

    # Init camera
    self.cam = PyCapture2.Camera()
    self.uid = self.bus.getCameraFromIndex(self.cam_num)
    self.serial_num = self.bus.getCameraSerialNumberFromIndex(self.cam_num)
    self.cam.connect(self.uid)
    if not self.cam.getStats().cameraPowerUp:
        raise OSError('Camera is not powered on')
    if not self.cam.isConnected:
        # BUG FIX: error message previously misspelled as 'faied'.
        raise OSError('Camera failed to connect')

    # Set mode and frame rate
    if not self.cam.getVideoModeAndFrameRateInfo(self.video_mode,
                                                 self.framerate):
        raise OSError('Video mode and / or frame rate incompatible '
                      'with camera')
    self.cam.setVideoModeAndFrameRate(self.video_mode, self.framerate)

    # Further config
    self.cam.setConfiguration(grabMode=self.grab_mode)

    # Reverse grab image resolution out of video mode
    self.img_size = imgSize_from_vidMode(self.video_mode)

    # Also note fps value
    self.fps = self.cam.getProperty(
        PyCapture2.PROPERTY_TYPE.FRAME_RATE).absValue

    # Place holders for video writer
    self.video_writer = None
    self.csv_fd = None
    self.csv_writer = None

    # Internal flags
    self._capture_isOn = False
    self._video_writer_isOpen = False
def __init__(self, serial_number):
    """Initialize FlyCapture2 API camera.

    Searches all cameras reachable by the host using the provided serial
    number. Fails with API error if camera not found.

    This function also does a significant amount of default
    configuration:

    * It defaults the grab timeout to 1 s
    * Ensures use of the API's HighPerformanceRetrieveBuffer
    * Ensures the camera is in Format 7, Mode 0 with full frame readout
      and MONO8 pixels
    * If using a GigE camera, automatically maximizes the packet size
      and warns if Jumbo packets are not enabled on the NIC

    Args:
        serial_number (int): serial number of camera to connect to
    """
    global PyCapture2
    import PyCapture2

    # Refuse to run against a library too old for Python 3.6.
    lib_ver = PyCapture2.getLibraryVersion()
    min_ver = (2, 12, 3, 31)  # first release with python 3.6 support
    if lib_ver < min_ver:
        raise RuntimeError(
            f"PyCapture2 version {lib_ver} must be >= {min_ver}")

    print('Connecting to SN:%d ...' % serial_number)
    bus_mgr = PyCapture2.BusManager()
    self.camera = PyCapture2.Camera()
    self.camera.connect(bus_mgr.getCameraFromSerialNumber(serial_number))

    # set which values of properties to return
    self.get_props = [
        'present', 'absControl', 'absValue', 'onOff', 'autoManualMode',
        'valueA', 'valueB'
    ]

    # Build an IntEnum over every pixel format the API exposes.
    fmt_map = {
        name: getattr(PyCapture2.PIXEL_FORMAT, name)
        for name in dir(PyCapture2.PIXEL_FORMAT)
        if not name.startswith('_')
    }
    self.pixel_formats = IntEnum('pixel_formats', fmt_map)
    self._abort_acquisition = False

    # check if GigE camera. If so, ensure max packet size is used
    info = self.camera.getCameraInfo()
    if info.interfaceType == PyCapture2.INTERFACE_TYPE.GIGE:
        # need to close generic camera first to avoid strange interactions
        print('Checking Packet size for GigE Camera...')
        self.camera.disconnect()
        gige_cam = PyCapture2.GigECamera()
        gige_cam.connect(bus_mgr.getCameraFromSerialNumber(serial_number))
        mtu = gige_cam.discoverGigEPacketSize()
        if mtu <= 1500:
            msg = """WARNING: Maximum Transmission Unit (MTU) for ethernet NIC
            FlyCapture2_Camera SN:%d is connected to is only %d.
            Reliable operation not expected.
            Please enable Jumbo frames on NIC."""
            print(dedent(msg % (serial_number, mtu)))
        pkt_size_prop = gige_cam.getGigEProperty(
            PyCapture2.GIGE_PROPERTY_TYPE.GIGE_PACKET_SIZE)
        # only set if not already at correct value
        if pkt_size_prop.value != mtu:
            pkt_size_prop.value = mtu
            gige_cam.setGigEProperty(pkt_size_prop)
            print('  Packet size set to %d' % mtu)
        else:
            print('  GigE Packet size is %d' % pkt_size_prop.value)
        # close GigE handle to camera, re-open standard handle
        gige_cam.disconnect()
        self.camera.connect(bus_mgr.getCameraFromSerialNumber(serial_number))

    # set standard device configuration
    cfg = self.camera.getConfiguration()
    cfg.grabTimeout = 1000  # in ms
    cfg.highPerformanceRetrieveBuffer = True
    self.camera.setConfiguration(cfg)

    # ensure camera is in Format7, Mode 0 custom image mode
    f7_info, supported = self.camera.getFormat7Info(0)
    if supported:
        # to ensure Format7, must set custom image settings
        # defaults to full sensor size and 'MONO8' pixel format
        print('Initializing to default Format7, Mode 0 configuration...')
        default_settings = PyCapture2.Format7ImageSettings(
            0, 0, 0, f7_info.maxWidth, f7_info.maxHeight,
            self.pixel_formats['MONO8'].value)
        self._send_format7_config(default_settings)
    else:
        msg = """Camera does not support Format7, Mode 0 custom image
        configuration. This driver is therefore not compatible, as
        written."""
        raise RuntimeError(dedent(msg))
def _cam_init(self, set_gpio=True):
    """Connect to the Point Grey camera and configure capture settings.

    Opens the camera at index ``self.cam_id``, enables the embedded image
    timestamp when the hardware supports it, applies the Format7 ROI
    (``self.roi`` = [x, y, w, h], defaulting to the full sensor), then
    programs frame rate and gain from ``self.frame_rate`` / ``self.gain``
    and resets the frame bookkeeping state.

    Args:
        set_gpio (bool): kept for interface compatibility; the GPIO /
            trigger configuration is currently commented out below.

    Raises:
        RuntimeError: if no Point Grey camera is detected on the bus.
    """
    self.bus = pc2.BusManager()
    self.cam = pc2.Camera()
    if not self.bus.getNumOfCameras():
        display('No Point Grey Research cameras detected')
        # BUG FIX: a bare `raise` with no active exception fails with
        # "RuntimeError: No active exception to re-raise"; raise an
        # explicit, meaningful exception instead.
        raise RuntimeError('No Point Grey Research cameras detected')
    # Run example on the first camera
    uid = self.bus.getCameraFromIndex(self.cam_id)
    self.cam.connect(uid)

    # BUG FIX: enable_timestamp was previously only assigned inside the
    # `if` branch, causing a NameError on cameras without embedded
    # timestamp support.
    enable_timestamp = False
    embedded_info = self.cam.getEmbeddedImageInfo()
    if embedded_info.available.timestamp:
        self.cam.setEmbeddedImageInfo(timestamp=True)
        enable_timestamp = True
    if enable_timestamp:
        display('[PointGrey] - timestamp is enabled.')
    else:
        display('[PointGrey] - timeStamp is disabled.')

    fmt7_info, supported = self.cam.getFormat7Info(0)
    if supported:
        display('[PointGrey] - Max image pixels: ({}, {})'.format(
            fmt7_info.maxWidth, fmt7_info.maxHeight))
        if not len(self.roi):
            # No ROI supplied: default to full-frame readout.
            self.roi = [0, 0, fmt7_info.maxWidth, fmt7_info.maxHeight]
        display('[PointGrey] - Image unit size: ({}, {})'.format(
            fmt7_info.imageHStepSize, fmt7_info.imageVStepSize))
        display('[PointGrey] - Offset unit size: ({}, {})'.format(
            fmt7_info.offsetHStepSize, fmt7_info.offsetVStepSize))

    # Apply the ROI as Format7 Mode 0, MONO8.
    x, y, w, h = self.roi
    fmt7_img_set = pc2.Format7ImageSettings(0, x, y, w, h,
                                            pc2.PIXEL_FORMAT.MONO8)
    fmt7_pkt_inf, isValid = self.cam.validateFormat7Settings(fmt7_img_set)
    if not isValid:
        print('[PointGrey] - Format7 settings are not valid!')
    self.cam.setFormat7ConfigurationPacket(
        fmt7_pkt_inf.recommendedBytesPerPacket, fmt7_img_set)

    # Request the frame rate, then read back the value the camera
    # actually accepted (it may be quantized by the hardware).
    tmp = self.cam.getProperty(pc2.PROPERTY_TYPE.FRAME_RATE)
    tmp.absValue = self.frame_rate
    tmp.onOff = True
    self.cam.setProperty(tmp)
    tmp = self.cam.getProperty(pc2.PROPERTY_TYPE.FRAME_RATE)
    self.frame_rate = tmp.absValue
    display('[PointGrey] - Frame rate is:{0}'.format(self.frame_rate))

    # Set gain
    tmp = self.cam.getProperty(pc2.PROPERTY_TYPE.GAIN)
    tmp.absValue = self.gain
    tmp.onOff = True
    self.cam.setProperty(tmp)

    # GPIO / trigger configuration inherited from the XIMEA version of
    # this driver, kept for reference:
    # self.cam.set_gpi_selector('XI_GPI_PORT1')
    # self.cam.set_trigger_source('XI_TRG_OFF')
    # if set_gpio:
    #     self.cam.set_gpo_selector('XI_GPO_PORT1')
    #     # blink in software for sync purposes (test cam lacks
    #     # XI_GPO_EXPOSURE_PULSE)
    #     self.cam.set_gpo_mode('XI_GPO_ON')
    #     if self.triggered.is_set():
    #         self.cam.set_gpi_mode('XI_GPI_TRIGGER')
    #         self.cam.set_trigger_source('XI_TRG_EDGE_RISING')

    # Reset frame bookkeeping and signal readiness to the parent process.
    self.lastframeid = -1
    self.nframes.value = 0
    self.camera_ready.set()
    self.prev_ts = 0
    self.lasttime = time.time()
def CameraSetup(self, framerate, exposure, use_mono8=False):
    """Connect to the first camera on the bus and configure Format7 capture.

    Parameters
    ----------
    framerate : float
        Absolute frame-rate value to program (auto mode is disabled).
    exposure : float
        Absolute auto-exposure value to program (auto mode is disabled).
    use_mono8 : bool, optional
        If True, use full-frame MONO8 readout. Default False (the
        original behaviour): RAW8 readout with the height reduced to
        512 lines for higher frame rates.

    Returns
    -------
    PyCapture2.Camera
        The connected, configured camera object (also stored on
        ``self.camera``).

    Raises
    ------
    RuntimeError
        If the requested pixel format is unsupported or the Format7
        settings are rejected by the camera.
    """
    bus = self.BusSetup()
    # Select camera on 0th index
    self.camera = PyCapture2.Camera()
    c = self.camera
    c.connect(bus.getCameraFromIndex(0))
    # Print camera details
    self.printCameraInfo(c)
    fmt7info, supported = c.getFormat7Info(0)
    self.printFormat7Capabilities(fmt7info)

    # NOTE: the two branches below were previously `if 0: ... else: ...`
    # dead code; the choice is now a backward-compatible keyword flag.
    if use_mono8:
        # Full-frame MONO8 readout.
        if PyCapture2.PIXEL_FORMAT.MONO8 & fmt7info.pixelFormatBitField == 0:
            raise RuntimeError("Pixel format is not supported\n")
        fmt7imgSet = PyCapture2.Format7ImageSettings(
            0, 0, 0, fmt7info.maxWidth, fmt7info.maxHeight,
            PyCapture2.PIXEL_FORMAT.MONO8)
    else:
        # RAW8 with reduced height (512 lines) for speed.
        if PyCapture2.PIXEL_FORMAT.RAW8 & fmt7info.pixelFormatBitField == 0:
            raise RuntimeError("Pixel format is not supported\n")
        fmt7imgSet = PyCapture2.Format7ImageSettings(
            0, 0, 0, fmt7info.maxWidth, 512, PyCapture2.PIXEL_FORMAT.RAW8)

    # Configure camera format7 settings.
    # BUG FIX: this block used Python-2 `print` statements (syntax errors
    # under Python 3) and killed the process with exit(); it now raises
    # RuntimeError, consistent with the rest of the file.
    fmt7pktInf, isValid = c.validateFormat7Settings(fmt7imgSet)
    if not isValid:
        raise RuntimeError("Format7 settings are not valid!")
    c.setFormat7ConfigurationPacket(
        fmt7pktInf.recommendedBytesPerPacket, fmt7imgSet)

    # Control frame rate and exposure manually.
    c.setProperty(type=PyCapture2.PROPERTY_TYPE.FRAME_RATE,
                  autoManualMode=False)
    c.setProperty(type=PyCapture2.PROPERTY_TYPE.AUTO_EXPOSURE,
                  autoManualMode=False)
    c.setProperty(type=PyCapture2.PROPERTY_TYPE.FRAME_RATE,
                  absValue=framerate)
    c.setProperty(type=PyCapture2.PROPERTY_TYPE.AUTO_EXPOSURE,
                  absValue=exposure)
    return c
###################################################################################################################
#########################                       CAMERA SETUP                       #########################
# Locate the camera hardware via the bus manager (Globally Unique Identifier).
bus = PyCapture2.BusManager()
num_cams = bus.getNumOfCameras()
print('Number of cameras detected:', num_cams)
if not num_cams:
    # No hardware to talk to -- leave the application.
    print('Insufficient number of cameras. Exiting...')
    exit()

# Select camera on 0th index and bind the object to the physical device.
c = PyCapture2.Camera()
uid = bus.getCameraFromIndex(0)
c.connect(uid)

# Disable On-board Image Processing (auto exposure off).
c.setProperty(type=PyCapture2.PROPERTY_TYPE.AUTO_EXPOSURE, onOff=False)

print('Starting image capture...')
c.startCapture()  # begin streaming frames into the image buffers
###################################################################################################################

###################################################################################################################
controller = Controller()  # application controller object
app = DS_GUI()             # main GUI object
# Example Main
#
# BUG FIX: this script used Python-2 `print` statements, which are syntax
# errors under Python 3; converted to the 2/3-compatible function form.
#
# Print PyCapture2 Library Information
print_build_info()

# Ensure sufficient cameras are found
bus = PyCapture2.BusManager()
num_cams = bus.getNumOfCameras()
print('Number of cameras detected: %d' % num_cams)
if not num_cams:
    print('Insufficient number of cameras. Exiting...')
    exit()

# Select camera on 0th index
c = PyCapture2.Camera()
uid = bus.getCameraFromIndex(0)
c.connect(uid)

# Print camera details
print_camera_info(c)

# Enable camera embedded timestamp
enable_embedded_timestamp(c, True)

print('Starting image capture...')
c.startCapture()
grab_images(c, 10)
c.stopCapture()

# Disable camera embedded timestamp
def Connect(self, camera_id=0):
    """Attach to a camera on the bus and cache its device information.

    Args:
        camera_id (int): index of the camera on the bus manager
            (default 0, i.e. the first detected camera).
    """
    self.cam = PyCapture2.Camera()
    # Resolve the bus index to a device GUID, then bind the handle to it.
    guid = self.busman.getCameraFromIndex(camera_id)
    self.cam.connect(guid)
    self.info = self.cam.getCameraInfo()