Exemple #1
0
def printNumOfCam():
    """Query the bus for attached cameras, print the count, and return it."""
    manager = PyCapture2.BusManager()
    camera_count = manager.getNumOfCameras()
    print("Number of cameras detected: ", camera_count)
    return camera_count
Exemple #2
0
def captureImages(comPort, toBeProcessedDir, mode):
    """Capture a sequence of illuminated images from the first camera.

    Drives a light controller over serial: for each image a single-digit
    command selects a light, the controller echoes the digit back, and a
    software trigger fires the camera. All frames are converted to RAW8
    and saved as BMP files into ``toBeProcessedDir``.

    Parameters
    ----------
    comPort : str
        Serial port of the light controller (9600 baud, 1 s timeout).
    toBeProcessedDir : str
        Directory the captured BMP files are written to.
    mode : str
        "VISNIR" captures 8 images; "NIR" captures 4 with a light-index
        offset of 4; any other value captures 4 with no offset.

    Returns
    -------
    int
        0 on success, -1 on any failure.
    """
    printBuildInfo()
    bus = PyCapture2.BusManager()
    numCams = bus.getNumOfCameras()
    if not numCams:
        print("Insufficient number of cameras. Exiting...")
        return -1

    # Initiate Serial connection
    ser = serial.Serial(comPort, 9600, timeout=1)  # 1s timeout
    sleep(1)
    ser.reset_input_buffer()
    ser.reset_output_buffer()
    c = PyCapture2.Camera()
    c.connect(bus.getCameraFromIndex(0))

    # Power on the camera via its power register.
    cameraPower = 0x610
    powerVal = 0x80000000

    c.writeRegister(cameraPower, powerVal)

    # Poll the power register until the camera reports it is awake.
    retries = 10
    timeToSleep = 0.1  # seconds
    awake = False  # BUGFIX: defined up front so the post-loop check is safe
    for i in range(retries):
        sleep(timeToSleep)
        try:
            regVal = c.readRegister(cameraPower)
        except PyCapture2.Fc2error:
            # BUGFIX: skip the comparison when the read failed; previously
            # 'regVal' could be referenced before assignment (NameError).
            continue
        if regVal == powerVal:
            awake = True
            break
    if not awake:
        print("Could not wake Camera. Exiting...")
        ser.close()  # release the serial port on the failure path
        return -1
    printCameraInfo(c)

    # Configure software (source 7) triggering.
    triggerMode = c.getTriggerMode()
    triggerMode.onOff = True
    triggerMode.mode = 0
    triggerMode.parameter = 0
    triggerMode.source = 7

    c.setTriggerMode(triggerMode)
    pollForTriggerReady(c)

    # Buffered grabbing so no triggered frame is dropped.
    grabM = PyCapture2.GRAB_MODE.BUFFER_FRAMES
    c.setConfiguration(numBuffers=20, grabTimeout=5000, grabMode=grabM)

    c.startCapture()

    if not checkSoftwareTriggerPresence(c):
        print(
            "SOFT_ASYNC_TRIGGER not implemented on this Camera! Stopping application"
        )
        return -1

    # Reference frame with no lights commanded (ambient illumination).
    fireSoftwareTrigger(c)
    image = c.retrieveBuffer()
    pix = PyCapture2.PIXEL_FORMAT.RAW8
    imAmbient = image.convert(pix)

    numImages = 4
    if mode == "VISNIR":
        numImages = 8

    offset = 4 if mode == "NIR" else 0
    imgs = []
    start = timer()
    for i in range(numImages):
        pollForTriggerReady(c)
        # Command light (i + offset); the controller echoes the digit back
        # as an acknowledgement before we trigger the exposure.
        serC = str(i + offset).encode()
        ser.write(serC)
        t = ser.read(1)
        if not t.decode() == str(i + offset):
            print("Error: Invalid response from Serial port")
            return -1

        fireSoftwareTrigger(c)
        image = c.retrieveBuffer()
        im1 = image.convert(pix)
        imgs.append(im1)

    ser.write(b'9')  # Switch off all the lights

    for i, image in enumerate(imgs):
        error = image.save(
            os.path.join(toBeProcessedDir,
                         ("im" + str(i) + ".bmp")).encode("utf-8"),
            PyCapture2.IMAGE_FILE_FORMAT.BMP)

    error = imAmbient.save(
        os.path.join(toBeProcessedDir, "imAmbient.bmp").encode("utf-8"),
        PyCapture2.IMAGE_FILE_FORMAT.BMP)

    print('Time taken: {}'.format(timer() - start))

    # Restore free-running mode and release camera and serial resources.
    c.setTriggerMode(onOff=False)
    print("Finished grabbing images!")
    c.stopCapture()
    c.disconnect()

    ser.close()
    return 0
Exemple #3
0
    def __init__(self,
                 height,
                 width,
                 fps,
                 device,
                 subsample=0,
                 fullframe_path=None,
                 gain=100,
                 cam_identifier=None,
                 start_timestamp=None,
                 roi=None):
        """Connect to a Flea3 camera by serial number and start capture.

        Parameters
        ----------
        height, width : int
            Frame dimensions, forwarded to the base class.
        fps : float
            Target frame rate; also used to derive the shutter time.
        device : int or str
            Serial number of the camera to open.
        subsample, fullframe_path, cam_identifier, start_timestamp, roi
            Forwarded unchanged to the base class.
        gain : float, optional
            Camera gain value (default 100).

        Raises
        ------
        RuntimeError
            If no camera is detected or the Format7 settings are invalid.
        """
        super().__init__(height,
                         width,
                         fps=fps,
                         subsample=subsample,
                         fullframe_path=fullframe_path,
                         cam_identifier=cam_identifier,
                         start_timestamp=start_timestamp,
                         roi=roi)

        self.device = device

        bus = PyCapture2.BusManager()
        numCams = bus.getNumOfCameras()

        # List every attached camera so the requested serial number can be
        # checked against what is actually connected.
        print(10 * "*" + " AVAILABLE CAMERAS " + 10 * "*")
        for i in range(numCams):
            print("\n{})".format(i + 1))
            self.cap = PyCapture2.Camera()
            self.uid = bus.getCameraFromIndex(i)
            self.cap.connect(self.uid)
            self.print_cam_info(self.cap)
            self.cap.disconnect()

            # Recreate the bus manager after each disconnect; presumably the
            # previous handle can go stale -- TODO confirm with the SDK docs.
            bus = PyCapture2.BusManager()

        if not numCams:
            raise RuntimeError("No Flea3 camera detected")

        self.cap = PyCapture2.Camera()
        self.uid = bus.getCameraFromSerialNumber(int(device))
        self.cap.connect(self.uid)

        print(10 * "*" + " SELECTED CAMERA " + 10 * "*")
        self.print_cam_info(self.cap)

        # Format7 MODE_4, 2048x1080 MONO8 region anchored at the origin.
        fmt7imgSet = PyCapture2.Format7ImageSettings(
            PyCapture2.MODE.MODE_4, 0, 0, 2048, 1080,
            PyCapture2.PIXEL_FORMAT.MONO8)
        fmt7pktInf, isValid = self.cap.validateFormat7Settings(fmt7imgSet)
        if not isValid:
            raise RuntimeError("Format7 settings are not valid!")

        self.cap.setFormat7ConfigurationPacket(
            fmt7pktInf.recommendedBytesPerPacket, fmt7imgSet)

        self.cap.setProperty(type=PyCapture2.PROPERTY_TYPE.FRAME_RATE,
                             absValue=fps)
        self.cap.setProperty(type=PyCapture2.PROPERTY_TYPE.AUTO_EXPOSURE,
                             absValue=False)
        # Shutter time in milliseconds: one full frame period.
        self.cap.setProperty(type=PyCapture2.PROPERTY_TYPE.SHUTTER,
                             absValue=1 / fps * 1000)
        # BUGFIX: honour the 'gain' argument; it was hard-coded to 100,
        # silently ignoring the parameter (default remains 100, so existing
        # callers see identical behaviour).
        self.cap.setProperty(type=PyCapture2.PROPERTY_TYPE.GAIN,
                             absValue=gain)

        self.cap.startCapture()
Exemple #4
0

def ledimage(cam, shutter = 0.5, gain = 12.0):
    """Grab a single frame with fixed manual shutter and gain.

    Parameters
    ----------
    cam : pc2.Camera
        Connected PyCapture2 camera object.
    shutter : float, optional
        Manual shutter value (absValue), default 0.5.
    gain : float, optional
        Manual gain value (absValue), default 12.0.

    Returns
    -------
    numpy.ndarray
        2-D array of shape (rows, cols) with the raw image data.
    """
    # BUGFIX: use the 'shutter' and 'gain' arguments; the absValue was
    # hard-coded to 0.5 / 12.0, silently ignoring both parameters.
    # Defaults are unchanged, so existing callers behave identically.
    cam.setProperty(type = pc2.PROPERTY_TYPE.SHUTTER, absControl=True, autoManualMode=False, absValue = shutter)
    cam.setProperty(type = pc2.PROPERTY_TYPE.GAIN, absControl=True, autoManualMode=False, absValue = gain)
    cam.startCapture()
    image = cam.retrieveBuffer()
    cam.stopCapture()

    data = np.array(image.getData())  # np.array() call prevents crash
    shape = (image.getRows(), image.getCols())
    data = data.reshape(shape)

    return data

# Enumerate cameras on the bus and report how many were found.
bus = pc2.BusManager()
numCams = bus.getNumOfCameras()
print("Number of cameras detected:", numCams)

# Connect to the first camera and dump its information.
cam = pc2.Camera()
cam.connect(bus.getCameraFromIndex(0))
printCameraInfo(cam)

# Configure Format7 mode 0 at the sensor's full resolution in MONO8.
# NOTE(review): 'isValid' is never checked before applying the packet
# configuration -- confirm the settings are always valid for this camera.
fmt7info, supported = cam.getFormat7Info(0)
fmt7imgSet = pc2.Format7ImageSettings(0, 0, 0, fmt7info.maxWidth, fmt7info.maxHeight, pc2.PIXEL_FORMAT.MONO8)
fmt7pktInf, isValid = cam.validateFormat7Settings(fmt7imgSet)
cam.setFormat7ConfigurationPacket(fmt7pktInf.recommendedBytesPerPacket, fmt7imgSet)

# Collect the names of all integer-valued PROPERTY_TYPE constants.
d = pc2.PROPERTY_TYPE.__dict__
props = [key for key in d.keys() if isinstance(d[key], int)]
Exemple #5
0
    def run(self):
        """Main vision loop (Python 2): grab frames from a camera, video
        file, or still image; run darknet detections; match torpedo-board
        corner detections to model points; solve for pose when at least four
        correspondences are found; and display annotated frames until 'q'
        is pressed.

        Reads self.cfgFile / self.weightFile / self.dataFile for the darknet
        network and appends per-frame detections to self.getList.
        """

        self.net = dn.load_net(self.cfgFile, self.weightFile, 0)
        self.meta = dn.load_meta(self.dataFile)

        # Unused trackbar callback placeholder.
        def nothing(t):
            pass

        cv2.namedWindow("Vision")

        # Parse the darknet .data file to find the class-names file path
        # ("names=..." entry).
        dataFile = open(self.dataFile, "r")
        line = dataFile.readline()
        namesFile = None
        while line:
            line = line.replace(" ", "")
            line = line.split("=")
            if line[0] == "names":
                print "Names file:", line
                namesFile = open(line[1].replace("\n", ""), "r")
                break
            line = dataFile.readline()

        dataFile.close()
        # Build class-name -> index mapping from the names file.
        if namesFile != None:
            line = namesFile.readline()
            index = 0
            while line:
                self.detectionDictionary[line.replace("\n", "")] = index
                index += 1
                line = namesFile.readline()
        # NOTE(review): if no "names" entry was found, namesFile is still
        # None here and close() raises AttributeError -- confirm the .data
        # file always contains a names line, or move this into the if-block.
        namesFile.close()

        # Pick the frame source: PyCapture2 camera (default), a video file,
        # or a single still image.
        if self.useVideo == False and self.usePicture == False:
            bus = pc2.BusManager()
            self.activeCamera = pc2.Camera()
            self.activeCamera.connect(bus.getCameraFromIndex(0))
            self.activeCamera.startCapture()
        elif self.useVideo == True:
            self.activeCamera = cv2.VideoCapture(self.videoPath)
        elif self.usePicture == True:
            self.sourceImg = cv2.imread(self.picturePath)

        while self.running == True:
            # Acquire one BGR frame from the selected source.
            if self.useVideo == False and self.usePicture == False:
                self.image = self.activeCamera.retrieveBuffer()
                self.image = self.image.convert(pc2.PIXEL_FORMAT.BGR)
                img = np.array(self.image.getData(), dtype="uint8").reshape(
                    (self.image.getRows(), self.image.getCols(), 3))
                img = cv2.flip(img, -1)
            elif self.useVideo == True:
                t, img = self.activeCamera.read()
            else:
                img = copy.copy(self.sourceImg)
            #yoloImage = dn.IMAGE()
            # NOTE(review): numpy shape is (rows, cols, channels), so 'w'
            # here is actually the image height and 'h' the width -- the
            # clamping below uses them accordingly; confirm intent.
            w, h, c = img.shape
            detections = dn.detect_np(self.net,
                                      self.meta,
                                      img,
                                      thresh=.1,
                                      hier_thresh=.1)
            newDetections = []
            self.imgPoints = []
            self.srcPoints = []
            for detection in detections:
                # detection = (class_name, confidence, (x, y, w, h)).
                fixedClassNumber = self.detectionDictionary[detection[0]]
                newDetections.append(
                    [fixedClassNumber, detection[1], detection[2]])
                if detection[0] in self.torpedoDictionary or detection[
                        0] == "Corner":
                    if detection[0] != "Corner":
                        # A torpedo hole: record its centre as an image
                        # point and the matching 3-D model point.
                        self.imgPoints.append(
                            (detection[2][0], detection[2][1]))
                        self.srcPoints.append(self.torpedoSrcPoints[
                            self.torpedoDictionary[detection[0]]])
                        print "Detection", detection[0]
                        if detection[0] == "Top Right Hole":
                            pixelError = 20
                            x, y, width, height = detection[2]
                            # NOTE(review): x1/y1 and x2/y2 are computed from
                            # identical expressions, so the sub-image below
                            # is empty -- likely the first pair should
                            # subtract pixelError; confirm.
                            x1, y1, x2, y2 = x + (
                                .5 * width) + pixelError, y + (
                                    .5 * height) + pixelError, x + (
                                        .5 * width) + pixelError, y + (
                                            .5 * height) + pixelError,

                            if x1 < 0:
                                x1 = 0
                            if y1 < 0:
                                y1 = 0
                            if x2 >= w:
                                x2 = w - 1
                            if y2 >= h:
                                y2 = h - 1
                            print "Sub img", x1, y1, x2, y2
                            subImg = img[int(y1):int(y2), int(x1):int(x2)]
                            # Blocking preview until 'q' is pressed.
                            while True:
                                cv2.imshow("Sub", subImg)
                                if cv2.waitKey(1) == ord("q"):
                                    break

                    # NOTE(review): 'and False' makes this branch dead code;
                    # the corner-matching logic below never runs -- confirm
                    # whether it was disabled deliberately.
                    elif detection[0] == "Corner" and False:
                        print "Found a corner"
                        pixelError = 20
                        '''cv2.circle(img, (int(detection[2][0]), int(detection[2][1])), 10, (255,0,0), -1)
					while True:
						cv2.imshow("Vision", img)
						if cv2.waitKey(1) == ord(" "):
							break'''
                        # Classify which corner of which torpedo hole this
                        # corner detection belongs to, by testing proximity
                        # (within pixelError) to each hole's bounding-box
                        # corners.
                        for secondDet in detections:
                            '''cv2.rectangle(img, (int(secondDet[2][0] - (.5 * secondDet[2][2]) - pixelError ),int(secondDet[2][1] - (.5 * secondDet[2][3]) - pixelError)), (int(secondDet[2][0] - (.5 * secondDet[2][2]) + pixelError),int(secondDet[2][1] - (.5 * secondDet[2][3]) + pixelError)), (255, 0, 0), 3)
						cv2.rectangle(img, (int(secondDet[2][0]-(.5 * secondDet[2][2])), int(secondDet[2][1]- (.5 * secondDet[2][3]))), (int(secondDet[2][0] + (.5*secondDet[2][2])), int(secondDet[2][1] + (.5*secondDet[2][3]))), (0,0,255))
						while True:
							cv2.imshow("Vision", img)
							if cv2.waitKey(1) == ord(" "):
								break'''
                            if secondDet[0] in self.torpedoDictionary:
                                index = None
                                # Top-left corner test.
                                if detection[2][0] >= secondDet[2][0] - (
                                        .5 * secondDet[2][2]
                                ) - pixelError and detection[2][
                                        0] <= secondDet[2][0] - (
                                            .5 * secondDet[2][2]) + pixelError:
                                    if detection[2][1] <= secondDet[2][1] - (
                                            .5 * secondDet[2][3]
                                    ) + pixelError and detection[2][
                                            1] > secondDet[2][1] - (
                                                .5 *
                                                secondDet[2][3]) - pixelError:
                                        print "In top left corner of", secondDet[
                                            0]
                                        cv2.circle(img, (int(detection[2][0]),
                                                         int(detection[2][1])),
                                                   10, (255, 0, 0), -1)
                                        index = self.torpedoDictionary[
                                            secondDet[0]]
                                        index += 6
                                # Top-right corner test.
                                elif detection[2][0] >= secondDet[2][0] + (
                                        .5 * secondDet[2][2]
                                ) - pixelError and detection[2][
                                        0] <= secondDet[2][0] + (
                                            .5 * secondDet[2][2]) + pixelError:
                                    if detection[2][1] <= secondDet[2][1] - (
                                            .5 * secondDet[2][3]
                                    ) + pixelError and detection[2][
                                            1] > secondDet[2][1] - (
                                                .5 *
                                                secondDet[2][3]) - pixelError:
                                        print "In top right corner of", secondDet[
                                            0]
                                        cv2.circle(img, (int(detection[2][0]),
                                                         int(detection[2][1])),
                                                   10, (0, 255, 0), -1)
                                        index = self.torpedoDictionary[
                                            secondDet[0]]
                                        index += 3
                                # Bottom-left corner test.
                                if detection[2][0] >= secondDet[2][0] - (
                                        .5 * secondDet[2][2]
                                ) - pixelError and detection[2][
                                        0] <= secondDet[2][0] - (
                                            .5 * secondDet[2][2]) + pixelError:
                                    if detection[2][1] <= secondDet[2][1] + (
                                            .5 * secondDet[2][3]
                                    ) + pixelError and detection[2][
                                            1] > secondDet[2][1] + (
                                                .5 *
                                                secondDet[2][3]) - pixelError:
                                        print "In bottom left corner of", secondDet[
                                            0]
                                        cv2.circle(img, (int(detection[2][0]),
                                                         int(detection[2][1])),
                                                   10, (0, 0, 255), -1)
                                        index = self.torpedoDictionary[
                                            secondDet[0]]
                                        index += 5
                                # Bottom-right corner test.
                                elif detection[2][0] >= secondDet[2][0] + (
                                        .5 * secondDet[2][2]
                                ) - pixelError and detection[2][
                                        0] <= secondDet[2][0] + (
                                            .5 * secondDet[2][2]) + pixelError:
                                    if detection[2][1] <= secondDet[2][1] + (
                                            .5 * secondDet[2][3]
                                    ) + pixelError and detection[2][
                                            1] > secondDet[2][1] + (
                                                .5 *
                                                secondDet[2][3]) - pixelError:
                                        print "In bottom right corner of", secondDet[
                                            0]
                                        cv2.circle(img, (int(detection[2][0]),
                                                         int(detection[2][1])),
                                                   10, (255, 0, 255), -1)
                                        index = self.torpedoDictionary[
                                            secondDet[0]]
                                        index += 4
                                if index == None:
                                    pass
                                else:
                                    # Matched a corner: record the model and
                                    # image point pair for the pose solve.
                                    index += 3
                                    self.srcPoints.append(
                                        self.torpedoSrcPoints[index])
                                    self.imgPoints.append(
                                        (int(detection[2][0]),
                                         int(detection[2][1])))

            # With >= 4 correspondences, solve for board pose and draw the
            # projected board normal on the frame.
            if len(self.imgPoints) >= 4:
                print "Solving..."
                #print self.imgPoints, self.srcPoints
                rvec, tvec = cv2.solvePnP(
                    np.array(self.srcPoints).astype("float32"),
                    np.array(self.imgPoints).astype("float32"),
                    np.array(self.camMat).astype("float32"), np.zeros(
                        (4, 1)))[-2:]
                (pose1, jacobian) = cv2.projectPoints(
                    np.array([(0.0, 0.0, 0.1)]), rvec, tvec,
                    np.array(self.camMat).astype("float32"), None)
                (pose, jacobian) = cv2.projectPoints(
                    np.array([(0, 0, 12.0)]), rvec, tvec,
                    np.array(self.camMat).astype("float32"), None)
                cv2.line(img, (int(pose1[0][0][0]), int(pose1[0][0][1])),
                         (int(pose[0][0][0]), int(pose[0][0][1])), (255, 0, 0),
                         2)
                print "Rvec:", rvec, "\n"
                print "Tvec:", tvec
                s = time.time()
                # NOTE(review): 'time.time() - s < 0' is never true, so this
                # display loop never runs -- possibly disabled deliberately
                # (a positive duration would pause on the solved frame).
                while time.time() - s < 0:
                    cv2.imshow("Vision", img)
                    k = cv2.waitKey(1)
                    if k == ord(" "):
                        break

            # Draw bounding boxes for every detection.
            for detection in detections:
                loc = detection[2]
                cv2.rectangle(
                    img,
                    (int(loc[0] - (.5 * loc[2])), int(loc[1] - (.5 * loc[3]))),
                    (int(loc[0] + (.5 * loc[2])), int(loc[1] + (.5 * loc[3]))),
                    (0, 0, 255))

            # Publish detections and refresh the display; 'q' quits.
            self.getList.append(newDetections)
            cv2.imshow("Vision", img)
            key = cv2.waitKey(1)
            if key == ord('q'):
                self.running = False
        self.activeCamera.stopCapture()
Exemple #6
0
    def set_controller(
            self, controller):  #pass in Controller class as the controller
        """Attach the controller and hand it references to each UI frame.

        The controller receives the video, controls, and setup frame
        instances so it can manipulate them directly.
        """
        self.controller = controller
        wiring = (
            (controller.set_video_frame, self.video_frame),
            (controller.set_controls_frame, self.controls_frame),
            (controller.set_setup_frame, self.setup_frame),
        )
        for register, frame in wiring:
            register(frame)


###################################################################################################################
###################################################################################################################

######################### CAMERA SETUP #########################

# Detect attached cameras and abort when none are present.
# NOTE(review): exit() relies on the 'site' module being loaded; sys.exit()
# is the robust form for scripts -- confirm intent.
bus = PyCapture2.BusManager(
)  # identify the Globally Unique Identifier (GUID) of the camera
num_cams = bus.getNumOfCameras(
)  # determine the number of cameras connected to the PC
print('Number of cameras detected:',
      num_cams)  # print the number of cameras detected
if not num_cams:  # if no cameras are detected exit the application
    print('Insufficient number of cameras. Exiting...')
    exit()

# Select camera on 0th index
c = PyCapture2.Camera()  # Assign Camera object to C
uid = bus.getCameraFromIndex(0)  # select camera on 0th index
c.connect(uid)  # connect camera object to physical camera

# Disable On-board Image Processing
c.setProperty(type=PyCapture2.PROPERTY_TYPE.AUTO_EXPOSURE,
Exemple #7
0
    def __init__(self,
                 cam_num,
                 bus=None,
                 video_mode='VM_640x480RGB',
                 framerate='FR_30',
                 grab_mode='BUFFER_FRAMES'):
        """
        Class provides methods for controlling camera, capturing images,
        and writing video files.

        Parameters
        ----------
        cam_num : int
            Index of camera to use. See also the getAvailableCameras()
            function.
        bus : PyCapture2.BusManager, optional
            Bus manager object. If None (default), one will be created.
        video_mode : PyCapture2.VIDEO_MODE value or str, optional
            Determines the resolution and colour mode of video capture. Can be
            one of the PyCapture2.VIDEO_MODE codes, or a key for the
            VIDEO_MODES lookup dict. The default is 'VM_640x480RGB'. See also
            the listAvailableModes() function.
        framerate : PyCapture2.FRAMERATE value or str, optional
            Determines the frame rate of the video. Note that this is NOT the
            actual fps value, but the code PyCapture uses to determine it.
            Can be one of the PyCapture2.FRAMERATE codes, or a key for the
            FRAMERATES lookup dict. The default is 'FR_30'. See also the
            listAvailableModes() function.
        grab_mode : PyCapture2.GRAB_MODE value or str, optional
            Method for acquiring images from the buffer. Can be one of the
            PyCapture2.GRAB_MODE codes, or a key for the GRAB_MODES lookup
            dict. BUFFER_FRAMES mode (default) grabs the oldest frame from
            the buffer each time and does not clear the buffer in between;
            this ensures frames are not dropped, but the buffer may overflow
            if the program cannot keep up. DROP_FRAMES mode grabs the newest
            frame from the buffer each time and clears the buffer in between;
            this prevents the buffer overflowing but may lead to frames
            being missed. BUFFER_FRAMES should generally be preferred for
            recording, DROP_FRAMES may be preferable for live streaming.

        Raises
        ------
        OSError
            If the camera is not powered on, fails to connect, or the video
            mode / frame rate combination is unsupported.

        Examples
        --------

        Open connection to first available camera.

        >>> cam = Camera(0)

        Optionally open a video file to write frames to.

        >>> cam.openVideoWriter('test.avi')

        Start camera capture - this must be called before acquiring images.

        >>> cam.startCapture()

        Acquire frames in a loop. Optionally display them in an OpenCV window.
        If a video writer was opened, each call to .getImage() will also write
        the frame out to the video file.

        >>> for i in range(300):
        ...     ret, img = cam.getImage()
        ...     if ret:
        ...         cv2.imshow('Camera', img2array(img))
        ...         cv2.waitKey(1)

        Close the camera when done. This will stop camera capture,
        close the video writer (if applicable), and disconnect the camera.

        >>> cam.close()

        """
        # Allocate args to class
        self.cam_num = cam_num
        self.bus = bus
        self.video_mode = video_mode
        self.framerate = framerate
        self.grab_mode = grab_mode

        # Allocate further defaults where needed: resolve string keys to
        # their PyCapture2 codes via the module-level lookup dicts.
        if self.bus is None:
            self.bus = PyCapture2.BusManager()
        if isinstance(self.video_mode, str):
            self.video_mode = VIDEO_MODES[self.video_mode]
        if isinstance(self.framerate, str):
            self.framerate = FRAMERATES[self.framerate]
        if isinstance(self.grab_mode, str):
            self.grab_mode = GRAB_MODES[self.grab_mode]

        # Init camera
        self.cam = PyCapture2.Camera()
        self.uid = self.bus.getCameraFromIndex(self.cam_num)
        self.serial_num = self.bus.getCameraSerialNumberFromIndex(self.cam_num)
        self.cam.connect(self.uid)
        if not self.cam.getStats().cameraPowerUp:
            raise OSError('Camera is not powered on')
        if not self.cam.isConnected:
            # BUGFIX: corrected typo in error message ('faied' -> 'failed').
            raise OSError('Camera failed to connect')

        # Set mode and frame rate
        if not self.cam.getVideoModeAndFrameRateInfo(self.video_mode,
                                                     self.framerate):
            raise OSError('Video mode and / or frame rate incompatible '
                          'with camera')
        self.cam.setVideoModeAndFrameRate(self.video_mode, self.framerate)

        # Further config
        self.cam.setConfiguration(grabMode=self.grab_mode)

        # Reverse grab image resolution out of video mode
        self.img_size = imgSize_from_vidMode(self.video_mode)

        # Also note the actual fps value the camera reports
        self.fps = self.cam.getProperty(
            PyCapture2.PROPERTY_TYPE.FRAME_RATE).absValue

        # Place holders for video writer
        self.video_writer = None
        self.csv_fd = None
        self.csv_writer = None

        # Internal flags
        self._capture_isOn = False
        self._video_writer_isOpen = False
Exemple #8
0
def printNumOfCam():
    """Print how many cameras the bus manager detects; warn when none."""
    manager = PyCapture2.BusManager()
    detected = manager.getNumOfCameras()
    print("Number of cameras detected: ", detected)
    if detected == 0:
        print("Insufficient number of cameras. Exiting...")
Exemple #9
0
    def _cam_init(self,set_gpio=True):
        """Connect to the PointGrey camera, configure ROI / Format7, frame
        rate and gain, then signal that the camera is ready.

        Parameters
        ----------
        set_gpio : bool, optional
            Kept for interface compatibility; the GPIO configuration is
            currently commented out, so the flag has no effect.

        Raises
        ------
        RuntimeError
            If no PointGrey camera is detected on the bus.
        """
        self.bus = pc2.BusManager()
        self.cam = pc2.Camera()
        if not self.bus.getNumOfCameras():
            display('No Point Grey Research cameras detected')
            # BUGFIX: a bare 'raise' with no active exception produced an
            # unhelpful "No active exception to re-raise" RuntimeError; raise
            # an explicit RuntimeError (same type, clearer message) instead.
            raise RuntimeError('No Point Grey Research cameras detected')
        # Run example on the first camera
        uid = self.bus.getCameraFromIndex(self.cam_id)
        self.cam.connect(uid)
        # BUGFIX: initialize the flag so it is defined even when embedded
        # timestamps are unavailable (previously a NameError at the check).
        enable_timestamp = False
        embedded_info = self.cam.getEmbeddedImageInfo()
        if embedded_info.available.timestamp:
            self.cam.setEmbeddedImageInfo(timestamp = True)
            enable_timestamp = True
        if enable_timestamp:
            display('[PointGrey] - timestamp is enabled.')
        else:
            display('[PointGrey] - timeStamp is disabled.')
        # Configure Format7 mode 0 with the requested ROI (defaulting to the
        # full sensor when no ROI was supplied), in MONO8.
        fmt7_info, supported = self.cam.getFormat7Info(0)
        if supported:
            display('[PointGrey] - Max image pixels: ({}, {})'.format(
                fmt7_info.maxWidth, fmt7_info.maxHeight))
            if not len(self.roi):
                self.roi = [0,0,fmt7_info.maxWidth,fmt7_info.maxHeight]
            display('[PointGrey] - Image unit size: ({}, {})'.format(
                fmt7_info.imageHStepSize, fmt7_info.imageVStepSize))
            display('[PointGrey] - Offset unit size: ({}, {})'.format(
                fmt7_info.offsetHStepSize, fmt7_info.offsetVStepSize))
            #display('[PointGrey] - Pixel format bitfield: 0x{}'.format(
            #    fmt7_info.pixelFormatBitField))
            #if pc2.PIXEL_FORMAT.MONO8 & fmt7_info.pixelFormatBitField == 0:
            #    display('[PointGrey] - setting MONO8')
            x,y,w,h = self.roi
            fmt7_img_set = pc2.Format7ImageSettings(0,x, y,w,h,
                                                    pc2.PIXEL_FORMAT.MONO8)
            fmt7_pkt_inf, isValid = self.cam.validateFormat7Settings(fmt7_img_set)
            if not isValid:
                print('[PointGrey] - Format7 settings are not valid!')
            self.cam.setFormat7ConfigurationPacket(fmt7_pkt_inf.recommendedBytesPerPacket, fmt7_img_set)
        # Request the desired frame rate, then read back the value the
        # camera actually accepted.
        tmp = self.cam.getProperty(pc2.PROPERTY_TYPE.FRAME_RATE)
        tmp.absValue = self.frame_rate
        tmp.onOff = True
        self.cam.setProperty(tmp)
        tmp = self.cam.getProperty(pc2.PROPERTY_TYPE.FRAME_RATE)
        self.frame_rate = tmp.absValue
        display('[PointGrey] - Frame rate is:{0}'.format(self.frame_rate))
        # Set gain
        tmp = self.cam.getProperty(pc2.PROPERTY_TYPE.GAIN)
        tmp.absValue = self.gain
        tmp.onOff = True
        self.cam.setProperty(tmp)
        #start communication
        #self.cam.open_device()
        #self.cam.set_acq_timing_mode('XI_ACQ_TIMING_MODE_FREE_RUN')
        #self.cam.set_exposure(self.exposure)
        #self.cam.set_binning_vertical(self.binning)
        #self.cam.set_binning_horizontal(self.binning)
        # Set the GPIO
        #self.cam.set_gpi_selector('XI_GPI_PORT1')
        #self.cam.set_trigger_source('XI_TRG_OFF')

        #if set_gpio:
        #    self.cam.set_gpo_selector('XI_GPO_PORT1')
            # for now set the GPO to blink in software (port1 will do that for sync purposes, the test cam does not support XI_GPO_EXPOSURE_PULSE)
        #    self.cam.set_gpo_mode('XI_GPO_ON'); #XI_GPO_EXPOSURE_PULSE
        #    if self.triggered.is_set():
        #        self.cam.set_gpi_mode('XI_GPI_TRIGGER')
        #        self.cam.set_trigger_source('XI_TRG_EDGE_RISING')
        # Reset frame bookkeeping and announce readiness to the acquisition
        # process.
        self.lastframeid = -1
        self.nframes.value = 0
        self.camera_ready.set()
        self.prev_ts = 0
        self.lasttime = time.time()
Exemple #10
0
	def run(self):
		if not onLinux:
			return

		self.net = dn.load_net(self.cfgFile, self.weightFile, 0)
		self.meta = dn.load_meta(self.dataFile)
	
		dataFile = open(self.dataFile, "r")
		line = dataFile.readline()
		namesFile = None
		while line:
			line = line.replace(" ", "")
			line = line.split("=")
			if line[0] == "names":
				print "Names file:", line
				namesFile = open(line[1].replace("\n", ""), "r")
				break
			line = dataFile.readline()

		dataFile.close()
		if namesFile != None:
			line = namesFile.readline()
			index = 0
			while line:
				self.detectionDictionary[line.replace("\n", "")] = index
				index += 1
				line = namesFile.readline()	
		namesFile.close()

		if useVideo == True:
			video = cv2.VideoCapture(videoFile)
		else:
			bus = pc2.BusManager()
			self.activeCamera = pc2.Camera()
			self.frontCamera = pc2.Camera()
			self.botCamera = pc2.Camera()
			self.frontCamera.connect(bus.getCameraFromIndex(1))
			self.frontCamera.startCapture()
			time.sleep(1)
			self.botCamera.connect(bus.getCameraFromIndex(0))
			self.botCamera.startCapture()
			self.activeCamera = self.frontCamera

		while self.running == True:
                    if self.useVideo == False:
			self.image = self.activeCamera.retrieveBuffer()
			self.image = self.image.convert(pc2.PIXEL_FORMAT.BGR)
			img = np.array(self.image.getData(), dtype="uint8").reshape((self.image.getRows(), self.image.getCols(), 3))
			img = cv2.flip(img, -1)
                    else:
			t, img = self.activeCamera.read()
                    #yoloImage = dn.IMAGE()
                    detections = dn.detect_np(self.net, self.meta, img, thresh=.1, hier_thresh = .1)
                    newDetections = []
                    self.imgPoints = []
                    self.srcPoints = []
                    for detection in detections:
                            fixedClassNumber = self.detectionDictionary[detection[0]]
			    cv2.putText(img, detection[0], (int(detection[2][0] - (.5 * detection[2][2])), int(detection[2][1] - (.5 * detection[2][3]))), cv2.FONT_HERSHEY_SIMPLEX, .5, (0,0,255), 2)
                            newDetections.append([fixedClassNumber, detection[1], detection[2]])
                            if detection[0] in self.torpedoDictionary or detection[0] == "Corner":
				if detection[0] != "Corner":
					pass
					self.imgPoints.append((detection[2][0], detection[2][1]))
					self.srcPoints.append(self.torpedoSrcPoints[self.torpedoDictionary[detection[0]]])
				elif detection[0] == "Corner":
					pixelError = 20
					'''cv2.circle(img, (int(detection[2][0]), int(detection[2][1])), 10, (255,0,0), -1)
					while True:
						cv2.imshow("Vision", img)
						if cv2.waitKey(1) == ord(" "):
							break'''
					for secondDet in detections:
						'''cv2.rectangle(img, (int(secondDet[2][0] - (.5 * secondDet[2][2]) - pixelError ),int(secondDet[2][1] - (.5 * secondDet[2][3]) - pixelError)), (int(secondDet[2][0] - (.5 * secondDet[2][2]) + pixelError),int(secondDet[2][1] - (.5 * secondDet[2][3]) + pixelError)), (255, 0, 0), 3)
						cv2.rectangle(img, (int(secondDet[2][0]-(.5 * secondDet[2][2])), int(secondDet[2][1]- (.5 * secondDet[2][3]))), (int(secondDet[2][0] + (.5*secondDet[2][2])), int(secondDet[2][1] + (.5*secondDet[2][3]))), (0,0,255))
						while True:
							cv2.imshow("Vision", img)
							if cv2.waitKey(1) == ord(" "):
								break'''
						if secondDet[0] in self.torpedoDictionary:
							index = None
							if detection[2][0] >= secondDet[2][0] - (.5 * secondDet[2][2]) - pixelError and detection[2][0] <= secondDet[2][0] - (.5 * secondDet[2][2]) + pixelError:
								if detection[2][1] <= secondDet[2][1] - (.5 * secondDet[2][3]) + pixelError and detection[2][1] > secondDet[2][1] - (.5 * secondDet[2][3]) - pixelError:
									cv2.circle(img, (int(detection[2][0]), int(detection[2][1])), 10, (255,0,0), -1)
									index = self.torpedoDictionary[secondDet[0]]
									index += 9
							elif detection[2][0] >= secondDet[2][0] + (.5 * secondDet[2][2]) - pixelError and detection[2][0] <= secondDet[2][0] + (.5 * secondDet[2][2]) + pixelError:
								if detection[2][1] <= secondDet[2][1] - (.5 * secondDet[2][3]) + pixelError and detection[2][1] > secondDet[2][1] - (.5 * secondDet[2][3]) - pixelError:
									cv2.circle(img, (int(detection[2][0]), int(detection[2][1])), 10, (0,255,0), -1)
									index = self.torpedoDictionary[secondDet[0]]
									index += 6
							if detection[2][0] >= secondDet[2][0] - (.5 * secondDet[2][2]) - pixelError and detection[2][0] <= secondDet[2][0] - (.5 * secondDet[2][2]) + pixelError:
								if detection[2][1] <= secondDet[2][1] + (.5 * secondDet[2][3]) + pixelError and detection[2][1] > secondDet[2][1] + (.5 * secondDet[2][3]) - pixelError:
									cv2.circle(img, (int(detection[2][0]), int(detection[2][1])), 10, (0,0,255), -1)
									index = self.torpedoDictionary[secondDet[0]]
									index += 8
							elif detection[2][0] >= secondDet[2][0] + (.5 * secondDet[2][2]) - pixelError and detection[2][0] <= secondDet[2][0] + (.5 * secondDet[2][2]) + pixelError:
								if detection[2][1] <= secondDet[2][1] + (.5 * secondDet[2][3]) + pixelError and detection[2][1] > secondDet[2][1] + (.5 * secondDet[2][3]) - pixelError:
									cv2.circle(img, (int(detection[2][0]), int(detection[2][1])), 10, (255,0,255), -1)
									index = self.torpedoDictionary[secondDet[0]]
									index += 7
							if index == None:
								pass
							else:
								self.srcPoints.append(self.torpedoSrcPoints[index])
								self.imgPoints.append((int(detection[2][0]), int(detection[2][1])))
					

                    if len(self.imgPoints) >= 4:
				#print self.imgPoints, self.srcPoints
				rvec, tvec = cv2.solvePnP(np.array(self.srcPoints).astype("float32"), np.array(self.imgPoints).astype("float32"), np.array(self.camMat).astype("float32"), np.zeros((4,1)))[-2:]
				(pose1, jacobian) = cv2.projectPoints(np.array([(0.0,0.0,0.1)]), rvec, tvec, np.array(self.camMat).astype("float32"), None)
				(pose, jacobian) = cv2.projectPoints(np.array([(0,0,12.0)]), rvec, tvec, np.array(self.camMat).astype("float32"), None)
				cv2.line(img, (int(pose1[0][0][0]), int(pose1[0][0][1])), (int(pose[0][0][0]), int(pose[0][0][1])), (255,0,0), 2)

		    for detection in detections:
			    loc = detection[2]
			    cv2.rectangle(img, (int(loc[0]-(.5 * loc[2])), int(loc[1]- (.5 * loc[3]))), (int(loc[0] + (.5*loc[2])), int(loc[1] + (.5*loc[3]))), (0,0,255))
		    self.getList.append(newDetections)
		    cv2.imshow("Vision", img)
		self.activeCamera.stopCapture()
    def initialize(self):
        # called only once, before calling "update" for the first time.
        # Connects to the Blackfly GigE camera, powers it up, and configures
        # it for external hardware triggering.

        # logger.info('Blackfly camera {} is being initialized'.format(self.serial))

        # adds an instance of PyCapture2's camera class
        self.bus = PyCapture2.BusManager()
        self.camera_instance = PyCapture2.GigECamera()

        # connects the software camera object to a physical camera
        self.camera_instance.connect(
            self.bus.getCameraFromSerialNumber(self.parameters['serial']))

        # Powers on the camera by writing to its power register
        cameraPower = 0x610
        powerVal = 0x80000000
        try:
            self.camera_instance.writeRegister(cameraPower, powerVal)
        except PyCapture2.Fc2error:
            print("problem")  # fixed: original used Python 2 print syntax

        # Waits for camera to power up: poll the power register until it
        # reads back the value that was written, with a bounded retry count.
        retries = 10
        timeToSleep = 0.1  # seconds
        awake = False
        # regVal is pre-initialized so a failed first read cannot raise
        # NameError at the comparison below (original left it unbound).
        regVal = 0
        for i in range(retries):
            try:
                regVal = self.camera_instance.readRegister(cameraPower)
            # Camera might not respond to register reads during powerup.
            except PyCapture2.Fc2error:
                pass
            if regVal == powerVal:
                awake = True
                break
            time.sleep(timeToSleep)
        if not awake:  # aborts if Python was unable to wake the camera
            # logger.info('Could not wake Blackfly camera. Exiting...')
            exit()

        # # Use these three lines to check camera info, if desired
        # self.camInfo = self.camera_instance.getCameraInfo()
        # resolution_str = self.camInfo.sensorResolution
        # converts the resolution information to an int list
        # self.cam_resolution = map(int, resolution_str.split('x'))
        # # print "Serial number - ", self.camInfo.serialNumber
        # # print "Camera model - ", self.camInfo.modelName

        # # Enables the camera's 3.3V, 120mA output on GPIO pin 3 (red jacketed
        # lead) if needed
        # output_voltage = 0x19D0
        # voltage_mode = 0x00000001
        # self.camera[cam_index].writeRegister(output_voltage, voltage_mode)

        # Configures trigger mode for hardware triggering
        trigger_mode = self.camera_instance.getTriggerMode()
        trigger_mode.onOff = True  # turns the trigger on
        trigger_mode.mode = 0
        trigger_mode.polarity = 1
        trigger_mode.source = 0  # specifies an external hardware trigger
        self.camera_instance.setTriggerMode(trigger_mode)
        # Sets the camera grab mode:
        # 0 = retrieve only the newest image from the buffer each time
        #     RetrieveBuffer() is called; older images are dropped.
        # 1 = retrieve the oldest buffered image on each call.
        # Fixed: the original did `PyCapture2.GRAB_MODE = 0`, which clobbered
        # the library's GRAB_MODE enum instead of configuring this camera;
        # setConfiguration(grabMode=...) matches usage elsewhere in this file.
        self.camera_instance.setConfiguration(grabMode=0)
        self.isInitialized = True
Exemple #12
0
 def __init__(self):
     """Attach to the FlyCapture2 bus; abort the process if no camera is present."""
     self.bus = PyCapture2.BusManager()
     if not self.bus.getNumOfCameras():
         exit()
Exemple #13
0
def main(camera_serial_number, fileName_prefix):
    """Record externally-triggered trial videos from a Point Grey camera.

    Connects to the camera identified by serial number, configures external
    hardware triggering (mode 14) and Format7 settings copied from the GUI,
    then writes one MJPG .avi file per trial.  A trial ends when the grab
    times out (the external trigger train pauses), at which point the current
    file is closed, a new one is opened, and timing statistics are printed.
    Runs until interrupted with Ctrl-C.

    Args:
        camera_serial_number: serial number (int) of the camera to open.
        fileName_prefix: path/name prefix for the per-trial .avi files.

    NOTE(review): this function uses Python 2 print statements and reads
    `trial_time_out` from an enclosing/global scope not visible here --
    confirm it is defined before this is called.
    """
    # Print PyCapture2 Library Information
    printBuildInfo()

    # Ensure sufficient cameras are found
    bus = PyCapture2.BusManager()
    numCams = bus.getNumOfCameras()
    print "Number of cameras detected: ", numCams
    if not numCams:
        print "Insufficient number of cameras. Exiting..."
        exit()

    c = PyCapture2.Camera()
    # c.connect(bus.getCameraFromIndex(camera_index))
    c.connect(bus.getCameraFromSerialNumber(camera_serial_number))  # !!!

    # Power on the Camera
    cameraPower = 0x610
    powerVal = 0x80000000

    c.writeRegister(cameraPower, powerVal)

    # Waiting for camera to power up
    # NOTE(review): if the very first readRegister raises Fc2error, regVal is
    # unbound at the comparison below and this raises NameError.
    retries = 10
    timeToSleep = 0.1  #seconds
    for i in range(retries):
        sleep(timeToSleep)
        try:
            regVal = c.readRegister(cameraPower)
        except PyCapture2.Fc2error:  # Camera might not respond to register reads during powerup.
            pass
        awake = True
        if regVal == powerVal:
            break
        awake = False
    if not awake:
        print "Could not wake Camera. Exiting..."
        exit()

    # Configure trigger mode
    # mode 14 appears to be an overlapped-exposure trigger mode -- TODO
    # confirm against the camera's technical reference.
    triggerMode = c.getTriggerMode()
    triggerMode.onOff = True
    triggerMode.mode = 14
    triggerMode.parameter = 0
    triggerMode.polarity = 1
    triggerMode.source = 0  # Using external trigger  !!!

    c.setTriggerMode(triggerMode)

    # Set external trigger
    #externalTrigger = 0x830
    #fireVal = 0x83100000
    #c.writeRegister(externalTrigger, fireVal)

    # Long (100 s) timeout while waiting for the first trigger of a trial.
    c.setConfiguration(grabTimeout=100000)

    # Print camera details
    fmt7info, supported = c.getFormat7Info(1)

    # Check whether pixel format mono8 is supported
    if PyCapture2.PIXEL_FORMAT.RAW8 & fmt7info.pixelFormatBitField == 0:
        print "Pixel format is not supported\n"
        exit()

    # Configure camera format7 settings
    # Left, Top, Width, Height
    # fmt7imgSet = PyCapture2.Format7ImageSettings(1, 0, 0, fmt7info.maxWidth, fmt7info.maxHeight, PyCapture2.PIXEL_FORMAT.RAW8)
    # fmt7imgSet = PyCapture2.Format7ImageSettings(0, 368, 296, 496, 416, PyCapture2.PIXEL_FORMAT.RAW8) # Camera 1 side
    # fmt7imgSet = PyCapture2.Format7ImageSettings(1, 144, 162, 304, 350, PyCapture2.PIXEL_FORMAT.RAW8) # Camera 1

    # Automatically get settings from the GUI (Han 20210414)
    setting_in_gui = c.getFormat7Configuration()[0]
    fmt7imgSet = PyCapture2.Format7ImageSettings(setting_in_gui.mode,
                                                 setting_in_gui.offsetX,
                                                 setting_in_gui.offsetY,
                                                 setting_in_gui.width,
                                                 setting_in_gui.height,
                                                 setting_in_gui.pixelFormat)

    fmt7pktInf, isValid = c.validateFormat7Settings(fmt7imgSet)
    if not isValid:
        print "Format7 settings are not valid!"
        exit()
    c.setFormat7ConfigurationPacket(fmt7pktInf.maxBytesPerPacket, fmt7imgSet)

    # Enable camera embedded timestamp
    c.setEmbeddedImageInfo(timestamp=True)

    # Configure camera buffer settings
    #bufferFrame = PyCapture2.Config()
    #bufferFrame.numBuffers = 64
    #bufferFrame.grabMode = 1
    #bufferFrame.highPerformanceRetrieveBuffer = True

    # grabMode=1: buffer frames so none are dropped between retrievals.
    c.setConfiguration(numBuffers=64)
    c.setConfiguration(grabMode=1)
    c.setConfiguration(highPerformanceRetrieveBuffer=True)

    # import pdb; pdb.set_trace()

    # Print camera details
    printCameraInfo(c)
    print "\n---------------- \n"
    print "Camera: ", fileName_prefix

    # --------------------------------------------------
    # Start acquisition
    c.startCapture()

    avi = PyCapture2.AVIRecorder()
    #fRateProp = c.getProperty(PyCapture2.PROPERTY_TYPE.FRAME_RATE)
    #frameRate = fRateProp.absValue
    frameRate = 30
    trialIdx = 1

    fileName = fileName_prefix + "{}.avi".format(trialIdx)
    #image = c.retrieveBuffer()
    #avi.AVIOpen(fileName, frameRate)
    avi.MJPGOpen(fileName, frameRate, 95)

    # Grab images
    try:
        while True:  # Loop per trial
            frame_count = 0
            intervals = []
            last_frame = 0
            print "Trial=", '{:<4d}'.format(trialIdx), "  ",

            while True:
                try:
                    image = c.retrieveBuffer()
                    avi.append(image)

                    frame_count += 1
                    this_frame = time()
                    intervals.append(this_frame - last_frame)
                    # print image.getDataSize(), (this_frame - last_frame)*1000
                    last_frame = this_frame

                    if frame_count > 0:  # Once first frame received, set Timeout to 100ms
                        c.setConfiguration(grabTimeout=trial_time_out)

                except PyCapture2.Fc2error as fc2Err:
                    # Grab timed out: treat it as the end of the trial.
                    #print "Error retrieving buffer : ", fc2Err
                    avi.close()  # Close file

                    trialIdx += 1
                    fileName = fileName_prefix + "{}.avi".format(trialIdx)
                    #avi.AVIOpen(fileName, frameRate)
                    avi.MJPGOpen(fileName, frameRate, 95)
                    #avi.H264Open(fileName, frameRate, image.getCols(), image.getRows(), 100000)
                    c.setConfiguration(grabTimeout=100000)

                    intervals.append(
                        time() - last_frame
                    )  # Gap between receiving the last frame and starting new file (including 100ms Timeout)
                    #continue
                    break

            # Summarize inter-frame intervals for this trial (drop the first
            # and last entries, which span trial boundaries).
            #print "max frame interval = ", intervals
            #print np.histogram(intervals)
            interval_count, interval_edges = np.histogram(intervals[1:-1], 7)
            interval_bins = (interval_edges[:-1] + interval_edges[1:]) / 2
            print '{:5d} @ {:.3f} Hz'.format(frame_count,
                                             1 / np.mean(intervals[1:-1])),
            print ''.join([
                " | {:.2f}ms:{:<4d}".format(bb * 1000, nn)
                for nn, bb in zip(interval_count, interval_bins) if nn > 0
            ]),
            print '>> gap={:.3f}ms'.format(intervals[-1] * 1000)

    except KeyboardInterrupt:  # Ctrl-C
        pass

    # NOTE(review): other examples in this file pass a TriggerMode object;
    # confirm setTriggerMode accepts an onOff keyword argument.
    c.setTriggerMode(onOff=False)
    print "Finished grabbing images!"

    c.stopCapture()
    c.disconnect()
    def __init__(self, serial_number):
        """Initialize FlyCapture2 API camera.

        Searches all cameras reachable by the host using the provided serial
        number. Fails with API error if camera not found.

        This function also does a significant amount of default configuration.

        * It defaults the grab timeout to 5 s
        * Ensures use of the API's HighPerformanceRetrieveBuffer
        * Ensures the camera is in Format 7, Mode 0 with full frame readout and MONO8 pixels
        * If using a GigE camera, automatically maximizes the packet size and warns if Jumbo packets are not enabled on the NIC

        Args:
            serial_number (int): serial number of camera to connect to
        """

        # Deferred import: PyCapture2 is only required if this class is used;
        # the global lets the rest of the class reference the module.
        global PyCapture2
        import PyCapture2

        ver = PyCapture2.getLibraryVersion()
        min_ver = (2,12,3,31) # first release with python 3.6 support
        if ver < min_ver:
            raise RuntimeError(f"PyCapture2 version {ver} must be >= {min_ver}")

        print('Connecting to SN:%d ...'%serial_number)
        bus = PyCapture2.BusManager()
        self.camera = PyCapture2.Camera()
        self.camera.connect(bus.getCameraFromSerialNumber(serial_number))

        # set which values of properties to return
        self.get_props = ['present','absControl','absValue',
                          'onOff','autoManualMode',
                          'valueA','valueB']

        # Collect every pixel-format constant exposed by the API into an
        # IntEnum so formats can be looked up by name (e.g. 'MONO8' below).
        fmts = {prop:getattr(PyCapture2.PIXEL_FORMAT,prop)
                for prop in dir(PyCapture2.PIXEL_FORMAT)
                if not prop.startswith('_')}

        self.pixel_formats = IntEnum('pixel_formats',fmts)

        # Cooperative-abort flag; presumably polled by an acquisition loop
        # elsewhere in this class -- TODO confirm.
        self._abort_acquisition = False

        # check if GigE camera. If so, ensure max packet size is used
        cam_info = self.camera.getCameraInfo()
        if cam_info.interfaceType == PyCapture2.INTERFACE_TYPE.GIGE:
            # need to close generic camera first to avoid strange interactions
            print('Checking Packet size for GigE Camera...')
            self.camera.disconnect()
            gige_camera = PyCapture2.GigECamera()
            gige_camera.connect(bus.getCameraFromSerialNumber(serial_number))
            mtu = gige_camera.discoverGigEPacketSize()
            if mtu <= 1500:
                msg = """WARNING: Maximum Transmission Unit (MTU) for ethernet 
                NIC FlyCapture2_Camera SN:%d is connected to is only %d. 
                Reliable operation not expected. 
                Please enable Jumbo frames on NIC."""
                print(dedent(msg%(serial_number,mtu)))

            gige_pkt_size = gige_camera.getGigEProperty(PyCapture2.GIGE_PROPERTY_TYPE.GIGE_PACKET_SIZE)
            # only set if not already at correct value
            if gige_pkt_size.value != mtu:
                gige_pkt_size.value = mtu
                gige_camera.setGigEProperty(gige_pkt_size)
                print('  Packet size set to %d'%mtu)
            else:
                print('  GigE Packet size is %d'%gige_pkt_size.value)

            # close GigE handle to camera, re-open standard handle
            gige_camera.disconnect()
            self.camera.connect(bus.getCameraFromSerialNumber(serial_number))

        # set standard device configuration
        config = self.camera.getConfiguration()
        config.grabTimeout = 5000 # in ms
        config.highPerformanceRetrieveBuffer = True
        self.camera.setConfiguration(config)

        # ensure camera is in Format7,Mode 0 custom image mode
        fmt7_info, supported = self.camera.getFormat7Info(0)
        if supported:
            # to ensure Format7, must set custom image settings
            # defaults to full sensor size and 'MONO8' pixel format
            print('Initializing to default Format7, Mode 0 configuration...')
            fmt7_default = PyCapture2.Format7ImageSettings(0,0,0,fmt7_info.maxWidth,fmt7_info.maxHeight,self.pixel_formats['MONO8'].value)
            self._send_format7_config(fmt7_default)

        else:
            msg = """Camera does not support Format7, Mode 0 custom image
            configuration. This driver is therefore not compatible, as written."""
            raise RuntimeError(dedent(msg))
Exemple #15
0
 def useFrontCamera(self):
     """Switch acquisition to the camera at bus index 0.

     Stops the currently active camera (if any), then connects and starts
     capture on bus index 0.
     """
     if self.activeCamera is not None:
         self.activeCamera.stopCapture()
     else:
         # Fixed: the original called self.activeCamera.connect() even when
         # activeCamera was None, raising AttributeError; create a camera
         # object so connect() has a target.
         self.activeCamera = pc2.Camera()
     bus = pc2.BusManager()
     self.activeCamera.connect(bus.getCameraFromIndex(0))
     self.activeCamera.startCapture()
Exemple #16
0
                ts.cycleCount - prev_ts.cycleCount)
            print('Timestamp [ {:04d} {:04d} ] - {:03d} - {} {}'.format(
                ticks, ts.cycleCount, diff, count, filename))

        prev_ts = ts


#
# Example Main
#
# Top-level demo script: verifies at least one camera is attached, connects
# to the first camera on the bus, prints its info, and enables the camera's
# embedded image-info timestamp.
# NOTE(review): print_build_info, print_camera_info and
# enable_embedded_timestamp are helper functions defined elsewhere in this
# file and must be in scope before this script section runs.

# Print PyCapture2 Library Information
print_build_info()

# Ensure sufficient cameras are found
bus = PyCapture2.BusManager()
num_cams = bus.getNumOfCameras()
print('Number of cameras detected: ', num_cams)
if not num_cams:
    # No hardware attached: exit() terminates the whole script here.
    print('Insufficient number of cameras. Exiting...')
    exit()

# Select camera on 0th index
c = PyCapture2.Camera()
uid = bus.getCameraFromIndex(0)
c.connect(uid)
print_camera_info(c)

# Enable camera embedded timestamp
enable_embedded_timestamp(c, True)
def captureVideo(idxCam, config, startEvent, abortEvent):
    nVideos = 0
    nFrames = 0
    videoType = config["DEFAULT"]["videoType"]
    videoDuration = config["DEFAULT"].getfloat("videoDuration")
    frameRate = config["DEFAULT"].getfloat("frameRate")
    nPulseFrames = int(config["DEFAULT"].getfloat("analogOutDuration") *
                       frameRate)
    nPeriodFrames = int(config["DEFAULT"].getfloat("analogOutPeriod") *
                        frameRate)
    nSessionFrames = int(config["DEFAULT"].getfloat("sessionDuration") *
                         frameRate)
    nVideoFrames = int(videoDuration * frameRate)

    # initialize our random frame offset
    maxPeriodOffset = config["DEFAULT"].getfloat("analogOutPeriodRange")
    nOffsetFrames = nPeriodFrames + getRandomFrameOffset(
        maxPeriodOffset, frameRate)

    recordIndefinitely = nSessionFrames == 0

    ext = getVideoExtension(videoType)
    cameraName = "cam{:01}".format(idxCam)
    cameraPath = os.path.join(config["DEFAULT"]["dataPath"],
                              config["DEFAULT"]["sessionName"], cameraName)
    processName = mp.current_process().name
    vid = PyCapture2.AVIRecorder()

    # get the analog input mapping for frame code output
    pinMap, maxValue = getAnalogPinMap(config["DEFAULT"]["analogPinMap"])
    address = int(config["DEFAULT"]["analogOutAddress"], base=16)

    # get and connect the camera
    bus = PyCapture2.BusManager()
    camera = PyCapture2.Camera()
    camera.connect(bus.getCameraFromIndex(idxCam))

    # wait for the go signal
    startEvent.wait(0.5)
    camera.startCapture()
    time.sleep(0.1)

    capturingVideo = True
    while capturingVideo:
        # open up new video and log files
        videoFileName = "{}_{:06}".format(cameraName, nVideos)
        videoFilePath = os.path.join(cameraPath,
                                     "{}.{}".format(videoFileName, ext))
        logFilePath = os.path.join(cameraPath, "{}.txt".format(videoFileName))
        if videoType == "H264":
            vid.H264Open(filename=videoFilePath.encode("utf8"),
                         width=640,
                         height=480,
                         framerate=frameRate,
                         bitrate=config["DEFAULT"].getint("bitrateH264"))
        elif videoType == "MJPG":
            vid.MJPGOpen(filename=videoFilePath.encode("utf8"),
                         framerate=frameRate,
                         quality=config["DEFAULT"].getint("qualityMJPG"))
        elif videoType == "AVI":
            vid.AVIOpen(filename=videoFilePath.encode("utf8"),
                        framerate=frameRate)

        with open(logFilePath, 'w') as log:
            # the frame of the first trigger
            lastTriggerFrame = 0
            analogValue = 1

            # acquire and append camera images indefinitely until
            # the user cancels the operation
            for ii in range(nVideoFrames):
                # end session if recording duration exceeds user specified
                # duration or if user aborts
                if recordIndefinitely and abortEvent.is_set():
                    capturingVideo = False
                    break
                elif not recordIndefinitely and nFrames >= nSessionFrames:
                    capturingVideo = False
                    abortEvent.set()
                    break

                try:
                    # acquire camera image and stamp logfile
                    image = camera.retrieveBuffer()
                    vid.append(image)
                    log.write("{}\t{}\n".format(ii + 1,
                                                getTimestamp(datetime.now())))
                    nFrames += 1
                except PyCapture2.Fc2error as error:
                    print(
                        "{}: error retrieving buffer ({}): dropping frame {}.".
                        format(processName, error, ii))
                    continue
                except:
                    pass

                try:
                    if idxCam == 0:
                        # output an analog value to synchronize (and code info)
                        # set special analog value for start of files
                        if ii == 0:
                            setAnalogOutputValue(maxValue, pinMap, address)
                        elif ii in (2 * nPulseFrames, 4 * nPulseFrames):
                            setAnalogOutputValue(0, pinMap, address)
                        elif ii == 3 * nPulseFrames:
                            setAnalogOutputValue(nVideos % (maxValue + 1),
                                                 pinMap, address)
                        elif ii > 4 * nPulseFrames and (
                                ii - lastTriggerFrame) == nOffsetFrames:
                            setAnalogOutputValue(analogValue, pinMap, address)
                            analogValue = analogValue + 1 if analogValue < maxValue else 1
                            lastTriggerFrame = ii
                        elif ii > 4 * nPulseFrames and (
                                ii - lastTriggerFrame) == nPulseFrames:
                            setAnalogOutputLow(address)
                            # set a new random trigger shift
                            nOffsetFrames = nPeriodFrames + getRandomFrameOffset(
                                maxPeriodOffset, frameRate)

                except:
                    pass

        # save and write the last video file
        # note that this will silently fail if file size >2 GB!
        vid.close()
        try:
            print("\t\t{}: wrote {}.{} @ {}.".format(
                processName, videoFileName, ext, getTimestamp(datetime.now())))
        except:
            pass

        # increment the video counter (unless interrupted)
        if capturingVideo: nVideos += 1