Example #1
    def _run(self, keep=True):
        print(fc2.get_library_version())
        c = fc2.Context()
        if (c.get_num_of_cameras() < 1): return
        c.connect(*c.get_camera_from_index(0))
        m, f = c.get_video_mode_and_frame_rate()
        print(c.get_format7_configuration())
        c.start_capture()
        im = fc2.Image()
        while True:
            c.retrieve_buffer(im)
            a = np.array(im)
            a = cv2.resize(a, (640, 480))
            a = cv2.flip(a, 0)
            if self.ctrl:
                self.ctrl.lock.acquire()
                self.ctrl.imageQueue.append(a)
                self.ctrl.haveImage = True
                self.ctrl.lock.release()
                self.ctrl.event()
                #cv2.imshow('rgb1', a)
                #cv2.waitKey(1)
            else:
                cv2.imshow('rgb', a)

                if not keep:
                    key = cv2.waitKey()
                    break
                else:
                    key = cv2.waitKey(1)
                    if key & 0xFF == ord('q'): break
        c.stop_capture()
        c.disconnect()
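
Note: nearly every example on this page repeats the same connect / configure / start_capture / retrieve_buffer / stop_capture / disconnect sequence. Below is a minimal sketch, assuming the pyflycapture2 bindings behave as these examples use them, that factors the pattern into a context manager so the camera is always released even when the capture loop raises. The helper names (fc2_camera, grab_frame) are illustrative, not part of the library.

import contextlib

import flycapture2 as fc2
import numpy as np


@contextlib.contextmanager
def fc2_camera(index=0, mode=fc2.VIDEOMODE_640x480Y8, rate=fc2.FRAMERATE_30):
    # Connect, configure, and start capture; guarantee teardown on exit.
    c = fc2.Context()
    c.connect(*c.get_camera_from_index(index))
    c.set_video_mode_and_frame_rate(mode, rate)
    c.start_capture()
    try:
        yield c
    finally:
        c.stop_capture()
        c.disconnect()


def grab_frame(c):
    # retrieve_buffer() fills the fc2.Image in place; np.array() copies it out.
    im = fc2.Image()
    c.retrieve_buffer(im)
    return np.array(im)

# Usage sketch:  with fc2_camera() as c: frame = grab_frame(c)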
Example #2
def launch_FLIR_GUI(bg=None):

    if bg is None:
        cap = fc2.Context()
        cap.connect(*cap.get_camera_from_index(0))
        cap.set_video_mode_and_frame_rate(fc2.VIDEOMODE_640x480Y8,
                                          fc2.FRAMERATE_15)
        m, f = cap.get_video_mode_and_frame_rate()
        p = cap.get_property(fc2.FRAME_RATE)
        cap.set_property(**p)
        cap.start_capture()
        img = fc2.Image()
        cap.retrieve_buffer(img)
        frame = np.array(img)

        im = frame.copy()
        im = np.expand_dims(im, 2)

        r = cv2.selectROI(im, fromCenter=False)

        cap.stop_capture()
        cap.disconnect()
        cv2.destroyAllWindows()

    else:
        r = cv2.selectROI(bg, fromCenter=False)
        cv2.destroyAllWindows()

    return r
Example #3
def camera(n_frames, save_dir):

	cap = fc2.Context()
	cap.connect(*cap.get_camera_from_index(0))

	cap.set_video_mode_and_frame_rate(fc2.VIDEOMODE_640x480Y8, fc2.FRAMERATE_30)
	m, f = cap.get_video_mode_and_frame_rate()
	p = cap.get_property(fc2.FRAME_RATE)
	cap.set_property(**p)
	cap.start_capture()

	baseTime = time.time()

	for i in range(n_frames):

		img = fc2.Image()
		cap.retrieve_buffer(img)
		frame = np.array(img)

		j = str(i)
		k = str(j.zfill(8))
		cv2.imwrite(save_dir+'/{}.jpg'.format(k), frame)

		# Video keyboard interrupt
		if cv2.waitKey(1) & 0xFF == ord('q'):
			break

	cap.stop_capture()
	cap.disconnect()
	cv2.destroyAllWindows()
	cv2.waitKey()
Example #4
	def run(self):
		self.threadactive=True


		if self.testMode==True:
			cap = cv2.VideoCapture('/home/patrick/Desktop/out16.mp4')
		else:
			try:
				cap = fc2.Context()
				cap.connect(*cap.get_camera_from_index(0))
				cap.set_video_mode_and_frame_rate(fc2.VIDEOMODE_640x480Y8, fc2.FRAMERATE_30)
				m, f = cap.get_video_mode_and_frame_rate()
				p = cap.get_property(fc2.FRAME_RATE)
				cap.set_property(**p)
				cap.start_capture()
			except fc2.ApiError:
				msg = 'Make sure a camera is connected  '
				self.error = ErrorMsg(msg)
				self.error.show()
				self.threadactive=False
				self.finished.emit()
				return None


		i = 0

		while (i < self.nFrames and self.threadactive):

			if self.testMode==True:
				ret, frame = cap.read()
				frame = cv2.resize(frame, (420,420))
				frame = frame.copy()
			else:
				img = fc2.Image()
				cap.retrieve_buffer(img)
				frame = np.array(img)
				frame = cv2.cvtColor(frame,cv2.COLOR_GRAY2RGB)

			h, w, ch = frame.shape
			bytesPerLine = ch * w
			convertToQtFormat = QImage(frame.data, w, h, bytesPerLine, QImage.Format_RGB888)
			p = convertToQtFormat.scaled(420, 420, QtCore.Qt.KeepAspectRatio)
			self.changePixmap.emit(p)
			if self.write==True:
				j = str(i)
				k = str(j.zfill(6))
				cv2.imwrite(self.saveDir+'/{}.jpg'.format(k), frame)
			i = i+1
			prog = int(100*(i/self.nFrames))
			if self.write==True:
				self.count.emit(prog)
		self.finished.emit()

		if self.testMode==True:
			cap.release()

		else:
			cap.stop_capture()
			cap.disconnect()
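
Note: when the sensor delivers 8-bit grayscale (as with VIDEOMODE_640x480Y8 here), the cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB) step can be skipped by building the QImage directly from the single-channel frame, much as Example #5 below does with Format_Indexed8. A sketch assuming Qt >= 5.5, where QImage.Format_Grayscale8 is available:

from PyQt5.QtGui import QImage

def gray_to_qimage(frame):
    # frame: 2-D uint8 array from np.array(fc2.Image()); it must stay alive
    # while the QImage is in use. bytesPerLine == width for one 8-bit channel.
    h, w = frame.shape
    return QImage(frame.data, w, h, w, QImage.Format_Grayscale8)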
Example #5
    def run(self):      
        while True:
            im = fc2.Image()
            self.c.retrieve_buffer(im)
            a = np.array(im)    

            rawImage = QImage(a.data, a.shape[1], a.shape[0], QImage.Format_Indexed8)

            self.changePixmap.emit(rawImage)
Example #6
 def getimg(self):
     """
     Get an image from the camera.
     Transposes the image for easy viewing.
     The image is buffered in self.img.
     :return:
     """
     self.im = fc2.Image()
     self.img = np.transpose(np.array(self.ctx.retrieve_buffer(self.im)))
     return self.img
Example #7
 def get_frame(self):
     im = fc2.Image()
     self.context.retrieve_buffer(im)
     #print 'get_frame im:', dir(im), 'size:', im.__sizeof__(), 'format:', PIXEL_FORMAT[im.get_format()]
     #print 'rows, cols, stride', im.get_rows(), im.get_cols(), im.get_stride()
     #print 'bayerformat, datasize, received datasize', im.get_bayerFormat(), im.get_dataSize(), im.get_receivedDataSize()
     a = np.array(im)
     # convert Bayer -> RGB
     # These two work: GB RGB, GR BGR (fc2BayerTileFormat = 3 = FC2_BT_GBRG, /**< Green-Blue-Red-Green. */)
     return cv2.cvtColor(a, cv2.COLOR_BayerGR2BGR)
Example #8
 def __init__(self, camnum):
     fourcc = 0
     fpsec = 70
     resolution = (1504, 1500)
     self.cam = fc2.Context()
     self.cam.connect(*self.cam.get_camera_from_index(camnum))
     self.serial_num = self.cam.get_camera_info()
     self.im = fc2.Image()
     self.video = cv2.VideoWriter('cam'+str(camnum)+'.AVI', fourcc, fpsec,
                                  resolution, False)
     self.cam.start_capture()
     self.cycles = []
     self.camID = camnum
Example #9
def readFrame(c):

    im1 = fc2.Image()

    c.retrieve_buffer(im1)

    a_bgr = np.array(im1)

    a_bgr = cv2.medianBlur(a_bgr, 5)

    gr = cv2.cvtColor(a_bgr, cv2.COLOR_BGR2GRAY)

    return a_bgr, gr
Example #10
 def retrieve_buffer(self):
     try:
         raw_img = fc2.Image()
         self.cam.retrieve_buffer(raw_img)
         #img = self.cam.GrabNumPyImage('bgr')
     except fc2.ApiError:
         print("No open camera found, need to initialize camera and start capture")
         self.cam = self.initializeCamera()
         #self.cam.stop_capture()
         self.cam.start_capture()
         raw_img = fc2.Image()
         self.cam.retrieve_buffer(raw_img)
         #self.cam.stop_capture() #! when adding a stop the regular camera viewer needs to be reconnected.
         self.cam.disconnect()
     img = np.array(raw_img)
     img_bgr = cv2.cvtColor(img, cv2.COLOR_BAYER_BG2BGR)
     b, g, r = cv2.split(img_bgr)
     img_rgb = cv2.merge([r, g, b])
     #img=cv2.transpose(img_rgb)
     img = cv2.flip(cv2.transpose(img_rgb), 1)
     #cv2.Flip(timg,timg,flipMode=0)
     return img  #cv2.flip(cv2.merge([r,g,b]), 1)
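
Note: the split/merge above is just a channel swap; cv2.cvtColor can demosaic straight from the Bayer pattern to RGB in one call. A sketch, assuming the same BG Bayer layout the example uses (the constant's spelling varies between OpenCV builds: COLOR_BAYER_BG2RGB vs. COLOR_BayerBG2RGB):

import cv2

def demosaic_rgb(raw):
    # raw: single-channel Bayer frame from np.array(fc2.Image())
    return cv2.cvtColor(raw, cv2.COLOR_BayerBG2RGB)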
Example #11
def getBG():
    # if address is not None:
    #   bg = cv2.imread(config.bg)
    #   bg = cv2.cvtColor(bg, cv2.COLOR_GRAY2RGB)
    # else:
    cap = fc2.Context()
    cap.connect(*cap.get_camera_from_index(0))
    cap.set_video_mode_and_frame_rate(fc2.VIDEOMODE_640x480Y8,
                                      fc2.FRAMERATE_15)
    m, f = cap.get_video_mode_and_frame_rate()
    p = cap.get_property(fc2.FRAME_RATE)
    cap.set_property(**p)
    cap.start_capture()

    temp = np.ndarray((480, 640, 60), dtype=np.uint8)

    for i in range(60):

        img = fc2.Image()
        cap.retrieve_buffer(img)
        frame = np.array(img)

        temp[:, :, i] = frame[:, :]

    bgAvg = np.mean(temp, axis=2)

    #_, bg = cv2.threshold(bgAvg, 150, 255, 0)

    bg = bgAvg.astype(np.uint8)

    cap.stop_capture()
    cap.disconnect()

    bgBig = cv2.resize(bg, None, fx=2, fy=2)

    r = cv2.selectROI(bgBig, fromCenter=False)

    x, y, w, h = r[0], r[1], r[2], r[3]

    bgBig[y:y + h, x:x + w] = np.ones((h, w), dtype=np.uint8) * 160

    bg = cv2.resize(bgBig, None, fx=0.5, fy=0.5)

    cv2.destroyAllWindows()

    return bg
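
Note: the 480x640x60 buffer above holds all 60 frames just to average them; a running average with cv2.accumulateWeighted reaches a similar background in constant memory. A sketch, assuming frames arrive as uint8 arrays like np.array(fc2.Image()):

import cv2
import numpy as np

def running_background(frames, alpha=0.05):
    # Exponentially weighted mean; the accumulator must be floating point.
    acc = None
    for frame in frames:
        f = frame.astype(np.float32)
        if acc is None:
            acc = f.copy()
        else:
            cv2.accumulateWeighted(f, acc, alpha)
    return acc.astype(np.uint8)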
Example #12
def GenericCamera(n_frames, save_dir, block):
    cap = fc2.Context()
    cap.connect(*cap.get_camera_from_index(0))
    cap.set_video_mode_and_frame_rate(fc2.VIDEOMODE_640x480Y8,
                                      fc2.FRAMERATE_15)
    m, f = cap.get_video_mode_and_frame_rate()
    p = cap.get_property(fc2.FRAME_RATE)
    cap.set_property(**p)
    cap.start_capture()

    photos = []

    time0 = time.time()
    for i in range(n_frames):
        print(time.time() - time0)

        if block == 'pre':
            logging.debug("{},{},{}".format(str(time.time() - globalZeroTime),
                                            'pre', str(i)))
        else:
            logging.debug("{},{},{}".format(str(time.time() - globalZeroTime),
                                            'post', str(i)))

        img = fc2.Image()
        cap.retrieve_buffer(img)
        frame = np.array(img)
        photos.append(frame)
        # smallFrame = cv2.resize(frame, None, fx=0.5, fy=0.5)
        cv2.imshow("Frame", frame)
        #cv2.imwrite(save_dir+'/{}.jpg'.format(i), frame)

        # Video keyboard interrupt
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    if save_dir is not None:
        with open(save_dir + '/photos.pkl', 'wb') as f:
            pickle.dump(photos, f)

    cap.stop_capture()
    cap.disconnect()
    cv2.destroyAllWindows()
    cv2.waitKey()
Example #13
    def open(self):
        self.context = fc2.Context()
        self.context.connect(
            *self.context.get_camera_from_index(self.device_no))

        # self.reset_camera()

        if self.fast_and_small_video:
            width, height = self.set_resolution(3 * 480, 3 * 480)
            self.framerate = framerate_focus
        else:
            self.framerate = framerate_full
            self.set_resolution()  # maximize resolution

        if self.triggered:
            self.set_software_trigger()
            print('Framerate set to: {:.1f} Hz'.format(self.framerate))

        self.context.start_capture()
        self.im = fc2.Image()
Example #14
def test():
    print(fc2.get_library_version())
    c = fc2.Context()
    print(c.get_num_of_cameras())
    c.connect(*c.get_camera_from_index(0))
    print(c.get_camera_info())
    c.set_video_mode_and_frame_rate(fc2.VIDEOMODE_1280x960Y16,
            fc2.FRAMERATE_7_5)
    m, f = c.get_video_mode_and_frame_rate()
    print(m, f)
    print(c.get_video_mode_and_frame_rate_info(m, f))
    print(c.get_property_info(fc2.FRAME_RATE))
    p = c.get_property(fc2.FRAME_RATE)
    print(p)
    c.set_property(**p)
    c.start_capture()
    im = fc2.Image()
    print([np.array(c.retrieve_buffer(im)).sum() for i in range(80)])
    a = np.array(im)
    print(a.shape, a.base)
    c.stop_capture()
    c.disconnect()
Example #15
    def __init__(self):
        """Setup the communications with the flycapture2 device."""
        import flycapture2 as fc2

        # FlyCapture Info printing and setup:
        print "\n\nlibrary version: {}\n".format(fc2.get_library_version())
        self.context = fc2.Context()
        print "Number of Cameras: {}\n".format(
            self.context.get_num_of_cameras())
        self.context.connect(*self.context.get_camera_from_index(0))
        print "Camera Info: {}\n".format(self.context.get_camera_info())
        m, f = self.context.get_video_mode_and_frame_rate()
        print "Video Mode: {}\nFrame Rate:{}\n".format(m, f)
        print "Frame Rate Property Info: {}\n".format(
            self.context.get_property_info(fc2.FRAME_RATE))
        p = self.context.get_property(fc2.FRAME_RATE)
        print "Frame Rate Property: {}\n".format(p)
        self.context.set_property(**p)
        self.context.start_capture()
        self.fc2_image = fc2.Image()
        print "done with flycap2 setup"
        self.cam_on = True

        self.image = None
        cv2.namedWindow('raw', cv2.WINDOW_NORMAL)
        cv2.waitKey(5)

        self.__acquisition_thread = None
        self.lock = threading.Lock()

        self.run = True
        self.__acquisition_thread = threading.Thread(group=None,
                                                     target=self.acquire,
                                                     name='acquisition_thread',
                                                     args=(),
                                                     kwargs={})
        self.__acquisition_thread.start()
Example #16
File: Cam.py Project: crackmech/cnnFlyWalk
currDir = None
c = fc2.Context()
nCam = c.get_num_of_cameras()

for i in range(0, nCam):
    c.connect(*c.get_camera_from_index(i))
    if c.get_camera_info()['serial_number'] == camNumber:
        camSerial = i

c.connect(*c.get_camera_from_index(camSerial))
p = c.get_property(fc2.FRAME_RATE)
print "Frame Rate: "+str(p['abs_value'])
logFileWrite("Camera Frame Rate: "+str(p['abs_value']))

im = fc2.Image()
c.start_capture()
imData = displayCam(c, im)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(imData, 'Starting Image', (400, 300), font, 2, (0,255,100),5)
startNT(cv2.imwrite,(roiDir+'start.jpeg',imData))

for nLoop in range(startSegment, totalLoops):
    dirTime = present_time()
    print('==========  Starting loop #%i  =========='%(nLoop+1))
#    saveDir = imDir+dirTime+'/'
#    os.mkdir(saveDir)
    CapNProc(c, im, nFrames, imDir, baseDir, roiDir, saveImDel, dirTime,\
                    framedelThresh, avgWeight, dirThresh, displayRate, extension)
    print "\r\nWaiting for loop number: "+str(nLoop+1)
c.stop_capture()
Example #17
def get_data():
    """Return 3 random integers between 0 and 9"""
    # return random.sample(range(10), 3)
    return np.array(c.retrieve_buffer(fc2.Image()))
Example #18
 def dequeue(self):
     im = fc2.Image()
     self.ctx.retrieve_buffer(im)
     return np.array(im)
Example #19
    pargs = ['python', 'SC_display_window.py']  # debug
    p = Popen(pargs)
    sys.exit()
signal.signal(signal.SIGINT,
              __control_c_stop)  # kill display in case of Ctrl-C termination.
time.sleep(.1)

try:
    # Connect to the camera
    c = fc2.Context()
    if c.get_num_of_cameras() == 0:
        print('There are no cameras detected.')
        kill_display()
        sys.exit()
    c.connect(*c.get_camera_from_index(0))
    buf = fc2.Image()

    # Initialize the camera
    #   TODO: set appropriate mode and pixel format depending on connected camera model.
    c.set_format7_configuration(7, args.roi[0], args.roi[1], args.roi[2],
                                args.roi[3], fc2.PIXEL_FORMAT_MONO16)
    #             (type             , present, on_off, auto , abs_c, one_p, val         , val_a, val_b)
    c.set_property(fc2.FRAME_RATE, True, False, False, True, False, 1., 0, 0)
    c.set_property(fc2.AUTO_EXPOSURE, True, False, False, False, False, 0., 0,
                   0)
    c.set_property(fc2.BRIGHTNESS, True, True, False, True, False, 0., 0, 0)
    c.set_property(fc2.SHARPNESS, True, True, False, False, False, 0., 0, 0)
    c.set_property(fc2.GAMMA, True, True, False, True, False, 1., 0, 0)
    c.set_property(fc2.GAIN, True, True, False, True, False, args.gain, 0, 0)
    c.set_property(fc2.SHUTTER, True, True, False, True, False, args.shutter,
                   0, 0)
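
Note: the positional set_property() calls above follow the field order listed in the comment. The other examples on this page use a read-modify-write form instead, which avoids counting positional fields. A sketch in that style, continuing with the same c and args; only the 'abs_value' key is confirmed by these examples (see Example #16):

p = c.get_property(fc2.SHUTTER)
p['abs_value'] = args.shutter  # adjust just the field of interest
c.set_property(**p)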
Example #20
if acquire_images:
    # Start acquiring
    camera.start_capture()  #capture_start()
    # camera.queue_frame()

print('Beginning imaging [Hit ESC to quit]...')

while True:

    t = time.time()

    if acquire_images:
        # im_array, meta = camera.capture_wait()
        # camera.queue_frame()
        im_array = np.array(camera.retrieve_buffer(fc2.Image()))

        nframes += 1

        if save_images:
            meta['f'] = nframes
            im_queue.put((im_array, meta))

        im_to_show = im_array  #(im_array / 2**4).astype(np.uint8)
        cv2.imshow('cam_window', im_to_show)

    key = cv2.waitKey(1) & 0xFF
    # if key is 27 or key is 113:
    #     break

    if key == ord("q"):
Example #21
    def run(self):
        cap = fc2.Context()
        cap.connect(*cap.get_camera_from_index(0))
        cap.set_video_mode_and_frame_rate(fc2.VIDEOMODE_640x480Y8,
                                          fc2.FRAMERATE_15)
        m, f = cap.get_video_mode_and_frame_rate()
        p = cap.get_property(fc2.FRAME_RATE)
        cap.set_property(**p)
        cap.start_capture()

        cy = r[1] + int(r[3] / 2)
        cx = r[0] + int(r[2] / 2)

        x = r[0]
        y = r[1]
        w = r[2]
        h = r[3]

        photos = []
        slidingWindow = []

        for i in range(self.nFrames):
            img = fc2.Image()
            cap.retrieve_buffer(img)
            frame = np.array(img)

            photos.append(frame)

            smallFrame = frame[y:y + h, x:x + w]
            smallFrame = np.multiply(smallFrame, mask)
            smallFrame[np.where(smallFrame == 0)] = 255

            detect, trackedFrame, pos = track(smallFrame, 120, config.flySize)

            if pos[1] is None:
                slidingWindow.append(np.mean(slidingWindow))
            else:
                slidingWindow.append(pos[1])

            cv2.imshow("Frame", trackedFrame)

            #cv2.imwrite(self.saveDir+'/{}.jpg'.format(i), frame)

            # Code for walking direction
            # 0 = No walking
            # 1 = Walking towards top
            # 2 = Walking towards bottom

            if i > 2:
                slidingWindow.pop(0)
                d = np.diff(slidingWindow)
                da = np.mean(d)

                if da > 2:
                    signal = 2
                elif da < -2:
                    signal = 1
                else:
                    signal = 0
            else:
                signal = 0

            logging.debug('{},{},{},{},{}'.format(
                str(time.time() - globalZeroTime), 'reinforcement', str(i),
                str(pos[0]), str(pos[1])))

            if not q.full():
                # This will be set conditionally by the tracker
                item = signal
                q.put(item)
                #logging.debug('Putting {} in queue'.format(signal))

            # Video keyboard interrupt
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        if self.save_dir is not None:
            with open(self.save_dir + '/photos.pkl', 'wb') as f:
                pickle.dump(photos, f)

        cap.stop_capture()
        cap.disconnect()
        cv2.destroyAllWindows()
        cv2.waitKey()

        return
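
Note: the walking-direction logic in the loop above reads cleanly in isolation: a short sliding window of y-positions is differenced, the mean slope is thresholded at 2 px/frame, and 0/1/2 encode no walking / towards top / towards bottom. A sketch of that classifier (names are illustrative):

import numpy as np

def walking_signal(window, thresh=2.0):
    # 0 = no walking, 1 = walking towards top, 2 = walking towards bottom
    da = np.mean(np.diff(window))
    if da > thresh:
        return 2
    if da < -thresh:
        return 1
    return 0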
Example #22
 def __init__(self, serial_number):
     self.context = cap.Context()
     self.connected = False
     self.serial_number = serial_number
     self.image_buffer = cap.Image()
Example #23
p = c.get_property(fc2.FRAME_RATE)
print(p)
c.set_property(**p)
c.start_capture()

cv2.startWindowThread()
cv2.namedWindow("image")
#cv2.setMouseCallback("image", click_and_crop)

while True:

    key = cv2.waitKey(1) & 0xFF

    im = np.array(c.retrieve_buffer(fc2.Image()))
    # plt.imshow(im)
    cv2.imshow("image", im)

    if key == ord("q"):
        break

    # print a.shape, a.base

cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
print "Done."

c.stop_capture()
c.disconnect()