Example #1
def main():
    # we need a QApplication that runs our Qt GUI framework
    app = PyuEyeQtApp()

    # a basic Qt window
    view = PyuEyeQtView()
    view.show()
    view.user_callback = process_image

    # camera class to simplify uEye API access
    cam = Camera()
    cam.init()
    cam.set_colormode(ueye.IS_CM_BGR8_PACKED)
    cam.set_aoi(0, 0, 640, 480)
    cam.alloc()
    cam.capture_video()

    # a thread that waits for new images and processes all connected views
    thread = FrameThread(cam, view)
    thread.start()

    # cleanup
    app.exit_connect(thread.stop)
    app.exec_()

    thread.stop()
    thread.join()

    cam.stop_video()
    cam.exit()
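
Example #1 assigns view.user_callback = process_image but never shows the callback itself. The sketch below is only an illustration of what such a callback could look like: it assumes the (view, image_data) callback signature used by the example GUI and the as_1d_image() helper of the ImageData class from pyueye_example_utils (Example #5 shows that it yields a regular BGR array), and it performs a trivial threshold instead of real processing.

import cv2

def process_image(view, image_data):
    # expose the filled uEye buffer as a (height, width, 3) BGR array,
    # the same way Example #5 feeds it straight into OpenCV
    frame = image_data.as_1d_image()
    # illustrative processing step: threshold a grayscale copy and count bright pixels
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)
    print("bright pixels:", cv2.countNonZero(mask))
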
Example #2
def main(config_path="/home/oran/Pictures/Settings/default-camera-settings.ini"):
    print(config_path)
    # we need a QApplication that runs our Qt GUI framework
    app = PyuEyeQtApp()

    # a basic Qt window
    view = PyuEyeQtView()
    view.resize(int(1920 / 1.5), int(1080 / 1.5))
    view.show()
    #update_config_gain(update={'red': '0','green' : '0','blue':'0'},set=True)
    #update_config_exposure(update=70,set=True)
    # view.user_callback = adjust_manually
    view.user_callback = print_means
    # view.user_callback = adjust_manually
    # camera class to simplify uEye API access
    cam = Camera()
    cam.init()
    # cam.set
    cam.set_colormode(ueye.IS_CM_BGR8_PACKED)
    pParam = ueye.wchar_p()
    pParam.value = config_path
    ueye.is_ParameterSet(cam.handle(), ueye.IS_PARAMETERSET_CMD_LOAD_FILE, pParam, 0)

    # cam.set(cv2.cv.CV_CAP_PROP_EXPOSURE, 10)

    # cam.__getattribute__('is_CameraStatus')
    # cam.__setattr__('GetCameraInfo',0)
    #cam.set_aoi(0,0, 1280, 1024)
    cam.set_aoi(0, 0, 4912, 3684)
    cam.alloc()
    cam.capture_video()
    # read back the exposure time (in ms) that the parameter file configured
    exposure = ueye.c_double()
    ueye.is_Exposure(cam.handle(), ueye.IS_EXPOSURE_CMD_GET_EXPOSURE,
                     exposure, ueye.sizeof(exposure))
    # ueye.IS_EXPOSURE_CMD_GET_FINE_INCREMENT_RANGE_MIN = 20
    # ueye.IS_EXPOSURE_CMD_GET_FINE_INCREMENT_RANGE_MAX = 21

    # a thread that waits for new images and processes all connected views
    thread = FrameThread(cam, view)
    thread.start()

    # update_config_gain()

    # cleanup
    app.exit_connect(thread.stop)
    app.exec_()

    thread.stop()
    thread.join()

    cam.stop_video()
    cam.exit()
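
The parameter-file load in Example #2 can be factored into a small helper so other scripts can reuse it. The sketch below only repackages the is_ParameterSet call already shown above (the function name is illustrative) and returns the uEye status code, ueye.IS_SUCCESS on success.

from pyueye import ueye

def load_parameter_file(hcam, path):
    # load a camera parameter .ini file, exactly as Example #2 does inline
    pParam = ueye.wchar_p()
    pParam.value = path
    return ueye.is_ParameterSet(hcam, ueye.IS_PARAMETERSET_CMD_LOAD_FILE, pParam, 0)

It would be called as load_parameter_file(cam.handle(), config_path) right after cam.init().
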
Example #3
def main():

    # we need a QApplication that runs our Qt GUI framework
    print('a')
    app = PyuEyeQtApp()
    print('b')
    # a basic Qt window
    view = PyuEyeQtView()
    print('c')
    view.show()
    print('d')
    view.user_callback = process_image
    print('e')
    # camera class to simplify uEye API access
    cam = Camera()
    print('f')
    cam.init()
    cam.set_colormode(ueye.IS_CM_BGR8_PACKED)
    #cam.set_colormode(ueye.IS_CM_SENSOR_RAW8)
    #cam.set_aoi(0,0, 1280, 1024)
    cam.set_aoi(300, 300, 400, 400)
    #cam.set_aoi(500,500,900,900)
    print('g')
    cam.alloc()
    print('h')
    cam.capture_video()
    print('i')
    # a thread that waits for new images and processes all connected views
    print('j')
    thread = FrameThread(cam, view)
    print('k')
    thread.start()

    print('l')

    # cleanup
    app.exit_connect(thread.stop)
    print('1')
    app.exec_()
    print('2')
    thread.stop()
    print('3')
    thread.join()
    print('4')
    cam.stop_video()
    print('5')
    cam.exit()
    print('6')
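
Examples #1-#3 all hand the camera and the view to a FrameThread from the pyueye example helpers, which is not reproduced in these snippets. The sketch below shows the general shape of such a thread under the same assumptions the examples already make (ImageBuffer/ImageData from pyueye_example_utils, is_WaitForNextImage as used in Example #5); the view.handle() call is a placeholder for however the real view consumes a frame.

from threading import Thread

from pyueye import ueye
from pyueye_example_utils import ImageBuffer, ImageData

class FrameThread(Thread):
    """Simplified sketch: wait for the next filled buffer and pass it to the view."""

    def __init__(self, cam, view, timeout_ms=1000):
        super().__init__()
        self.cam = cam
        self.view = view
        self.timeout_ms = timeout_ms
        self.running = True

    def run(self):
        while self.running:
            img_buffer = ImageBuffer()
            ret = ueye.is_WaitForNextImage(self.cam.handle(), self.timeout_ms,
                                           img_buffer.mem_ptr, img_buffer.mem_id)
            if ret == ueye.IS_SUCCESS:
                # hand the frame to the view; the consumer is expected to unlock
                # the buffer when done, as Examples #4 and #5 do via ImageData.unlock()
                self.view.handle(ImageData(self.cam.handle(), img_buffer))

    def stop(self):
        self.running = False
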
Example #4
class idscamera(object):
    def __init__(self,
                 camhand=1,
                 AOIwidth=1280,
                 AOIheight=1024,
                 buffer_count=100):
        self.camhandle = camhand
        self.cam = Camera(self.camhandle)
        self.cam.init()
        self.cam.set_colormode(ueye.IS_CM_BGR8_PACKED)
        # ret = self.cam.set_exposure(30.0)
        #ret = cam.set_LUT()
        # ret = self.cam.set_aoi(0, 0, AOIwidth, AOIheight)
        self.cam.alloc(buffer_count)
        ret = self.cam.capture_video(True)  # start capturing
        self.img_buffer = ImageBuffer()
        self.DATA = ImageData(self.camhandle, self.img_buffer)

    def extract(self):

        ret = ueye.is_GetActSeqBuf(self.camhandle, self.img_buffer.mem_id,
                                   self.img_buffer.mem_ptr,
                                   self.img_buffer.mem_ptrlast)

        # ret = ueye.is_WaitForNextImage(self.camhandle,
        #                                100,
        #                                self.img_buffer.mem_ptr,
        #                                self.img_buffer.mem_id)
        # print(self.img_buffer.mem_id)
        if ret == ueye.IS_SUCCESS:
            #DATA = ImageData(cam.handle(), img_buffer)
            self.DATA.lock()
            self.DATA.getdata(self.img_buffer)
            image = self.DATA.as_1d_image()
            #print(DATA.array.shape)
            self.DATA.unlock()

            return image
        else:
            return None

    def get_size(self):
        return self.cam.get_size()

    def stop(self):
        self.cam.stop_video()
        self.cam.exit()
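
The idscamera wrapper in Example #4 is only defined, never driven. A hypothetical usage loop, relying solely on the extract() and stop() methods shown above, could look like this:

import cv2

camera = idscamera()
try:
    while True:
        frame = camera.extract()
        if frame is None:
            continue                      # no new frame available yet
        cv2.imshow("idscamera", frame)
        if cv2.waitKey(1) == 27:          # ESC quits
            break
finally:
    camera.stop()
    cv2.destroyAllWindows()
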
Example #5
def main():
    # we need a QApplication that runs our Qt GUI framework
    ##app = PyuEyeQtApp()

    # a basic Qt window
    #view = PyuEyeQtView()
    #view.show()
    #view.user_callback = process_image

    # camera class to simplify uEye API access
    cam = Camera(1)
    cam.init()
    cam.set_colormode(ueye.IS_CM_BGR8_PACKED)

    #ret = cam.set_exposure(30.0)
    #ret = cam.set_LUT()
    #
    width, height = cam.get_size()
    x = 0
    y = 0
    ret = cam.set_aoi(x, y, width, height)
    cam.alloc(buffer_count=100)
    ret = cam.capture_video(True)  # start capturing

    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('_output.avi', fourcc, 20.0, (width, height))

    # a thread that waits for new images and processes all connected views
    #thread = FrameThread(cam)
    #thread.start()
    running = True
    img_buffer = ImageBuffer()
    DATA = ImageData(cam.handle(), img_buffer)
    while (ret == 0) and running:
        #img_buffer = ImageBuffer()
        ret = ueye.is_WaitForNextImage(cam.handle(), 1000, img_buffer.mem_ptr,
                                       img_buffer.mem_id)
        #print(img_buffer.mem_id)
        if ret == ueye.IS_SUCCESS:
            #DATA = ImageData(cam.handle(), img_buffer)
            DATA.getdata(img_buffer)
            #print(DATA.array.shape)
            image = DATA.as_1d_image()
            DATA.unlock()
            # optionally convert to a gray image before displaying/recording
            #gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            cv2.imshow("Simple_black", image)
            out.write(image)
            if cv2.waitKey(1) == 27:
                break
            #cv2.Mat(ImageData.array)

    # cleanup
    #app.exit_connect(thread.stop)
    #app.exec_()

    #thread.stop()
    #thread.join()

    cam.stop_video()
    cam.exit()
    cv2.destroyAllWindows()
    out.release()
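
Example #5 hints, in the commented-out cvtColor line, at recording grayscale instead of color. If that route is taken, the VideoWriter has to be opened with isColor=False; the helper below is a sketch of that variant (function and file names are illustrative, codec and frame rate are taken from the example).

import cv2

def open_gray_writer(width, height, path="_output_gray.avi", fps=20.0):
    # same writer setup as Example #5, but for single-channel frames:
    # the final False argument is VideoWriter's isColor flag
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    return cv2.VideoWriter(path, fourcc, fps, (int(width), int(height)), False)

Inside the capture loop the frame would then be written with gray_out.write(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)).
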
Example #6
def IDSCamera(cfgfile, weightfile, useGPU):

    from ctypes import byref

    # Python external libraries
    from pyueye import ueye  #importing this too early would require IDS camera drivers to be installed just to run the "StandardCamera" code

    # Python modules
    from pyueye_example_camera import Camera
    from pyueye_example_utils import ImageData, Rect, ImageBuffer

    ### IDS camera initializations
    cam = Camera()
    cam.init()
    cam.set_colormode(ueye.IS_CM_BGR8_PACKED)
    cam.alloc()
    cam.capture_video()

    ### startup of thread that pulls image frames from the IDS camera
    input_q = Queue(8)
    output_q = Queue(8)
    thread = FrameThread(cam, 1, cfgfile, weightfile, useGPU, input_q,
                         output_q)
    thread.start()
    loop = True

    m = Darknet(cfgfile)
    ### initialization for creation of a .avi file for sharing of proof of concept
    fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    out = cv2.VideoWriter('output.avi', fourcc, 5.0, (480, 360))

    if m.num_classes == 20:
        namesfile = 'C:/Users/Catharina/Documents/GitHub/VisionSystem/data/voc.names'
    elif m.num_classes == 80:
        namesfile = 'C:/Users/Catharina/Documents/GitHub/VisionSystem/data/coco.names'
    else:
        namesfile = 'data/names'

    class_names = load_class_names(namesfile)

    num_workers = 2
    pool = Pool(num_workers, IDS_worker,
                (input_q, output_q, cfgfile, weightfile, useGPU))

    while loop:
        cv2.waitKey(10)
        image, bboxes = output_q.get()

        print('------')
        # draw boxes associated with detections onto the base image | AlarmDetection.py is called in here
        draw_img, waitsignal = plot_boxes_cv2(image, bboxes, None, class_names)
        # show the image frame that now has detections drawn onto it | draw_img will be
        # entirely green/yellow/red after a judgement is made by AlarmDetection.py for
        # verification or alarm
        cv2.imshow('cfgfile', draw_img)
        # uncomment the following line to record video | file is named output.avi and
        # will overwrite any existing files with same name
        #out.write(draw_img)

        if waitsignal:
            cv2.waitKey(2000)
            waitsignal = False

        if cv2.waitKey(1) & 0xFF == ord('q'):
            loop = False
            out.release()
            cv2.destroyAllWindows()
            thread.stop()
            thread.join()
            print('join')
            pool.terminate()
            print('terminate')
            cam.stop_video()
            print('stop_video')
            cam.exit()
            print('cam exit')

            break

    print('IDS_Camera close')
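
Example #6 feeds frames through a Pool of IDS_worker processes whose implementation is not shown. The sketch below is a guess at the general shape of such a worker: it assumes the pytorch-yolo2 style API already visible in the example (Darknet, num_classes, plot_boxes_cv2), that codebase's load_weights() and do_detect() helpers, and illustrative confidence/NMS thresholds of 0.5/0.4.

import cv2
from darknet import Darknet      # assumed module layout of the YOLO codebase used above
from utils import do_detect      # assumed detection helper from the same codebase

def IDS_worker(input_q, output_q, cfgfile, weightfile, useGPU):
    # each worker process loads its own copy of the network once, then loops:
    # pull a raw frame from input_q, run detection, push (frame, boxes) to
    # output_q for the display loop in IDSCamera() to consume
    model = Darknet(cfgfile)
    model.load_weights(weightfile)
    if useGPU:
        model.cuda()
    model.eval()
    while True:
        frame = input_q.get()
        if frame is None:                         # optional shutdown sentinel
            break
        sized = cv2.resize(frame, (model.width, model.height))
        boxes = do_detect(model, sized, 0.5, 0.4, useGPU)
        output_q.put((frame, boxes))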