Example #1
0
def main():
    """Open a uEye camera in MONO8 mode with a tiny AOI, stream frames into a
    Qt view at the maximum supported frame rate, and clean up on exit."""
    # We need a QApplication that runs our Qt GUI framework.
    app = PyuEyeQtApp()

    # A basic Qt window; process_image is invoked for every received frame.
    view = PyuEyeQtView()
    view.show()
    view.user_callback = process_image

    # Camera class to simplify uEye API access.
    cam = Camera()
    cam.init()
    cam.set_colormode(ueye.IS_CM_MONO8)
    print('Color Mode:', cam.get_colormode())

    # Very small AOI (1280 x 4 pixels) to allow a high frame rate.
    cam.set_aoi(0, 0, 1280, 4)
    aoi = cam.get_aoi()
    print('AOI:', aoi.x, aoi.y, aoi.width, aoi.height)

    # Query the frame-time range once instead of issuing four separate
    # API calls (the original called get_FrameTimeRange() three times on
    # one print line and a fourth time for set_fps).
    frame_time_range = cam.get_FrameTimeRange()
    print('Framerate Range:', frame_time_range[0], frame_time_range[1], frame_time_range[2])

    # Minimum frame time -> maximum achievable frame rate.
    cam.set_fps(1 / frame_time_range[0])

    cam.set_exposure(0.1)
    print('Exposure Time:', cam.get_exposure())

    cam.alloc()
    cam.capture_video()

    # A thread that waits for new images and processes all connected views.
    thread = FrameThread(cam, view)
    thread.start()

    # Stop the frame thread when the Qt application exits, then run the
    # event loop until the window is closed.
    app.exit_connect(thread.stop)
    app.exec_()

    print('Frame Rate:', cam.get_fps())
    thread.stop()
    thread.join()

    cam.stop_video()
    cam.exit()
Example #2
0
def main():
    """Stream video from a uEye camera for one minute, then shut everything down."""
    # Camera helper wrapping the uEye API.
    camera = Camera()
    camera.init()

    # Allocate image memory and start continuous capture.
    camera.alloc()
    camera.capture_video()

    # Worker thread that waits for new images and processes all connected views.
    frame_worker = FrameThread(camera)
    frame_worker.start()

    # Let acquisition run for 60 seconds.
    time.sleep(60)

    # Orderly shutdown: stop and join the worker before releasing the camera.
    frame_worker.stop()
    frame_worker.join()

    camera.stop_video()
    camera.exit()
Example #3
0
def main():
    """Capture a user-chosen number of frames from a uEye camera, preview each
    one in an OpenCV window, and save them as JPEGs under train_file/."""
    import os  # local import: only needed to ensure the output directory exists

    parent_conn, child_conn = Pipe()
    cpt = 0
    max_frames = int(input("How many pictures would you like?: "))

    # cv.imwrite fails silently when the target directory is missing,
    # so create it up front.
    os.makedirs('train_file', exist_ok=True)

    # Camera class to simplify uEye API access.
    cam = Camera()
    cam.init()
    cam.set_colormode(ueye.IS_CM_BGR8_PACKED)
    cam.set_aoi(0, 0, 1920, 1080)

    cam.alloc()
    cam.capture_video()

    # A thread that waits for new images and pushes them into the pipe.
    thread = FrameThread(cam, child_conn)
    thread.start()

    while cpt < max_frames:
        # Block until the frame thread delivers the next image.
        img = parent_conn.recv()
        cv.imshow("Image", img)
        cv.waitKey(200)
        cv.imwrite('train_file/image%04i.jpg' % cpt, img)
        print("image captured")

        # Pause between captures so successive images differ.
        time.sleep(5)
        cpt += 1

    thread.stop()
    # Join the worker before tearing down the camera (was commented out in the
    # original), so the thread cannot touch already-released camera resources.
    thread.join()

    cam.stop_video()
    cam.exit()
    print("Exiting the program")
# Number of objects detected — output tensor of the detection graph built
# earlier in the file (presumably a TensorFlow object-detection graph,
# given the ':0' tensor naming — TODO confirm against the graph setup).
num_detections = detection_graph.get_tensor_by_name('num_detections:0')

# Initialize the pipeline connecting the thread and the main function
parent_conn, child_conn = Pipe()



# camera class to simplify uEye API access
cam = Camera()
cam.init()
# Packed 8-bit BGR color frames (matches what OpenCV expects).
cam.set_colormode(ueye.IS_CM_BGR8_PACKED)
# 2048 x 2048 area of interest starting at the sensor origin.
cam.set_aoi(0, 0, 2048, 2048)

cam.alloc()
cam.capture_video()
# NOTE(review): presumably enables automatic exposure/gain — confirm
# against the Camera helper's implementation.
cam.set_full_auto()

# a thread that waits for new images and processes all connected views;
# frames are forwarded to this module through child_conn.
thread = FrameThread(cam, child_conn)
thread.start()
# Give the camera and its auto features time to settle before consuming frames.
time.sleep(8)
while(True):

    # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]
    # i.e. a single-column array, where each item in the column has the pixel RGB value


    #print("check this point-1")
    #time.sleep(1)