Example 1
def test_alpha_view():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_ARGB32)
    qimg.fill(23)
    v = qimage2ndarray.alpha_view(qimg)
    qimg.setPixel(12, 10, QtGui.qRgb(12,34,56))
    assert_equal(v[10,12], 255)  # qRgb() implies alpha == 255
    assert_equal(v[10,11], 0)    # fill(23) left the alpha byte at 0
Example 2
def test_rgb_view():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_RGB32)
    qimg.fill(QtGui.qRgb(23,0,0))
    v = qimage2ndarray.rgb_view(qimg)
    qimg.setPixel(12, 10, QtGui.qRgb(12,34,56))
    assert_equal(list(v[10,10]), [23,0,0])
    assert_equal(list(v[10,12]), [12,34,56])
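These view functions return NumPy arrays that share memory with the QImage, so writes through the array show up in the image and vice versa. A minimal sketch of that round trip (assuming PyQt5 as the Qt binding; note the [y, x] indexing on the array versus (x, y) on the QImage):

import qimage2ndarray
from PyQt5 import QtGui

qimg = QtGui.QImage(4, 4, QtGui.QImage.Format_RGB32)
qimg.fill(QtGui.qRgb(0, 0, 0))
v = qimage2ndarray.rgb_view(qimg)   # shape (4, 4, 3), dtype uint8
v[1, 2] = (12, 34, 56)              # write through the shared buffer
assert qimg.pixel(2, 1) == QtGui.qRgb(12, 34, 56)   # QImage uses (x, y)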
Example 3
def test_RGBA8888():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_RGBA8888)
    qimg.fill(0)
    v = _qimageview(qimg)
    qimg.setPixel(12, 10, QtGui.qRgb(0x12, 0x34, 0x56))
    assert_equal(v.shape, (240, 320))
    assert_equal(v[10, 12],
                 0x123456ff if sys.byteorder == 'big' else 0xff563412)
    assert_equal(v.nbytes, numBytes(qimg))
Example 4
def test_ARGB32():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_ARGB32)
    qimg.fill(0)
    v = _qimageview(qimg)
    qimg.setPixel(12, 10, QtGui.qRgb(0x12, 0x34, 0x56))
    assert_equal(v.shape, (240, 320))
    assert_equal(v[10, 12],
                 0xff123456 if sys.byteorder == 'little' else 0x563412ff)
    assert_equal(v.nbytes, numBytes(qimg))
Example 5
def test_raw_rgba64():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_RGBA64)
    qimg.fill(0)
    v = qimage2ndarray.raw_view(qimg)
    qimg.fill(1)
    qimg.setPixel(12, 10, QtGui.qRgb(0x12, 0x34, 0x56))
    assert_equal(v.shape, (240, 320))
    assert_equal(v[10, 10], 0x010100000000)
    assert_equal(v[10, 12], 0xffff565634341212)
    assert_equal(v.nbytes, numBytes(qimg))
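raw_view exposes each pixel as one packed integer, with the array dtype following the bit depth of the format; that is why the RGBA64 test above compares 64-bit constants. A small sketch of the dtype mapping these tests rely on (our reading of the tests, not a documented guarantee; Format_RGBA64 needs Qt 5.12 or newer):

import numpy
import qimage2ndarray
from PyQt5 import QtGui

for fmt, expected in [
        (QtGui.QImage.Format_Indexed8, numpy.uint8),
        (QtGui.QImage.Format_RGB16, numpy.uint16),
        (QtGui.QImage.Format_ARGB32, numpy.uint32),
        (QtGui.QImage.Format_RGBA64, numpy.uint64),
]:
    qimg = QtGui.QImage(4, 4, fmt)
    assert qimage2ndarray.raw_view(qimg).dtype == expected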
Example 6
def test_raw_grayscale8():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_Grayscale8)
    qimg.fill(0)
    v = qimage2ndarray.raw_view(qimg)
    qimg.fill(1)
    qimg.setPixel(12, 10, QtGui.qRgb(42, 42, 42))
    assert_equal(v.shape, (240, 320))
    assert_equal(v[10, 10], 1)
    assert_equal(v[10, 12], 42)
    assert_equal(v.nbytes, numBytes(qimg))
Example 7
def test_raw_rgb16():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_RGB16)
    qimg.fill(0)
    v = qimage2ndarray.raw_view(qimg)
    qimg.fill(23)
    qimg.setPixel(12, 10, QtGui.qRgb(0, 0, 91))
    assert_equal(v.shape, (240, 320))
    assert_equal(v[10, 10], 23)
    assert_equal(v[10, 12], 91 >> 3)  # RGB16 keeps only the top 5 bits of blue
    assert_equal(v.nbytes, numBytes(qimg))
Example 8
def test_raw_rgb32():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_RGB32)
    qimg.fill(0)
    v = qimage2ndarray.raw_view(qimg)
    qimg.fill(23)
    qimg.setPixel(12, 10, QtGui.qRgb(0, 0, 42))
    assert_equal(v.shape, (240, 320))
    assert_equal(v[10, 10], 23 | 0xff000000)
    assert_equal(v[10, 12], 42 | 0xff000000)
    assert_equal(v.nbytes, numBytes(qimg))
Example 9
def test_recarray_view():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_ARGB32)
    qimg.fill(23)
    v = qimage2ndarray.recarray_view(qimg)
    qimg.setPixel(12, 10, QtGui.qRgb(12,34,56))
    assert_equal(v["g"][10,12], 34)
    assert_equal(v["g"].sum(), 34)
    assert_equal(v["green"].sum(), 34)
    assert_equal(v.g[10,12], 34)
    # this worked in the past, but with NumPy 1.2.1, I get:
    # TypeError: function takes at most 2 arguments (3 given)
    assert_equal(v[10,12]["g"], 34)
Example 10
def test_RGBX64():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_RGBX64)
    qimg.fill(QtGui.qRgb(0, 0, 0))
    v = _qimageview(qimg)
    qimg.setPixel(12, 10, QtGui.qRgb(0x12, 0x34, 0x56))
    assert_equal(v.shape, (240, 320))
    assert_equal(v[10, 10],
                 0xffff000000000000 if sys.byteorder == 'little' else 0xffff)
    assert_equal(
        v[10, 12], 0xffff565634341212
        if sys.byteorder == 'little' else 0x121234345656ffff)
    assert_equal(v.nbytes, numBytes(qimg))
Example 11
def test_rgb2qimage():
    a = numpy.zeros((240, 320, 3), dtype = float)
    a[12,10] = (42.42, 20, 14)
    a[13,10] = (-10, 0, -14)
    qImg = qimage2ndarray.array2qimage(a)
    assert not qImg.isNull()
    assert_equal(qImg.width(), 320)
    assert_equal(qImg.height(), 240)
    assert_equal(qImg.format(), QtGui.QImage.Format_RGB32)
    assert_equal(hex(qImg.pixel(10,12)), hex(QtGui.qRgb(42,20,14)))
    assert_equal(hex(qImg.pixel(10,13)), hex(QtGui.qRgb(0,0,0)))
    assert_equal(hex(qImg.pixel(10,14)), hex(QtGui.qRgb(0,0,0)))
Example 12
def test_scalar2qimage():
    a = numpy.zeros((240, 320), dtype = float)
    a[12,10] = 42.42
    a[13,10] = -10
    qImg = qimage2ndarray.array2qimage(a)
    assert not qImg.isNull()
    assert_equal(qImg.width(), 320)
    assert_equal(qImg.height(), 240)
    assert_equal(qImg.format(), QtGui.QImage.Format_RGB32)
    assert_equal(hex(qImg.pixel(10,12)), hex(QtGui.qRgb(42,42,42))) # max pixel
    assert_equal(hex(qImg.pixel(10,14)), hex(QtGui.qRgb(0,0,0)))    # zero pixel
    assert_equal(hex(qImg.pixel(10,13)), hex(QtGui.qRgb(0,0,0)))    # min pixel
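The two tests above pin down the default behavior of array2qimage: without normalization, values are simply clipped into the 0..255 byte range (42.42 becomes 42, -10 becomes 0). A hedged sketch contrasting that with normalize=True, which rescales the array's min..max to 0..255:

import numpy
import qimage2ndarray
from PyQt5 import QtGui

a = numpy.array([[-10.0, 0.0, 42.42]])
clipped = qimage2ndarray.array2qimage(a)
assert clipped.pixel(0, 0) == QtGui.qRgb(0, 0, 0)        # -10 clipped to 0
assert clipped.pixel(2, 0) == QtGui.qRgb(42, 42, 42)     # 42.42 truncated to 42
scaled = qimage2ndarray.array2qimage(a, normalize=True)  # -10..42.42 -> 0..255
assert scaled.pixel(0, 0) == QtGui.qRgb(0, 0, 0)
assert scaled.pixel(2, 0) == QtGui.qRgb(255, 255, 255)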
Example 13
def test_scalar2qimage_with_alpha():
    a = numpy.zeros((240, 320, 2), dtype = float)
    a[...,1] = 255
    a[12,10] = (42.42, 128)
    a[13,10] = (-10, 0)
    qImg = qimage2ndarray.array2qimage(a)
    assert not qImg.isNull()
    assert_equal(qImg.width(), 320)
    assert_equal(qImg.height(), 240)
    assert_equal(qImg.format(), QtGui.QImage.Format_ARGB32)
    assert_equal(hex(qImg.pixel(10,12)), hex(QtGui.qRgba(42,42,42,128))) # max pixel
    assert_equal(hex(qImg.pixel(10,14)), hex(QtGui.qRgba(0,0,0,255)))    # zero pixel
    assert_equal(hex(qImg.pixel(10,13)), hex(QtGui.qRgba(0,0,0,0)))      # min pixel
Example 14
def test_gray2qimage():
    a = numpy.zeros((240, 320), dtype = float)
    a[12,10] = 42.42
    a[13,10] = -10
    qImg = qimage2ndarray.gray2qimage(a)
    assert not qImg.isNull()
    assert_equal(qImg.width(), 320)
    assert_equal(qImg.height(), 240)
    assert_equal(qImg.format(), QtGui.QImage.Format_Indexed8)
    assert_equal(a.nbytes, numBytes(qImg) * a.itemsize)
    assert_equal(numColors(qImg), 256)
    assert_equal(hex(qImg.pixel(10,12)), hex(QtGui.qRgb(42,42,42)))
    assert_equal(hex(qImg.pixel(10,14)), hex(QtGui.qRgb(0,0,0)))
    assert_equal(hex(qImg.pixel(10,13)), hex(QtGui.qRgb(0,0,0)))
Example 15
def test_bool2qimage_normalize():
    a = numpy.zeros((240, 320), dtype = bool)
    a[12,10] = True
    # normalization should scale to 0/255
    # (not raise a numpy exception, see issue #17)
    qImg = qimage2ndarray.gray2qimage(a, normalize = True)
    assert not qImg.isNull()
    assert_equal(qImg.width(), 320)
    assert_equal(qImg.height(), 240)
    assert_equal(hex(qImg.pixel(10,12)), hex(QtGui.qRgb(255,255,255)))
    assert_equal(hex(qImg.pixel(0,0)), hex(QtGui.qRgb(0,0,0)))
    a[:] = True
    qImg = qimage2ndarray.gray2qimage(a, normalize = True)
    # for boolean arrays, I would assume True should always map to 255
    assert_equal(hex(qImg.pixel(0,0)), hex(QtGui.qRgb(255,255,255)))
Example 16
def test_scalar2qimage_normalize_domain():
    a = numpy.zeros((240, 320), dtype = float)
    a[12,10] = 42.42
    a[13,10] = -10
    qImg = qimage2ndarray.array2qimage(a, normalize = (-100, 100))
    assert not qImg.isNull()
    assert_equal(qImg.width(), 320)
    assert_equal(qImg.height(), 240)
    assert_equal(qImg.format(), QtGui.QImage.Format_RGB32)
    x = int(255 * 142.42 / 200.0)
    assert_equal(hex(qImg.pixel(10,12)), hex(QtGui.qRgb(x,x,x)))
    x = int(255 *  90.0 / 200.0)
    assert_equal(hex(qImg.pixel(10,13)), hex(QtGui.qRgb(x,x,x)))
    x = int(255 * 100.0 / 200.0)
    assert_equal(hex(qImg.pixel(10,14)), hex(QtGui.qRgb(x,x,x)))
Example 17
def test_rgb2qimage_normalize():
    a = numpy.zeros((240, 320, 3), dtype = float)
    a[12,10] = (42.42, 20, 14)
    a[13,10] = (-10, 20, 0)
    qImg = qimage2ndarray.array2qimage(a, normalize = True)
    assert not qImg.isNull()
    assert_equal(qImg.width(), 320)
    assert_equal(qImg.height(), 240)
    assert_equal(qImg.format(), QtGui.QImage.Format_RGB32)
    assert_equal(hex(qImg.pixel(10,12)),
                 hex(QtGui.qRgb(255, int(255*30.0/52.42), int(255*24/52.42))))
    assert_equal(hex(qImg.pixel(10,13)),
                 hex(QtGui.qRgb(0, int(255*30.0/52.42), int(255*10/52.42))))
    x = int(255 * 10.0 / 52.42)
    assert_equal(hex(qImg.pixel(10,14)), hex(QtGui.qRgb(x,x,x)))       # zero pixel
Example 18
def test_gray2qimage_normalize_onlymax():
    a = numpy.zeros((240, 320), dtype = float)
    a[12,10] = 42.42
    a[13,10] = -10
    qImg = qimage2ndarray.gray2qimage(a, normalize = 80)
    assert not qImg.isNull()
    assert_equal(qImg.width(), 320)
    assert_equal(qImg.height(), 240)
    assert_equal(qImg.format(), QtGui.QImage.Format_Indexed8)
    assert_equal(a.nbytes, numBytes(qImg) * a.itemsize)
    assert_equal(numColors(qImg), 256)
    x = int(255 * 42.42 / 80.0)
    assert_equal(hex(qImg.pixel(10,12)), hex(QtGui.qRgb(x,x,x)))
    assert_equal(hex(qImg.pixel(10,13)), hex(QtGui.qRgb(0,0,0)))
    assert_equal(hex(qImg.pixel(10,14)), hex(QtGui.qRgb(0,0,0)))
Example 19
def test_scalar2qimage_masked():
    a = numpy.zeros((240, 320), dtype = float)
    a[12,10] = 42.42
    a[13,10] = -10
    a[:,160:] = 100
    a = numpy.ma.masked_greater(a, 99)
    qImg = qimage2ndarray.array2qimage(a, normalize = True)
    assert not qImg.isNull()
    assert_equal(qImg.width(), 320)
    assert_equal(qImg.height(), 240)
    assert_equal(qImg.format(), QtGui.QImage.Format_ARGB32)
    assert_equal(hex(qImg.pixel(10,12)), hex(QtGui.qRgb(255,255,255)))
    assert_equal(hex(qImg.pixel(10,13)), hex(QtGui.qRgb(0,0,0)))
    x = int(255 * 10.0 / 52.42)
    assert_equal(hex(qImg.pixel(10,14)), hex(QtGui.qRgb(x,x,x)))
    assert_equal(QtGui.qAlpha(qImg.pixel(0,10)), 255)
    assert_equal(QtGui.qAlpha(qImg.pixel(200,10)), 0)
Example 20
def test_ARGB32():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_ARGB32)
    qimg.fill(0)
    v = _qimageview(qimg)
    qimg.setPixel(12, 10, 42)
    assert_equal(v.shape, (240, 320))
    assert_equal(v[10, 12], 42)
    assert_equal(v.nbytes, numBytes(qimg))
Example 21
def test_viewcreation():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_RGB32)
    v = _qimageview(qimg)
    assert_equal(v.shape, (240, 320))
    assert v.base is qimg
    del qimg
    w, h = v.base.width(), v.base.height()  # should not segfault
    assert_equal((w, h), (320, 240))
Example 22
def test_odd_size_32bit():
    qimg = QtGui.QImage(321, 240, QtGui.QImage.Format_ARGB32)
    qimg.fill(0)
    v = _qimageview(qimg)
    qimg.setPixel(12, 10, 42)
    assert_equal(v.shape, (240, 321))
    assert_equal(v[10, 12], 42)
    assert_equal(v.strides[0], qimg.bytesPerLine())
Example 23
def test_data_access():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_Indexed8)
    setNumColors(qimg, 256)
    qimg.fill(42)
    v = _qimageview(qimg)
    assert_equal(v.shape, (240, 320))
    assert_equal(v[10, 10], 42)
    assert_equal(v.nbytes, numBytes(qimg))
Example 24
def test_gray2qimage_normalize_domain():
    a = numpy.zeros((240, 320), dtype = float)
    a[12,10] = 42.42
    a[13,10] = -10
    qImg = qimage2ndarray.gray2qimage(a, normalize = (-100, 100))
    assert not qImg.isNull()
    assert_equal(qImg.width(), 320)
    assert_equal(qImg.height(), 240)
    assert_equal(qImg.format(), QtGui.QImage.Format_Indexed8)
    assert_equal(a.nbytes, numBytes(qImg) * a.itemsize)
    assert_equal(numColors(qImg), 256)
    x = int(255 * 142.42 / 200.0)
    assert_equal(hex(qImg.pixel(10,12)), hex(QtGui.qRgb(x,x,x)))
    x = int(255 *  90.0 / 200.0)
    assert_equal(hex(qImg.pixel(10,13)), hex(QtGui.qRgb(x,x,x)))
    x = int(255 * 100.0 / 200.0)
    assert_equal(hex(qImg.pixel(10,14)), hex(QtGui.qRgb(x,x,x)))
Example 25
def test_byte_view_rgb32():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_RGB32)
    v = qimage2ndarray.byte_view(qimg)
    qimg.fill(23)
    qimg.setPixel(12, 10, 42)
    assert_equal(v.shape, (240, 320, 4))
    assert_equal(list(v[10,10]), [23, 0, 0, 0xff])
    assert_equal(list(v[10,12]), [42, 0, 0, 0xff])
    assert_equal(v.nbytes, numBytes(qimg))
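The [23, 0, 0, 0xff] expectation above is worth spelling out: byte_view returns the raw pixel bytes, so on a little-endian machine an 0xAARRGGBB word reads back as [B, G, R, A]. A sketch making the order explicit (assuming a little-endian host; rgb_view is the portable way to get a fixed RGB order):

import sys
import qimage2ndarray
from PyQt5 import QtGui

qimg = QtGui.QImage(4, 4, QtGui.QImage.Format_RGB32)
qimg.fill(QtGui.qRgb(10, 20, 30))
raw = qimage2ndarray.byte_view(qimg)
if sys.byteorder == 'little':
    assert list(raw[0, 0]) == [30, 20, 10, 255]   # B, G, R, A
assert list(qimage2ndarray.rgb_view(qimg)[0, 0]) == [10, 20, 30]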
Example 26
def test_odd_size_8bit():
    qimg = QtGui.QImage(321, 240, QtGui.QImage.Format_Indexed8)
    setNumColors(qimg, 256)
    qimg.fill(0)
    v = _qimageview(qimg)
    qimg.setPixel(12, 10, 42)
    assert_equal(v.shape, (240, 321))
    assert_equal(v[10, 12], 42)
    assert_equal(v.strides[0], qimg.bytesPerLine())
Example 27
def test_viewcreation():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_RGB32)
    v = _qimageview(qimg)
    assert_equal(v.shape, (240, 320))
    assert v.base is not None
    del qimg
    if hasattr(v.base, 'width'):
        w, h = v.base.width(), v.base.height()  # should not segfault
        assert_equal((w, h), (320, 240))
    v[239] = numpy.arange(320)  # should not segfault
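Both test_viewcreation variants check the same lifetime guarantee: the view's .base attribute keeps the QImage buffer alive, so dropping the original reference must not leave the array dangling. The pattern in user code looks like this (a sketch; the exact type of .base differs between qimage2ndarray versions, hence the hasattr check in the second variant):

import qimage2ndarray
from PyQt5 import QtGui

def make_view():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_RGB32)
    qimg.fill(0)
    return qimage2ndarray.rgb_view(qimg)   # the local qimg dies here

v = make_view()
v[:] = 255                  # safe: v.base still references the image buffer
assert v.base is not None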
Example 28
def test_byte_view_indexed():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_Indexed8)
    setNumColors(qimg, 256)
    v = qimage2ndarray.byte_view(qimg)
    qimg.fill(23)
    qimg.setPixel(12, 10, 42)
    assert_equal(v.shape, (240, 320, 1))
    assert_equal(list(v[10,10]), [23])
    assert_equal(list(v[10,12]), [42])
    assert_equal(v.nbytes, numBytes(qimg))
Example 29
def test_raw_indexed8():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_Indexed8)
    setNumColors(qimg, 256)
    qimg.fill(0)
    v = qimage2ndarray.raw_view(qimg)
    qimg.fill(23)
    qimg.setPixel(12, 10, 42)
    assert_equal(v.shape, (240, 320))
    assert_equal(v[10,10], 23)
    assert_equal(v[10,12], 42)
    assert_equal(v.nbytes, numBytes(qimg))
Example 30
def test_coordinate_access():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_Indexed8)
    setNumColors(qimg, 256)
    qimg.fill(0)
    v = _qimageview(qimg)
    qimg.fill(23)
    qimg.setPixel(12, 10, 42)
    assert_equal(v.shape, (240, 320))
    assert_equal(v[10, 10], 23)
    assert_equal(v[10, 12], 42)
    assert_equal(v.nbytes, numBytes(qimg))
Example 31
def test_empty2qimage():
    a = numpy.ones((240, 320), dtype = float)
    qImg = qimage2ndarray.gray2qimage(a, normalize = True)
    assert_equal(hex(qImg.pixel(10,13)), hex(QtGui.qRgb(0,0,0)))
    qImg = qimage2ndarray.array2qimage(a, normalize = True)
    assert_equal(hex(qImg.pixel(10,13)), hex(QtGui.qRgb(0,0,0)))
Example 32
def test_rgb_view():
    qimg = QtGui.QImage(320, 240, QtGui.QImage.Format_RGB32)
    qimg.fill(23)
    v = qimage2ndarray.rgb_view(qimg)
    qimg.setPixel(12, 10, QtGui.qRgb(12,34,56))
    assert_equal(list(v[10,12]), [12,34,56])
Example 33
    def show_img(self):
        global temp_t
        success, self.img = self.camera.read()
        if success:
            self.Image_num += 1
            if self.Image_num % 10 == 9:
                frame_rate = 10 / (time.perf_counter() - self.timelb)
                self.FmRateLCD.display(frame_rate)
                self.timelb = time.perf_counter()
            if self.case == 0:
                showImg = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB)
                showImg = qimage2ndarray.array2qimage(showImg)
                self.Camera_2.setPixmap(QPixmap(showImg))  # display the frame
                self.Camera_2.show()
            if self.case == 1:
                bounding_boxes, landmarks = detect_faces(self.img)
                self.img = show_bboxes(self.img, bounding_boxes, landmarks)
                showImg = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB)
                showImg = qimage2ndarray.array2qimage(showImg)
                self.Camera_2.setPixmap(QPixmap(showImg))  # display the frame
                self.Camera_2.show()
            if self.case == 2:
                img_copy = self.img.copy()
                frag_gray = False
                self.time_ing = time.time()
                # point=[100,0,540,480]
                if self.frag_cap:
                    bounding_boxes, landmarks = detect_faces(self.img)
                    print('Locating face...')
                    if len(bounding_boxes) == 1:
                        self.point.clear()
                        for b in bounding_boxes:
                            b = [int(round(value)) for value in b]
                            for i in b:
                                self.point.append(i)
                        self.frag_cap = False
                    # print(point)
                    # cv2.rectangle(draw, (b[0], b[1]), (b[2], b[3]), (0, 255, 0), 2)
                    # crop coordinates are [y0:y1, x0:x1]

                if not self.frag_cap:
                    if self.point[0] < 540:
                        self.img = self.img[self.point[1] - 10:479,
                                            self.point[0] - 100:self.point[2] +
                                            100]
                    else:
                        self.img = self.img[self.point[1] - 10:479,
                                            self.point[0] - 100:639]
                else:
                    self.img = self.img[1:479, 1:640]
                if int(self.time_ing - self.time_first) % 60 == 0:
                    self.frag_cap = True

                else:
                    self.frag_cap = False
                bounding_boxes, landmarks = detect_faces(self.img)

                # Use the MTCNN face boxes: if no face is detected, treat the frame as head-down or dozing
                if len(bounding_boxes) == 0:
                    self.nod_fps += 1
                if self.nod_fps >= 3:
                    self.Head_state.setText('Nodding')
                    self.nod_count += 1
                if len(bounding_boxes) > 0:
                    self.nod_fps = 0

                # Use changes in the head-pose Euler angles to detect head shaking
                if len(bounding_boxes) > 0:
                    Head_Y_X_Z = get_head_pose(landmarks)
                    print('pitch:{}, yaw:{}, roll:{}'.format(
                        Head_Y_X_Z[1], Head_Y_X_Z[2], Head_Y_X_Z[3]))
                    if (Head_Y_X_Z[2] < -0.75):
                        self.shake_fps_l += 1
                    if (Head_Y_X_Z[2] >= -0.75):
                        self.shake_fps_l = 0
                    if self.shake_fps_l >= 5:
                        self.shake_count += 1
                        self.Head_state.setText('Shaking head')
                    if Head_Y_X_Z[3] >= 0.30:
                        self.shake_fps_r += 1
                    if self.shake_fps_r >= 5:
                        self.shake_count += 1
                        self.Head_state.setText('Shaking head')
                    if Head_Y_X_Z[3] < 0.30:
                        self.shake_fps_r = 0
                    # print(Head_Y_X_Z[1])
                    # print(Head_Y_X_Z[2])
                    # print(Head_Y_X_Z[3])

                if time.time() - self.nod_start > 3:
                    self.Head_state.setText('')
                if time.time() - self.shake_start > 3:
                    self.Head_state.setText('')
                # compute the nod frequency, once every 10 s
                if time.time() - self.nod_start > 10:
                    times = time.time() - self.nod_start
                    self.nod_freq = self.nod_count / times
                    self.nod_start = time.time()
                    self.Nod_LCD.display(self.nod_freq)

                # compute the head-shake frequency
                if time.time() - self.shake_start > 10:
                    times = time.time() - self.shake_start
                    self.shake_freq = self.shake_count / times
                    self.shake_start = time.time()
                    self.shake_LCD.display(self.shake_freq)

                if len(bounding_boxes) > 0:
                    Emotions = get_emotion(
                        get_face_expression(self.img, bounding_boxes))
                    self.Emotion.setText(Emotions[1])
                    self.Emotion_pred.display(float(Emotions[0]))
                    # print(Emotions)
                    canvas = cv2.imread('img_resource/label_pred.jpg',
                                        flags=cv2.IMREAD_UNCHANGED)
                    for (i,
                         (emotion,
                          prob)) in enumerate(zip(self.EMOTIONS, Emotions[2])):
                        # text = "{}: {:.2f}%".format(emotion, prob * 100)
                        text = "{:.2f}%".format(prob * 100)
                        # draw a bar chart of emotion classes and their probabilities
                        w = int(prob * 180)
                        # print(text)
                        # canvas = 255 * np.ones((250, 300, 3), dtype="uint8")

                        cv2.rectangle(canvas, (0, (i * 44) + 25),
                                      (w, (i * 43) + 40), (100, 200, 130), -1)
                        cv2.putText(canvas, text, (170, (i * 43) + 40),
                                    cv2.FONT_HERSHEY_DUPLEX, 0.6, (0, 0, 0), 1)
                        show = cv2.cvtColor(canvas, cv2.COLOR_BGR2RGB)
                        showImage = QtGui.QImage(show.data, show.shape[1],
                                                 show.shape[0],
                                                 QtGui.QImage.Format_RGB888)
                        # cv2.imshow('test', showImage)
                        # showImg=QPixmap(showImage)
                        self.label_pred_img.setPixmap(
                            QtGui.QPixmap.fromImage(showImage))
                #         # print('test')
                # print('Head_Y_X_Z')
                # print(Head_Y_X_Z)

                x = cv2.resize(self.img, (300, 300)).astype(np.float32)
                flag_B = True  # eye-state flag: True = eyes open
                flag_Y = False  # mouth-state flag: True = mouth open
                num_rec = 0  # number of detections

                # preprocessing: mean-subtract, BGR -> RGB, HWC -> CHW
                x -= self.img_mean
                x = x.astype(np.float32)
                x = x[:, :, ::-1].copy()
                x = torch.from_numpy(x).permute(2, 0, 1)
                xx = Variable(x.unsqueeze(0))
                # if torch.cuda.is_available():
                #     xx = xx.cuda()
                xx = xx.cuda()
                y = self.net(xx)
                softmax = nn.Softmax(dim=-1)
                detect = Detect(config.class_num, 0, 200, 0.01, 0.45)
                priors = utils.default_prior_box()

                loc, conf = y
                loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
                conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)

                detections = detect(
                    loc.view(loc.size(0), -1, 4),
                    softmax(conf.view(conf.size(0), -1, config.class_num)),
                    torch.cat([o.view(-1, 4) for o in priors], 0)).data
                labels = VOC_CLASSES
                # place the detection results on the image
                scale = torch.Tensor(self.img.shape[1::-1]).repeat(2)
                self.img = show_bboxes(self.img, bounding_boxes, landmarks)
                for i in range(detections.size(1)):

                    j = 0
                    while detections[0, i, j, 0] >= 0.4:
                        score = detections[0, i, j, 0]
                        label_name = labels[i - 1]
                        if label_name == 'calling' and score > 0.8:
                            self.Danger_state.setText('Phone call')
                            self.danger_count += 1
                            frag_gray = True
                        if label_name == 'smoke' and score > 0.8:
                            self.Danger_state.setText('Smoking')
                            self.danger_count += 1
                            frag_gray = True
                        if label_name != 'smoke' and label_name != 'calling':
                            self.danger_t += 1
                        if self.danger_t >= 20:
                            self.Danger_state.setText('')
                            self.danger_t = 0
                        if label_name == 'open_eye':
                            self.open_t += 1
                            if self.open_t >= 20:
                                self.Eyes_state.setText('')
                                self.open_t = 0
                        if label_name == 'closed_mouth':
                            self.Mouth_state.setText(' ')
                        if label_name == 'closed_eye':
                            flag_B = False
                            frag_gray = True
                        if label_name == 'open_mouth':
                            flag_Y = True
                        display_txt = '%s:%.2f' % (label_name, score)
                        pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
                        self.coords = (
                            pt[0], pt[1]), pt[2] - pt[0] + 1, pt[3] - pt[1] + 1
                        color = self.colors_tableau[i]
                        cv2.rectangle(self.img,
                                      (int(pt[0]), int(pt[1])),
                                      (int(pt[2]), int(pt[3])), color, 2)
                        cv2.putText(self.img, display_txt,
                                    (int(pt[0]), int(pt[1]) + 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.4,
                                    (255, 255, 255), 1, 8)
                        j += 1
                        num_rec += 1

                # cv2.imshow('test', self.img)
                if num_rec > 0:
                    if flag_B:
                        # print(' 1:eye-open')
                        self.list_B = np.append(self.list_B, 1)  # eyes open -> 1
                        self.list_blink = np.append(self.list_blink, 1)
                    else:
                        # print(' 0:eye-closed')

                        self.list_B = np.append(self.list_B, 0)  # eyes closed -> 0
                        self.list_blink = np.append(self.list_blink, 0)
                    self.list_blink = np.delete(self.list_blink, 0)
                    self.list_B = np.delete(self.list_B, 0)
                    if flag_Y:
                        self.list_Y = np.append(self.list_Y, 1)
                    else:
                        self.list_Y = np.append(self.list_Y, 0)
                    self.list_Y = np.delete(self.list_Y, 0)
                else:
                    self.Msg.clear()
                    self.Msg.setPlainText('Nothing detected.')

                # print(list)
                # compute PERCLOS in real time
                self.perclos = 1 - np.average(self.list_blink)
                # print('perclos={:f}'.format(perclos))
                self.PERCLOS.display(self.perclos)
                if self.list_B[8] == 1 and self.list_B[9] == 0:
                    # previous frame '1' (open) and current frame '0' (closed): count a blink
                    self.Eyes_state.setText('Blink')
                    self.blink_count += 1
                    frag_gray = True
                    timestamp = datetime.datetime.now().strftime("%H:%M:%S")
                    self.State_record.append(timestamp + ': blink')
                    # img_copy=cv2.cvtColor(img_copy,cv2.COLOR_RGB2GRAY)
                blink_T = time.time() - self.blink_start
                if blink_T > 30:
                    # compute the blink frequency every 30 seconds
                    blink_freq = self.blink_count / blink_T
                    self.blink_start = time.time()
                    self.blink_count = 0
                    print('blink_freq={:f}'.format(blink_freq))
                    self.Blink_freq.display(blink_freq * 2)
                # detect yawning
                # if Yawn(list_Y,list_Y1):
                if (self.list_Y[len(self.list_Y) -
                                len(self.list_Y1):] == self.list_Y1).all():
                    # print('---------------------- yawning ----------------------')
                    self.Mouth_state.setText('Yawning')
                    self.yawn_count += 1
                    frag_gray = True
                    timestamp = datetime.datetime.now().strftime("%H:%M:%S")
                    self.State_record.append(timestamp + ': yawn')
                    self.list_Y = np.zeros(50)
                # compute the yawn frequency
                yawn_T = time.time() - self.yawn_start
                if yawn_T > 60:
                    yawn_freq = self.yawn_count / yawn_T
                    self.yawn_start = time.time()
                    self.yawn_count = 0
                    print('yawn_freq={:f}'.format(yawn_freq))
                    self.Yawn_freq.display(yawn_freq)

                # compute the dangerous-behavior frequency
                DangerAct_T = time.time() - self.danger_start
                if DangerAct_T > 60:
                    danger_freq = self.danger_count / DangerAct_T
                    self.danger_start = time.time()
                    self.danger_count = 0
                    print('danger_freq={:f}'.format(danger_freq))
                    self.Danger_LCD.display(danger_freq)

                if (self.perclos > 0.4):
                    # print('fatigued')
                    self.State.setText('Fatigued')
                elif (self.blink_freq > 0.3):
                    # print('fatigued')
                    self.State.setText('Fatigued')
                    self.blink_freq = 0  # reset the blink frequency once it triggers a fatigue verdict
                elif (self.yawn_freq > 5.0 / 60):
                    # print("fatigued")
                    self.State.setText('Fatigued')
                    self.yawn_freq = 0  # reset, as above
                else:
                    self.State.setText('Alert')

                if not frag_gray:
                    showImg = cv2.cvtColor(img_copy, cv2.COLOR_BGR2RGB)
                else:
                    if self.isRecordImg:
                        timestamp = datetime.datetime.now().strftime(
                            "%Y_%m_%d_%H_%M_%S")
                        temp = 'ImgRecord/' + timestamp + '.jpg'
                        cv2.imwrite(temp, img_copy)

                    showImg = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
                showImg = qimage2ndarray.array2qimage(showImg)
                self.Camera_2.setPixmap(QPixmap(showImg))  # display the frame
                self.Camera_2.show()
            if self.case == 3:
                img_copy = self.img.copy()
                frag_gray = False
                self.time_ing = time.time()
                # point=[100,0,540,480]
                if self.frag_cap:
                    bounding_boxes, landmarks = detect_faces(self.img)
                    print('Locating face...')
                    if len(bounding_boxes) == 1:
                        self.point.clear()
                        for b in bounding_boxes:
                            b = [int(round(value)) for value in b]
                            for i in b:
                                self.point.append(i)
                        self.frag_cap = False
                    # print(point)
                    # cv2.rectangle(draw, (b[0], b[1]), (b[2], b[3]), (0, 255, 0), 2)
                    # crop coordinates are [y0:y1, x0:x1]

                if not self.frag_cap:
                    if self.point[0] < 540:
                        self.img = self.img[self.point[1] - 10:479,
                                            self.point[0] - 100:self.point[2] +
                                            100]
                    else:
                        self.img = self.img[self.point[1] - 10:479,
                                            self.point[0] - 100:639]
                else:
                    self.img = self.img[1:479, 1:640]
                if int(self.time_ing - self.time_first) % 60 == 0:
                    self.frag_cap = True

                else:
                    self.frag_cap = False
                bounding_boxes, landmarks = detect_faces(self.img)

                # Use the MTCNN face boxes: if no face is detected, treat the frame as head-down or dozing
                if len(bounding_boxes) == 0:
                    self.nod_fps += 1
                if self.nod_fps >= 3:
                    self.Head_state.setText('Nodding')
                    self.nod_count += 1
                if len(bounding_boxes) > 0:
                    self.nod_fps = 0

                # Use changes in the head-pose Euler angles to detect head shaking
                if len(bounding_boxes) > 0:
                    Head_Y_X_Z = get_head_pose(landmarks)
                    print('pitch:{}, yaw:{}, roll:{}'.format(
                        Head_Y_X_Z[1], Head_Y_X_Z[2], Head_Y_X_Z[3]))
                    if (Head_Y_X_Z[2] < -0.75):
                        self.shake_fps_l += 1
                    if (Head_Y_X_Z[2] >= -0.75):
                        self.shake_fps_l = 0
                    if self.shake_fps_l >= 5:
                        self.shake_count += 1
                        self.Head_state.setText('Shaking head')
                    if Head_Y_X_Z[3] >= 0.30:
                        self.shake_fps_r += 1
                    if self.shake_fps_r >= 5:
                        self.shake_count += 1
                        self.Head_state.setText('Shaking head')
                    if Head_Y_X_Z[3] < 0.30:
                        self.shake_fps_r = 0
                    # print(Head_Y_X_Z[1])
                    # print(Head_Y_X_Z[2])
                    # print(Head_Y_X_Z[3])

                if time.time() - self.nod_start > 3:
                    self.Head_state.setText('')
                if time.time() - self.shake_start > 3:
                    self.Head_state.setText('')
                # compute the nod frequency, once every 10 s
                if time.time() - self.nod_start > 10:
                    times = time.time() - self.nod_start
                    self.nod_freq = self.nod_count / times
                    self.nod_start = time.time()
                    self.Nod_LCD.display(self.nod_freq)

                # compute the head-shake frequency
                if time.time() - self.shake_start > 10:
                    times = time.time() - self.shake_start
                    self.shake_freq = self.shake_count / times
                    self.shake_start = time.time()
                    self.shake_LCD.display(self.shake_freq)

                if len(bounding_boxes) > 0:
                    Emotions = get_emotion(
                        get_face_expression(self.img, bounding_boxes))
                    self.Emotion.setText(Emotions[1])
                    self.Emotion_pred.display(float(Emotions[0]))
                    # print(Emotions)
                    canvas = cv2.imread('img_resource/label_pred.jpg',
                                        flags=cv2.IMREAD_UNCHANGED)
                    for (i,
                         (emotion,
                          prob)) in enumerate(zip(self.EMOTIONS, Emotions[2])):
                        # text = "{}: {:.2f}%".format(emotion, prob * 100)
                        text = "{:.2f}%".format(prob * 100)
                        # draw a bar chart of emotion classes and their probabilities
                        w = int(prob * 180)
                        # print(text)
                        # canvas = 255 * np.ones((250, 300, 3), dtype="uint8")

                        cv2.rectangle(canvas, (0, (i * 44) + 25),
                                      (w, (i * 43) + 40), (100, 200, 130), -1)
                        cv2.putText(canvas, text, (170, (i * 43) + 40),
                                    cv2.FONT_HERSHEY_DUPLEX, 0.6, (0, 0, 0), 1)
                        show = cv2.cvtColor(canvas, cv2.COLOR_BGR2RGB)
                        showImage = QtGui.QImage(show.data, show.shape[1],
                                                 show.shape[0],
                                                 QtGui.QImage.Format_RGB888)
                        # cv2.imshow('test', showImage)
                        # showImg=QPixmap(showImage)
                        self.label_pred_img.setPixmap(
                            QtGui.QPixmap.fromImage(showImage))
                #         # print('test')
                # print('Head_Y_X_Z')
                # print(Head_Y_X_Z)

                x = cv2.resize(self.img, (300, 300)).astype(np.float32)
                flag_B = True  # eye-state flag: True = eyes open
                flag_Y = False  # mouth-state flag: True = mouth open
                num_rec = 0  # number of detections

                # preprocessing: mean-subtract, BGR -> RGB, HWC -> CHW
                x -= self.img_mean
                x = x.astype(np.float32)
                x = x[:, :, ::-1].copy()
                x = torch.from_numpy(x).permute(2, 0, 1)
                xx = Variable(x.unsqueeze(0))
                # if torch.cuda.is_available():
                #     xx = xx.cuda()
                xx = xx.cuda()
                y = self.net(xx)
                softmax = nn.Softmax(dim=-1)
                detect = Detect(config.class_num, 0, 200, 0.01, 0.45)
                priors = utils.default_prior_box()

                loc, conf = y
                loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
                conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)

                detections = detect(
                    loc.view(loc.size(0), -1, 4),
                    softmax(conf.view(conf.size(0), -1, config.class_num)),
                    torch.cat([o.view(-1, 4) for o in priors], 0)).data
                labels = VOC_CLASSES
                # place the detection results on the image
                scale = torch.Tensor(self.img.shape[1::-1]).repeat(2)
                self.img = show_bboxes(self.img, bounding_boxes, landmarks)
                for i in range(detections.size(1)):

                    j = 0
                    while detections[0, i, j, 0] >= 0.4:
                        score = detections[0, i, j, 0]
                        label_name = labels[i - 1]
                        if label_name == 'calling' and score > 0.8:
                            self.Danger_state.setText('Phone call')
                            self.danger_count += 1
                            frag_gray = True
                        if label_name == 'smoke' and score > 0.8:
                            self.Danger_state.setText('Smoking')
                            self.danger_count += 1
                            frag_gray = True
                        if label_name != 'smoke' and label_name != 'calling':
                            self.danger_t += 1
                        if self.danger_t >= 20:
                            self.Danger_state.setText('')
                            self.danger_t = 0
                        if label_name == 'open_eye':
                            self.open_t += 1
                            if self.open_t >= 20:
                                self.Eyes_state.setText('')
                                self.open_t = 0
                        if label_name == 'closed_mouth':
                            self.Mouth_state.setText(' ')
                        if label_name == 'closed_eye':
                            flag_B = False
                            frag_gray = True
                        if label_name == 'open_mouth':
                            flag_Y = True
                        display_txt = '%s:%.2f' % (label_name, score)
                        pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
                        self.coords = (
                            pt[0], pt[1]), pt[2] - pt[0] + 1, pt[3] - pt[1] + 1
                        color = self.colors_tableau[i]
                        cv2.rectangle(self.img,
                                      (int(pt[0]), int(pt[1])),
                                      (int(pt[2]), int(pt[3])), color, 2)
                        cv2.putText(self.img, display_txt,
                                    (int(pt[0]), int(pt[1]) + 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.4,
                                    (255, 255, 255), 1, 8)
                        j += 1
                        num_rec += 1

                # cv2.imshow('test', self.img)
                if num_rec > 0:
                    if flag_B:
                        # print(' 1:eye-open')
                        self.list_B = np.append(self.list_B, 1)  # eyes open -> 1
                        self.list_blink = np.append(self.list_blink, 1)
                    else:
                        # print(' 0:eye-closed')

                        self.list_B = np.append(self.list_B, 0)  # eyes closed -> 0
                        self.list_blink = np.append(self.list_blink, 0)
                    self.list_blink = np.delete(self.list_blink, 0)
                    self.list_B = np.delete(self.list_B, 0)
                    if flag_Y:
                        self.list_Y = np.append(self.list_Y, 1)
                    else:
                        self.list_Y = np.append(self.list_Y, 0)
                    self.list_Y = np.delete(self.list_Y, 0)
                else:
                    self.Msg.clear()
                    self.Msg.setPlainText('Nothing detected.')

                # print(list)
                # compute PERCLOS in real time
                self.perclos = 1 - np.average(self.list_blink)
                # print('perclos={:f}'.format(perclos))
                self.PERCLOS.display(self.perclos)
                if self.list_B[8] == 1 and self.list_B[9] == 0:
                    # previous frame '1' (open) and current frame '0' (closed): count a blink
                    self.Eyes_state.setText('Blink')
                    self.blink_count += 1
                    frag_gray = True
                    timestamp = datetime.datetime.now().strftime("%H:%M:%S")
                    self.State_record.append(timestamp + ': blink')
                    # img_copy=cv2.cvtColor(img_copy,cv2.COLOR_RGB2GRAY)
                blink_T = time.time() - self.blink_start
                if blink_T > 30:
                    # compute the blink frequency every 30 seconds
                    blink_freq = self.blink_count / blink_T
                    self.blink_start = time.time()
                    self.blink_count = 0
                    print('blink_freq={:f}'.format(blink_freq))
                    self.Blink_freq.display(blink_freq * 2)
                # detect yawning
                # if Yawn(list_Y,list_Y1):
                if (self.list_Y[len(self.list_Y) -
                                len(self.list_Y1):] == self.list_Y1).all():
                    # print('---------------------- yawning ----------------------')
                    self.Mouth_state.setText('Yawning')
                    self.yawn_count += 1
                    frag_gray = True
                    timestamp = datetime.datetime.now().strftime("%H:%M:%S")
                    self.State_record.append(timestamp + ': yawn')
                    self.list_Y = np.zeros(50)
                # compute the yawn frequency
                yawn_T = time.time() - self.yawn_start
                if yawn_T > 60:
                    yawn_freq = self.yawn_count / yawn_T
                    self.yawn_start = time.time()
                    self.yawn_count = 0
                    print('yawn_freq={:f}'.format(yawn_freq))
                    self.Yawn_freq.display(yawn_freq)

                # compute the dangerous-behavior frequency
                DangerAct_T = time.time() - self.danger_start
                if DangerAct_T > 60:
                    danger_freq = self.danger_count / DangerAct_T
                    self.danger_start = time.time()
                    self.danger_count = 0
                    print('danger_freq={:f}'.format(danger_freq))
                    self.Danger_LCD.display(danger_freq)

                if (self.perclos > 0.4):
                    # print('fatigued')
                    self.State.setText('Fatigued')
                elif (self.blink_freq > 0.3):
                    # print('fatigued')
                    self.State.setText('Fatigued')
                    self.blink_freq = 0  # reset the blink frequency once it triggers a fatigue verdict
                elif (self.yawn_freq > 5.0 / 60):
                    # print("fatigued")
                    self.State.setText('Fatigued')
                    self.yawn_freq = 0  # reset, as above
                else:
                    self.State.setText('Alert')

                if not frag_gray:
                    showImg = cv2.cvtColor(img_copy, cv2.COLOR_BGR2RGB)
                else:
                    if self.isRecordImg:
                        timestamp = datetime.datetime.now().strftime(
                            "%Y_%m_%d_%H_%M_%S")
                        temp = 'ImgRecord/' + timestamp + '.jpg'
                        cv2.imwrite(temp, img_copy)

                    showImg = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
                self.State_record.moveCursor(QTextCursor.End)
                showImg = qimage2ndarray.array2qimage(showImg)
                self.Camera_2.setPixmap(QPixmap(showImg))  # display the frame
                self.Camera_2.show()
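The display step that show_img repeats in every branch (convert the OpenCV BGR frame to RGB, wrap it with array2qimage, hand it to the QLabel) can be factored into one helper. A minimal sketch, assuming PyQt5; frame_to_pixmap is our name, not part of the original program:

import cv2
import qimage2ndarray
from PyQt5.QtGui import QPixmap

def frame_to_pixmap(frame_bgr):
    # OpenCV delivers BGR, Qt expects RGB, so convert before wrapping
    rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
    return QPixmap.fromImage(qimage2ndarray.array2qimage(rgb))

Usage inside show_img would then be self.Camera_2.setPixmap(frame_to_pixmap(self.img)) followed by self.Camera_2.show().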