Example #1
    def getskills(self, intent, text, tok):

        if intent == 'clock':
            clock.start(tok)
        elif intent == 'camera':
            camera.start(tok)
        elif intent == 'smarthome':
            smarthome.start(tok)
        elif intent == 'weather':
            weather.start(tok)
        elif intent == 'music':
            music.start(tok)
        elif intent == 'mail':
            mail.start(tok)
        elif intent == 'joke':
            joke.start(tok)
        elif intent == 'news':
            news.start(tok)
        elif intent == 'tuling':
            tuling.start(text, tok)
        elif intent == 'snowboytrain':
            snowboytrain.start(tok)
        elif intent == 'raspberrypi-gpio':
            raspberrypigpio.start(tok)
        elif intent == 'respeaker':
            speaker.speak()
        elif intent == 'no':
            sconvenstation()  # assumed to be a helper defined elsewhere
        elif intent == 'reintent':
            # re-detect the intent and dispatch it again
            intent = nlp.do_intent(text, tok)
            self.getskills(intent, text, tok)
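The method above routes an intent string through a long if/elif chain; the same routing can be written as a dictionary dispatch table. A minimal sketch, assuming the same skill modules, each exposing start(tok):

    SKILLS = {
        'clock': clock.start,
        'camera': camera.start,
        'smarthome': smarthome.start,
        'weather': weather.start,
        'music': music.start,
        'mail': mail.start,
        'joke': joke.start,
        'news': news.start,
    }

    def getskills(self, intent, text, tok):
        handler = SKILLS.get(intent)
        if handler is not None:
            handler(tok)
        else:
            # fall back to re-detecting the intent, as the chain above does
            nlp.do_intent(text, tok)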
Example #2
    def getskills(self, intent, text, tok):

        m = xlMusic()
        if intent == 'clock':
            clock.start(tok)
        elif intent == 'camera':
            camera.start(tok)
        elif intent == 'smarthome':
            smarthome.start(tok)
        elif intent == 'weather':
            weather.start(tok)
        elif intent == 'music':
            m.start(tok)
        elif intent == 'translate':
            ts.start(tok)
        elif intent == 'email':
            mail.start(tok)
        elif intent == 'joke':
            joke.start(tok)
        elif intent == 'news':
            news.start(tok)
        elif intent == 'express':
            express.start(tok)
        elif intent == 'reintent':
            nlu.do_intent(text, tok)
        elif intent == 'no':
            speaker.speacilrecorder()
        else:
            nlu.do_intent(text, tok)
Example #3
def write():
    # 'ser' is assumed to be an open serial.Serial port created elsewhere
    user = input("Type the command: ")  #+ '\n'
    if user == 't':
        camera.start()
    elif user == 'q':
        camera.stop()
        print("==End of Photograph==")
    elif user == 's':
        camera.start()
        print("==Begin of Photograph==")
    else:
        # forward any unrecognized command to the serial port
        ser.write(user.encode('utf-8'))
        #    ser.write("S".encode('utf-8'))
        print("Raspberry Pi sent: " + user)
def main():

    useLiveCamera = True

    #gc.disable()

    # # transform to convert the image to tensor
    # transform = transforms.Compose([
    #     transforms.ToTensor()
    # ])
    # # initialize the model
    # model = torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=True,
    #                                                             num_keypoints=17)
    # # set the computation device
    # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # # load the model onto the computation device and set to eval mode
    # model.to(device).eval()

    # initialize glfw
    if not glfw.init():
        return
    glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 4)
    glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
    #creating the window
    window = glfw.create_window(1600, 900, "PyGLFusion", None, None)
    if not window:
        glfw.terminate()
        return

    glfw.make_context_current(window)

    imgui.create_context()
    impl = GlfwRenderer(window)

    # rendering
    glClearColor(0.2, 0.3, 0.2, 1.0)

    #           positions        texture coords
    quad = [
        -1.0, -1.0, 0.0,    0.0, 0.0,
         1.0, -1.0, 0.0,    1.0, 0.0,
         1.0,  1.0, 0.0,    1.0, 1.0,
        -1.0,  1.0, 0.0,    0.0, 1.0
    ]

    quad = np.array(quad, dtype=np.float32)

    indices = [0, 1, 2, 2, 3, 0]

    indices = np.array(indices, dtype=np.uint32)

    screenVertex_shader = (Path(__file__).parent /
                           'shaders/ScreenQuad.vert').read_text()

    screenFragment_shader = (Path(__file__).parent /
                             'shaders/ScreenQuad.frag').read_text()

    renderShader = OpenGL.GL.shaders.compileProgram(
        OpenGL.GL.shaders.compileShader(screenVertex_shader, GL_VERTEX_SHADER),
        OpenGL.GL.shaders.compileShader(screenFragment_shader,
                                        GL_FRAGMENT_SHADER))

    # set up VAO and VBO for full screen quad drawing calls
    VAO = glGenVertexArrays(1)
    glBindVertexArray(VAO)

    VBO = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, VBO)
    glBufferData(GL_ARRAY_BUFFER, quad.nbytes, quad, GL_STATIC_DRAW)

    EBO = glGenBuffers(1)
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO)
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices,
                 GL_STATIC_DRAW)

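    # interleaved layout: 5 floats per vertex (3 position + 2 texcoord),
    # so the stride is 5 * 4 = 20 bytes and texcoords start at byte offset 12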
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 20, ctypes.c_void_p(0))
    glEnableVertexAttribArray(0)
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 20, ctypes.c_void_p(12))
    glEnableVertexAttribArray(1)

    # shaders

    bilateralFilter_shader = (Path(__file__).parent /
                              'shaders/bilateralFilter.comp').read_text()
    bilateralFilterShader = OpenGL.GL.shaders.compileProgram(
        OpenGL.GL.shaders.compileShader(bilateralFilter_shader,
                                        GL_COMPUTE_SHADER))

    alignDepthColor_shader = (Path(__file__).parent /
                              'shaders/alignDepthColor.comp').read_text()
    alignDepthColorShader = OpenGL.GL.shaders.compileProgram(
        OpenGL.GL.shaders.compileShader(alignDepthColor_shader,
                                        GL_COMPUTE_SHADER))

    depthToVertex_shader = (Path(__file__).parent /
                            'shaders/depthToVertex.comp').read_text()
    depthToVertexShader = OpenGL.GL.shaders.compileProgram(
        OpenGL.GL.shaders.compileShader(depthToVertex_shader,
                                        GL_COMPUTE_SHADER))

    vertexToNormal_shader = (Path(__file__).parent /
                             'shaders/vertexToNormal.comp').read_text()
    vertexToNormalShader = OpenGL.GL.shaders.compileProgram(
        OpenGL.GL.shaders.compileShader(vertexToNormal_shader,
                                        GL_COMPUTE_SHADER))

    raycast_shader = (Path(__file__).parent /
                      'shaders/raycast.comp').read_text()
    raycastShader = OpenGL.GL.shaders.compileProgram(
        OpenGL.GL.shaders.compileShader(raycast_shader, GL_COMPUTE_SHADER))

    integrate_shader = (Path(__file__).parent /
                        'shaders/integrate.comp').read_text()
    integrateShader = OpenGL.GL.shaders.compileProgram(
        OpenGL.GL.shaders.compileShader(integrate_shader, GL_COMPUTE_SHADER))

    trackP2P_shader = (Path(__file__).parent /
                       'shaders/p2pTrack.comp').read_text()
    trackP2PShader = OpenGL.GL.shaders.compileProgram(
        OpenGL.GL.shaders.compileShader(trackP2P_shader, GL_COMPUTE_SHADER))

    reduceP2P_shader = (Path(__file__).parent /
                        'shaders/p2pReduce.comp').read_text()
    reduceP2PShader = OpenGL.GL.shaders.compileProgram(
        OpenGL.GL.shaders.compileShader(reduceP2P_shader, GL_COMPUTE_SHADER))

    trackP2V_shader = (Path(__file__).parent /
                       'shaders/p2vTrack.comp').read_text()
    trackP2VShader = OpenGL.GL.shaders.compileProgram(
        OpenGL.GL.shaders.compileShader(trackP2V_shader, GL_COMPUTE_SHADER))

    reduceP2V_shader = (Path(__file__).parent /
                        'shaders/p2vReduce.comp').read_text()
    reduceP2VShader = OpenGL.GL.shaders.compileProgram(
        OpenGL.GL.shaders.compileShader(reduceP2V_shader, GL_COMPUTE_SHADER))

    LDLT_shader = (Path(__file__).parent / 'shaders/LDLT.comp').read_text()
    LDLTShader = OpenGL.GL.shaders.compileProgram(
        OpenGL.GL.shaders.compileShader(LDLT_shader, GL_COMPUTE_SHADER))

    # Splatter
    globalMapUpdate_shader = (Path(__file__).parent /
                              'shaders/GlobalMapUpdate.comp').read_text()
    globalMapUpdateShader = OpenGL.GL.shaders.compileProgram(
        OpenGL.GL.shaders.compileShader(globalMapUpdate_shader,
                                        GL_COMPUTE_SHADER))

    indexMapGenVert_shader = (Path(__file__).parent /
                              'shaders/IndexMapGeneration.vert').read_text()

    indexMapGenFrag_shader = (Path(__file__).parent /
                              'shaders/IndexMapGeneration.frag').read_text()

    IndexMapGenerationShader = OpenGL.GL.shaders.compileProgram(
        OpenGL.GL.shaders.compileShader(indexMapGenVert_shader,
                                        GL_VERTEX_SHADER),
        OpenGL.GL.shaders.compileShader(indexMapGenFrag_shader,
                                        GL_FRAGMENT_SHADER))

    SurfaceSplattingVert_shader = (
        Path(__file__).parent / 'shaders/SurfaceSplatting.vert').read_text()

    SurfaceSplattingFrag_shader = (
        Path(__file__).parent / 'shaders/SurfaceSplatting.frag').read_text()

    SurfaceSplattingShader = OpenGL.GL.shaders.compileProgram(
        OpenGL.GL.shaders.compileShader(SurfaceSplattingVert_shader,
                                        GL_VERTEX_SHADER),
        OpenGL.GL.shaders.compileShader(SurfaceSplattingFrag_shader,
                                        GL_FRAGMENT_SHADER))

    UnnecessaryPointRemoval_shader = (
        Path(__file__).parent /
        'shaders/UnnecessaryPointRemoval.comp').read_text()
    UnnecessaryPointRemovalShader = OpenGL.GL.shaders.compileProgram(
        OpenGL.GL.shaders.compileShader(UnnecessaryPointRemoval_shader,
                                        GL_COMPUTE_SHADER))

    # P2V
    expm_shader = (Path(__file__).parent / 'shaders/expm.comp').read_text()
    expmShader = OpenGL.GL.shaders.compileProgram(
        OpenGL.GL.shaders.compileShader(expm_shader, GL_COMPUTE_SHADER))
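    # Note: each compute-shader load above repeats the same read-then-compile
    # pattern; a small helper (hypothetical, not part of the original file)
    # would collapse the boilerplate:
    #
    #   def loadComputeShader(name):
    #       src = (Path(__file__).parent / ('shaders/' + name)).read_text()
    #       return OpenGL.GL.shaders.compileProgram(
    #           OpenGL.GL.shaders.compileShader(src, GL_COMPUTE_SHADER))
    #
    #   expmShader = loadComputeShader('expm.comp')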

    d2c, c2d, K, invK, colK = camera.start(useLiveCamera)

    shaderDict = {
        'renderShader': renderShader,
        'bilateralFilterShader': bilateralFilterShader,
        'alignDepthColorShader': alignDepthColorShader,
        'depthToVertexShader': depthToVertexShader,
        'vertexToNormalShader': vertexToNormalShader,
        'raycastVolumeShader': raycastShader,
        'integrateVolumeShader': integrateShader,
        'trackP2PShader': trackP2PShader,
        'reduceP2PShader': reduceP2PShader,
        'trackP2VShader': trackP2VShader,
        'reduceP2VShader': reduceP2VShader,
        'LDLTShader': LDLTShader,
        'globalMapUpdate': globalMapUpdateShader,
        'indexMapGeneration': IndexMapGenerationShader,
        'surfaceSplatting': SurfaceSplattingShader,
        'unnecessaryPointRemoval': UnnecessaryPointRemovalShader,
        'expm': expmShader
    }

    bufferDict = {
        'p2pReduction': -1,
        'p2pRedOut': -1,
        'p2vReduction': -1,
        'p2vRedOut': -1,
        'test': -1,
        'outBuf': -1,
        'poseBuffer': -1,
        'globalMap0': -1,
        'globalMap1': -1,
        'atomic0': -1,
        'atomic1': -1
    }

    textureDict = {
        'rawColor': -1,
        'lastColor': -1,
        'nextColor': -1,
        'rawDepth': -1,
        'filteredDepth': -1,
        'lastDepth': -1,
        'nextDepth': -1,
        'refVertex': -1,
        'refNormal': -1,
        'virtualVertex': -1,
        'virtualNormal': -1,
        'virtualDepth': -1,
        'virtualColor': -1,
        'mappingC2D': -1,
        'mappingD2C': -1,
        'xyLUT': -1,
        'tracking': -1,
        'volume': -1,
        'indexMap': -1
    }

    fboDict = {'indexMap': -1, 'virtualFrame': -1}
    #        'iters' : (2, 5, 10),

    fusionConfig = {
        'volSize': (128, 128, 128),
        'volDim': (1.0, 1.0, 1.0),
        'iters': (2, 2, 2),
        'initOffset': (0, 0, 0),
        'maxWeight': 100.0,
        'distThresh': 0.05,
        'normThresh': 0.9,
        'nearPlane': 0.1,
        'farPlane': 4.0,
        'maxMapSize': 5000000,
        'c_stable': 10.0,
        'sigma': 0.6
    }

    cameraConfig = {
        'depthWidth': 640,
        'depthHeight': 576,
        'colorWidth': 1920,
        'colorHeight': 1080,
        'd2c': d2c,
        'c2d': c2d,
        'depthScale': 0.001,
        'K': K,
        'invK': invK,
        'colK': colK
    }

    textureDict = frame.generateTextures(textureDict, cameraConfig,
                                         fusionConfig)
    bufferDict = frame.generateBuffers(bufferDict, cameraConfig, fusionConfig)

    colorMat = np.zeros(
        (cameraConfig['colorHeight'], cameraConfig['colorWidth'], 3),
        dtype="uint8")
    useColorMat = False
    integrateFlag = True
    resetFlag = True
    initPose = glm.mat4()
    initPose[3, 0] = fusionConfig['volDim'][0] / 2.0
    initPose[3, 1] = fusionConfig['volDim'][1] / 2.0
    initPose[3, 2] = 0

    blankResult = np.array([0, 0, 0, 0, 0, 0], dtype='float32')

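    # pose buffer layout, offsets in bytes (a mat4 is 16 floats = 64 bytes):
    # four mat4s (pose, inverse pose, two identity scratch matrices) followed
    # by a six-float result vector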
    glBindBuffer(GL_SHADER_STORAGE_BUFFER, bufferDict['poseBuffer'])
    glBufferSubData(GL_SHADER_STORAGE_BUFFER, 0, 16 * 4,
                    glm.value_ptr(initPose))
    glBufferSubData(GL_SHADER_STORAGE_BUFFER, 16 * 4, 16 * 4,
                    glm.value_ptr(glm.inverse(initPose)))
    glBufferSubData(GL_SHADER_STORAGE_BUFFER, 16 * 4 * 2, 16 * 4,
                    glm.value_ptr(glm.mat4(1.0)))
    glBufferSubData(GL_SHADER_STORAGE_BUFFER, 16 * 4 * 3, 16 * 4,
                    glm.value_ptr(glm.mat4(1.0)))
    glBufferSubData(GL_SHADER_STORAGE_BUFFER, 16 * 4 * 4, 6 * 4, blankResult)
    glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0)

    mouseX, mouseY = 0, 0
    clickedPoint3D = glm.vec4(fusionConfig['volDim'][0] / 2.0,
                              fusionConfig['volDim'][1] / 2.0, 0, 0)
    sliderDim = fusionConfig['volDim'][0]

    # combo index into ["32", "64", "128", "256", "512"]: log2(volSize) - 5
    currentSize = int(math.log2(fusionConfig['volSize'][0])) - 5
    volumeStatsChanged = False

    currPose = initPose

    # splatter stuff
    frameCount = 0
    fboDict = frame.generateFrameBuffers(fboDict, textureDict, cameraConfig)

    initAtomicCount = np.array([0], dtype='uint32')
    mapSize = np.array([0], dtype='uint32')

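    # zero both atomic counters; they appear to serve as double-buffered
    # point counts for the globalMap0/globalMap1 splatter buffers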
    glBindBuffer(GL_ATOMIC_COUNTER_BUFFER, bufferDict['atomic0'])
    glBufferSubData(GL_ATOMIC_COUNTER_BUFFER, 0, 4, initAtomicCount)
    glBindBuffer(GL_ATOMIC_COUNTER_BUFFER, 0)

    glBindBuffer(GL_ATOMIC_COUNTER_BUFFER, bufferDict['atomic1'])
    glBufferSubData(GL_ATOMIC_COUNTER_BUFFER, 0, 4, initAtomicCount)
    glBindBuffer(GL_ATOMIC_COUNTER_BUFFER, 0)

    # aa = torch.tensor([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0], dtype=torch.float32, device=torch.device('cuda'))
    # bb = torch.tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=torch.float32, device=torch.device('cuda'))

    # #setup pycuda gl interop needs to be after openGL is init
    # import pycuda.gl.autoinit
    # import pycuda.gl
    # cuda_gl = pycuda.gl
    # cuda_driver = pycuda.driver
    # from pycuda.compiler import SourceModule
    # import pycuda

    # pycuda_source_ssbo = cuda_gl.RegisteredBuffer(int(bufferDict['test']), cuda_gl.graphics_map_flags.NONE)

    # sm = SourceModule("""
    #     __global__ void simpleCopy(float *inputArray, float *outputArray) {
    #             unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;

    #             outputArray[x] = inputArray[x];
    #             inputArray[x] = 8008.135f;
    #     }
    # """)

    # cuda_function = sm.get_function("simpleCopy")

    # mappingObj = pycuda_source_ssbo.map()
    # data, size = mappingObj.device_ptr_and_size()

    # cuda_function(np.intp(aa.data_ptr()), np.intp(data), block=(8, 1, 1))

    # mappingObj.unmap()

    # glBindBuffer(GL_SHADER_STORAGE_BUFFER, bufferDict['test'])
    # tee = glGetBufferSubData(GL_SHADER_STORAGE_BUFFER, 0, 32)
    # glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0)
    # teeData = np.frombuffer(tee, dtype=np.float32)
    # print(teeData)

    # modTensor = aa.cpu().data.numpy()
    # print(modTensor)

    #fusionConfig['initOffset'] = (initPose[3,0], initPose[3,1], initPose[3,2])

    # LUTs
    #createXYLUT(k4a, textureDict, cameraConfig) <-- bug in this

    person.init()

    while not glfw.window_should_close(window):

        glfw.poll_events()
        impl.process_inputs()
        imgui.new_frame()

        sTime = time.perf_counter()

        try:
            capture = camera.getFrames(useLiveCamera)

            if capture.color is not None:
                #if useLiveCamera == False:
                #if k4a.configuration["color_format"] == ImageFormat.COLOR_MJPG:
                #    colorMat = cv2.imdecode(capture.color, cv2.IMREAD_COLOR)
                #    useColorMat = True
                glActiveTexture(GL_TEXTURE0)
                glBindTexture(GL_TEXTURE_2D, textureDict['rawColor'])
                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0,
                                int(cameraConfig['colorWidth']),
                                int(cameraConfig['colorHeight']),
                                (GL_RGB, GL_RGBA)[useLiveCamera],
                                GL_UNSIGNED_BYTE,
                                (capture.color, colorMat)[useColorMat])

            if capture.depth is not None:
                glActiveTexture(GL_TEXTURE1)
                glBindTexture(GL_TEXTURE_2D, textureDict['rawDepth'])
                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0,
                                int(cameraConfig['depthWidth']),
                                int(cameraConfig['depthHeight']), GL_RED,
                                GL_UNSIGNED_SHORT, capture.depth)

        except EOFError:
            break

        # #smallMat = cv2.pyrDown(colorMat)
        # start_time = time.time()
        # rotMat = cv2.flip(colorMat, 0)

        # pil_image = Image.fromarray(rotMat).convert('RGB')
        # image = transform(pil_image)

        # image = image.unsqueeze(0).to(device)
        # end_time = time.time()
        # print((end_time - start_time) * 1000.0)

        # with torch.no_grad():
        #     outputs = model(image)

        # output_image = utils.draw_keypoints(outputs, rotMat)
        # cv2.imshow('Face detection frame', output_image)
        # if cv2.waitKey(1) & 0xFF == ord('q'):
        #     break

        person.getPose(textureDict, cameraConfig, capture.color)

        frame.bilateralFilter(shaderDict, textureDict, cameraConfig)
        frame.depthToVertex(shaderDict, textureDict, cameraConfig,
                            fusionConfig)
        frame.alignDepthColor(shaderDict, textureDict, cameraConfig,
                              fusionConfig)
        frame.vertexToNormal(shaderDict, textureDict, cameraConfig)

        frame.mipmapTextures(textureDict)

        #currPose = track.runP2P(shaderDict, textureDict, bufferDict, cameraConfig, fusionConfig, currPose, integrateFlag, resetFlag)
        currPose = track.runP2V(shaderDict, textureDict, bufferDict,
                                cameraConfig, fusionConfig, currPose,
                                integrateFlag, resetFlag)

        #mapSize = track.runSplatter(shaderDict, textureDict, bufferDict, fboDict, cameraConfig, fusionConfig, mapSize, frameCount, integrateFlag, resetFlag)
        frameCount += 1

        if resetFlag:
            resetFlag = False
            integrateFlag = True

        imgui.begin("Menu", True)
        if imgui.button("Reset"):
            fusionConfig['volSize'] = (1 << (currentSize + 5), 1 <<
                                       (currentSize + 5), 1 <<
                                       (currentSize + 5))
            fusionConfig['volDim'] = (sliderDim, sliderDim, sliderDim)
            currPose, integrateFlag, resetFlag = track.reset(
                textureDict, bufferDict, cameraConfig, fusionConfig,
                clickedPoint3D)
            volumeStatsChanged = False

        if imgui.button("Integrate"):
            integrateFlag = not integrateFlag
        imgui.same_line()
        imgui.checkbox("", integrateFlag)  # display-only; the button toggles

        changedDim, sliderDim = imgui.slider_float("dim",
                                                   sliderDim,
                                                   min_value=0.01,
                                                   max_value=5.0)

        clickedSize, currentSize = imgui.combo(
            "size", currentSize, ["32", "64", "128", "256", "512"])

        if imgui.is_mouse_clicked():
            if not imgui.is_any_item_active():
                mouseX, mouseY = imgui.get_mouse_pos()
                w, h = glfw.get_framebuffer_size(window)
                xPos = ((mouseX % int(w / 3)) / (w / 3) *
                        cameraConfig['depthWidth'])
                yPos = (mouseY / (h)) * cameraConfig['depthHeight']
                clickedDepth = capture.depth[
                    int(yPos + 0.5),
                    int(xPos + 0.5)] * cameraConfig['depthScale']
                clickedPoint3D = clickedDepth * (
                    cameraConfig['invK'] * glm.vec4(xPos, yPos, 1.0, 0.0))
                volumeStatsChanged = True

        if changedDim or clickedSize:
            volumeStatsChanged = True

        imgui.end()

        graphics.render(VAO, window, shaderDict, textureDict)

        imgui.render()

        impl.render(imgui.get_draw_data())

        eTime = time.perf_counter()

        #print((eTime-sTime) * 1000, mapSize[0])

        glfw.swap_buffers(window)

    glfw.terminate()
    if useLiveCamera:
        camera.stop()
Example #5
OPERATIONS = {
    'train':
    lambda model, args: model.train(tables.open_file(args.dataset, mode='r'),
                                    args.epochs, args.learning_rate),
    'train_pose':
    lambda model, args: model.train(tables.open_file(args.dataset, mode='r'),
                                    args.epochs, args.initial_epoch),
    'image':
    lambda model, args: splash_image(model, args.input, args.output),
    'video':
    lambda model, args: splash_video(model, args.input, args.output),
    'visualize':
    lambda model, args: visualize_image(model, args.input),
    'camera':
    lambda model, args: camera.start(model, ['BG', '1x1', '1x2', '1x3'],
                                     (448, 448))
}
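# presumably the parsed operation name indexes OPERATIONS for dispatch,
# along the lines of (hypothetical; the snippet ends before this point):
#
#   args = parser.parse_args()
#   OPERATIONS[args.operation](model, args)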

parser = argparse.ArgumentParser(description='Program to die yourself')
parser.add_argument('operation',
                    metavar='OP',
                    help='Operation to be executed',
                    choices=('train', 'image', 'video', 'camera', 'visualize',
                             'train_pose'))
parser.add_argument('-d',
                    '--dataset',
                    help='Path to HDF5 file containing dataset',
                    type=str,
                    default='dataset.hdf5')
parser.add_argument('-w',
                    '--weights',
Example #6
def control():
    global num_seed
    num_seed = 0
    model = load_model(MODEL_NAME)
    #-----------
    imagede = '/home/pi/Desktop/photos/default.jpg'
    img_default = load_image(imagede)
    classify(model, img_default)

    ## initialization
    pygame.init()
    ## variable declarations

    size = width, height = 300, 200
    bgColor = (0, 0, 0)

    ## set the window width and height

    screen = pygame.display.set_mode(size)

    ## set the window title

    pygame.display.set_caption("Team 1 Monitor")

    ## to use text in Pygame, a Font object must be created

    ## the first argument selects the font, the second sets the font size

    font = pygame.font.Font(None, 20)

    ## get_linesize() returns the height of one line of text

    line_height = font.get_linesize()
    position = 0
    screen.fill(bgColor)

    ## create a TXT file to store the event log

    # f = open("record.txt",'w')

    while True:

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # close the file
                # f.close()
                sys.exit()
            # print('GG\n')
            if event.type == pygame.KEYDOWN:
                # f.write(str(event) + '\n')
                if event.key == K_w:
                    # print('w\n')
                    cm.send('#W')
                elif event.key == K_s:
                    cm.send('#S')
                if event.key == K_j:
                    # print('w\n')
                    cm.send('#w')
                elif event.key == K_k:
                    cm.send('#s')

                elif event.key == K_d:
                    cm.send('#D')
                elif event.key == K_a:
                    cm.send('#A')

                elif event.key == K_x:
                    cm.send('#x')
                elif event.key == K_b:
                    cm.send('#b')
                # --------------------------------------------

                elif event.key == K_p:
                    camera.stop()
                    imagepath = '/home/pi/Desktop/photos/' + str(
                        num_seed) + '.jpg'
                    img = load_image(imagepath)
                    label, prob, _ = classify(model, img)
                    print(
                        'we think image name:{} with certainty {} that it is {}'
                        .format(imagepath, prob, label))

                # ------------------------------
                # object tracking; returns the detected direction
                # put the .h5 model file in the working directory; input and output live in the photos folder
                elif event.key == K_g:
                    camera.stop()
                    imagepath = '/home/pi/Desktop/photos/' + str(
                        num_seed) + '.jpg'
                    outputpath = '/home/pi/Desktop/photos/' + str(
                        num_seed) + 'new.jpg'
                    execution_path = os.getcwd()
                    detector = ObjectDetection()
                    detector.setModelTypeAsRetinaNet()
                    detector.setModelPath(
                        os.path.join(execution_path,
                                     'resnet50_coco_best_v2.0.1.h5'))
                    detector.loadModel()
                    a = time.time()

                    custom_objects = detector.CustomObjects(bottle=True)

                    detections = detector.detectCustomObjectsFromImage(
                        custom_objects=custom_objects,
                        input_image=imagepath,
                        output_image_path=outputpath,
                        minimum_percentage_probability=50,
                        box_show=True)
                    b = time.time()
                    print('the time is {}'.format(b - a))
                    print('the direction is {}'.format(
                        detections[0]['direction']))
                    for eachObject in detections:
                        print(eachObject['name'] + ':' +
                              eachObject['percentage_probability'])

                elif event.key == K_t:
                    num_seed = camera.capture(num_seed)

                elif event.key == K_q:
                    camera.stop()
                    print("==End of Photograph==")
                elif event.key == K_o:
                    camera.start()
                    print("==Begin of Photograph==")
                elif event.key == K_r:
                    camera.record()
                    # render() draws the text into a Surface object
                # the first argument is the text to render
                # the second argument toggles antialiasing
                # the third argument sets the text color
                screen.blit(font.render(str(event), True, (0, 255, 0)),
                            (0, position))
                position += line_height
                if position >= height:
                    position = 0
                    screen.fill(bgColor)
                pygame.display.flip()
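The eight single-character serial commands in the loop above map naturally onto a key-to-command table; a minimal sketch, assuming the same cm.send helper and pygame key constants:

    COMMANDS = {K_w: '#W', K_s: '#S', K_j: '#w', K_k: '#s',
                K_d: '#D', K_a: '#A', K_x: '#x', K_b: '#b'}

    if event.type == pygame.KEYDOWN and event.key in COMMANDS:
        cm.send(COMMANDS[event.key])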
Example #7
    def camera(self, tok):

        camera.start(tok)
Example #8
def start():
    camera.start()
    flash('Successfully started the camera')
    return redirect(url_for('index'))
Example #9
import communication as cm
import camera
import keyboard 

cm.handshake()

camera.start()

#while (True):
#    cm.write()

keyboard.control()
Example #10
import zlab
import camera
import time

camera.start()
camera.setMetaData("myvar1=22&yourvar=hello")
print "Recording"
camera.record("c:/zlab/camera_test.pma")
print "Sleeping"
time.sleep(4)
print "Stopping"
camera.stop()
print "Done"
Example #11
def control():
    global num_seed
    num_seed = 0
    model = load_model(MODEL_NAME)

    ## initialization
    pygame.init()
    ## variable declarations

    size = width, height = 300, 200
    bgColor = (0, 0, 0)

    ## set the window width and height

    screen = pygame.display.set_mode(size)

    ## set the window title

    pygame.display.set_caption("Team 1 Monitor")

    ## to use text in Pygame, a Font object must be created

    ## the first argument selects the font, the second sets the font size

    font = pygame.font.Font(None, 20)

    ## get_linesize() returns the height of one line of text

    line_height = font.get_linesize()
    position = 0
    screen.fill(bgColor)

    ## create a TXT file to store the event log

    # f = open("record.txt",'w')

    while True:

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # close the file
                # f.close()
                sys.exit()
            # print('GG\n')
            if event.type == pygame.KEYDOWN:
                # f.write(str(event) + '\n')
                if event.key == K_w:
                    # print('w\n')
                    cm.send('#W')
                elif event.key == K_s:
                    cm.send('#S')
                if event.key == K_j:
                    # print('w\n')
                    cm.send('#w')
                elif event.key == K_k:
                    cm.send('#s')

                elif event.key == K_d:
                    cm.send('#D')
                elif event.key == K_a:
                    cm.send('#A')

                elif event.key == K_x:
                    cm.send('#x')
                elif event.key == K_b:
                    cm.send('#b')
                # --------------------------------------------
                # the code below was added by Yun: pressing p runs prediction; the capture key passes in num_seed so photos are numbered to match
                elif event.key == K_p:
                    imagepath = '/home/pi/Desktop/photos/' + str(num_seed) + '.jpg'

                    img = load_image(imagepath)
                    label, prob, _ = classify(model, img)
                    print('we think image name:{} with certainty {} that it is {}'.format(imagepath, prob, label))

                # ---------------------------------------------
                # ---------------------------------------------
                elif event.key == K_t:
                    num_seed = camera.capture(num_seed)


                elif event.key == K_q:
                    camera.stop()
                    print("==End of Photograph==")
                elif event.key == K_o:
                    camera.start()
                    print("==Begin of Photograph==")
                elif event.key == K_r:
                    camera.record()
                    # render() draws the text into a Surface object
                # the first argument is the text to render
                # the second argument toggles antialiasing
                # the third argument sets the text color
                screen.blit(font.render(str(event), True, (0, 255, 0)), (0, position))
                position += line_height
                if position >= height:
                    position = 0
                    screen.fill(bgColor)
                pygame.display.flip()
Example #12
def ca():
    print(time.ctime())
    camera.start()
Example #13
def do_intent(text, tok):

    sm = hass()
    m = xlMusic()
    services = {
        'musicurl_get': 'method=baidu.ting.song.play&songid=',
        'search': 'method=baidu.ting.search.catalogSug&query=',
        'hot':
        'method=baidu.ting.song.getRecommandSongList&song_id=877578&num=12'
    }

    if text is not None:
        if '闹钟' in text:
            clock.start(tok)
        elif '打开' in text:
            sm.cortol('turn_on', text[6:-1], tok)
        elif '关闭' in text:
            sm.cortol('turn_off', text[6:-1], tok)
        elif '获取' in text:
            if '传感器' in text or '温度' in text:
                sm.sensor('sensor', text[6:-1], tok)
            elif '湿度' in text:
                sm.sensor('sensor', text[6:-1], tok)
            else:
                sm.sensor('switch', text[6:-1], tok)
        elif '天气' in text:
            weather.main(tok)
        elif '重新说' in text or '重复' in text:
            speaker.speak()
        elif '翻译' in text:
            ts.main(tok)
        elif '搜索' in text:
            tuling.main(text, tok)
        elif '闲聊' in text:
            tuling.main(text, tok)
        elif '怎么走' in text:
            maps.start(tok)
        elif '酒店' in text:
            tuling.main(text, tok)
        elif '旅游' in text:
            tuling.main(text, tok)
        elif '新闻' in text:
            news.start(tok)
        elif '拍照' in text:
            camera.start(tok)
        elif '邮件' in text or '邮件助手' in text:
            mail.start(tok)
        elif '快递' in text:
            express.start(tok)
        elif '笑话' in text:
            joke.main(tok)
        elif '训练' in text:
            snowboytrain.start(tok)
        elif '播放' in text:
            if '音乐' in text:
                m.sui_ji(services, tok)
            else:
                songname = text[2:-1]
                m.sou_suo(services, songname, tok)
        elif '我想听' in text:
            if '音乐' in text:
                m.sui_ji(services, tok)
            else:
                songname = text[3:-1]
                m.sou_suo(services, songname, tok)
        else:
            tuling.start(text, tok)
    else:
        speaker.speacilrecorder()
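Since do_intent matches substrings in priority order, the same routing can be expressed as an ordered table of (keyword, handler) pairs; a minimal sketch, assuming the same skill modules:

    def do_intent(text, tok):
        if text is None:
            speaker.speacilrecorder()
            return
        routes = [
            ('闹钟', lambda: clock.start(tok)),
            ('天气', lambda: weather.main(tok)),
            ('新闻', lambda: news.start(tok)),
            ('拍照', lambda: camera.start(tok)),
        ]
        # first matching keyword wins, mirroring the if/elif ordering above
        for keyword, handler in routes:
            if keyword in text:
                handler()
                return
        tuling.start(text, tok)  # fallback chat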