Example #1
    def __init__(self, **kwargs):
        # auxiliary string for debugging output
        self.pre = self.__class__.__name__ + " : "
        # checks kwargs against parameter_defs, attach ok'd parameters to this
        # object as attributes
        parameterInitCheck(LiveThread.parameter_defs, kwargs, self)

        # some "api-level 1" objects here = swig-wrapped cpp objects
        #
        # This parameter set is defined at the cpp header file "framefifo.h"
        self.framefifo_ctx = core.FrameFifoContext()
        self.framefifo_ctx.n_basic = self.n_basic
        self.framefifo_ctx.n_setup = self.n_setup
        self.framefifo_ctx.n_signal = self.n_signal
        self.framefifo_ctx.flush_when_full = self.flush_when_full
        # swig wrapped cpp LiveThread
        self.core = core.LiveThread(self.name, self.framefifo_ctx)
        self.core.setAffinity(self.affinity)
        if self.rtsp_server > -1:
            self.core.setRTSPServer(self.rtsp_server)

        self.input_filter = self.core.getFrameFilter()

        self.active = True
        self.core.startCall()
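
The constructor above validates its keyword arguments against LiveThread.parameter_defs and attaches them to the object as attributes. Below is a minimal usage sketch, assuming this is the api2-level LiveThread wrapper; the module path and the example values are assumptions, while the keyword names mirror the attributes used in __init__ above:

from valkka.api2 import LiveThread  # assumed module path for the wrapper shown above

# a minimal sketch: configure the wrapper purely via keyword arguments
livethread = LiveThread(
    name="live_thread",
    n_basic=20,            # basic payload frames reserved in the core FrameFifo
    flush_when_full=True,  # flush the fifo if it fills up
    affinity=-1            # -1 = no cpu core affinity
)
# the underlying swig-wrapped core.LiveThread was already started in __init__
# (startCall); frames can be written to livethread.input_filter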
Example #2
def main():
    """Simple filterchain creation test
    """
    openglthread = core.OpenGLThread("openglthread")
    openglthread.startCall()
    livethread = core.LiveThread("livethread")
    livethread.startCall()

    if len(sys.argv) < 2:
        print("please give rtsp camera address as the first argument")
        return

    filterchain = VisionAlarmFilterChain(address=sys.argv[1],
                                         slot=1,
                                         rgb_sync_event=reserveEventFd(),
                                         fmp4_sync_event=reserveEventFd())

    # runs the filterchain
    filterchain(livethread=livethread, openglthread=openglthread)
    print("server is running for some time")
    filterchain.activateFragMP4()
    time.sleep(12)
    print("livethread stop")
    # preferably shut down the system from beginning to end
    livethread.stopCall()
    filterchain.close()
    print("openglthread stop")
    openglthread.stopCall()
    print("bye!")
Example #3
    def __init__(self, address, parent=None):
        super(Dashboard, self).__init__()
        self.address = address
        self.setupUI()
        self.alertFireDetected = True
        setValkkaLogLevel(loglevel_silent)
        filterchain = VisionAlarmFilterChain(self.w,
                                             self.wlay,
                                             address=self.address,
                                             slot=1)

        rgb_pars = filterchain.getRGBParameters()
        frag_mp4_pars = filterchain.getFragMP4Parameters()

        # process that receives RGB24 frames
        rgb_process = RGB24Process()
        rgb_process.ignoreSIGINT()
        rgb_process.start()

        # process that receives frag mp4 fragments & prints out info
        if self.alertFireDetected:
            frag_mp4_process = FragMP4Process()
            frag_mp4_process.ignoreSIGINT()
            frag_mp4_process.start()

        # opengl is used to dump video to the screen
        # frames are pre-reserved in memory and on the GPU
        gl_ctx = core.OpenGLFrameFifoContext()
        gl_ctx.n_720p = 100
        gl_ctx.n_1080p = 100
        gl_ctx.n_1440p = 0
        gl_ctx.n_4K = 0

        # max buffering time depends on the frames available
        buffering_time_ms = 100

        # Forking is done; begin multithread instantiation
        # Create the opengl thread
        openglthread = core.OpenGLThread("openglthread", gl_ctx,
                                         buffering_time_ms)
        openglthread.startCall()
        # openglthread = OpenGLThread(
        #     name="openglthread",
        #     n_720p=50,
        #     n_1080p=50,
        #     n_1440p=50,
        #     verbose=False,
        #     msbuftime=buffering_time_ms
        # )

        # Livethread using live555
        livethread = core.LiveThread("livethread")
        livethread.startCall()

        # start decoders, create shmem servers etc
        filterchain(livethread=livethread, openglthread=openglthread)

        # pass shmem arguments (from the server side) to the client processes
        rgb_process.activateRGB24Client(**rgb_pars)
        rgb_pipe = rgb_process.getPipe()

        # if fire-alert detection is enabled, activate frag-MP4
        if self.alertFireDetected:
            frag_mp4_process.activateFMP4Client(**frag_mp4_pars)

            # multiprocesses derived from valkka.multiprocess.AsyncBackMessageProcess
            # have a slightly different API from multiprocessing.Pipe:
            mp4_pipe = frag_mp4_process.getPipe()
            mp4_pipe_fd = mp4_pipe.getReadFd()

            # could be called within the main loop
            # in a real-life application you could do it like this:
            # - your ws server receives a request for fmp4
            # - your ws server process sends a request to main process
            # - ... which then calls this:
            filterchain.activateFragMP4()

        print("video array ", frag_mp4_process.returnVideoArray())
        """
        C++ side threads are running
        Python multiprocesses are running
        Starting main program
        """

        while True:
            try:
                if self.alertFireDetected:
                    # multiplex communication from the two multiprocesses
                    rlis = [rgb_pipe, mp4_pipe_fd]
                    r, w, e = safe_select(rlis, [], [], timeout=1)

                    if rgb_pipe in r:
                        # there is an incoming message object from the rgb process
                        msg = rgb_pipe.recv()
                        print(
                            "MainProcess Dashboard: message from rgb process",
                            msg)
                    if mp4_pipe_fd in r:
                        # there is an incoming message object from the frag mp4 process
                        msg = mp4_pipe.recv()
                        print(
                            "MainProcess Dashboard: message from frag-MP4 process",
                            msg)

                    if len(r) < 1:
                        # Dashboard process is alive
                        # print("Dashboard process is alive")
                        pass
                else:
                    # only the rgb multiprocess pipe is multiplexed here
                    rlis = [rgb_pipe]
                    r, w, e = safe_select(rlis, [], [], timeout=1)

                    if rgb_pipe in r:
                        # there is an incoming message object from the rgb process
                        msg = rgb_pipe.recv()
                        print(
                            "MainProcess Dashboard: message from rgb process",
                            msg)

                    if len(r) < 1:
                        # Dashboard process is alive
                        # print("Dashboard process is alive")
                        pass

            except KeyboardInterrupt:
                print("Terminating the program (ctrl-c pressed)")
                break

        print("Bye ")
        print(" stopping process")

        if self.alertFireDetected:
            frag_mp4_process.deactivateFMP4Client(
                ipc_index=frag_mp4_pars["ipc_index"])
            frag_mp4_process.stop()

        rgb_process.deactivateRGB24Client(ipc_index=rgb_pars["ipc_index"])
        rgb_process.stop()

        print("stopping livethread")
        livethread.stopCall()
        filterchain.close()
        print("stopping openglthread")
        openglthread.stopCall()
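
The main loop above multiplexes the pipes of the client processes with safe_select. As a rough sketch of what such a helper can look like (an assumption: a thin wrapper around select.select that tolerates interrupted system calls; the real valkka.multiprocess implementation may differ):

import select

def safe_select_sketch(rlis, wlis, elis, timeout=None):
    # behaves like select.select, but returns empty lists instead of
    # raising when the call is interrupted by a signal
    try:
        return select.select(rlis, wlis, elis, timeout)
    except InterruptedError:
        return [], [], []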
Example #4
print()
print("Loading Valkka")

import valkka
from valkka import core

print("Valkka loaded ok")
print("   Version          ", valkka.core.__version__)
print("   Core loaded from ", core.__file__)
"""Test instantiation of some objects
"""
print()
print("   Testing Valkka classes")
live = core.LiveThread("live")
# inp = core.FrameFifo("fifo")  # no longer in the API
# ff = core.FifoFrameFilter("fifo", inp)
out = core.DummyFrameFilter("dummy")
av = core.AVThread("av", out)
gl = core.OpenGLThread("gl")

av_in = av.getFrameFilter()
gl_in = gl.getFrameFilter()

ctx = core.LiveConnectionContext()
ctx.slot = 1
ctx.connection_type = core.LiveConnectionType_rtsp
ctx.address = "rtsp://*****:*****@192.168.0.157"
ctx2 = core.LiveConnectionContext(core.LiveConnectionType_rtsp,
                                  "rtsp://*****:*****@192.168.0.157", 1, out)
print("   Valkka classes ok")
print()
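
The LiveConnectionContext objects above are only instantiated, never used. In a real program the fully populated context (ctx2, which already carries the target framefilter) would be handed to the live thread. A minimal sketch, assuming the registerStreamCall / playStreamCall methods of core.LiveThread:

# sketch: start the live thread, register the stream and play it
live.startCall()
live.registerStreamCall(ctx2)
live.playStreamCall(ctx2)
# ... frames now flow into the "out" DummyFrameFilter ...
live.stopCall()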