コード例 #1
0
    def configure(self):
        """
        Set up every upyHome component from the loaded configuration.

        Loads the config file, applies the debug/platform flags, rebinds
        the base callback, registers configured subscriptions and then
        hands each component section to its dedicated handler.  Any
        failure is caught and routed to the error log.
        """
        try:
            self._load_config()

            MODE_DEBUG['value'] = self._get_config_key(self._config, KEY_DEBUG,
                                                       True)
            _debug('upyhome::configure')

            PLATFORM['value'] = self._get_config_key(self._config,
                                                     KEY_PLATFORM, True)
            self._name = self._config[CONFIG_NAME]
            # Re-initialise the base callback
            self._init_cb(self._get_config_key(self._config, KEY_USER_CB,
                                               True))
            # Register configured subscriptions, if any
            subs = self._get_config_key(self._config, KEY_SUSCRIBES)
            if isinstance(subs, list):
                for sub_cb in subs:
                    self._add_suscribe(self._proxy, sub_cb)
            # Configure each component family with its dedicated handler
            for comp_key, handler in ((CONFIG_NET, self._config_network),
                                      (CONFIG_DIN, self._config_inputs),
                                      (CONFIG_DOUT, self._config_outputs),
                                      (CONFIG_LED, self._config_leds),
                                      (CONFIG_I2C, self._config_i2cs),
                                      (CONFIG_SPI, self._config_spis),
                                      (CONFIG_DRIVER, self._config_drivers)):
                self._config_comp(comp_key, handler)
        except Exception as ex:
            log_error('{0}@upyhome::configure'.format(ex))
コード例 #2
0
 def _test_error(self):
     """
     Diagnostic helper: deliberately raise an exception and verify that
     it is routed through the error logger.
     """
     try:
         raise Exception('Test exception')
     except Exception as err:
         log_error('{0}@upyhome::test_error'.format(err))
コード例 #3
0
def get_action(camera_id):
    """Return the action registered for camera_id, or log an error."""
    global action

    try:
        return action[camera_id]
    except KeyError:
        com.log_error(
            'get_action() - No value found for camera_id: {}'.format(camera_id))
コード例 #4
0
def get_known_faces_db_name(camera_id):
    """Return the known-faces db path registered for camera_id, or log an error."""
    global input_file

    try:
        return input_file[camera_id]
    except KeyError:
        com.log_error(
            'get_known_faces_db_name() - No value found for camera_id: {}'.format(
                camera_id))
コード例 #5
0
def get_output_db_name(camera_id):
    """Return the output db path registered for camera_id, or log an error."""
    global output_file

    try:
        return output_file[camera_id]
    except KeyError:
        com.log_error(
            'get_output_db_name() - No value found for camera_id: {}'.format(
                camera_id))
コード例 #6
0
def add_faces_encodings(camera_id, face_encoding):
    """Append face_encoding to the encodings list kept for camera_id."""
    global known_face_encodings

    try:
        known_face_encodings[camera_id].append(face_encoding)
    except KeyError:
        # Unknown camera: report it instead of silently creating an entry.
        com.log_error(
            'add_faces_encodings() - No value found for camera_id: {}'.format(
                camera_id))
コード例 #7
0
def set_action(camera_id, value):
    """Map camera_id to the action named by value (must be a known action type)."""
    global action_types

    if value not in action_types:
        com.log_error('Unable to set up value:{}, must be one of this: {}'.format(
            value, action_types.keys()))
        return

    global action
    # dict.update returns None; kept for interface compatibility.
    return action.update({camera_id: action_types[value]})
コード例 #8
0
def get_not_applicable_id(camera_id, abort=True):
    """
    Return the not-applicable ids stored for camera_id.

    When the camera is unknown: log an error if abort is True, otherwise
    return an empty list.
    """
    global not_applicable_id

    try:
        return not_applicable_id[camera_id]
    except KeyError:
        if not abort:
            return []
        com.log_error(
            'get_not_applicable_id() - No value found for camera_id: {}'.
            format(camera_id))
コード例 #9
0
def read_pickle(pickle_file, exception=True):
    """
    Load an (encodings, metadata) pair pickled into pickle_file.

    Returns (len(metadata), encodings, metadata).  When the file cannot
    be opened: report via com.log_error if exception is True, otherwise
    return (0, [], []).

    NOTE(review): pickle.load must only be used on trusted files — it can
    execute arbitrary code on malicious input.
    """
    try:
        with open(pickle_file, 'rb') as handle:
            encodings, metadata = pickle.load(handle)
        return len(metadata), encodings, metadata
    except OSError as err:
        if not exception:
            return 0, [], []
        com.log_error(
            "Unable to open pickle_file: {}, original exception {}".format(
                pickle_file, str(err)))
コード例 #10
0
def read_pickle(pickle_file, exception=True):
    """
    Read every pickled object stored back-to-back in pickle_file.

    Returns the list of loaded objects.  When the file cannot be opened:
    report via com.log_error if exception is True, otherwise return
    (0, []).

    NOTE(review): pickle.load must only be used on trusted files.
    """
    records = []
    try:
        with open(pickle_file, 'rb') as handle:
            # Keep loading until pickle.load fails — normally at EOF.
            # NOTE(review): the broad except also hides corrupt entries;
            # preserved as-is to keep the original behavior.
            while True:
                try:
                    records.append(pickle.load(handle))
                except Exception:
                    break
        return records
    except OSError as err:
        if not exception:
            return 0, []
        com.log_error(
            "Unable to open pickle_file: {}, original exception {}".format(
                pickle_file, str(err)))
コード例 #11
0
def compare_data(data_file, known_faces_data, tolerated_difference_list):
    """
    Compare every face found in a video against the known-faces database.

    data_file: pickle db holding (count, encodings, metadata) for the
        faces detected in the video.
    known_faces_data: pickle db holding (count, encodings, metadata) for
        the known faces.
    tolerated_difference_list: non-empty list of distance thresholds,
        each expected in the open interval (0, 1).

    Prints a short report for every match; invalid input is reported via
    com.log_error.
    """
    # load data from binary db of all faces from the video
    total_visitors, video_face_encodings, video_faces_metadata = read_pickle(
        data_file)

    # load data from binary db of known faces
    total_known_faces, known_face_encodings, known_face_metadata = read_pickle(
        known_faces_data)

    if total_known_faces == 0 or total_visitors == 0:
        com.log_error("One of the db does not contain information {}")

    # Bug fix: the original test was `not isinstance(x, list) and len(x) > 0`,
    # which silently accepted empty lists, raised TypeError on arguments
    # without len(), and then referenced the undefined name `range_list`
    # in the error message (NameError).
    if not isinstance(tolerated_difference_list,
                      list) or len(tolerated_difference_list) == 0:
        com.log_error(
            "Parameter tolerated_difference_list must be a non-empty 'list'. "
            "Current type {}".format(type(tolerated_difference_list)))

    for tolerated_difference in tolerated_difference_list:
        # Only thresholds strictly between 0 and 1 are meaningful distances.
        if 0 < tolerated_difference < 1:
            print('\n---- Using tolerated difference: {} ----'.format(
                tolerated_difference))

            # Check one by one all the faces in the video against the known faces.
            for video_face_encoding, video_metadata in zip(
                    video_face_encodings, video_faces_metadata):
                metadata, best_index, lowest_distances = lookup_known_face(
                    video_face_encoding, known_face_encodings,
                    known_face_metadata, tolerated_difference)
                # NOTE(review): `if best_index` treats index 0 as "no match";
                # confirm lookup_known_face never returns 0 for a valid hit.
                if best_index:
                    print('-' * 8)
                    print('Subject {} found'.format(metadata['name']))
                    print('initial {}'.format(
                        video_faces_metadata[best_index]['first_seen']))
                    print('last {}'.format(
                        video_faces_metadata[best_index]['last_seen']))
                    print('distance: {}'.format(lowest_distances))
    '''
コード例 #12
0
 def exec(self, method, topic=None, data=None):
     """
     Run a component method by name, mainly used from the outside world
     aka REPL.

     'broadcast' pushes data to every listener via the shared context;
     any other method name is dispatched to the matching component(s).
     Returns True on success, False when an exception was logged.
     """
     _debug('exec %s %s %s' % (method, topic, data))
     try:
         if method == 'broadcast':
             self._context['topic'] = topic
             self._push(data)
             return True
         # Pick the targets: every component when no topic is given,
         # only the matching one when it is registered, none otherwise.
         if topic is None:
             targets = list(self._comps.values())
         elif topic in self._comps:
             targets = [self._comps[topic]]
         else:
             targets = []
         for target in targets:
             self._exec_func(target, method, data)
         return True
     except Exception as ex:
         log_error('{0}@upyhome::exec'.format(ex))
         return False
コード例 #13
0
#!/usr/bin/python3
import sys
import lib.common as com

# Number of CLI tokens, including the script name itself.
param_length = len(sys.argv)

msg = 'Usage: ' + sys.argv[
    0] + ' loadFaces | readVideo | readSilence | findImg | findVideo | compareData | appendTo'

if param_length < 2:
    com.log_error(msg)

if sys.argv[1] == 'loadFaces':
    if param_length == 2:
        # No extra arguments: fall back to the default locations.
        known_faces = 'data/load'
        data_file = 'data/encoded_known_faces/knownFaces.dat'
    elif param_length == 5 and sys.argv[3] == 'output':
        # Bug fix: the original tested param_length == 4 and then read
        # sys.argv[4], which is out of range for a 4-token command line,
        # and stored the path in `pickle_file` although the call below
        # reads `data_file` (NameError).
        known_faces = sys.argv[2]
        data_file = sys.argv[4]
    else:
        com.log_error(msg)

    import lib.biblioteca as biblio
    biblio.encode_known_faces(known_faces, data_file)
elif sys.argv[1] == 'appendTo':
    if param_length == 2:
        known_faces = 'data/load'
        data_file = 'data/encoded_known_faces/knownFaces.dat'
    elif param_length == 5 and sys.argv[3] == 'output':
        # Same off-by-one as loadFaces: five tokens are required before
        # sys.argv[4] can be read safely.
        known_faces = sys.argv[2]
        data_file = sys.argv[4]
コード例 #14
0
def main(args):
    """
    Build and run a DeepStream/GStreamer face-recognition pipeline.

    args is an argv-style list: args[1:-1] are the input URIs (file or
    rtsp) and args[-1] is the folder where frames are saved.  Exits with
    status 1 on bad usage or when the output folder already exists.
    """
    # Check input arguments
    if len(args) < 2:
        sys.stderr.write(
            "usage: %s <uri1> [uri2] ... [uriN] <folder to save frames>\n" %
            args[0])
        sys.exit(1)
    print("Argumentos :", args)

    # One FPS counter per source (args minus script name and output folder).
    for i in range(0, len(args) - 2):
        fps_streams["stream{0}".format(i)] = GETFPS(i)
    number_sources = len(args) - 2
    print("Numero de fuentes :", number_sources)

    # The last CLI argument is the output folder; refuse to overwrite it.
    global folder_name
    folder_name = args[-1]
    print(folder_name)
    if path.exists(folder_name):
        sys.stderr.write(
            "The output folder %s already exists. Please remove it first.\n" %
            folder_name)
        sys.exit(1)
    else:
        os.mkdir(folder_name)
        print("Frames will be saved in ", folder_name)

    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)

    #Emulate reading the information from the server
    '''
    En este punto asumimos que ya se cargaron la funente es decir video o rtsp etc y los valorres se cargaron
    en un dictionario global o las variables ya se cargaron en dictionarios que corresponden a las fuentes con
    sus respectivas configuraciones.

    En este caso no hay diferentes solo leer y buscar y comparar se puede hacer directamente llamando a las
    funcciones en la biblioteca

    De la misma forma los rostros que se quieran cargar puedes cargarse por configuracion indicando explicitamente
    los nombres de los archivos
    '''
    #scfg = biblio.get_server_info()
    #print(scfg)
    #quit()
    # Hard-coded camera id while the server lookup above stays disabled.
    camera_id = 'FA:KE:MA:C:AD:DR:ES:S9'
    set_action(camera_id, 'read')
    # NOTE(review): this immediately overwrites the 'read' action set one
    # line above — confirm both calls are intentional.
    set_action(camera_id, 'find')
    action = get_action(camera_id)
    pwd = os.getcwd()

    # Choose the output db; in 'find' mode also load the known-faces db.
    # NOTE(review): if action matches neither branch, total/encodings/
    # metadata stay undefined and set_known_faces_db below would raise.
    if action == action_types['read']:
        output_db_name = pwd + '/data/video_encoded_faces/test_video_default.data'
        total, encodings, metadata = 0, [], []
    elif action == action_types['find']:
        output_db_name = pwd + '/data/found_faces/found_faces_db.dat'
        known_faces_db_name = pwd + '/data/encoded_known_faces/knownFaces.dat'

        if com.file_exists(known_faces_db_name):
            set_known_faces_db_name(camera_id, known_faces_db_name)
            total, encodings, metadata = biblio.read_pickle(
                get_known_faces_db_name(camera_id), False)
        else:
            com.log_error('Unable to open {}'.format(known_faces_db_name))

    # Register the loaded data for this camera.
    set_known_faces_db(camera_id, total, encodings, metadata)
    set_output_db_name(camera_id, output_db_name)

    # Create gstreamer elements */
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False

    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    print("Creating streamux \n ")

    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")

    # One source bin per URI, each linked to a request pad of the muxer.
    pipeline.add(streammux)
    for i in range(number_sources):
        os.mkdir(folder_name + "/stream_" + str(i))
        frame_count["stream_" + str(i)] = 0
        saved_count["stream_" + str(i)] = 0
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i + 1]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")

    # Creation of tracking to follow up the model face
    # April 21th
    # ERM
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")

    # Add nvvidconv1 and filter1 to convert the frames to RGBA
    # which is easier to work with in Python.
    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")
    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)
    print("Creating tiler \n ")
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvtiler")
    if not tiler:
        sys.stderr.write(" Unable to create tiler \n")
    print("Creating nvvidconv \n ")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    print("Creating nvosd \n ")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    # Jetson (aarch64) needs an EGL transform element before the sink.
    if (is_aarch64()):
        print("Creating transform \n ")
        transform = Gst.ElementFactory.make("nvegltransform",
                                            "nvegl-transform")
        if not transform:
            sys.stderr.write(" Unable to create transform \n")

    print("Creating EGLSink \n")
    # edgar: changed this line so that no video is displayed -
    #sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    sink = Gst.ElementFactory.make("fakesink", "fakesink")

    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")

    if is_live:
        print("Atleast one of the sources is live")
        streammux.set_property('live-source', 1)

    # Meraki cameras are 720p
    #streammux.set_property('width', 1920)
    streammux.set_property('width', 1280)
    #streammux.set_property('height', 1080)
    streammux.set_property('height', 720)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    print('CURRENT_DIR', CURRENT_DIR)
    pgie.set_property('config-file-path',
                      CURRENT_DIR + "/configs/pgie_config_facenet.txt")
    # Keep the inference batch size in sync with the number of sources.
    pgie_batch_size = pgie.get_property("batch-size")
    if (pgie_batch_size != number_sources):
        print("WARNING: Overriding infer-config batch-size", pgie_batch_size,
              " with number of sources ", number_sources, " \n")
        pgie.set_property("batch-size", number_sources)

    # Set properties of tracker
    # April 21th
    # ERM

    config = configparser.ConfigParser()
    config.read('configs/tracker_config.txt')
    config.sections()

    # Copy the recognised keys of the [tracker] section onto the element.
    for key in config['tracker']:
        if key == 'tracker-width':
            tracker_width = config.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        elif key == 'tracker-height':
            tracker_height = config.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        elif key == 'gpu-id':
            tracker_gpu_id = config.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        elif key == 'll-lib-file':
            tracker_ll_lib_file = config.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        elif key == 'll-config-file':
            tracker_ll_config_file = config.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        elif key == 'enable-batch-process':
            tracker_enable_batch_process = config.getint('tracker', key)
            tracker.set_property('enable_batch_process',
                                 tracker_enable_batch_process)

    # Arrange the tiled view as close to a square grid as possible.
    tiler_rows = int(math.sqrt(number_sources))
    tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
    tiler.set_property("rows", tiler_rows)
    tiler.set_property("columns", tiler_columns)
    tiler.set_property("width", TILED_OUTPUT_WIDTH)
    tiler.set_property("height", TILED_OUTPUT_HEIGHT)

    # Do not sync the sink to the clock — process as fast as possible.
    sink.set_property("sync", 0)

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

    print("Adding elements to Pipeline \n")

    # Add tracker in pipeline
    # April 21th
    # ERM

    pipeline.add(pgie)
    pipeline.add(tracker)  # Tracker
    pipeline.add(tiler)
    pipeline.add(nvvidconv)
    pipeline.add(filter1)
    pipeline.add(nvvidconv1)
    pipeline.add(nvosd)
    if is_aarch64():
        pipeline.add(transform)
    pipeline.add(sink)

    print("Linking elements in the Pipeline \n")

    # Topology: streammux -> pgie -> tracker -> nvvidconv1 -> filter1
    #   -> tiler -> nvvidconv -> nvosd -> [transform ->] sink
    streammux.link(pgie)
    pgie.link(tracker)  # added for the tracker
    # pgie.link(nvvidconv1)     replaced by the tracker link above
    tracker.link(
        nvvidconv1)  # added to tie the tracker to the remaining elements
    nvvidconv1.link(filter1)
    filter1.link(tiler)
    tiler.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)

    # create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    #tiler_sink_pad=tiler.get_static_pad("sink")
    #if not tiler_sink_pad:
    #    sys.stderr.write(" Unable to get src pad \n")
    #else:
    #    tiler_sink_pad.add_probe(Gst.PadProbeType.BUFFER, tiler_sink_pad_buffer_probe, 0)

    # Attach the analysis probe to the tiler's src pad.
    tiler_src_pad = tiler.get_static_pad("src")
    if not tiler_src_pad:
        sys.stderr.write(" Unable to get src pad \n")
    else:
        tiler_src_pad.add_probe(Gst.PadProbeType.BUFFER,
                                tiler_src_pad_buffer_probe, 0)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args[:-1]):
        if (i != 0):
            print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        # NOTE(review): bare except — also swallows KeyboardInterrupt so
        # Ctrl-C falls through to the cleanup below.
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)