Example #1
def main():
    # The default Face will connect using a Unix socket, or to "localhost".
    face = Face()

    prefix = Name("/icear-server/result/example-data/2/deeplab")
    prefixNamespace = Namespace(prefix)
    prefixNamespace.setFace(face)

    enabled = [True]
    img = [None]

    def onGeneralizedObject(contentMetaInfo, obj):
        data = obj.toBytes()
        dump("Got generalized object, content-type",
             contentMetaInfo.getContentType(), ":", repr(data))
        print(len(data))
        enabled[0] = False
        img[0] = data

    goh = GeneralizedObjectHandler(onGeneralizedObject)
    prefixNamespace.setHandler(goh).objectNeeded()

    # Loop calling processEvents until a callback sets enabled[0] = False.
    while enabled[0]:
        face.processEvents()
        # We need to sleep for a few milliseconds so we don't use 100% of the CPU.
        time.sleep(0.01)

    image = Image.open(io.BytesIO(img[0]))
    image.show()
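The example listings on this page omit their imports and the small dump helper they call. A minimal set they appear to assume, based on standard PyNDN/PyCNL usage, is sketched below; module paths may differ between library versions, and project-specific classes such as SegmentLabel and PlayDetect are not covered here.

import io
import json
import logging
import time
from functools import partial

from PIL import Image
from pyndn import Face, Name
from pyndn.util import Blob
from pyndn.util.common import Common
from pyndn.security import KeyChain
from pycnl import Namespace
from pycnl.generalized_object import (GeneralizedObjectHandler,
                                      GeneralizedObjectStreamHandler)


def dump(*args):
    # Print all arguments on one line, as the PyNDN example utilities do.
    print(" ".join(str(a) for a in args))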
Example #2
    def fetch_data(self, prefix, start_frame, end_frame):
        # type: (Name, int, int) -> None
        for frame_id in range(start_frame, end_frame + 1):
            name = Name(prefix).append(str(frame_id))

            # Feed server with existing data
            if self.storage.exists(name):
                self.on_payload(name)

            # Fetching new data
            logging.info("Fetching: %s", name.toUri())
            # TODO: Namespace will put everything into memory
            frame_namespace = Namespace(name)
            frame_namespace.setFace(self.face)
            frame_namespace.setHandler(
                GeneralizedObjectHandler(partial(self.on_generalized_obj,
                                                 name))).objectNeeded()
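GeneralizedObjectHandler invokes its callback with (contentMetaInfo, objectNamespace), as Example #3 below shows, so the on_generalized_obj method bound above with functools.partial receives the frame name as its first argument. A hypothetical sketch of that callback (the method body and the storage.put call are assumptions, not part of this snippet):

    def on_generalized_obj(self, name, contentMetaInfo, objectNamespace):
        # type: (Name, ContentMetaInfo, Namespace) -> None
        # `name` was bound in fetch_data with functools.partial.
        blob = objectNamespace.obj  # the re-assembled content as a Blob
        logging.info("Fetched %s (%s, %d bytes)", name.toUri(),
                     contentMetaInfo.getContentType(), blob.size())
        self.storage.put(name, blob.toBytes())  # hypothetical storage API
        self.on_payload(name)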
Example #3
def main(args):
    face = Face()

    prefix = Namespace(args['<prefix>'])
    prefix.setFace(face)

    enabled = [True]

    # This is called to print the content after it is re-assembled from segments.
    def onGeneralizedObject(contentMetaInfo, objectNamespace):
        if args['--verbose']:
            print(objectNamespace.getName())
        print(str(objectNamespace.obj))
        enabled[0] = False

    handler = GeneralizedObjectHandler(onGeneralizedObject)
    handler.setNComponentsAfterObjectNamespace(1)
    prefix.setHandler(handler).objectNeeded(True)

    # Loop calling processEvents until a callback sets enabled[0] = False.
    while enabled[0]:
        face.processEvents()
        # We need to sleep for a few milliseconds so we don't use 100% of the CPU.
        time.sleep(0.01)
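The args dict in this example uses '<prefix>' and '--verbose' keys, which is the shape docopt produces. A hypothetical entry point for it might look like this (the usage string and script name are assumptions):

from docopt import docopt

USAGE = """
Usage: consumer.py [--verbose] <prefix>
"""

if __name__ == '__main__':
    # Parse the command line into a dict with '<prefix>' and '--verbose'
    # keys and hand it to main() above.
    main(docopt(USAGE))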
Example #4
def main(index_f, weight_f, config_f, consumerMode, th, fetchPrefix,
         publishPrefix):
    # Consume annotation objects from fetchPrefix, run scene detection on
    # them, and publish the detected scenes under publishPrefix. The default
    # Face (created below) will connect using a Unix socket, or to "localhost".
    instance_prefix = fetchPrefix.split("/")[-1]
    sl = SegmentLabel(index_f, weight_f, instance_prefix, th)

    if config_f != "":
        sl.readConfig(config_f)

    face = Face()
    keyChain = KeyChain()
    face.setCommandSigningInfo(keyChain, keyChain.getDefaultCertificateName())

    #stream_annConsumer_test = Namespace("/ndn/eb/stream/run/28/annotations")
    #stream_annConsumer_test.setFace(face)
    print(' > Will fetch from ' + str(fetchPrefix))
    stream_annConsumer_show = Namespace(fetchPrefix)
    stream_annConsumer_show.setFace(face)

    log_f = open(str("seglab_log") + ".txt", "w")
    log_f.close()

    stream_segProducer = Namespace(
        Name(publishPrefix).append(Name(fetchPrefix)[-1]), keyChain)
    print(' > Will publish segments under ' +
          str(stream_segProducer.getName()))
    publish_handler = GeneralizedObjectStreamHandler()
    # publish_handler.setLatestPacketFreshnessPeriod(30)
    stream_segProducer.setHandler(publish_handler)

    stream_segProducer.setFace(
        face,
        lambda prefixName: dump("Register failed for prefix", prefixName),
        lambda prefixName, whatever: dump("Register success for prefix",
                                          prefixName))

    def onNewAnnotation(sequenceNumber, contentMetaInfo, objectNamespace):
        ann = str(objectNamespace.obj)

        if "error" not in ann:
            jsonAnn = json.loads(ann)
            # print(jsonAnn["frameName"])
            segment_result = sl.sceneDetection(jsonAnn)
            if segment_result:
                print(segment_result)
                #dump("Got generalized object, sequenceNumber", sequenceNumber,
                #     ", content-type", contentMetaInfo.getContentType(), ":",
                #     str(jsonAnn["frameName"]), 'at', str(time.time()))

                publish_handler.addObject(Blob(json.dumps(segment_result)),
                                          "application/json")
                print(" > PUBLISHED SCENE " +
                      str(publish_handler.getProducedSequenceNumber()))

                # Append the result to the log file.
                with open("seglab_log.txt", "a") as f:
                    f.write("PUBLISHED SCENE: %s\n" %
                            publish_handler.getProducedSequenceNumber())
                    f.write("%s\r\n" % segment_result)

    pipelineSize = 0

    #if consumerMode == 'default':
    #    stream_annConsumer_default.setHandler(
    #      GeneralizedObjectStreamHandler(pipelineSize, onNewAnnotation)).objectNeeded()

    stream_annConsumer_show.setHandler(
        GeneralizedObjectStreamHandler(pipelineSize,
                                       onNewAnnotation)).objectNeeded()

    #stream_annConsumer_test.setHandler(
    #    GeneralizedObjectStreamHandler(pipelineSize, onNewAnnotation)).objectNeeded()

    while True:
        face.processEvents()
        # We need to sleep for a few milliseconds so we don't use 100% of the CPU.
        time.sleep(0.01)
Example #5
def main():
    # Publish annotations read from a JSON file as a generalized object
    # stream. The default Face will connect using a Unix socket, or to
    # "localhost".
    face = Face()

    # Create an in-memory key chain with default keys.
    # keyChain = KeyChain("pib-memory:", "tpm-memory:")
    # keyChain.importSafeBag(SafeBag
    #   (Name("/testname/KEY/123"),
    #    Blob(DEFAULT_RSA_PRIVATE_KEY_DER, False),
    #    Blob(DEFAULT_RSA_PUBLIC_KEY_DER, False)))
    # face.setCommandSigningInfo(keyChain, keyChain.getDefaultCertificateName())

    # Use default keys
    keyChain = KeyChain()
    face.setCommandSigningInfo(keyChain, keyChain.getDefaultCertificateName())

    publishIntervalMs = 1000.0 / 10
    stream = Namespace("/ndn/eb/stream/run/28/annotations", keyChain)
    handler = GeneralizedObjectStreamHandler()
    stream.setHandler(handler)

    dump("Register prefix", stream.name)
    # Set the face and register to receive Interests.
    stream.setFace(
        face,
        lambda prefixName: dump("Register failed for prefix", prefixName))

    # Loop, producing a new object every publishIntervalMs milliseconds (and
    # also calling processEvents()).
    previousPublishMs = 0

    # Test by publishing an existing JSON file (one JSON object per line).
    jsonFilePath = "data/faith.json"
    curr = []
    for line in open(jsonFilePath, 'r'):
        curr.append(json.loads(line))

    # curr_list = []
    # for ann in curr:
    #     temp = []
    #     frameName = ann['frameName']
    #     for k in ann["annotations"]:
    #         # temp.append({"label": ''.join([i for i in k["label"] if not i.isdigit()]), "prob": k["prob"]})
    #         temp.append({"label": ''.join([i for i in k["label"] if not i.isdigit()]), "ytop": k["ytop"],
    #                      "ybottom": k["ybottom"], "xleft": k["xleft"], "xright": k["xright"], "prob": k["prob"],
    #                      "frameName": frameName})
    #
    #     curr_list.append(temp)

    total_cnt = len(curr)

    cnt = 0
    # Publish each annotation in turn; cnt wraps around at the end of the
    # list below, so this loop runs until the process is stopped.
    while cnt < total_cnt:
        now = Common.getNowMilliseconds()
        if now >= previousPublishMs + publishIntervalMs:
            dump("Preparing data for sequence",
                 handler.getProducedSequenceNumber() + 1)

            print(curr[cnt])

            handler.addObject(Blob(json.dumps(curr[cnt])), "application/json")

            # handler.addObject(
            #     Blob(json.dumps() + str(handler.getProducedSequenceNumber() + 1)),
            #     "application/json")

            cnt += 1

            if cnt == total_cnt:
                cnt = 0

            previousPublishMs = now

        face.processEvents()
        # We need to sleep for a few milliseconds so we don't use 100% of the CPU.
        time.sleep(0.01)
Example #6
def main(index_f, weight_f, consumerMode, k, query_interval, fetchPrefix,
         publishPrefix):
    # The default Face will connect using a Unix socket, or to "localhost".
    instance_prefix = fetchPrefix.split("/")[-1]
    pd = PlayDetect(index_f, weight_f, instance_prefix, k, query_interval)

    face = Face()
    keyChain = KeyChain()
    face.setCommandSigningInfo(keyChain, keyChain.getDefaultCertificateName())

    # sceneConsumer = Namespace("/ndn/eb/stream/run/28/annotation")
    engine = str(Name(fetchPrefix)[-1])
    sceneFetchPrefix = Name('/eb/seglab').append(engine)

    print(' > Will fetch annotations from ' + fetchPrefix)
    print(' > Will fetch scenes from ' + sceneFetchPrefix.toUri())

    sceneConsumer = Namespace(sceneFetchPrefix)
    sceneConsumer.setFace(face)

    annotationsConsumer = Namespace(fetchPrefix)

    #if consumerMode == "test":
    #    annotationsConsumer = Namespace("/ndn/eb/stream/run/28/annotations")
    #elif consumerMode == "default":
    #    annotationsConsumer = Namespace('/eb/proto/test/ml_processing/yolo_default')

    annotationsConsumer.setFace(face)

    log_f = open(str("playdetect_log") + ".txt", "w")
    log_f.close()

    playdetectProducer = Namespace(
        Name(publishPrefix).append(engine), keyChain)
    print(' > Will publish playdetect data under ' +
          playdetectProducer.getName().toUri())

    playdSegmentsHandler = GeneralizedObjectStreamHandler()
    # set freshness to 30
    # playdSegmentsHandler.setLatestPacketFreshnessPeriod(30)
    playdetectProducer.setHandler(playdSegmentsHandler)

    playdetectProducer.setFace(
        face,
        lambda prefixName: dump("Register failed for prefix", prefixName),
        lambda prefixName, whatever: dump("Register success for prefix",
                                          prefixName))

    def onNewScene(sequenceNumber, contentMetaInfo, objectNamespace):
        dump("Got scene (segment) :", str(objectNamespace.getName()))

        if str(objectNamespace.obj):
            # Store scene segment AND scene segment NAME into a database
            sceneSegmentName = objectNamespace.getName()
            sceneSegment = json.loads(str(objectNamespace.obj))
            pd.storeToDatabase(sceneSegmentName, sceneSegment)

    def onNewAnnotation(sequenceNumber, contentMetaInfo, objectNamespace):
        # dump("Got new annotation")
        stringObj = str(objectNamespace.obj)
        # print(stringObj)
        if stringObj and pd.itIsTimeToQueryDatabase():
            # TBD: make the query interval configurable.
            # Run the query against the database, using the received
            # annotation. The result should be a list that contains scene
            # segment names (see above); for now it holds startFrame and
            # endFrame. Most likely the query should be parameterized, e.g.
            # with a maxResultNum argument.
            result = pd.pickTops(json.loads(stringObj), k)
            if result:
                playdSegmentsHandler.addObject(Blob(json.dumps(result)),
                                               "application/json")

                print("PUBLISH SIMILAR SCENES: %s" %
                      playdSegmentsHandler.getProducedSequenceNumber())

                # Append the result to the log file.
                with open("playdetect_log.txt", "a") as f:
                    f.write("PUBLISHED SCENE: %s\n" %
                            playdSegmentsHandler.getProducedSequenceNumber())
                    f.write("%s\r\n" % result)

    pipelineSize_segConsume = 3
    sceneConsumer.setHandler(
        GeneralizedObjectStreamHandler(pipelineSize_segConsume,
                                       onNewScene)).objectNeeded()

    pipelineSize_annoConsume = 3
    annotationsConsumer.setHandler(
        GeneralizedObjectStreamHandler(pipelineSize_annoConsume,
                                       onNewAnnotation)).objectNeeded()

    while True:
        face.processEvents()
        # We need to sleep for a few milliseconds so we don't use 100% of the CPU.
        time.sleep(0.01)