Пример #1
0
def sendFrame(frame, HOST, PORT, typ=request_pb2.OBJECT, name=None):
    """JPEG-encode *frame*, send it to the DNN server, return its answer.

    Args:
        frame: image as a numpy array (OpenCV BGR layout).
        HOST, PORT: server address to connect to.
        typ: request type constant from request_pb2 (default: OBJECT).
        name: optional model name; written to req.model only when given.

    Returns:
        (result_str, latency_ms) from the server's DNNResponse on success.
        Falls through (returns None) only if no response was parsed.

    Raises:
        ValueError: if cv2.imencode fails to encode the frame.
        socket.error / protobuf errors propagate from the network exchange.
    """
    print(frame.shape)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    msg = None  # explicit: only set once a full response has been parsed
    try:
        sock.connect((HOST, PORT))
        ok, buf = cv2.imencode(".jpg", frame)
        if not ok:
            # Previously the flag was silently ignored; fail loudly instead
            # of sending an empty/garbage payload.
            raise ValueError("cv2.imencode failed to encode frame")
        req = request_pb2.DNNRequest()
        req.type = typ
        # NOTE(review): str(bytearray(...)) yields the raw JPEG bytes on
        # Python 2 (this file uses Python 2 elsewhere); on Python 3 it would
        # produce a repr string instead.
        req.data = str(bytearray(buf))
        if name is not None:
            req.model = name
        send_message(sock, req)
        # Response framing: 4-byte big-endian length prefix, then payload.
        len_buf = socket_read_n(sock, 4)
        msg_len = struct.unpack('>L', len_buf)[0]
        msg_buf = socket_read_n(sock, msg_len)
        msg = request_pb2.DNNResponse()
        msg.ParseFromString(msg_buf)
        print(typ, msg.result_str)
    finally:
        sock.close()
    if msg is not None:
        # Server latency is reported in seconds; convert to milliseconds.
        return msg.result_str, msg.latency * 1000
Пример #2
0
    def handle(self):
        """Serve one framed DNNRequest and write back a framed DNNResponse.

        Wire format (this handler): 4-byte little-endian length prefix
        ('<L'), then a serialized request_pb2.DNNRequest.  The reply is
        written with the same '<L' framing.  NOTE(review): the client
        snippets in this file unpack the response length with '>L'
        (big-endian) — one of the two sides looks inconsistent; verify
        against send_message / socket_read_n.

        Dispatches on req.type: FACE uses the two-stage face_net1/face_net2
        pipeline; SCENE and the default branch run oversampled
        classification through scene_net / net respectively.  This is
        Python 2 code (print statement, list-returning map, int '/').
        """
        # self.rfile is a file-like object created by the handler;
        # we can now use e.g. readline() instead of raw recv() calls
        beg = time.time()
        len_buf = self.rfile.read(4)
        length = struct.unpack('<L', len_buf)[0]
        print("len" + str(length))
        payload = self.read_n(length)
        req = request_pb2.DNNRequest()
        req.ParseFromString(payload)
        prob = 0       # confidence of the top FACE prediction (FACE path only)
        latency = 0    # model inference time in seconds, set per branch
        """
        with open("test.jpg", "wb") as f:
            f.write(req.data)
        input_image = caffe.io.load_image("test.jpg")
        """
        if (req.type == request_pb2.FACE):
            print("starting prediction")
            #input_image = load_face_from_memory(req.data)
            t1 = time.time()
            input_image = load_image_from_memory(req.data)
            prepared = face_input_prepare(face_net1, [input_image])
            #out = face_net1.forward_all(**{face_net1.inputs[0]: prepared})
            # Stage 1: run face_net1 only up to the "Result" blob, then feed
            # that blob into face_net2 for the final classification.
            out = face_net1.forward(end="Result",
                                    **{face_net1.inputs[0]: prepared})
            out2 = face_net2.forward_all(
                **{face_net2.inputs[0]: out["Result"]})
            i = out2["prob"].argmax()
            # squeeze drops the trailing singleton spatial axes of the
            # Caffe output blob (N, C, 1, 1) -> (N, C).
            prob = out2["prob"].squeeze(axis=(2, 3))[0][i]
            t2 = time.time()
            latency = t2 - t1
            print(prob)
            #label = face_words[i]
            label = target1[i]
            print(i, label)
        elif (req.type == request_pb2.SCENE):
            print("scene")
            input_image = load_image_from_memory(req.data)
            t1 = time.time()
            # Standard Caffe 10-crop oversampling: 4 corners + center,
            # each mirrored, for one input image.
            images = np.asarray(
                caffe.io.oversample([input_image], scene_net.crop_dims))
            # Reorder (N, H, W, C) -> (N, C, H, W) for the network input.
            caffe_in = np.zeros(np.array(images.shape)[[0, 3, 1, 2]],
                                dtype=np.float32)
            for ix, in_ in enumerate(images):
                caffe_in[ix] = scene_net.preprocess('data', in_)
            out = scene_net.forward_all(data=caffe_in)
            prediction = out[scene_net.outputs[0]].squeeze(axis=(2, 3))
            # Average the 10 crop predictions per image.
            # NOTE: '/' is integer division here — Python 2 semantics.
            prediction = prediction.reshape((len(prediction) / 10, 10, -1))
            prediction = prediction.mean(1)
            top5 = prediction.argsort()[0][-5:]
            i = prediction.argmax()
            t2 = time.time()
            latency = t2 - t1
            label = scene_words[i]
            # assumes scene_words entries are tab-separated "id\tname" lines
            # — TODO confirm against the label file.
            top5_label = map(lambda x: scene_words[x].split("\t")[1], top5)
            # map() returns a list on Python 2; reverse in place so the best
            # label comes first in the joined string.
            top5_label.reverse()
            label = ": ".join(top5_label)

            print(i, label)

        else:
            #prediction = net.forward_all(data=np.asarray([net.preprocess('data', input_image)]))
            # Default (OBJECT) path: same oversample/classify flow as SCENE
            # but through the generic object net and its word list.
            input_image = load_image_from_memory(req.data)
            t1 = time.time()
            images = np.asarray(
                caffe.io.oversample([input_image], net.crop_dims))
            caffe_in = np.zeros(np.array(images.shape)[[0, 3, 1, 2]],
                                dtype=np.float32)
            for ix, in_ in enumerate(images):
                caffe_in[ix] = net.preprocess('data', in_)
            out = net.forward_all(data=caffe_in)
            prediction = out[net.outputs[0]].squeeze(axis=(2, 3))
            prediction = prediction.reshape((len(prediction) / 10, 10, -1))
            prediction = prediction.mean(1)
            top5 = prediction.argsort()[0][-5:]
            i = prediction.argmax()
            t2 = time.time()
            label = words[i]
            # assumes words entries look like "n01234 name,synonym,..." —
            # take the first synonym of each of the top-5 — TODO confirm.
            top5_label = map(lambda x: words[x].split(" ", 1)[1].split(",")[0],
                             top5)
            top5_label.reverse()
            label = ": ".join(top5_label)
            latency = t2 - t1
            print(i, label)

        print "{} wrote:".format(self.client_address[0])
        response = request_pb2.DNNResponse()
        response.success = True
        response.result = i
        response.result_str = label
        response.latency = latency
        #response.confidence = prob
        s = response.SerializeToString()
        packed_len = struct.pack('<L', len(s))
        # Likewise, self.wfile is a file-like object used to write back
        # to the client
        self.wfile.write(packed_len + s)
Пример #3
0
        n -= len(data)
    return buf


# Minimal test client: send one FACE request with a local JPEG and print
# the server's verdict plus the measured round-trip time.
req = request_pb2.DNNRequest()
req.type = request_pb2.FACE

# req.data carries the raw JPEG bytes; the server decodes them itself.
with open("../cat.jpg", "rb") as f:
    req.data = f.read()

#HOST, PORT = "archon.cs.washington.edu", 9999
# Port comes from the command line so multiple servers can run side by side.
HOST, PORT = "archon.cs.washington.edu", int(sys.argv[1])
# Create a socket (SOCK_STREAM means a TCP socket)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    sock.connect((HOST, PORT))
    beg = time.time()
    send_message(sock, req)
    # Response framing: 4-byte length prefix, then the serialized
    # DNNResponse.  NOTE(review): unpacked here as big-endian ('>L') while
    # the server handler packs with '<L' — verify which side is right.
    len_buf = socket_read_n(sock, 4)
    msg_len = struct.unpack('>L', len_buf)[0]
    msg_buf = socket_read_n(sock, msg_len)
    msg = request_pb2.DNNResponse()
    msg.ParseFromString(msg_buf)
    end = time.time()
    print(msg.success)
    print(msg.result)
    # Round-trip time in milliseconds (network + server inference).
    print((end - beg) * 1000)

finally:
    sock.close()