Code example #1
File: benchmark.py Project: HexToString/Serving
from paddle_serving_server.pipeline import PipelineClient


def run_rpc(thread, batch_size):
    client = PipelineClient()
    client.connect(['127.0.0.1:9998'])
    # Build a batch by repeating one feature row, joined with ";".
    value = "0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332"
    all_value = ";".join([value for i in range(batch_size)])
    data = {"key": "x", "value": all_value}
    # Send 1000 requests; only the last response is printed.
    for i in range(1000):
        ret = client.predict(
            feed_dict={data["key"]: data["value"]}, fetch=["res"])
    print(ret)
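In the original benchmark script a function like this is typically fanned out over several worker threads. Below is a minimal sketch of one way to drive it with the standard library, assuming run_rpc as defined above; the run_multithread name and the thread count are illustrative, not from the project.

import threading

# Hypothetical driver (not part of the original benchmark): start `thread`
# worker threads, each calling run_rpc once.
def run_multithread(thread, batch_size):
    workers = [
        threading.Thread(target=run_rpc, args=(thread, batch_size))
        for _ in range(thread)
    ]
    for w in workers:
        w.start()
    for w in workers:
        w.join()


if __name__ == "__main__":
    run_multithread(thread=4, batch_size=1)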
Code example #2
    def predict_pipeline_rpc(self, batch_size=1):
        # 1.prepare feed_data
        feed_dict = {'x': '0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332'}

        # 2.init client
        client = PipelineClient()
        client.connect(['127.0.0.1:9998'])

        # 3.predict for fetch_map
        ret = client.predict(feed_dict=feed_dict)
        # 4.convert the returned string to a numpy array
        result = {"prob": np.array(eval(ret.value[0]))}
        return result
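The method parses the response payload with eval. Below is a minimal sketch of a stricter parse using ast.literal_eval, assuming ret.value[0] holds a plain Python list literal such as "[0.1, 0.9]"; the parse_prob helper is illustrative, not part of the original class.

import ast

import numpy as np


def parse_prob(ret):
    # ret.value[0] is assumed to be a Python list literal; literal_eval
    # only accepts simple literals, unlike eval.
    return {"prob": np.array(ast.literal_eval(ret.value[0]))}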
Code example #3
import os
import time
import base64

from paddle_serving_server.pipeline import PipelineClient


def cv2_to_base64(image):
    # Encode raw image bytes as a base64 string for the feed dict.
    return base64.b64encode(image).decode('utf8')


def run_rpc(thread, batch_size):
    client = PipelineClient()
    client.connect(['127.0.0.1:18080'])
    start = time.time()
    test_img_dir = "imgs/"
    for img_file in os.listdir(test_img_dir):
        with open(os.path.join(test_img_dir, img_file), 'rb') as file:
            image_data = file.read()
        image = cv2_to_base64(image_data)
        # Keep sending this image for roughly 10 seconds before moving on.
        start_time = time.time()
        while True:
            ret = client.predict(feed_dict={"image": image}, fetch=["res"])
            if time.time() - start_time > 10:
                break
    end = time.time()
    return [[end - start]]
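The function above reports only total wall-clock time. Here is a sketch of a variant that also counts completed requests so an approximate QPS figure can be derived, under the same assumptions (same endpoint, same cv2_to_base64 helper); the counter and the return value are additions for illustration.

def run_rpc_with_qps(thread, batch_size):
    client = PipelineClient()
    client.connect(['127.0.0.1:18080'])
    total_reqs = 0
    start = time.time()
    test_img_dir = "imgs/"
    for img_file in os.listdir(test_img_dir):
        with open(os.path.join(test_img_dir, img_file), 'rb') as f:
            image = cv2_to_base64(f.read())
        # Send requests with this image for roughly 10 seconds.
        window_start = time.time()
        while time.time() - window_start < 10:
            client.predict(feed_dict={"image": image}, fetch=["res"])
            total_reqs += 1
    elapsed = time.time() - start
    return total_reqs / elapsed  # approximate requests per second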
Code example #4
import time

from paddle_serving_server.pipeline import PipelineClient


def run_rpc(thread, batch_size):
    client = PipelineClient()
    client.connect(['127.0.0.1:9998'])
    with open("data-c.txt", 'r') as fin:
        start = time.time()
        lines = fin.readlines()
        start_idx = 0
        while start_idx < len(lines):
            end_idx = min(len(lines), start_idx + batch_size)
            # Feed keys "0", "1", ... index the samples within one batch.
            feed = {}
            for i in range(start_idx, end_idx):
                feed[str(i - start_idx)] = lines[i]
            ret = client.predict(feed_dict=feed, fetch=["res"])
            start_idx += batch_size
            if start_idx > 1000:
                break
        end = time.time()
    return [[end - start]]
Code example #5
import sys
import os
import yaml
import requests
import time
import json
from paddle_serving_server.pipeline import PipelineClient
import numpy as np

client = PipelineClient()
client.connect(['127.0.0.1:9998'])
batch_size = 101
with open("data-c.txt", 'r') as fin:
    lines = fin.readlines()
    start_idx = 0
    while start_idx < len(lines):
        end_idx = min(len(lines), start_idx + batch_size)
        feed = {}
        for i in range(start_idx, end_idx):
            feed[str(i - start_idx)] = lines[i]
        ret = client.predict(feed_dict=feed, fetch=["res"])
        print(ret)
        start_idx += batch_size
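The script prints the raw response. Below is a minimal sketch of checking and unpacking it first, assuming the pipeline Response message exposes err_no / err_msg alongside the parallel key / value string lists (the key / value lists appear in other snippets on this page; err_no / err_msg are assumptions to verify against your Serving version).

def unpack_response(ret):
    # err_no == 0 is assumed to mean success; key/value are parallel lists.
    if ret.err_no != 0:
        raise RuntimeError("pipeline error {}: {}".format(ret.err_no, ret.err_msg))
    return dict(zip(ret.key, ret.value))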
Code example #6
File: run_system.py Project: tianxin1860/PaddleNLP
    # Collect (query_text, text, distance) rows from the Milvus search results.
    for line in results:
        for item in line:
            idx = item.id
            distance = item.distance
            text = id2corpus[idx]
            print(text, distance)
            list_data.append([query_text, text, distance])
    # Sort by distance and dump the recalled texts to a TSV file.
    df = pd.DataFrame(list_data, columns=['query_text', 'text', 'distance'])
    df = df.sort_values(by="distance", ascending=True)
    df.to_csv('data/recall_predict.csv',
              columns=['text', 'distance'],
              sep='\t',
              header=None,
              index=False)


if __name__ == "__main__":
    client = PipelineClient()
    client.connect(['127.0.0.1:8080'])
    list_data = ["买了社保,是不是就不用买商业保险了?"]
    feed = {}
    for i, item in enumerate(list_data):
        feed[str(i)] = item
    start_time = time.time()
    ret = client.predict(feed_dict=feed)
    end_time = time.time()
    print("Extract feature time to cost :{} seconds".format(end_time -
                                                            start_time))
    result = np.array(eval(ret.value[0]))
    search_in_milvus(result, list_data[0])
Code example #7
from paddle_serving_server.pipeline import PipelineClient


def init_client():
    client = PipelineClient()
    client.connect(['127.0.0.1:18090'])
    return client
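A helper like this is usually called once and the client reused across requests. Below is a minimal sketch of memoizing it with functools.lru_cache so repeated calls do not reconnect; the get_client name and the caching itself are illustrative, not part of the original project.

import functools

from paddle_serving_server.pipeline import PipelineClient


@functools.lru_cache(maxsize=1)
def get_client():
    # Connects once; subsequent calls return the same client object.
    client = PipelineClient()
    client.connect(['127.0.0.1:18090'])
    return client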
Code example #8
from paddle_serving_server.pipeline import PipelineClient
import numpy as np

client = PipelineClient()
client.connect(['127.0.0.1:18070'])

words = 'i am very sad | 0'

futures = []
for i in range(100):
    futures.append(
        client.predict(feed_dict={
            "words": words,
            "logid": 10000 + i
        },
                       fetch=["prediction"],
                       asyn=True,
                       profile=False))
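The loop above fires 100 asynchronous requests but never collects them. A minimal sketch of draining the futures afterwards, assuming the object returned by predict(..., asyn=True) exposes a blocking result() method and the response carries err_no / err_msg fields (treat both as assumptions for your Serving version):

for f in futures:
    res = f.result()  # blocks until this request's response arrives
    if res.err_no != 0:
        print("request failed: {}".format(res.err_msg))
    else:
        print(res.value)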
Code example #9
from paddle_serving_server.pipeline import PipelineClient
import numpy as np
import requests
import json
import cv2
import base64
import os

client = PipelineClient()
client.connect(['127.0.0.1:18090'])


def cv2_to_base64(image):
    return base64.b64encode(image).decode('utf8')


test_img_dir = "imgs/"
# Note: only the last image read in this loop is kept; the predict loop
# below sends that single image.
for img_file in os.listdir(test_img_dir):
    with open(os.path.join(test_img_dir, img_file), 'rb') as file:
        image_data = file.read()
    image = cv2_to_base64(image_data)

for i in range(1):
    ret = client.predict(feed_dict={"image": image}, fetch=["res"])
Code example #10
from paddle_serving_server.pipeline import PipelineClient
import numpy as np
import requests
import json
import cv2
import base64
import os

client = PipelineClient()
client.connect(['127.0.0.1:9993'])


def cv2_to_base64(image):
    return base64.b64encode(image).decode('utf8')


with open("daisy.jpg", 'rb') as file:
    image_data = file.read()
image = cv2_to_base64(image_data)

for i in range(1):
    ret = client.predict(feed_dict={"image": image}, fetch=["label", "prob"])
    print(ret)
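The same pipeline service typically also exposes an HTTP endpoint; below is a minimal sketch of the equivalent request using the requests and json modules already imported above. The port and the "imagenet" segment of the URL are placeholders that must match http_port and the service name in the pipeline's config.yml; only the {"key": [...], "value": [...]} body format follows the standard pipeline web client convention.

# Hypothetical HTTP counterpart; port and service name are placeholders.
url = "http://127.0.0.1:18080/imagenet/prediction"
payload = {"key": ["image"], "value": [image]}
resp = requests.post(url=url, data=json.dumps(payload))
print(resp.json())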
Code example #11
from paddle_serving_server.pipeline import PipelineClient
import numpy as np
import requests
import json
import cv2
import base64
import os

client = PipelineClient()
client.connect(['127.0.0.1:18090'])

video_url = "https://paddle-serving.bj.bcebos.com/model/PaddleVideo/example.avi"
for i in range(1):
    ret = client.predict(feed_dict={"video_url": video_url}, fetch=["res"])
    print(ret)