Example #1
def main(_):

    # Definition of the paths
    weights = 'coco'
    if weights == 'voc':
        import voc_net as net
    else:
        import coco_net as net
    voc_paths = ['./yolov2-tiny-voc.weights']
    coco_paths = ['./yolov2-tiny.weights']
    input_img_path = './dog.jpg'

    # If you do not have the checkpoint yet, keep it like this! It will be created automatically the first time you run test.py
    ckpt_folder_path = './ckpt/'

    # Definition of the parameters
    input_height = 416
    input_width = 416

    # Definition of the session
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    # Check for an existing checkpoint: load the weights from it if it exists, otherwise load them from the binary weights file
    # print('Looking for a checkpoint...')
    saver = tf.train.Saver()
    if weights == 'coco':
        _ = weights_loader.load(sess, coco_paths[0], ckpt_folder_path, saver,
                                'coco')
    else:
        _ = weights_loader.load(sess, voc_paths[0], ckpt_folder_path, saver,
                                'voc')

    # Preprocess the input image
    print('Preprocessing...')
    preprocessed_image = preprocessing(input_img_path, input_height,
                                       input_width)

    # Compute the predictions on the input image
    print('Computing predictions...')
    predictions = intermediate_inference(sess, preprocessed_image, weights)
    for i in range(16):
        inter_predictions = predictions[0, :, :, i]
        inter_predictions = inter_predictions + abs(np.amin(inter_predictions))
        inter_predictions = inter_predictions / np.amax(inter_predictions)
        inter_predictions *= 255
        if weights == 'coco':
            cv2.imwrite('cocos/img' + str(i) + ".jpg", inter_predictions)
        else:
            cv2.imwrite('vocs/img' + str(i) + ".jpg", inter_predictions)
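
Note: intermediate_inference is not listed on this page. A minimal sketch of what such a helper could look like, assuming the imported network module exposes its input placeholder as net.x and an early activation as net.h1 (both attribute names are assumptions, not taken from the original code):

def intermediate_inference(sess, preprocessed_image, weights):
    # Pick the network module that matches the loaded weights
    if weights == 'coco':
        import coco_net as net
    else:
        import voc_net as net
    # Run the graph only up to an intermediate activation and return it
    # with shape (1, H, W, channels), as expected by the loop above
    return sess.run(net.h1, feed_dict={net.x: preprocessed_image})
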
Example #2
def preprocess():

    point_0 = time.time()
    # Definition of the paths
    weights_path = 'yolov2-tiny-voc.weights'
    input_img_path = 'test_zzh.jpg'
    output_image_path = 'output/zzh_out.jpg'

    # If you do not have the checkpoint yet, keep it like this! It will be created automatically the first time you run test.py
    ckpt_folder_path = './ckpt/'

    # Definition of the parameters
    input_height = 416
    input_width = 416
    # Definition of the session
    sess = tf.InteractiveSession()

    tf.global_variables_initializer().run()

    # Check for an existing checkpoint: load the weights from it if it exists, otherwise load them from the binary weights file
    print('Looking for a checkpoint...')
    saver = tf.train.Saver()
    _ = weights_loader.load(sess, weights_path, ckpt_folder_path, saver)

    # time_1: time spent loading the parameters
    time_1 = time.time() - point_0

    # Preprocess the input image
    print('Preprocessing...')
    preprocessed_image = preprocessing(input_img_path, input_height,
                                       input_width)

    # time_2: preprocessing time
    time_2 = time.time() - point_0 - time_1
    return sess, preprocessed_image, time_1, time_2
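
A hypothetical caller would unpack the returned session, the preprocessed image and the two timings, for example:

sess, preprocessed_image, time_1, time_2 = preprocess()
print('load: {:.3f} s, preprocess: {:.3f} s'.format(time_1, time_2))
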
Example #3
def main(_):

    # Definition of the paths
    weights_path      = './yolov2-tiny-voc_352_288_final.weights'
    #input_img_path    = './horses.jpg'
    output_image_path = './output.jpg'

    # If you do not have the checkpoint yet, keep it like this! It will be created automatically the first time you run test.py
    ckpt_folder_path = './ckpt/'

    # Definition of the parameters
    ph_height = 288 # placeholder height
    ph_width  = 352 # placeholder width
    score_threshold = 0.3
    iou_threshold = 0.3

    # Definition of the session
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    # Check for an existing checkpoint: load the weights from it if it exists, otherwise load them from the binary weights file
    print('Looking for a checkpoint...')
    saver = tf.train.Saver()
    _ = weights_loader.load(sess,weights_path,ckpt_folder_path,saver)

#    cam = cv2.VideoCapture(0)
#    assert cam is not None
#    print("Opened UVC-Camera via /dev/video0")
#    cam.set(cv2.CAP_PROP_FPS,30)
#    cam.set(cv2.CAP_PROP_FRAME_WIDTH,320)
#    cam.set(cv2.CAP_PROP_FRAME_HEIGHT,240)

    start = time()
    uvc = UVC().start()
    img_count = 0
    while True:
        #r,input_image = cam.read()
        input_image = uvc.read()
        #assert r is True
        # Preprocess the input image
        preprocessed_image = preprocessing(input_image,ph_height,ph_width)

        # Compute the predictions on the input image
        predictions = inference(sess,preprocessed_image)

        # Postprocess the predictions and save the output image
        output_image = postprocessing(predictions,input_image,score_threshold,iou_threshold,ph_height,ph_width)

        cv2.imshow('yolov2-tiny_352x288',output_image)
        key=cv2.waitKey(1)
        if key!=-1:break
        elapsed=(time()-start)
        img_count+=1
        sys.stdout.write('\b'*20)
        sys.stdout.write("%.2fFPS"%(img_count/elapsed))
        sys.stdout.flush()

    print("\nfinalize")
    cv2.destroyAllWindows()
    uvc.stop()
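
Note: the UVC class is not shown on this page. A minimal sketch of a threaded capture wrapper with the same start()/read()/stop() interface, assuming a plain OpenCV camera (the details are assumptions, not the original implementation):

import threading
import cv2

class UVC:
    def __init__(self, src=0):
        self.cap = cv2.VideoCapture(src)
        self.grabbed, self.frame = self.cap.read()
        self.stopped = False

    def start(self):
        # Grab frames on a background thread so read() never blocks
        threading.Thread(target=self._update, daemon=True).start()
        return self

    def _update(self):
        while not self.stopped:
            self.grabbed, self.frame = self.cap.read()

    def read(self):
        # Return the most recent frame
        return self.frame

    def stop(self):
        self.stopped = True
        self.cap.release()
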
Example #4
def main(_):

    # Definition of the paths
    weights_path = './yolov2-tiny-voc_352_288_final.weights'
    input_img_path = './horses.jpg'
    output_image_path = './output.jpg'

    # If you do not have the checkpoint yet, keep it like this! It will be created automatically the first time you run test.py
    ckpt_folder_path = './ckpt/'

    # Definition of the parameters
    ph_height = 288  # placeholder height
    ph_width = 352  # placeholder width
    score_threshold = 0.3
    iou_threshold = 0.3

    # Definition of the session
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    # Check for an existing checkpoint: load the weights from it if it exists, otherwise load them from the binary weights file
    print('Looking for a checkpoint...')
    saver = tf.train.Saver()
    _ = weights_loader.load(sess, weights_path, ckpt_folder_path, saver)

    # Preprocess the input image
    print('Preprocessing...')
    preprocessed_image = preprocessing(input_img_path, ph_height, ph_width)
    print("preprocessing", preprocessed_image.shape)

    # Compute the predictions on the input image
    print('Computing predictions...')
    predictions = inference(sess, preprocessed_image)
    print("inference", predictions.shape)

    # Postprocess the predictions and save the output image
    print('Postprocessing...')
    output_image = postprocessing(predictions, input_img_path, score_threshold,
                                  iou_threshold, ph_height, ph_width)
    cv2.imwrite(output_image_path, output_image)

    print('\n'.join(
        [n.name for n in tf.get_default_graph().as_graph_def().node]))
    output_name = ['xoutput']
    tf.identity(net.h9, name=str(output_name[0]))
    frzdef = tf.graph_util.convert_variables_to_constants(
        sess, sess.graph_def, output_name)
    with open('y.pb', 'wb') as f:
        f.write(frzdef.SerializeToString())
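
The frozen graph written to y.pb can later be reloaded without the checkpoint. A short sketch of how that might look; 'xoutput:0' comes from the code above, while the name of the input placeholder depends on the net module and is only a guess here:

import tensorflow as tf

with tf.gfile.GFile('y.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')
    output_tensor = graph.get_tensor_by_name('xoutput:0')
    # input_tensor = graph.get_tensor_by_name('input:0')  # placeholder name is an assumption

with tf.Session(graph=graph) as sess:
    # predictions = sess.run(output_tensor, feed_dict={input_tensor: preprocessed_image})
    pass
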
Example #5
def main(_):

    point_0 = time.time()
    # Definition of the paths
    weights_path = 'yolov2-tiny-voc.weights'
    input_img_path = 'test_zzh.jpg'
    output_image_path = 'output/zzh_out.jpg'

    # If you do not have the checkpoint yet, keep it like this! It will be created automatically the first time you run test.py
    ckpt_folder_path = './ckpt/'

    # Definition of the parameters
    input_height = 416
    input_width = 416
    cut_point = 3
    # Definition of the session
    sess = tf.InteractiveSession()

    tf.global_variables_initializer().run()

    # Check for an existing checkpoint: load the weights from it if it exists, otherwise load them from the binary weights file
    print('Looking for a checkpoint...')
    saver = tf.train.Saver()
    _ = weights_loader.load(sess, weights_path, ckpt_folder_path, saver)

    # time_1: time spent loading the parameters
    time_1 = time.time() - point_0

    # Preprocess the input image
    print('Preprocessing...')
    preprocessed_image = preprocessing(input_img_path, input_height,
                                       input_width)

    # time_2: preprocessing time
    time_2 = time.time() - point_0 - time_1

    # Establish the connection to the server
    HOST = '192.168.1.100'
    PORT = 12345
    buffsize = 65535
    soc = connect_to_server(HOST, PORT)

    send_time("time_1", time_1, soc, buffsize)
    send_time("time_2", time_2, soc, buffsize)

    print('Sending weight')
    for i in range(9):
        send_weight_and_time(sess, preprocessed_image, i + 1, soc, buffsize)
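
connect_to_server, send_time and send_weight_and_time are not listed on this page. A minimal sketch of the first two, written to match the command/acknowledge handshake used by the server-side examples below ("recive" after the command, "done" after the value); treat it as an assumption about the missing helpers, not the original code:

import socket

def connect_to_server(host, port):
    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    soc.connect((host, port))
    return soc

def send_time(name, value, soc, buffsize):
    # Announce which timing is coming, wait for the server's acknowledgement,
    # then send the value and wait for "done"
    soc.send(name.encode())
    soc.recv(buffsize)
    soc.send(str(value).encode())
    soc.recv(buffsize)
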
Example #6
def load_data():
    point_0 = time.time()
    # Definition of the paths
    weights_path = 'yolov2-tiny-voc.weights'

    # If you do not have the checkpoint yet, keep it like this! It will be created automatically the first time you run test.py
    ckpt_folder_path = './ckpt/'

    sess = tf.InteractiveSession()

    tf.global_variables_initializer().run()

    # Check for an existing checkpoint: load the weights from it if it exists, otherwise load them from the binary weights file
    print('Looking for a checkpoint...')
    saver = tf.train.Saver()
    _ = weights_loader.load(sess, weights_path, ckpt_folder_path, saver)

    time_1 = time.time() - point_0
    return sess, time_1
Example #7
def main(_):

    # Definition of the paths
    weights_path = './tiny-yolo-voc.weights'
    input_img_path = './horses.jpg'
    output_image_path = './output.jpg'

    # If you do not have the checkpoint yet, keep it like this! It will be created automatically the first time you run test.py
    ckpt_folder_path = './ckpt/'

    # Definition of the parameters
    input_height = 416
    input_width = 416
    score_threshold = 0.3
    iou_threshold = 0.3

    # Definition of the session
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    # Check for an existing checkpoint: load the weights from it if it exists, otherwise load them from the binary file
    print('Looking for a checkpoint...')
    saver = tf.train.Saver()
    _ = weights_loader.load(sess, weights_path, ckpt_folder_path, saver)

    # Preprocess the input image
    print('Preprocessing...')
    preprocessed_image = preprocessing(input_img_path, input_height,
                                       input_width)
    print("preprocessing", preprocessed_image.shape)

    # Compute the predictions on the input image
    print('Computing predictions...')
    predictions = inference(sess, preprocessed_image)
    print("inference", predictions.shape)

    # Postprocess the predictions and save the output image
    print('Postprocessing...')
    output_image = postprocessing(predictions, input_img_path, score_threshold,
                                  iou_threshold, input_height, input_width)
    cv2.imwrite(output_image_path, output_image)
Example #8
    def __init__(self,
                 weight_path='./yolov2-tiny-voc.weights',
                 ckpt_folder_path='./ckpt/',
                 video_source=0,
                 speed=2):

        self.OBJECT = 'chair'

        # Step 1: Setup TensorFlow environment for object detection

        self.sess = tf.InteractiveSession()

        tf.global_variables_initializer().run()

        saver = tf.train.Saver()

        _ = weights_loader.load(self.sess, weight_path, ckpt_folder_path,
                                saver)

        # parameters for tinyYOLO detector

        self.input_height = 416
        self.input_width = 416

        self.score_threshold = 0.3
        self.iou_threshold = 0.3

        # Step 2: Setup video source from file

        #self.video_capture = cv2.VideoCapture(video_source)

        # Step 3: Setup decision engine for test

        #self.decider = decisionEngine()

        self.finder = blobFinder()

        # Additional parameter for speed up the video

        self.speed = speed
Example #9
    def __init__(self,
                 weight_path='./yolov2-tiny-voc.weights',
                 ckpt_folder_path='./ckpt/',
                 video_source='./test_video.mov'):
        self.sess = tf.InteractiveSession()
        tf.global_variables_initializer().run()

        saver = tf.train.Saver()
        _ = weights_loader.load(self.sess, weight_path, ckpt_folder_path,
                                saver)

        self.video_capture = cv2.VideoCapture(video_source)

        self.input_height = 416
        self.input_width = 416
        self.score_threshold = 0.3
        self.iou_threshold = 0.3
        self.n_frame = 0

        self.buf_pos = [0, 0, 0, 0, 0]
        self.buf_len = [0, 0, 0, 0, 0]
        self.wei_pos = [1, 1, 1, 1, 1]
        self.wei_len = [1, 1, 1, 1, 1]
Example #10
def main(_):
    counter = 0
    # Definition of the paths
    weights_path = 'yolov2-tiny-voc.weights'
    input_img_path = 'test_zzh_1.jpg'
    output_image_path = 'output/zzh_out.jpg'

    # If you do not have the checkpoint yet, keep it like this! It will be created automatically the first time you run test.py
    ckpt_folder_path = './ckpt/'

    # Definition of the parameters
    input_height = 416
    input_width = 416
    score_threshold = 0.1
    iou_threshold = 0.1

    # Definition of the session
    sess = tf.InteractiveSession()

    tf.global_variables_initializer().run()

    # Check for an existing checkpoint: load the weights from it if it exists, otherwise load them from the binary file
    print('Looking for a checkpoint...')
    saver = tf.train.Saver()
    _ = weights_loader.load(sess, weights_path, ckpt_folder_path, saver)

    # ------------------------ Predefinitions and data loading -----------------------
    # ----------------- Connect to the target server -------------------------------
    socketIO = SocketIO('192.168.1.104', 3333, LoggingNamespace)
    socketIO.on('connection', on_connect)
    socketIO.on('disconnect', on_disconnect)
    socketIO.on('reconnect', on_reconnect)
    # -------------------------------------------------------------
    time_1 = 0
    time_2 = 0

    host = '192.168.1.100'
    port = 12345
    buffsize = 65535

    ADDR = (host, port)

    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    soc.bind(ADDR)
    soc.listen(2)

    print('Wait for connection ...')
    soc_client, addr = soc.accept()
    print("Connection from :", addr)
    while True:
        data = ""
        data = soc_client.recv(buffsize).decode()
        # Server side: handle the operation for each command sent by the client
        if data == "time_start":
            # Record the overall start time
            start_time = time.time()
            print(start_time)
            soc_client.send("done".encode())

        elif data == "time_stop":
            stop_time = time.time()
            total_time = stop_time - start_time
            print(total_time)
            soc_client.send("done".encode())

        elif data == "finish":
            soc_client.close()
            break

        elif data == "time_1":
            soc_client.send("recive".encode())
            time_1 = soc_client.recv(buffsize).decode()
            time_1 = float(time_1)
            soc_client.send("done".encode())

        elif data == "time_2":
            soc_client.send("recive".encode())
            time_2 = soc_client.recv(buffsize).decode()
            time_2 = float(time_2)
            soc_client.send("done".encode())

        elif data == "send_weight_cutpoint":
            soc_client.send("recive".encode())
            # Timestamp p4
            point_4 = time.time()
            data_batches = ""
            new_data = ""
            cut_point = ""

            print("downloading...")
            while True:
                new_data = soc_client.recv(buffsize).decode()
                # print(new_data)
                # print(len(new_data))
                # Handle the final chunk (the cut point arrives as the second-to-last character)
                if ((new_data[-1] == '%')):
                    data_batches = data_batches + new_data[:-2]
                    cut_point = new_data[-2:-1]
                    print(cut_point)
                    break
                data_batches = data_batches + new_data

            # Timestamp p5
            point_5 = time.time()
            time_3 = point_5 - point_4
            print("downloading time: {}".format(time_3))

            print("processing...")
            # load
            data_text = json.loads(data_batches)
            predictions = np.array(data_text)
            cut_point = int(cut_point)
            predictions = inference(sess, predictions, cut_point)

            # Timestamp p6
            point_6 = time.time()
            time_4 = point_6 - point_5
            print("processing time: {}".format(time_4))

            print('Postprocessing...')
            # Produce the output image
            output_image = postprocessing(predictions, input_img_path,
                                          score_threshold, iou_threshold,
                                          input_height, input_width)
            # Timestamp p7
            point_7 = time.time()
            time_5 = point_7 - point_6
            time_backend = time_3 + time_4 + time_5
            time_frontend = time_1 + time_2
            time_total = time_backend + time_frontend
            Fps = 1. / time_total

            cv2.imwrite(output_image_path, output_image)
            counter += 1
            print("time_load_model = {}".format(time_1))
            print("time_preprocess = {}".format(time_2))
            print("time_downloading = {}".format(time_3))
            print("time_load_jsons = {}".format(time_4))
            print("time_postprocess = {}".format(time_5))
            print("time_backend = {}".format(time_backend))
            print("time_frontend = {}".format(time_frontend))
            print("Fps = {}".format(Fps))

            soc_client.send("done".encode())

            # -------------------- Send on a separate thread ------------
            # Create a thread
            thread_socketIO = myThread(1, socketIO, "Frame-" + str(counter),
                                       output_image, Fps)

            # Start a thread
            thread_socketIO.start()
            thread_socketIO.join()
            # -------------------- Send on a separate thread ------------
            pass

        else:
            pass
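
The '%'-terminated framing parsed above (JSON payload, then the cut point as a single digit, then '%') implies a client-side sender roughly like the following sketch. net.x and net.o1..net.o9 are assumed attribute names of the network module (net.o1..net.o9 also appear in Example #14); this is not the original send_weight_and_time:

import json

def send_weight_and_time(sess, preprocessed_image, cut_point, soc, buffsize):
    # Run the front part of the network up to the cut point on the client
    layers = [net.o1, net.o2, net.o3, net.o4, net.o5, net.o6, net.o7, net.o8, net.o9]
    intermediate = sess.run(layers[cut_point - 1],
                            feed_dict={net.x: preprocessed_image})
    # JSON payload + cut point digit + '%' terminator, exactly what the server slices apart
    payload = (json.dumps(intermediate.tolist()) + str(cut_point) + '%').encode()
    soc.send("send_weight_cutpoint".encode())
    soc.recv(buffsize)  # wait for the server's acknowledgement
    for i in range(0, len(payload), buffsize):
        soc.send(payload[i:i + buffsize])  # send in buffsize-sized chunks
    soc.recv(buffsize)  # wait for "done"
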
Example #11
def main(_):
    counter = 0
    # Definition of the paths
    weights_path = 'yolov2-tiny-voc.weights'
    input_img_path = 'test_zzh_1.jpg'
    output_image_path = 'output/zzh_out.jpg'

    # If you do not have the checkpoint yet, keep it like this! It will be created automatically the first time you run test.py
    ckpt_folder_path = './ckpt/'

    # Definition of the parameters
    input_height = 416
    input_width = 416
    score_threshold = 0.1
    iou_threshold = 0.1

    # Definition of the session
    sess = tf.InteractiveSession()

    tf.global_variables_initializer().run()

    # Check for an existing checkpoint: load the weights from it if it exists, otherwise load them from the binary file
    print('Looking for a checkpoint...')
    saver = tf.train.Saver()
    _ = weights_loader.load(sess, weights_path, ckpt_folder_path, saver)

    # ------------------------ Predefinitions and data loading -----------------------
    # ----------------- Connect to the target server -------------------------------
    socketIO = SocketIO('192.168.1.105', 3333, LoggingNamespace)
    socketIO.on('connection', on_connect)
    socketIO.on('disconnect', on_disconnect)
    socketIO.on('reconnect', on_reconnect)
    # ----------------- Connect to the target server -------------------------------

    time_1 = 0
    time_2 = 0

    host = '127.0.0.1'
    port = 12345
    buffsize = 65535

    ADDR = (host, port)

    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    soc.bind(ADDR)
    soc.listen(2)

    print('Wait for connection ...')
    soc_client, addr = soc.accept()
    print("Connection from :", addr)

    # Keep a long-lived connection and receive commands continuously; each recv refreshes data
    while True:
        data = ""
        data = soc_client.recv(buffsize).decode()
        while True:
            # While data is still the initial empty value, wait briefly so this loop does not hog the CPU
            if data == "":
                time.sleep(0.1)
            else:
                break
        # Server side: handle the operation for each command sent by the client
        if data == "time_start":
            # Record the overall start time (not used)
            start_time = time.time()
            print(start_time)
            soc_client.send("done".encode())

        elif data == "time_stop":
            # Stop recording the overall time (not used)
            stop_time = time.time()
            total_time = stop_time - start_time
            print(total_time)
            soc_client.send("done".encode())

        elif data == "finish":
            # Close the connection (not used)
            soc_client.close()

        elif data == "time_1":
            # Receive time_1 (initialization time)
            soc_client.send("recive".encode())
            time_1 = soc_client.recv(buffsize).decode()
            time_1 = float(time_1)
            soc_client.send("done".encode())

        elif data == "time_2":
            # Receive time_2 (preprocessing time)
            soc_client.send("recive".encode())
            time_2 = soc_client.recv(buffsize).decode()
            time_2 = float(time_2)
            soc_client.send("done".encode())

        elif data == "send_original_image":
            # Receive the original input image (the download time is recorded as recive_time)
            soc_client.send("recive".encode())
            recive_time = time.time()
            data_batches = ""
            new_data = ""

            # Start downloading
            print("downloading...")
            while True:
                new_data = soc_client.recv(buffsize).decode()

                # Handle the final chunk (terminated by '%')
                if ((new_data[-1] == '%')):
                    data_batches = data_batches + new_data[:-1]
                    break
                # Loop body: accumulate this chunk
                data_batches = data_batches + new_data

            print("processing...")
            # Parse the downloaded image into an np array (note: convert it to np.uint8 before imshow, otherwise the window shows black; a common pitfall)
            data_text = json.loads(data_batches)
            input_image = np.array(data_text)

            # Record the receive time (download + parsing)
            recive_time = time.time() - recive_time
            print("total time: {}".format(recive_time))
            soc_client.send("done".encode())

        elif data == "send_weight_cutpoint":
            # Download the intermediate weights and the cut point
            soc_client.send("recive".encode())
            # Timestamp p4
            point_4 = time.time()
            data_batches = ""
            new_data = ""
            cut_point = ""

            # Start downloading
            print("downloading...")
            while True:
                new_data = soc_client.recv(buffsize).decode()
                # Handle the final chunk (the cut point arrives as the second-to-last character)
                if ((new_data[-1] == '%')):
                    data_batches = data_batches + new_data[:-2]
                    cut_point = new_data[-2:-1]
                    # print(cut_point)
                    break
                # Loop body: accumulate this chunk
                data_batches = data_batches + new_data

            # Timestamp p5 (time_3: time spent downloading the weights)
            point_5 = time.time()
            time_3 = point_5 - point_4
            print("downloading time: {}".format(time_3))

            print("processing...")
            # Load and parse the data
            data_text = json.loads(data_batches)
            predictions = np.array(data_text)
            cut_point = int(cut_point)
            predictions = inference(sess, predictions, cut_point)

            # Timestamp p6 (time_4: time spent parsing the data and running the rest of the network)
            point_6 = time.time()
            time_4 = point_6 - point_5
            print("processing time: {}".format(time_4))

            print('Postprocessing...')
            # Produce the output image
            output_image = postprocessing(predictions, input_image,
                                          score_threshold, iou_threshold,
                                          input_height, input_width)
            # Timestamp p7
            # time_5: postprocessing time
            # Compute the backend time, the intermediate download time and the frontend preprocessing time (without initialization), then derive the FPS
            point_7 = time.time()
            time_5 = point_7 - point_6
            time_backend = time_4 + time_5
            time_downloading = time_3 + recive_time
            time_frontend = time_2
            time_total = time_backend + time_frontend + time_downloading
            Fps = 1. / time_total
            fps = "fps = {}".format(Fps)

            # Convert the image to uint8, otherwise the display would be black
            # Draw the FPS onto the processed output image
            output_image = np.uint8(output_image)
            cv2.putText(output_image, str(fps), (5, 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 255), 1)
            #cv2.imshow("hello", output_image)
            #if cv2.waitKey(1) & 0xFF == ord('q'):
            #break

            # Print all the timings of interest
            counter += 1
            print("time_load_model = {}".format(time_1))
            print("time_preprocess = {}".format(time_2))
            print("time_send_original_image = {}".format(recive_time))
            print("time_downloading = {}".format(time_downloading))
            print("time_load_jsons = {}".format(time_4))
            print("time_postprocess = {}".format(time_5))
            print("time_backend = {}".format(time_backend))
            print("time_frontend = {}".format(time_frontend))
            print("Fps = {}".format(Fps))

            # Send the "done" acknowledgement
            soc_client.send("done".encode())

            # -------------------- Send on a separate thread ------------
            # Create a thread
            thread_socketIO = myThread(1, socketIO, "Frame-" + str(counter),
                                       output_image, Fps, cut_point)

            # Start a thread
            thread_socketIO.start()
            thread_socketIO.join()
            # -------------------- Send on a separate thread ------------

            # Wait for the next command
            pass
        else:
            pass
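
myThread pushes the finished frame to the socket.io server in the background. A rough sketch of such a thread, assuming the image is shipped as a base64-encoded JPEG on a made-up 'frame' event (event name and payload layout are guesses, not the original class; Example #10 omits the cut point and Example #16 additionally passes the Q-table):

import base64
import threading
import cv2

class myThread(threading.Thread):
    def __init__(self, threadID, socketIO, name, image, fps, cut_point=None):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.socketIO = socketIO
        self.name = name
        self.image = image
        self.fps = fps
        self.cut_point = cut_point

    def run(self):
        # Encode the frame as JPEG and emit it together with the metadata
        ok, buf = cv2.imencode('.jpg', self.image)
        if not ok:
            return
        self.socketIO.emit('frame', {
            'name': self.name,
            'fps': self.fps,
            'cut_point': self.cut_point,
            'image': base64.b64encode(buf.tobytes()).decode('ascii'),
        })
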
Example #12
def main(_):
    # Record the start time
    point_0 = time.time()
    # Definition of the paths
    weights_path = 'yolov2-tiny-voc.weights'
    input_img_path = 'test_zzh.jpg'
    output_image_path = 'output/zzh_out.jpg'

    # If you do not have the checkpoint yet, keep it like this! It will be created automatically the first time you run test.py
    ckpt_folder_path = './ckpt/'

    # Definition of the parameters
    # Height, width, camera index, cut point
    input_height = 416
    input_width = 416
    input_cam_num = 0
    cut_point = 5
    # Definition of the session
    sess = tf.InteractiveSession()

    # Initialize all global variables
    tf.global_variables_initializer().run()

    # Check for an existing checkpoint: load the weights from it if it exists, otherwise load them from the binary file
    print('Looking for a checkpoint...')
    saver = tf.train.Saver()
    _ = weights_loader.load(sess, weights_path, ckpt_folder_path, saver)

    # time_1: initialization time, only recorded once
    time_1 = time.time() - point_0

    # Establish the connection to the server
    HOST = '127.0.0.1'
    PORT = 12345
    buffsize = 65535
    soc = connect_to_server(HOST, PORT)

    video_capture = enable_video_capture(input_cam_num)
    while True:
        # Start timing time_2
        time_2_1 = time.time()
        # Preprocess the input image
        print('Preprocessing...')
        preprocessed_image, original_image = preprocessing(
            video_capture, input_height, input_width)

        # time_2: preprocessing time
        time_2 = time.time() - time_2_1

        # Send the two timings collected above
        send_time("time_1", time_1, soc, buffsize)
        send_time("time_2", time_2, soc, buffsize)

        # Send the original image
        send_original_image(original_image, soc, buffsize)

        # Send the intermediate weights and the cut_point
        send_weight_and_time(sess, preprocessed_image, cut_point, soc,
                             buffsize)

        # Display the frame (comment out if not needed)
        cv2.imshow('frame', original_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Release the camera and close all imshow windows when done (currently the connection is simply dropped, so this is not used)
    video_capture.release()
    cv2.destroyAllWindows()
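
send_original_image is also not listed here. Given how the server examples reassemble it (JSON text terminated by '%'), a plausible sketch is the following; the chunking details are an assumption:

import json

def send_original_image(original_image, soc, buffsize):
    soc.send("send_original_image".encode())
    soc.recv(buffsize)  # wait for the server's acknowledgement
    data = (json.dumps(original_image.tolist()) + '%').encode()
    for i in range(0, len(data), buffsize):
        soc.send(data[i:i + buffsize])
    soc.recv(buffsize)  # wait for "done"
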
Example #13
def main():
    # Record the start time
    point_0 = time.time()
    # Definition of the paths
    weights_path = 'yolov2-tiny-voc.weights'
    input_img_path = 'test_zzh.jpg'
    output_image_path = 'output/zzh_out.jpg'

    # If you do not have the checkpoint yet, keep it like this! It will be created automatically the first time you run test.py
    ckpt_folder_path = './ckpt/'

    # Definition of the parameters
    # Height, width, camera index, cut point
    input_height = 416
    input_width = 416
    input_cam_num = 0
    cut_point = args.cut_point
    # Definition of the session
    sess = tf.InteractiveSession()

    # Initialize all global variables
    tf.global_variables_initializer().run()

    # Check for an existing checkpoint: load the weights from it if it exists, otherwise load them from the binary file
    print('Looking for a checkpoint...')
    saver = tf.train.Saver()
    _ = weights_loader.load(sess, weights_path, ckpt_folder_path, saver)

    # time_1: initialization time, only recorded once
    time_1 = time.time() - point_0

    # Establish the connection to the server
    HOST = '127.0.0.1'
    PORT = 12345
    buffsize = 65535
    soc = connect_to_server(HOST, PORT)

    video_capture = enable_video_capture(input_cam_num)

    time.sleep(1)

    # Start timing time_2
    time_2_1 = time.time()
    # Preprocess the input image
    print('Preprocessing...')
    preprocessed_image, original_image = preprocessing(video_capture,
                                                       input_height,
                                                       input_width)

    for i in range(9):
        # time_2: preprocessing time
        time_2 = time.time() - time_2_1

        # Send the two timings collected above
        send_time("time_1", time_1, soc, buffsize)
        send_time("time_2", time_2, soc, buffsize)

        # Send the original image
        send_original_image(original_image, soc, buffsize)

        # Send the intermediate weights and the cut_point
        send_weight_and_time(sess, preprocessed_image, i + 1, soc, buffsize)
Example #14
    score_threshold = 0.3
    iou_threshold = 0.3

    # Definition of the session
    # using one cpu core
    session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
                                  inter_op_parallelism_threads=1)
    sess = tf.InteractiveSession(config=session_conf)
    # use all cores
    # sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    # Check for an existing checkpoint: load the weights from it if it exists, otherwise load them from the binary file
    print('Looking for a checkpoint...')
    saver = tf.train.Saver()
    _ = weights_loader.load(sess, weights_path, ckpt_folder_path, saver)

    # Preprocess the input image
    print('Preprocessing...')
    preprocessed_image = preprocessing(input_img_path, input_height,
                                       input_width)

    t = []
    layers = [
        net.o1, net.o2, net.o3, net.o4, net.o5, net.o6, net.o7, net.o8, net.o9
    ]
    shapes = []
    for i in range(9):
        c = []
        for j in range(10):
            start = time.time()
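
The snippet breaks off inside the timing loop. One way it could continue, timing every layer output ten times and keeping the per-layer minimum (net.x as the input placeholder is an assumption, not taken from the original):

            out = sess.run(layers[i], feed_dict={net.x: preprocessed_image})
            c.append(time.time() - start)
        t.append(min(c))  # best of 10 runs for this layer
        shapes.append(out.shape)
    for layer_time, shape in zip(t, shapes):
        print('{:.4f} s  {}'.format(layer_time, shape))
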
Example #15
def main(_):

    # Definition of the paths
    weights_path = './tiny-yolo-voc.weights'
    output_image_path = './output.jpg'
    ckpt_folder_path = './ckpt/'

    # Definition of the parameters
    input_height = 416
    input_width = 416
    score_threshold = 0.3
    iou_threshold = 0.3

    # Definition of the session
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    # Check for an existing checkpoint: load the weights from it if it exists, otherwise load them from the binary file
    print('Looking for a checkpoint...')
    saver = tf.train.Saver()
    _ = weights_loader.load(sess, weights_path, ckpt_folder_path, saver)

    video_capture = cv2.VideoCapture('./first_run.mov')
    n_frames = 0
    seconds = 0.0
    fps = 0.0

    while True:

        # Start time
        start = time.time()

        # Capture frame-by-frame
        _, frame = video_capture.read()
        n_frames = n_frames + 1

        # Preprocess the input image
        #print('Preprocessing...')
        preprocessed_image = preprocessing(frame, input_height, input_width)

        # Compute the predictions on the input image
        #print('Computing predictions...')
        predictions = []
        predictions = inference(sess, preprocessed_image)
        print(predictions)
        # Postprocess the predictions and save the output image
        #print('Postprocessing...')
        output_image = postprocessing(predictions, frame, score_threshold,
                                      iou_threshold, input_height, input_width)

        # End time
        end = time.time()

        # Time elapsed
        seconds = (end - start)
        # Calculate frames per second
        fps = (fps + (1 / seconds)) / 2

        # Display the resulting frame with fps
        cv2.putText(output_image, str(fps), (10, 50), cv2.FONT_HERSHEY_SIMPLEX,
                    1, (0, 0, 255), 3)
        cv2.imshow('Video', output_image)

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
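
The update fps = (fps + (1 / seconds)) / 2 gives the newest frame the same weight as the whole history, so the displayed value can fluctuate. An exponential moving average is a slightly smoother alternative (a sketch, not part of the original):

        fps = 0.9 * fps + 0.1 * (1.0 / seconds)  # ~90% history, ~10% latest measurement
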
Example #16
def main(_):
    counter = 0
    # Definition of the paths
    weights_path = 'yolov2-tiny-voc.weights'
    input_img_path = 'test_zzh_1.jpg'
    output_image_path = 'output/zzh_out.jpg'

    # If you do not have the checkpoint yet, keep it like this! It will be created automatically the first time you run test.py
    ckpt_folder_path = './ckpt/'

    # Definition of the parameters
    input_height = 416
    input_width = 416
    score_threshold = 0.1
    iou_threshold = 0.1

    # Definition of the session
    sess = tf.InteractiveSession()

    tf.global_variables_initializer().run()

    # Check for an existing checkpoint: load the weights from it if it exists, otherwise load them from the binary file
    print('Looking for a checkpoint...')
    saver = tf.train.Saver()
    _ = weights_loader.load(sess, weights_path, ckpt_folder_path, saver)

    # ---------------------q-learning---------------------

    N_STATES = [1, 2, 3, 4, 5, 6, 7, 8, 9]  # 9 possible states
    ACTIONS = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]  # possible actions
    EPSILON = 0.9  # probability of following the greedy policy
    ALPHA = 0.1  # learning rate
    GAMMA = 0.9  # discount factor
    episode = 0

    q_table = q_learning.build_q_table(N_STATES, ACTIONS)  # Initialize Q(s, a)
    print(q_table)
    step_counter = 0  # init step
    S = 1  # init state
    q_learning.update_env(episode, step_counter)  # update env

    # ---------------------q-learning---------------------

    # ------------------------ Predefinitions and data loading -----------------------
    # ----------------- Connect to the target server -------------------------------
    socketIO = SocketIO('10.8.204.12', 3333, LoggingNamespace)
    socketIO.on('connection', on_connect)
    socketIO.on('disconnect', on_disconnect)
    socketIO.on('reconnect', on_reconnect)
    # -------------------------------------------------------------
    time_1 = 0
    time_2 = 0
    Fps_past = 0
    Fps = 0

    host = '127.0.0.1'
    port = 12345
    buffsize = 65535

    ADDR = (host, port)

    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    soc.bind(ADDR)
    soc.listen(2)

    print('Wait for connection ...')
    soc_client, addr = soc.accept()
    print("Connection from :", addr)

    # Keep a long-lived connection and receive commands continuously; each recv refreshes data
    while True:
        data = ""
        data = soc_client.recv(buffsize).decode()
        while True:
            # While data is still the initial empty value, wait briefly so this loop does not hog the CPU
            if data == "":
                time.sleep(0.1)
            else:
                break
        # Server side: handle the operation for each command sent by the client
        if data == "time_start":
            # Record the overall start time (not used)
            start_time = time.time()
            print(start_time)
            soc_client.send("done".encode())

        elif data == "time_stop":
            # Stop recording the overall time (not used)
            stop_time = time.time()
            total_time = stop_time - start_time
            print(total_time)
            soc_client.send("done".encode())

        elif data == "finish":
            # Close the connection (not used)
            soc_client.close()

        elif data == "time_1":
            # Receive time_1 (initialization time)
            soc_client.send("recive".encode())
            time_1 = soc_client.recv(buffsize).decode()
            time_1 = float(time_1)
            soc_client.send("done".encode())

        elif data == "time_2":
            # Receive time_2 (preprocessing time)
            soc_client.send("recive".encode())
            time_2 = soc_client.recv(buffsize).decode()
            time_2 = float(time_2)
            soc_client.send("done".encode())

        elif data == "send_original_image":
            # Receive the original input image (the download time is recorded as recive_time)
            soc_client.send("recive".encode())
            recive_time = time.time()
            data_batches = ""
            new_data = ""

            # Start downloading
            print("downloading...")
            while True:
                new_data = soc_client.recv(buffsize).decode()

                # Handle the final chunk (terminated by '%')
                if ((new_data[-1] == '%')):
                    data_batches = data_batches + new_data[:-1]
                    break
                # Loop body: accumulate this chunk
                data_batches = data_batches + new_data

            print("processing...")
            # Parse the downloaded image into an np array (note: convert it to np.uint8 before imshow, otherwise the window shows black; a common pitfall)
            data_text = json.loads(data_batches)
            input_image = np.array(data_text)

            # Record the receive time (download + parsing)
            recive_time = time.time() - recive_time
            print("total time: {}".format(recive_time))
            soc_client.send("done".encode())

        elif data == "send_weight_cutpoint":
            # ---------------------q-learning---------------------
            Fps_past = Fps
            A = q_learning.choose_action(S, q_table, EPSILON)

            # ---------------------q-learning---------------------

            soc_client.send("{}".format(A).encode())
            # Timestamp p4
            point_4 = time.time()
            data_batches = ""
            new_data = ""
            cut_point = ""

            # Start downloading
            print("downloading...")
            while True:
                new_data = soc_client.recv(buffsize).decode()
                # Handle the final chunk (the cut point arrives as the second-to-last character)
                if ((new_data[-1] == '%')):
                    data_batches = data_batches + new_data[:-2]
                    cut_point = new_data[-2:-1]
                    # print(cut_point)
                    break
                # Loop body: accumulate this chunk
                data_batches = data_batches + new_data

            # Timestamp p5 (time_3: time spent downloading the weights)
            point_5 = time.time()
            time_3 = point_5 - point_4
            print("downloading time: {}".format(time_3))

            print("processing...")
            # Load and parse the data
            data_text = json.loads(data_batches)
            predictions = np.array(data_text)
            cut_point = int(cut_point)
            predictions = inference(sess, predictions, cut_point)

            # Timestamp p6 (time_4: time spent parsing the data and running the rest of the network)
            point_6 = time.time()
            time_4 = point_6 - point_5
            print("processing time: {}".format(time_4))

            print('Postprocessing...')
            # Produce the output image
            output_image = postprocessing(predictions, input_image,
                                          score_threshold, iou_threshold,
                                          input_height, input_width)
            # Timestamp p7
            # time_5: postprocessing time
            # Compute the backend time, the intermediate download time and the frontend preprocessing time (without initialization), then derive the FPS
            point_7 = time.time()
            time_5 = point_7 - point_6
            time_backend = time_4 + time_5
            time_downloading = time_3 + recive_time
            time_frontend = time_2
            time_total = time_backend + time_frontend + time_downloading
            Fps = 1. / time_total
            fps = "fps = {}".format(Fps)

            # Convert the image to uint8, otherwise the display would be black
            # Draw the FPS onto the processed output image
            output_image = np.uint8(output_image)
            cv2.putText(output_image, str(fps), (5, 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 255), 1)
            #cv2.imshow("hello", output_image)
            #if cv2.waitKey(1) & 0xFF == ord('q'):
            #break

            # Print all the timings of interest
            counter += 1
            print("time_load_model = {}".format(time_1))
            print("time_preprocess = {}".format(time_2))
            print("time_send_original_image = {}".format(recive_time))
            print("time_downloading = {}".format(time_downloading))
            print("time_load_jsons = {}".format(time_4))
            print("time_postprocess = {}".format(time_5))
            print("time_backend = {}".format(time_backend))
            print("time_frontend = {}".format(time_frontend))
            print("Fps = {}".format(Fps))

            # ---------------------q-learning---------------------

            S_, R = q_learning.get_env_feedback(A, Fps_past, Fps)
            print(str(S_) + "  " + str(S))
            q_predict = q_table.loc[S, A]
            with pd.option_context('display.max_rows', None,
                                   'display.max_columns', None):
                print(q_table)
            q_target = R + GAMMA * q_table.loc[S_, :].max(skipna=True)
            q_table.loc[S, A] += ALPHA * (q_target - q_predict)  # update the q_table
            S = S_  # move on to the next state
            q_learning.update_env(episode, step_counter + 1)  # update the environment

            step_counter += 1

            # ---------------------q-learning---------------------
            # -------------------- Send on a separate thread ------------
            # Create a thread
            thread_socketIO = myThread(1, socketIO, "Frame-" + str(counter),
                                       output_image, Fps, cut_point,
                                       q_table.values)

            # Start a thread
            thread_socketIO.start()
            thread_socketIO.join()
            # -------------------- Send on a separate thread ------------
            # Send the "done" acknowledgement
            soc_client.send("done".encode())
            # Wait for the next command

            # ------------send---------
            pass
        else:
            pass
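
The q_learning module itself is not on this page. A compact sketch of helpers with the interfaces used above, assuming a pandas Q-table indexed by the nine cut-point states, an epsilon-greedy action choice, and a reward of +1 when the FPS improved and -1 otherwise (all of this is an assumption about the missing module):

import numpy as np
import pandas as pd

def build_q_table(n_states, actions):
    # One row per state (cut point 1..9), one column per action
    return pd.DataFrame(np.zeros((len(n_states), len(actions))),
                        index=n_states, columns=actions)

def choose_action(state, q_table, epsilon):
    row = q_table.loc[state, :]
    if np.random.uniform() > epsilon or (row == 0).all():
        return np.random.choice(row.index)  # explore
    return row.idxmax()                     # exploit

def get_env_feedback(action, fps_past, fps):
    next_state = int(action)  # the chosen cut point becomes the next state
    reward = 1 if fps > fps_past else -1
    return next_state, reward

def update_env(episode, step_counter):
    print('episode {}, step {}'.format(episode, step_counter))
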
Example #17
def main(_):
    counter = 0

    # Definition of the paths
    weights_path = 'yolov2-tiny-voc.weights'
    input_img_path = 'test_zzh_1.jpg'
    output_image_path = 'output/zzh_out.jpg'

    # If you do not have the checkpoint yet, keep it like this! It will be created automatically the first time you run test.py
    ckpt_folder_path = './ckpt/'

    # Definition of the parameters
    input_height = 416
    input_width = 416
    score_threshold = 0.1
    iou_threshold = 0.1

    # Definition of the session
    sess = tf.InteractiveSession()

    tf.global_variables_initializer().run()

    # Check for an existing checkpoint: load the weights from it if it exists, otherwise load them from the binary file
    print('Looking for a checkpoint...')
    saver = tf.train.Saver()
    _ = weights_loader.load(sess, weights_path, ckpt_folder_path, saver)

    # ---------------------q-learning---------------------

    N_STATES = [1, 2, 3, 4, 5, 6, 7, 8, 9]  # 9种states
    ACTIONS = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]  # 可能的actions
    EPSILON = 0.9  # greedy policy 的概率
    ALPHA = 0.1  # for learning rate 的大小
    GAMMA = 0.9  # for discount factor 的大小
    episode = 0

    q_table = q_learning.build_q_table(N_STATES, ACTIONS)  # Initialize Q(s, a)
    print(q_table)
    step_counter = 0  # init step
    S = 1  # init state
    q_learning.update_env(episode, step_counter)  # update env

    # ---------------------q-learning---------------------

    # ------------------------预定义和加载数据-----------------------
    # -----------------连接目标服务器-------------------------------
    socketIO = SocketIO('192.168.1.104', 3333, LoggingNamespace)
    socketIO.on('connection', on_connect)
    socketIO.on('disconnect', on_disconnect)
    socketIO.on('reconnect', on_reconnect)
    # -------------------------------------------------------------
    time_1 = 0
    time_2 = 0
    Fps_past = 0
    Fps = 0

    host = '192.168.1.100'
    port = 12345
    buffsize = 65535

    ADDR = (host, port)

    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    soc.bind(ADDR)
    soc.listen(2)

    print('Wait for connection ...')
    soc_client, addr = soc.accept()
    print("Connection from :", addr)

    while True:
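        # NOTE: p is assumed to be a psutil.Process handle (e.g. psutil.Process(os.getpid())) created elsewhere in the original file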
        info = p.memory_full_info()
        memory = info.uss / 1024. / 1024.
        print('Memory used_begin: {:.2f} MB'.format(memory))

        data = ""
        data = soc_client.recv(buffsize).decode()
        while True:
            if data == "":
                time.sleep(0.1)
            else:
                break
        # Server side: handle the operation for each command sent by the client
        if data == "time_start":
            # Record the overall start time
            start_time = time.time()
            print(start_time)
            soc_client.send("done".encode())

        elif data == "time_stop":
            stop_time = time.time()
            total_time = stop_time - start_time
            print(total_time)
            soc_client.send("done".encode())

        elif data == "finish":
            soc_client.close()

        elif data == "time_1":
            soc_client.send("recive".encode())
            time_1 = soc_client.recv(buffsize).decode()
            time_1 = float(time_1)
            soc_client.send("done".encode())

        elif data == "time_2":
            soc_client.send("recive".encode())
            time_2 = soc_client.recv(buffsize).decode()
            time_2 = float(time_2)
            soc_client.send("done".encode())

        elif data == "send_weight_cutpoint":

            # ---------------------q-learning---------------------
            Fps_past = Fps
            A = q_learning.choose_action(S, q_table, EPSILON)

            # ---------------------q-learning---------------------

            soc_client.send("{}".format(A).encode())
            # Timestamp p4
            point_4 = time.time()
            data_batches = ""
            new_data = ""
            cut_point = ""

            print("downloading...")
            while True:
                new_data = soc_client.recv(buffsize).decode()
                # print(new_data)
                # print(len(new_data))
                # Handle the final chunk (the cut point arrives as the second-to-last character)
                if ((new_data[-1] == '%')):
                    data_batches = data_batches + new_data[:-2]
                    cut_point = new_data[-2:-1]
                    print(cut_point)
                    break
                data_batches = data_batches + new_data

            # Timestamp p5
            point_5 = time.time()
            time_3 = point_5 - point_4
            print("downloading time: {}".format(time_3))

            print("processing...")
            # load
            data_text = json.loads(data_batches)
            predictions = np.array(data_text)
            cut_point = int(cut_point)
            predictions = inference(sess, predictions, cut_point)

            # Timestamp p6
            point_6 = time.time()
            time_4 = point_6 - point_5
            print("processing time: {}".format(time_4))

            print('Postprocessing...')
            # Produce the output image
            output_image = postprocessing(predictions, input_img_path,
                                          score_threshold, iou_threshold,
                                          input_height, input_width)
            # Timestamp p7
            point_7 = time.time()
            time_5 = point_7 - point_6
            time_backend = time_3 + time_4 + time_5
            time_frontend = time_2
            time_total = time_backend + time_frontend
            Fps = 1. / time_total

            cv2.imwrite(output_image_path, output_image)
            counter += 1
            print("time_load_model = {}".format(time_1))
            print("time_preprocess = {}".format(time_2))
            print("time_downloading = {}".format(time_3))
            print("time_load_jsons = {}".format(time_4))
            print("time_postprocess = {}".format(time_5))
            print("time_backend = {}".format(time_backend))
            print("time_frontend = {}".format(time_frontend))
            print("Fps = {}".format(Fps))

            # ---------------------q-learning---------------------

            S_, R = q_learning.get_env_feedback(A, Fps_past, Fps)
            print(str(S_) + "  " + str(S))
            q_predict = q_table.loc[S, A]
            with pd.option_context('display.max_rows', None,
                                   'display.max_columns', None):
                print(q_table)
            q_target = R + GAMMA * q_table.loc[S_, :].max(skipna=True)
            q_table.loc[S, A] += ALPHA * (q_target - q_predict)  # update the q_table
            S = S_  # move on to the next state
            q_learning.update_env(episode, step_counter + 1)  # update the environment

            step_counter += 1

            # ---------------------q-learning---------------------
            # -------------------- Send on a separate thread ------------
            # Create a thread
            thread_socketIO = myThread(1, socketIO, "Frame-" + str(counter),
                                       output_image, Fps)

            # Start a thread
            thread_socketIO.start()
            thread_socketIO.join()
            # -------------------- Send on a separate thread ------------

            soc_client.send("done".encode())
            pass

        else:
            pass