Example #1
def eval_all(args):
    devs = args.devices.split(',')
    misc.ensure_dir(config.eval_dir)
    eval_file = open(os.path.join(config.eval_dir, 'results.txt'), 'a')
    dataset_dict = dataset.val_dataset()
    records = dataset_dict['records']
    nr_records = len(records)
    read_func = dataset_dict['read_func']

    nr_devs = len(devs)
    for epoch_num in range(args.start_epoch, args.end_epoch + 1):
        model_file = osp.join(
            config.output_dir, 'model_dump',
            'epoch_{:d}'.format(epoch_num) + '.ckpt')

        pbar = tqdm(total=nr_records)
        all_results = []
        if nr_devs == 1:
            func, inputs = load_model(model_file, devs[0])
            for record in records:
                data_dict = read_func(record)
                result_dict = inference(func, inputs, data_dict)
                all_results.append(result_dict)

                if args.show_image:
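                    # result_dict['result_boxes'] holds the model's detections; each box object carries its score and a draw() method.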
                    image = result_dict['data']
                    for db in result_dict['result_boxes']:
                        if db.score > config.test_vis_threshold:
                            db.draw(image)
                    # if 'boxes' in result_dict.keys():
                    #     for db in result_dict['boxes']:
                    #         db.draw(image)
                    path_img_unique = '/home/erik/Documents/light_head_rcnn-master/output/predictions/'+str(uuid.uuid4())+'.jpg'
                    cv2.imwrite(path_img_unique, image)
                    cv2.imshow('image', image)
                    cv2.waitKey(0)
                pbar.update(1)
        else:
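            # More than one device: shard the records across devices and collect one result per record through a shared queue.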
            nr_image = math.ceil(nr_records / nr_devs)
            result_queue = Queue(500)
            procs = []
            for i in range(nr_devs):
                start = i * nr_image
                end = min(start + nr_image, nr_records)
                split_records = records[start:end]
                proc = Process(target=worker, args=(
                    model_file, devs[i], split_records, read_func,
                    result_queue))
                print('process:%d, start:%d, end:%d' % (i, start, end))
                proc.start()
                procs.append(proc)
            for i in range(nr_records):
                t = result_queue.get()
                all_results.append(t)
                pbar.update(1)

            for p in procs:
                p.join()

        save_filename = save_result(all_results, config.eval_dir, model_file)
        print('Save to %s finished, start evaluation!' % save_filename)
        saved_stdout = sys.stdout
        sys.stdout = eval_file
        print("\nevaluation epoch {}".format(epoch_num))

        # if config.test_save_type == 'motherboard':  # 'coco':
        #     from datasets_odgt.cocoval import cocoval
        #     cocoval(save_filename)
        # else:
        #     print("not implemented")
        #     embed()
        sys.stdout = saved_stdout
        eval_file.flush()

    eval_file.close()
    print("\n")
Example #2
import os
import os.path as osp
import network_desp
import tensorflow as tf
from tqdm import tqdm
from utils.py_utils import misc

if __name__ == '__main__':

    parser = make_parser()
    args = parser.parse_args()
    args.devices = misc.parse_devices(args.devices)
    if args.end_epoch == -1:
        args.end_epoch = args.start_epoch

    devs = args.devices.split(',')
    misc.ensure_dir(config.eval_dir)
    eval_file = open(os.path.join(config.eval_dir, 'results.txt'), 'a')
    dataset_dict = dataset.val_dataset()
    records = dataset_dict['records']
    nr_records = len(records)
    read_func = dataset_dict['read_func']

    nr_devs = len(devs)
    for epoch_num in range(args.start_epoch, args.end_epoch + 1):
        model_file = osp.join(config.output_dir, 'model_dump',
                              'epoch_{:d}'.format(epoch_num) + '.ckpt')
        pbar = tqdm(total=nr_records)
        all_results = []

        if nr_devs > 1:
            with tf.variable_scope('', reuse=tf.AUTO_REUSE):
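This entry point relies on make_parser(), which the excerpt does not show. A minimal sketch, assuming a plain argparse parser whose destination names match how args is used across these examples (the defaults and flag spellings are illustrative):

import argparse

def make_parser():
    # Sketch only: argument names inferred from args.devices, args.start_epoch,
    # args.end_epoch and args.show_image; the real parser may differ.
    parser = argparse.ArgumentParser()
    parser.add_argument('--devices', type=str, default='0')
    parser.add_argument('--start_epoch', type=int, default=1)
    parser.add_argument('--end_epoch', type=int, default=-1)
    parser.add_argument('--show_image', action='store_true')
    return parser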
Example #3
def eval_all(args):
    devs = args.devices.split(',')
    misc.ensure_dir(config.eval_dir)
    eval_file = open(os.path.join(config.eval_dir, 'results.txt'), 'a')
    dataset_dict = dataset.val_dataset()
    records = dataset_dict['records']
    nr_records = len(records)
    read_func = dataset_dict['read_func']

    nr_devs = len(devs)
    for epoch_num in range(args.start_epoch, args.end_epoch + 1):
        model_file = osp.join(
            config.output_dir, 'model_dump',
            'epoch_{:d}'.format(epoch_num) + '.ckpt')

        pbar = tqdm(total=nr_records)
        all_results = []
        if nr_devs == 1:
            func, inputs = load_model(model_file, devs[0])
            for record in records:
                data_dict = read_func(record)
                result_dict = inference(func, inputs, data_dict)
                all_results.append(result_dict)

                if args.show_image:
                    image = result_dict['data']
                    for db in result_dict['result_boxes']:
                        if db.score > config.test_vis_threshold:
                            db.draw(image)
                    if 'boxes' in result_dict.keys():
                        for db in result_dict['boxes']:
                            db.draw(image)
                    cv2.imwrite('/tmp/hehe.png', image)
                    # cv2.imshow('image', image)
                    # cv2.waitKey(0)
                pbar.update(1)
        else:
            nr_image = math.ceil(nr_records / nr_devs)
            result_queue = Queue(500)
            procs = []
            for i in range(nr_devs):
                start = i * nr_image
                end = min(start + nr_image, nr_records)
                split_records = records[start:end]
                proc = Process(target=worker, args=(
                    model_file, devs[i], split_records, read_func,
                    result_queue))
                print('process:%d, start:%d, end:%d' % (i, start, end))
                proc.start()
                procs.append(proc)
            for i in range(nr_records):
                t = result_queue.get()
                all_results.append(t)
                pbar.update(1)

            for p in procs:
                p.join()

        save_filename = save_result(all_results, config.eval_dir, model_file)
        print('Save to %s finished, start evaluation!' % save_filename)
        saved_stdout = sys.stdout
        sys.stdout = eval_file
        print("\nevaluation epoch {}".format(epoch_num))

        if config.test_save_type == 'coco':
            from datasets_odgt.cocoval import cocoval
            cocoval(save_filename)
        else:
            print("not implement")
            embed()
        sys.stdout = saved_stdout
        eval_file.flush()

    eval_file.close()
    print("\n")
Example #4
def eval_all(args):
    devs = args.devices.split(',')
    misc.ensure_dir(config.eval_dir)
    eval_file = open(os.path.join(config.eval_dir, 'results.txt'), 'a')

    '''e.g. {
        'records': files,
        'nr_records': total_files,
        'read_func': read_func,
    }'''
    dataset_dict = dataset.val_dataset()
    records = dataset_dict['records']
    nr_records = len(records)
    read_func = dataset_dict['read_func']  # data preprocessing

    nr_devs = len(devs)
    for epoch_num in range(args.start_epoch, args.end_epoch + 1):
        model_file = osp.join(
            config.output_dir, 'model_dump',
            'epoch_{:d}.ckpt-{:d}'.format(epoch_num, args.step))

        pbar = tqdm(total=nr_records)
        all_results = []
        if nr_devs == 1:
            func, inputs = load_model(model_file, devs[0])
            for record in records:
                data_dict = read_func(record)  # returns a dict with this test file's information
                result_dict = inference(func, inputs, data_dict)
                all_results.append(result_dict)

                # result_dict contains both the ground truth and the detections
                # result_dict['boxes'] --> ground-truth boxes
                # result_dict['result_boxes'] --> detected boxes
                if args.show_image:
                    image = result_dict['data']
                    # draw the detected boxes here; db is a box object whose score field holds the detection confidence
                    for db in result_dict['result_boxes']:
                        if db.score > config.test_vis_threshold:
                            db.draw(image)

                    # draw the ground-truth boxes
                    if 'boxes' in result_dict.keys():
                        for db in result_dict['boxes']:
                            db.draw(image)

                    fname = os.path.basename(data_dict['image_path'])
                    cv2.imwrite(os.path.join('images', fname), image)
                    # cv2.imshow('image', image)
                    # cv2.waitKey(0)
                # pbar.update(1)
        else:
            nr_image = math.ceil(nr_records / nr_devs)
            result_queue = Queue(500)
            procs = []
            for i in range(nr_devs):
                start = i * nr_image
                end = min(start + nr_image, nr_records)
                split_records = records[start:end]
                proc = Process(target=worker, args=(
                    model_file, devs[i], split_records, read_func,
                    result_queue))
                print('process:%d, start:%d, end:%d' % (i, start, end))
                proc.start()
                procs.append(proc)
            for i in range(nr_records):
                t = result_queue.get()
                all_results.append(t)
                pbar.update(1)

            for p in procs:
                p.join()

        save_filename = save_result(all_results, config.eval_dir, model_file)
        print('Save to %s finished, start evaluation!' % save_filename)
        saved_stdout = sys.stdout
        sys.stdout = eval_file
        print("\nevaluation epoch {}".format(epoch_num))

        if config.test_save_type == 'adas':
            from datasets_odgt.cocoval import cocoval
            cocoval(save_filename)
        else:
            print("not implement")
            embed()
        sys.stdout = saved_stdout
        eval_file.flush()

    eval_file.close()
    print("\n")
Example #5
def eval_all(args):
    devs = args.devices.split(',')
    misc.ensure_dir(config.eval_dir)

    '''e.g. {
        'records': files,
        'nr_records': total_files,
        'read_func': read_func,
    }'''
    dataset_dict = dataset.val_dataset()
    records = dataset_dict['records']
    nr_records = len(records)
    read_func = dataset_dict['read_func']  # data preprocessing

    nr_devs = len(devs)
    model_file = osp.join(
        config.output_dir, 'model_dump',
        'light-head.ckpt-{:d}'.format(args.step))
  
    #pbar = tqdm(total=nr_records)
    all_results = []
    if nr_devs == 1:
        print('restore from ckpt %s' % model_file)
        func, inputs = load_model(model_file, devs[0])
        num_records = len(records)
        for i in range(num_records):
            record = records[i]
            '''data_dict, e.g. {
                'data': xx,
                'boxes': xx,
                'image_id': xx,
                'image_path': xx,
            }
            '''
            sys.stdout.write('im_detect: {:d}/{:d}\r'.format(i, num_records))
            data_dict = read_func(record)  # returns a dict with this test file's information
            result_dict = inference(func, inputs, data_dict)
            all_results.append(result_dict)
            # result_dict contains both the ground truth and the detections
            # result_dict['boxes'] --> ground-truth boxes
            # result_dict['result_boxes'] --> detected boxes
            if args.show_image:
                image = result_dict['data']
                # draw the detected boxes here; db is a box object whose score field holds the detection confidence
                for db in result_dict['result_boxes']:
                    if db.score > config.test_vis_threshold:
                        # see lib/detection_opr/box_utils/box.py
                        db.draw(image)

                # draw the ground-truth boxes
                if 'boxes' in result_dict.keys():
                    for db in result_dict['boxes']:
                        db.draw(image)

                fname = os.path.basename(data_dict['image_path'])
                cv2.imwrite(os.path.join('eval', fname), image)
                # cv2.imshow('image', image)
                # cv2.waitKey(0)
            # pbar.update(1)
    else:
        nr_image = math.ceil(nr_records / nr_devs)
        result_queue = Queue(500)
        procs = []
        for i in range(nr_devs):
            start = i * nr_image
            end = min(start + nr_image, nr_records)
            split_records = records[start:end]
            proc = Process(target=worker, args=(
                model_file, devs[i], split_records, read_func,
                result_queue))
            print('process:%d, start:%d, end:%d' % (i, start, end))
            proc.start()
            procs.append(proc)
        for i in range(nr_records):
            t = result_queue.get()
            all_results.append(t)
            #pbar.update(1)

        for p in procs:
            p.join()

    save_result_v2(all_results, config.eval_dir)
    print('Save result finished, start evaluation!')
    if config.test_save_type == 'adas':
        from datasets_odgt.adasval import adasval
        adasval(cfg)
    else:
        print("not implement")
        embed()
Example #6
def predict_video(args):
    devs = args.devices.split(',')
    misc.ensure_dir(config.eval_dir)
    #vidpath = 'http://192.168.1.137:8080/video' #Home IP Camera
    vidpath = 'http://10.42.0.37:8080/video' #Lab IP Camera
    f = open("step-by-step_guide.txt", "r")
    steps = [x.split('\n') for x in f.read().split("#") if x != '']
    for i in range(len(steps)):
        steps[i] = [x for x in steps[i] if x != '']
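    # The motherboard database has one '#'-separated record per line; field 0 is the class tag matched against detections below.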
    m = open("motherboard_database.txt", "r")
    mb_info = [x.split('#') for x in m.read().split("\n") if x != '']
    current_mb = []
    cap = cv2.VideoCapture(vidpath)
    w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    font = cv2.FONT_ITALIC
    step = 0
    epoch_num = args.predict_epoch
    model_file = osp.join(
        config.output_dir, 'model_dump',
        'epoch_{:d}'.format(epoch_num) + '.ckpt')
    func, inputs = load_model(model_file, devs[0])
    avg_fps = []
    thresh = 0.4
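    # Per-step confidence thresholds, keyed by guide step index; steps without an entry fall back to the 0.4 default.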
    thresh_list = {
        2: 0.6, 4: 0.4, 5: 0.4, 8: 0.8, 9: 0.4, 11: 0.6, 12: 0.4,
        14: 0.4, 15: 0.4, 17: 0.5, 18: 0.4, 19: 0.4, 20: 0.7, 22: 0.2,
        24: 0.4, 25: 0.4, 27: 0.4, 28: 0.4, 30: 0.8, 31: 0.95, 33: 0.7,
        34: 0.4, 36: 0.7, 37: 0.4,
    }
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    #out = cv2.VideoWriter('/home/erik/Documents/light_head_rcnn-master/data/motherboard/test/test_to_result/output_video.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 15, (frame_width,frame_height))
    while True:
        #fps_start = time.time()
        ret, frame = cap.read()
        cv2.namedWindow('image', cv2.WINDOW_NORMAL)
        if ret and step < len(steps):
            draw_steps(steps, step, frame, w, h, font, current_mb)
            specify_classes = get_specific_classes(steps, step)
            if specify_classes != 0:
                result_dict = inference(func, inputs, frame)
                if type(specify_classes) == str:
                    specify_classes = [specify_classes]
                if step in thresh_list.keys():
                    thresh = thresh_list[step]
                else:
                    thresh = 0.4
                for db in result_dict['result_boxes']:
                    if db.score > thresh and db.tag in specify_classes:  # only draw boxes whose score clears the preferred threshold
                        #print(db)
                        db.draw(frame)
                        if db.tag in config.class_names[1:3+1]:
                            for i in mb_info:
                                if db.tag == i[0]:
                                    current_mb = i
                            
                cv2.imshow('image', frame)  # display results
                #out.write(frame)
            else:
                cv2.imshow('image', frame)  # display results
                #out.write(frame)
            #fps_end = time.time()
            #frame_rate = 1 / (fps_end - fps_start)
            #print("FPS: ",frame_rate)
            #print(thresh)
            #avg_fps.append(frame_rate)
            k = cv2.waitKey(1)
            if k != -1:  # a key was pressed
                if k == ord(' '): #Click 'space' to go to next step
                    step += 1
                elif k == ord('i'): #increase threshold
                    if thresh < 1.0:
                        thresh += 0.05
                elif k == ord('d'):
                    if thresh > 0.1: #decrease threshold
                        thresh -= 0.05
                elif k == ord('p') and step > 0: #Click 'p' to go to previous step if step > 0
                    step -= 1
                elif k == ord('q'): #Click 'q' to stop the guide
                    print("The step-by-step guide interrupted...")
                    break

        else:
            break
    cap.release()
    #out.release()
    cv2.destroyAllWindows()
    #total_fps = sum(avg_fps)/len(avg_fps)
    #print("AVERAGE FPS: ",total_fps)
    print("\n")