Example #1
0
def get_data(record, device):
    """Load and preprocess one validation record for inference.

    Args:
        record: serialized record understood by ``dataset.val_dataset``.
        device: unused in this function; kept for interface compatibility
            with callers that pass a device identifier.

    Returns:
        Tuple ``(transposed_img, gt_boxes, im_info, ID)`` where
        ``transposed_img`` is a contiguous float32 NCHW array with a
        leading batch axis of 1 and ``im_info`` is a (1, 5) float32 array
        of [height, width, scale, original_height, original_width].
    """
    data = dataset.val_dataset(record)
    image, gt_boxes, ID = data['data'], data['boxes'], data['ID']
    # Optionally resize so short/long sides fit the configured bounds.
    # (Idiomatic truthiness test instead of `== False`.)
    if not config.eval_resize:
        resized_img, scale = image, 1
    else:
        resized_img, scale = dataset.resize_img_by_short_and_max_size(
            image, config.eval_image_short_size, config.eval_image_max_size)

    original_height, original_width = image.shape[0:2]
    height, width = resized_img.shape[0:2]
    # HWC -> NCHW with an added batch axis, contiguous float32.
    transposed_img = np.ascontiguousarray(
        resized_img.transpose(2, 0, 1)[None, :, :, :], dtype=np.float32)
    im_info = np.array(
        [height, width, scale, original_height, original_width],
        dtype=np.float32)[None, :]
    return transposed_img, gt_boxes, im_info, ID
Example #2
0
def eval_all(args):
    """Run inference for every checkpoint epoch and log evaluation results.

    For each epoch in ``[args.start_epoch, args.end_epoch]``, restores the
    epoch's checkpoint, runs the whole validation set through the model
    (in-process for a single device, otherwise one worker process per
    device), optionally draws/saves/displays the detections, saves the
    collected results, and appends evaluation output to ``results.txt``.

    Args:
        args: parsed CLI namespace; uses ``devices`` (comma-separated
            device list), ``start_epoch``, ``end_epoch`` and ``show_image``.
    """
    devs = args.devices.split(',')
    nr_devs = len(devs)
    misc.ensure_dir(config.eval_dir)
    dataset_dict = dataset.val_dataset()
    records = dataset_dict['records']
    nr_records = len(records)
    read_func = dataset_dict['read_func']

    # Context manager guarantees the results file is closed even if an
    # exception escapes the loop (the original leaked the handle).
    with open(os.path.join(config.eval_dir, 'results.txt'), 'a') as eval_file:
        for epoch_num in range(args.start_epoch, args.end_epoch + 1):
            model_file = osp.join(
                config.output_dir, 'model_dump',
                'epoch_{:d}.ckpt'.format(epoch_num))

            pbar = tqdm(total=nr_records)
            all_results = []
            if nr_devs == 1:
                # Single device: run inference synchronously in-process.
                func, inputs = load_model(model_file, devs[0])
                for record in records:
                    data_dict = read_func(record)
                    result_dict = inference(func, inputs, data_dict)
                    all_results.append(result_dict)

                    if args.show_image:
                        image = result_dict['data']
                        for db in result_dict['result_boxes']:
                            if db.score > config.test_vis_threshold:
                                db.draw(image)
                        # TODO(review): hard-coded user-specific output dir;
                        # consider deriving from config.eval_dir instead.
                        path_img_unique = (
                            '/home/erik/Documents/light_head_rcnn-master/'
                            'output/predictions/' + str(uuid.uuid4()) + '.jpg')
                        cv2.imwrite(path_img_unique, image)
                        cv2.imshow('image', image)
                        cv2.waitKey(0)
                    pbar.update(1)
            else:
                # Multiple devices: shard the records evenly, one worker
                # process per device, results collected via a shared queue.
                nr_image = math.ceil(nr_records / nr_devs)
                result_queue = Queue(500)
                procs = []
                for i in range(nr_devs):
                    start = i * nr_image
                    end = min(start + nr_image, nr_records)
                    split_records = records[start:end]
                    proc = Process(target=worker, args=(
                        model_file, devs[i], split_records, read_func,
                        result_queue))
                    print('process:%d, start:%d, end:%d' % (i, start, end))
                    proc.start()
                    procs.append(proc)
                for _ in range(nr_records):
                    all_results.append(result_queue.get())
                    pbar.update(1)
                for p in procs:
                    p.join()
            pbar.close()

            save_filename = save_result(all_results, config.eval_dir,
                                        model_file)
            # Fixed typo: "evaulation" -> "evaluation".
            print('Save to %s finished, start evaluation!' % save_filename)
            # Redirect prints into the results file; try/finally guarantees
            # stdout is restored even if the redirected code raises.
            saved_stdout = sys.stdout
            sys.stdout = eval_file
            try:
                print("\nevaluation epoch {}".format(epoch_num))
                # The per-dataset evaluation call that used to live here was
                # disabled (no-op commented block) and has been removed.
            finally:
                sys.stdout = saved_stdout
            eval_file.flush()

    print("\n")
Example #3
0
import tensorflow as tf
from tqdm import tqdm
from utils.py_utils import misc

# NOTE(review): this example appears truncated — the multi-device branch
# below ends right after the TF session config is created, so the rest of
# the script is not visible here.
if __name__ == '__main__':

    # Parse CLI arguments and normalize the device specification.
    parser = make_parser()
    args = parser.parse_args()
    args.devices = misc.parse_devices(args.devices)
    # An end_epoch of -1 means "evaluate only start_epoch".
    if args.end_epoch == -1:
        args.end_epoch = args.start_epoch

    devs = args.devices.split(',')
    misc.ensure_dir(config.eval_dir)
    # Results are appended, so repeated runs accumulate in one log file.
    eval_file = open(os.path.join(config.eval_dir, 'results.txt'), 'a')
    dataset_dict = dataset.val_dataset()
    records = dataset_dict['records']
    nr_records = len(records)
    read_func = dataset_dict['read_func']

    nr_devs = len(devs)
    for epoch_num in range(args.start_epoch, args.end_epoch + 1):
        model_file = osp.join(config.output_dir, 'model_dump',
                              'epoch_{:d}'.format(epoch_num) + '.ckpt')
        pbar = tqdm(total=nr_records)
        all_results = []

        if nr_devs > 1:
            # tf.AUTO_REUSE lets the same variable scope be re-entered
            # without raising; presumably set up for multi-device graph
            # construction — remainder not visible in this view.
            with tf.variable_scope('', reuse=tf.AUTO_REUSE):
                os.environ["CUDA_VISIBLE_DEVICES"] = devs[0]
                tfconfig = tf.ConfigProto(allow_soft_placement=True)
Example #4
0
def eval_all(args):
    """Evaluate checkpoints for every epoch in the requested range.

    Runs the validation set through each epoch's checkpoint — in-process
    when a single device is given, otherwise with one worker process per
    device — then saves the detections and appends evaluation output to
    ``results.txt`` under ``config.eval_dir``.
    """
    devs = args.devices.split(',')
    nr_devs = len(devs)
    misc.ensure_dir(config.eval_dir)
    eval_file = open(os.path.join(config.eval_dir, 'results.txt'), 'a')

    dataset_dict = dataset.val_dataset()
    records = dataset_dict['records']
    nr_records = len(records)
    read_func = dataset_dict['read_func']

    for epoch_num in range(args.start_epoch, args.end_epoch + 1):
        ckpt_name = 'epoch_{:d}'.format(epoch_num) + '.ckpt'
        model_file = osp.join(config.output_dir, 'model_dump', ckpt_name)

        pbar = tqdm(total=nr_records)
        all_results = []
        if nr_devs == 1:
            # One device: run inference synchronously in this process.
            func, inputs = load_model(model_file, devs[0])
            for record in records:
                data_dict = read_func(record)
                result_dict = inference(func, inputs, data_dict)
                all_results.append(result_dict)

                if args.show_image:
                    image = result_dict['data']
                    # Draw confident detections, then any ground truth.
                    for db in result_dict['result_boxes']:
                        if db.score > config.test_vis_threshold:
                            db.draw(image)
                    if 'boxes' in result_dict:
                        for db in result_dict['boxes']:
                            db.draw(image)
                    cv2.imwrite('/tmp/hehe.png', image)
                    # cv2.imshow('image', image)
                    # cv2.waitKey(0)
                pbar.update(1)
        else:
            # Several devices: shard the records and fan out workers.
            per_dev = math.ceil(nr_records / nr_devs)
            result_queue = Queue(500)
            procs = []
            for idx, dev in enumerate(devs):
                lo = idx * per_dev
                hi = min(lo + per_dev, nr_records)
                proc = Process(target=worker, args=(
                    model_file, dev, records[lo:hi], read_func,
                    result_queue))
                print('process:%d, start:%d, end:%d' % (idx, lo, hi))
                proc.start()
                procs.append(proc)
            # Drain exactly one result per record, then reap the workers.
            for _ in range(nr_records):
                all_results.append(result_queue.get())
                pbar.update(1)
            for p in procs:
                p.join()

        save_filename = save_result(all_results, config.eval_dir, model_file)
        print('Save to %s finished, start evaulation!' % save_filename)
        # Temporarily route prints into the results file.
        saved_stdout = sys.stdout
        sys.stdout = eval_file
        print("\nevaluation epoch {}".format(epoch_num))

        if config.test_save_type == 'coco':
            from datasets_odgt.cocoval import cocoval
            cocoval(save_filename)
        else:
            print("not implement")
            embed()
        sys.stdout = saved_stdout
        eval_file.flush()

    eval_file.close()
    print("\n")
Example #5
0
def eval_all(args):
    """Run inference for each checkpoint epoch and log evaluation results.

    For every epoch in [args.start_epoch, args.end_epoch], restores the
    checkpoint ``epoch_{epoch}.ckpt-{args.step}``, evaluates the whole
    validation set (single process, or one worker process per device),
    saves the detections, and appends evaluation output to ``results.txt``.
    """
    devs = args.devices.split(',')
    misc.ensure_dir(config.eval_dir)
    eval_file = open(os.path.join(config.eval_dir, 'results.txt'), 'a')

    '''e.g. {
        'record': files,
        'nr_records': total_files,
        'read_func': read_func,
    }'''
    dataset_dict = dataset.val_dataset()
    records = dataset_dict['records']
    nr_records = len(records)
    read_func = dataset_dict['read_func'] # per-record data loading/processing

    nr_devs = len(devs)
    for epoch_num in range(args.start_epoch, args.end_epoch + 1):
        model_file = osp.join(
            config.output_dir, 'model_dump',
            'epoch_{:d}.ckpt-{:d}'.format(epoch_num, args.step))

        pbar = tqdm(total=nr_records)
        all_results = []
        if nr_devs == 1:
            func, inputs = load_model(model_file, devs[0])
            for record in records:
                data_dict = read_func(record) # dict describing one test image
                result_dict = inference(func, inputs, data_dict)
                all_results.append(result_dict)

                # result_dict holds both ground truth and detections:
                # result_dict['boxes'] --> ground-truth boxes
                # result_dict['result_boxes'] --> detected boxes
                if args.show_image:
                    image = result_dict['data']
                    # Draw the detections; each db object carries its score.
                    for db in result_dict['result_boxes']:
                        if db.score > config.test_vis_threshold:
                            db.draw(image)

                    # Draw the ground-truth boxes as well, when present.
                    if 'boxes' in result_dict.keys():
                        for db in result_dict['boxes']:
                            db.draw(image)

                    fname = os.path.basename(data_dict['image_path'])
                    cv2.imwrite(os.path.join('images', fname), image)
                    # cv2.imshow('image', image)
                    # cv2.waitKey(0)
                # pbar.update(1)
                # NOTE(review): the progress bar is never updated in this
                # single-device path (update() is commented out above).
        else:
            # Shard records evenly across devices; each worker pushes its
            # per-image results onto a shared queue.
            nr_image = math.ceil(nr_records / nr_devs)
            result_queue = Queue(500)
            procs = []
            for i in range(nr_devs):
                start = i * nr_image
                end = min(start + nr_image, nr_records)
                split_records = records[start:end]
                proc = Process(target=worker, args=(
                    model_file, devs[i], split_records, read_func,
                    result_queue))
                print('process:%d, start:%d, end:%d' % (i, start, end))
                proc.start()
                procs.append(proc)
            for i in range(nr_records):
                t = result_queue.get()
                all_results.append(t)
                pbar.update(1)

            for p in procs:
                p.join()

        save_filename = save_result(all_results, config.eval_dir, model_file)
        print('Save to %s finished, start evaulation!' % save_filename)
        # Redirect prints into the results file for the evaluation step.
        saved_stdout = sys.stdout
        sys.stdout = eval_file
        print("\nevaluation epoch {}".format(epoch_num))

        if config.test_save_type == 'adas':
            from datasets_odgt.cocoval import cocoval
            cocoval(save_filename)
        else:
            print("not implement")
            embed()
        sys.stdout = saved_stdout
        eval_file.flush()

    eval_file.close()
    print("\n")
Example #6
0
def eval_all(args):
    """Evaluate a single checkpoint (``light-head.ckpt-{args.step}``).

    Runs the validation set through the restored model (in-process for a
    single device, otherwise one worker process per device), saves the
    results via ``save_result_v2``, and runs the dataset-specific
    evaluation.
    """
    devs = args.devices.split(',')
    misc.ensure_dir(config.eval_dir)

    '''e.g. {
        'record': files,
        'nr_records': total_files,
        'read_func': read_func,
    }'''
    dataset_dict = dataset.val_dataset()
    records = dataset_dict['records']
    nr_records = len(records)
    read_func = dataset_dict['read_func'] # per-record data loading/processing

    nr_devs = len(devs)
    model_file = osp.join(
        config.output_dir, 'model_dump',
        'light-head.ckpt-{:d}'.format(args.step))
  
    #pbar = tqdm(total=nr_records)
    all_results = []
    if nr_devs == 1:
        print('restore from ckpt %s' % model_file)
        func, inputs = load_model(model_file, devs[0])
        num_records = len(records)
        for i in range(num_records):
            record = records[i]
            '''data_dict, e.g. {
                'data': xx,
                'boxes': xx,
                'image_id': xx,
                'image_path': xx,
            }
            '''
            # Plain-text progress line (the tqdm bar above is disabled).
            sys.stdout.write('im_detect: {:d}/{:d}\r'.format(i, num_records))
            data_dict = read_func(record) # dict describing one test image
            result_dict = inference(func, inputs, data_dict)
            all_results.append(result_dict)
            # result_dict holds both ground truth and detections:
            # result_dict['boxes'] --> ground-truth boxes
            # result_dict['result_boxes'] --> detected boxes
            if args.show_image:
                image = result_dict['data']
                # Draw the detections; each db object carries its score.
                for db in result_dict['result_boxes']:
                    if db.score > config.test_vis_threshold:
                        # See lib/detection_opr/box_utils/box.py
                        db.draw(image)

                # Draw the ground-truth boxes as well, when present.
                if 'boxes' in result_dict.keys():
                    for db in result_dict['boxes']:
                        db.draw(image)

                fname = os.path.basename(data_dict['image_path'])
                cv2.imwrite(os.path.join('eval', fname), image)
                # cv2.imshow('image', image)
                # cv2.waitKey(0)
            # pbar.update(1)
    else:
        # Shard records evenly across devices; each worker pushes its
        # per-image results onto a shared queue.
        nr_image = math.ceil(nr_records / nr_devs)
        result_queue = Queue(500)
        procs = []
        for i in range(nr_devs):
            start = i * nr_image
            end = min(start + nr_image, nr_records)
            split_records = records[start:end]
            proc = Process(target=worker, args=(
                model_file, devs[i], split_records, read_func,
                result_queue))
            print('process:%d, start:%d, end:%d' % (i, start, end))
            proc.start()
            procs.append(proc)
        for i in range(nr_records):
            t = result_queue.get()
            all_results.append(t)
            #pbar.update(1)

        for p in procs:
            p.join()

    save_result_v2(all_results, config.eval_dir)
    print('Save result finished, start evaulation!')
    if config.test_save_type == 'adas':
        from datasets_odgt.adasval import adasval
        # NOTE(review): `cfg` is not defined anywhere in this view —
        # possibly should be `config` or the saved-result path; confirm.
        adasval(cfg)
    else:
        print("not implement")
        embed()