Example #1
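# COCO detection example: build the train/val DataLoaders (shorter side resized to 800,
# longer side capped at 1333, padded to a multiple of 32) and a COCOEvaluator matched to
# the validation annotations.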
def prepare_data_pipeline():
    # batch size
    config_dict['batch_size'] = 4

    # number of train data_loader workers
    config_dict['num_train_workers'] = 4

    # number of val data_loader workers
    config_dict['num_val_workers'] = 4

    # construct train data_loader
    config_dict['train_dataset_path'] = './COCO_pack/coco_train2017.pkl'
    train_dataset = Dataset(load_path=config_dict['train_dataset_path'])

    train_dataset_sampler = COCORandomDatasetSampler(
        dataset=train_dataset,
        batch_size=config_dict['batch_size'],
        shuffle=True,
    )

    train_region_sampler = TypicalCOCOTrainingRegionSampler(
        resize_shorter_range=(800, ), resize_longer_limit=1333, pad_divisor=32)

    config_dict['train_data_loader'] = DataLoader(
        dataset=train_dataset,
        dataset_sampler=train_dataset_sampler,
        region_sampler=train_region_sampler,
        augmentation_pipeline=typical_coco_train_pipeline,
        num_workers=config_dict['num_train_workers'])

    # construct val data_loader
    config_dict['val_dataset_path'] = './COCO_pack/coco_val2017.pkl'
    val_dataset = Dataset(load_path=config_dict['val_dataset_path'])
    val_dataset_sampler = RandomDatasetSampler(
        dataset=val_dataset,
        batch_size=config_dict['batch_size'],
        shuffle=False,
        ignore_last=False)
    val_region_sampler = TypicalCOCOTrainingRegionSampler(
        resize_shorter_range=(800, ), resize_longer_limit=1333, pad_divisor=32)
    config_dict['val_data_loader'] = DataLoader(
        dataset=val_dataset,
        dataset_sampler=val_dataset_sampler,
        region_sampler=val_region_sampler,
        augmentation_pipeline=typical_coco_val_pipeline,
        num_workers=config_dict['num_val_workers'])

    # evaluator
    # the evaluator should match the dataset
    config_dict['val_annotation_path'] = '/home/yonghaohe/datasets/COCO/annotations/instances_val2017.json'
    config_dict['evaluator'] = COCOEvaluator(
        annotation_path=config_dict['val_annotation_path'],
        label_indexes_to_category_ids=val_dataset.meta_info['label_indexes_to_category_ids'])
Example #2
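# WIDERFACE training example: RandomWithNegDatasetSampler mixes in negative samples
# (neg_ratio=0.2) and RandomBBoxCropRegionSampler takes 480x480 crops with random
# resizing in [0.5, 1.5]; no validation loader is configured in this example.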
def prepare_data_pipeline():
    # batch size
    config_dict['batch_size'] = 64

    # number of train data_loader workers
    config_dict['num_train_workers'] = 12

    # number of val data_loader workers
    config_dict['num_val_workers'] = 0

    # construct train data_loader
    config_dict['train_dataset_path'] = './WIDERFACE_pack/widerface_train.pkl'
    train_dataset = Dataset(load_path=config_dict['train_dataset_path'])
    train_dataset_sampler = RandomWithNegDatasetSampler(
        train_dataset,
        batch_size=config_dict['batch_size'],
        neg_ratio=0.2,
        shuffle=True,
        ignore_last=False)

    train_region_sampler = RandomBBoxCropRegionSampler(crop_size=480,
                                                       resize_range=(0.5, 1.5),
                                                       resize_prob=0.5)

    config_dict['train_data_loader'] = DataLoader(
        dataset=train_dataset,
        dataset_sampler=train_dataset_sampler,
        region_sampler=train_region_sampler,
        augmentation_pipeline=simple_widerface_train_pipeline,
        num_workers=config_dict['num_train_workers'])
Example #3
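# WIDERFACE training example with range selection: crops are chosen according to the
# detection scale ranges in config_dict['detection_scales'], each range sampled with
# equal probability.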
def prepare_data_pipeline():
    # batch size
    config_dict['batch_size'] = 12

    # number of train data_loader workers
    config_dict['num_train_workers'] = 6

    # number of val data_loader workers
    config_dict['num_val_workers'] = 0

    # construct train data_loader
    config_dict['train_dataset_path'] = './WIDERFACE_pack/widerface_train.pkl'
    train_dataset = Dataset(load_path=config_dict['train_dataset_path'])
    train_dataset_sampler = RandomWithNegDatasetSampler(
        train_dataset,
        batch_size=config_dict['batch_size'],
        neg_ratio=0.2,
        shuffle=True,
        ignore_last=False)

    train_region_sampler = RandomBBoxCropWithRangeSelectionRegionSampler(
        crop_size=480,
        detection_ranges=config_dict['detection_scales'],
        range_selection_probs=[1, 1, 1, 1, 1],
        lock_threshold=30)

    config_dict['train_data_loader'] = DataLoader(
        dataset=train_dataset,
        dataset_sampler=train_dataset_sampler,
        region_sampler=train_region_sampler,
        augmentation_pipeline=simple_widerface_train_pipeline,
        num_workers=config_dict['num_train_workers'])
Example #4
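# Template example: the dataset paths are placeholders ('xxxxxxxxx'); the validation
# loader uses IdleRegionSampler, which presumably passes images through without cropping.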
def prepare_data_pipeline():
    # batch size
    config_dict['batch_size'] = 16

    # number of train data_loader workers
    config_dict['num_train_workers'] = 4

    # number of val data_loader workers
    config_dict['num_val_workers'] = 0

    # construct train data_loader
    config_dict['train_dataset_path'] = 'xxxxxxxxx'
    train_dataset = Dataset(load_path=config_dict['train_dataset_path'])
    train_dataset_sampler = RandomWithNegDatasetSampler(
        train_dataset,
        batch_size=config_dict['batch_size'],
        neg_ratio=0.1,
        shuffle=True,
        ignore_last=False)
    train_region_sampler = RandomBBoxCropRegionSampler(crop_size=512,
                                                       resize_range=(0.5, 1.5))
    config_dict['train_data_loader'] = DataLoader(
        dataset=train_dataset,
        dataset_sampler=train_dataset_sampler,
        region_sampler=train_region_sampler,
        augmentation_pipeline=simple_widerface_train_pipeline,
        num_workers=config_dict['num_train_workers'])

    # construct val data_loader
    config_dict['val_dataset_path'] = 'xxxxxxxxxx'
    val_dataset = Dataset(load_path=config_dict['val_dataset_path'])
    val_dataset_sampler = RandomDatasetSampler(
        dataset=val_dataset,
        batch_size=config_dict['batch_size'],
        shuffle=False,
        ignore_last=False)
    val_region_sampler = IdleRegionSampler()
    config_dict['val_data_loader'] = DataLoader(
        dataset=val_dataset,
        dataset_sampler=val_dataset_sampler,
        region_sampler=val_region_sampler,
        augmentation_pipeline=simple_widerface_val_pipeline,
        num_workers=config_dict['num_val_workers'])
Example #5
def prepare_data_pipeline():
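    # Debug/smoke-test configuration: train on a small local data pack; validation is
    # disabled (val_interval = 0, evaluator = None) and the val loader is commented out.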
    # batch size
    config_dict['batch_size'] = 4

    # number of train data_loader workers
    config_dict['num_train_workers'] = 4

    # number of val data_loader workers
    config_dict['num_val_workers'] = 4

    # construct train data_loader
    config_dict['train_dataset_path'] = './debug_data/train.pkl'
    train_dataset = Dataset(load_path=config_dict['train_dataset_path'])
    train_dataset_sampler = RandomWithNegDatasetSampler(
        train_dataset,
        batch_size=config_dict['batch_size'],
        neg_ratio=0.2,
        shuffle=True,
        ignore_last=False)

    train_region_sampler = RandomBBoxCropRegionSampler(crop_size=640,
                                                       resize_range=(0.5, 1.5),
                                                       resize_prob=0.5)

    config_dict['train_data_loader'] = DataLoader(
        dataset=train_dataset,
        dataset_sampler=train_dataset_sampler,
        region_sampler=train_region_sampler,
        augmentation_pipeline=train_pipeline,
        num_workers=config_dict['num_train_workers'])

    # construct val data_loader
    # config_dict['val_dataset_path'] = './debug_data/train.pkl'
    # val_dataset = Dataset(load_path=config_dict['val_dataset_path'])
    # val_dataset_sampler = RandomDatasetSampler(dataset=val_dataset,
    #                                            batch_size=config_dict['batch_size'],
    #                                            shuffle=False,
    #                                            ignore_last=False)
    # val_region_sampler = IdleRegionSampler()
    # config_dict['val_data_loader'] = DataLoader(dataset=val_dataset,
    #                                             dataset_sampler=val_dataset_sampler,
    #                                             region_sampler=val_region_sampler,
    #                                             augmentation_pipeline=val_pipeline,
    #                                             num_workers=config_dict['num_val_workers'])

    # evaluator
    # the evaluator should match the dataset
    # validation interval in epochs
    config_dict['val_interval'] = 0
    # config_dict['val_annotation_path'] = './debug_data/annotations/instances_train2017.json'
    config_dict['evaluator'] = None  # COCOEvaluator(annotation_path=config_dict['val_annotation_path'],
Example #6
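# Offline TT100K evaluation: load a trained model, predict every test image, dump the
# results to a TT100K-style JSON file, then score it with the official evaluation code
# (official_eval). The function assumes the usual imports of the enclosing config script
# (os, cv2, json, Dataset, load_checkpoint, simple_widerface_val_pipeline).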
def evaluate():
    #  set model to be evaluated ----------------------------------------------------------
    from TT100K_LFD_S_work_dir_20210127_170801.TT100K_LFD_S import config_dict, prepare_model
    weight_file_path = './TT100K_LFD_S_work_dir_20210127_170801/epoch_500.pth'
    classification_threshold = 0.1
    nms_threshold = 0.1

    prepare_model()
    load_checkpoint(config_dict['model'],
                    load_path=weight_file_path,
                    strict=True)

    # predict results and save to json
    results_json = dict()
    results_json['imgs'] = dict()
    test_image_root = '/home/yonghaohe/datasets/TT100K/data/test'
    test_image_paths_list = [
        os.path.join(test_image_root, file_name)
        for file_name in os.listdir(test_image_root)
        if file_name.endswith('.jpg')
    ]

    dataset_path = './TT100K_pack/train.pkl'
    dataset = Dataset(load_path=dataset_path)
    label_indexes_to_category_names = dataset.meta_info[
        'label_indexes_to_category_names']

    results_json_save_path = os.path.join(
        './TT100K_evaluation/',
        os.path.dirname(weight_file_path).split('/')[-1] + '_results.json')
    if not os.path.exists(results_json_save_path):

        for i, image_path in enumerate(test_image_paths_list):
            image_id = os.path.basename(image_path).split('.')[0]
            image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
            results = config_dict['model'].predict_for_single_image(
                image,
                aug_pipeline=simple_widerface_val_pipeline,
                classification_threshold=classification_threshold,
                nms_threshold=nms_threshold,
                class_agnostic=True)
            temp = dict(id=image_id, objects=list())
            for result in results:
                cat = label_indexes_to_category_names[result[0]]
                score = result[1] * 100  # make score in [0, 100]
                xmin = result[2]
                ymin = result[3]
                xmax = result[4] + result[2]
                ymax = result[5] + result[3]
                temp_bbox = dict(
                    bbox={'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax},
                    category=cat,
                    score=score)
                temp['objects'].append(temp_bbox)

            results_json['imgs'][image_id] = temp
            print('[%5d] image predicted.' % i)

        if not os.path.exists(os.path.dirname(results_json_save_path)):
            os.makedirs(os.path.dirname(results_json_save_path))
        json.dump(results_json,
                  open(results_json_save_path, 'w'),
                  indent=4,
                  ensure_ascii=False)

    # evaluate
    gt_annotation_json_path = '/home/yonghaohe/datasets/TT100K/data/annotations.json'
    gt_json = json.load(open(gt_annotation_json_path, 'r'))

    results_json = json.load(open(results_json_save_path, 'r'))

    eval_result = official_eval.eval_annos(annos_gd=gt_json,
                                           annos_rt=results_json,
                                           iou=0.5,
                                           imgids=None,
                                           check_type=True,
                                           types=official_eval.type45,
                                           minscore=90,
                                           minboxsize=0,
                                           maxboxsize=400,
                                           match_same=True)
    print(eval_result['report'])
Example #7
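# Single-image inference demo: load a trained TT100K model and print its predictions
# for one test image.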
from lfd.execution.utils import load_checkpoint
from lfd.data_pipeline.augmentation import *
from lfd.data_pipeline.dataset import Dataset
import cv2

from TT100K_LFD_S_work_dir_20210127_170801.TT100K_LFD_S import config_dict, prepare_model

prepare_model()

param_file_path = './TT100K_LFD_S_work_dir_20210127_170801/epoch_500.pth'

load_checkpoint(config_dict['model'], load_path=param_file_path, strict=True)

dataset_path = './TT100K_pack/train.pkl'
dataset = Dataset(load_path=dataset_path)
label_indexes_to_category_names = dataset.meta_info[
    'label_indexes_to_category_names']

image_path = '/home/yonghaohe/datasets/TT100K/data/test/66449.jpg'
image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)

results = config_dict['model'].predict_for_single_image(
    image,
    aug_pipeline=simple_widerface_val_pipeline,
    classification_threshold=0.5,
    nms_threshold=0.1,
    class_agnostic=True)
for bbox in results:
    category_name = label_indexes_to_category_names[bbox[0]]
    print(category_name, bbox)
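
# A possible follow-up (not part of the original example), sketching how the printed
# results could be drawn with OpenCV. It assumes each result has the layout
# [label_index, score, x, y, width, height], as used in the TT100K evaluate() example.
for bbox in results:
    category_name = label_indexes_to_category_names[bbox[0]]
    x, y, w, h = int(bbox[2]), int(bbox[3]), int(bbox[4]), int(bbox[5])
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.putText(image, '%s %.2f' % (category_name, bbox[1]), (x, max(y - 5, 0)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
cv2.imwrite('./66449_predicted.jpg', image)  # hypothetical output path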
Example #8
# -*- coding: utf-8 -*-
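# Pack COCO-format annotations into LFD Dataset pickle files for the train and val splits.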
from lfd.data_pipeline.dataset import Dataset
from lfd.data_pipeline.dataset.coco_parser import COCOParser

train_annotation_file_path = './debug_data/annotations/instances_train2017.json'
train_image_root = './debug_data/images/train2017'
train_pkl_save_path = './debug_data/train.pkl'

train_parser = COCOParser(coco_annotation_path=train_annotation_file_path,
                          image_root=train_image_root,
                          filter_no_gt=False,
                          filter_min_size=32)

train_dataset = Dataset(parser=train_parser, save_path=train_pkl_save_path)

val_annotation_file_path = './debug_data/annotations/instances_val2017.json'
val_image_root = './debug_data/images/val2017'
val_pkl_save_path = './debug_data/val.pkl'

val_parser = COCOParser(coco_annotation_path=val_annotation_file_path,
                        image_root=val_image_root,
                        filter_no_gt=False,
                        filter_min_size=32)

val_dataset = Dataset(parser=val_parser, save_path=val_pkl_save_path)
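
# Optional check (a sketch, not in the original): the packed datasets expose meta_info,
# e.g. the label-index-to-category-id mapping that COCOEvaluator consumes in the other
# examples.
print(train_dataset.meta_info['label_indexes_to_category_ids'])
print(val_dataset.meta_info['label_indexes_to_category_ids'])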
Example #9
import os

from pycocotools.coco import COCO

from lfd.execution.utils import load_checkpoint
from lfd.data_pipeline.dataset import Dataset
# COCOEvaluator import path assumed here; adjust to wherever lfd exposes it in your install
from lfd.evaluation import COCOEvaluator
from TL_augmentation_pipeline import *
from TL_LFD_L_work_dir_20210714_173824.TL_LFD_L import config_dict, prepare_model

prepare_model()
param_file_path = './TL_LFD_L_work_dir_20210714_173824/epoch_100.pth'
load_checkpoint(config_dict['model'], load_path=param_file_path, strict=True)
classification_threshold = 0.1
nms_threshold = 0.3


val_annotation_path = './debug_data/annotations/instances_train2017.json'
val_image_root = './debug_data/images/train2017'
val_dataset_pkl = './debug_data/train.pkl'

val_dataset = Dataset(load_path=val_dataset_pkl)
label_indexes_to_category_ids = val_dataset.meta_info['label_indexes_to_category_ids']

coco_evaluator = COCOEvaluator(annotation_path=val_annotation_path,
                               label_indexes_to_category_ids=label_indexes_to_category_ids)


coco = COCO(annotation_file=val_annotation_path)

image_ids = coco.getImgIds()

for i, image_id in enumerate(image_ids):
    image_info = coco.loadImgs(image_id)

    image_path = os.path.join(val_image_root, image_info[0]['file_name'])